All Products
Search
Document Center

Object Storage Service:Uploader (Go SDK V2)

Last Updated:Mar 20, 2026

The Uploader is a high-level abstraction for uploading files and streams to OSS. Internally, it uses multipart upload to split large files into parts and uploads them concurrently. For uploads that may be interrupted, the Uploader supports resumable upload: it records which parts have completed, so the next attempt can pick up from where it left off.

Prerequisites

Before you begin, make sure you have:

  • An Alibaba Cloud account with OSS activated, and a bucket created in your target region.

  • Access credentials (an AccessKey ID and AccessKey secret) configured as environment variables, as expected by the SDK's environment-variable credentials provider.

  • The OSS Go SDK V2 installed: go get github.com/aliyun/alibabacloud-oss-go-sdk-v2

Usage notes

  • The sample code uses region ID cn-hangzhou. By default, a public endpoint is used. To access bucket resources from other Alibaba Cloud services in the same region, use the internal endpoint instead. For more information, see Regions and endpoints.

  • Access credentials in all examples are loaded from environment variables.

API reference

// Uploader performs multipart uploads with configurable part size,
// concurrency, and optional resumable checkpoints. Construct it via
// Client.NewUploader rather than directly.
type Uploader struct { ... }

// NewUploader creates an Uploader. Pass option functions to customize behavior.
// Options set here apply to every upload made through the returned Uploader.
func (c *Client) NewUploader(optFns ...func(*UploaderOptions)) *Uploader

// UploadFrom uploads from a stream (io.Reader). Resumable upload
// (EnableCheckpoint) is not available for streams.
func (u *Uploader) UploadFrom(ctx context.Context, request *PutObjectRequest, body io.Reader, optFns ...func(*UploaderOptions)) (*UploadResult, error)

// UploadFile uploads a local file by path. Supports resumable upload via
// EnableCheckpoint.
func (u *Uploader) UploadFile(ctx context.Context, request *PutObjectRequest, filePath string, optFns ...func(*UploaderOptions)) (*UploadResult, error)

Parameters

| Parameter | Type | Description |
| --- | --- | --- |
| ctx | context.Context | Request context |
| request | *PutObjectRequest | Object metadata and upload settings. Accepts the same parameters as the PutObject operation. See PutObjectRequest. |
| body | io.Reader | The stream to upload (UploadFrom only). If body implements only io.Reader, data must be buffered in memory. If body also implements io.Seeker and io.ReaderAt, no buffering is needed. |
| filePath | string | Path to the local file (UploadFile only) |
| optFns | ...func(*UploaderOptions) | Optional configuration functions. Can be set at the Uploader level or per call. |

UploaderOptions

| Option | Type | Default | Description |
| --- | --- | --- | --- |
| PartSize | int64 | 6 MiB | Size of each uploaded part |
| ParallelNum | int | 3 | Number of parts uploaded concurrently. Applies per call, not globally. |
| LeavePartsOnError | bool | false | When true, parts already uploaded are kept in OSS if the upload fails, which allows for manual recovery. When false, all uploaded parts are deleted on failure. |
| EnableCheckpoint | bool | false | Enables resumable upload. Supported by UploadFile only — not available with UploadFrom. |
| CheckpointDir | string | (none) | Directory where checkpoint files are saved, for example /local/dir/. This parameter is valid only when EnableCheckpoint is true. |

Options can be set at two levels:

  • Uploader level — applies to all uploads from that instance:

      // Uploader-level option: every upload made through u uses 10 MiB parts.
      u := client.NewUploader(func(uo *oss.UploaderOptions) {
          uo.PartSize = 10 * 1024 * 1024 // 10 MiB
      })
  • Per-call level — overrides Uploader-level options for a single upload:

      request := &oss.PutObjectRequest{Bucket: oss.Ptr("bucket"), Key: oss.Ptr("key")}
      // Per-call option: the 10 MiB part size applies to this single upload
      // and overrides any Uploader-level PartSize.
      result, err := u.UploadFile(context.TODO(), request, "/local/dir/example", func(uo *oss.UploaderOptions) {
          uo.PartSize = 10 * 1024 * 1024
      })

Upload a local file

The following example uploads a local file to OSS using the default Uploader configuration.

package main

import (
	"context"
	"flag"
	"log"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
)

// Command-line arguments, populated by the flags registered in init.
var (
	region     string // Region ID, for example cn-hangzhou.
	bucketName string // Target bucket name.
	objectName string // Destination object key.
)

// init registers the command-line flags. Values are read in main after
// flag.Parse(); all three are required (validated in main).
func init() {
	flag.StringVar(&region, "region", "", "The region where the bucket is located.")
	flag.StringVar(&bucketName, "bucket", "", "The bucket name.")
	flag.StringVar(&objectName, "object", "", "The object name.")
}

// main parses the command-line flags, builds an OSS client with credentials
// loaded from environment variables, and uploads one local file using the
// default Uploader configuration.
func main() {
	flag.Parse()

	// All three flags are mandatory; report the first missing one and exit.
	for _, arg := range []struct{ label, value string }{
		{"bucket name", bucketName},
		{"region", region},
		{"object name", objectName},
	} {
		if arg.value == "" {
			flag.PrintDefaults()
			log.Fatalf("invalid parameters, %s required", arg.label)
		}
	}

	// Load credentials from environment variables and set the region.
	cfg := oss.LoadDefaultConfig().
		WithCredentialsProvider(credentials.NewEnvironmentVariableCredentialsProvider()).
		WithRegion(region)

	client := oss.NewClient(cfg)

	// Default Uploader options: 6 MiB part size, 3 concurrent part uploads.
	uploader := client.NewUploader()

	// Replace with the actual path to your local file.
	localFile := "/path/to/local/file"

	res, err := uploader.UploadFile(context.TODO(),
		&oss.PutObjectRequest{
			Bucket: oss.Ptr(bucketName),
			Key:    oss.Ptr(objectName),
		},
		localFile)
	if err != nil {
		log.Fatalf("failed to upload file: %v", err)
	}

	log.Printf("upload result: %#v\n", res)
}

Common scenarios

The following examples build on the setup from Upload a local file. They show only the Uploader configuration and the upload call — omit the flag parsing, credential loading, and client creation shown in that example.

Enable resumable upload

Resumable upload saves the progress of each part to a checkpoint file. If the upload is interrupted, the next UploadFile call for the same file resumes from the last completed part instead of restarting.

Note

EnableCheckpoint is supported by UploadFile only. It has no effect on UploadFrom.

// Enable resumable upload: progress is written to a checkpoint file under
// CheckpointDir, so re-running UploadFile for the same file resumes from the
// last completed part. EnableCheckpoint applies to UploadFile only.
// NOTE(review): the checkpoint directory is presumably expected to exist
// beforehand — confirm against the SDK documentation.
u := client.NewUploader(func(uo *oss.UploaderOptions) {
	uo.EnableCheckpoint = true
	uo.CheckpointDir = "/path/to/checkpoint/dir/" // Directory to store checkpoint files.
})

result, err := u.UploadFile(context.TODO(),
	&oss.PutObjectRequest{
		Bucket: oss.Ptr(bucketName),
		Key:    oss.Ptr(objectName),
	},
	localFile)
if err != nil {
	log.Fatalf("failed to upload file: %v", err)
}

log.Printf("upload result: %#v\n", result)

Upload a file stream

Use UploadFrom to upload from any io.Reader source, such as an in-memory buffer, a network stream, or an open file handle.

// Open a local file and wrap it as an io.Reader.
// NOTE(review): *os.File also implements io.Seeker and io.ReaderAt, so per
// the parameter table above the stream should not need in-memory buffering;
// a source that is only an io.Reader (e.g. a network stream) will be
// buffered — confirm against the SDK behavior.
file, err := os.Open("/path/to/local/file")
if err != nil {
	log.Fatalf("failed to open file: %v", err)
}
// Release the file handle when this scope exits.
defer file.Close()

var r io.Reader = file

// UploadFrom reads the stream to completion and uploads it in parts.
result, err := u.UploadFrom(context.TODO(),
	&oss.PutObjectRequest{
		Bucket: oss.Ptr(bucketName),
		Key:    oss.Ptr(objectName),
	},
	r)
if err != nil {
	log.Fatalf("failed to upload stream: %v", err)
}

log.Printf("upload ETag: %v\n", oss.ToString(result.ETag))

Set part size and concurrency

Tune PartSize and ParallelNum to match your network conditions and file sizes.

// Tune throughput: smaller parts begin uploading sooner, while higher
// ParallelNum uses more bandwidth and memory. ParallelNum applies per call,
// not globally across uploads.
u := client.NewUploader(func(uo *oss.UploaderOptions) {
	uo.PartSize = 5 * 1024 * 1024 // 5 MiB per part
	uo.ParallelNum = 5            // Upload 5 parts concurrently.
})

result, err := u.UploadFile(context.TODO(),
	&oss.PutObjectRequest{
		Bucket: oss.Ptr(bucketName),
		Key:    oss.Ptr(objectName),
	},
	localFile)
if err != nil {
	log.Fatalf("failed to upload file: %v", err)
}

log.Printf("upload result: %#v\n", result)

Set upload callbacks

To notify your application server when an upload completes, include callback parameters in the PutObjectRequest. OSS sends an HTTP request to your callback URL after the object is stored.

// Callback parameters are JSON-encoded and then Base64-encoded before being
// attached to the request, so these two packages are needed in addition to
// the base example's imports.
import (
	"encoding/base64"
	"encoding/json"
)

u := client.NewUploader(func(uo *oss.UploaderOptions) {
	uo.PartSize = 5 * 1024 * 1024
	uo.ParallelNum = 5
})

// Build callback parameters. Placeholders such as ${bucket} and ${object}
// are substituted by OSS when it posts to callbackUrl; ${x:my_var1}-style
// placeholders are filled from the custom variables below.
callbackMap := map[string]string{
	"callbackUrl":      "https://example.com:23450/callback",
	"callbackBody":     "bucket=${bucket}&object=${object}&size=${size}&my_var_1=${x:my_var1}&my_var_2=${x:my_var2}",
	"callbackBodyType": "application/x-www-form-urlencoded",
}

callbackStr, err := json.Marshal(callbackMap)
if err != nil {
	log.Fatalf("failed to marshal callback: %v", err)
}
callbackBase64 := base64.StdEncoding.EncodeToString(callbackStr)

// Build custom callback variables. Keys must use the "x:" prefix to match
// the placeholders in callbackBody.
callbackVarMap := map[string]string{
	"x:my_var1": "value1",
	"x:my_var2": "value2",
}
callbackVarStr, err := json.Marshal(callbackVarMap)
if err != nil {
	log.Fatalf("failed to marshal callback vars: %v", err)
}
callbackVarBase64 := base64.StdEncoding.EncodeToString(callbackVarStr)

// Attach the encoded callback and variables to the upload request.
result, err := u.UploadFile(context.TODO(),
	&oss.PutObjectRequest{
		Bucket:      oss.Ptr(bucketName),
		Key:         oss.Ptr(objectName),
		Callback:    oss.Ptr(callbackBase64),
		CallbackVar: oss.Ptr(callbackVarBase64),
	},
	localFile)
if err != nil {
	log.Fatalf("failed to upload file: %v", err)
}

log.Printf("upload result: %#v\n", result)

Track upload progress

Set ProgressFn in PutObjectRequest to receive incremental progress updates. The function receives three values: bytes uploaded in the current callback, total bytes uploaded so far, and the total file size.

// NOTE(review): this snippet calls fmt.Printf, so the surrounding program
// must also import "fmt" (the base example's import list does not include it).
u := client.NewUploader(func(uo *oss.UploaderOptions) {
	uo.PartSize = 5 * 1024 * 1024
	uo.ParallelNum = 3
})

result, err := u.UploadFile(context.TODO(),
	&oss.PutObjectRequest{
		Bucket: oss.Ptr(bucketName),
		Key:    oss.Ptr(objectName),
		// ProgressFn arguments: bytes reported in this callback, total bytes
		// uploaded so far, and the total size of the file.
		ProgressFn: func(increment, transferred, total int64) {
			fmt.Printf("increment: %v, transferred: %v, total: %v\n", increment, transferred, total)
		},
	},
	localFile)
if err != nil {
	log.Fatalf("failed to upload file: %v", err)
}

log.Printf("upload result: %#v\n", result)

What's next