All Products
Search
Document Center

Object Storage Service:Multipart upload (Go SDK V2)

Last Updated:Mar 20, 2026

Multipart upload splits a large object into multiple parts and uploads them independently. After all parts are uploaded, call CompleteMultipartUpload to assemble them into a complete object.

Prerequisites

Before you begin, make sure you have created a bucket, installed OSS SDK for Go V2, and configured access credentials in the OSS_ACCESS_KEY_ID and OSS_ACCESS_KEY_SECRET environment variables — the examples read credentials with the environment variable credentials provider.

How it works

A multipart upload consists of three steps:

  1. Initiate — Call Client.InitiateMultipartUpload to get a unique upload ID from OSS.

  2. Upload parts — Call Client.UploadPart to upload each part using that upload ID.

  3. Complete — Call Client.CompleteMultipartUpload to assemble all uploaded parts into a final object.

Part numbering behavior:

  • Part numbers define the order of parts in the final object. Uploading a new part with an existing part number overwrites the original.

  • OSS returns the MD5 hash of each uploaded part in the ETag response header. If an upload request includes a Content-MD5 hash computed by OSS SDK for Go, OSS validates the received data against that hash and returns InvalidDigest if they differ. For more information, see Can I use ETag values as OSS MD5 hashes to check data consistency.

The sample code uses region cn-hangzhou and the public endpoint. To access OSS from another Alibaba Cloud service in the same region, switch to the internal endpoint. For available regions and endpoints, see Regions and endpoints.

Upload a local file

The following example reads a local file into memory, splits it into 3 parts, uploads the parts concurrently using goroutines, and then assembles them.

package main

import (
	"bufio"
	"bytes"
	"context"
	"flag"
	"io"
	"log"
	"os"
	"sync"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
)

var (
	region     string
	bucketName string
	objectName string
)

func init() {
	flag.StringVar(&region, "region", "", "The region in which the bucket is located.")
	flag.StringVar(&bucketName, "bucket", "", "The name of the bucket.")
	flag.StringVar(&objectName, "object", "", "The name of the object.")
}

// main uploads a local file to OSS as a multipart upload: it splits the file
// into `count` parts, uploads the parts concurrently, and then assembles them
// into the final object with CompleteMultipartUpload.
func main() {
	flag.Parse()

	var uploadId string

	if len(bucketName) == 0 {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, bucket name required")
	}

	if len(region) == 0 {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, region required")
	}

	if len(objectName) == 0 {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, object name required")
	}

	// Credentials are read from the OSS_ACCESS_KEY_ID / OSS_ACCESS_KEY_SECRET
	// environment variables.
	cfg := oss.LoadDefaultConfig().
		WithCredentialsProvider(credentials.NewEnvironmentVariableCredentialsProvider()).
		WithRegion(region)

	client := oss.NewClient(cfg)

	// Initiate the multipart upload and get an upload ID.
	initRequest := &oss.InitiateMultipartUploadRequest{
		Bucket: oss.Ptr(bucketName),
		Key:    oss.Ptr(objectName),
	}
	initResult, err := client.InitiateMultipartUpload(context.TODO(), initRequest)
	if err != nil {
		log.Fatalf("failed to initiate multipart upload: %v", err)
	}
	log.Printf("upload ID: %s\n", *initResult.UploadId)
	uploadId = *initResult.UploadId

	var wg sync.WaitGroup
	var parts []oss.UploadPart
	var mu sync.Mutex
	count := 3

	// Read the local file into memory. Replace "yourLocalFile" with the actual file path.
	file, err := os.Open("yourLocalFile")
	if err != nil {
		log.Fatalf("failed to open file: %v", err)
	}
	defer file.Close()

	bufReader := bufio.NewReader(file)
	content, err := io.ReadAll(bufReader)
	if err != nil {
		log.Fatalf("failed to read file: %v", err)
	}
	log.Printf("file size: %d bytes\n", len(content))

	// Split the file into parts and upload them concurrently.
	chunkSize := len(content) / count
	if chunkSize == 0 {
		chunkSize = 1 // guard against files smaller than `count` bytes
	}

	for i := 0; i < count; i++ {
		start := i * chunkSize
		end := start + chunkSize
		if i == count-1 {
			end = len(content) // the last part absorbs any remainder
		}

		wg.Add(1)
		go func(partNumber int, start, end int) {
			defer wg.Done()

			partRequest := &oss.UploadPartRequest{
				Bucket:     oss.Ptr(bucketName),
				Key:        oss.Ptr(objectName),
				PartNumber: int32(partNumber),
				UploadId:   oss.Ptr(uploadId),
				Body:       bytes.NewReader(content[start:end]),
			}

			partResult, err := client.UploadPart(context.TODO(), partRequest)
			if err != nil {
				// NOTE: log.Fatalf exits the whole process (skipping deferred
				// calls); acceptable for a fail-fast sample.
				log.Fatalf("failed to upload part %d: %v", partNumber, err)
			}

			mu.Lock()
			parts = append(parts, oss.UploadPart{
				PartNumber: partRequest.PartNumber,
				ETag:       partResult.ETag,
			})
			mu.Unlock()
		}(i+1, start, end)
	}

	wg.Wait()

	// CompleteMultipartUpload requires the part list in ascending PartNumber
	// order, but the goroutines above append parts in completion order, which
	// is nondeterministic. Sort before assembling (insertion sort is plenty
	// for a handful of parts and avoids an extra import).
	for i := 1; i < len(parts); i++ {
		for j := i; j > 0 && parts[j].PartNumber < parts[j-1].PartNumber; j-- {
			parts[j], parts[j-1] = parts[j-1], parts[j]
		}
	}

	// Assemble all uploaded parts into the final object.
	request := &oss.CompleteMultipartUploadRequest{
		Bucket:   oss.Ptr(bucketName),
		Key:      oss.Ptr(objectName),
		UploadId: oss.Ptr(uploadId),
		CompleteMultipartUpload: &oss.CompleteMultipartUpload{
			Parts: parts,
		},
	}
	result, err := client.CompleteMultipartUpload(context.TODO(), request)
	if err != nil {
		log.Fatalf("failed to complete multipart upload: %v", err)
	}
	log.Printf("multipart upload complete: %#v\n", result)
}

More examples

Test with generated content

The following example generates a random 400 KB string, splits it into 3 parts, and uploads them concurrently — useful for testing your upload pipeline without a real file.

package main

import (
	"bufio"
	"context"
	"flag"
	"io"
	"log"
	"math/rand"
	"strings"
	"sync"
	"time"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
)

var (
	region     string
	bucketName string
	objectName string
	letters    = []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
)

func init() {
	flag.StringVar(&region, "region", "", "The region in which the bucket is located.")
	flag.StringVar(&bucketName, "bucket", "", "The name of the bucket.")
	flag.StringVar(&objectName, "object", "", "The name of the object.")
}

// main generates a random 400 KB payload, splits it into `count` parts,
// uploads the parts concurrently, and assembles them into a single object.
func main() {
	flag.Parse()

	var uploadId string

	if len(bucketName) == 0 {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, bucket name required")
	}

	if len(region) == 0 {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, region required")
	}

	if len(objectName) == 0 {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, object name required")
	}

	// Credentials are read from the OSS_ACCESS_KEY_ID / OSS_ACCESS_KEY_SECRET
	// environment variables.
	cfg := oss.LoadDefaultConfig().
		WithCredentialsProvider(credentials.NewEnvironmentVariableCredentialsProvider()).
		WithRegion(region)

	client := oss.NewClient(cfg)

	initRequest := &oss.InitiateMultipartUploadRequest{
		Bucket: oss.Ptr(bucketName),
		Key:    oss.Ptr(objectName),
	}

	initResult, err := client.InitiateMultipartUpload(context.TODO(), initRequest)
	if err != nil {
		log.Fatalf("failed to initiate multipart upload: %v", err)
	}
	// Log just the upload ID rather than dumping the whole result struct.
	log.Printf("upload ID: %s\n", *initResult.UploadId)
	uploadId = *initResult.UploadId

	var wg sync.WaitGroup
	var parts []oss.UploadPart
	var mu sync.Mutex
	count := 3

	// Generate a random 400 KB string.
	body := randBody(400000)
	reader := strings.NewReader(body)
	bufReader := bufio.NewReader(reader)
	content, err := io.ReadAll(bufReader)
	if err != nil {
		log.Fatalf("failed to read generated body: %v", err)
	}
	partSize := len(body) / count

	for i := 0; i < count; i++ {
		wg.Add(1)
		go func(partNumber int, partSize int, i int) {
			defer wg.Done()

			// The last part runs to the end of the buffer so a length that is
			// not divisible by count does not silently drop trailing bytes
			// (400000 / 3 truncates; the original lost the final byte).
			end := (i + 1) * partSize
			if partNumber == count {
				end = len(content)
			}

			partRequest := &oss.UploadPartRequest{
				Bucket:     oss.Ptr(bucketName),
				Key:        oss.Ptr(objectName),
				PartNumber: int32(partNumber),
				UploadId:   oss.Ptr(uploadId),
				Body:       strings.NewReader(string(content[i*partSize : end])),
			}

			partResult, err := client.UploadPart(context.TODO(), partRequest)
			if err != nil {
				log.Fatalf("failed to upload part %d: %v", partNumber, err)
			}

			mu.Lock()
			parts = append(parts, oss.UploadPart{
				PartNumber: partRequest.PartNumber,
				ETag:       partResult.ETag,
			})
			mu.Unlock()
		}(i+1, partSize, i)
	}

	wg.Wait()
	log.Println("all parts uploaded")

	// Parts were appended in completion order; CompleteMultipartUpload
	// requires ascending PartNumber order, so sort before assembling.
	for i := 1; i < len(parts); i++ {
		for j := i; j > 0 && parts[j].PartNumber < parts[j-1].PartNumber; j-- {
			parts[j], parts[j-1] = parts[j-1], parts[j]
		}
	}

	request := &oss.CompleteMultipartUploadRequest{
		Bucket:   oss.Ptr(bucketName),
		Key:      oss.Ptr(objectName),
		UploadId: oss.Ptr(uploadId),
		CompleteMultipartUpload: &oss.CompleteMultipartUpload{
			Parts: parts,
		},
	}

	result, err := client.CompleteMultipartUpload(context.TODO(), request)
	if err != nil {
		log.Fatalf("failed to complete multipart upload: %v", err)
	}
	log.Printf("multipart upload complete: %#v\n", result)
}

// randBody generates a random string of length n.
func randBody(n int) string {
	b := make([]rune, n)
	randMarker := rand.New(rand.NewSource(time.Now().UnixNano()))
	for i := range b {
		b[i] = letters[randMarker.Intn(len(letters))]
	}
	return string(b)
}

Abort a multipart upload

Use Client.AbortMultipartUpload to cancel an in-progress upload and release any storage used by the uploaded parts. Common reasons to abort:

  • Corrupted content — If you detect errors in the object during upload (for example, damaged data or unexpected content), abort the task to avoid storing unusable parts.

  • Network failure — If the connection is interrupted and parts may be lost or corrupted, abort and restart to ensure data integrity.

  • Insufficient storage — If storage capacity is a concern, abort the task to free resources for higher-priority uploads.

  • Accidental initiation — If you started an upload by mistake or uploaded the wrong version, abort to cancel it.

package main

import (
	"context"
	"flag"
	"log"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
)

var (
	region     string
	bucketName string
	objectName string
)

func init() {
	flag.StringVar(&region, "region", "", "The region in which the bucket is located.")
	flag.StringVar(&bucketName, "bucket", "", "The name of the bucket.")
	flag.StringVar(&objectName, "object", "", "The name of the object.")
}

// main starts a multipart upload and then immediately aborts it, which tells
// OSS to discard any parts uploaded under that upload ID.
func main() {
	flag.Parse()

	// Validate required command-line arguments.
	if bucketName == "" {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, bucket name required")
	}
	if region == "" {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, region required")
	}
	if objectName == "" {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, object name required")
	}

	// Build a client whose credentials come from environment variables.
	cfg := oss.LoadDefaultConfig().
		WithCredentialsProvider(credentials.NewEnvironmentVariableCredentialsProvider()).
		WithRegion(region)
	client := oss.NewClient(cfg)

	// Start a multipart upload so there is a task to abort.
	initResult, err := client.InitiateMultipartUpload(context.TODO(), &oss.InitiateMultipartUploadRequest{
		Bucket: oss.Ptr(bucketName),
		Key:    oss.Ptr(objectName),
	})
	if err != nil {
		log.Fatalf("failed to initiate multipart upload: %v", err)
	}
	log.Printf("upload ID: %s\n", *initResult.UploadId)

	// Abort the task; OSS releases storage used by any uploaded parts.
	result, err := client.AbortMultipartUpload(context.TODO(), &oss.AbortMultipartUploadRequest{
		Bucket:   oss.Ptr(bucketName),
		Key:      oss.Ptr(objectName),
		UploadId: initResult.UploadId,
	})
	if err != nil {
		log.Fatalf("failed to abort multipart upload: %v", err)
	}
	log.Printf("abort result: %#v\n", result)
}

List uploaded parts

Use Client.NewListPartsPaginator to list the parts that have been uploaded in a specific multipart upload task. The paginator returns each part's PartNumber, ETag, LastModified, Size, and HashCRC64. Common use cases include:

  • Resumable upload — When a connection is interrupted, list the already-uploaded parts to identify which parts need to be re-uploaded.

  • Monitor large object uploads — When uploading a very large object, list the uploaded parts to verify that the task is running as expected and detect issues early.

  • Troubleshooting — If an error occurs during upload, inspect the uploaded parts to identify which specific part failed, then resolve the issue accordingly.

  • Resource management — In scenarios with strict resource constraints, monitor upload progress to better manage storage capacity and bandwidth.

package main

import (
	"context"
	"flag"
	"log"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
)

var (
	region     string
	bucketName string
	objectName string
)

func init() {
	flag.StringVar(&region, "region", "", "The region in which the bucket is located.")
	flag.StringVar(&bucketName, "bucket", "", "The name of the bucket.")
	flag.StringVar(&objectName, "object", "", "The name of the object.")
}

// main starts a multipart upload and then pages through the parts uploaded
// under that upload ID, printing each part's metadata. (A freshly initiated
// task has no parts yet; point the paginator at an existing upload ID to see
// real entries.)
func main() {
	flag.Parse()

	// Validate required command-line arguments.
	if bucketName == "" {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, bucket name required")
	}
	if region == "" {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, region required")
	}
	if objectName == "" {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, object name required")
	}

	// Build a client whose credentials come from environment variables.
	cfg := oss.LoadDefaultConfig().
		WithCredentialsProvider(credentials.NewEnvironmentVariableCredentialsProvider()).
		WithRegion(region)
	client := oss.NewClient(cfg)

	// Start a multipart upload whose parts we will list.
	initResult, err := client.InitiateMultipartUpload(context.TODO(), &oss.InitiateMultipartUploadRequest{
		Bucket: oss.Ptr(bucketName),
		Key:    oss.Ptr(objectName),
	})
	if err != nil {
		log.Fatalf("failed to initiate multipart upload: %v", err)
	}
	log.Printf("upload ID: %s\n", *initResult.UploadId)

	// Page through every part uploaded under this upload ID.
	p := client.NewListPartsPaginator(&oss.ListPartsRequest{
		Bucket:   oss.Ptr(bucketName),
		Key:      oss.Ptr(objectName),
		UploadId: initResult.UploadId,
	})

	log.Println("uploaded parts:")
	pageNum := 0
	for p.HasNext() {
		pageNum++

		page, err := p.NextPage(context.TODO())
		if err != nil {
			log.Fatalf("failed to get page %d: %v", pageNum, err)
		}

		for _, part := range page.Parts {
			log.Printf("part number: %v, ETag: %v, last modified: %v, size: %v, CRC64: %v\n",
				part.PartNumber,
				oss.ToString(part.ETag),
				oss.ToTime(part.LastModified),
				part.Size,
				oss.ToString(part.HashCRC64))
		}
	}
}

Find and clean up incomplete uploads

Use Client.NewListMultipartUploadsPaginator to list all ongoing multipart upload tasks in a bucket. An ongoing task is one that has been initiated but not yet completed or aborted.

This is useful when you need to:

  • Monitor batch uploads — Verify all upload tasks are progressing as expected when uploading large numbers of objects.

  • Detect stuck uploads — Identify tasks that stalled due to network issues and resume or abort them.

  • Optimize resource usage — Adjust bandwidth allocation or upload policies based on in-progress task status.

  • Track data migration — Monitor all active uploads during large-scale migrations to catch and resolve failures early.

Parameters

Parameter — Description
Delimiter — Groups objects by name. Objects whose names share the same prefix up to the delimiter are returned as a single entry in CommonPrefixes.
MaxUploads — Maximum number of upload tasks to return. Default: 1000. Maximum: 1000.
KeyMarker — Returns only tasks with object names that come after this value alphabetically. Use with UploadIDMarker to set a pagination start point.
Prefix — Returns only tasks whose object names start with this prefix.
UploadIDMarker — Used with KeyMarker. If KeyMarker is not set, this parameter is ignored. If KeyMarker is set, results include: tasks with object names after KeyMarker, and tasks with the same object name as KeyMarker but with upload IDs greater than UploadIDMarker.

The following example lists up to 100 upload tasks whose object names start with file:

package main

import (
	"context"
	"flag"
	"log"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
)

var (
	region     string
	bucketName string
	objectName string
)

func init() {
	flag.StringVar(&region, "region", "", "The region in which the bucket is located.")
	flag.StringVar(&bucketName, "bucket", "", "The name of the bucket.")
	flag.StringVar(&objectName, "object", "", "The name of the object.")
}

// main lists in-progress multipart upload tasks in the bucket, up to 100 per
// page, restricted to object names beginning with "file".
func main() {
	flag.Parse()

	// Validate required command-line arguments. Note that objectName is
	// checked for consistency with the other samples even though the listing
	// below filters only by the "file" prefix.
	if bucketName == "" {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, bucket name required")
	}
	if region == "" {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, region required")
	}
	if objectName == "" {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, object name required")
	}

	// Build a client whose credentials come from environment variables.
	cfg := oss.LoadDefaultConfig().
		WithCredentialsProvider(credentials.NewEnvironmentVariableCredentialsProvider()).
		WithRegion(region)
	client := oss.NewClient(cfg)

	// Page through ongoing uploads whose object names start with "file".
	p := client.NewListMultipartUploadsPaginator(&oss.ListMultipartUploadsRequest{
		Bucket:     oss.Ptr(bucketName),
		MaxUploads: 100,
		Prefix:     oss.Ptr("file"),
	})

	log.Println("ongoing multipart uploads:")
	pageNum := 0
	for p.HasNext() {
		pageNum++

		page, err := p.NextPage(context.TODO())
		if err != nil {
			log.Fatalf("failed to get page %d: %v", pageNum, err)
		}

		for _, u := range page.Uploads {
			log.Printf("key: %v, upload ID: %v, initiated: %v\n",
				oss.ToString(u.Key),
				oss.ToString(u.UploadId),
				oss.ToTime(u.Initiated))
		}
	}
}

Upload with a callback

The following example uploads a 400 KB object in 3 concurrent parts and configures a callback that OSS calls after CompleteMultipartUpload succeeds. The callback URL, request body, and custom variables are Base64-encoded JSON strings passed in the CompleteMultipartUploadRequest.

package main

import (
	"bufio"
	"context"
	"encoding/base64"
	"encoding/json"
	"flag"
	"io"
	"log"
	"math/rand"
	"strings"
	"sync"
	"time"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
)

var (
	region     string
	bucketName string
	objectName string
	letters    = []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
)

func init() {
	flag.StringVar(&region, "region", "", "The region in which the bucket is located.")
	flag.StringVar(&bucketName, "bucket", "", "The name of the bucket.")
	flag.StringVar(&objectName, "object", "", "The name of the object.")
}

// main uploads a generated 400 KB object in concurrent parts and attaches a
// callback that OSS invokes after CompleteMultipartUpload succeeds. The
// callback URL, body template, and custom variables are Base64-encoded JSON.
func main() {
	flag.Parse()
	if len(bucketName) == 0 {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, bucket name required")
	}

	if len(region) == 0 {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, region required")
	}

	if len(objectName) == 0 {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, object name required")
	}

	cfg := oss.LoadDefaultConfig().
		WithCredentialsProvider(credentials.NewEnvironmentVariableCredentialsProvider()).
		WithRegion(region)

	client := oss.NewClient(cfg)

	initRequest := &oss.InitiateMultipartUploadRequest{
		Bucket: oss.Ptr(bucketName),
		Key:    oss.Ptr(objectName),
	}
	initResult, err := client.InitiateMultipartUpload(context.TODO(), initRequest)
	// BUG FIX: the original never checked this error, so a failed initiation
	// would nil-panic later when dereferencing initResult.UploadId.
	if err != nil {
		log.Fatalf("failed to initiate multipart upload: %v", err)
	}

	// Build and encode the callback configuration (Base64-encoded JSON).
	callbackMap := map[string]string{
		"callbackUrl":      "https://example.com:23450",
		"callbackBody":     "bucket=${bucket}&object=${object}&size=${size}&my_var_1=${x:my_var1}&my_var_2=${x:my_var2}",
		"callbackBodyType": "application/x-www-form-urlencoded",
	}

	callbackStr, err := json.Marshal(callbackMap)
	if err != nil {
		log.Fatalf("failed to marshal callback config: %v", err)
	}
	callbackBase64 := base64.StdEncoding.EncodeToString(callbackStr)

	// Custom variables referenced by the callback body template above.
	callbackVarMap := map[string]string{
		"x:my_var1": "this is var 1",
		"x:my_var2": "this is var 2",
	}
	callbackVarStr, err := json.Marshal(callbackVarMap)
	if err != nil {
		log.Fatalf("failed to marshal callback vars: %v", err)
	}
	callbackVarBase64 := base64.StdEncoding.EncodeToString(callbackVarStr)

	var wg sync.WaitGroup
	var parts []oss.UploadPart
	var mu sync.Mutex
	count := 3
	body := randBody(400000)
	reader := strings.NewReader(body)
	bufReader := bufio.NewReader(reader)
	content, err := io.ReadAll(bufReader)
	if err != nil {
		log.Fatalf("failed to read generated body: %v", err)
	}
	partSize := len(body) / count

	for i := 0; i < count; i++ {
		wg.Add(1)
		go func(partNumber int, partSize int, i int) {
			defer wg.Done()
			// The last part runs to the end of the buffer so a length that is
			// not divisible by count does not silently drop trailing bytes
			// (400000 / 3 truncates; the original lost the final byte).
			end := (i + 1) * partSize
			if partNumber == count {
				end = len(content)
			}
			partRequest := &oss.UploadPartRequest{
				Bucket:     oss.Ptr(bucketName),
				Key:        oss.Ptr(objectName),
				PartNumber: int32(partNumber),
				UploadId:   initResult.UploadId,
				Body:       strings.NewReader(string(content[i*partSize : end])),
			}
			partResult, err := client.UploadPart(context.TODO(), partRequest)
			if err != nil {
				log.Fatalf("failed to upload part %d: %v", partNumber, err)
			}
			mu.Lock()
			parts = append(parts, oss.UploadPart{
				PartNumber: partRequest.PartNumber,
				ETag:       partResult.ETag,
			})
			mu.Unlock()
		}(i+1, partSize, i)
	}
	wg.Wait()

	// Parts were appended in completion order; CompleteMultipartUpload
	// requires ascending PartNumber order, so sort before assembling.
	for i := 1; i < len(parts); i++ {
		for j := i; j > 0 && parts[j].PartNumber < parts[j-1].PartNumber; j-- {
			parts[j], parts[j-1] = parts[j-1], parts[j]
		}
	}

	request := &oss.CompleteMultipartUploadRequest{
		Bucket:   oss.Ptr(bucketName),
		Key:      oss.Ptr(objectName),
		UploadId: initResult.UploadId,
		CompleteMultipartUpload: &oss.CompleteMultipartUpload{
			Parts: parts,
		},
		Callback:    oss.Ptr(callbackBase64),
		CallbackVar: oss.Ptr(callbackVarBase64),
	}
	result, err := client.CompleteMultipartUpload(context.TODO(), request)
	if err != nil {
		log.Fatalf("failed to complete multipart upload: %v", err)
	}
	log.Printf("multipart upload complete: %#v\n", result)
}

// randBody returns a pseudo-random string of length n whose characters are
// drawn from the package-level letters alphabet. A fresh time-seeded source
// is created per call, so output differs across invocations.
func randBody(n int) string {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	out := make([]rune, n)
	for i := 0; i < n; i++ {
		out[i] = letters[rng.Intn(len(letters))]
	}
	return string(out)
}

Monitor upload progress

Use the ProgressFn field in UploadPartRequest to track how much data has been transferred for each part. The callback receives three values: bytes uploaded in this increment, total bytes transferred so far, and total bytes for this part.

The following example uploads a local file in 5 concurrent parts and prints progress for each.

package main

import (
	"bufio"
	"bytes"
	"context"
	"flag"
	"fmt"
	"io"
	"log"
	"os"
	"sync"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
)

var (
	region     string
	bucketName string
	objectName string
)

func init() {
	flag.StringVar(&region, "region", "", "The region in which the bucket is located.")
	flag.StringVar(&bucketName, "bucket", "", "The name of the bucket.")
	flag.StringVar(&objectName, "object", "", "The name of the object.")
}

// main uploads a local file in `count` concurrent parts, reporting per-part
// transfer progress via ProgressFn, then assembles the parts into an object.
func main() {
	flag.Parse()

	var uploadId string

	if len(bucketName) == 0 {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, bucket name required")
	}

	if len(region) == 0 {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, region required")
	}

	if len(objectName) == 0 {
		flag.PrintDefaults()
		log.Fatalf("invalid parameters, object name required")
	}

	// Credentials are read from the OSS_ACCESS_KEY_ID / OSS_ACCESS_KEY_SECRET
	// environment variables.
	cfg := oss.LoadDefaultConfig().
		WithCredentialsProvider(credentials.NewEnvironmentVariableCredentialsProvider()).
		WithRegion(region)

	client := oss.NewClient(cfg)

	initRequest := &oss.InitiateMultipartUploadRequest{
		Bucket: oss.Ptr(bucketName),
		Key:    oss.Ptr(objectName),
	}

	initResult, err := client.InitiateMultipartUpload(context.TODO(), initRequest)
	if err != nil {
		log.Fatalf("failed to initiate multipart upload: %v", err)
	}
	log.Printf("upload ID: %s\n", *initResult.UploadId)
	uploadId = *initResult.UploadId

	var wg sync.WaitGroup
	var parts []oss.UploadPart
	var mu sync.Mutex
	count := 5

	// Replace "/Users/yourLocalPath/yourFileName" with the actual file path.
	file, err := os.Open("/Users/yourLocalPath/yourFileName")
	if err != nil {
		log.Fatalf("failed to open file: %v", err)
	}
	defer file.Close()

	bufReader := bufio.NewReader(file)
	content, err := io.ReadAll(bufReader)
	if err != nil {
		log.Fatalf("failed to read file: %v", err)
	}
	log.Printf("file size: %d bytes\n", len(content))

	chunkSize := len(content) / count
	if chunkSize == 0 {
		chunkSize = 1 // guard against files smaller than `count` bytes
	}

	for i := 0; i < count; i++ {
		start := i * chunkSize
		end := start + chunkSize
		if i == count-1 {
			end = len(content) // the last part absorbs any remainder
		}

		wg.Add(1)
		go func(partNumber int, start, end int) {
			defer wg.Done()

			partRequest := &oss.UploadPartRequest{
				Bucket:     oss.Ptr(bucketName),
				Key:        oss.Ptr(objectName),
				PartNumber: int32(partNumber),
				UploadId:   oss.Ptr(uploadId),
				Body:       bytes.NewReader(content[start:end]),
				// ProgressFn receives the bytes sent in this increment, the
				// running total for this part, and the part's full size.
				ProgressFn: func(increment, transferred, total int64) {
					fmt.Printf("part %d: increment=%d, transferred=%d, total=%d\n",
						partNumber, increment, transferred, total)
				},
			}

			partResult, err := client.UploadPart(context.TODO(), partRequest)
			if err != nil {
				log.Fatalf("failed to upload part %d: %v", partNumber, err)
			}
			log.Printf("uploaded part %d (bytes %d–%d)", partNumber, start, end)

			mu.Lock()
			parts = append(parts, oss.UploadPart{
				PartNumber: partRequest.PartNumber,
				ETag:       partResult.ETag,
			})
			mu.Unlock()
		}(i+1, start, end)
	}

	wg.Wait()

	// Parts were appended in completion order; CompleteMultipartUpload
	// requires ascending PartNumber order, so sort before assembling.
	for i := 1; i < len(parts); i++ {
		for j := i; j > 0 && parts[j].PartNumber < parts[j-1].PartNumber; j-- {
			parts[j], parts[j-1] = parts[j-1], parts[j]
		}
	}

	request := &oss.CompleteMultipartUploadRequest{
		Bucket:   oss.Ptr(bucketName),
		Key:      oss.Ptr(objectName),
		UploadId: oss.Ptr(uploadId),
		CompleteMultipartUpload: &oss.CompleteMultipartUpload{
			Parts: parts,
		},
	}
	result, err := client.CompleteMultipartUpload(context.TODO(), request)
	if err != nil {
		log.Fatalf("failed to complete multipart upload: %v", err)
	}
	log.Printf("multipart upload complete: %#v\n", result)
}

API reference

Operation — Description
InitiateMultipartUpload — Initiates a multipart upload task and returns an upload ID
UploadPart — Uploads a single part
CompleteMultipartUpload — Assembles uploaded parts into a complete object
AbortMultipartUpload — Cancels a multipart upload task
NewListPartsPaginator — Lists parts uploaded in a specific task
NewListMultipartUploadsPaginator — Lists all ongoing multipart upload tasks in a bucket. Ongoing tasks are those that have been initiated but not yet completed or aborted.

For the complete sample code, see GitHub.