
Fix s3 100MB max upload issue.

nrew225 committed 4 years ago · commit 372ff5f368 · tags/v1.1.3
4 changed files with 13 additions and 6 deletions:

  1. README.md          +1 -0
  2. cmd/cmd.go         +6 -1
  3. server/storage.go  +5 -5
  4. server/utils.go    +1 -0

README.md  +1 -0

@@ -165,6 +165,7 @@ aws-secret-key | aws secret key | | AWS_SECRET_KEY
 bucket | aws bucket | | BUCKET
 s3-region | region of the s3 bucket | eu-west-1 | S3_REGION
 s3-no-multipart | disables s3 multipart upload | false | |
+s3-part-size | size of parts for S3 multipart upload in MB | 5 | |
 s3-path-style | Forces path style URLs, required for Minio. | false | |
 basedir | path storage for local/gdrive provider | |
 gdrive-client-json-filepath | path to oauth client json config for gdrive provider | |
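
For illustration, the new option is passed like any other flag. A sketch only: the provider selection and aws-access-key flag follow transfer.sh's usual CLI, and the bucket name and the 16 MB value are made-up examples:

transfer.sh --provider s3 \
    --aws-access-key "$AWS_ACCESS_KEY" --aws-secret-key "$AWS_SECRET_KEY" \
    --bucket my-bucket --s3-region eu-west-1 \
    --s3-part-size 16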


cmd/cmd.go  +6 -1

@@ -130,6 +130,11 @@ var globalFlags = []cli.Flag{
 		Name:  "s3-no-multipart",
 		Usage: "Disables S3 Multipart Puts",
 	},
+	cli.Int64Flag{
+		Name:  "s3-part-size",
+		Usage: "Size of parts for S3 multipart upload in MB, default 5",
+		Value: 5,
+	},
 	cli.BoolFlag{
 		Name:  "s3-path-style",
 		Usage: "Forces path style URLs, required for Minio.",
@@ -343,7 +348,7 @@ func New() *Cmd {
 		panic("secret-key not set.")
 	} else if bucket := c.String("bucket"); bucket == "" {
 		panic("bucket not set.")
-	} else if storage, err := server.NewS3Storage(accessKey, secretKey, bucket, c.String("s3-region"), c.String("s3-endpoint"), logger, c.Bool("s3-no-multipart"), c.Bool("s3-path-style")); err != nil {
+	} else if storage, err := server.NewS3Storage(accessKey, secretKey, bucket, c.String("s3-region"), c.String("s3-endpoint"), logger, c.Bool("s3-no-multipart"), c.Int64("s3-part-size"), c.Bool("s3-path-style")); err != nil {
 		panic(err)
 	} else {
 		options = append(options, server.UseStorage(storage))
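
One caveat worth noting alongside this change: S3 rejects multipart parts smaller than 5 MB (except the final part), and nothing above validates the flag. A minimal guard, as a sketch only; clampPartSize is a hypothetical helper, not part of this commit:

// clampPartSize keeps s3-part-size at or above S3's 5 MB minimum
// for non-final multipart parts (hypothetical helper, not in this commit).
func clampPartSize(partSizeMB int64) int64 {
	if partSizeMB < 5 {
		return 5 // fall back to the default rather than fail the upload
	}
	return partSizeMB
}

It would slot into the call above as server.NewS3Storage(..., clampPartSize(c.Int64("s3-part-size")), ...).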


server/storage.go  +5 -5

@@ -130,12 +130,13 @@ type S3Storage struct {
 	s3          *s3.S3
 	logger      *log.Logger
 	noMultipart bool
+	partSize    int64
 }
 
-func NewS3Storage(accessKey, secretKey, bucketName, region, endpoint string, logger *log.Logger, disableMultipart bool, forcePathStyle bool) (*S3Storage, error) {
+func NewS3Storage(accessKey, secretKey, bucketName, region, endpoint string, logger *log.Logger, disableMultipart bool, partSize int64, forcePathStyle bool) (*S3Storage, error) {
 	sess := getAwsSession(accessKey, secretKey, region, endpoint, forcePathStyle)
 
-	return &S3Storage{bucket: bucketName, s3: s3.New(sess), session: sess, logger: logger, noMultipart: disableMultipart}, nil
+	return &S3Storage{bucket: bucketName, s3: s3.New(sess), session: sess, logger: logger, noMultipart: disableMultipart, partSize: partSize}, nil
 }
 
 func (s *S3Storage) Type() string {
@@ -243,9 +244,8 @@ func (s *S3Storage) Put(token string, filename string, reader io.Reader, content
 
 	// Create an uploader with the session and custom options
 	uploader := s3manager.NewUploader(s.session, func(u *s3manager.Uploader) {
-		u.PartSize = (1 << 20) * 5  // The minimum/default allowed part size is 5MB
-		u.Concurrency = concurrency // default is 5
-		u.MaxUploadParts = concurrency
+		u.PartSize = s.partSize * 1024 * 1024 // The minimum/default allowed part size is 5MB
+		u.Concurrency = concurrency           // default is 5
 		u.LeavePartsOnError = false
 	})
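
This hunk is the actual fix. The removed u.MaxUploadParts = concurrency line is what produced the 100 MB ceiling: an upload can span at most MaxUploadParts parts of PartSize bytes each, so capping the part count at concurrency (presumably 20, given the reported 100 MB limit) with 5 MB parts meant 20 * 5 MB = 100 MB per upload. With the cap removed, the SDK default of s3manager.MaxUploadParts (10000) applies. A small sketch of the arithmetic, assuming the aws-sdk-go constants and a concurrency of 20:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	const partSizeMB int64 = 5 // the default part size used here
	// Before the fix: part count was capped at concurrency (assumed 20).
	fmt.Println("old ceiling:", 20*partSizeMB, "MB") // 100 MB
	// After the fix: the SDK default of 10000 parts applies.
	fmt.Println("new ceiling:", int64(s3manager.MaxUploadParts)*partSizeMB/1024, "GB") // ~48 GB
}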



server/utils.go  +1 -0

@@ -43,6 +43,7 @@ func getAwsSession(accessKey, secretKey, region, endpoint string, forcePathStyle
 		Endpoint:         aws.String(endpoint),
 		Credentials:      credentials.NewStaticCredentials(accessKey, secretKey, ""),
 		S3ForcePathStyle: aws.Bool(forcePathStyle),
+		//LogLevel: aws.LogLevel(aws.LogDebug),
 	}))
 }
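
The only change here is a commented-out debugging hook. If one needed to trace the SDK's S3 traffic, enabling it would look roughly like this sketch (aws.LogLevel and aws.LogDebug are standard aws-sdk-go helpers; the other config fields from getAwsSession are elided):

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// aws.LogDebug makes the SDK log each request and response,
	// which helps when multipart uploads fail partway through.
	sess := session.Must(session.NewSession(&aws.Config{
		LogLevel: aws.LogLevel(aws.LogDebug),
	}))
	_ = sess // would be passed to s3.New(sess) as elsewhere in this package
}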


