You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 

279 lines
6.0 KiB

  1. package server
  2. import (
  3. "bytes"
  4. "fmt"
  5. "io"
  6. "log"
  7. "mime"
  8. "os"
  9. "path/filepath"
  10. "strconv"
  11. "sync"
  12. "github.com/goamz/goamz/s3"
  13. )
// Storage abstracts a blob store keyed by an upload token and a
// filename. Implementations in this file: LocalStorage (local
// filesystem) and S3Storage (Amazon S3 via goamz).
type Storage interface {
	// Get returns a reader for the stored object together with its
	// content type and length. The caller must close the reader.
	Get(token string, filename string) (reader io.ReadCloser, contentType string, contentLength uint64, err error)
	// Head returns only the content type and length of the object,
	// without returning its data.
	Head(token string, filename string) (contentType string, contentLength uint64, err error)
	// Put stores the data read from reader under token/filename.
	Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) error
	// IsNotExist reports whether err, as returned by one of the
	// methods above, means the requested object does not exist.
	IsNotExist(err error) bool
	// Type returns a short backend identifier ("local", "s3").
	Type() string
}
// LocalStorage stores objects on the local filesystem under
// basedir/<token>/<filename>.
//
// NOTE(review): the embedded Storage interface is never assigned and
// stays nil; it only documents intent. All Storage methods are
// implemented explicitly below, so nothing dispatches through it.
type LocalStorage struct {
	Storage
	basedir string
}
  25. func NewLocalStorage(basedir string) (*LocalStorage, error) {
  26. return &LocalStorage{basedir: basedir}, nil
  27. }
  28. func (s *LocalStorage) Type() string {
  29. return "local"
  30. }
  31. func (s *LocalStorage) Head(token string, filename string) (contentType string, contentLength uint64, err error) {
  32. path := filepath.Join(s.basedir, token, filename)
  33. var fi os.FileInfo
  34. if fi, err = os.Lstat(path); err != nil {
  35. return
  36. }
  37. contentLength = uint64(fi.Size())
  38. contentType = mime.TypeByExtension(filepath.Ext(filename))
  39. return
  40. }
  41. func (s *LocalStorage) Get(token string, filename string) (reader io.ReadCloser, contentType string, contentLength uint64, err error) {
  42. path := filepath.Join(s.basedir, token, filename)
  43. // content type , content length
  44. if reader, err = os.Open(path); err != nil {
  45. return
  46. }
  47. var fi os.FileInfo
  48. if fi, err = os.Lstat(path); err != nil {
  49. return
  50. }
  51. contentLength = uint64(fi.Size())
  52. contentType = mime.TypeByExtension(filepath.Ext(filename))
  53. return
  54. }
  55. func (s *LocalStorage) IsNotExist(err error) bool {
  56. return os.IsNotExist(err)
  57. }
  58. func (s *LocalStorage) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) error {
  59. var f io.WriteCloser
  60. var err error
  61. path := filepath.Join(s.basedir, token)
  62. if err = os.Mkdir(path, 0700); err != nil && !os.IsExist(err) {
  63. return err
  64. }
  65. if f, err = os.OpenFile(filepath.Join(path, filename), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600); err != nil {
  66. fmt.Printf("%s", err)
  67. return err
  68. }
  69. defer f.Close()
  70. if _, err = io.Copy(f, reader); err != nil {
  71. return err
  72. }
  73. return nil
  74. }
// S3Storage stores objects in an Amazon S3 bucket under
// <token>/<filename> keys, using the goamz S3 client.
//
// NOTE(review): as with LocalStorage, the embedded Storage interface
// is nil and purely declarative; all methods are implemented below.
type S3Storage struct {
	Storage
	bucket *s3.Bucket
}
  79. func NewS3Storage(accessKey, secretKey, bucketName string) (*S3Storage, error) {
  80. bucket, err := getBucket(accessKey, secretKey, bucketName)
  81. if err != nil {
  82. return nil, err
  83. }
  84. return &S3Storage{bucket: bucket}, nil
  85. }
  86. func (s *S3Storage) Type() string {
  87. return "s3"
  88. }
  89. func (s *S3Storage) Head(token string, filename string) (contentType string, contentLength uint64, err error) {
  90. key := fmt.Sprintf("%s/%s", token, filename)
  91. // content type , content length
  92. response, err := s.bucket.Head(key, map[string][]string{})
  93. if err != nil {
  94. return
  95. }
  96. contentType = response.Header.Get("Content-Type")
  97. contentLength, err = strconv.ParseUint(response.Header.Get("Content-Length"), 10, 0)
  98. if err != nil {
  99. return
  100. }
  101. return
  102. }
  103. func (s *S3Storage) IsNotExist(err error) bool {
  104. log.Printf("IsNotExist: %s, %#v", err.Error(), err)
  105. b := (err.Error() == "The specified key does not exist.")
  106. b = b || (err.Error() == "Access Denied")
  107. return b
  108. }
  109. func (s *S3Storage) Get(token string, filename string) (reader io.ReadCloser, contentType string, contentLength uint64, err error) {
  110. key := fmt.Sprintf("%s/%s", token, filename)
  111. // content type , content length
  112. response, err := s.bucket.GetResponse(key)
  113. if err != nil {
  114. return
  115. }
  116. contentType = response.Header.Get("Content-Type")
  117. contentLength, err = strconv.ParseUint(response.Header.Get("Content-Length"), 10, 0)
  118. if err != nil {
  119. return
  120. }
  121. reader = response.Body
  122. return
  123. }
// Put uploads reader to S3 under <token>/<filename> as a multipart
// upload: the input is read into 10 MiB buffers (parts of at least
// 5 MiB, per S3's minimum for non-final parts) and uploaded with up to
// 20 concurrent part uploads. contentLength is not used; the input is
// read until EOF.
//
// NOTE(review): hazards in this pipeline, left untouched because the
// control flow is delicate — (1) when a part upload fails, the error
// branch below aborts the multipart upload and returns, but partsChan
// is never drained afterwards, so the producer goroutine and any
// in-flight part uploaders blocked on `partsChan <-` leak; (2) a read
// error in the producer returns without closing partsChan, leaving the
// range loop below blocked forever; (3) the inner goroutine assigns to
// the per-iteration err captured from the enclosing loop rather than a
// local. Confirm and fix separately.
func (s *S3Storage) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) (err error) {
	key := fmt.Sprintf("%s/%s", token, filename)
	var (
		multi *s3.Multi
		parts []s3.Part
	)
	if multi, err = s.bucket.InitMulti(key, contentType, s3.Private); err != nil {
		log.Printf(err.Error())
		return
	}
	// 20 mb parts
	// partsChan carries either an s3.Part (success) or an error
	// (failed part upload) from the uploader goroutines to the
	// collector loop below.
	partsChan := make(chan interface{})
	// partsChan := make(chan s3.Part)
	go func() {
		// maximize to 20 threads
		sem := make(chan int, 20)
		index := 1
		var wg sync.WaitGroup
		for {
			// buffered in memory because goamz s3 multi needs seekable reader
			var (
				buffer []byte = make([]byte, (1<<20)*10)
				count  int
				err    error
			)
			// Amazon expects parts of at least 5MB, except for the last one
			if count, err = io.ReadAtLeast(reader, buffer, (1<<20)*5); err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
				log.Printf(err.Error())
				return
			}
			// always send minimal 1 part
			if err == io.EOF && index > 1 {
				log.Printf("Waiting for all parts to finish uploading.")
				// wait for all parts to be finished uploading
				wg.Wait()
				// and close the channel
				close(partsChan)
				return
			}
			wg.Add(1)
			sem <- 1
			// using goroutines because of retries when upload fails
			go func(multi *s3.Multi, buffer []byte, index int) {
				log.Printf("Uploading part %d %d", index, len(buffer))
				defer func() {
					log.Printf("Finished part %d %d", index, len(buffer))
					wg.Done()
					// release the concurrency-limiting semaphore slot
					<-sem
				}()
				partReader := bytes.NewReader(buffer)
				var part s3.Part
				if part, err = multi.PutPart(index, partReader); err != nil {
					log.Printf("Error while uploading part %d %d %s", index, len(buffer), err.Error())
					partsChan <- err
					return
				}
				log.Printf("Finished uploading part %d %d", index, len(buffer))
				partsChan <- part
			}(multi, buffer[:count], index)
			index++
		}
	}()
	// wait for all parts to be uploaded
	for part := range partsChan {
		switch part.(type) {
		case s3.Part:
			parts = append(parts, part.(s3.Part))
		case error:
			// abort multi upload
			log.Printf("Error during upload, aborting %s.", part.(error).Error())
			err = part.(error)
			multi.Abort()
			return
		}
	}
	log.Printf("Completing upload %d parts", len(parts))
	if err = multi.Complete(parts); err != nil {
		log.Printf("Error during completing upload %d parts %s", len(parts), err.Error())
		return
	}
	log.Printf("Completed uploading %d", len(parts))
	return
}