diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/LICENSE b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/LICENSE new file mode 100644 index 0000000..d88451f --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/LICENSE @@ -0,0 +1,12 @@ +Copyright (c) 2013, Martin Angers +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/app.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/app.go new file mode 100644 index 0000000..5635f4c --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/app.go @@ -0,0 +1,12 @@ +package ghost + +import ( + "log" +) + +// Logging function, defaults to Go's native log.Printf function. The idea to use +// this instead of a *log.Logger struct is that it can be set to any of log.{Printf,Fatalf, Panicf}, +// but also to more flexible userland loggers like SeeLog (https://github.com/cihub/seelog). +// It could be set, for example, to SeeLog's Debugf function. Any function with the +// signature func(fmt string, params ...interface{}). +var LogFn = log.Printf diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/ghostest/main.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/ghostest/main.go new file mode 100644 index 0000000..7f1d817 --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/ghostest/main.go @@ -0,0 +1,169 @@ +// Ghostest is an interactive end-to-end Web site application to test +// the ghost packages. 
It serves the following URLs, with the specified +// features (handlers): +// +// / : panic;log;gzip;static; -> serve file index.html +// /public/styles.css : panic;log;gzip;StripPrefix;FileServer; -> serve directory public/ +// /public/script.js : panic;log;gzip;StripPrefix;FileServer; -> serve directory public/ +// /public/logo.pn : panic;log;gzip;StripPrefix;FileServer; -> serve directory public/ +// /session : panic;log;gzip;session;context;Custom; -> serve dynamic Go template +// /session/auth : panic;log;gzip;session;context;basicAuth;Custom; -> serve dynamic template +// /panic : panic;log;gzip;Custom; -> panics +// /context : panic;log;gzip;context;Custom1;Custom2; -> serve dynamic Amber template +package main + +import ( + "log" + "net/http" + "time" + + "github.com/PuerkitoBio/ghost/handlers" + "github.com/PuerkitoBio/ghost/templates" + _ "github.com/PuerkitoBio/ghost/templates/amber" + _ "github.com/PuerkitoBio/ghost/templates/gotpl" + "github.com/bmizerany/pat" +) + +const ( + sessionPageTitle = "Session Page" + sessionPageAuthTitle = "Authenticated Session Page" + sessionPageKey = "txt" + contextPageKey = "time" + sessionExpiration = 10 // Session expires after 10 seconds +) + +var ( + // Create the common session store and secret + memStore = handlers.NewMemoryStore(1) + secret = "testimony of the ancients" +) + +// The struct used to pass data to the session template. +type sessionPageInfo struct { + SessionID string + Title string + Text string +} + +// Authenticate the Basic Auth credentials. +func authenticate(u, p string) (interface{}, bool) { + if u == "user" && p == "pwd" { + return u + p, true + } + return nil, false +} + +// Handle the session page requests. +func sessionPageRenderer(w handlers.GhostWriter, r *http.Request) { + var ( + txt interface{} + data sessionPageInfo + title string + ) + + ssn := w.Session() + if r.Method == "GET" { + txt = ssn.Data[sessionPageKey] + } else { + txt = r.FormValue(sessionPageKey) + ssn.Data[sessionPageKey] = txt + } + if r.URL.Path == "/session/auth" { + title = sessionPageAuthTitle + } else { + title = sessionPageTitle + } + if txt != nil { + data = sessionPageInfo{ssn.ID(), title, txt.(string)} + } else { + data = sessionPageInfo{ssn.ID(), title, "[nil]"} + } + err := templates.Render("templates/session.tmpl", w, data) + if err != nil { + panic(err) + } +} + +// Prepare the context value for the chained handlers context page. +func setContext(w handlers.GhostWriter, r *http.Request) { + w.Context()[contextPageKey] = time.Now().String() +} + +// Retrieve the context value and render the chained handlers context page. +func renderContextPage(w handlers.GhostWriter, r *http.Request) { + err := templates.Render("templates/amber/context.amber", + w, &struct{ Val string }{w.Context()[contextPageKey].(string)}) + if err != nil { + panic(err) + } +} + +// Prepare the web server and kick it off. +func main() { + // Blank the default logger's prefixes + log.SetFlags(0) + + // Compile the dynamic templates (native Go templates and Amber + // templates are both registered via the for-side-effects-only imports) + err := templates.CompileDir("./templates/") + if err != nil { + panic(err) + } + + // Set the simple routes for static files + mux := pat.New() + mux.Get("/", handlers.StaticFileHandler("./index.html")) + mux.Get("/public/", http.StripPrefix("/public/", http.FileServer(http.Dir("./public/")))) + + // Set the more complex routes for session handling and dynamic page (same + // handler is used for both GET and POST). 
+ ssnOpts := handlers.NewSessionOptions(memStore, secret) + ssnOpts.CookieTemplate.MaxAge = sessionExpiration + hSsn := handlers.SessionHandler( + handlers.ContextHandlerFunc( + handlers.GhostHandlerFunc(sessionPageRenderer), + 1), + ssnOpts) + mux.Get("/session", hSsn) + mux.Post("/session", hSsn) + + hAuthSsn := handlers.BasicAuthHandler(hSsn, authenticate, "") + mux.Get("/session/auth", hAuthSsn) + mux.Post("/session/auth", hAuthSsn) + + // Set the handler for the chained context route + mux.Get("/context", handlers.ContextHandler(handlers.ChainHandlerFuncs( + handlers.GhostHandlerFunc(setContext), + handlers.GhostHandlerFunc(renderContextPage)), + 1)) + + // Set the panic route, which simply panics + mux.Get("/panic", http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + panic("explicit panic") + })) + + // Combine the top level handlers, that wrap around the muxer. + // Panic is the outermost, so that any panic is caught and responded to with a code 500. + // Log is next, so that every request is logged along with the URL, status code and response time. + // GZIP is then applied, so that content is compressed. + // Finally, the muxer finds the specific handler that applies to the route. + h := handlers.FaviconHandler( + handlers.PanicHandler( + handlers.LogHandler( + handlers.GZIPHandler( + mux, + nil), + handlers.NewLogOptions(nil, handlers.Ltiny)), + nil), + "./public/favicon.ico", + 48*time.Hour) + + // Assign the combined handler to the server. + http.Handle("/", h) + + // Start it up. + if err := http.ListenAndServe(":9000", nil); err != nil { + panic(err) + } +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/basicauth.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/basicauth.go new file mode 100644 index 0000000..d77f139 --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/basicauth.go @@ -0,0 +1,123 @@ +package handlers + +// Inspired by node.js' Connect library implementation of the basicAuth middleware. +// https://github.com/senchalabs/connect + +import ( + "bytes" + "encoding/base64" + "fmt" + "net/http" + "strings" +) + +// Internal writer that keeps track of the currently authenticated user. +type userResponseWriter struct { + http.ResponseWriter + user interface{} + userName string +} + +// Implement the WrapWriter interface. +func (this *userResponseWriter) WrappedWriter() http.ResponseWriter { + return this.ResponseWriter +} + +// Writes an unauthorized response to the client, specifying the expected authentication +// information. +func Unauthorized(w http.ResponseWriter, realm string) { + w.Header().Set("Www-Authenticate", fmt.Sprintf(`Basic realm="%s"`, realm)) + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte("Unauthorized")) +} + +// Writes a bad request response to the client, with an optional message. +func BadRequest(w http.ResponseWriter, msg string) { + w.WriteHeader(http.StatusBadRequest) + if msg == "" { + msg = "Bad Request" + } + w.Write([]byte(msg)) +} + +// BasicAuthHandlerFunc is the same as BasicAuthHandler, it is just a convenience +// signature that accepts a func(http.ResponseWriter, *http.Request) instead of +// a http.Handler interface. It saves the boilerplate http.HandlerFunc() cast. 
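+// An illustrative sketch (editor's example; profileHandler is a hypothetical
+// http.HandlerFunc, not part of this package):
+//
+//	auth := func(user, pwd string) (interface{}, bool) {
+//		if user == "admin" && pwd == "s3cret" {
+//			return user, true
+//		}
+//		return nil, false
+//	}
+//	http.Handle("/profile", BasicAuthHandlerFunc(profileHandler, auth, "Restricted"))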
+func BasicAuthHandlerFunc(h http.HandlerFunc, + authFn func(string, string) (interface{}, bool), realm string) http.HandlerFunc { + return BasicAuthHandler(h, authFn, realm) +} + +// Returns a Basic Authentication handler, protecting the wrapped handler from +// being accessed if the authentication function is not successful. +func BasicAuthHandler(h http.Handler, + authFn func(string, string) (interface{}, bool), realm string) http.HandlerFunc { + + if realm == "" { + realm = "Authorization Required" + } + return func(w http.ResponseWriter, r *http.Request) { + // Self-awareness + if _, ok := GetUser(w); ok { + h.ServeHTTP(w, r) + return + } + authInfo := r.Header.Get("Authorization") + if authInfo == "" { + // No authorization info, return 401 + Unauthorized(w, realm) + return + } + parts := strings.Split(authInfo, " ") + if len(parts) != 2 { + BadRequest(w, "Bad authorization header") + return + } + scheme := parts[0] + creds, err := base64.StdEncoding.DecodeString(parts[1]) + if err != nil { + BadRequest(w, "Bad credentials encoding") + return + } + index := bytes.Index(creds, []byte(":")) + if scheme != "Basic" || index < 0 { + BadRequest(w, "Bad authorization header") + return + } + user, pwd := string(creds[:index]), string(creds[index+1:]) + udata, ok := authFn(user, pwd) + if ok { + // Save user data and continue + uw := &userResponseWriter{w, udata, user} + h.ServeHTTP(uw, r) + } else { + Unauthorized(w, realm) + } + } +} + +// Return the currently authenticated user. This is the same data that was returned +// by the authentication function passed to BasicAuthHandler. +func GetUser(w http.ResponseWriter) (interface{}, bool) { + usr, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool { + _, ok := tst.(*userResponseWriter) + return ok + }) + if ok { + return usr.(*userResponseWriter).user, true + } + return nil, false +} + +// Return the currently authenticated user name. This is the user name that was +// authenticated for the current request. +func GetUserName(w http.ResponseWriter) (string, bool) { + usr, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool { + _, ok := tst.(*userResponseWriter) + return ok + }) + if ok { + return usr.(*userResponseWriter).userName, true + } + return "", false +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/chain.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/chain.go new file mode 100644 index 0000000..e3ae5de --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/chain.go @@ -0,0 +1,63 @@ +package handlers + +import ( + "net/http" +) + +// ChainableHandler is a valid Handler interface, and adds the possibility to +// chain other handlers. +type ChainableHandler interface { + http.Handler + Chain(http.Handler) ChainableHandler + ChainFunc(http.HandlerFunc) ChainableHandler +} + +// Default implementation of a simple ChainableHandler +type chainHandler struct { + http.Handler +} + +func (this *chainHandler) ChainFunc(h http.HandlerFunc) ChainableHandler { + return this.Chain(h) +} + +// Implementation of the ChainableHandler interface, calls the chained handler +// after the current one (sequential). +func (this *chainHandler) Chain(h http.Handler) ChainableHandler { + return &chainHandler{ + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Add the chained handler after the call to this handler + this.ServeHTTP(w, r) + h.ServeHTTP(w, r) + }), + } +} + +// Convert a standard http handler to a chainable handler interface. 
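+// For example (editor's sketch; log is the standard library logger), a file
+// server followed by a logging step:
+//
+//	h := NewChainableHandler(http.FileServer(http.Dir("./public"))).
+//		ChainFunc(func(w http.ResponseWriter, r *http.Request) {
+//			log.Printf("served %s", r.URL.Path)
+//		})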
+func NewChainableHandler(h http.Handler) ChainableHandler { + return &chainHandler{ + h, + } +} + +// Helper function to chain multiple handler functions in a single call. +func ChainHandlerFuncs(h ...http.HandlerFunc) ChainableHandler { + return &chainHandler{ + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for _, v := range h { + v(w, r) + } + }), + } +} + +// Helper function to chain multiple handlers in a single call. +func ChainHandlers(h ...http.Handler) ChainableHandler { + return &chainHandler{ + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for _, v := range h { + v.ServeHTTP(w, r) + } + }), + } +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/context.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/context.go new file mode 100644 index 0000000..ccac8c3 --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/context.go @@ -0,0 +1,55 @@ +package handlers + +import ( + "net/http" +) + +// Structure that holds the context map and exposes the ResponseWriter interface. +type contextResponseWriter struct { + http.ResponseWriter + m map[interface{}]interface{} +} + +// Implement the WrapWriter interface. +func (this *contextResponseWriter) WrappedWriter() http.ResponseWriter { + return this.ResponseWriter +} + +// ContextHandlerFunc is the same as ContextHandler, it is just a convenience +// signature that accepts a func(http.ResponseWriter, *http.Request) instead of +// a http.Handler interface. It saves the boilerplate http.HandlerFunc() cast. +func ContextHandlerFunc(h http.HandlerFunc, cap int) http.HandlerFunc { + return ContextHandler(h, cap) +} + +// ContextHandler gives a context storage that lives only for the duration of +// the request, with no locking involved. +func ContextHandler(h http.Handler, cap int) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if _, ok := GetContext(w); ok { + // Self-awareness, context handler is already set up + h.ServeHTTP(w, r) + return + } + + // Create the context-providing ResponseWriter replacement. + ctxw := &contextResponseWriter{ + w, + make(map[interface{}]interface{}, cap), + } + // Call the wrapped handler with the context-aware writer + h.ServeHTTP(ctxw, r) + } +} + +// Helper function to retrieve the context map from the ResponseWriter interface. +func GetContext(w http.ResponseWriter) (map[interface{}]interface{}, bool) { + ctxw, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool { + _, ok := tst.(*contextResponseWriter) + return ok + }) + if ok { + return ctxw.(*contextResponseWriter).m, true + } + return nil, false +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/doc.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/doc.go new file mode 100644 index 0000000..642c299 --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/doc.go @@ -0,0 +1,29 @@ +// Package handlers define reusable handler components that focus on offering +// a single well-defined feature. Note that any http.Handler implementation +// can be used with Ghost's chainable or wrappable handlers design. 
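+// For instance (editor's sketch; mux is a hypothetical http.Handler), handlers
+// compose by simple wrapping, the outermost handler running first:
+//
+//	h := PanicHandler(LogHandler(GZIPHandler(mux, nil), NewLogOptions(nil, Lshort)), nil)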
+// +// Go's standard library provides a number of such useful handlers in net/http: +// +// - FileServer(http.FileSystem) +// - NotFoundHandler() +// - RedirectHandler(string, int) +// - StripPrefix(string, http.Handler) +// - TimeoutHandler(http.Handler, time.Duration, string) +// +// This package adds the following list of handlers: +// +// - BasicAuthHandler(http.Handler, func(string, string) (interface{}, bool), string) +// a Basic Authentication handler. +// - ContextHandler(http.Handler, int) : a volatile storage map valid only +// for the duration of the request, with no locking required. +// - FaviconHandler(http.Handler, string, time.Duration) : an efficient favicon +// handler. +// - GZIPHandler(http.Handler) : compress the content of the body if the client +// accepts gzip compression. +// - LogHandler(http.Handler, *LogOptions) : customizable request logger. +// - PanicHandler(http.Handler) : handle panics gracefully so that the client +// receives a response (status code 500). +// - SessionHandler(http.Handler, *SessionOptions) : a cookie-based, store-agnostic +// persistent session handler. +// - StaticFileHandler(string) : serve the contents of a specific file. +package handlers diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/favicon.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/favicon.go new file mode 100644 index 0000000..0f460fa --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/favicon.go @@ -0,0 +1,71 @@ +package handlers + +import ( + "crypto/md5" + "io/ioutil" + "net/http" + "strconv" + "time" + + "github.com/PuerkitoBio/ghost" +) + +// FaviconHandlerFunc is the same as FaviconHandler, it is just a convenience +// signature that accepts a func(http.ResponseWriter, *http.Request) instead of +// a http.Handler interface. It saves the boilerplate http.HandlerFunc() cast. +func FaviconHandlerFunc(h http.HandlerFunc, path string, maxAge time.Duration) http.HandlerFunc { + return FaviconHandler(h, path, maxAge) +} + +// Efficient favicon handler, mostly a port of node's Connect library implementation +// of the favicon middleware. +// https://github.com/senchalabs/connect +func FaviconHandler(h http.Handler, path string, maxAge time.Duration) http.HandlerFunc { + var buf []byte + var hash string + + return func(w http.ResponseWriter, r *http.Request) { + var err error + if r.URL.Path == "/favicon.ico" { + if buf == nil { + // Read from file and cache + ghost.LogFn("ghost.favicon : serving from %s", path) + buf, err = ioutil.ReadFile(path) + if err != nil { + ghost.LogFn("ghost.favicon : error reading file : %s", err) + http.NotFound(w, r) + return + } + hash = hashContent(buf) + } + writeHeaders(w.Header(), buf, maxAge, hash) + writeBody(w, r, buf) + } else { + h.ServeHTTP(w, r) + } + } +} + +// Write the content of the favicon, or respond with a 404 not found +// in case of error (hardly a critical error). +func writeBody(w http.ResponseWriter, r *http.Request, buf []byte) { + _, err := w.Write(buf) + if err != nil { + ghost.LogFn("ghost.favicon : error writing response : %s", err) + http.NotFound(w, r) + } +} + +// Correctly set the http headers. +func writeHeaders(hdr http.Header, buf []byte, maxAge time.Duration, hash string) { + hdr.Set("Content-Type", "image/x-icon") + hdr.Set("Content-Length", strconv.Itoa(len(buf))) + hdr.Set("Etag", hash) + hdr.Set("Cache-Control", "public, max-age="+strconv.Itoa(int(maxAge.Seconds()))) +} + +// Get the MD5 hash of the content. 
+func hashContent(buf []byte) string { + h := md5.New() + // Hash the actual file content, then return the resulting digest. + h.Write(buf) + return string(h.Sum(nil)) +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/ghost.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/ghost.go new file mode 100644 index 0000000..2707e75 --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/ghost.go @@ -0,0 +1,75 @@ +package handlers + +import ( + "net/http" +) + +// Interface giving easy access to the most common augmented features. +type GhostWriter interface { + http.ResponseWriter + UserName() string + User() interface{} + Context() map[interface{}]interface{} + Session() *Session +} + +// Internal implementation of the GhostWriter interface. +type ghostWriter struct { + http.ResponseWriter + userName string + user interface{} + ctx map[interface{}]interface{} + ssn *Session +} + +func (this *ghostWriter) UserName() string { + return this.userName +} + +func (this *ghostWriter) User() interface{} { + return this.user +} + +func (this *ghostWriter) Context() map[interface{}]interface{} { + return this.ctx +} + +func (this *ghostWriter) Session() *Session { + return this.ssn +} + +// Convenience handler that wraps a custom function with direct access to the +// authenticated user, context and session on the writer. +func GhostHandlerFunc(h func(w GhostWriter, r *http.Request)) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if gw, ok := getGhostWriter(w); ok { + // Self-awareness + h(gw, r) + return + } + uid, _ := GetUserName(w) + usr, _ := GetUser(w) + ctx, _ := GetContext(w) + ssn, _ := GetSession(w) + gw := &ghostWriter{ + w, + uid, + usr, + ctx, + ssn, + } + h(gw, r) + } +} + +// Check the writer chain to find a ghostWriter. +func getGhostWriter(w http.ResponseWriter) (*ghostWriter, bool) { + gw, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool { + _, ok := tst.(*ghostWriter) + return ok + }) + if ok { + return gw.(*ghostWriter), true + } + return nil, false +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/gzip.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/gzip.go new file mode 100644 index 0000000..0d772a8 --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/gzip.go @@ -0,0 +1,168 @@ +package handlers + +import ( + "compress/gzip" + "io" + "net/http" +) + +// Thanks to Andrew Gerrand for inspiration: +// https://groups.google.com/d/msg/golang-nuts/eVnTcMwNVjM/4vYU8id9Q2UJ +// +// Also, node's Connect library implementation of the compress middleware: +// https://github.com/senchalabs/connect/blob/master/lib/middleware/compress.js +// +// And StackOverflow's explanation of Vary: Accept-Encoding header: +// http://stackoverflow.com/questions/7848796/what-does-varyaccept-encoding-mean + +// Internal writer that writes the (body) content in gzipped format, while +// maintaining the rest of the ResponseWriter interface for header manipulation. +type gzipResponseWriter struct { + io.Writer + http.ResponseWriter + r *http.Request // Keep a hold of the Request, for the filter function + filtered bool // Has the request been run through the filter function? + dogzip bool // Should we do GZIP compression for this request? + filterFn func(http.ResponseWriter, *http.Request) bool +} + +// Make sure the filter function is applied.
+func (w *gzipResponseWriter) applyFilter() { + if !w.filtered { + if w.dogzip = w.filterFn(w, w.r); w.dogzip { + setGzipHeaders(w.Header()) + } + w.filtered = true + } +} + +// Unambiguous Write() implementation (otherwise both ResponseWriter and Writer +// want to claim this method). +func (w *gzipResponseWriter) Write(b []byte) (int, error) { + w.applyFilter() + if w.dogzip { + // Write compressed + return w.Writer.Write(b) + } + // Write uncompressed + return w.ResponseWriter.Write(b) +} + +// Intercept the WriteHeader call to correctly set the GZIP headers. +func (w *gzipResponseWriter) WriteHeader(code int) { + w.applyFilter() + w.ResponseWriter.WriteHeader(code) +} + +// Implement WrapWriter interface +func (w *gzipResponseWriter) WrappedWriter() http.ResponseWriter { + return w.ResponseWriter +} + +var ( + defaultFilterTypes = [...]string{ + "text", + "javascript", + "json", + } +) + +// Default filter to check if the response should be GZIPped. +// By default, all text (html, css, xml, ...), javascript and json +// content types are candidates for GZIP. +func defaultFilter(w http.ResponseWriter, r *http.Request) bool { + hdr := w.Header() + for _, tp := range defaultFilterTypes { + ok := HeaderMatch(hdr, "Content-Type", HmContains, tp) + if ok { + return true + } + } + return false +} + +// GZIPHandlerFunc is the same as GZIPHandler, it is just a convenience +// signature that accepts a func(http.ResponseWriter, *http.Request) instead of +// a http.Handler interface. It saves the boilerplate http.HandlerFunc() cast. +func GZIPHandlerFunc(h http.HandlerFunc, filterFn func(http.ResponseWriter, *http.Request) bool) http.HandlerFunc { + return GZIPHandler(h, filterFn) +} + +// Gzip compression HTTP handler. If the client supports it, it compresses the response +// written by the wrapped handler. The filter function is called when the response is about +// to be written to determine if compression should be applied. If this argument is nil, +// the default filter will GZIP only content types containing /json|text|javascript/. +func GZIPHandler(h http.Handler, filterFn func(http.ResponseWriter, *http.Request) bool) http.HandlerFunc { + if filterFn == nil { + filterFn = defaultFilter + } + return func(w http.ResponseWriter, r *http.Request) { + if _, ok := getGzipWriter(w); ok { + // Self-awareness, gzip handler is already set up + h.ServeHTTP(w, r) + return + } + hdr := w.Header() + setVaryHeader(hdr) + + // Do nothing on a HEAD request + if r.Method == "HEAD" { + h.ServeHTTP(w, r) + return + } + if !acceptsGzip(r.Header) { + // No gzip support from the client, return uncompressed + h.ServeHTTP(w, r) + return + } + + // Prepare a gzip response container + gz := gzip.NewWriter(w) + gzw := &gzipResponseWriter{ + Writer: gz, + ResponseWriter: w, + r: r, + filterFn: filterFn, + } + h.ServeHTTP(gzw, r) + // Iff the handler completed successfully (no panic) and GZIP was indeed used, close the gzip writer, + // which seems to generate a Write to the underlying writer. + if gzw.dogzip { + gz.Close() + } + } +} + +// Add the vary by "accept-encoding" header if it is not already set. +func setVaryHeader(hdr http.Header) { + if !HeaderMatch(hdr, "Vary", HmContains, "accept-encoding") { + hdr.Add("Vary", "Accept-Encoding") + } +} + +// Checks if the client accepts GZIP-encoded responses. 
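+// For example, "Accept-Encoding: gzip, deflate" is accepted (a contains-match
+// on "gzip"), as is the wildcard "Accept-Encoding: *"; an absent or empty
+// header is not.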
+func acceptsGzip(hdr http.Header) bool { + ok := HeaderMatch(hdr, "Accept-Encoding", HmContains, "gzip") + if !ok { + ok = HeaderMatch(hdr, "Accept-Encoding", HmEquals, "*") + } + return ok +} + +func setGzipHeaders(hdr http.Header) { + // The content-type will be explicitly set somewhere down the path of handlers + hdr.Set("Content-Encoding", "gzip") + hdr.Del("Content-Length") +} + +// Helper function to retrieve the gzip writer. +func getGzipWriter(w http.ResponseWriter) (*gzipResponseWriter, bool) { + gz, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool { + _, ok := tst.(*gzipResponseWriter) + return ok + }) + if ok { + return gz.(*gzipResponseWriter), true + } + return nil, false +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/header.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/header.go new file mode 100644 index 0000000..f015bfc --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/header.go @@ -0,0 +1,50 @@ +package handlers + +import ( + "net/http" + "strings" +) + +// Kind of match to apply to the header check. +type HeaderMatchType int + +const ( + HmEquals HeaderMatchType = iota + HmStartsWith + HmEndsWith + HmContains +) + +// Check if the specified header matches the test string, applying the header match type +// specified. +func HeaderMatch(hdr http.Header, nm string, matchType HeaderMatchType, test string) bool { + // First get the header value + val := hdr[http.CanonicalHeaderKey(nm)] + if len(val) == 0 { + return false + } + // Prepare the match test + test = strings.ToLower(test) + for _, v := range val { + v = strings.Trim(strings.ToLower(v), " \n\t") + switch matchType { + case HmEquals: + if v == test { + return true + } + case HmStartsWith: + if strings.HasPrefix(v, test) { + return true + } + case HmEndsWith: + if strings.HasSuffix(v, test) { + return true + } + case HmContains: + if strings.Contains(v, test) { + return true + } + } + } + return false +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/log.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/log.go new file mode 100644 index 0000000..5a43a71 --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/log.go @@ -0,0 +1,231 @@ +package handlers + +// Inspired by node's Connect library implementation of the logging middleware +// https://github.com/senchalabs/connect + +import ( + "fmt" + "net/http" + "regexp" + "strings" + "time" + + "github.com/PuerkitoBio/ghost" +) + +const ( + // Predefined logging formats that can be passed as format string. + Ldefault = "_default_" + Lshort = "_short_" + Ltiny = "_tiny_" +) + +var ( + // Token parser for request and response headers + rxHeaders = regexp.MustCompile(`^(req|res)\[([^\]]+)\]$`) + + // Lookup table for predefined formats + predefFormats = map[string]struct { + fmt string + toks []string + }{ + Ldefault: { + `%s - - [%s] "%s %s HTTP/%s" %d %s "%s" "%s"`, + []string{"remote-addr", "date", "method", "url", "http-version", "status", "res[Content-Length]", "referrer", "user-agent"}, + }, + Lshort: { + `%s - %s %s HTTP/%s %d %s - %.3f s`, + []string{"remote-addr", "method", "url", "http-version", "status", "res[Content-Length]", "response-time"}, + }, + Ltiny: { + `%s %s %d %s - %.3f s`, + []string{"method", "url", "status", "res[Content-Length]", "response-time"}, + }, + } +) + +// Augmented ResponseWriter implementation that captures the status code for the logger. 
+type statusResponseWriter struct { + http.ResponseWriter + code int + oriURL string +} + +// Intercept the WriteHeader call to save the status code. +func (this *statusResponseWriter) WriteHeader(code int) { + this.code = code + this.ResponseWriter.WriteHeader(code) +} + +// Intercept the Write call to save the default status code. +func (this *statusResponseWriter) Write(data []byte) (int, error) { + if this.code == 0 { + this.code = http.StatusOK + } + return this.ResponseWriter.Write(data) +} + +// Implement the WrapWriter interface. +func (this *statusResponseWriter) WrappedWriter() http.ResponseWriter { + return this.ResponseWriter +} + +// LogHandler options +type LogOptions struct { + LogFn func(string, ...interface{}) // Defaults to ghost.LogFn if nil + Format string + Tokens []string + CustomTokens map[string]func(http.ResponseWriter, *http.Request) string + Immediate bool + DateFormat string +} + +// Create a new LogOptions struct. The DateFormat defaults to time.RFC3339. +func NewLogOptions(l func(string, ...interface{}), ft string, tok ...string) *LogOptions { + return &LogOptions{ + LogFn: l, + Format: ft, + Tokens: tok, + CustomTokens: make(map[string]func(http.ResponseWriter, *http.Request) string), + DateFormat: time.RFC3339, + } +} + +// LogHandlerFunc is the same as LogHandler, it is just a convenience +// signature that accepts a func(http.ResponseWriter, *http.Request) instead of +// a http.Handler interface. It saves the boilerplate http.HandlerFunc() cast. +func LogHandlerFunc(h http.HandlerFunc, opts *LogOptions) http.HandlerFunc { + return LogHandler(h, opts) +} + +// Create a log handler for every request it receives. +func LogHandler(h http.Handler, opts *LogOptions) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if _, ok := getStatusWriter(w); ok { + // Self-awareness, logging handler already set up + h.ServeHTTP(w, r) + return + } + + // Save the response start time + st := time.Now() + // Call the wrapped handler, with the augmented ResponseWriter to handle the status code + stw := &statusResponseWriter{w, 0, ""} + + // Log immediately if requested, otherwise on exit + if opts.Immediate { + logRequest(stw, r, st, opts) + } else { + // Store original URL, may get modified by handlers (i.e. StripPrefix) + stw.oriURL = r.URL.String() + defer logRequest(stw, r, st, opts) + } + h.ServeHTTP(stw, r) + } +} + +func getIpAddress(r *http.Request) string { + hdr := r.Header + hdrRealIp := hdr.Get("X-Real-Ip") + hdrForwardedFor := hdr.Get("X-Forwarded-For") + if hdrRealIp == "" && hdrForwardedFor == "" { + return r.RemoteAddr + } + if hdrForwardedFor != "" { + // X-Forwarded-For is potentially a list of addresses separated with "," + part := strings.Split(hdrForwardedFor, ",")[0] + return strings.TrimSpace(part) + ":0" + } + return hdrRealIp +} + +// Check if the specified token is a predefined one, and if so return its current value. 
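+// For instance (editor's sketch; mux is a hypothetical http.Handler), a custom
+// format built from these tokens:
+//
+//	opts := NewLogOptions(nil, "%s %s -> %d", "method", "url", "status")
+//	h := LogHandler(mux, opts)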
+func getPredefinedTokenValue(t string, w *statusResponseWriter, r *http.Request, + st time.Time, opts *LogOptions) (interface{}, bool) { + + switch t { + case "http-version": + return fmt.Sprintf("%d.%d", r.ProtoMajor, r.ProtoMinor), true + case "response-time": + return time.Now().Sub(st).Seconds(), true + case "remote-addr": + return getIpAddress(r), true + case "date": + return time.Now().Format(opts.DateFormat), true + case "method": + return r.Method, true + case "url": + if w.oriURL != "" { + return w.oriURL, true + } + return r.URL.String(), true + case "referrer", "referer": + return r.Referer(), true + case "user-agent": + return r.UserAgent(), true + case "status": + return w.code, true + } + + // Handle special cases for header + mtch := rxHeaders.FindStringSubmatch(t) + if len(mtch) > 2 { + if mtch[1] == "req" { + return r.Header.Get(mtch[2]), true + } else { + // This only works for headers explicitly set via the Header() map of + // the writer, not those added by the http package under the covers. + return w.Header().Get(mtch[2]), true + } + } + return nil, false +} + +// Do the actual logging. +func logRequest(w *statusResponseWriter, r *http.Request, st time.Time, opts *LogOptions) { + var ( + fn func(string, ...interface{}) + ok bool + format string + toks []string + ) + + // If no specific log function, use the default one from the ghost package + if opts.LogFn == nil { + fn = ghost.LogFn + } else { + fn = opts.LogFn + } + + // If this is a predefined format, use it instead + if v, ok := predefFormats[opts.Format]; ok { + format = v.fmt + toks = v.toks + } else { + format = opts.Format + toks = opts.Tokens + } + args := make([]interface{}, len(toks)) + for i, t := range toks { + if args[i], ok = getPredefinedTokenValue(t, w, r, st, opts); !ok { + if f, ok := opts.CustomTokens[t]; ok && f != nil { + args[i] = f(w, r) + } else { + args[i] = "?" + } + } + } + fn(format, args...) +} + +// Helper function to retrieve the status writer. +func getStatusWriter(w http.ResponseWriter) (*statusResponseWriter, bool) { + st, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool { + _, ok := tst.(*statusResponseWriter) + return ok + }) + if ok { + return st.(*statusResponseWriter), true + } + return nil, false +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/panic.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/panic.go new file mode 100644 index 0000000..e1362c2 --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/panic.go @@ -0,0 +1,57 @@ +package handlers + +import ( + "fmt" + "net/http" +) + +// Augmented response writer to hold the panic data (can be anything, not necessarily an error +// interface). +type errResponseWriter struct { + http.ResponseWriter + perr interface{} +} + +// Implement the WrapWriter interface. +func (this *errResponseWriter) WrappedWriter() http.ResponseWriter { + return this.ResponseWriter +} + +// PanicHandlerFunc is the same as PanicHandler, it is just a convenience +// signature that accepts a func(http.ResponseWriter, *http.Request) instead of +// a http.Handler interface. It saves the boilerplate http.HandlerFunc() cast. +func PanicHandlerFunc(h http.HandlerFunc, errH http.HandlerFunc) http.HandlerFunc { + return PanicHandler(h, errH) +} + +// Calls the wrapped handler and on panic calls the specified error handler. If the error handler is nil, +// responds with a 500 error message. 
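+// For example (editor's sketch; riskyHandler is a hypothetical http.HandlerFunc):
+//
+//	errH := func(w http.ResponseWriter, r *http.Request) {
+//		perr, _ := GetPanicError(w)
+//		http.Error(w, fmt.Sprint(perr), http.StatusInternalServerError)
+//	}
+//	h := PanicHandlerFunc(riskyHandler, errH)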
+func PanicHandler(h http.Handler, errH http.Handler) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + defer func() { + if err := recover(); err != nil { + if errH != nil { + ew := &errResponseWriter{w, err} + errH.ServeHTTP(ew, r) + } else { + http.Error(w, fmt.Sprintf("%s", err), http.StatusInternalServerError) + } + } + }() + + // Call the protected handler + h.ServeHTTP(w, r) + } +} + +// Helper function to retrieve the panic error, if any. +func GetPanicError(w http.ResponseWriter) (interface{}, bool) { + er, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool { + _, ok := tst.(*errResponseWriter) + return ok + }) + if ok { + return er.(*errResponseWriter).perr, true + } + return nil, false +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/redisstore.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/redisstore.go new file mode 100644 index 0000000..2974e22 --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/redisstore.go @@ -0,0 +1,135 @@ +package handlers + +import ( + "encoding/json" + "errors" + "time" + + "github.com/garyburd/redigo/redis" +) + +var ( + ErrNoKeyPrefix = errors.New("cannot get session keys without a key prefix") +) + +type RedisStoreOptions struct { + Network string + Address string + ConnectTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + Database int // Redis database to use for session keys + KeyPrefix string // If set, keys will be KeyPrefix:SessionID (colon added) + BrowserSessServerTTL time.Duration // Defaults to 2 days +} + +type RedisStore struct { + opts *RedisStoreOptions + conn redis.Conn +} + +// Create a Redis session store with the specified options. It panics if the +// connection to the Redis server cannot be established. +func NewRedisStore(opts *RedisStoreOptions) *RedisStore { + var err error + rs := &RedisStore{opts, nil} + rs.conn, err = redis.DialTimeout(opts.Network, opts.Address, opts.ConnectTimeout, + opts.ReadTimeout, opts.WriteTimeout) + if err != nil { + panic(err) + } + return rs +} + +// Get the session from the store. +func (this *RedisStore) Get(id string) (*Session, error) { + key := id + if this.opts.KeyPrefix != "" { + key = this.opts.KeyPrefix + ":" + id + } + b, err := redis.Bytes(this.conn.Do("GET", key)) + if err != nil { + return nil, err + } + var sess Session + err = json.Unmarshal(b, &sess) + if err != nil { + return nil, err + } + return &sess, nil +} + +// Save the session into the store. +func (this *RedisStore) Set(sess *Session) error { + b, err := json.Marshal(sess) + if err != nil { + return err + } + key := sess.ID() + if this.opts.KeyPrefix != "" { + key = this.opts.KeyPrefix + ":" + sess.ID() + } + ttl := sess.MaxAge() + if ttl == 0 { + // Browser session, set to specified TTL + ttl = this.opts.BrowserSessServerTTL + if ttl == 0 { + ttl = 2 * 24 * time.Hour // Default to 2 days + } + } + _, err = this.conn.Do("SETEX", key, int(ttl.Seconds()), b) + if err != nil { + return err + } + return nil +} + +// Delete the session from the store. +func (this *RedisStore) Delete(id string) error { + key := id + if this.opts.KeyPrefix != "" { + key = this.opts.KeyPrefix + ":" + id + } + _, err := this.conn.Do("DEL", key) + if err != nil { + return err + } + return nil +} + +// Clear all sessions from the store. Requires the use of a key +// prefix in the store options, otherwise the method refuses to delete all keys.
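+// For example (editor's sketch; assumes a Redis server reachable on :6379):
+//
+//	store := NewRedisStore(&RedisStoreOptions{
+//		Network:   "tcp",
+//		Address:   ":6379",
+//		KeyPrefix: "sess",
+//	})
+//	err := store.Clear() // removes only the "sess:*" keys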
+func (this *RedisStore) Clear() error { + vals, err := this.getSessionKeys() + if err != nil { + return err + } + if len(vals) > 0 { + this.conn.Send("MULTI") + for _, v := range vals { + this.conn.Send("DEL", v) + } + _, err = this.conn.Do("EXEC") + if err != nil { + return err + } + } + return nil +} + +// Get the number of session keys in the store. Requires the use of a +// key prefix in the store options, otherwise returns -1 (cannot tell +// session keys from other keys). +func (this *RedisStore) Len() int { + vals, err := this.getSessionKeys() + if err != nil { + return -1 + } + return len(vals) +} + +func (this *RedisStore) getSessionKeys() ([]interface{}, error) { + if this.opts.KeyPrefix != "" { + return redis.Values(this.conn.Do("KEYS", this.opts.KeyPrefix+":*")) + } + return nil, ErrNoKeyPrefix +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/reswriter.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/reswriter.go new file mode 100644 index 0000000..1ae6ad3 --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/reswriter.go @@ -0,0 +1,30 @@ +package handlers + +import ( + "net/http" +) + +// This interface can be implemented by an augmented ResponseWriter, so that +// it doesn't hide other augmented writers in the chain. +type WrapWriter interface { + http.ResponseWriter + WrappedWriter() http.ResponseWriter +} + +// Helper function to retrieve a specific ResponseWriter. +func GetResponseWriter(w http.ResponseWriter, + predicate func(http.ResponseWriter) bool) (http.ResponseWriter, bool) { + + for { + // Check if this writer is the one we're looking for + if w != nil && predicate(w) { + return w, true + } + // If it is a WrapWriter, move back the chain of wrapped writers + ww, ok := w.(WrapWriter) + if !ok { + return nil, false + } + w = ww.WrappedWriter() + } +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/session.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/session.go new file mode 100644 index 0000000..fb96faa --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/session.go @@ -0,0 +1,321 @@ +package handlers + +import ( + "encoding/json" + "errors" + "hash/crc32" + "net/http" + "strings" + "time" + + "github.com/PuerkitoBio/ghost" + "github.com/gorilla/securecookie" + "github.com/nu7hatch/gouuid" +) + +const defaultCookieName = "ghost.sid" + +var ( + ErrSessionSecretMissing = errors.New("session secret is missing") + ErrNoSessionID = errors.New("session ID could not be generated") +) + +// The Session holds the data map that persists for the duration of the session. +// The information stored in this map should be marshalable for the target Session store +// format (i.e. json, sql, gob, etc. depending on how the store persists the data). +type Session struct { + isNew bool // keep private, not saved to JSON, will be false once read from the store + internalSession +} + +// Use a separate private struct to hold the private fields of the Session, +// although those fields are exposed (public). This is a trick to simplify +// JSON encoding. +type internalSession struct { + Data map[string]interface{} // JSON cannot marshal a map[interface{}]interface{} + ID string + Created time.Time + MaxAge time.Duration +} + +// Create a new Session instance. It panics in the unlikely event that a new random ID cannot be generated. 
+func newSession(maxAge int) *Session { + uid, err := uuid.NewV4() + if err != nil { + panic(ErrNoSessionID) + } + return &Session{ + true, // is new + internalSession{ + make(map[string]interface{}), + uid.String(), + time.Now(), + time.Duration(maxAge) * time.Second, + }, + } +} + +// Gets the ID of the session. +func (ø *Session) ID() string { + return ø.internalSession.ID +} + +// Get the max age duration +func (ø *Session) MaxAge() time.Duration { + return ø.internalSession.MaxAge +} + +// Get the creation time of the session. +func (ø *Session) Created() time.Time { + return ø.internalSession.Created +} + +// Is this a new Session (created by the current request) +func (ø *Session) IsNew() bool { + return ø.isNew +} + +// TODO : Resets the max age property of the session to its original value (sliding expiration). +func (ø *Session) resetMaxAge() { +} + +// Marshal the session to JSON. +func (ø *Session) MarshalJSON() ([]byte, error) { + return json.Marshal(ø.internalSession) +} + +// Unmarshal the JSON into the internal session struct. +func (ø *Session) UnmarshalJSON(b []byte) error { + return json.Unmarshal(b, &ø.internalSession) +} + +// Options object for the session handler. It specified the Session store to use for +// persistence, the template for the session cookie (name, path, maxage, etc.), +// whether or not the proxy should be trusted to determine if the connection is secure, +// and the required secret to sign the session cookie. +type SessionOptions struct { + Store SessionStore + CookieTemplate http.Cookie + TrustProxy bool + Secret string +} + +// Create a new SessionOptions struct, using default cookie and proxy values. +func NewSessionOptions(store SessionStore, secret string) *SessionOptions { + return &SessionOptions{ + Store: store, + Secret: secret, + } +} + +// The augmented ResponseWriter struct for the session handler. It holds the current +// Session object and Session store, as well as flags and function to send the actual +// session cookie at the end of the request. +type sessResponseWriter struct { + http.ResponseWriter + sess *Session + sessStore SessionStore + sessSent bool + sendCookieFn func() +} + +// Implement the WrapWriter interface. +func (ø *sessResponseWriter) WrappedWriter() http.ResponseWriter { + return ø.ResponseWriter +} + +// Intercept the Write() method to add the Set-Cookie header before it's too late. +func (ø *sessResponseWriter) Write(data []byte) (int, error) { + if !ø.sessSent { + ø.sendCookieFn() + ø.sessSent = true + } + return ø.ResponseWriter.Write(data) +} + +// Intercept the WriteHeader() method to add the Set-Cookie header before it's too late. +func (ø *sessResponseWriter) WriteHeader(code int) { + if !ø.sessSent { + ø.sendCookieFn() + ø.sessSent = true + } + ø.ResponseWriter.WriteHeader(code) +} + +// SessionHandlerFunc is the same as SessionHandler, it is just a convenience +// signature that accepts a func(http.ResponseWriter, *http.Request) instead of +// a http.Handler interface. It saves the boilerplate http.HandlerFunc() cast. +func SessionHandlerFunc(h http.HandlerFunc, opts *SessionOptions) http.HandlerFunc { + return SessionHandler(h, opts) +} + +// Create a Session handler to offer the Session behaviour to the specified handler. 
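+// For example (editor's sketch; mux is a hypothetical http.Handler):
+//
+//	opts := NewSessionOptions(NewMemoryStore(100), "a very secret secret")
+//	opts.CookieTemplate.MaxAge = 1800 // in seconds; 0 means browser-session lifetime
+//	http.Handle("/", SessionHandler(mux, opts))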
+func SessionHandler(h http.Handler, opts *SessionOptions) http.HandlerFunc { + // Make sure the required cookie fields are set + if opts.CookieTemplate.Name == "" { + opts.CookieTemplate.Name = defaultCookieName + } + if opts.CookieTemplate.Path == "" { + opts.CookieTemplate.Path = "/" + } + // Secret is required + if opts.Secret == "" { + panic(ErrSessionSecretMissing) + } + + // Return the actual handler + return func(w http.ResponseWriter, r *http.Request) { + if _, ok := getSessionWriter(w); ok { + // Self-awareness + h.ServeHTTP(w, r) + return + } + + if strings.Index(r.URL.Path, opts.CookieTemplate.Path) != 0 { + // Session does not apply to this path + h.ServeHTTP(w, r) + return + } + + // Create a new Session or retrieve the existing session based on the + // session cookie received. + var sess *Session + var ckSessId string + exCk, err := r.Cookie(opts.CookieTemplate.Name) + if err != nil { + sess = newSession(opts.CookieTemplate.MaxAge) + ghost.LogFn("ghost.session : error getting session cookie : %s", err) + } else { + ckSessId, err = parseSignedCookie(exCk, opts.Secret) + if err != nil { + sess = newSession(opts.CookieTemplate.MaxAge) + ghost.LogFn("ghost.session : error parsing signed cookie : %s", err) + } else if ckSessId == "" { + sess = newSession(opts.CookieTemplate.MaxAge) + ghost.LogFn("ghost.session : no existing session ID") + } else { + // Get the session + sess, err = opts.Store.Get(ckSessId) + if err != nil { + sess = newSession(opts.CookieTemplate.MaxAge) + ghost.LogFn("ghost.session : error getting session from store : %s", err) + } else if sess == nil { + sess = newSession(opts.CookieTemplate.MaxAge) + ghost.LogFn("ghost.session : nil session") + } + } + } + // Save the original hash of the session, used to detect whether the contents + // changed during the handling of the request, so that the session is saved + // to the store only when needed. + oriHash := hash(sess) + + // Create the augmented ResponseWriter. + srw := &sessResponseWriter{w, sess, opts.Store, false, func() { + // This function is called when the header is about to be written, so that + // the session cookie is correctly set. + + // Check if the connection is secure + proto := strings.Trim(strings.ToLower(r.Header.Get("X-Forwarded-Proto")), " ") + tls := r.TLS != nil || (strings.HasPrefix(proto, "https") && opts.TrustProxy) + if opts.CookieTemplate.Secure && !tls { + ghost.LogFn("ghost.session : secure cookie on a non-secure connection, cookie not sent") + return + } + if !sess.IsNew() { + // If this is not a new session, no need to send back the cookie + // TODO : Handle expires? + return + } + + // Send the session cookie + ck := opts.CookieTemplate + ck.Value = sess.ID() + err := signCookie(&ck, opts.Secret) + if err != nil { + ghost.LogFn("ghost.session : error signing cookie : %s", err) + return + } + http.SetCookie(w, &ck) + }} + + // Call wrapped handler + h.ServeHTTP(srw, r) + + // TODO : Expiration management? srw.sess.resetMaxAge() + // Do not save if content is the same, unless session is new (to avoid + // creating a new session and sending a cookie on each successive request). + if newHash := hash(sess); !sess.IsNew() && oriHash == newHash && newHash != 0 { + // No changes to the session, no need to save + ghost.LogFn("ghost.session : no changes to save to store") + return + } + err = opts.Store.Set(sess) + if err != nil { + ghost.LogFn("ghost.session : error saving session to store : %s", err) + } + } +} + +// Helper function to retrieve the session for the current request.
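+// For example (editor's sketch), from within a wrapped handler:
+//
+//	if ssn, ok := GetSession(w); ok {
+//		ssn.Data["last-seen"] = time.Now().String()
+//	}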
+func GetSession(w http.ResponseWriter) (*Session, bool) { + ss, ok := getSessionWriter(w) + if ok { + return ss.sess, true + } + return nil, false +} + +// Helper function to retrieve the session store +func GetSessionStore(w http.ResponseWriter) (SessionStore, bool) { + ss, ok := getSessionWriter(w) + if ok { + return ss.sessStore, true + } + return nil, false +} + +// Internal helper function to retrieve the session writer object. +func getSessionWriter(w http.ResponseWriter) (*sessResponseWriter, bool) { + ss, ok := GetResponseWriter(w, func(tst http.ResponseWriter) bool { + _, ok := tst.(*sessResponseWriter) + return ok + }) + if ok { + return ss.(*sessResponseWriter), true + } + return nil, false +} + +// Parse a signed cookie and return the cookie value +func parseSignedCookie(ck *http.Cookie, secret string) (string, error) { + var val string + + sck := securecookie.New([]byte(secret), nil) + err := sck.Decode(ck.Name, ck.Value, &val) + if err != nil { + return "", err + } + return val, nil +} + +// Sign the specified cookie's value +func signCookie(ck *http.Cookie, secret string) error { + sck := securecookie.New([]byte(secret), nil) + enc, err := sck.Encode(ck.Name, ck.Value) + if err != nil { + return err + } + ck.Value = enc + return nil +} + +// Compute a CRC32 hash of the session's JSON-encoded contents. +func hash(s *Session) uint32 { + data, err := json.Marshal(s) + if err != nil { + ghost.LogFn("ghost.session : error hash : %s", err) + return 0 // 0 is always treated as "modified" session content + } + return crc32.ChecksumIEEE(data) +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/sstore.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/sstore.go new file mode 100644 index 0000000..624993f --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/sstore.go @@ -0,0 +1,90 @@ +package handlers + +import ( + "sync" + "time" +) + +// SessionStore interface, must be implemented by any store to be used +// for session storage. +type SessionStore interface { + Get(id string) (*Session, error) // Get the session from the store + Set(sess *Session) error // Save the session in the store + Delete(id string) error // Delete the session from the store + Clear() error // Delete all sessions from the store + Len() int // Get the number of sessions in the store +} + +// In-memory implementation of a session store. Not recommended for production +// use. +type MemoryStore struct { + l sync.RWMutex + m map[string]*Session + capc int +} + +// Create a new memory store. +func NewMemoryStore(capc int) *MemoryStore { + m := &MemoryStore{} + m.capc = capc + m.newMap() + return m +} + +// Get the number of sessions saved in the store. +func (this *MemoryStore) Len() int { + return len(this.m) +} + +// Get the requested session from the store. +func (this *MemoryStore) Get(id string) (*Session, error) { + this.l.RLock() + defer this.l.RUnlock() + return this.m[id], nil +} + +// Save the session to the store. +func (this *MemoryStore) Set(sess *Session) error { + this.l.Lock() + defer this.l.Unlock() + this.m[sess.ID()] = sess + if sess.IsNew() { + // Since the memory store doesn't marshal to a string without the isNew, if it is left + // to true, it will stay true forever. + sess.isNew = false + // Expire in the given time. If the maxAge is 0 (which means browser-session lifetime), + // expire in a reasonable delay, 2 days. The weird case of a negative maxAge will + // cause the immediate Delete call. 
+ wait := sess.MaxAge() + if wait == 0 { + wait = 2 * 24 * time.Hour + } + go func() { + // Clear the session after the specified delay + <-time.After(wait) + this.Delete(sess.ID()) + }() + } + return nil +} + +// Delete the specified session ID from the store. +func (this *MemoryStore) Delete(id string) error { + this.l.Lock() + defer this.l.Unlock() + delete(this.m, id) + return nil +} + +// Clear all sessions from the store. +func (this *MemoryStore) Clear() error { + this.l.Lock() + defer this.l.Unlock() + this.newMap() + return nil +} + +// Re-create the internal map, dropping all existing sessions. +func (this *MemoryStore) newMap() { + this.m = make(map[string]*Session, this.capc) +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/static.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/static.go new file mode 100644 index 0000000..7d07055 --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/handlers/static.go @@ -0,0 +1,13 @@ +package handlers + +import ( + "net/http" +) + +// StaticFileHandler, unlike net/http.FileServer, serves the contents of a specific +// file when it is called. +func StaticFileHandler(path string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.ServeFile(w, r, path) + } +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/templates/amber/amber.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/templates/amber/amber.go new file mode 100644 index 0000000..81e67e1 --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/templates/amber/amber.go @@ -0,0 +1,38 @@ +package amber + +import ( + "github.com/PuerkitoBio/ghost/templates" + "github.com/eknkc/amber" +) + +// The template compiler for Amber templates. +type AmberCompiler struct { + Options amber.Options + c *amber.Compiler +} + +// Create a new Amber compiler with the specified Amber-specific options. +func NewAmberCompiler(opts amber.Options) *AmberCompiler { + return &AmberCompiler{ + opts, + nil, + } +} + +// Implementation of the TemplateCompiler interface. +func (this *AmberCompiler) Compile(f string) (templates.Templater, error) { + // amber.CompileFile creates a new compiler each time. To limit the number + // of allocations, reuse a compiler. + if this.c == nil { + this.c = amber.New() + } + this.c.Options = this.Options + if err := this.c.ParseFile(f); err != nil { + return nil, err + } + return this.c.Compile() +} + +func init() { + templates.Register(".amber", NewAmberCompiler(amber.DefaultOptions)) +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/templates/gotpl/gotpl.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/templates/gotpl/gotpl.go new file mode 100644 index 0000000..c012f52 --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/templates/gotpl/gotpl.go @@ -0,0 +1,19 @@ +package gotpl + +import ( + "html/template" + + "github.com/PuerkitoBio/ghost/templates" +) + +// The template compiler for native Go templates. +type GoTemplateCompiler struct{} + +// Implementation of the TemplateCompiler interface. 
+func (this *GoTemplateCompiler) Compile(f string) (templates.Templater, error) { + return template.ParseFiles(f) +} + +func init() { + templates.Register(".tmpl", new(GoTemplateCompiler)) +} diff --git a/transfersh-server/vendor/github.com/PuerkitoBio/ghost/templates/template.go b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/templates/template.go new file mode 100644 index 0000000..1e1250a --- /dev/null +++ b/transfersh-server/vendor/github.com/PuerkitoBio/ghost/templates/template.go @@ -0,0 +1,129 @@ +package templates + +import ( + "errors" + "io" + "net/http" + "os" + "path" + "path/filepath" + "sync" + + "github.com/PuerkitoBio/ghost" +) + +var ( + ErrTemplateNotExist = errors.New("template does not exist") + ErrDirNotExist = errors.New("directory does not exist") + + compilers = make(map[string]TemplateCompiler) + + // The mutex guards the templaters map + mu sync.RWMutex + templaters = make(map[string]Templater) +) + +// Defines the interface that the template compiler must return. The Go native +// templates implement this interface. +type Templater interface { + Execute(wr io.Writer, data interface{}) error +} + +// The interface that a template engine must implement to be used by Ghost. +type TemplateCompiler interface { + Compile(fileName string) (Templater, error) +} + +// TODO : How to manage Go nested templates? +// TODO : Support Go's port of the mustache template? + +// Register a template compiler for the specified extension. Extensions are case-sensitive. +// The extension must start with a dot (it is compared to the result of path.Ext() on a +// given file name). +// +// Registering is not thread-safe. Compilers should be registered before the http server +// is started. +// Compiling templates, on the other hand, is thread-safe. +func Register(ext string, c TemplateCompiler) { + if c == nil { + panic("ghost: Register TemplateCompiler is nil") + } + if _, dup := compilers[ext]; dup { + panic("ghost: Register called twice for extension " + ext) + } + compilers[ext] = c +} + +// Compile all templates that have a matching compiler (based on their extension) in the +// specified directory. +func CompileDir(dir string) error { + mu.Lock() + defer mu.Unlock() + + return filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { + if fi == nil { + return ErrDirNotExist + } + if !fi.IsDir() { + err = compileTemplate(path, dir) + if err != nil { + ghost.LogFn("ghost.templates : error compiling template %s : %s", path, err) + return err + } + } + return nil + }) +} + +// Compile a single template file, using the specified base directory. The base +// directory is used to set the name of the template (the part of the path relative to this +// base directory is used as the name of the template). +func Compile(path, base string) error { + mu.Lock() + defer mu.Unlock() + + return compileTemplate(path, base) +} + +// Compile the specified template file if there is a matching compiler. +func compileTemplate(p, base string) error { + ext := path.Ext(p) + c, ok := compilers[ext] + // Ignore file if no template compiler exist for this extension + if ok { + t, err := c.Compile(p) + if err != nil { + return err + } + key, err := filepath.Rel(base, p) + if err != nil { + return err + } + ghost.LogFn("ghost.templates : storing template for file %s", key) + templaters[key] = t + } + return nil +} + +// Execute the template. 
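+// Execute is safe for concurrent use (the templaters map is read under an
+// RLock) and returns ErrTemplateNotExist when no template was compiled under
+// tplName.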
+func Execute(tplName string, w io.Writer, data interface{}) error { + mu.RLock() + t, ok := templaters[tplName] + mu.RUnlock() + if !ok { + return ErrTemplateNotExist + } + return t.Execute(w, data) +} + +// Render is the same as Execute, except that it takes a http.ResponseWriter +// instead of a generic io.Writer, and sets the Content-Type to text/html. +func Render(tplName string, w http.ResponseWriter, data interface{}) (err error) { + w.Header().Set("Content-Type", "text/html") + defer func() { + if err != nil { + w.Header().Del("Content-Type") + } + }() + return Execute(tplName, w, data) +} diff --git a/transfersh-server/vendor/github.com/bmizerany/pat/example/hello.go b/transfersh-server/vendor/github.com/bmizerany/pat/example/hello.go new file mode 100644 index 0000000..32f171b --- /dev/null +++ b/transfersh-server/vendor/github.com/bmizerany/pat/example/hello.go @@ -0,0 +1,27 @@ +package main + +import ( + "io" + "log" + "net/http" + + "github.com/bmizerany/pat" +) + +// hello world, the web server +func HelloServer(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, "hello, "+req.URL.Query().Get(":name")+"!\n") +} + +func main() { + m := pat.New() + m.Get("/hello/:name", http.HandlerFunc(HelloServer)) + + // Register this pat with the default serve mux so that other packages + // may also be exported. (i.e. /debug/pprof/*) + http.Handle("/", m) + err := http.ListenAndServe(":12345", nil) + if err != nil { + log.Fatal("ListenAndServe: ", err) + } +} diff --git a/transfersh-server/vendor/github.com/bmizerany/pat/example/patexample/hello_appengine.go b/transfersh-server/vendor/github.com/bmizerany/pat/example/patexample/hello_appengine.go new file mode 100644 index 0000000..25d9d03 --- /dev/null +++ b/transfersh-server/vendor/github.com/bmizerany/pat/example/patexample/hello_appengine.go @@ -0,0 +1,29 @@ +// hello.go ported for appengine +// +// this differs from the standard hello.go example in two ways: appengine +// already provides an http server for you, obviating the need for the +// ListenAndServe call (with associated logging), and the package must not be +// called main (appengine reserves package 'main' for the underlying program). + +package patexample + +import ( + "io" + "net/http" + + "github.com/bmizerany/pat" +) + +// hello world, the web server +func HelloServer(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, "hello, "+req.URL.Query().Get(":name")+"!\n") +} + +func init() { + m := pat.New() + m.Get("/hello/:name", http.HandlerFunc(HelloServer)) + + // Register this pat with the default serve mux so that other packages + // may also be exported. (i.e. /debug/pprof/*) + http.Handle("/", m) +} diff --git a/transfersh-server/vendor/github.com/bmizerany/pat/mux.go b/transfersh-server/vendor/github.com/bmizerany/pat/mux.go new file mode 100644 index 0000000..ec86fcd --- /dev/null +++ b/transfersh-server/vendor/github.com/bmizerany/pat/mux.go @@ -0,0 +1,310 @@ +// Package pat implements a simple URL pattern muxer +package pat + +import ( + "net/http" + "net/url" + "strings" +) + +// PatternServeMux is an HTTP request multiplexer. It matches the URL of each +// incoming request against a list of registered patterns with their associated +// methods and calls the handler for the pattern that most closely matches the +// URL. +// +// Pattern matching attempts each pattern in the order in which they were +// registered. +// +// Patterns may contain literals or captures. 
Capture names start with a colon +// and consist of letters A-Z, a-z, _, and 0-9. The rest of the pattern +// matches literally. The portion of the URL matching each name ends with an +// occurrence of the character in the pattern immediately following the name, +// or a /, whichever comes first. It is possible for a name to match the empty +// string. +// +// Example pattern with one capture: +// /hello/:name +// Will match: +// /hello/blake +// /hello/keith +// Will not match: +// /hello/blake/ +// /hello/blake/foo +// /foo +// /foo/bar +// +// Example 2: +// /hello/:name/ +// Will match: +// /hello/blake/ +// /hello/keith/foo +// /hello/blake +// /hello/keith +// Will not match: +// /foo +// /foo/bar +// +// A pattern ending with a slash will add an implicit redirect for its non-slash +// version. For example: Get("/foo/", handler) also registers +// Get("/foo", handler) as a redirect. You may override it by registering +// Get("/foo", anotherhandler) before the slash version. +// +// Retrieve the capture from the r.URL.Query().Get(":name") in a handler (note +// the colon). If a capture name appears more than once, the additional values +// are appended to the previous values (see +// http://golang.org/pkg/net/url/#Values) +// +// A trivial example server is: +// +// package main +// +// import ( +// "io" +// "net/http" +// "github.com/bmizerany/pat" +// "log" +// ) +// +// // hello world, the web server +// func HelloServer(w http.ResponseWriter, req *http.Request) { +// io.WriteString(w, "hello, "+req.URL.Query().Get(":name")+"!\n") +// } +// +// func main() { +// m := pat.New() +// m.Get("/hello/:name", http.HandlerFunc(HelloServer)) +// +// // Register this pat with the default serve mux so that other packages +// // may also be exported. (i.e. /debug/pprof/*) +// http.Handle("/", m) +// err := http.ListenAndServe(":12345", nil) +// if err != nil { +// log.Fatal("ListenAndServe: ", err) +// } +// } +// +// When "Method Not Allowed": +// +// Pat knows what methods are allowed given a pattern and a URI. For +// convenience, PatternServeMux will add the Allow header for requests that +// match a pattern for a method other than the method requested and set the +// Status to "405 Method Not Allowed". +// +// If the NotFound handler is set, then it is used whenever the pattern doesn't +// match the request path for the current method (and the Allow header is not +// altered). +type PatternServeMux struct { + // NotFound, if set, is used whenever the request doesn't match any + // pattern for its method. NotFound should be set before serving any + // requests. + NotFound http.Handler + handlers map[string][]*patHandler +} + +// New returns a new PatternServeMux. +func New() *PatternServeMux { + return &PatternServeMux{handlers: make(map[string][]*patHandler)} +} + +// ServeHTTP matches r.URL.Path against its routing table using the rules +// described above. 
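+// When a pattern with captures matches, the captured values are prepended to
+// r.URL.RawQuery as ":name" keys before the handler runs, which is why
+// handlers read them via r.URL.Query().Get(":name").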
+func (p *PatternServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + for _, ph := range p.handlers[r.Method] { + if params, ok := ph.try(r.URL.Path); ok { + if len(params) > 0 && !ph.redirect { + r.URL.RawQuery = url.Values(params).Encode() + "&" + r.URL.RawQuery + } + ph.ServeHTTP(w, r) + return + } + } + + if p.NotFound != nil { + p.NotFound.ServeHTTP(w, r) + return + } + + allowed := make([]string, 0, len(p.handlers)) + for meth, handlers := range p.handlers { + if meth == r.Method { + continue + } + + for _, ph := range handlers { + if _, ok := ph.try(r.URL.Path); ok { + allowed = append(allowed, meth) + } + } + } + + if len(allowed) == 0 { + http.NotFound(w, r) + return + } + + w.Header().Add("Allow", strings.Join(allowed, ", ")) + http.Error(w, "Method Not Allowed", 405) +} + +// Head will register a pattern with a handler for HEAD requests. +func (p *PatternServeMux) Head(pat string, h http.Handler) { + p.Add("HEAD", pat, h) +} + +// Get will register a pattern with a handler for GET requests. +// It also registers pat for HEAD requests. If this needs to be overridden, use +// Head before Get with pat. +func (p *PatternServeMux) Get(pat string, h http.Handler) { + p.Add("HEAD", pat, h) + p.Add("GET", pat, h) +} + +// Post will register a pattern with a handler for POST requests. +func (p *PatternServeMux) Post(pat string, h http.Handler) { + p.Add("POST", pat, h) +} + +// Put will register a pattern with a handler for PUT requests. +func (p *PatternServeMux) Put(pat string, h http.Handler) { + p.Add("PUT", pat, h) +} + +// Del will register a pattern with a handler for DELETE requests. +func (p *PatternServeMux) Del(pat string, h http.Handler) { + p.Add("DELETE", pat, h) +} + +// Options will register a pattern with a handler for OPTIONS requests. +func (p *PatternServeMux) Options(pat string, h http.Handler) { + p.Add("OPTIONS", pat, h) +} + +// Patch will register a pattern with a handler for PATCH requests. +func (p *PatternServeMux) Patch(pat string, h http.Handler) { + p.Add("PATCH", pat, h) +} + +// Add will register a pattern with a handler for meth requests. +func (p *PatternServeMux) Add(meth, pat string, h http.Handler) { + p.add(meth, pat, h, false) +} + +func (p *PatternServeMux) add(meth, pat string, h http.Handler, redirect bool) { + handlers := p.handlers[meth] + for _, p1 := range handlers { + if p1.pat == pat { + return // found existing pattern; do nothing + } + } + handler := &patHandler{ + pat: pat, + Handler: h, + redirect: redirect, + } + p.handlers[meth] = append(handlers, handler) + + n := len(pat) + if n > 0 && pat[n-1] == '/' { + p.add(meth, pat[:n-1], http.HandlerFunc(addSlashRedirect), true) + } +} + +func addSlashRedirect(w http.ResponseWriter, r *http.Request) { + u := *r.URL + u.Path += "/" + http.Redirect(w, r, u.String(), http.StatusMovedPermanently) +} + +// Tail returns the trailing string in path after the final slash for a pat ending with a slash. 
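+// If pat does not end with a slash, or the pattern fails to match, Tail
+// returns "".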
+// +// Examples: +// +// Tail("/hello/:title/", "/hello/mr/mizerany") == "mizerany" +// Tail("/:a/", "/x/y/z") == "y/z" +// +func Tail(pat, path string) string { + var i, j int + for i < len(path) { + switch { + case j >= len(pat): + if pat[len(pat)-1] == '/' { + return path[i:] + } + return "" + case pat[j] == ':': + var nextc byte + _, nextc, j = match(pat, isAlnum, j+1) + _, _, i = match(path, matchPart(nextc), i) + case path[i] == pat[j]: + i++ + j++ + default: + return "" + } + } + return "" +} + +type patHandler struct { + pat string + http.Handler + redirect bool +} + +func (ph *patHandler) try(path string) (url.Values, bool) { + p := make(url.Values) + var i, j int + for i < len(path) { + switch { + case j >= len(ph.pat): + if ph.pat != "/" && len(ph.pat) > 0 && ph.pat[len(ph.pat)-1] == '/' { + return p, true + } + return nil, false + case ph.pat[j] == ':': + var name, val string + var nextc byte + name, nextc, j = match(ph.pat, isAlnum, j+1) + val, _, i = match(path, matchPart(nextc), i) + p.Add(":"+name, val) + case path[i] == ph.pat[j]: + i++ + j++ + default: + return nil, false + } + } + if j != len(ph.pat) { + return nil, false + } + return p, true +} + +func matchPart(b byte) func(byte) bool { + return func(c byte) bool { + return c != b && c != '/' + } +} + +func match(s string, f func(byte) bool, i int) (matched string, next byte, j int) { + j = i + for j < len(s) && f(s[j]) { + j++ + } + if j < len(s) { + next = s[j] + } + return s[i:j], next, j +} + +func isAlpha(ch byte) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' +} + +func isDigit(ch byte) bool { + return '0' <= ch && ch <= '9' +} + +func isAlnum(ch byte) bool { + return isAlpha(ch) || isDigit(ch) +} diff --git a/transfersh-server/vendor/github.com/dutchcoders/go-clamd/LICENSE b/transfersh-server/vendor/github.com/dutchcoders/go-clamd/LICENSE new file mode 100644 index 0000000..e85f3f1 --- /dev/null +++ b/transfersh-server/vendor/github.com/dutchcoders/go-clamd/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2014 dutchcoders + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/transfersh-server/vendor/github.com/dutchcoders/go-clamd/clamd.go b/transfersh-server/vendor/github.com/dutchcoders/go-clamd/clamd.go new file mode 100644 index 0000000..5199f63 --- /dev/null +++ b/transfersh-server/vendor/github.com/dutchcoders/go-clamd/clamd.go @@ -0,0 +1,311 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 DutchCoders + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package clamd + +import ( + "errors" + "fmt" + "io" + "net/url" + "strings" +) + +const ( + RES_OK = "OK" + RES_FOUND = "FOUND" + RES_ERROR = "ERROR" + RES_PARSE_ERROR = "PARSE ERROR" +) + +type Clamd struct { + address string +} + +type Stats struct { + Pools string + State string + Threads string + Memstats string + Queue string +} + +type ScanResult struct { + Raw string + Description string + Path string + Hash string + Size int + Status string +} + +var EICAR = []byte(`X5O!P%@AP[4\PZX54(P^)7CC)7}$EICAR-STANDARD-ANTIVIRUS-TEST-FILE!$H+H*`) + +func (c *Clamd) newConnection() (conn *CLAMDConn, err error) { + + var u *url.URL + + if u, err = url.Parse(c.address); err != nil { + return + } + + switch u.Scheme { + case "tcp": + conn, err = newCLAMDTcpConn(u.Host) + case "unix": + conn, err = newCLAMDUnixConn(u.Path) + default: + conn, err = newCLAMDUnixConn(c.address) + } + + return +} + +func (c *Clamd) simpleCommand(command string) (chan *ScanResult, error) { + conn, err := c.newConnection() + if err != nil { + return nil, err + } + + err = conn.sendCommand(command) + if err != nil { + return nil, err + } + + ch, wg, err := conn.readResponse() + + go func() { + wg.Wait() + conn.Close() + }() + + return ch, err +} + +/* +Check the daemon's state (should reply with PONG). +*/ +func (c *Clamd) Ping() error { + ch, err := c.simpleCommand("PING") + if err != nil { + return err + } + + select { + case s := (<-ch): + switch s.Raw { + case "PONG": + return nil + default: + return errors.New(fmt.Sprintf("Invalid response, got %s.", s)) + } + } + + return nil +} + +/* +Print program and database versions. +*/ +func (c *Clamd) Version() (chan *ScanResult, error) { + dataArrays, err := c.simpleCommand("VERSION") + return dataArrays, err +} + +/* +On this command clamd provides statistics about the scan queue, contents of scan +queue, and memory usage. The exact reply format is subject to changes in future +releases. 
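+A typical reply (illustrative, abridged) looks like:
+
+	POOLS: 1
+	STATE: VALID PRIMARY
+	THREADS: live 1 idle 0 max 12 idle-timeout 30
+	QUEUE: 0 items
+	MEMSTATS: heap 3.656M mmap 0.129M used 3.236M ...
+	END
+
+which is what the prefix checks in the parsing loop below rely on.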
+*/ +func (c *Clamd) Stats() (*Stats, error) { + ch, err := c.simpleCommand("STATS") + if err != nil { + return nil, err + } + + stats := &Stats{} + + for s := range ch { + if strings.HasPrefix(s.Raw, "POOLS") { + stats.Pools = strings.Trim(s.Raw[6:], " ") + } else if strings.HasPrefix(s.Raw, "STATE") { + stats.State = s.Raw + } else if strings.HasPrefix(s.Raw, "THREADS") { + stats.Threads = s.Raw + } else if strings.HasPrefix(s.Raw, "QUEUE") { + stats.Queue = s.Raw + } else if strings.HasPrefix(s.Raw, "MEMSTATS") { + stats.Memstats = s.Raw + } else if strings.HasPrefix(s.Raw, "END") { + } else { + // return nil, errors.New(fmt.Sprintf("Unknown response, got %s.", s)) + } + } + + return stats, nil +} + +/* +Reload the databases. +*/ +func (c *Clamd) Reload() error { + ch, err := c.simpleCommand("RELOAD") + if err != nil { + return err + } + + select { + case s := (<-ch): + switch s.Raw { + case "RELOADING": + return nil + default: + return errors.New(fmt.Sprintf("Invalid response, got %s.", s)) + } + } + + return nil +} + +func (c *Clamd) Shutdown() error { + _, err := c.simpleCommand("SHUTDOWN") + if err != nil { + return err + } + + return err +} + +/* +Scan file or directory (recursively) with archive support enabled (a full path is +required). +*/ +func (c *Clamd) ScanFile(path string) (chan *ScanResult, error) { + command := fmt.Sprintf("SCAN %s", path) + ch, err := c.simpleCommand(command) + return ch, err +} + +/* +Scan file or directory (recursively) with archive and special file support disabled +(a full path is required). +*/ +func (c *Clamd) RawScanFile(path string) (chan *ScanResult, error) { + command := fmt.Sprintf("RAWSCAN %s", path) + ch, err := c.simpleCommand(command) + return ch, err +} + +/* +Scan file in a standard way or scan directory (recursively) using multiple threads +(to make the scanning faster on SMP machines). +*/ +func (c *Clamd) MultiScanFile(path string) (chan *ScanResult, error) { + command := fmt.Sprintf("MULTISCAN %s", path) + ch, err := c.simpleCommand(command) + return ch, err +} + +/* +Scan file or directory (recursively) with archive support enabled and don’t stop +the scanning when a virus is found. +*/ +func (c *Clamd) ContScanFile(path string) (chan *ScanResult, error) { + command := fmt.Sprintf("CONTSCAN %s", path) + ch, err := c.simpleCommand(command) + return ch, err +} + +/* +Scan file or directory (recursively) with archive support enabled and don’t stop +the scanning when a virus is found. +*/ +func (c *Clamd) AllMatchScanFile(path string) (chan *ScanResult, error) { + command := fmt.Sprintf("ALLMATCHSCAN %s", path) + ch, err := c.simpleCommand(command) + return ch, err +} + +/* +Scan a stream of data. The stream is sent to clamd in chunks, after INSTREAM, +on the same socket on which the command was sent. This avoids the overhead +of establishing new TCP connections and problems with NAT. The format of the +chunk is: where is the size of the following data in +bytes expressed as a 4 byte unsigned integer in network byte order and is +the actual chunk. Streaming is terminated by sending a zero-length chunk. 
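+In other words, each chunk on the wire is a 4-byte unsigned big-endian length
+followed by that many raw bytes, and a zero-length chunk terminates the stream
+(see sendChunk and sendEOF in conn.go). A minimal use, with an illustrative
+socket path:
+
+	c := clamd.NewClamd("/tmp/clamd.socket")
+	abort := make(chan bool)
+	results, err := c.ScanStream(bytes.NewReader(clamd.EICAR), abort)
+	if err == nil {
+		for r := range results {
+			fmt.Println(r.Status, r.Description)
+		}
+	}
+	close(abort)
+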
Note: +do not exceed StreamMaxLength as defined in clamd.conf, otherwise clamd will +reply with INSTREAM size limit exceeded and close the connection +*/ +func (c *Clamd) ScanStream(r io.Reader, abort chan bool) (chan *ScanResult, error) { + conn, err := c.newConnection() + if err != nil { + return nil, err + } + + go func() { + for { + _, allowRunning := <-abort + if !allowRunning { + break + } + } + conn.Close() + }() + + conn.sendCommand("INSTREAM") + + for { + buf := make([]byte, CHUNK_SIZE) + + nr, err := r.Read(buf) + if nr > 0 { + conn.sendChunk(buf[0:nr]) + } + + if err != nil { + break + } + + } + + err = conn.sendEOF() + if err != nil { + return nil, err + } + + ch, wg, err := conn.readResponse() + + go func() { + wg.Wait() + conn.Close() + }() + + return ch, nil +} + +func NewClamd(address string) *Clamd { + clamd := &Clamd{address: address} + return clamd +} diff --git a/transfersh-server/vendor/github.com/dutchcoders/go-clamd/conn.go b/transfersh-server/vendor/github.com/dutchcoders/go-clamd/conn.go new file mode 100644 index 0000000..5c9f7f9 --- /dev/null +++ b/transfersh-server/vendor/github.com/dutchcoders/go-clamd/conn.go @@ -0,0 +1,178 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 DutchCoders + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package clamd + +import ( + "bufio" + "fmt" + "io" + "net" + "regexp" + "strconv" + "strings" + "sync" + "time" +) + +const CHUNK_SIZE = 1024 +const TCP_TIMEOUT = time.Second * 2 + +var resultRegex = regexp.MustCompile( + `^(?P[^:]+): ((?P[^:]+)(\((?P([^:]+)):(?P\d+)\))? 
)?(?PFOUND|ERROR|OK)$`, +) + +type CLAMDConn struct { + net.Conn +} + +func (conn *CLAMDConn) sendCommand(command string) error { + commandBytes := []byte(fmt.Sprintf("n%s\n", command)) + + _, err := conn.Write(commandBytes) + return err +} + +func (conn *CLAMDConn) sendEOF() error { + _, err := conn.Write([]byte{0, 0, 0, 0}) + return err +} + +func (conn *CLAMDConn) sendChunk(data []byte) error { + var buf [4]byte + lenData := len(data) + buf[0] = byte(lenData >> 24) + buf[1] = byte(lenData >> 16) + buf[2] = byte(lenData >> 8) + buf[3] = byte(lenData >> 0) + + a := buf + + b := make([]byte, len(a)) + for i := range a { + b[i] = a[i] + } + + conn.Write(b) + + _, err := conn.Write(data) + return err +} + +func (c *CLAMDConn) readResponse() (chan *ScanResult, *sync.WaitGroup, error) { + var wg sync.WaitGroup + + wg.Add(1) + reader := bufio.NewReader(c) + ch := make(chan *ScanResult) + + go func() { + defer func() { + close(ch) + wg.Done() + }() + + for { + line, err := reader.ReadString('\n') + if err == io.EOF { + return + } + + if err != nil { + return + } + + line = strings.TrimRight(line, " \t\r\n") + ch <- parseResult(line) + } + }() + + return ch, &wg, nil +} + +func parseResult(line string) *ScanResult { + res := &ScanResult{} + res.Raw = line + + matches := resultRegex.FindStringSubmatch(line) + if len(matches) == 0 { + res.Description = "Regex had no matches" + res.Status = RES_PARSE_ERROR + return res + } + + for i, name := range resultRegex.SubexpNames() { + switch name { + case "path": + res.Path = matches[i] + case "desc": + res.Description = matches[i] + case "virhash": + res.Hash = matches[i] + case "virsize": + i, err := strconv.Atoi(matches[i]) + if err == nil { + res.Size = i + } + case "status": + switch matches[i] { + case RES_OK: + case RES_FOUND: + case RES_ERROR: + break + default: + res.Description = "Invalid status field: " + matches[i] + res.Status = RES_PARSE_ERROR + return res + } + res.Status = matches[i] + } + } + + return res +} + +func newCLAMDTcpConn(address string) (*CLAMDConn, error) { + conn, err := net.DialTimeout("tcp", address, TCP_TIMEOUT) + + if err != nil { + if nerr, isOk := err.(net.Error); isOk && nerr.Timeout() { + return nil, nerr + } + + return nil, err + } + + return &CLAMDConn{Conn: conn}, err +} + +func newCLAMDUnixConn(address string) (*CLAMDConn, error) { + conn, err := net.Dial("unix", address) + if err != nil { + return nil, err + } + + return &CLAMDConn{Conn: conn}, err +} diff --git a/transfersh-server/vendor/github.com/dutchcoders/go-clamd/examples/main.go b/transfersh-server/vendor/github.com/dutchcoders/go-clamd/examples/main.go new file mode 100644 index 0000000..1b4ccf2 --- /dev/null +++ b/transfersh-server/vendor/github.com/dutchcoders/go-clamd/examples/main.go @@ -0,0 +1,72 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 DutchCoders + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package main + +import ( + _ "bytes" + "fmt" + "github.com/dutchcoders/go-clamd" +) + +func main() { + fmt.Println("Made with <3 DutchCoders") + + c := clamd.NewClamd("/tmp/clamd.socket") + _ = c + + /* + reader := bytes.NewReader(clamd.EICAR) + response, err := c.ScanStream(reader) + + for s := range response { + fmt.Printf("%v %v\n", s, err) + } + + response, err = c.ScanFile(".") + + for s := range response { + fmt.Printf("%v %v\n", s, err) + } + + response, err = c.Version() + + for s := range response { + fmt.Printf("%v %v\n", s, err) + } + */ + + err := c.Ping() + fmt.Printf("Ping: %v\n", err) + + stats, err := c.Stats() + fmt.Printf("%v %v\n", stats, err) + + err = c.Reload() + fmt.Printf("Reload: %v\n", err) + + // response, err = c.Shutdown() + // fmt.Println(response) +} diff --git a/transfersh-server/vendor/github.com/dutchcoders/go-virustotal/LICENSE b/transfersh-server/vendor/github.com/dutchcoders/go-virustotal/LICENSE new file mode 100644 index 0000000..9c1b27c --- /dev/null +++ b/transfersh-server/vendor/github.com/dutchcoders/go-virustotal/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 dutchcoders + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
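The go-virustotal package below is a thin client for the VirusTotal v2 REST API. A minimal sketch of driving it, using a placeholder API key and a hypothetical file name:

	package main

	import (
		"fmt"
		"log"
		"os"

		"github.com/dutchcoders/go-virustotal"
	)

	func main() {
		vt, err := virustotal.NewVirusTotal("YOUR-API-KEY") // placeholder key
		if err != nil {
			log.Fatal(err)
		}

		f, err := os.Open("sample.bin") // hypothetical file
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		// Upload the file, then fetch the report for the returned resource.
		scan, err := vt.Scan("sample.bin", f)
		if err != nil {
			log.Fatal(err)
		}
		report, err := vt.Report(scan.Resource)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%d/%d engines flagged the file\n", report.Positives, report.Total)
	}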
diff --git a/transfersh-server/vendor/github.com/dutchcoders/go-virustotal/virustotal.go b/transfersh-server/vendor/github.com/dutchcoders/go-virustotal/virustotal.go new file mode 100644 index 0000000..9c590db --- /dev/null +++ b/transfersh-server/vendor/github.com/dutchcoders/go-virustotal/virustotal.go @@ -0,0 +1,361 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 DutchCoders + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package virustotal + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "path/filepath" + "strings" +) + +type VirusTotal struct { + apikey string +} + +type VirusTotalResponse struct { + ResponseCode int `json:"response_code"` + Message string `json:"verbose_msg"` +} + +type ScanResponse struct { + VirusTotalResponse + + ScanId string `json:"scan_id"` + Sha1 string `json:"sha1"` + Resource string `json:"resource"` + Sha256 string `json:"sha256"` + Permalink string `json:"permalink"` + Md5 string `json:"md5"` +} + +type FileScan struct { + Detected bool `json:"detected"` + Version string `json:"version"` + Result string `json:"result"` + Update string `json:"update"` +} + +type ReportResponse struct { + VirusTotalResponse + Resource string `json:"resource"` + ScanId string `json:"scan_id"` + Sha1 string `json:"sha1"` + Sha256 string `json:"sha256"` + Md5 string `json:"md5"` + Scandate string `json:"scan_date"` + Positives int `json:"positives"` + Total int `json:"total"` + Permalink string `json:"permalink"` + Scans map[string]FileScan `json:"scans"` +} + +func (sr *ScanResponse) String() string { + return fmt.Sprintf("scanid: %s, resource: %s, permalink: %s, md5: %s", sr.ScanId, sr.Resource, sr.Permalink, sr.Md5) +} + +type ScanUrlResponse struct { + ScanResponse +} + +type RescanResponse struct { + ScanResponse +} + +func (sr *RescanResponse) String() string { + return fmt.Sprintf("scanid: %s, resource: %s, permalink: %s, md5: %s", sr.ScanId, sr.Resource, sr.Permalink, sr.Md5) +} + +type DetectedUrl struct { + ScanDate string `json:"scan_date"` + Url string `json:"url"` + Positives int `json:"positives"` + Total int `json:"total"` +} + +type Resolution struct { + LastResolved string `json:"last_resolved"` + Hostname string `json:"hostname"` +} + +type IpAddressReportResponse struct { + VirusTotalResponse + Resolutions []Resolution `json:"resolutions"` + DetectedUrls []DetectedUrl `json:"detected_urls"` +} + +type DomainReportResponse struct { + 
VirusTotalResponse + Resolutions []Resolution `json:"resolutions"` + DetectedUrls []DetectedUrl `json:"detected_urls"` +} + +type CommentResponse struct { + VirusTotalResponse +} + +func NewVirusTotal(apikey string) (*VirusTotal, error) { + vt := &VirusTotal{apikey: apikey} + return vt, nil +} + +func (vt *VirusTotal) DomainReport(domain string) (*DomainReportResponse, error) { + u, err := url.Parse("https://www.virustotal.com/vtapi/v2/domain/report") + u.RawQuery = url.Values{"apikey": {vt.apikey}, "domain": {domain}}.Encode() + + resp, err := http.Get(u.String()) + + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + contents, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var domainReportResponse = &DomainReportResponse{} + + err = json.Unmarshal(contents, &domainReportResponse) + + return domainReportResponse, err +} + +func (vt *VirusTotal) ScanUrl(url2 *url.URL) (*ScanResponse, error) { + u, err := url.Parse("https://www.virustotal.com/vtapi/v2/url/scan") + + params := url.Values{"apikey": {vt.apikey}, "url": {url2.String()}} + + resp, err := http.PostForm(u.String(), params) + + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + contents, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + + var scanResponse = &ScanResponse{} + + err = json.Unmarshal(contents, &scanResponse) + + return scanResponse, err +} + +func (vt *VirusTotal) Report(resource string) (*ReportResponse, error) { + u, err := url.Parse("https://www.virustotal.com/vtapi/v2/file/report") + + params := url.Values{"apikey": {vt.apikey}, "resource": {resource}} + + resp, err := http.PostForm(u.String(), params) + + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + contents, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + + var reportResponse = &ReportResponse{} + + err = json.Unmarshal(contents, &reportResponse) + + return reportResponse, err +} + +func (vt *VirusTotal) ReportUrl(url2 *url.URL) (*ReportResponse, error) { + params := url.Values{"apikey": {vt.apikey}, "resource": {url2.String()}} + + u, err := url.Parse("https://www.virustotal.com/vtapi/v2/url/report") + + resp, err := http.PostForm(u.String(), params) + + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + contents, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + + var reportResponse = &ReportResponse{} + + err = json.Unmarshal(contents, &reportResponse) + + return reportResponse, err +} + +func (vt *VirusTotal) Comment(resource string, comment string) (*CommentResponse, error) { + u, err := url.Parse("https://www.virustotal.com/vtapi/v2/comments/put") + params := url.Values{"apikey": {vt.apikey}, "resource": {resource}, "comment": {comment}} + + resp, err := http.PostForm(u.String(), params) + + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + contents, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + + var commentResponse = &CommentResponse{} + + err = json.Unmarshal(contents, &commentResponse) + + return commentResponse, err +} + +func (vt *VirusTotal) IpAddressReport(ip string) (*IpAddressReportResponse, error) { + u, err := url.Parse("http://www.virustotal.com/vtapi/v2/ip-address/report") + u.RawQuery = url.Values{"apikey": {vt.apikey}, "ip": {ip}}.Encode() + + resp, err := http.Get(u.String()) + + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + contents, err := ioutil.ReadAll(resp.Body) + if 
err != nil { + return nil, err + } + + var ipAddressReportResponse = &IpAddressReportResponse{} + + err = json.Unmarshal(contents, &ipAddressReportResponse) + + return ipAddressReportResponse, err +} + +func (vt *VirusTotal) Rescan(hash []string) (*RescanResponse, error) { + resource := strings.Join(hash, ",") + + resp, err := http.PostForm("https://www.virustotal.com/vtapi/v2/file/rescan", url.Values{"apikey": {vt.apikey}, "resource": {resource}}) + + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + contents, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var rescanResponse = &RescanResponse{} + + err = json.Unmarshal(contents, &rescanResponse) + + return rescanResponse, err +} + +func (vt *VirusTotal) Scan(path string, file io.Reader) (*ScanResponse, error) { + params := map[string]string{ + "apikey": vt.apikey, + } + + request, err := newfileUploadRequest("http://www.virustotal.com/vtapi/v2/file/scan", params, path, file) + + if err != nil { + return nil, err + } + + client := &http.Client{} + + resp, err := client.Do(request) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + contents, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var scanResponse = &ScanResponse{} + err = json.Unmarshal(contents, &scanResponse) + + return scanResponse, err +} + +// Creates a new file upload http request with optional extra params +func newfileUploadRequest(uri string, params map[string]string, path string, file io.Reader) (*http.Request, error) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + for key, val := range params { + _ = writer.WriteField(key, val) + } + + part, err := writer.CreateFormFile("file", filepath.Base(path)) + if err != nil { + return nil, err + } + _, err = io.Copy(part, file) + + err = writer.Close() + + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", uri, body) + + req.Header.Set("Content-Type", writer.FormDataContentType()) + return req, err +} diff --git a/transfersh-server/vendor/github.com/eknkc/amber/amberc/cli.go b/transfersh-server/vendor/github.com/eknkc/amber/amberc/cli.go new file mode 100644 index 0000000..4ce3163 --- /dev/null +++ b/transfersh-server/vendor/github.com/eknkc/amber/amberc/cli.go @@ -0,0 +1,48 @@ +package main + +import ( + "flag" + "fmt" + amber "github.com/eknkc/amber" + "os" +) + +var prettyPrint bool +var lineNumbers bool + +func init() { + flag.BoolVar(&prettyPrint, "prettyprint", true, "Use pretty indentation in output html.") + flag.BoolVar(&prettyPrint, "pp", true, "Use pretty indentation in output html.") + + flag.BoolVar(&lineNumbers, "linenos", true, "Enable debugging information in output html.") + flag.BoolVar(&lineNumbers, "ln", true, "Enable debugging information in output html.") + + flag.Parse() +} + +func main() { + input := flag.Arg(0) + + if len(input) == 0 { + fmt.Fprintln(os.Stderr, "Please provide an input file. 
(amberc input.amber)") + os.Exit(1) + } + + cmp := amber.New() + cmp.PrettyPrint = prettyPrint + cmp.LineNumbers = lineNumbers + + err := cmp.ParseFile(input) + + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + err = cmp.CompileWriter(os.Stdout) + + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} diff --git a/transfersh-server/vendor/github.com/eknkc/amber/compiler.go b/transfersh-server/vendor/github.com/eknkc/amber/compiler.go new file mode 100644 index 0000000..7b30d94 --- /dev/null +++ b/transfersh-server/vendor/github.com/eknkc/amber/compiler.go @@ -0,0 +1,781 @@ +package amber + +import ( + "bytes" + "container/list" + "errors" + "fmt" + "go/ast" + gp "go/parser" + gt "go/token" + "html/template" + "io" + "os" + "path/filepath" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/eknkc/amber/parser" +) + +var builtinFunctions = [...]string{ + "len", + "print", + "printf", + "println", + "urlquery", + "js", + "json", + "index", + "html", + "unescaped", +} + +// Compiler is the main interface of Amber Template Engine. +// In order to use an Amber template, it is required to create a Compiler and +// compile an Amber source to native Go template. +// compiler := amber.New() +// // Parse the input file +// err := compiler.ParseFile("./input.amber") +// if err == nil { +// // Compile input file to Go template +// tpl, err := compiler.Compile() +// if err == nil { +// // Check built in html/template documentation for further details +// tpl.Execute(os.Stdout, somedata) +// } +// } +type Compiler struct { + // Compiler options + Options + filename string + node parser.Node + indentLevel int + newline bool + buffer *bytes.Buffer + tempvarIndex int + mixins map[string]*parser.Mixin +} + +// New creates and initialize a new Compiler. +func New() *Compiler { + compiler := new(Compiler) + compiler.filename = "" + compiler.tempvarIndex = 0 + compiler.PrettyPrint = true + compiler.Options = DefaultOptions + compiler.mixins = make(map[string]*parser.Mixin) + + return compiler +} + +// Options defines template output behavior. +type Options struct { + // Setting if pretty printing is enabled. + // Pretty printing ensures that the output html is properly indented and in human readable form. + // If disabled, produced HTML is compact. This might be more suitable in production environments. + // Default: true + PrettyPrint bool + // Setting if line number emitting is enabled + // In this form, Amber emits line number comments in the output template. It is usable in debugging environments. + // Default: false + LineNumbers bool +} + +// DirOptions is used to provide options to directory compilation. +type DirOptions struct { + // File extension to match for compilation + Ext string + // Whether or not to walk subdirectories + Recursive bool +} + +// DefaultOptions sets pretty-printing to true and line numbering to false. +var DefaultOptions = Options{true, false} + +// DefaultDirOptions sets expected file extension to ".amber" and recursive search for templates within a directory to true. +var DefaultDirOptions = DirOptions{".amber", true} + +// Compile parses and compiles the supplied amber template string. Returns corresponding Go Template (html/templates) instance. +// Necessary runtime functions will be injected and the template will be ready to be executed. 
+func Compile(input string, options Options) (*template.Template, error) { + comp := New() + comp.Options = options + + err := comp.Parse(input) + if err != nil { + return nil, err + } + + return comp.Compile() +} + +// Compile parses and compiles the supplied amber template []byte. +// Returns corresponding Go Template (html/templates) instance. +// Necessary runtime functions will be injected and the template will be ready to be executed. +func CompileData(input []byte, filename string, options Options) (*template.Template, error) { + comp := New() + comp.Options = options + + err := comp.ParseData(input, filename) + if err != nil { + return nil, err + } + + return comp.Compile() +} + +// MustCompile is the same as Compile, except the input is assumed error free. If else, panic. +func MustCompile(input string, options Options) *template.Template { + t, err := Compile(input, options) + if err != nil { + panic(err) + } + return t +} + +// CompileFile parses and compiles the contents of supplied filename. Returns corresponding Go Template (html/templates) instance. +// Necessary runtime functions will be injected and the template will be ready to be executed. +func CompileFile(filename string, options Options) (*template.Template, error) { + comp := New() + comp.Options = options + + err := comp.ParseFile(filename) + if err != nil { + return nil, err + } + + return comp.Compile() +} + +// MustCompileFile is the same as CompileFile, except the input is assumed error free. If else, panic. +func MustCompileFile(filename string, options Options) *template.Template { + t, err := CompileFile(filename, options) + if err != nil { + panic(err) + } + return t +} + +// CompileDir parses and compiles the contents of a supplied directory path, with options. +// Returns a map of a template identifier (key) to a Go Template instance. +// Ex: if the dirname="templates/" had a file "index.amber" the key would be "index" +// If option for recursive is True, this parses every file of relevant extension +// in all subdirectories. The key then is the path e.g: "layouts/layout" +func CompileDir(dirname string, dopt DirOptions, opt Options) (map[string]*template.Template, error) { + dir, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer dir.Close() + + files, err := dir.Readdir(0) + if err != nil { + return nil, err + } + + compiled := make(map[string]*template.Template) + for _, file := range files { + // filename is for example "index.amber" + filename := file.Name() + fileext := filepath.Ext(filename) + + // If recursive is true and there's a subdirectory, recurse + if dopt.Recursive && file.IsDir() { + dirpath := filepath.Join(dirname, filename) + subcompiled, err := CompileDir(dirpath, dopt, opt) + if err != nil { + return nil, err + } + // Copy templates from subdirectory into parent template mapping + for k, v := range subcompiled { + // Concat with parent directory name for unique paths + key := filepath.Join(filename, k) + compiled[key] = v + } + } else if fileext == dopt.Ext { + // Otherwise compile the file and add to mapping + fullpath := filepath.Join(dirname, filename) + tmpl, err := CompileFile(fullpath, opt) + if err != nil { + return nil, err + } + // Strip extension + key := filename[0 : len(filename)-len(fileext)] + compiled[key] = tmpl + } + } + + return compiled, nil +} + +// MustCompileDir is the same as CompileDir, except input is assumed error free. If else, panic. 
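+//
+// For example, with a hypothetical ./views directory holding index.amber and
+// layouts/base.amber:
+//	tpls := amber.MustCompileDir("./views", amber.DefaultDirOptions, amber.DefaultOptions)
+//	tpls["index"].Execute(os.Stdout, nil)        // from ./views/index.amber
+//	tpls["layouts/base"].Execute(os.Stdout, nil) // from ./views/layouts/base.amber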
+func MustCompileDir(dirname string, dopt DirOptions, opt Options) map[string]*template.Template { + m, err := CompileDir(dirname, dopt, opt) + if err != nil { + panic(err) + } + return m +} + +// Parse given raw amber template string. +func (c *Compiler) Parse(input string) (err error) { + defer func() { + if r := recover(); r != nil { + err = errors.New(r.(string)) + } + }() + + parser, err := parser.StringParser(input) + + if err != nil { + return + } + + c.node = parser.Parse() + return +} + +// Parse given raw amber template bytes, and the filename that belongs with it +func (c *Compiler) ParseData(input []byte, filename string) (err error) { + defer func() { + if r := recover(); r != nil { + err = errors.New(r.(string)) + } + }() + + parser, err := parser.ByteParser(input) + parser.SetFilename(filename) + + if err != nil { + return + } + + c.node = parser.Parse() + return +} + +// ParseFile parses the amber template file in given path. +func (c *Compiler) ParseFile(filename string) (err error) { + defer func() { + if r := recover(); r != nil { + err = errors.New(r.(string)) + } + }() + + parser, err := parser.FileParser(filename) + + if err != nil { + return + } + + c.node = parser.Parse() + c.filename = filename + return +} + +// Compile amber and create a Go Template (html/templates) instance. +// Necessary runtime functions will be injected and the template will be ready to be executed. +func (c *Compiler) Compile() (*template.Template, error) { + return c.CompileWithName(filepath.Base(c.filename)) +} + +// CompileWithName is the same as Compile, but allows to specify a name for the template. +func (c *Compiler) CompileWithName(name string) (*template.Template, error) { + return c.CompileWithTemplate(template.New(name)) +} + +// CompileWithTemplate is the same as Compile but allows to specify a template. +func (c *Compiler) CompileWithTemplate(t *template.Template) (*template.Template, error) { + data, err := c.CompileString() + + if err != nil { + return nil, err + } + + tpl, err := t.Funcs(FuncMap).Parse(data) + + if err != nil { + return nil, err + } + + return tpl, nil +} + +// CompileWriter compiles amber and writes the Go Template source into given io.Writer instance. +// You would not be using this unless debugging / checking the output. Please use Compile +// method to obtain a template instance directly. +func (c *Compiler) CompileWriter(out io.Writer) (err error) { + defer func() { + if r := recover(); r != nil { + err = errors.New(r.(string)) + } + }() + + c.buffer = new(bytes.Buffer) + c.visit(c.node) + + if c.buffer.Len() > 0 { + c.write("\n") + } + + _, err = c.buffer.WriteTo(out) + return +} + +// CompileString compiles the template and returns the Go Template source. +// You would not be using this unless debugging / checking the output. Please use Compile +// method to obtain a template instance directly. 
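+//
+// For example, to inspect the generated template source while debugging:
+//	c := amber.New()
+//	if err := c.Parse("p Hi"); err == nil {
+//		src, _ := c.CompileString()
+//		fmt.Println(src) // prints the generated html/template source: <p>Hi</p>
+//	}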
+func (c *Compiler) CompileString() (string, error) { + var buf bytes.Buffer + + if err := c.CompileWriter(&buf); err != nil { + return "", err + } + + result := buf.String() + + return result, nil +} + +func (c *Compiler) visit(node parser.Node) { + defer func() { + if r := recover(); r != nil { + if rs, ok := r.(string); ok && rs[:len("Amber Error")] == "Amber Error" { + panic(r) + } + + pos := node.Pos() + + if len(pos.Filename) > 0 { + panic(fmt.Sprintf("Amber Error in <%s>: %v - Line: %d, Column: %d, Length: %d", pos.Filename, r, pos.LineNum, pos.ColNum, pos.TokenLength)) + } else { + panic(fmt.Sprintf("Amber Error: %v - Line: %d, Column: %d, Length: %d", r, pos.LineNum, pos.ColNum, pos.TokenLength)) + } + } + }() + + switch node.(type) { + case *parser.Block: + c.visitBlock(node.(*parser.Block)) + case *parser.Doctype: + c.visitDoctype(node.(*parser.Doctype)) + case *parser.Comment: + c.visitComment(node.(*parser.Comment)) + case *parser.Tag: + c.visitTag(node.(*parser.Tag)) + case *parser.Text: + c.visitText(node.(*parser.Text)) + case *parser.Condition: + c.visitCondition(node.(*parser.Condition)) + case *parser.Each: + c.visitEach(node.(*parser.Each)) + case *parser.Assignment: + c.visitAssignment(node.(*parser.Assignment)) + case *parser.Mixin: + c.visitMixin(node.(*parser.Mixin)) + case *parser.MixinCall: + c.visitMixinCall(node.(*parser.MixinCall)) + } +} + +func (c *Compiler) write(value string) { + c.buffer.WriteString(value) +} + +func (c *Compiler) indent(offset int, newline bool) { + if !c.PrettyPrint { + return + } + + if newline && c.buffer.Len() > 0 { + c.write("\n") + } + + for i := 0; i < c.indentLevel+offset; i++ { + c.write("\t") + } +} + +func (c *Compiler) tempvar() string { + c.tempvarIndex++ + return "$__amber_" + strconv.Itoa(c.tempvarIndex) +} + +func (c *Compiler) escape(input string) string { + return strings.Replace(strings.Replace(input, `\`, `\\`, -1), `"`, `\"`, -1) +} + +func (c *Compiler) visitBlock(block *parser.Block) { + for _, node := range block.Children { + if _, ok := node.(*parser.Text); !block.CanInline() && ok { + c.indent(0, true) + } + + c.visit(node) + } +} + +func (c *Compiler) visitDoctype(doctype *parser.Doctype) { + c.write(doctype.String()) +} + +func (c *Compiler) visitComment(comment *parser.Comment) { + if comment.Silent { + return + } + + c.indent(0, false) + + if comment.Block == nil { + c.write(`{{unescaped ""}}`) + } else { + c.write(``) + } +} + +func (c *Compiler) visitCondition(condition *parser.Condition) { + c.write(`{{if ` + c.visitRawInterpolation(condition.Expression) + `}}`) + c.visitBlock(condition.Positive) + if condition.Negative != nil { + c.write(`{{else}}`) + c.visitBlock(condition.Negative) + } + c.write(`{{end}}`) +} + +func (c *Compiler) visitEach(each *parser.Each) { + if each.Block == nil { + return + } + + if len(each.Y) == 0 { + c.write(`{{range ` + each.X + ` := ` + c.visitRawInterpolation(each.Expression) + `}}`) + } else { + c.write(`{{range ` + each.X + `, ` + each.Y + ` := ` + c.visitRawInterpolation(each.Expression) + `}}`) + } + c.visitBlock(each.Block) + c.write(`{{end}}`) +} + +func (c *Compiler) visitAssignment(assgn *parser.Assignment) { + c.write(`{{` + assgn.X + ` := ` + c.visitRawInterpolation(assgn.Expression) + `}}`) +} + +func (c *Compiler) visitTag(tag *parser.Tag) { + type attrib struct { + name string + value string + condition string + } + + attribs := make(map[string]*attrib) + + for _, item := range tag.Attributes { + attr := new(attrib) + attr.name = item.Name + + if !item.IsRaw { + 
attr.value = c.visitInterpolation(item.Value) + } else if item.Value == "" { + attr.value = "" + } else { + attr.value = item.Value + } + + if len(item.Condition) != 0 { + attr.condition = c.visitRawInterpolation(item.Condition) + } + + if attr.name == "class" && attribs["class"] != nil { + prevclass := attribs["class"] + attr.value = ` ` + attr.value + + if len(attr.condition) > 0 { + attr.value = `{{if ` + attr.condition + `}}` + attr.value + `{{end}}` + attr.condition = "" + } + + if len(prevclass.condition) > 0 { + prevclass.value = `{{if ` + prevclass.condition + `}}` + prevclass.value + `{{end}}` + prevclass.condition = "" + } + + prevclass.value = prevclass.value + attr.value + } else { + attribs[item.Name] = attr + } + } + + keys := make([]string, 0, len(attribs)) + for key := range attribs { + keys = append(keys, key) + } + sort.Strings(keys) + + c.indent(0, true) + c.write("<" + tag.Name) + + for _, name := range keys { + value := attribs[name] + + if len(value.condition) > 0 { + c.write(`{{if ` + value.condition + `}}`) + } + + if value.value == "" { + c.write(` ` + name) + } else { + c.write(` ` + name + `="` + value.value + `"`) + } + + if len(value.condition) > 0 { + c.write(`{{end}}`) + } + } + + if tag.IsSelfClosing() { + c.write(` />`) + } else { + c.write(`>`) + + if tag.Block != nil { + if !tag.Block.CanInline() { + c.indentLevel++ + } + + c.visitBlock(tag.Block) + + if !tag.Block.CanInline() { + c.indentLevel-- + c.indent(0, true) + } + } + + c.write(``) + } +} + +var textInterpolateRegexp = regexp.MustCompile(`#\{(.*?)\}`) +var textEscapeRegexp = regexp.MustCompile(`\{\{(.*?)\}\}`) + +func (c *Compiler) visitText(txt *parser.Text) { + value := textEscapeRegexp.ReplaceAllStringFunc(txt.Value, func(value string) string { + return `{{"{{"}}` + value[2:len(value)-2] + `{{"}}"}}` + }) + + value = textInterpolateRegexp.ReplaceAllStringFunc(value, func(value string) string { + return c.visitInterpolation(value[2 : len(value)-1]) + }) + + lines := strings.Split(value, "\n") + for i := 0; i < len(lines); i++ { + c.write(lines[i]) + + if i < len(lines)-1 { + c.write("\n") + c.indent(0, false) + } + } +} + +func (c *Compiler) visitInterpolation(value string) string { + return `{{` + c.visitRawInterpolation(value) + `}}` +} + +func (c *Compiler) visitRawInterpolation(value string) string { + if value == "" { + value = "\"\"" + } + + value = strings.Replace(value, "$", "__DOLLAR__", -1) + expr, err := gp.ParseExpr(value) + if err != nil { + panic("Unable to parse expression.") + } + value = strings.Replace(c.visitExpression(expr), "__DOLLAR__", "$", -1) + return value +} + +func (c *Compiler) visitExpression(outerexpr ast.Expr) string { + stack := list.New() + + pop := func() string { + if stack.Front() == nil { + return "" + } + + val := stack.Front().Value.(string) + stack.Remove(stack.Front()) + return val + } + + var exec func(ast.Expr) + + exec = func(expr ast.Expr) { + switch expr.(type) { + case *ast.BinaryExpr: + { + be := expr.(*ast.BinaryExpr) + + exec(be.Y) + exec(be.X) + + negate := false + name := c.tempvar() + c.write(`{{` + name + ` := `) + + switch be.Op { + case gt.ADD: + c.write("__amber_add ") + case gt.SUB: + c.write("__amber_sub ") + case gt.MUL: + c.write("__amber_mul ") + case gt.QUO: + c.write("__amber_quo ") + case gt.REM: + c.write("__amber_rem ") + case gt.LAND: + c.write("and ") + case gt.LOR: + c.write("or ") + case gt.EQL: + c.write("__amber_eql ") + case gt.NEQ: + c.write("__amber_eql ") + negate = true + case gt.LSS: + c.write("__amber_lss ") + case 
gt.GTR: + c.write("__amber_gtr ") + case gt.LEQ: + c.write("__amber_gtr ") + negate = true + case gt.GEQ: + c.write("__amber_lss ") + negate = true + default: + panic("Unexpected operator!") + } + + c.write(pop() + ` ` + pop() + `}}`) + + if !negate { + stack.PushFront(name) + } else { + negname := c.tempvar() + c.write(`{{` + negname + ` := not ` + name + `}}`) + stack.PushFront(negname) + } + } + case *ast.UnaryExpr: + { + ue := expr.(*ast.UnaryExpr) + + exec(ue.X) + + name := c.tempvar() + c.write(`{{` + name + ` := `) + + switch ue.Op { + case gt.SUB: + c.write("__amber_minus ") + case gt.ADD: + c.write("__amber_plus ") + case gt.NOT: + c.write("not ") + default: + panic("Unexpected operator!") + } + + c.write(pop() + `}}`) + stack.PushFront(name) + } + case *ast.ParenExpr: + exec(expr.(*ast.ParenExpr).X) + case *ast.BasicLit: + stack.PushFront(expr.(*ast.BasicLit).Value) + case *ast.Ident: + name := expr.(*ast.Ident).Name + if len(name) >= len("__DOLLAR__") && name[:len("__DOLLAR__")] == "__DOLLAR__" { + if name == "__DOLLAR__" { + stack.PushFront(`.`) + } else { + stack.PushFront(`$` + expr.(*ast.Ident).Name[len("__DOLLAR__"):]) + } + } else { + stack.PushFront(`.` + expr.(*ast.Ident).Name) + } + case *ast.SelectorExpr: + se := expr.(*ast.SelectorExpr) + exec(se.X) + x := pop() + + if x == "." { + x = "" + } + + name := c.tempvar() + c.write(`{{` + name + ` := ` + x + `.` + se.Sel.Name + `}}`) + stack.PushFront(name) + case *ast.CallExpr: + ce := expr.(*ast.CallExpr) + + for i := len(ce.Args) - 1; i >= 0; i-- { + exec(ce.Args[i]) + } + + name := c.tempvar() + builtin := false + + if ident, ok := ce.Fun.(*ast.Ident); ok { + for _, fname := range builtinFunctions { + if fname == ident.Name { + builtin = true + break + } + } + } + + if builtin { + stack.PushFront(ce.Fun.(*ast.Ident).Name) + c.write(`{{` + name + ` := ` + pop()) + } else { + exec(ce.Fun) + c.write(`{{` + name + ` := call ` + pop()) + } + + for i := 0; i < len(ce.Args); i++ { + c.write(` `) + c.write(pop()) + } + + c.write(`}}`) + + stack.PushFront(name) + default: + panic("Unable to parse expression. Unsupported: " + reflect.TypeOf(expr).String()) + } + } + + exec(outerexpr) + return pop() +} + +func (c *Compiler) visitMixin(mixin *parser.Mixin) { + c.mixins[mixin.Name] = mixin +} + +func (c *Compiler) visitMixinCall(mixinCall *parser.MixinCall) { + mixin := c.mixins[mixinCall.Name] + for i, arg := range mixin.Args { + c.write(fmt.Sprintf(`{{%s := %s}}`, arg, c.visitRawInterpolation(mixinCall.Args[i]))) + } + c.visitBlock(mixin.Block) +} diff --git a/transfersh-server/vendor/github.com/eknkc/amber/doc.go b/transfersh-server/vendor/github.com/eknkc/amber/doc.go new file mode 100644 index 0000000..76ee96a --- /dev/null +++ b/transfersh-server/vendor/github.com/eknkc/amber/doc.go @@ -0,0 +1,257 @@ +/* +Package amber is an elegant templating engine for Go Programming Language. +It is inspired from HAML and Jade. + +Tags + +A tag is simply a word: + + html + +is converted to + + + +It is possible to add ID and CLASS attributes to tags: + + div#main + span.time + +are converted to + +
+	<div id="main"></div>
+	<span class="time"></span>
+
+Any arbitrary attribute name / value pair can be added this way:
+
+	a[href="http://www.google.com"]
+
+You can mix multiple attributes together
+
+	a#someid[href="/"][title="Main Page"].main.link Click Link
+
+gets converted to
+
+	<a id="someid" class="main link" href="/" title="Main Page">Click Link</a>
+
+It is also possible to define these attributes within the block of a tag
+
+	a
+		#someid
+		[href="/"]
+		[title="Main Page"]
+		.main
+		.link
+		| Click Link
+
+Doctypes
+
+To add a doctype, use `!!!` or `doctype` keywords:
+
+	!!! transitional
+	// <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+or use `doctype`
+
+	doctype 5
+	// <!DOCTYPE html>
+
+Available options: `5`, `default`, `xml`, `transitional`, `strict`, `frameset`, `1.1`, `basic`, `mobile`
+
+Tag Content
+
+For single line tag text, you can just append the text after tag name:
+
+	p Testing!
+
+would yield
+

	<p>Testing!</p>
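
A minimal program that renders the one-line template above could look like the
following sketch (it assumes this package's Compile function and DefaultOptions
value, with Compile returning a standard *html/template.Template):

	package main

	import (
		"os"

		"github.com/eknkc/amber"
	)

	func main() {
		// Compile the Amber source into an html/template.Template.
		tpl, err := amber.Compile("p Testing!", amber.DefaultOptions)
		if err != nil {
			panic(err)
		}
		// Writes <p>Testing!</p> to standard output.
		if err := tpl.Execute(os.Stdout, nil); err != nil {
			panic(err)
		}
	}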

+ +For multi line tag text, or nested tags, use indentation: + + html + head + title Page Title + body + div#content + p + | This is a long page content + | These lines are all part of the parent p + + a[href="/"] Go To Main Page + +Data + +Input template data can be reached by key names directly. For example, assuming the template has been +executed with following JSON data: + + { + "Name": "Ekin", + "LastName": "Koc", + "Repositories": [ + "amber", + "dateformat" + ], + "Avatar": "/images/ekin.jpg", + "Friends": 17 + } + +It is possible to interpolate fields using `#{}` + + p Welcome #{Name}! + +would print + +

	<p>Welcome Ekin!</p>
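
To make the data binding concrete, here is a short sketch (again assuming the
Compile API mentioned above); the JSON document is decoded into a generic map,
so keys such as Name resolve directly at execution time:

	package main

	import (
		"encoding/json"
		"os"

		"github.com/eknkc/amber"
	)

	func main() {
		raw := []byte(`{"Name": "Ekin", "LastName": "Koc", "Friends": 17}`)

		// Template keys like Name are looked up in this map during execution.
		var data map[string]interface{}
		if err := json.Unmarshal(raw, &data); err != nil {
			panic(err)
		}

		tpl, err := amber.Compile("p Welcome #{Name}!", amber.DefaultOptions)
		if err != nil {
			panic(err)
		}
		// Prints: <p>Welcome Ekin!</p>
		if err := tpl.Execute(os.Stdout, data); err != nil {
			panic(err)
		}
	}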

+ +Attributes can have field names as well + + a[title=Name][href="/ekin.koc"] + +would print + + + +Expressions + +Amber can expand basic expressions. For example, it is possible to concatenate strings with + operator: + + p Welcome #{Name + " " + LastName} + +Arithmetic expressions are also supported: + + p You need #{50 - Friends} more friends to reach 50! + +Expressions can be used within attributes + + img[alt=Name + " " + LastName][src=Avatar] + +Variables + +It is possible to define dynamic variables within templates, +all variables must start with a $ character and can be assigned as in the following example: + + div + $fullname = Name + " " + LastName + p Welcome #{$fullname} + +If you need to access the supplied data itself (i.e. the object containing Name, LastName etc fields.) you can use `$` variable + + p $.Name + +Conditions + +For conditional blocks, it is possible to use `if ` + + div + if Friends > 10 + p You have more than 10 friends + else if Friends > 5 + p You have more than 5 friends + else + p You need more friends + +Again, it is possible to use arithmetic and boolean operators + + div + if Name == "Ekin" && LastName == "Koc" + p Hey! I know you.. + +There is a special syntax for conditional attributes. Only block attributes can have conditions; + + div + .hasfriends ? Friends > 0 + +This would yield a div with `hasfriends` class only if the `Friends > 0` condition holds. It is +perfectly fine to use the same method for other types of attributes: + + div + #foo ? Name == "Ekin" + [bar=baz] ? len(Repositories) > 0 + +Iterations + +It is possible to iterate over arrays and maps using `each`: + + each $repo in Repositories + p #{$repo} + +would print + + p amber + p dateformat + +It is also possible to iterate over values and indexes at the same time + + each $i, $repo in Repositories + p + .even ? $i % 2 == 0 + .odd ? $i % 2 == 1 + +Includes + +A template can include other templates using `include`: + + a.amber + p this is template a + + b.amber + p this is template b + + c.amber + div + include a + include b + +gets compiled to + + div + p this is template a + p this is template b + +Inheritance + +A template can inherit other templates. In order to inherit another template, an `extends` keyword should be used. +Parent template can define several named blocks and child template can modify the blocks. + + master.amber + !!! 5 + html + head + block meta + meta[name="description"][content="This is a great website"] + + title + block title + | Default title + body + block content + + subpage.amber + extends master + + block title + | Some sub page! + + block append meta + // This will be added after the description meta tag. It is also possible + // to prepend something to an existing block + meta[name="keywords"][content="foo bar"] + + block content + div#main + p Some content here + +License +(The MIT License) + +Copyright (c) 2012 Ekin Koc + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+package amber
diff --git a/transfersh-server/vendor/github.com/eknkc/amber/parser/nodes.go b/transfersh-server/vendor/github.com/eknkc/amber/parser/nodes.go
new file mode 100644
index 0000000..4724546
--- /dev/null
+++ b/transfersh-server/vendor/github.com/eknkc/amber/parser/nodes.go
@@ -0,0 +1,281 @@
+package parser
+
+import "regexp"
+import "strings"
+
+var selfClosingTags = [...]string{
+	"meta",
+	"img",
+	"link",
+	"input",
+	"source",
+	"area",
+	"base",
+	"col",
+	"br",
+	"hr",
+}
+
+var doctypes = map[string]string{
+	"5":            `<!DOCTYPE html>`,
+	"default":      `<!DOCTYPE html>`,
+	"xml":          `<?xml version="1.0" encoding="utf-8" ?>`,
+	"transitional": `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">`,
+	"strict":       `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">`,
+	"frameset":     `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">`,
+	"1.1":          `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">`,
+	"basic":        `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">`,
+	"mobile":       `<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd">`,
+}
+
+type Node interface {
+	Pos() SourcePosition
+}
+
+type SourcePosition struct {
+	LineNum     int
+	ColNum      int
+	TokenLength int
+	Filename    string
+}
+
+func (s *SourcePosition) Pos() SourcePosition {
+	return *s
+}
+
+type Doctype struct {
+	SourcePosition
+	Value string
+}
+
+func newDoctype(value string) *Doctype {
+	dt := new(Doctype)
+	dt.Value = value
+	return dt
+}
+
+func (d *Doctype) String() string {
+	if defined := doctypes[d.Value]; len(defined) != 0 {
+		return defined
+	}
+
+	return `<!DOCTYPE ` + d.Value + `>`
+}
+
+type Comment struct {
+	SourcePosition
+	Value  string
+	Block  *Block
+	Silent bool
+}
+
+func newComment(value string) *Comment {
+	dt := new(Comment)
+	dt.Value = value
+	dt.Block = nil
+	dt.Silent = false
+	return dt
+}
+
+type Text struct {
+	SourcePosition
+	Value string
+	Raw   bool
+}
+
+func newText(value string, raw bool) *Text {
+	dt := new(Text)
+	dt.Value = value
+	dt.Raw = raw
+	return dt
+}
+
+type Block struct {
+	SourcePosition
+	Children []Node
+}
+
+func newBlock() *Block {
+	block := new(Block)
+	block.Children = make([]Node, 0)
+	return block
+}
+
+func (b *Block) push(node Node) {
+	b.Children = append(b.Children, node)
+}
+
+func (b *Block) pushFront(node Node) {
+	b.Children = append([]Node{node}, b.Children...)
+} + +func (b *Block) CanInline() bool { + if len(b.Children) == 0 { + return true + } + + allText := true + + for _, child := range b.Children { + if txt, ok := child.(*Text); !ok || txt.Raw { + allText = false + break + } + } + + return allText +} + +const ( + NamedBlockDefault = iota + NamedBlockAppend + NamedBlockPrepend +) + +type NamedBlock struct { + Block + Name string + Modifier int +} + +func newNamedBlock(name string) *NamedBlock { + bb := new(NamedBlock) + bb.Name = name + bb.Block.Children = make([]Node, 0) + bb.Modifier = NamedBlockDefault + return bb +} + +type Attribute struct { + SourcePosition + Name string + Value string + IsRaw bool + Condition string +} + +type Tag struct { + SourcePosition + Block *Block + Name string + IsInterpolated bool + Attributes []Attribute +} + +func newTag(name string) *Tag { + tag := new(Tag) + tag.Block = nil + tag.Name = name + tag.Attributes = make([]Attribute, 0) + tag.IsInterpolated = false + return tag + +} + +func (t *Tag) IsSelfClosing() bool { + for _, tag := range selfClosingTags { + if tag == t.Name { + return true + } + } + + return false +} + +func (t *Tag) IsRawText() bool { + return t.Name == "style" || t.Name == "script" +} + +type Condition struct { + SourcePosition + Positive *Block + Negative *Block + Expression string +} + +func newCondition(exp string) *Condition { + cond := new(Condition) + cond.Expression = exp + return cond +} + +type Each struct { + SourcePosition + X string + Y string + Expression string + Block *Block +} + +func newEach(exp string) *Each { + each := new(Each) + each.Expression = exp + return each +} + +type Assignment struct { + SourcePosition + X string + Expression string +} + +func newAssignment(x, expression string) *Assignment { + assgn := new(Assignment) + assgn.X = x + assgn.Expression = expression + return assgn +} + +type Mixin struct { + SourcePosition + Block *Block + Name string + Args []string +} + +func newMixin(name, args string) *Mixin { + mixin := new(Mixin) + mixin.Name = name + + delExp := regexp.MustCompile(`,\s`) + mixin.Args = delExp.Split(args, -1) + + for i := 0; i < len(mixin.Args); i++ { + mixin.Args[i] = strings.TrimSpace(mixin.Args[i]) + if mixin.Args[i] == "" { + mixin.Args = append(mixin.Args[:i], mixin.Args[i+1:]...) 
+ i-- + } + } + + return mixin +} + +type MixinCall struct { + SourcePosition + Name string + Args []string +} + +func newMixinCall(name, args string) *MixinCall { + mixinCall := new(MixinCall) + mixinCall.Name = name + + const t = "%s" + quoteExp := regexp.MustCompile(`"(.*?)"`) + delExp := regexp.MustCompile(`,\s`) + + quotes := quoteExp.FindAllString(args, -1) + replaced := quoteExp.ReplaceAllString(args, t) + mixinCall.Args = delExp.Split(replaced, -1) + + qi := 0 + for i, arg := range mixinCall.Args { + if arg == t { + mixinCall.Args[i] = quotes[qi] + qi++ + } + } + + return mixinCall +} diff --git a/transfersh-server/vendor/github.com/eknkc/amber/parser/parser.go b/transfersh-server/vendor/github.com/eknkc/amber/parser/parser.go new file mode 100644 index 0000000..380e4a3 --- /dev/null +++ b/transfersh-server/vendor/github.com/eknkc/amber/parser/parser.go @@ -0,0 +1,454 @@ +package parser + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "path/filepath" + "strings" +) + +type Parser struct { + scanner *scanner + filename string + currenttoken *token + namedBlocks map[string]*NamedBlock + parent *Parser + result *Block +} + +func newParser(rdr io.Reader) *Parser { + p := new(Parser) + p.scanner = newScanner(rdr) + p.namedBlocks = make(map[string]*NamedBlock) + return p +} + +func StringParser(input string) (*Parser, error) { + return newParser(bytes.NewReader([]byte(input))), nil +} + +func ByteParser(input []byte) (*Parser, error) { + return newParser(bytes.NewReader(input)), nil +} + +func (p *Parser) SetFilename(filename string) { + p.filename = filename +} + +func FileParser(filename string) (*Parser, error) { + data, err := ioutil.ReadFile(filename) + + if err != nil { + return nil, err + } + + parser := newParser(bytes.NewReader(data)) + parser.filename = filename + return parser, nil +} + +func (p *Parser) Parse() *Block { + if p.result != nil { + return p.result + } + + defer func() { + if r := recover(); r != nil { + if rs, ok := r.(string); ok && rs[:len("Amber Error")] == "Amber Error" { + panic(r) + } + + pos := p.pos() + + if len(pos.Filename) > 0 { + panic(fmt.Sprintf("Amber Error in <%s>: %v - Line: %d, Column: %d, Length: %d", pos.Filename, r, pos.LineNum, pos.ColNum, pos.TokenLength)) + } else { + panic(fmt.Sprintf("Amber Error: %v - Line: %d, Column: %d, Length: %d", r, pos.LineNum, pos.ColNum, pos.TokenLength)) + } + } + }() + + block := newBlock() + p.advance() + + for { + if p.currenttoken == nil || p.currenttoken.Kind == tokEOF { + break + } + + if p.currenttoken.Kind == tokBlank { + p.advance() + continue + } + + block.push(p.parse()) + } + + if p.parent != nil { + p.parent.Parse() + + for _, prev := range p.parent.namedBlocks { + ours := p.namedBlocks[prev.Name] + + if ours == nil { + // Put a copy of the named block into current context, so that sub-templates can use the block + p.namedBlocks[prev.Name] = prev + continue + } + + top := findTopmostParentWithNamedBlock(p, prev.Name) + nb := top.namedBlocks[prev.Name] + switch ours.Modifier { + case NamedBlockAppend: + for i := 0; i < len(ours.Children); i++ { + nb.push(ours.Children[i]) + } + case NamedBlockPrepend: + for i := len(ours.Children) - 1; i >= 0; i-- { + nb.pushFront(ours.Children[i]) + } + default: + nb.Children = ours.Children + } + } + + block = p.parent.result + } + + p.result = block + return block +} + +func (p *Parser) pos() SourcePosition { + pos := p.scanner.Pos() + pos.Filename = p.filename + return pos +} + +func (p *Parser) parseRelativeFile(filename string) *Parser { + if 
len(p.filename) == 0 { + panic("Unable to import or extend " + filename + " in a non filesystem based parser.") + } + + filename = filepath.Join(filepath.Dir(p.filename), filename) + + if strings.IndexRune(filepath.Base(filename), '.') < 0 { + filename = filename + ".amber" + } + + parser, err := FileParser(filename) + if err != nil { + panic("Unable to read " + filename + ", Error: " + string(err.Error())) + } + + return parser +} + +func (p *Parser) parse() Node { + switch p.currenttoken.Kind { + case tokDoctype: + return p.parseDoctype() + case tokComment: + return p.parseComment() + case tokText: + return p.parseText() + case tokIf: + return p.parseIf() + case tokEach: + return p.parseEach() + case tokImport: + return p.parseImport() + case tokTag: + return p.parseTag() + case tokAssignment: + return p.parseAssignment() + case tokNamedBlock: + return p.parseNamedBlock() + case tokExtends: + return p.parseExtends() + case tokIndent: + return p.parseBlock(nil) + case tokMixin: + return p.parseMixin() + case tokMixinCall: + return p.parseMixinCall() + } + + panic(fmt.Sprintf("Unexpected token: %d", p.currenttoken.Kind)) +} + +func (p *Parser) expect(typ rune) *token { + if p.currenttoken.Kind != typ { + panic("Unexpected token!") + } + curtok := p.currenttoken + p.advance() + return curtok +} + +func (p *Parser) advance() { + p.currenttoken = p.scanner.Next() +} + +func (p *Parser) parseExtends() *Block { + if p.parent != nil { + panic("Unable to extend multiple parent templates.") + } + + tok := p.expect(tokExtends) + parser := p.parseRelativeFile(tok.Value) + parser.Parse() + p.parent = parser + return newBlock() +} + +func (p *Parser) parseBlock(parent Node) *Block { + p.expect(tokIndent) + block := newBlock() + block.SourcePosition = p.pos() + + for { + if p.currenttoken == nil || p.currenttoken.Kind == tokEOF || p.currenttoken.Kind == tokOutdent { + break + } + + if p.currenttoken.Kind == tokBlank { + p.advance() + continue + } + + if p.currenttoken.Kind == tokId || + p.currenttoken.Kind == tokClassName || + p.currenttoken.Kind == tokAttribute { + + if tag, ok := parent.(*Tag); ok { + attr := p.expect(p.currenttoken.Kind) + cond := attr.Data["Condition"] + + switch attr.Kind { + case tokId: + tag.Attributes = append(tag.Attributes, Attribute{p.pos(), "id", attr.Value, true, cond}) + case tokClassName: + tag.Attributes = append(tag.Attributes, Attribute{p.pos(), "class", attr.Value, true, cond}) + case tokAttribute: + tag.Attributes = append(tag.Attributes, Attribute{p.pos(), attr.Value, attr.Data["Content"], attr.Data["Mode"] == "raw", cond}) + } + + continue + } else { + panic("Conditional attributes must be placed immediately within a parent tag.") + } + } + + block.push(p.parse()) + } + + p.expect(tokOutdent) + + return block +} + +func (p *Parser) parseIf() *Condition { + tok := p.expect(tokIf) + cnd := newCondition(tok.Value) + cnd.SourcePosition = p.pos() + +readmore: + switch p.currenttoken.Kind { + case tokIndent: + cnd.Positive = p.parseBlock(cnd) + goto readmore + case tokElse: + p.expect(tokElse) + if p.currenttoken.Kind == tokIf { + cnd.Negative = newBlock() + cnd.Negative.push(p.parseIf()) + } else if p.currenttoken.Kind == tokIndent { + cnd.Negative = p.parseBlock(cnd) + } else { + panic("Unexpected token!") + } + goto readmore + } + + return cnd +} + +func (p *Parser) parseEach() *Each { + tok := p.expect(tokEach) + ech := newEach(tok.Value) + ech.SourcePosition = p.pos() + ech.X = tok.Data["X"] + ech.Y = tok.Data["Y"] + + if p.currenttoken.Kind == tokIndent { + 
ech.Block = p.parseBlock(ech) + } + + return ech +} + +func (p *Parser) parseImport() *Block { + tok := p.expect(tokImport) + node := p.parseRelativeFile(tok.Value).Parse() + node.SourcePosition = p.pos() + return node +} + +func (p *Parser) parseNamedBlock() *Block { + tok := p.expect(tokNamedBlock) + + if p.namedBlocks[tok.Value] != nil { + panic("Multiple definitions of named blocks are not permitted. Block " + tok.Value + " has been re defined.") + } + + block := newNamedBlock(tok.Value) + block.SourcePosition = p.pos() + + if tok.Data["Modifier"] == "append" { + block.Modifier = NamedBlockAppend + } else if tok.Data["Modifier"] == "prepend" { + block.Modifier = NamedBlockPrepend + } + + if p.currenttoken.Kind == tokIndent { + block.Block = *(p.parseBlock(nil)) + } + + p.namedBlocks[block.Name] = block + + if block.Modifier == NamedBlockDefault { + return &block.Block + } + + return newBlock() +} + +func (p *Parser) parseDoctype() *Doctype { + tok := p.expect(tokDoctype) + node := newDoctype(tok.Value) + node.SourcePosition = p.pos() + return node +} + +func (p *Parser) parseComment() *Comment { + tok := p.expect(tokComment) + cmnt := newComment(tok.Value) + cmnt.SourcePosition = p.pos() + cmnt.Silent = tok.Data["Mode"] == "silent" + + if p.currenttoken.Kind == tokIndent { + cmnt.Block = p.parseBlock(cmnt) + } + + return cmnt +} + +func (p *Parser) parseText() *Text { + tok := p.expect(tokText) + node := newText(tok.Value, tok.Data["Mode"] == "raw") + node.SourcePosition = p.pos() + return node +} + +func (p *Parser) parseAssignment() *Assignment { + tok := p.expect(tokAssignment) + node := newAssignment(tok.Data["X"], tok.Value) + node.SourcePosition = p.pos() + return node +} + +func (p *Parser) parseTag() *Tag { + tok := p.expect(tokTag) + tag := newTag(tok.Value) + tag.SourcePosition = p.pos() + + ensureBlock := func() { + if tag.Block == nil { + tag.Block = newBlock() + } + } + +readmore: + switch p.currenttoken.Kind { + case tokIndent: + if tag.IsRawText() { + p.scanner.readRaw = true + } + + block := p.parseBlock(tag) + if tag.Block == nil { + tag.Block = block + } else { + for _, c := range block.Children { + tag.Block.push(c) + } + } + case tokId: + id := p.expect(tokId) + if len(id.Data["Condition"]) > 0 { + panic("Conditional attributes must be placed in a block within a tag.") + } + tag.Attributes = append(tag.Attributes, Attribute{p.pos(), "id", id.Value, true, ""}) + goto readmore + case tokClassName: + cls := p.expect(tokClassName) + if len(cls.Data["Condition"]) > 0 { + panic("Conditional attributes must be placed in a block within a tag.") + } + tag.Attributes = append(tag.Attributes, Attribute{p.pos(), "class", cls.Value, true, ""}) + goto readmore + case tokAttribute: + attr := p.expect(tokAttribute) + if len(attr.Data["Condition"]) > 0 { + panic("Conditional attributes must be placed in a block within a tag.") + } + tag.Attributes = append(tag.Attributes, Attribute{p.pos(), attr.Value, attr.Data["Content"], attr.Data["Mode"] == "raw", ""}) + goto readmore + case tokText: + if p.currenttoken.Data["Mode"] != "piped" { + ensureBlock() + tag.Block.pushFront(p.parseText()) + goto readmore + } + } + + return tag +} + +func (p *Parser) parseMixin() *Mixin { + tok := p.expect(tokMixin) + mixin := newMixin(tok.Value, tok.Data["Args"]) + mixin.SourcePosition = p.pos() + + if p.currenttoken.Kind == tokIndent { + mixin.Block = p.parseBlock(mixin) + } + + return mixin +} + +func (p *Parser) parseMixinCall() *MixinCall { + tok := p.expect(tokMixinCall) + mixinCall := 
newMixinCall(tok.Value, tok.Data["Args"]) + mixinCall.SourcePosition = p.pos() + return mixinCall +} + +func findTopmostParentWithNamedBlock(p *Parser, name string) *Parser { + top := p + + for { + if top.namedBlocks[name] == nil { + return nil + } + if top.parent == nil { + return top + } + if top.parent.namedBlocks[name] != nil { + top = top.parent + } + } +} diff --git a/transfersh-server/vendor/github.com/eknkc/amber/parser/scanner.go b/transfersh-server/vendor/github.com/eknkc/amber/parser/scanner.go new file mode 100644 index 0000000..76b7013 --- /dev/null +++ b/transfersh-server/vendor/github.com/eknkc/amber/parser/scanner.go @@ -0,0 +1,501 @@ +package parser + +import ( + "bufio" + "container/list" + "fmt" + "io" + "regexp" +) + +const ( + tokEOF = -(iota + 1) + tokDoctype + tokComment + tokIndent + tokOutdent + tokBlank + tokId + tokClassName + tokTag + tokText + tokAttribute + tokIf + tokElse + tokEach + tokAssignment + tokImport + tokNamedBlock + tokExtends + tokMixin + tokMixinCall +) + +const ( + scnNewLine = iota + scnLine + scnEOF +) + +type scanner struct { + reader *bufio.Reader + indentStack *list.List + stash *list.List + + state int32 + buffer string + + line int + col int + lastTokenLine int + lastTokenCol int + lastTokenSize int + + readRaw bool +} + +type token struct { + Kind rune + Value string + Data map[string]string +} + +func newScanner(r io.Reader) *scanner { + s := new(scanner) + s.reader = bufio.NewReader(r) + s.indentStack = list.New() + s.stash = list.New() + s.state = scnNewLine + s.line = -1 + s.col = 0 + + return s +} + +func (s *scanner) Pos() SourcePosition { + return SourcePosition{s.lastTokenLine + 1, s.lastTokenCol + 1, s.lastTokenSize, ""} +} + +// Returns next token found in buffer +func (s *scanner) Next() *token { + if s.readRaw { + s.readRaw = false + return s.NextRaw() + } + + s.ensureBuffer() + + if stashed := s.stash.Front(); stashed != nil { + tok := stashed.Value.(*token) + s.stash.Remove(stashed) + return tok + } + + switch s.state { + case scnEOF: + if outdent := s.indentStack.Back(); outdent != nil { + s.indentStack.Remove(outdent) + return &token{tokOutdent, "", nil} + } + + return &token{tokEOF, "", nil} + case scnNewLine: + s.state = scnLine + + if tok := s.scanIndent(); tok != nil { + return tok + } + + return s.Next() + case scnLine: + if tok := s.scanMixin(); tok != nil { + return tok + } + + if tok := s.scanMixinCall(); tok != nil { + return tok + } + + if tok := s.scanDoctype(); tok != nil { + return tok + } + + if tok := s.scanCondition(); tok != nil { + return tok + } + + if tok := s.scanEach(); tok != nil { + return tok + } + + if tok := s.scanImport(); tok != nil { + return tok + } + + if tok := s.scanExtends(); tok != nil { + return tok + } + + if tok := s.scanBlock(); tok != nil { + return tok + } + + if tok := s.scanAssignment(); tok != nil { + return tok + } + + if tok := s.scanTag(); tok != nil { + return tok + } + + if tok := s.scanId(); tok != nil { + return tok + } + + if tok := s.scanClassName(); tok != nil { + return tok + } + + if tok := s.scanAttribute(); tok != nil { + return tok + } + + if tok := s.scanComment(); tok != nil { + return tok + } + + if tok := s.scanText(); tok != nil { + return tok + } + } + + return nil +} + +func (s *scanner) NextRaw() *token { + result := "" + level := 0 + + for { + s.ensureBuffer() + + switch s.state { + case scnEOF: + return &token{tokText, result, map[string]string{"Mode": "raw"}} + case scnNewLine: + s.state = scnLine + + if tok := s.scanIndent(); tok != nil { + if 
tok.Kind == tokIndent { + level++ + } else if tok.Kind == tokOutdent { + level-- + } else { + result = result + "\n" + continue + } + + if level < 0 { + s.stash.PushBack(&token{tokOutdent, "", nil}) + + if len(result) > 0 && result[len(result)-1] == '\n' { + result = result[:len(result)-1] + } + + return &token{tokText, result, map[string]string{"Mode": "raw"}} + } + } + case scnLine: + if len(result) > 0 { + result = result + "\n" + } + for i := 0; i < level; i++ { + result += "\t" + } + result = result + s.buffer + s.consume(len(s.buffer)) + } + } + + return nil +} + +var rgxIndent = regexp.MustCompile(`^(\s+)`) + +func (s *scanner) scanIndent() *token { + if len(s.buffer) == 0 { + return &token{tokBlank, "", nil} + } + + var head *list.Element + for head = s.indentStack.Front(); head != nil; head = head.Next() { + value := head.Value.(*regexp.Regexp) + + if match := value.FindString(s.buffer); len(match) != 0 { + s.consume(len(match)) + } else { + break + } + } + + newIndent := rgxIndent.FindString(s.buffer) + + if len(newIndent) != 0 && head == nil { + s.indentStack.PushBack(regexp.MustCompile(regexp.QuoteMeta(newIndent))) + s.consume(len(newIndent)) + return &token{tokIndent, newIndent, nil} + } + + if len(newIndent) == 0 && head != nil { + for head != nil { + next := head.Next() + s.indentStack.Remove(head) + if next == nil { + return &token{tokOutdent, "", nil} + } else { + s.stash.PushBack(&token{tokOutdent, "", nil}) + } + head = next + } + } + + if len(newIndent) != 0 && head != nil { + panic("Mismatching indentation. Please use a coherent indent schema.") + } + + return nil +} + +var rgxDoctype = regexp.MustCompile(`^(!!!|doctype)\s*(.*)`) + +func (s *scanner) scanDoctype() *token { + if sm := rgxDoctype.FindStringSubmatch(s.buffer); len(sm) != 0 { + if len(sm[2]) == 0 { + sm[2] = "html" + } + + s.consume(len(sm[0])) + return &token{tokDoctype, sm[2], nil} + } + + return nil +} + +var rgxIf = regexp.MustCompile(`^if\s+(.+)$`) +var rgxElse = regexp.MustCompile(`^else\s*`) + +func (s *scanner) scanCondition() *token { + if sm := rgxIf.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + return &token{tokIf, sm[1], nil} + } + + if sm := rgxElse.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + return &token{tokElse, "", nil} + } + + return nil +} + +var rgxEach = regexp.MustCompile(`^each\s+(\$[\w0-9\-_]*)(?:\s*,\s*(\$[\w0-9\-_]*))?\s+in\s+(.+)$`) + +func (s *scanner) scanEach() *token { + if sm := rgxEach.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + return &token{tokEach, sm[3], map[string]string{"X": sm[1], "Y": sm[2]}} + } + + return nil +} + +var rgxAssignment = regexp.MustCompile(`^(\$[\w0-9\-_]*)?\s*=\s*(.+)$`) + +func (s *scanner) scanAssignment() *token { + if sm := rgxAssignment.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + return &token{tokAssignment, sm[2], map[string]string{"X": sm[1]}} + } + + return nil +} + +var rgxComment = regexp.MustCompile(`^\/\/(-)?\s*(.*)$`) + +func (s *scanner) scanComment() *token { + if sm := rgxComment.FindStringSubmatch(s.buffer); len(sm) != 0 { + mode := "embed" + if len(sm[1]) != 0 { + mode = "silent" + } + + s.consume(len(sm[0])) + return &token{tokComment, sm[2], map[string]string{"Mode": mode}} + } + + return nil +} + +var rgxId = regexp.MustCompile(`^#([\w-]+)(?:\s*\?\s*(.*)$)?`) + +func (s *scanner) scanId() *token { + if sm := rgxId.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + return &token{tokId, sm[1], 
map[string]string{"Condition": sm[2]}} + } + + return nil +} + +var rgxClassName = regexp.MustCompile(`^\.([\w-]+)(?:\s*\?\s*(.*)$)?`) + +func (s *scanner) scanClassName() *token { + if sm := rgxClassName.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + return &token{tokClassName, sm[1], map[string]string{"Condition": sm[2]}} + } + + return nil +} + +var rgxAttribute = regexp.MustCompile(`^\[([\w\-]+)\s*(?:=\s*(\"([^\"\\]*)\"|([^\]]+)))?\](?:\s*\?\s*(.*)$)?`) + +func (s *scanner) scanAttribute() *token { + if sm := rgxAttribute.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + + if len(sm[3]) != 0 || sm[2] == "" { + return &token{tokAttribute, sm[1], map[string]string{"Content": sm[3], "Mode": "raw", "Condition": sm[5]}} + } + + return &token{tokAttribute, sm[1], map[string]string{"Content": sm[4], "Mode": "expression", "Condition": sm[5]}} + } + + return nil +} + +var rgxImport = regexp.MustCompile(`^import\s+([0-9a-zA-Z_\-\. \/]*)$`) + +func (s *scanner) scanImport() *token { + if sm := rgxImport.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + return &token{tokImport, sm[1], nil} + } + + return nil +} + +var rgxExtends = regexp.MustCompile(`^extends\s+([0-9a-zA-Z_\-\. \/]*)$`) + +func (s *scanner) scanExtends() *token { + if sm := rgxExtends.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + return &token{tokExtends, sm[1], nil} + } + + return nil +} + +var rgxBlock = regexp.MustCompile(`^block\s+(?:(append|prepend)\s+)?([0-9a-zA-Z_\-\. \/]*)$`) + +func (s *scanner) scanBlock() *token { + if sm := rgxBlock.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + return &token{tokNamedBlock, sm[2], map[string]string{"Modifier": sm[1]}} + } + + return nil +} + +var rgxTag = regexp.MustCompile(`^(\w[-:\w]*)`) + +func (s *scanner) scanTag() *token { + if sm := rgxTag.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + return &token{tokTag, sm[1], nil} + } + + return nil +} + +var rgxMixin = regexp.MustCompile(`^mixin ([a-zA-Z_]+\w*)(\(((\$\w*(,\s)?)*)\))?$`) + +func (s *scanner) scanMixin() *token { + if sm := rgxMixin.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + return &token{tokMixin, sm[1], map[string]string{"Args": sm[3]}} + } + + return nil +} + +var rgxMixinCall = regexp.MustCompile(`^\+([A-Za-z_]+\w*)(\((.+(,\s)?)*\))?$`) + +func (s *scanner) scanMixinCall() *token { + if sm := rgxMixinCall.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + return &token{tokMixinCall, sm[1], map[string]string{"Args": sm[3]}} + } + + return nil +} + +var rgxText = regexp.MustCompile(`^(\|)? 
?(.*)$`) + +func (s *scanner) scanText() *token { + if sm := rgxText.FindStringSubmatch(s.buffer); len(sm) != 0 { + s.consume(len(sm[0])) + + mode := "inline" + if sm[1] == "|" { + mode = "piped" + } + + return &token{tokText, sm[2], map[string]string{"Mode": mode}} + } + + return nil +} + +// Moves position forward, and removes beginning of s.buffer (len bytes) +func (s *scanner) consume(runes int) { + if len(s.buffer) < runes { + panic(fmt.Sprintf("Unable to consume %d runes from buffer.", runes)) + } + + s.lastTokenLine = s.line + s.lastTokenCol = s.col + s.lastTokenSize = runes + + s.buffer = s.buffer[runes:] + s.col += runes +} + +// Reads string into s.buffer +func (s *scanner) ensureBuffer() { + if len(s.buffer) > 0 { + return + } + + buf, err := s.reader.ReadString('\n') + + if err != nil && err != io.EOF { + panic(err) + } else if err != nil && len(buf) == 0 { + s.state = scnEOF + } else { + // endline "LF only" or "\n" use Unix, Linux, modern MacOS X, FreeBSD, BeOS, RISC OS + if buf[len(buf)-1] == '\n' { + buf = buf[:len(buf)-1] + } + // endline "CR+LF" or "\r\n" use internet protocols, DEC RT-11, Windows, CP/M, MS-DOS, OS/2, Symbian OS + if len(buf) > 0 && buf[len(buf)-1] == '\r' { + buf = buf[:len(buf)-1] + } + + s.state = scnNewLine + s.buffer = buf + s.line += 1 + s.col = 0 + } +} diff --git a/transfersh-server/vendor/github.com/eknkc/amber/runtime.go b/transfersh-server/vendor/github.com/eknkc/amber/runtime.go new file mode 100644 index 0000000..6438fd1 --- /dev/null +++ b/transfersh-server/vendor/github.com/eknkc/amber/runtime.go @@ -0,0 +1,287 @@ +package amber + +import ( + "encoding/json" + "fmt" + "html/template" + "reflect" +) + +var FuncMap = template.FuncMap{ + "__amber_add": runtime_add, + "__amber_sub": runtime_sub, + "__amber_mul": runtime_mul, + "__amber_quo": runtime_quo, + "__amber_rem": runtime_rem, + "__amber_minus": runtime_minus, + "__amber_plus": runtime_plus, + "__amber_eql": runtime_eql, + "__amber_gtr": runtime_gtr, + "__amber_lss": runtime_lss, + + "json": runtime_json, + "unescaped": runtime_unescaped, +} + +func runtime_add(x, y interface{}) interface{} { + vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) + switch vx.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Int() + vy.Int() + case reflect.Float32, reflect.Float64: + return float64(vx.Int()) + vy.Float() + case reflect.String: + return fmt.Sprintf("%d%s", vx.Int(), vy.String()) + } + } + case reflect.Float32, reflect.Float64: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Float() + float64(vy.Int()) + case reflect.Float32, reflect.Float64: + return vx.Float() + vy.Float() + case reflect.String: + return fmt.Sprintf("%f%s", vx.Float(), vy.String()) + } + } + case reflect.String: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return fmt.Sprintf("%s%d", vx.String(), vy.Int()) + case reflect.Float32, reflect.Float64: + return fmt.Sprintf("%s%f", vx.String(), vy.Float()) + case reflect.String: + return fmt.Sprintf("%s%s", vx.String(), vy.String()) + } + } + } + + return "" +} + +func runtime_sub(x, y interface{}) interface{} { + vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) + switch vx.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + { + switch vy.Kind() { + case reflect.Int, 
reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Int() - vy.Int() + case reflect.Float32, reflect.Float64: + return float64(vx.Int()) - vy.Float() + } + } + case reflect.Float32, reflect.Float64: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Float() - float64(vy.Int()) + case reflect.Float32, reflect.Float64: + return vx.Float() - vy.Float() + } + } + } + + return "" +} + +func runtime_mul(x, y interface{}) interface{} { + vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) + switch vx.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Int() * vy.Int() + case reflect.Float32, reflect.Float64: + return float64(vx.Int()) * vy.Float() + } + } + case reflect.Float32, reflect.Float64: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Float() * float64(vy.Int()) + case reflect.Float32, reflect.Float64: + return vx.Float() * vy.Float() + } + } + } + + return "" +} + +func runtime_quo(x, y interface{}) interface{} { + vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) + switch vx.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Int() / vy.Int() + case reflect.Float32, reflect.Float64: + return float64(vx.Int()) / vy.Float() + } + } + case reflect.Float32, reflect.Float64: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Float() / float64(vy.Int()) + case reflect.Float32, reflect.Float64: + return vx.Float() / vy.Float() + } + } + } + + return "" +} + +func runtime_rem(x, y interface{}) interface{} { + vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) + switch vx.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Int() % vy.Int() + } + } + } + + return "" +} + +func runtime_minus(x interface{}) interface{} { + vx := reflect.ValueOf(x) + switch vx.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return -vx.Int() + case reflect.Float32, reflect.Float64: + return -vx.Float() + } + + return "" +} + +func runtime_plus(x interface{}) interface{} { + vx := reflect.ValueOf(x) + switch vx.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return +vx.Int() + case reflect.Float32, reflect.Float64: + return +vx.Float() + } + + return "" +} + +func runtime_eql(x, y interface{}) bool { + vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) + switch vx.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Int() == vy.Int() + case reflect.Float32, reflect.Float64: + return float64(vx.Int()) == vy.Float() + case reflect.String: + return fmt.Sprintf("%d", vx.Int()) == vy.String() + } + } + case reflect.Float32, reflect.Float64: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Float() == float64(vy.Int()) + case reflect.Float32, reflect.Float64: + return 
vx.Float() == vy.Float() + case reflect.String: + return fmt.Sprintf("%f", vx.Float()) == vy.String() + } + } + case reflect.String: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.String() == fmt.Sprintf("%d", vy.Int()) + case reflect.Float32, reflect.Float64: + return vx.String() == fmt.Sprintf("%f", vy.Float()) + case reflect.String: + return vx.String() == fmt.Sprintf("%s", vy.String()) + } + } + case reflect.Bool: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Bool() && vy.Int() != 0 + case reflect.Bool: + return vx.Bool() == vy.Bool() + } + } + } + + return false +} + +func runtime_lss(x, y interface{}) bool { + vx, vy := reflect.ValueOf(x), reflect.ValueOf(y) + switch vx.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Int() < vy.Int() + case reflect.Float32, reflect.Float64: + return float64(vx.Int()) < vy.Float() + case reflect.String: + return fmt.Sprintf("%d", vx.Int()) < vy.String() + } + } + case reflect.Float32, reflect.Float64: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.Float() < float64(vy.Int()) + case reflect.Float32, reflect.Float64: + return vx.Float() < vy.Float() + case reflect.String: + return fmt.Sprintf("%f", vx.Float()) < vy.String() + } + } + case reflect.String: + { + switch vy.Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Int16, reflect.Int8: + return vx.String() < fmt.Sprintf("%d", vy.Int()) + case reflect.Float32, reflect.Float64: + return vx.String() < fmt.Sprintf("%f", vy.Float()) + case reflect.String: + return vx.String() < vy.String() + } + } + } + + return false +} + +func runtime_gtr(x, y interface{}) bool { + return !runtime_lss(x, y) && !runtime_eql(x, y) +} + +func runtime_json(x interface{}) (res string, err error) { + bres, err := json.Marshal(x) + res = string(bres) + return +} + +func runtime_unescaped(x string) interface{} { + return template.HTML(x) +} diff --git a/transfersh-server/vendor/github.com/garyburd/redigo/internal/LICENSE b/transfersh-server/vendor/github.com/garyburd/redigo/internal/LICENSE new file mode 100644 index 0000000..67db858 --- /dev/null +++ b/transfersh-server/vendor/github.com/garyburd/redigo/internal/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/transfersh-server/vendor/github.com/garyburd/redigo/internal/commandinfo.go b/transfersh-server/vendor/github.com/garyburd/redigo/internal/commandinfo.go new file mode 100644 index 0000000..11e5842 --- /dev/null +++ b/transfersh-server/vendor/github.com/garyburd/redigo/internal/commandinfo.go @@ -0,0 +1,54 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package internal // import "github.com/garyburd/redigo/internal" + +import ( + "strings" +) + +const ( + WatchState = 1 << iota + MultiState + SubscribeState + MonitorState +) + +type CommandInfo struct { + Set, Clear int +} + +var commandInfos = map[string]CommandInfo{ + "WATCH": {Set: WatchState}, + "UNWATCH": {Clear: WatchState}, + "MULTI": {Set: MultiState}, + "EXEC": {Clear: WatchState | MultiState}, + "DISCARD": {Clear: WatchState | MultiState}, + "PSUBSCRIBE": {Set: SubscribeState}, + "SUBSCRIBE": {Set: SubscribeState}, + "MONITOR": {Set: MonitorState}, +} + +func init() { + for n, ci := range commandInfos { + commandInfos[strings.ToLower(n)] = ci + } +} + +func LookupCommandInfo(commandName string) CommandInfo { + if ci, ok := commandInfos[commandName]; ok { + return ci + } + return commandInfos[strings.ToUpper(commandName)] +} diff --git a/transfersh-server/vendor/github.com/garyburd/redigo/internal/redistest/testdb.go b/transfersh-server/vendor/github.com/garyburd/redigo/internal/redistest/testdb.go new file mode 100644 index 0000000..b6f205b --- /dev/null +++ b/transfersh-server/vendor/github.com/garyburd/redigo/internal/redistest/testdb.go @@ -0,0 +1,68 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package redistest contains utilities for writing Redigo tests. +package redistest + +import ( + "errors" + "time" + + "github.com/garyburd/redigo/redis" +) + +type testConn struct { + redis.Conn +} + +func (t testConn) Close() error { + _, err := t.Conn.Do("SELECT", "9") + if err != nil { + return nil + } + _, err = t.Conn.Do("FLUSHDB") + if err != nil { + return err + } + return t.Conn.Close() +} + +// Dial dials the local Redis server and selects database 9. To prevent +// stomping on real data, DialTestDB fails if database 9 contains data. The +// returned connection flushes database 9 on close. +func Dial() (redis.Conn, error) { + c, err := redis.DialTimeout("tcp", ":6379", 0, 1*time.Second, 1*time.Second) + if err != nil { + return nil, err + } + + _, err = c.Do("SELECT", "9") + if err != nil { + c.Close() + return nil, err + } + + n, err := redis.Int(c.Do("DBSIZE")) + if err != nil { + c.Close() + return nil, err + } + + if n != 0 { + c.Close() + return nil, errors.New("database #9 is not empty, test can not continue") + } + + return testConn{c}, nil +} diff --git a/transfersh-server/vendor/github.com/garyburd/redigo/redis/LICENSE b/transfersh-server/vendor/github.com/garyburd/redigo/redis/LICENSE new file mode 100644 index 0000000..67db858 --- /dev/null +++ b/transfersh-server/vendor/github.com/garyburd/redigo/redis/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/transfersh-server/vendor/github.com/garyburd/redigo/redis/conn.go b/transfersh-server/vendor/github.com/garyburd/redigo/redis/conn.go new file mode 100644 index 0000000..ed358c6 --- /dev/null +++ b/transfersh-server/vendor/github.com/garyburd/redigo/redis/conn.go @@ -0,0 +1,570 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package redis + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "net" + "net/url" + "regexp" + "strconv" + "sync" + "time" +) + +// conn is the low-level implementation of Conn +type conn struct { + + // Shared + mu sync.Mutex + pending int + err error + conn net.Conn + + // Read + readTimeout time.Duration + br *bufio.Reader + + // Write + writeTimeout time.Duration + bw *bufio.Writer + + // Scratch space for formatting argument length. + // '*' or '$', length, "\r\n" + lenScratch [32]byte + + // Scratch space for formatting integers and floats. + numScratch [40]byte +} + +// DialTimeout acts like Dial but takes timeouts for establishing the +// connection to the server, writing a command and reading a reply. +// +// Deprecated: Use Dial with options instead. +func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { + return Dial(network, address, + DialConnectTimeout(connectTimeout), + DialReadTimeout(readTimeout), + DialWriteTimeout(writeTimeout)) +} + +// DialOption specifies an option for dialing a Redis server. +type DialOption struct { + f func(*dialOptions) +} + +type dialOptions struct { + readTimeout time.Duration + writeTimeout time.Duration + dial func(network, addr string) (net.Conn, error) + db int + password string +} + +// DialReadTimeout specifies the timeout for reading a single command reply. +func DialReadTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.readTimeout = d + }} +} + +// DialWriteTimeout specifies the timeout for writing a single command. +func DialWriteTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.writeTimeout = d + }} +} + +// DialConnectTimeout specifies the timeout for connecting to the Redis server. +func DialConnectTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + dialer := net.Dialer{Timeout: d} + do.dial = dialer.Dial + }} +} + +// DialNetDial specifies a custom dial function for creating TCP +// connections. If this option is left out, then net.Dial is +// used. DialNetDial overrides DialConnectTimeout. +func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption { + return DialOption{func(do *dialOptions) { + do.dial = dial + }} +} + +// DialDatabase specifies the database to select when dialing a connection. +func DialDatabase(db int) DialOption { + return DialOption{func(do *dialOptions) { + do.db = db + }} +} + +// DialPassword specifies the password to use when connecting to +// the Redis server. +func DialPassword(password string) DialOption { + return DialOption{func(do *dialOptions) { + do.password = password + }} +} + +// Dial connects to the Redis server at the given network and +// address using the specified options. 
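+//
+// A minimal usage sketch (the address and option values below are
+// illustrative, not defaults):
+//
+//  c, err := redis.Dial("tcp", "localhost:6379",
+//      redis.DialConnectTimeout(5*time.Second),
+//      redis.DialReadTimeout(time.Second),
+//      redis.DialWriteTimeout(time.Second),
+//      redis.DialDatabase(1))
+//  if err != nil {
+//      // handle connection error
+//  }
+//  defer c.Close()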
+func Dial(network, address string, options ...DialOption) (Conn, error) { + do := dialOptions{ + dial: net.Dial, + } + for _, option := range options { + option.f(&do) + } + + netConn, err := do.dial(network, address) + if err != nil { + return nil, err + } + c := &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: do.readTimeout, + writeTimeout: do.writeTimeout, + } + + if do.password != "" { + if _, err := c.Do("AUTH", do.password); err != nil { + netConn.Close() + return nil, err + } + } + + if do.db != 0 { + if _, err := c.Do("SELECT", do.db); err != nil { + netConn.Close() + return nil, err + } + } + + return c, nil +} + +var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`) + +// DialURL connects to a Redis server at the given URL using the Redis +// URI scheme. URLs should follow the draft IANA specification for the +// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis). +func DialURL(rawurl string, options ...DialOption) (Conn, error) { + u, err := url.Parse(rawurl) + if err != nil { + return nil, err + } + + if u.Scheme != "redis" { + return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme) + } + + // As per the IANA draft spec, the host defaults to localhost and + // the port defaults to 6379. + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // assume port is missing + host = u.Host + port = "6379" + } + if host == "" { + host = "localhost" + } + address := net.JoinHostPort(host, port) + + if u.User != nil { + password, isSet := u.User.Password() + if isSet { + options = append(options, DialPassword(password)) + } + } + + match := pathDBRegexp.FindStringSubmatch(u.Path) + if len(match) == 2 { + db := 0 + if len(match[1]) > 0 { + db, err = strconv.Atoi(match[1]) + if err != nil { + return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) + } + } + if db != 0 { + options = append(options, DialDatabase(db)) + } + } else if u.Path != "" { + return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) + } + + return Dial("tcp", address, options...) +} + +// NewConn returns a new Redigo connection for the given net connection. +func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { + return &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: readTimeout, + writeTimeout: writeTimeout, + } +} + +func (c *conn) Close() error { + c.mu.Lock() + err := c.err + if c.err == nil { + c.err = errors.New("redigo: closed") + err = c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) fatal(err error) error { + c.mu.Lock() + if c.err == nil { + c.err = err + // Close connection to force errors on subsequent calls and to unblock + // other reader or writer. 
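+		// Note that fatal returns the original err, not any error from
+		// this Close call.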
+ c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) Err() error { + c.mu.Lock() + err := c.err + c.mu.Unlock() + return err +} + +func (c *conn) writeLen(prefix byte, n int) error { + c.lenScratch[len(c.lenScratch)-1] = '\n' + c.lenScratch[len(c.lenScratch)-2] = '\r' + i := len(c.lenScratch) - 3 + for { + c.lenScratch[i] = byte('0' + n%10) + i -= 1 + n = n / 10 + if n == 0 { + break + } + } + c.lenScratch[i] = prefix + _, err := c.bw.Write(c.lenScratch[i:]) + return err +} + +func (c *conn) writeString(s string) error { + c.writeLen('$', len(s)) + c.bw.WriteString(s) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeBytes(p []byte) error { + c.writeLen('$', len(p)) + c.bw.Write(p) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeInt64(n int64) error { + return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) +} + +func (c *conn) writeFloat64(n float64) error { + return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) +} + +func (c *conn) writeCommand(cmd string, args []interface{}) (err error) { + c.writeLen('*', 1+len(args)) + err = c.writeString(cmd) + for _, arg := range args { + if err != nil { + break + } + switch arg := arg.(type) { + case string: + err = c.writeString(arg) + case []byte: + err = c.writeBytes(arg) + case int: + err = c.writeInt64(int64(arg)) + case int64: + err = c.writeInt64(arg) + case float64: + err = c.writeFloat64(arg) + case bool: + if arg { + err = c.writeString("1") + } else { + err = c.writeString("0") + } + case nil: + err = c.writeString("") + default: + var buf bytes.Buffer + fmt.Fprint(&buf, arg) + err = c.writeBytes(buf.Bytes()) + } + } + return err +} + +type protocolError string + +func (pe protocolError) Error() string { + return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) +} + +func (c *conn) readLine() ([]byte, error) { + p, err := c.br.ReadSlice('\n') + if err == bufio.ErrBufferFull { + return nil, protocolError("long response line") + } + if err != nil { + return nil, err + } + i := len(p) - 2 + if i < 0 || p[i] != '\r' { + return nil, protocolError("bad response line terminator") + } + return p[:i], nil +} + +// parseLen parses bulk string and array lengths. +func parseLen(p []byte) (int, error) { + if len(p) == 0 { + return -1, protocolError("malformed length") + } + + if p[0] == '-' && len(p) == 2 && p[1] == '1' { + // handle $-1 and $-1 null replies. + return -1, nil + } + + var n int + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return -1, protocolError("illegal bytes in length") + } + n += int(b - '0') + } + + return n, nil +} + +// parseInt parses an integer reply. 
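+// For example, the payload of the reply ":-42\r\n" arrives here as
+// []byte("-42") and parses to int64(-42).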
+func parseInt(p []byte) (interface{}, error) { + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + + var negate bool + if p[0] == '-' { + negate = true + p = p[1:] + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + } + + var n int64 + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return 0, protocolError("illegal bytes in length") + } + n += int64(b - '0') + } + + if negate { + n = -n + } + return n, nil +} + +var ( + okReply interface{} = "OK" + pongReply interface{} = "PONG" +) + +func (c *conn) readReply() (interface{}, error) { + line, err := c.readLine() + if err != nil { + return nil, err + } + if len(line) == 0 { + return nil, protocolError("short response line") + } + switch line[0] { + case '+': + switch { + case len(line) == 3 && line[1] == 'O' && line[2] == 'K': + // Avoid allocation for frequent "+OK" response. + return okReply, nil + case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G': + // Avoid allocation in PING command benchmarks :) + return pongReply, nil + default: + return string(line[1:]), nil + } + case '-': + return Error(string(line[1:])), nil + case ':': + return parseInt(line[1:]) + case '$': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + p := make([]byte, n) + _, err = io.ReadFull(c.br, p) + if err != nil { + return nil, err + } + if line, err := c.readLine(); err != nil { + return nil, err + } else if len(line) != 0 { + return nil, protocolError("bad bulk string format") + } + return p, nil + case '*': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + r := make([]interface{}, n) + for i := range r { + r[i], err = c.readReply() + if err != nil { + return nil, err + } + } + return r, nil + } + return nil, protocolError("unexpected response line") +} + +func (c *conn) Send(cmd string, args ...interface{}) error { + c.mu.Lock() + c.pending += 1 + c.mu.Unlock() + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.writeCommand(cmd, args); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Flush() error { + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.bw.Flush(); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Receive() (reply interface{}, err error) { + if c.readTimeout != 0 { + c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + } + if reply, err = c.readReply(); err != nil { + return nil, c.fatal(err) + } + // When using pub/sub, the number of receives can be greater than the + // number of sends. To enable normal use of the connection after + // unsubscribing from all channels, we do not decrement pending to a + // negative value. + // + // The pending field is decremented after the reply is read to handle the + // case where Receive is called before Send. 
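+	// For example, after Send("SUBSCRIBE", "ch") each message pushed to
+	// "ch" arrives through Receive with no matching Send, so pending is
+	// clamped at zero instead of being decremented unconditionally.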
+ c.mu.Lock() + if c.pending > 0 { + c.pending -= 1 + } + c.mu.Unlock() + if err, ok := reply.(Error); ok { + return nil, err + } + return +} + +func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { + c.mu.Lock() + pending := c.pending + c.pending = 0 + c.mu.Unlock() + + if cmd == "" && pending == 0 { + return nil, nil + } + + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + + if cmd != "" { + if err := c.writeCommand(cmd, args); err != nil { + return nil, c.fatal(err) + } + } + + if err := c.bw.Flush(); err != nil { + return nil, c.fatal(err) + } + + if c.readTimeout != 0 { + c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) + } + + if cmd == "" { + reply := make([]interface{}, pending) + for i := range reply { + r, e := c.readReply() + if e != nil { + return nil, c.fatal(e) + } + reply[i] = r + } + return reply, nil + } + + var err error + var reply interface{} + for i := 0; i <= pending; i++ { + var e error + if reply, e = c.readReply(); e != nil { + return nil, c.fatal(e) + } + if e, ok := reply.(Error); ok && err == nil { + err = e + } + } + return reply, err +} diff --git a/transfersh-server/vendor/github.com/garyburd/redigo/redis/doc.go b/transfersh-server/vendor/github.com/garyburd/redigo/redis/doc.go new file mode 100644 index 0000000..b9b2c99 --- /dev/null +++ b/transfersh-server/vendor/github.com/garyburd/redigo/redis/doc.go @@ -0,0 +1,168 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package redis is a client for the Redis database. +// +// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more +// documentation about this package. +// +// Connections +// +// The Conn interface is the primary interface for working with Redis. +// Applications create connections by calling the Dial, DialWithTimeout or +// NewConn functions. In the future, functions will be added for creating +// sharded and other types of connections. +// +// The application must call the connection Close method when the application +// is done with the connection. +// +// Executing Commands +// +// The Conn interface has a generic method for executing Redis commands: +// +// Do(commandName string, args ...interface{}) (reply interface{}, err error) +// +// The Redis command reference (http://redis.io/commands) lists the available +// commands. 
An example of using the Redis APPEND command is:
+//
+//  n, err := conn.Do("APPEND", "key", "value")
+//
+// The Do method converts command arguments to binary strings for transmission
+// to the server as follows:
+//
+//  Go Type                 Conversion
+//  []byte                  Sent as is
+//  string                  Sent as is
+//  int, int64              strconv.FormatInt(v)
+//  float64                 strconv.FormatFloat(v, 'g', -1, 64)
+//  bool                    true -> "1", false -> "0"
+//  nil                     ""
+//  all other types         fmt.Print(v)
+//
+// Redis command reply types are represented using the following Go types:
+//
+//  Redis type              Go type
+//  error                   redis.Error
+//  integer                 int64
+//  simple string           string
+//  bulk string             []byte or nil if value not present.
+//  array                   []interface{} or nil if value not present.
+//
+// Use type assertions or the reply helper functions to convert from
+// interface{} to the specific Go type for the command result.
+//
+// Pipelining
+//
+// Connections support pipelining using the Send, Flush and Receive methods.
+//
+//  Send(commandName string, args ...interface{}) error
+//  Flush() error
+//  Receive() (reply interface{}, err error)
+//
+// Send writes the command to the connection's output buffer. Flush flushes the
+// connection's output buffer to the server. Receive reads a single reply from
+// the server. The following example shows a simple pipeline.
+//
+//  c.Send("SET", "foo", "bar")
+//  c.Send("GET", "foo")
+//  c.Flush()
+//  c.Receive() // reply from SET
+//  v, err = c.Receive() // reply from GET
+//
+// The Do method combines the functionality of the Send, Flush and Receive
+// methods. The Do method starts by writing the command and flushing the output
+// buffer. Next, the Do method receives all pending replies including the reply
+// for the command just sent by Do. If any of the received replies is an error,
+// then Do returns the error. If there are no errors, then Do returns the last
+// reply. If the command argument to the Do method is "", then the Do method
+// will flush the output buffer and receive pending replies without sending a
+// command.
+//
+// Use the Send and Do methods to implement pipelined transactions.
+//
+//  c.Send("MULTI")
+//  c.Send("INCR", "foo")
+//  c.Send("INCR", "bar")
+//  r, err := c.Do("EXEC")
+//  fmt.Println(r) // prints [1, 1]
+//
+// Concurrency
+//
+// Connections support one concurrent caller to the Receive method and one
+// concurrent caller to the Send and Flush methods. No other concurrency is
+// supported including concurrent calls to the Do method.
+//
+// For full concurrent access to Redis, use the thread-safe Pool to get, use
+// and release a connection from within a goroutine. Connections returned from
+// a Pool have the concurrency restrictions described in the previous
+// paragraph.
+//
+// Publish and Subscribe
+//
+// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers.
+//
+//  c.Send("SUBSCRIBE", "example")
+//  c.Flush()
+//  for {
+//      reply, err := c.Receive()
+//      if err != nil {
+//          return err
+//      }
+//      // process pushed message
+//  }
+//
+// The PubSubConn type wraps a Conn with convenience methods for implementing
+// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods
+// send and flush a subscription management command. The Receive method
+// converts a pushed message to convenient types for use in a type switch.
+// +// psc := redis.PubSubConn{c} +// psc.Subscribe("example") +// for { +// switch v := psc.Receive().(type) { +// case redis.Message: +// fmt.Printf("%s: message: %s\n", v.Channel, v.Data) +// case redis.Subscription: +// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count) +// case error: +// return v +// } +// } +// +// Reply Helpers +// +// The Bool, Int, Bytes, String, Strings and Values functions convert a reply +// to a value of a specific type. To allow convenient wrapping of calls to the +// connection Do and Receive methods, the functions take a second argument of +// type error. If the error is non-nil, then the helper function returns the +// error. If the error is nil, the function converts the reply to the specified +// type: +// +// exists, err := redis.Bool(c.Do("EXISTS", "foo")) +// if err != nil { +// // handle error return from c.Do or type conversion error. +// } +// +// The Scan function converts elements of a array reply to Go types: +// +// var value1 int +// var value2 string +// reply, err := redis.Values(c.Do("MGET", "key1", "key2")) +// if err != nil { +// // handle error +// } +// if _, err := redis.Scan(reply, &value1, &value2); err != nil { +// // handle error +// } +package redis // import "github.com/garyburd/redigo/redis" diff --git a/transfersh-server/vendor/github.com/garyburd/redigo/redis/log.go b/transfersh-server/vendor/github.com/garyburd/redigo/redis/log.go new file mode 100644 index 0000000..129b86d --- /dev/null +++ b/transfersh-server/vendor/github.com/garyburd/redigo/redis/log.go @@ -0,0 +1,117 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "fmt" + "log" +) + +// NewLoggingConn returns a logging wrapper around a connection. +func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { + if prefix != "" { + prefix = prefix + "." 
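+		// e.g. a prefix of "redis" produces log lines like "redis.Do(...)".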
+ } + return &loggingConn{conn, logger, prefix} +} + +type loggingConn struct { + Conn + logger *log.Logger + prefix string +} + +func (c *loggingConn) Close() error { + err := c.Conn.Close() + var buf bytes.Buffer + fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) + c.logger.Output(2, buf.String()) + return err +} + +func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { + const chop = 32 + switch v := v.(type) { + case []byte: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case string: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case []interface{}: + if len(v) == 0 { + buf.WriteString("[]") + } else { + sep := "[" + fin := "]" + if len(v) > chop { + v = v[:chop] + fin = "...]" + } + for _, vv := range v { + buf.WriteString(sep) + c.printValue(buf, vv) + sep = ", " + } + buf.WriteString(fin) + } + default: + fmt.Fprint(buf, v) + } +} + +func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s%s(", c.prefix, method) + if method != "Receive" { + buf.WriteString(commandName) + for _, arg := range args { + buf.WriteString(", ") + c.printValue(&buf, arg) + } + } + buf.WriteString(") -> (") + if method != "Send" { + c.printValue(&buf, reply) + buf.WriteString(", ") + } + fmt.Fprintf(&buf, "%v)", err) + c.logger.Output(3, buf.String()) +} + +func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { + reply, err := c.Conn.Do(commandName, args...) + c.print("Do", commandName, args, reply, err) + return reply, err +} + +func (c *loggingConn) Send(commandName string, args ...interface{}) error { + err := c.Conn.Send(commandName, args...) + c.print("Send", commandName, args, nil, err) + return err +} + +func (c *loggingConn) Receive() (interface{}, error) { + reply, err := c.Conn.Receive() + c.print("Receive", "", nil, reply, err) + return reply, err +} diff --git a/transfersh-server/vendor/github.com/garyburd/redigo/redis/pool.go b/transfersh-server/vendor/github.com/garyburd/redigo/redis/pool.go new file mode 100644 index 0000000..d66ef84 --- /dev/null +++ b/transfersh-server/vendor/github.com/garyburd/redigo/redis/pool.go @@ -0,0 +1,393 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "container/list" + "crypto/rand" + "crypto/sha1" + "errors" + "io" + "strconv" + "sync" + "time" + + "github.com/garyburd/redigo/internal" +) + +var nowFunc = time.Now // for testing + +// ErrPoolExhausted is returned from a pool connection method (Do, Send, +// Receive, Flush, Err) when the maximum number of database connections in the +// pool has been reached. 
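+//
+// A sketch of checking for it (illustrative; assumes MaxActive is set and
+// Wait is false):
+//
+//  c := pool.Get()
+//  defer c.Close()
+//  if _, err := c.Do("PING"); err == redis.ErrPoolExhausted {
+//      // back off or fail fast
+//  }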
+var ErrPoolExhausted = errors.New("redigo: connection pool exhausted")
+
+var (
+	errPoolClosed = errors.New("redigo: connection pool closed")
+	errConnClosed = errors.New("redigo: connection closed")
+)
+
+// Pool maintains a pool of connections. The application calls the Get method
+// to get a connection from the pool and the connection's Close method to
+// return the connection's resources to the pool.
+//
+// The following example shows how to use a pool in a web application. The
+// application creates a pool at application startup and makes it available to
+// request handlers using a global variable.
+//
+//  func newPool(server, password string) *redis.Pool {
+//      return &redis.Pool{
+//          MaxIdle: 3,
+//          IdleTimeout: 240 * time.Second,
+//          Dial: func () (redis.Conn, error) {
+//              c, err := redis.Dial("tcp", server)
+//              if err != nil {
+//                  return nil, err
+//              }
+//              if _, err := c.Do("AUTH", password); err != nil {
+//                  c.Close()
+//                  return nil, err
+//              }
+//              return c, err
+//          },
+//          TestOnBorrow: func(c redis.Conn, t time.Time) error {
+//              _, err := c.Do("PING")
+//              return err
+//          },
+//      }
+//  }
+//
+//  var (
+//      pool *redis.Pool
+//      redisServer = flag.String("redisServer", ":6379", "")
+//      redisPassword = flag.String("redisPassword", "", "")
+//  )
+//
+//  func main() {
+//      flag.Parse()
+//      pool = newPool(*redisServer, *redisPassword)
+//      ...
+//  }
+//
+// A request handler gets a connection from the pool and closes the connection
+// when the handler is done:
+//
+//  func serveHome(w http.ResponseWriter, r *http.Request) {
+//      conn := pool.Get()
+//      defer conn.Close()
+//      ....
+//  }
+//
+type Pool struct {
+
+	// Dial is an application supplied function for creating and configuring a
+	// connection.
+	//
+	// The connection returned from Dial must not be in a special state
+	// (subscribed to pubsub channel, transaction started, ...).
+	Dial func() (Conn, error)
+
+	// TestOnBorrow is an optional application supplied function for checking
+	// the health of an idle connection before the connection is used again by
+	// the application. Argument t is the time that the connection was returned
+	// to the pool. If the function returns an error, then the connection is
+	// closed.
+	TestOnBorrow func(c Conn, t time.Time) error
+
+	// Maximum number of idle connections in the pool.
+	MaxIdle int
+
+	// Maximum number of connections allocated by the pool at a given time.
+	// When zero, there is no limit on the number of connections in the pool.
+	MaxActive int
+
+	// Close connections after remaining idle for this duration. If the value
+	// is zero, then idle connections are not closed. Applications should set
+	// the timeout to a value less than the server's timeout.
+	IdleTimeout time.Duration
+
+	// If Wait is true and the pool is at the MaxActive limit, then Get() waits
+	// for a connection to be returned to the pool before returning.
+	Wait bool
+
+	// mu protects fields defined below.
+	mu     sync.Mutex
+	cond   *sync.Cond
+	closed bool
+	active int
+
+	// Stack of idleConn with most recently used at the front.
+	idle list.List
+}
+
+type idleConn struct {
+	c Conn
+	t time.Time
+}
+
+// NewPool creates a new pool.
+//
+// Deprecated: Initialize the Pool directly as shown in the example.
+func NewPool(newFn func() (Conn, error), maxIdle int) *Pool {
+	return &Pool{Dial: newFn, MaxIdle: maxIdle}
+}
+
+// Get gets a connection. The application must close the returned connection.
+// This method always returns a valid connection so that applications can defer +// error handling to the first use of the connection. If there is an error +// getting an underlying connection, then the connection Err, Do, Send, Flush +// and Receive methods return that error. +func (p *Pool) Get() Conn { + c, err := p.get() + if err != nil { + return errorConnection{err} + } + return &pooledConnection{p: p, c: c} +} + +// ActiveCount returns the number of active connections in the pool. +func (p *Pool) ActiveCount() int { + p.mu.Lock() + active := p.active + p.mu.Unlock() + return active +} + +// Close releases the resources used by the pool. +func (p *Pool) Close() error { + p.mu.Lock() + idle := p.idle + p.idle.Init() + p.closed = true + p.active -= idle.Len() + if p.cond != nil { + p.cond.Broadcast() + } + p.mu.Unlock() + for e := idle.Front(); e != nil; e = e.Next() { + e.Value.(idleConn).c.Close() + } + return nil +} + +// release decrements the active count and signals waiters. The caller must +// hold p.mu during the call. +func (p *Pool) release() { + p.active -= 1 + if p.cond != nil { + p.cond.Signal() + } +} + +// get prunes stale connections and returns a connection from the idle list or +// creates a new connection. +func (p *Pool) get() (Conn, error) { + p.mu.Lock() + + // Prune stale connections. + + if timeout := p.IdleTimeout; timeout > 0 { + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Back() + if e == nil { + break + } + ic := e.Value.(idleConn) + if ic.t.Add(timeout).After(nowFunc()) { + break + } + p.idle.Remove(e) + p.release() + p.mu.Unlock() + ic.c.Close() + p.mu.Lock() + } + } + + for { + + // Get idle connection. + + for i, n := 0, p.idle.Len(); i < n; i++ { + e := p.idle.Front() + if e == nil { + break + } + ic := e.Value.(idleConn) + p.idle.Remove(e) + test := p.TestOnBorrow + p.mu.Unlock() + if test == nil || test(ic.c, ic.t) == nil { + return ic.c, nil + } + ic.c.Close() + p.mu.Lock() + p.release() + } + + // Check for pool closed before dialing a new connection. + + if p.closed { + p.mu.Unlock() + return nil, errors.New("redigo: get on closed pool") + } + + // Dial new connection if under limit. + + if p.MaxActive == 0 || p.active < p.MaxActive { + dial := p.Dial + p.active += 1 + p.mu.Unlock() + c, err := dial() + if err != nil { + p.mu.Lock() + p.release() + p.mu.Unlock() + c = nil + } + return c, err + } + + if !p.Wait { + p.mu.Unlock() + return nil, ErrPoolExhausted + } + + if p.cond == nil { + p.cond = sync.NewCond(&p.mu) + } + p.cond.Wait() + } +} + +func (p *Pool) put(c Conn, forceClose bool) error { + err := c.Err() + p.mu.Lock() + if !p.closed && err == nil && !forceClose { + p.idle.PushFront(idleConn{t: nowFunc(), c: c}) + if p.idle.Len() > p.MaxIdle { + c = p.idle.Remove(p.idle.Back()).(idleConn).c + } else { + c = nil + } + } + + if c == nil { + if p.cond != nil { + p.cond.Signal() + } + p.mu.Unlock() + return nil + } + + p.release() + p.mu.Unlock() + return c.Close() +} + +type pooledConnection struct { + p *Pool + c Conn + state int +} + +var ( + sentinel []byte + sentinelOnce sync.Once +) + +func initSentinel() { + p := make([]byte, 64) + if _, err := rand.Read(p); err == nil { + sentinel = p + } else { + h := sha1.New() + io.WriteString(h, "Oops, rand failed. 
Use time instead.") + io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) + sentinel = h.Sum(nil) + } +} + +func (pc *pooledConnection) Close() error { + c := pc.c + if _, ok := c.(errorConnection); ok { + return nil + } + pc.c = errorConnection{errConnClosed} + + if pc.state&internal.MultiState != 0 { + c.Send("DISCARD") + pc.state &^= (internal.MultiState | internal.WatchState) + } else if pc.state&internal.WatchState != 0 { + c.Send("UNWATCH") + pc.state &^= internal.WatchState + } + if pc.state&internal.SubscribeState != 0 { + c.Send("UNSUBSCRIBE") + c.Send("PUNSUBSCRIBE") + // To detect the end of the message stream, ask the server to echo + // a sentinel value and read until we see that value. + sentinelOnce.Do(initSentinel) + c.Send("ECHO", sentinel) + c.Flush() + for { + p, err := c.Receive() + if err != nil { + break + } + if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { + pc.state &^= internal.SubscribeState + break + } + } + } + c.Do("") + pc.p.put(c, pc.state != 0) + return nil +} + +func (pc *pooledConnection) Err() error { + return pc.c.Err() +} + +func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) { + ci := internal.LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Do(commandName, args...) +} + +func (pc *pooledConnection) Send(commandName string, args ...interface{}) error { + ci := internal.LookupCommandInfo(commandName) + pc.state = (pc.state | ci.Set) &^ ci.Clear + return pc.c.Send(commandName, args...) +} + +func (pc *pooledConnection) Flush() error { + return pc.c.Flush() +} + +func (pc *pooledConnection) Receive() (reply interface{}, err error) { + return pc.c.Receive() +} + +type errorConnection struct{ err error } + +func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } +func (ec errorConnection) Send(string, ...interface{}) error { return ec.err } +func (ec errorConnection) Err() error { return ec.err } +func (ec errorConnection) Close() error { return ec.err } +func (ec errorConnection) Flush() error { return ec.err } +func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err } diff --git a/transfersh-server/vendor/github.com/garyburd/redigo/redis/pubsub.go b/transfersh-server/vendor/github.com/garyburd/redigo/redis/pubsub.go new file mode 100644 index 0000000..c0ecce8 --- /dev/null +++ b/transfersh-server/vendor/github.com/garyburd/redigo/redis/pubsub.go @@ -0,0 +1,144 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import "errors" + +// Subscription represents a subscribe or unsubscribe notification. +type Subscription struct { + + // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" + Kind string + + // The channel that was changed. + Channel string + + // The current number of subscriptions for connection. + Count int +} + +// Message represents a message notification. 
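+// A Message is delivered once for each PUBLISH sent to a channel that the
+// connection is currently subscribed to.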
+type Message struct { + + // The originating channel. + Channel string + + // The message data. + Data []byte +} + +// PMessage represents a pmessage notification. +type PMessage struct { + + // The matched pattern. + Pattern string + + // The originating channel. + Channel string + + // The message data. + Data []byte +} + +// Pong represents a pubsub pong notification. +type Pong struct { + Data string +} + +// PubSubConn wraps a Conn with convenience methods for subscribers. +type PubSubConn struct { + Conn Conn +} + +// Close closes the connection. +func (c PubSubConn) Close() error { + return c.Conn.Close() +} + +// Subscribe subscribes the connection to the specified channels. +func (c PubSubConn) Subscribe(channel ...interface{}) error { + c.Conn.Send("SUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PSubscribe subscribes the connection to the given patterns. +func (c PubSubConn) PSubscribe(channel ...interface{}) error { + c.Conn.Send("PSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Unsubscribe unsubscribes the connection from the given channels, or from all +// of them if none is given. +func (c PubSubConn) Unsubscribe(channel ...interface{}) error { + c.Conn.Send("UNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PUnsubscribe unsubscribes the connection from the given patterns, or from all +// of them if none is given. +func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { + c.Conn.Send("PUNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Ping sends a PING to the server with the specified data. +func (c PubSubConn) Ping(data string) error { + c.Conn.Send("PING", data) + return c.Conn.Flush() +} + +// Receive returns a pushed message as a Subscription, Message, PMessage, Pong +// or error. The return value is intended to be used directly in a type switch +// as illustrated in the PubSubConn example. +func (c PubSubConn) Receive() interface{} { + reply, err := Values(c.Conn.Receive()) + if err != nil { + return err + } + + var kind string + reply, err = Scan(reply, &kind) + if err != nil { + return err + } + + switch kind { + case "message": + var m Message + if _, err := Scan(reply, &m.Channel, &m.Data); err != nil { + return err + } + return m + case "pmessage": + var pm PMessage + if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil { + return err + } + return pm + case "subscribe", "psubscribe", "unsubscribe", "punsubscribe": + s := Subscription{Kind: kind} + if _, err := Scan(reply, &s.Channel, &s.Count); err != nil { + return err + } + return s + case "pong": + var p Pong + if _, err := Scan(reply, &p.Data); err != nil { + return err + } + return p + } + return errors.New("redigo: unknown pubsub notification") +} diff --git a/transfersh-server/vendor/github.com/garyburd/redigo/redis/redis.go b/transfersh-server/vendor/github.com/garyburd/redigo/redis/redis.go new file mode 100644 index 0000000..c90a48e --- /dev/null +++ b/transfersh-server/vendor/github.com/garyburd/redigo/redis/redis.go @@ -0,0 +1,44 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +// Error represents an error returned in a command reply. +type Error string + +func (err Error) Error() string { return string(err) } + +// Conn represents a connection to a Redis server. +type Conn interface { + // Close closes the connection. + Close() error + + // Err returns a non-nil value if the connection is broken. The returned + // value is either the first non-nil value returned from the underlying + // network connection or a protocol parsing error. Applications should + // close broken connections. + Err() error + + // Do sends a command to the server and returns the received reply. + Do(commandName string, args ...interface{}) (reply interface{}, err error) + + // Send writes the command to the client's output buffer. + Send(commandName string, args ...interface{}) error + + // Flush flushes the output buffer to the Redis server. + Flush() error + + // Receive receives a single reply from the Redis server + Receive() (reply interface{}, err error) +} diff --git a/transfersh-server/vendor/github.com/garyburd/redigo/redis/reply.go b/transfersh-server/vendor/github.com/garyburd/redigo/redis/reply.go new file mode 100644 index 0000000..5789614 --- /dev/null +++ b/transfersh-server/vendor/github.com/garyburd/redigo/redis/reply.go @@ -0,0 +1,393 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "strconv" +) + +// ErrNil indicates that a reply value is nil. +var ErrNil = errors.New("redigo: nil returned") + +// Int is a helper that converts a command reply to an integer. If err is not +// equal to nil, then Int returns 0, err. Otherwise, Int converts the +// reply to an int as follows: +// +// Reply type Result +// integer int(reply), nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Int(reply interface{}, err error) (int, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + x := int(reply) + if int64(x) != reply { + return 0, strconv.ErrRange + } + return x, nil + case []byte: + n, err := strconv.ParseInt(string(reply), 10, 0) + return int(n), err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply) +} + +// Int64 is a helper that converts a command reply to 64 bit integer. If err is +// not equal to nil, then Int returns 0, err. 
Otherwise, Int64 converts the
+// reply to an int64 as follows:
+//
+//  Reply type    Result
+//  integer       reply, nil
+//  bulk string   parsed reply, nil
+//  nil           0, ErrNil
+//  other         0, error
+func Int64(reply interface{}, err error) (int64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		return reply, nil
+	case []byte:
+		n, err := strconv.ParseInt(string(reply), 10, 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply)
+}
+
+var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
+
+// Uint64 is a helper that converts a command reply to 64 bit integer. If err is
+// not equal to nil, then Uint64 returns 0, err. Otherwise, Uint64 converts the
+// reply to a uint64 as follows:
+//
+//  Reply type    Result
+//  integer       reply, nil
+//  bulk string   parsed reply, nil
+//  nil           0, ErrNil
+//  other         0, error
+func Uint64(reply interface{}, err error) (uint64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		if reply < 0 {
+			return 0, errNegativeInt
+		}
+		return uint64(reply), nil
+	case []byte:
+		n, err := strconv.ParseUint(string(reply), 10, 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply)
+}
+
+// Float64 is a helper that converts a command reply to 64 bit float. If err is
+// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts
+// the reply to a float64 as follows:
+//
+//  Reply type    Result
+//  bulk string   parsed reply, nil
+//  nil           0, ErrNil
+//  other         0, error
+func Float64(reply interface{}, err error) (float64, error) {
+	if err != nil {
+		return 0, err
+	}
+	switch reply := reply.(type) {
+	case []byte:
+		n, err := strconv.ParseFloat(string(reply), 64)
+		return n, err
+	case nil:
+		return 0, ErrNil
+	case Error:
+		return 0, reply
+	}
+	return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply)
+}
+
+// String is a helper that converts a command reply to a string. If err is not
+// equal to nil, then String returns "", err. Otherwise String converts the
+// reply to a string as follows:
+//
+//  Reply type      Result
+//  bulk string     string(reply), nil
+//  simple string   reply, nil
+//  nil             "", ErrNil
+//  other           "", error
+func String(reply interface{}, err error) (string, error) {
+	if err != nil {
+		return "", err
+	}
+	switch reply := reply.(type) {
+	case []byte:
+		return string(reply), nil
+	case string:
+		return reply, nil
+	case nil:
+		return "", ErrNil
+	case Error:
+		return "", reply
+	}
+	return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply)
+}
+
+// Bytes is a helper that converts a command reply to a slice of bytes. If err
+// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts
+// the reply to a slice of bytes as follows:
+//
+//  Reply type      Result
+//  bulk string     reply, nil
+//  simple string   []byte(reply), nil
+//  nil             nil, ErrNil
+//  other           nil, error
+func Bytes(reply interface{}, err error) ([]byte, error) {
+	if err != nil {
+		return nil, err
+	}
+	switch reply := reply.(type) {
+	case []byte:
+		return reply, nil
+	case string:
+		return []byte(reply), nil
+	case nil:
+		return nil, ErrNil
+	case Error:
+		return nil, reply
+	}
+	return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply)
+}
+
+// Bool is a helper that converts a command reply to a boolean.
If err is not
+// equal to nil, then Bool returns false, err. Otherwise Bool converts the
+// reply to a boolean as follows:
+//
+//  Reply type      Result
+//  integer         value != 0, nil
+//  bulk string     strconv.ParseBool(reply)
+//  nil             false, ErrNil
+//  other           false, error
+func Bool(reply interface{}, err error) (bool, error) {
+	if err != nil {
+		return false, err
+	}
+	switch reply := reply.(type) {
+	case int64:
+		return reply != 0, nil
+	case []byte:
+		return strconv.ParseBool(string(reply))
+	case nil:
+		return false, ErrNil
+	case Error:
+		return false, reply
+	}
+	return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
+}
+
+// MultiBulk is a helper that converts an array command reply to a []interface{}.
+//
+// Deprecated: Use Values instead.
+func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) }
+
+// Values is a helper that converts an array command reply to a []interface{}.
+// If err is not equal to nil, then Values returns nil, err. Otherwise, Values
+// converts the reply as follows:
+//
+//  Reply type      Result
+//  array           reply, nil
+//  nil             nil, ErrNil
+//  other           nil, error
+func Values(reply interface{}, err error) ([]interface{}, error) {
+	if err != nil {
+		return nil, err
+	}
+	switch reply := reply.(type) {
+	case []interface{}:
+		return reply, nil
+	case nil:
+		return nil, ErrNil
+	case Error:
+		return nil, reply
+	}
+	return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply)
+}
+
+// Strings is a helper that converts an array command reply to a []string. If
+// err is not equal to nil, then Strings returns nil, err. Nil array items are
+// converted to "" in the output slice. Strings returns an error if an array
+// item is not a bulk string or nil.
+func Strings(reply interface{}, err error) ([]string, error) {
+	if err != nil {
+		return nil, err
+	}
+	switch reply := reply.(type) {
+	case []interface{}:
+		result := make([]string, len(reply))
+		for i := range reply {
+			if reply[i] == nil {
+				continue
+			}
+			p, ok := reply[i].([]byte)
+			if !ok {
+				return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i])
+			}
+			result[i] = string(p)
+		}
+		return result, nil
+	case nil:
+		return nil, ErrNil
+	case Error:
+		return nil, reply
+	}
+	return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply)
+}
+
+// ByteSlices is a helper that converts an array command reply to a [][]byte.
+// If err is not equal to nil, then ByteSlices returns nil, err. Nil array
+// items stay nil. ByteSlices returns an error if an array item is not a
+// bulk string or nil.
+func ByteSlices(reply interface{}, err error) ([][]byte, error) {
+	if err != nil {
+		return nil, err
+	}
+	switch reply := reply.(type) {
+	case []interface{}:
+		result := make([][]byte, len(reply))
+		for i := range reply {
+			if reply[i] == nil {
+				continue
+			}
+			p, ok := reply[i].([]byte)
+			if !ok {
+				return nil, fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", reply[i])
+			}
+			result[i] = p
+		}
+		return result, nil
+	case nil:
+		return nil, ErrNil
+	case Error:
+		return nil, reply
+	}
+	return nil, fmt.Errorf("redigo: unexpected type for ByteSlices, got type %T", reply)
+}
+
+// Ints is a helper that converts an array command reply to a []int. If
+// err is not equal to nil, then Ints returns nil, err.
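+//
+// A usage sketch (the key name is illustrative):
+//
+//  ids, err := redis.Ints(c.Do("LRANGE", "ids", 0, -1))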
+func Ints(reply interface{}, err error) ([]int, error) { + var ints []int + values, err := Values(reply, err) + if err != nil { + return ints, err + } + if err := ScanSlice(values, &ints); err != nil { + return ints, err + } + return ints, nil +} + +// StringMap is a helper that converts an array of strings (alternating key, value) +// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format. +// Requires an even number of values in result. +func StringMap(result interface{}, err error) (map[string]string, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: StringMap expects even number of values result") + } + m := make(map[string]string, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, okKey := values[i].([]byte) + value, okValue := values[i+1].([]byte) + if !okKey || !okValue { + return nil, errors.New("redigo: ScanMap key not a bulk string value") + } + m[string(key)] = string(value) + } + return m, nil +} + +// IntMap is a helper that converts an array of strings (alternating key, value) +// into a map[string]int. The HGETALL commands return replies in this format. +// Requires an even number of values in result. +func IntMap(result interface{}, err error) (map[string]int, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: IntMap expects even number of values result") + } + m := make(map[string]int, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, ok := values[i].([]byte) + if !ok { + return nil, errors.New("redigo: ScanMap key not a bulk string value") + } + value, err := Int(values[i+1], nil) + if err != nil { + return nil, err + } + m[string(key)] = value + } + return m, nil +} + +// Int64Map is a helper that converts an array of strings (alternating key, value) +// into a map[string]int64. The HGETALL commands return replies in this format. +// Requires an even number of values in result. +func Int64Map(result interface{}, err error) (map[string]int64, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: Int64Map expects even number of values result") + } + m := make(map[string]int64, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, ok := values[i].([]byte) + if !ok { + return nil, errors.New("redigo: ScanMap key not a bulk string value") + } + value, err := Int64(values[i+1], nil) + if err != nil { + return nil, err + } + m[string(key)] = value + } + return m, nil +} diff --git a/transfersh-server/vendor/github.com/garyburd/redigo/redis/scan.go b/transfersh-server/vendor/github.com/garyburd/redigo/redis/scan.go new file mode 100644 index 0000000..962e94b --- /dev/null +++ b/transfersh-server/vendor/github.com/garyburd/redigo/redis/scan.go @@ -0,0 +1,555 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "sync" +) + +func ensureLen(d reflect.Value, n int) { + if n > d.Cap() { + d.Set(reflect.MakeSlice(d.Type(), n, n)) + } else { + d.SetLen(n) + } +} + +func cannotConvert(d reflect.Value, s interface{}) error { + var sname string + switch s.(type) { + case string: + sname = "Redis simple string" + case Error: + sname = "Redis error" + case int64: + sname = "Redis integer" + case []byte: + sname = "Redis bulk string" + case []interface{}: + sname = "Redis array" + default: + sname = reflect.TypeOf(s).String() + } + return fmt.Errorf("cannot convert from %s to %s", sname, d.Type()) +} + +func convertAssignBulkString(d reflect.Value, s []byte) (err error) { + switch d.Type().Kind() { + case reflect.Float32, reflect.Float64: + var x float64 + x, err = strconv.ParseFloat(string(s), d.Type().Bits()) + d.SetFloat(x) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var x int64 + x, err = strconv.ParseInt(string(s), 10, d.Type().Bits()) + d.SetInt(x) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var x uint64 + x, err = strconv.ParseUint(string(s), 10, d.Type().Bits()) + d.SetUint(x) + case reflect.Bool: + var x bool + x, err = strconv.ParseBool(string(s)) + d.SetBool(x) + case reflect.String: + d.SetString(string(s)) + case reflect.Slice: + if d.Type().Elem().Kind() != reflect.Uint8 { + err = cannotConvert(d, s) + } else { + d.SetBytes(s) + } + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignInt(d reflect.Value, s int64) (err error) { + switch d.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + d.SetInt(s) + if d.Int() != s { + err = strconv.ErrRange + d.SetInt(0) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if s < 0 { + err = strconv.ErrRange + } else { + x := uint64(s) + d.SetUint(x) + if d.Uint() != x { + err = strconv.ErrRange + d.SetUint(0) + } + } + case reflect.Bool: + d.SetBool(s != 0) + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignValue(d reflect.Value, s interface{}) (err error) { + switch s := s.(type) { + case []byte: + err = convertAssignBulkString(d, s) + case int64: + err = convertAssignInt(d, s) + default: + err = cannotConvert(d, s) + } + return err +} + +func convertAssignArray(d reflect.Value, s []interface{}) error { + if d.Type().Kind() != reflect.Slice { + return cannotConvert(d, s) + } + ensureLen(d, len(s)) + for i := 0; i < len(s); i++ { + if err := convertAssignValue(d.Index(i), s[i]); err != nil { + return err + } + } + return nil +} + +func convertAssign(d interface{}, s interface{}) (err error) { + // Handle the most common destination types using type switches and + // fall back to reflection for all other types. 
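+	// The source types below mirror the Redis protocol: bulk strings
+	// arrive as []byte, integers as int64, simple strings as string,
+	// arrays as []interface{}, and error replies as Error.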
+	switch s := s.(type) {
+	case nil:
+		// ignore
+	case []byte:
+		switch d := d.(type) {
+		case *string:
+			*d = string(s)
+		case *int:
+			*d, err = strconv.Atoi(string(s))
+		case *bool:
+			*d, err = strconv.ParseBool(string(s))
+		case *[]byte:
+			*d = s
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignBulkString(d.Elem(), s)
+			}
+		}
+	case int64:
+		switch d := d.(type) {
+		case *int:
+			x := int(s)
+			if int64(x) != s {
+				err = strconv.ErrRange
+				x = 0
+			}
+			*d = x
+		case *bool:
+			*d = s != 0
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignInt(d.Elem(), s)
+			}
+		}
+	case string:
+		switch d := d.(type) {
+		case *string:
+			*d = string(s)
+		default:
+			err = cannotConvert(reflect.ValueOf(d), s)
+		}
+	case []interface{}:
+		switch d := d.(type) {
+		case *[]interface{}:
+			*d = s
+		case *interface{}:
+			*d = s
+		case nil:
+			// skip value
+		default:
+			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+				err = cannotConvert(d, s)
+			} else {
+				err = convertAssignArray(d.Elem(), s)
+			}
+		}
+	case Error:
+		err = s
+	default:
+		err = cannotConvert(reflect.ValueOf(d), s)
+	}
+	return
+}
+
+// Scan copies from src to the values pointed at by dest.
+//
+// The values pointed at by dest must be an integer, float, boolean, string,
+// []byte, interface{} or slices of these types. Scan uses the standard strconv
+// package to convert bulk strings to numeric and boolean types.
+//
+// If a dest value is nil, then the corresponding src value is skipped.
+//
+// If a src element is nil, then the corresponding dest value is not modified.
+//
+// To enable easy use of Scan in a loop, Scan returns the slice of src
+// following the copied values.
+func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) {
+	if len(src) < len(dest) {
+		return nil, errors.New("redigo.Scan: array short")
+	}
+	var err error
+	for i, d := range dest {
+		err = convertAssign(d, src[i])
+		if err != nil {
+			err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err)
+			break
+		}
+	}
+	return src[len(dest):], err
+}
+
+type fieldSpec struct {
+	name      string
+	index     []int
+	omitEmpty bool
+}
+
+type structSpec struct {
+	m map[string]*fieldSpec
+	l []*fieldSpec
+}
+
+func (ss *structSpec) fieldSpec(name []byte) *fieldSpec {
+	return ss.m[string(name)]
+}
+
+func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) {
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		switch {
+		case f.PkgPath != "" && !f.Anonymous:
+			// Ignore unexported fields.
+		case f.Anonymous:
+			// TODO: Handle pointers. Requires change to decoder and
+			// protection against infinite recursion.
+			if f.Type.Kind() == reflect.Struct {
+				compileStructSpec(f.Type, depth, append(index, i), ss)
+			}
+		default:
+			fs := &fieldSpec{name: f.Name}
+			tag := f.Tag.Get("redis")
+			p := strings.Split(tag, ",")
+			if len(p) > 0 {
+				if p[0] == "-" {
+					continue
+				}
+				if len(p[0]) > 0 {
+					fs.name = p[0]
+				}
+				for _, s := range p[1:] {
+					switch s {
+					case "omitempty":
+						fs.omitEmpty = true
+					default:
+						panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name()))
+					}
+				}
+			}
+			d, found := depth[fs.name]
+			if !found {
+				d = 1 << 30
+			}
+			switch {
+			case len(index) == d:
+				// At same depth, remove from result.
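+				// Fields promoted from the same embedding depth are
+				// ambiguous, so the earlier one is dropped as well and
+				// neither is used (the same rule encoding/json applies).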
+				delete(ss.m, fs.name)
+				j := 0
+				for i := 0; i < len(ss.l); i++ {
+					if fs.name != ss.l[i].name {
+						ss.l[j] = ss.l[i]
+						j += 1
+					}
+				}
+				ss.l = ss.l[:j]
+			case len(index) < d:
+				fs.index = make([]int, len(index)+1)
+				copy(fs.index, index)
+				fs.index[len(index)] = i
+				depth[fs.name] = len(index)
+				ss.m[fs.name] = fs
+				ss.l = append(ss.l, fs)
+			}
+		}
+	}
+}
+
+var (
+	structSpecMutex  sync.RWMutex
+	structSpecCache  = make(map[reflect.Type]*structSpec)
+	defaultFieldSpec = &fieldSpec{}
+)
+
+func structSpecForType(t reflect.Type) *structSpec {
+
+	structSpecMutex.RLock()
+	ss, found := structSpecCache[t]
+	structSpecMutex.RUnlock()
+	if found {
+		return ss
+	}
+
+	structSpecMutex.Lock()
+	defer structSpecMutex.Unlock()
+	ss, found = structSpecCache[t]
+	if found {
+		return ss
+	}
+
+	ss = &structSpec{m: make(map[string]*fieldSpec)}
+	compileStructSpec(t, make(map[string]int), nil, ss)
+	structSpecCache[t] = ss
+	return ss
+}
+
+var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct")
+
+// ScanStruct scans alternating names and values from src to a struct. The
+// HGETALL and CONFIG GET commands return replies in this format.
+//
+// ScanStruct uses exported field names to match values in the response. Use
+// the 'redis' field tag to override the name:
+//
+//	Field int `redis:"myName"`
+//
+// Fields with the tag redis:"-" are ignored.
+//
+// Integer, float, boolean, string and []byte fields are supported. Scan uses the
+// standard strconv package to convert bulk string values to numeric and
+// boolean types.
+//
+// If a src element is nil, then the corresponding field is not modified.
+func ScanStruct(src []interface{}, dest interface{}) error {
+	d := reflect.ValueOf(dest)
+	if d.Kind() != reflect.Ptr || d.IsNil() {
+		return errScanStructValue
+	}
+	d = d.Elem()
+	if d.Kind() != reflect.Struct {
+		return errScanStructValue
+	}
+	ss := structSpecForType(d.Type())
+
+	if len(src)%2 != 0 {
+		return errors.New("redigo.ScanStruct: number of values not a multiple of 2")
+	}
+
+	for i := 0; i < len(src); i += 2 {
+		s := src[i+1]
+		if s == nil {
+			continue
+		}
+		name, ok := src[i].([]byte)
+		if !ok {
+			return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i)
+		}
+		fs := ss.fieldSpec(name)
+		if fs == nil {
+			continue
+		}
+		if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+			return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err)
+		}
+	}
+	return nil
+}
+
+var (
+	errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a slice")
+)
+
+// ScanSlice scans src to the slice pointed to by dest. The elements of the
+// dest slice must be integer, float, boolean, string, struct or pointer to
+// struct values.
+//
+// Struct fields must be integer, float, boolean or string values. All struct
+// fields are used unless a subset is specified using fieldNames.
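+//
+// A hedged sketch, not part of the original source (the element type and
+// the source values are illustrative):
+//
+//	var rows []struct {
+//		Name  string
+//		Score int
+//	}
+//	err := ScanSlice(values, &rows)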
+func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanSliceValue + } + d = d.Elem() + if d.Kind() != reflect.Slice { + return errScanSliceValue + } + + isPtr := false + t := d.Type().Elem() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + isPtr = true + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + ensureLen(d, len(src)) + for i, s := range src { + if s == nil { + continue + } + if err := convertAssignValue(d.Index(i), s); err != nil { + return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err) + } + } + return nil + } + + ss := structSpecForType(t) + fss := ss.l + if len(fieldNames) > 0 { + fss = make([]*fieldSpec, len(fieldNames)) + for i, name := range fieldNames { + fss[i] = ss.m[name] + if fss[i] == nil { + return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name) + } + } + } + + if len(fss) == 0 { + return errors.New("redigo.ScanSlice: no struct fields") + } + + n := len(src) / len(fss) + if n*len(fss) != len(src) { + return errors.New("redigo.ScanSlice: length not a multiple of struct field count") + } + + ensureLen(d, n) + for i := 0; i < n; i++ { + d := d.Index(i) + if isPtr { + if d.IsNil() { + d.Set(reflect.New(t)) + } + d = d.Elem() + } + for j, fs := range fss { + s := src[i*len(fss)+j] + if s == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err) + } + } + } + return nil +} + +// Args is a helper for constructing command arguments from structured values. +type Args []interface{} + +// Add returns the result of appending value to args. +func (args Args) Add(value ...interface{}) Args { + return append(args, value...) +} + +// AddFlat returns the result of appending the flattened value of v to args. +// +// Maps are flattened by appending the alternating keys and map values to args. +// +// Slices are flattened by appending the slice elements to args. +// +// Structs are flattened by appending the alternating names and values of +// exported fields to args. If v is a nil struct pointer, then nothing is +// appended. The 'redis' field tag overrides struct field names. See ScanStruct +// for more information on the use of the 'redis' field tag. +// +// Other types are appended to args as is. 
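+//
+// A hedged usage sketch, not part of the original source (the key and
+// field values are illustrative):
+//
+//	args := Args{}.Add("key1").AddFlat(map[string]string{"f1": "v1"})
+//	// args now holds "key1", "f1", "v1", suitable for c.Do("HMSET", args...)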
+func (args Args) AddFlat(v interface{}) Args { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Struct: + args = flattenStruct(args, rv) + case reflect.Slice: + for i := 0; i < rv.Len(); i++ { + args = append(args, rv.Index(i).Interface()) + } + case reflect.Map: + for _, k := range rv.MapKeys() { + args = append(args, k.Interface(), rv.MapIndex(k).Interface()) + } + case reflect.Ptr: + if rv.Type().Elem().Kind() == reflect.Struct { + if !rv.IsNil() { + args = flattenStruct(args, rv.Elem()) + } + } else { + args = append(args, v) + } + default: + args = append(args, v) + } + return args +} + +func flattenStruct(args Args, v reflect.Value) Args { + ss := structSpecForType(v.Type()) + for _, fs := range ss.l { + fv := v.FieldByIndex(fs.index) + if fs.omitEmpty { + var empty = false + switch fv.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + empty = fv.Len() == 0 + case reflect.Bool: + empty = !fv.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + empty = fv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + empty = fv.Uint() == 0 + case reflect.Float32, reflect.Float64: + empty = fv.Float() == 0 + case reflect.Interface, reflect.Ptr: + empty = fv.IsNil() + } + if empty { + continue + } + } + args = append(args, fs.name, fv.Interface()) + } + return args +} diff --git a/transfersh-server/vendor/github.com/garyburd/redigo/redis/script.go b/transfersh-server/vendor/github.com/garyburd/redigo/redis/script.go new file mode 100644 index 0000000..78605a9 --- /dev/null +++ b/transfersh-server/vendor/github.com/garyburd/redigo/redis/script.go @@ -0,0 +1,86 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "crypto/sha1" + "encoding/hex" + "io" + "strings" +) + +// Script encapsulates the source, hash and key count for a Lua script. See +// http://redis.io/commands/eval for information on scripts in Redis. +type Script struct { + keyCount int + src string + hash string +} + +// NewScript returns a new script object. If keyCount is greater than or equal +// to zero, then the count is automatically inserted in the EVAL command +// argument list. If keyCount is less than zero, then the application supplies +// the count as the first value in the keysAndArgs argument to the Do, Send and +// SendHash methods. +func NewScript(keyCount int, src string) *Script { + h := sha1.New() + io.WriteString(h, src) + return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} +} + +func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { + var args []interface{} + if s.keyCount < 0 { + args = make([]interface{}, 1+len(keysAndArgs)) + args[0] = spec + copy(args[1:], keysAndArgs) + } else { + args = make([]interface{}, 2+len(keysAndArgs)) + args[0] = spec + args[1] = s.keyCount + copy(args[2:], keysAndArgs) + } + return args +} + +// Do evaluates the script. 
Under the covers, Do optimistically evaluates the +// script using the EVALSHA command. If the command fails because the script is +// not loaded, then Do evaluates the script using the EVAL command (thus +// causing the script to load). +func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { + v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) + if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { + v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) + } + return v, err +} + +// SendHash evaluates the script without waiting for the reply. The script is +// evaluated with the EVALSHA command. The application must ensure that the +// script is loaded by a previous call to Send, Do or Load methods. +func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) +} + +// Send evaluates the script without waiting for the reply. +func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVAL", s.args(s.src, keysAndArgs)...) +} + +// Load loads the script without evaluating it. +func (s *Script) Load(c Conn) error { + _, err := c.Do("SCRIPT", "LOAD", s.src) + return err +} diff --git a/transfersh-server/vendor/github.com/goamz/goamz/aws/LICENSE b/transfersh-server/vendor/github.com/goamz/goamz/aws/LICENSE new file mode 100644 index 0000000..53320c3 --- /dev/null +++ b/transfersh-server/vendor/github.com/goamz/goamz/aws/LICENSE @@ -0,0 +1,185 @@ +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. 
+Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
diff --git a/transfersh-server/vendor/github.com/goamz/goamz/aws/attempt.go b/transfersh-server/vendor/github.com/goamz/goamz/aws/attempt.go new file mode 100644 index 0000000..c0654f5 --- /dev/null +++ b/transfersh-server/vendor/github.com/goamz/goamz/aws/attempt.go @@ -0,0 +1,74 @@ +package aws + +import ( + "time" +) + +// AttemptStrategy represents a strategy for waiting for an action +// to complete successfully. This is an internal type used by the +// implementation of other goamz packages. +type AttemptStrategy struct { + Total time.Duration // total duration of attempt. + Delay time.Duration // interval between each try in the burst. + Min int // minimum number of retries; overrides Total +} + +type Attempt struct { + strategy AttemptStrategy + last time.Time + end time.Time + force bool + count int +} + +// Start begins a new sequence of attempts for the given strategy. +func (s AttemptStrategy) Start() *Attempt { + now := time.Now() + return &Attempt{ + strategy: s, + last: now, + end: now.Add(s.Total), + force: true, + } +} + +// Next waits until it is time to perform the next attempt or returns +// false if it is time to stop trying. +func (a *Attempt) Next() bool { + now := time.Now() + sleep := a.nextSleep(now) + if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { + return false + } + a.force = false + if sleep > 0 && a.count > 0 { + time.Sleep(sleep) + now = time.Now() + } + a.count++ + a.last = now + return true +} + +func (a *Attempt) nextSleep(now time.Time) time.Duration { + sleep := a.strategy.Delay - now.Sub(a.last) + if sleep < 0 { + return 0 + } + return sleep +} + +// HasNext returns whether another attempt will be made if the current +// one fails. If it returns true, the following call to Next is +// guaranteed to return true. +func (a *Attempt) HasNext() bool { + if a.force || a.strategy.Min > a.count { + return true + } + now := time.Now() + if now.Add(a.nextSleep(now)).Before(a.end) { + a.force = true + return true + } + return false +} diff --git a/transfersh-server/vendor/github.com/goamz/goamz/aws/aws.go b/transfersh-server/vendor/github.com/goamz/goamz/aws/aws.go new file mode 100644 index 0000000..5703858 --- /dev/null +++ b/transfersh-server/vendor/github.com/goamz/goamz/aws/aws.go @@ -0,0 +1,433 @@ +// +// goamz - Go packages to interact with the Amazon Web Services. +// +// https://wiki.ubuntu.com/goamz +// +// Copyright (c) 2011 Canonical Ltd. +// +// Written by Gustavo Niemeyer +// +package aws + +import ( + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "time" + + "github.com/vaughan0/go-ini" +) + +// Defines the valid signers +const ( + V2Signature = iota + V4Signature = iota + Route53Signature = iota +) + +// Defines the service endpoint and correct Signer implementation to use +// to sign requests for this endpoint +type ServiceInfo struct { + Endpoint string + Signer uint +} + +// Region defines the URLs where AWS services may be accessed. +// +// See http://goo.gl/d8BP1 for more details. +type Region struct { + Name string // the canonical name of this region. + EC2Endpoint string + S3Endpoint string + S3BucketEndpoint string // Not needed by AWS S3. Use ${bucket} for bucket name. + S3LocationConstraint bool // true if this region requires a LocationConstraint declaration. + S3LowercaseBucket bool // true if the region requires bucket names to be lower case. 
+ SDBEndpoint string + SESEndpoint string + SNSEndpoint string + SQSEndpoint string + IAMEndpoint string + ELBEndpoint string + DynamoDBEndpoint string + CloudWatchServicepoint ServiceInfo + AutoScalingEndpoint string + RDSEndpoint ServiceInfo + STSEndpoint string + CloudFormationEndpoint string + ECSEndpoint string + DynamoDBStreamsEndpoint string +} + +var Regions = map[string]Region{ + APNortheast.Name: APNortheast, + APSoutheast.Name: APSoutheast, + APSoutheast2.Name: APSoutheast2, + EUCentral.Name: EUCentral, + EUWest.Name: EUWest, + USEast.Name: USEast, + USWest.Name: USWest, + USWest2.Name: USWest2, + USGovWest.Name: USGovWest, + SAEast.Name: SAEast, + CNNorth.Name: CNNorth, +} + +// Designates a signer interface suitable for signing AWS requests, params +// should be appropriately encoded for the request before signing. +// +// A signer should be initialized with Auth and the appropriate endpoint. +type Signer interface { + Sign(method, path string, params map[string]string) +} + +// An AWS Service interface with the API to query the AWS service +// +// Supplied as an easy way to mock out service calls during testing. +type AWSService interface { + // Queries the AWS service at a given method/path with the params and + // returns an http.Response and error + Query(method, path string, params map[string]string) (*http.Response, error) + // Builds an error given an XML payload in the http.Response, can be used + // to process an error if the status code is not 200 for example. + BuildError(r *http.Response) error +} + +// Implements a Server Query/Post API to easily query AWS services and build +// errors when desired +type Service struct { + service ServiceInfo + signer Signer +} + +// Create a base set of params for an action +func MakeParams(action string) map[string]string { + params := make(map[string]string) + params["Action"] = action + return params +} + +// Create a new AWS server to handle making requests +func NewService(auth Auth, service ServiceInfo) (s *Service, err error) { + var signer Signer + switch service.Signer { + case V2Signature: + signer, err = NewV2Signer(auth, service) + // case V4Signature: + // signer, err = NewV4Signer(auth, service, Regions["eu-west-1"]) + default: + err = fmt.Errorf("Unsupported signer for service") + } + if err != nil { + return + } + s = &Service{service: service, signer: signer} + return +} + +func (s *Service) Query(method, path string, params map[string]string) (resp *http.Response, err error) { + params["Timestamp"] = time.Now().UTC().Format(time.RFC3339) + u, err := url.Parse(s.service.Endpoint) + if err != nil { + return nil, err + } + u.Path = path + + s.signer.Sign(method, path, params) + if method == "GET" { + u.RawQuery = multimap(params).Encode() + resp, err = http.Get(u.String()) + } else if method == "POST" { + resp, err = http.PostForm(u.String(), multimap(params)) + } + + return +} + +func (s *Service) BuildError(r *http.Response) error { + errors := ErrorResponse{} + xml.NewDecoder(r.Body).Decode(&errors) + var err Error + err = errors.Errors + err.RequestId = errors.RequestId + err.StatusCode = r.StatusCode + if err.Message == "" { + err.Message = r.Status + } + return &err +} + +type ErrorResponse struct { + Errors Error `xml:"Error"` + RequestId string // A unique ID for tracking the request +} + +type Error struct { + StatusCode int + Type string + Code string + Message string + RequestId string +} + +func (err *Error) Error() string { + return fmt.Sprintf("Type: %s, Code: %s, Message: %s", + err.Type, err.Code, 
err.Message,
+	)
+}
+
+type Auth struct {
+	AccessKey, SecretKey string
+	token                string
+	expiration           time.Time
+}
+
+func (a *Auth) Token() string {
+	if a.token == "" {
+		return ""
+	}
+	if time.Since(a.expiration) >= -30*time.Second { // in an ideal world this should be zero, assuming the instance is syncing its clock
+		*a, _ = GetAuth("", "", "", time.Time{})
+	}
+	return a.token
+}
+
+func (a *Auth) Expiration() time.Time {
+	return a.expiration
+}
+
+// To be used with other APIs that return auth credentials such as STS
+func NewAuth(accessKey, secretKey, token string, expiration time.Time) *Auth {
+	return &Auth{
+		AccessKey:  accessKey,
+		SecretKey:  secretKey,
+		token:      token,
+		expiration: expiration,
+	}
+}
+
+// ResponseMetadata
+type ResponseMetadata struct {
+	RequestId string // A unique ID for tracking the request
+}
+
+type BaseResponse struct {
+	ResponseMetadata ResponseMetadata
+}
+
+var unreserved = make([]bool, 128)
+var hex = "0123456789ABCDEF"
+
+func init() {
+	// RFC3986
+	u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
+	for _, c := range u {
+		unreserved[c] = true
+	}
+}
+
+func multimap(p map[string]string) url.Values {
+	q := make(url.Values, len(p))
+	for k, v := range p {
+		q[k] = []string{v}
+	}
+	return q
+}
+
+type credentials struct {
+	Code            string
+	LastUpdated     string
+	Type            string
+	AccessKeyId     string
+	SecretAccessKey string
+	Token           string
+	Expiration      string
+}
+
+// GetMetaData retrieves instance metadata about the current machine.
+//
+// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details.
+func GetMetaData(path string) (contents []byte, err error) {
+	url := "http://169.254.169.254/latest/meta-data/" + path
+
+	resp, err := RetryingClient.Get(url)
+	if err != nil {
+		return
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != 200 {
+		err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
+		return
+	}
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return
+	}
+	return []byte(body), err
+}
+
+func getInstanceCredentials() (cred credentials, err error) {
+	credentialPath := "iam/security-credentials/"
+
+	// Get the instance role
+	role, err := GetMetaData(credentialPath)
+	if err != nil {
+		return
+	}
+
+	// Get the instance role credentials
+	credentialJSON, err := GetMetaData(credentialPath + string(role))
+	if err != nil {
+		return
+	}
+
+	err = json.Unmarshal([]byte(credentialJSON), &cred)
+	return
+}
+
+// GetAuth creates an Auth based on either passed-in credentials,
+// environment information or instance-based role credentials.
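+//
+// A hedged usage sketch, not part of the original source (empty arguments
+// trigger the fallback chain of shared credentials file, environment
+// variables, then the instance role):
+//
+//	auth, err := GetAuth("", "", "", time.Time{})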
+func GetAuth(accessKey string, secretKey, token string, expiration time.Time) (auth Auth, err error) {
+	// First try passed-in credentials
+	if accessKey != "" && secretKey != "" {
+		return Auth{accessKey, secretKey, token, expiration}, nil
+	}
+
+	// Next try to get auth from the shared credentials file
+	auth, err = SharedAuth()
+	if err == nil {
+		// Found auth, return
+		return
+	}
+
+	// Next try to get auth from the environment
+	auth, err = EnvAuth()
+	if err == nil {
+		// Found auth, return
+		return
+	}
+
+	// Next try getting auth from the instance role
+	cred, err := getInstanceCredentials()
+	if err == nil {
+		// Found auth, return
+		auth.AccessKey = cred.AccessKeyId
+		auth.SecretKey = cred.SecretAccessKey
+		auth.token = cred.Token
+		exptdate, err := time.Parse("2006-01-02T15:04:05Z", cred.Expiration)
+		if err != nil {
+			err = fmt.Errorf("error parsing expiration date: cred.Expiration: %s, error: %s", cred.Expiration, err)
+		}
+		auth.expiration = exptdate
+		return auth, err
+	}
+	err = errors.New("No valid AWS authentication found")
+	return auth, err
+}
+
+// EnvAuth creates an Auth based on environment information.
+// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
+// variables are used.
+// AWS_SESSION_TOKEN is used if present.
+func EnvAuth() (auth Auth, err error) {
+	auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
+	if auth.AccessKey == "" {
+		auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
+	}
+
+	auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
+	if auth.SecretKey == "" {
+		auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
+	}
+	if auth.AccessKey == "" {
+		err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
+	}
+	if auth.SecretKey == "" {
+		err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
+	}
+
+	auth.token = os.Getenv("AWS_SESSION_TOKEN")
+	return
+}
+
+// SharedAuth creates an Auth based on shared credentials stored in
+// $HOME/.aws/credentials. The AWS_PROFILE environment variable is used to
+// select the profile.
+func SharedAuth() (auth Auth, err error) {
+	var profileName = os.Getenv("AWS_PROFILE")
+
+	if profileName == "" {
+		profileName = "default"
+	}
+
+	var credentialsFile = os.Getenv("AWS_CREDENTIAL_FILE")
+	if credentialsFile == "" {
+		var homeDir = os.Getenv("HOME")
+		if homeDir == "" {
+			err = errors.New("Could not get HOME")
+			return
+		}
+		credentialsFile = homeDir + "/.aws/credentials"
+	}
+
+	file, err := ini.LoadFile(credentialsFile)
+	if err != nil {
+		err = errors.New("Couldn't parse AWS credentials file")
+		return
+	}
+
+	var profile = file[profileName]
+	if profile == nil {
+		err = errors.New("Couldn't find profile in AWS credentials file")
+		return
+	}
+
+	auth.AccessKey = profile["aws_access_key_id"]
+	auth.SecretKey = profile["aws_secret_access_key"]
+	auth.token = profile["aws_session_token"]
+
+	if auth.AccessKey == "" {
+		err = errors.New("aws_access_key_id not found in credentials file")
+	}
+	if auth.SecretKey == "" {
+		err = errors.New("aws_secret_access_key not found in credentials file")
+	}
+	return
+}
+
+// Encode takes a string and URI-encodes it in a way suitable
+// to be used in AWS signatures.
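+//
+// For example, Encode("a b") yields "a%20b", while a string made up
+// entirely of unreserved characters is returned unchanged.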
+func Encode(s string) string {
+	encode := false
+	for i := 0; i != len(s); i++ {
+		c := s[i]
+		if c > 127 || !unreserved[c] {
+			encode = true
+			break
+		}
+	}
+	if !encode {
+		return s
+	}
+	e := make([]byte, len(s)*3)
+	ei := 0
+	for i := 0; i != len(s); i++ {
+		c := s[i]
+		if c > 127 || !unreserved[c] {
+			e[ei] = '%'
+			e[ei+1] = hex[c>>4]
+			e[ei+2] = hex[c&0xF]
+			ei += 3
+		} else {
+			e[ei] = c
+			ei += 1
+		}
+	}
+	return string(e[:ei])
+}
diff --git a/transfersh-server/vendor/github.com/goamz/goamz/aws/client.go b/transfersh-server/vendor/github.com/goamz/goamz/aws/client.go
new file mode 100644
index 0000000..86d2cce
--- /dev/null
+++ b/transfersh-server/vendor/github.com/goamz/goamz/aws/client.go
@@ -0,0 +1,124 @@
+package aws
+
+import (
+	"math"
+	"net"
+	"net/http"
+	"time"
+)
+
+type RetryableFunc func(*http.Request, *http.Response, error) bool
+type WaitFunc func(try int)
+type DeadlineFunc func() time.Time
+
+type ResilientTransport struct {
+	// Timeout is the maximum amount of time a dial will wait for
+	// a connect to complete.
+	//
+	// The default is no timeout.
+	//
+	// With or without a timeout, the operating system may impose
+	// its own earlier timeout. For instance, TCP timeouts are
+	// often around 3 minutes.
+	DialTimeout time.Duration
+
+	// MaxTries, if non-zero, specifies the number of times we will retry on
+	// failure. Retries are only attempted for temporary network errors or known
+	// safe failures.
+	MaxTries    int
+	Deadline    DeadlineFunc
+	ShouldRetry RetryableFunc
+	Wait        WaitFunc
+	transport   *http.Transport
+}
+
+// Convenience method for creating an http client
+func NewClient(rt *ResilientTransport) *http.Client {
+	rt.transport = &http.Transport{
+		Dial: func(netw, addr string) (net.Conn, error) {
+			c, err := net.DialTimeout(netw, addr, rt.DialTimeout)
+			if err != nil {
+				return nil, err
+			}
+			c.SetDeadline(rt.Deadline())
+			return c, nil
+		},
+		Proxy: http.ProxyFromEnvironment,
+	}
+	// TODO: Would be nice if ResilientTransport allowed clients to initialize
+	// with http.Transport attributes.
+	return &http.Client{
+		Transport: rt,
+	}
+}
+
+var retryingTransport = &ResilientTransport{
+	Deadline: func() time.Time {
+		return time.Now().Add(5 * time.Second)
+	},
+	DialTimeout: 10 * time.Second,
+	MaxTries:    3,
+	ShouldRetry: awsRetry,
+	Wait:        ExpBackoff,
+}
+
+// Exported default client
+var RetryingClient = NewClient(retryingTransport)
+
+func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	return t.tries(req)
+}
+
+// Retry a request a maximum of t.MaxTries times.
+// We'll only retry if the proper criteria are met.
+// If a wait function is specified, wait that amount of time
+// in between requests.
+func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) {
+	for try := 0; try < t.MaxTries; try += 1 {
+		res, err = t.transport.RoundTrip(req)
+
+		if !t.ShouldRetry(req, res, err) {
+			break
+		}
+		if res != nil {
+			res.Body.Close()
+		}
+		if t.Wait != nil {
+			t.Wait(try)
+		}
+	}
+
+	return
+}
+
+func ExpBackoff(try int) {
+	time.Sleep(100 * time.Millisecond *
+		time.Duration(math.Exp2(float64(try))))
+}
+
+func LinearBackoff(try int) {
+	time.Sleep(time.Duration(try*100) * time.Millisecond)
+}
+
+// Decide if we should retry a request.
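+// We retry on temporary network errors and on HTTP 5xx responses.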
+// In general, the criteria for retrying a request is described here +// http://docs.aws.amazon.com/general/latest/gr/api-retries.html +func awsRetry(req *http.Request, res *http.Response, err error) bool { + retry := false + + // Retry if there's a temporary network error. + if neterr, ok := err.(net.Error); ok { + if neterr.Temporary() { + retry = true + } + } + + // Retry if we get a 5xx series error. + if res != nil { + if res.StatusCode >= 500 && res.StatusCode < 600 { + retry = true + } + } + + return retry +} diff --git a/transfersh-server/vendor/github.com/goamz/goamz/aws/regions.go b/transfersh-server/vendor/github.com/goamz/goamz/aws/regions.go new file mode 100644 index 0000000..5e18f02 --- /dev/null +++ b/transfersh-server/vendor/github.com/goamz/goamz/aws/regions.go @@ -0,0 +1,254 @@ +package aws + +var USGovWest = Region{ + "us-gov-west-1", + "https://ec2.us-gov-west-1.amazonaws.com", + "https://s3-fips-us-gov-west-1.amazonaws.com", + "", + true, + true, + "", + "", + "https://sns.us-gov-west-1.amazonaws.com", + "https://sqs.us-gov-west-1.amazonaws.com", + "https://iam.us-gov.amazonaws.com", + "https://elasticloadbalancing.us-gov-west-1.amazonaws.com", + "https://dynamodb.us-gov-west-1.amazonaws.com", + ServiceInfo{"https://monitoring.us-gov-west-1.amazonaws.com", V2Signature}, + "https://autoscaling.us-gov-west-1.amazonaws.com", + ServiceInfo{"https://rds.us-gov-west-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.us-gov-west-1.amazonaws.com", + "https://ecs.us-gov-west-1.amazonaws.com", + "https://streams.dynamodb.us-gov-west-1.amazonaws.com", +} + +var USEast = Region{ + "us-east-1", + "https://ec2.us-east-1.amazonaws.com", + "https://s3.amazonaws.com", + "", + false, + false, + "https://sdb.amazonaws.com", + "https://email.us-east-1.amazonaws.com", + "https://sns.us-east-1.amazonaws.com", + "https://sqs.us-east-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.us-east-1.amazonaws.com", + "https://dynamodb.us-east-1.amazonaws.com", + ServiceInfo{"https://monitoring.us-east-1.amazonaws.com", V2Signature}, + "https://autoscaling.us-east-1.amazonaws.com", + ServiceInfo{"https://rds.us-east-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.us-east-1.amazonaws.com", + "https://ecs.us-east-1.amazonaws.com", + "https://streams.dynamodb.us-east-1.amazonaws.com", +} + +var USWest = Region{ + "us-west-1", + "https://ec2.us-west-1.amazonaws.com", + "https://s3-us-west-1.amazonaws.com", + "", + true, + true, + "https://sdb.us-west-1.amazonaws.com", + "", + "https://sns.us-west-1.amazonaws.com", + "https://sqs.us-west-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.us-west-1.amazonaws.com", + "https://dynamodb.us-west-1.amazonaws.com", + ServiceInfo{"https://monitoring.us-west-1.amazonaws.com", V2Signature}, + "https://autoscaling.us-west-1.amazonaws.com", + ServiceInfo{"https://rds.us-west-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.us-west-1.amazonaws.com", + "https://ecs.us-west-1.amazonaws.com", + "https://streams.dynamodb.us-west-1.amazonaws.com", +} + +var USWest2 = Region{ + "us-west-2", + "https://ec2.us-west-2.amazonaws.com", + "https://s3-us-west-2.amazonaws.com", + "", + true, + true, + "https://sdb.us-west-2.amazonaws.com", + "https://email.us-west-2.amazonaws.com", + "https://sns.us-west-2.amazonaws.com", + "https://sqs.us-west-2.amazonaws.com", + "https://iam.amazonaws.com", + 
"https://elasticloadbalancing.us-west-2.amazonaws.com", + "https://dynamodb.us-west-2.amazonaws.com", + ServiceInfo{"https://monitoring.us-west-2.amazonaws.com", V2Signature}, + "https://autoscaling.us-west-2.amazonaws.com", + ServiceInfo{"https://rds.us-west-2.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.us-west-2.amazonaws.com", + "https://ecs.us-west-2.amazonaws.com", + "https://streams.dynamodb.us-west-2.amazonaws.com", +} + +var EUWest = Region{ + "eu-west-1", + "https://ec2.eu-west-1.amazonaws.com", + "https://s3-eu-west-1.amazonaws.com", + "", + true, + true, + "https://sdb.eu-west-1.amazonaws.com", + "https://email.eu-west-1.amazonaws.com", + "https://sns.eu-west-1.amazonaws.com", + "https://sqs.eu-west-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.eu-west-1.amazonaws.com", + "https://dynamodb.eu-west-1.amazonaws.com", + ServiceInfo{"https://monitoring.eu-west-1.amazonaws.com", V2Signature}, + "https://autoscaling.eu-west-1.amazonaws.com", + ServiceInfo{"https://rds.eu-west-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.eu-west-1.amazonaws.com", + "https://ecs.eu-west-1.amazonaws.com", + "https://streams.dynamodb.eu-west-1.amazonaws.com", +} + +var EUCentral = Region{ + "eu-central-1", + "https://ec2.eu-central-1.amazonaws.com", + "https://s3-eu-central-1.amazonaws.com", + "", + true, + true, + "https://sdb.eu-central-1.amazonaws.com", + "https://email.eu-central-1.amazonaws.com", + "https://sns.eu-central-1.amazonaws.com", + "https://sqs.eu-central-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.eu-central-1.amazonaws.com", + "https://dynamodb.eu-central-1.amazonaws.com", + ServiceInfo{"https://monitoring.eu-central-1.amazonaws.com", V2Signature}, + "https://autoscaling.eu-central-1.amazonaws.com", + ServiceInfo{"https://rds.eu-central-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.eu-central-1.amazonaws.com", + "https://ecs.eu-central-1.amazonaws.com", + "https://streams.dynamodb.eu-central-1.amazonaws.com", +} + +var APSoutheast = Region{ + "ap-southeast-1", + "https://ec2.ap-southeast-1.amazonaws.com", + "https://s3-ap-southeast-1.amazonaws.com", + "", + true, + true, + "https://sdb.ap-southeast-1.amazonaws.com", + "", + "https://sns.ap-southeast-1.amazonaws.com", + "https://sqs.ap-southeast-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.ap-southeast-1.amazonaws.com", + "https://dynamodb.ap-southeast-1.amazonaws.com", + ServiceInfo{"https://monitoring.ap-southeast-1.amazonaws.com", V2Signature}, + "https://autoscaling.ap-southeast-1.amazonaws.com", + ServiceInfo{"https://rds.ap-southeast-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.ap-southeast-1.amazonaws.com", + "https://ecs.ap-southeast-1.amazonaws.com", + "https://streams.dynamodb.ap-southeast-1.amazonaws.com", +} + +var APSoutheast2 = Region{ + "ap-southeast-2", + "https://ec2.ap-southeast-2.amazonaws.com", + "https://s3-ap-southeast-2.amazonaws.com", + "", + true, + true, + "https://sdb.ap-southeast-2.amazonaws.com", + "", + "https://sns.ap-southeast-2.amazonaws.com", + "https://sqs.ap-southeast-2.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.ap-southeast-2.amazonaws.com", + "https://dynamodb.ap-southeast-2.amazonaws.com", + ServiceInfo{"https://monitoring.ap-southeast-2.amazonaws.com", V2Signature}, + 
"https://autoscaling.ap-southeast-2.amazonaws.com", + ServiceInfo{"https://rds.ap-southeast-2.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.ap-southeast-2.amazonaws.com", + "https://ecs.ap-southeast-2.amazonaws.com", + "https://streams.dynamodb.ap-southeast-2.amazonaws.com", +} + +var APNortheast = Region{ + "ap-northeast-1", + "https://ec2.ap-northeast-1.amazonaws.com", + "https://s3-ap-northeast-1.amazonaws.com", + "", + true, + true, + "https://sdb.ap-northeast-1.amazonaws.com", + "", + "https://sns.ap-northeast-1.amazonaws.com", + "https://sqs.ap-northeast-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.ap-northeast-1.amazonaws.com", + "https://dynamodb.ap-northeast-1.amazonaws.com", + ServiceInfo{"https://monitoring.ap-northeast-1.amazonaws.com", V2Signature}, + "https://autoscaling.ap-northeast-1.amazonaws.com", + ServiceInfo{"https://rds.ap-northeast-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.ap-northeast-1.amazonaws.com", + "https://ecs.ap-northeast-1.amazonaws.com", + "https://streams.dynamodb.ap-northeast-1.amazonaws.com", +} + +var SAEast = Region{ + "sa-east-1", + "https://ec2.sa-east-1.amazonaws.com", + "https://s3-sa-east-1.amazonaws.com", + "", + true, + true, + "https://sdb.sa-east-1.amazonaws.com", + "", + "https://sns.sa-east-1.amazonaws.com", + "https://sqs.sa-east-1.amazonaws.com", + "https://iam.amazonaws.com", + "https://elasticloadbalancing.sa-east-1.amazonaws.com", + "https://dynamodb.sa-east-1.amazonaws.com", + ServiceInfo{"https://monitoring.sa-east-1.amazonaws.com", V2Signature}, + "https://autoscaling.sa-east-1.amazonaws.com", + ServiceInfo{"https://rds.sa-east-1.amazonaws.com", V2Signature}, + "https://sts.amazonaws.com", + "https://cloudformation.sa-east-1.amazonaws.com", + "https://ecs.sa-east-1.amazonaws.com", + "https://streams.dynamodb.sa-east-1.amazonaws.com", +} + +var CNNorth = Region{ + "cn-north-1", + "https://ec2.cn-north-1.amazonaws.com.cn", + "https://s3.cn-north-1.amazonaws.com.cn", + "", + true, + true, + "https://sdb.cn-north-1.amazonaws.com.cn", + "", + "https://sns.cn-north-1.amazonaws.com.cn", + "https://sqs.cn-north-1.amazonaws.com.cn", + "https://iam.cn-north-1.amazonaws.com.cn", + "https://elasticloadbalancing.cn-north-1.amazonaws.com.cn", + "https://dynamodb.cn-north-1.amazonaws.com.cn", + ServiceInfo{"https://monitoring.cn-north-1.amazonaws.com.cn", V4Signature}, + "https://autoscaling.cn-north-1.amazonaws.com.cn", + ServiceInfo{"https://rds.cn-north-1.amazonaws.com.cn", V4Signature}, + "https://sts.cn-north-1.amazonaws.com.cn", + "https://cloudformation.cn-north-1.amazonaws.com.cn", + "https://ecs.cn-north-1.amazonaws.com.cn", + "https://streams.dynamodb.cn-north-1.amazonaws.com.cn", +} diff --git a/transfersh-server/vendor/github.com/goamz/goamz/aws/sign.go b/transfersh-server/vendor/github.com/goamz/goamz/aws/sign.go new file mode 100644 index 0000000..22ce078 --- /dev/null +++ b/transfersh-server/vendor/github.com/goamz/goamz/aws/sign.go @@ -0,0 +1,357 @@ +package aws + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "path" + "sort" + "strings" + "time" +) + +type V2Signer struct { + auth Auth + service ServiceInfo + host string +} + +var b64 = base64.StdEncoding + +func NewV2Signer(auth Auth, service ServiceInfo) (*V2Signer, error) { + u, err := url.Parse(service.Endpoint) + if err != nil { + return nil, err + } + return &V2Signer{auth: 
auth, service: service, host: u.Host}, nil
+}
+
+func (s *V2Signer) Sign(method, path string, params map[string]string) {
+	params["AWSAccessKeyId"] = s.auth.AccessKey
+	params["SignatureVersion"] = "2"
+	params["SignatureMethod"] = "HmacSHA256"
+	if s.auth.Token() != "" {
+		params["SecurityToken"] = s.auth.Token()
+	}
+
+	// AWS specifies that the parameters in a signed request must
+	// be provided in the natural order of the keys. This is distinct
+	// from the natural order of the encoded value of key=value.
+	// Percent and Equals affect the sorting order.
+	var keys, sarray []string
+	for k, _ := range params {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		sarray = append(sarray, Encode(k)+"="+Encode(params[k]))
+	}
+	joined := strings.Join(sarray, "&")
+	payload := method + "\n" + s.host + "\n" + path + "\n" + joined
+	hash := hmac.New(sha256.New, []byte(s.auth.SecretKey))
+	hash.Write([]byte(payload))
+	signature := make([]byte, b64.EncodedLen(hash.Size()))
+	b64.Encode(signature, hash.Sum(nil))
+
+	params["Signature"] = string(signature)
+}
+
+// Common date formats for signing requests
+const (
+	ISO8601BasicFormat      = "20060102T150405Z"
+	ISO8601BasicFormatShort = "20060102"
+)
+
+type Route53Signer struct {
+	auth Auth
+}
+
+func NewRoute53Signer(auth Auth) *Route53Signer {
+	return &Route53Signer{auth: auth}
+}
+
+// getCurrentDate fetches the date stamp from the aws servers to
+// ensure the auth headers are within 5 minutes of the server time
+func (s *Route53Signer) getCurrentDate() string {
+	response, err := http.Get("https://route53.amazonaws.com/date")
+	if err != nil {
+		fmt.Print("Unable to get date from amazon: ", err)
+		return ""
+	}
+
+	response.Body.Close()
+	return response.Header.Get("Date")
+}
+
+// getHeaderAuthorize creates the authorization signature based on the
+// date stamp and secret key
+func (s *Route53Signer) getHeaderAuthorize(message string) string {
+	hmacSha256 := hmac.New(sha256.New, []byte(s.auth.SecretKey))
+	hmacSha256.Write([]byte(message))
+	cryptedString := hmacSha256.Sum(nil)
+
+	return base64.StdEncoding.EncodeToString(cryptedString)
+}
+
+// Sign adds all the headers required by the AWS Route53 API to the request,
+// including the authorization
+func (s *Route53Signer) Sign(req *http.Request) {
+	date := s.getCurrentDate()
+	authHeader := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s",
+		s.auth.AccessKey, "HmacSHA256", s.getHeaderAuthorize(date))
+
+	req.Header.Set("Host", req.Host)
+	req.Header.Set("X-Amzn-Authorization", authHeader)
+	req.Header.Set("X-Amz-Date", date)
+	req.Header.Set("Content-Type", "application/xml")
+}
+
+/*
+The V4Signer encapsulates all of the functionality to sign a request with the AWS
+Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)
+*/
+type V4Signer struct {
+	auth        Auth
+	serviceName string
+	region      Region
+}
+
+/*
+Return a new instance of a V4Signer capable of signing AWS requests.
+*/
+func NewV4Signer(auth Auth, serviceName string, region Region) *V4Signer {
+	return &V4Signer{auth: auth, serviceName: serviceName, region: region}
+}
+
+/*
+Sign a request according to the AWS Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)
+
+The signed request will include an "x-amz-date" header with a current timestamp if a valid "x-amz-date"
+or "date" header was not available in the original request. In addition, AWS Signature Version 4 requires
+the "host" header to be a signed header, therefore the Sign method will manually set a "host" header from
+the request.Host.
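+
+A hedged usage sketch, not part of the original source (the service name,
+region and URL are illustrative):
+
+	signer := NewV4Signer(auth, "s3", USEast)
+	req, _ := http.NewRequest("GET", "https://s3.amazonaws.com/bucket/key", nil)
+	signer.Sign(req)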
+ +The signed request will include a new "Authorization" header indicating that the request has been signed. + +Any changes to the request after signing the request will invalidate the signature. +*/ +func (s *V4Signer) Sign(req *http.Request) { + req.Header.Set("host", req.Host) // host header must be included as a signed header + t := s.requestTime(req) // Get requst time + creq := s.canonicalRequest(req) // Build canonical request + sts := s.stringToSign(t, creq) // Build string to sign + signature := s.signature(t, sts) // Calculate the AWS Signature Version 4 + auth := s.authorization(req.Header, t, signature) // Create Authorization header value + req.Header.Set("Authorization", auth) // Add Authorization header to request + return +} + +/* +requestTime method will parse the time from the request "x-amz-date" or "date" headers. +If the "x-amz-date" header is present, that will take priority over the "date" header. +If neither header is defined or we are unable to parse either header as a valid date +then we will create a new "x-amz-date" header with the current time. +*/ +func (s *V4Signer) requestTime(req *http.Request) time.Time { + + // Get "x-amz-date" header + date := req.Header.Get("x-amz-date") + + // Attempt to parse as ISO8601BasicFormat + t, err := time.Parse(ISO8601BasicFormat, date) + if err == nil { + return t + } + + // Attempt to parse as http.TimeFormat + t, err = time.Parse(http.TimeFormat, date) + if err == nil { + req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) + return t + } + + // Get "date" header + date = req.Header.Get("date") + + // Attempt to parse as http.TimeFormat + t, err = time.Parse(http.TimeFormat, date) + if err == nil { + return t + } + + // Create a current time header to be used + t = time.Now().UTC() + req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) + return t +} + +/* +canonicalRequest method creates the canonical request according to Task 1 of the AWS Signature Version 4 Signing Process. 
+
+    CanonicalRequest =
+      HTTPRequestMethod + '\n' +
+      CanonicalURI + '\n' +
+      CanonicalQueryString + '\n' +
+      CanonicalHeaders + '\n' +
+      SignedHeaders + '\n' +
+      HexEncode(Hash(Payload))
+*/
+func (s *V4Signer) canonicalRequest(req *http.Request) string {
+	c := new(bytes.Buffer)
+	fmt.Fprintf(c, "%s\n", req.Method)
+	fmt.Fprintf(c, "%s\n", s.canonicalURI(req.URL))
+	fmt.Fprintf(c, "%s\n", s.canonicalQueryString(req.URL))
+	fmt.Fprintf(c, "%s\n\n", s.canonicalHeaders(req.Header))
+	fmt.Fprintf(c, "%s\n", s.signedHeaders(req.Header))
+	fmt.Fprintf(c, "%s", s.payloadHash(req))
+	return c.String()
+}
+
+func (s *V4Signer) canonicalURI(u *url.URL) string {
+	canonicalPath := u.RequestURI()
+	if u.RawQuery != "" {
+		canonicalPath = canonicalPath[:len(canonicalPath)-len(u.RawQuery)-1]
+	}
+	slash := strings.HasSuffix(canonicalPath, "/")
+	canonicalPath = path.Clean(canonicalPath)
+	if canonicalPath != "/" && slash {
+		canonicalPath += "/"
+	}
+	return canonicalPath
+}
+
+func (s *V4Signer) canonicalQueryString(u *url.URL) string {
+	var a []string
+	for k, vs := range u.Query() {
+		k = Encode(k)
+		for _, v := range vs {
+			if v == "" {
+				a = append(a, k+"=")
+			} else {
+				v = Encode(v)
+				a = append(a, k+"="+v)
+			}
+		}
+	}
+	sort.Strings(a)
+	return strings.Join(a, "&")
+}
+
+func (s *V4Signer) canonicalHeaders(h http.Header) string {
+	i, a := 0, make([]string, len(h))
+	for k, v := range h {
+		for j, w := range v {
+			v[j] = strings.Trim(w, " ")
+		}
+		sort.Strings(v)
+		a[i] = strings.ToLower(k) + ":" + strings.Join(v, ",")
+		i++
+	}
+	sort.Strings(a)
+	return strings.Join(a, "\n")
+}
+
+func (s *V4Signer) signedHeaders(h http.Header) string {
+	i, a := 0, make([]string, len(h))
+	for k := range h {
+		a[i] = strings.ToLower(k)
+		i++
+	}
+	sort.Strings(a)
+	return strings.Join(a, ";")
+}
+
+func (s *V4Signer) payloadHash(req *http.Request) string {
+	var b []byte
+	if req.Body == nil {
+		b = []byte("")
+	} else {
+		var err error
+		b, err = ioutil.ReadAll(req.Body)
+		if err != nil {
+			// TODO: I REALLY DON'T LIKE THIS PANIC!!!!
+			panic(err)
+		}
+	}
+	req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+	return s.hash(string(b))
+}
+
+/*
+stringToSign method creates the string to sign according to Task 2 of the AWS Signature Version 4 Signing Process. (http://goo.gl/es1PAu)
+
+    StringToSign =
+      Algorithm + '\n' +
+      RequestDate + '\n' +
+      CredentialScope + '\n' +
+      HexEncode(Hash(CanonicalRequest))
+*/
+func (s *V4Signer) stringToSign(t time.Time, creq string) string {
+	w := new(bytes.Buffer)
+	fmt.Fprint(w, "AWS4-HMAC-SHA256\n")
+	fmt.Fprintf(w, "%s\n", t.Format(ISO8601BasicFormat))
+	fmt.Fprintf(w, "%s\n", s.credentialScope(t))
+	fmt.Fprintf(w, "%s", s.hash(creq))
+	return w.String()
+}
+
+func (s *V4Signer) credentialScope(t time.Time) string {
+	return fmt.Sprintf("%s/%s/%s/aws4_request", t.Format(ISO8601BasicFormatShort), s.region.Name, s.serviceName)
+}
+
+/*
+signature method calculates the AWS Signature Version 4 according to Task 3 of the AWS Signature Version 4 Signing Process. (http://goo.gl/j0Yqe1)
+
+    signature = HexEncode(HMAC(derived-signing-key, string-to-sign))
+*/
+func (s *V4Signer) signature(t time.Time, sts string) string {
+	h := s.hmac(s.derivedKey(t), []byte(sts))
+	return fmt.Sprintf("%x", h)
+}
+
+/*
+derivedKey method derives a signing key to be used for signing a request.
+ + kSecret = Your AWS Secret Access Key + kDate = HMAC("AWS4" + kSecret, Date) + kRegion = HMAC(kDate, Region) + kService = HMAC(kRegion, Service) + kSigning = HMAC(kService, "aws4_request") +*/ +func (s *V4Signer) derivedKey(t time.Time) []byte { + h := s.hmac([]byte("AWS4"+s.auth.SecretKey), []byte(t.Format(ISO8601BasicFormatShort))) + h = s.hmac(h, []byte(s.region.Name)) + h = s.hmac(h, []byte(s.serviceName)) + h = s.hmac(h, []byte("aws4_request")) + return h +} + +/* +authorization method generates the authorization header value. +*/ +func (s *V4Signer) authorization(header http.Header, t time.Time, signature string) string { + w := new(bytes.Buffer) + fmt.Fprint(w, "AWS4-HMAC-SHA256 ") + fmt.Fprintf(w, "Credential=%s/%s, ", s.auth.AccessKey, s.credentialScope(t)) + fmt.Fprintf(w, "SignedHeaders=%s, ", s.signedHeaders(header)) + fmt.Fprintf(w, "Signature=%s", signature) + return w.String() +} + +// hash method calculates the sha256 hash for a given string +func (s *V4Signer) hash(in string) string { + h := sha256.New() + fmt.Fprintf(h, "%s", in) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +// hmac method calculates the sha256 hmac for a given slice of bytes +func (s *V4Signer) hmac(key, data []byte) []byte { + h := hmac.New(sha256.New, key) + h.Write(data) + return h.Sum(nil) +} diff --git a/transfersh-server/vendor/github.com/goamz/goamz/s3/LICENSE b/transfersh-server/vendor/github.com/goamz/goamz/s3/LICENSE new file mode 100644 index 0000000..53320c3 --- /dev/null +++ b/transfersh-server/vendor/github.com/goamz/goamz/s3/LICENSE @@ -0,0 +1,185 @@ +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. 
+ + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
diff --git a/transfersh-server/vendor/github.com/goamz/goamz/s3/multi.go b/transfersh-server/vendor/github.com/goamz/goamz/s3/multi.go new file mode 100644 index 0000000..348ead3 --- /dev/null +++ b/transfersh-server/vendor/github.com/goamz/goamz/s3/multi.go @@ -0,0 +1,439 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "errors" + "io" + "sort" + "strconv" +) + +// Multi represents an unfinished multipart upload. +// +// Multipart uploads allow sending big objects in smaller chunks. +// After all parts have been sent, the upload must be explicitly +// completed by calling Complete with the list of parts. +// +// See http://goo.gl/vJfTG for an overview of multipart uploads. +type Multi struct { + Bucket *Bucket + Key string + UploadId string +} + +// That's the default. Here just for testing. +var listMultiMax = 1000 + +type listMultiResp struct { + NextKeyMarker string + NextUploadIdMarker string + IsTruncated bool + Upload []Multi + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` +} + +// ListMulti returns the list of unfinished multipart uploads in b. +// +// The prefix parameter limits the response to keys that begin with the +// specified prefix. You can use prefixes to separate a bucket into different +// groupings of keys (to get the feeling of folders, for example). +// +// The delim parameter causes the response to group all of the keys that +// share a common prefix up to the next delimiter in a single entry within +// the CommonPrefixes field. You can use delimiters to separate a bucket +// into different groupings of keys, similar to how folders would work. +// +// See http://goo.gl/ePioY for details. +func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { + params := map[string][]string{ + "uploads": {""}, + "max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)}, + "prefix": {prefix}, + "delimiter": {delim}, + } + for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); { + req := &request{ + method: "GET", + bucket: b.Name, + params: params, + } + var resp listMultiResp + err := b.S3.query(req, &resp) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, nil, err + } + for i := range resp.Upload { + multi := &resp.Upload[i] + multi.Bucket = b + multis = append(multis, multi) + } + prefixes = append(prefixes, resp.CommonPrefixes...) + if !resp.IsTruncated { + return multis, prefixes, nil + } + params["key-marker"] = []string{resp.NextKeyMarker} + params["upload-id-marker"] = []string{resp.NextUploadIdMarker} + attempt = b.S3.AttemptStrategy.Start() // Last request worked. + } + panic("unreachable") +} + +// Multi returns a multipart upload handler for the provided key +// inside b. If a multipart upload exists for key, it is returned, +// otherwise a new multipart upload is initiated with contType and perm. +func (b *Bucket) Multi(key, contType string, perm ACL) (*Multi, error) { + multis, _, err := b.ListMulti(key, "") + if err != nil && !hasCode(err, "NoSuchUpload") { + return nil, err + } + for _, m := range multis { + if m.Key == key { + return m, nil + } + } + return b.InitMulti(key, contType, perm) +} + +// InitMulti initializes a new multipart upload at the provided +// key inside b and returns a value for manipulating it. +// +// See http://goo.gl/XP8kL for details. 
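+//
+// A sketch of the typical multipart flow (illustrative names; error handling
+// elided; each part except the last must be at least 5MB):
+//
+//	multi, _ := bucket.InitMulti("backups/big.tar", "application/octet-stream", Private)
+//	part, _ := multi.PutPart(1, bytes.NewReader(data))
+//	_ = multi.Complete([]Part{part})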
+func (b *Bucket) InitMulti(key string, contType string, perm ACL) (*Multi, error) { + headers := map[string][]string{ + "Content-Type": {contType}, + "Content-Length": {"0"}, + "x-amz-acl": {string(perm)}, + } + params := map[string][]string{ + "uploads": {""}, + } + req := &request{ + method: "POST", + bucket: b.Name, + path: key, + headers: headers, + params: params, + } + var err error + var resp struct { + UploadId string `xml:"UploadId"` + } + for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); { + err = b.S3.query(req, &resp) + if !shouldRetry(err) { + break + } + } + if err != nil { + return nil, err + } + return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil +} + +// PutPart sends part n of the multipart upload, reading all the content from r. +// Each part, except for the last one, must be at least 5MB in size. +// +// See http://goo.gl/pqZer for details. +func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { + partSize, _, md5b64, err := seekerInfo(r) + if err != nil { + return Part{}, err + } + return m.putPart(n, r, partSize, md5b64) +} + +func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { + headers := map[string][]string{ + "Content-Length": {strconv.FormatInt(partSize, 10)}, + "Content-MD5": {md5b64}, + } + params := map[string][]string{ + "uploadId": {m.UploadId}, + "partNumber": {strconv.FormatInt(int64(n), 10)}, + } + for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); { + _, err := r.Seek(0, 0) + if err != nil { + return Part{}, err + } + req := &request{ + method: "PUT", + bucket: m.Bucket.Name, + path: m.Key, + headers: headers, + params: params, + payload: r, + } + err = m.Bucket.S3.prepare(req) + if err != nil { + return Part{}, err + } + resp, err := m.Bucket.S3.run(req, nil) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return Part{}, err + } + etag := resp.Header.Get("ETag") + if etag == "" { + return Part{}, errors.New("part upload succeeded with no ETag") + } + return Part{n, etag, partSize}, nil + } + panic("unreachable") +} + +func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { + _, err = r.Seek(0, 0) + if err != nil { + return 0, "", "", err + } + digest := md5.New() + size, err = io.Copy(digest, r) + if err != nil { + return 0, "", "", err + } + sum := digest.Sum(nil) + md5hex = hex.EncodeToString(sum) + md5b64 = base64.StdEncoding.EncodeToString(sum) + return size, md5hex, md5b64, nil +} + +type Part struct { + N int `xml:"PartNumber"` + ETag string + Size int64 +} + +type partSlice []Part + +func (s partSlice) Len() int { return len(s) } +func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } +func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type listPartsResp struct { + NextPartNumberMarker string + IsTruncated bool + Part []Part +} + +// That's the default. Here just for testing. +var listPartsMax = 1000 + +// ListParts returns the list of previously uploaded parts in m, +// ordered by part number. +// +// See http://goo.gl/ePioY for details. 
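+//
+// For example, a caller resuming an interrupted upload might inspect what is
+// already stored before re-sending anything (sketch, error handling elided):
+//
+//	parts, _ := multi.ListParts()
+//	for _, p := range parts {
+//		fmt.Println(p.N, p.Size, p.ETag)
+//	}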
+func (m *Multi) ListParts() ([]Part, error) { + params := map[string][]string{ + "uploadId": {m.UploadId}, + "max-parts": {strconv.FormatInt(int64(listPartsMax), 10)}, + } + var parts partSlice + for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); { + req := &request{ + method: "GET", + bucket: m.Bucket.Name, + path: m.Key, + params: params, + } + var resp listPartsResp + err := m.Bucket.S3.query(req, &resp) + if shouldRetry(err) && attempt.HasNext() { + continue + } + if err != nil { + return nil, err + } + parts = append(parts, resp.Part...) + if !resp.IsTruncated { + sort.Sort(parts) + return parts, nil + } + params["part-number-marker"] = []string{resp.NextPartNumberMarker} + attempt = m.Bucket.S3.AttemptStrategy.Start() // Last request worked. + } + panic("unreachable") +} + +type ReaderAtSeeker interface { + io.ReaderAt + io.ReadSeeker +} + +// PutAll sends all of r via a multipart upload with parts no larger +// than partSize bytes, which must be set to at least 5MB. +// Parts previously uploaded are either reused if their checksum +// and size match the new part, or otherwise overwritten with the +// new content. +// PutAll returns all the parts of m (reused or not). +func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { + old, err := m.ListParts() + if err != nil && !hasCode(err, "NoSuchUpload") { + return nil, err + } + reuse := 0 // Index of next old part to consider reusing. + current := 1 // Part number of latest good part handled. + totalSize, err := r.Seek(0, 2) + if err != nil { + return nil, err + } + first := true // Must send at least one empty part if the file is empty. + var result []Part +NextSection: + for offset := int64(0); offset < totalSize || first; offset += partSize { + first = false + if offset+partSize > totalSize { + partSize = totalSize - offset + } + section := io.NewSectionReader(r, offset, partSize) + _, md5hex, md5b64, err := seekerInfo(section) + if err != nil { + return nil, err + } + for reuse < len(old) && old[reuse].N <= current { + // Looks like this part was already sent. + part := &old[reuse] + etag := `"` + md5hex + `"` + if part.N == current && part.Size == partSize && part.ETag == etag { + // Checksum matches. Reuse the old part. + result = append(result, *part) + current++ + continue NextSection + } + reuse++ + } + + // Part wasn't found or doesn't match. Send it. + part, err := m.putPart(current, section, partSize, md5b64) + if err != nil { + return nil, err + } + result = append(result, part) + current++ + } + return result, nil +} + +type completeUpload struct { + XMLName xml.Name `xml:"CompleteMultipartUpload"` + Parts completeParts `xml:"Part"` +} + +type completePart struct { + PartNumber int + ETag string +} + +type completeParts []completePart + +func (p completeParts) Len() int { return len(p) } +func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } +func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type completeResponse struct { + // The element name: should be either CompleteMultipartUploadResult or Error. + XMLName xml.Name + // If the element was error, then it should have the following: + Code string + Message string + RequestId string + HostId string +} + +// Complete assembles the given previously uploaded parts into the +// final object. This operation may take several minutes. 
+//
+// The complete call to AMZ may still fail after returning HTTP 200,
+// so even though it's unused, the body of the reply must be unmarshalled
+// and checked to see whether or not the complete succeeded.
+//
+// See http://goo.gl/2Z7Tw for details.
+func (m *Multi) Complete(parts []Part) error {
+	params := map[string][]string{
+		"uploadId": {m.UploadId},
+	}
+	c := completeUpload{}
+	for _, p := range parts {
+		c.Parts = append(c.Parts, completePart{p.N, p.ETag})
+	}
+	sort.Sort(c.Parts)
+	data, err := xml.Marshal(&c)
+	if err != nil {
+		return err
+	}
+
+	// Setting Content-Length prevents breakage on DreamObjects
+	for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
+		req := &request{
+			method:  "POST",
+			bucket:  m.Bucket.Name,
+			path:    m.Key,
+			params:  params,
+			payload: bytes.NewReader(data),
+			headers: map[string][]string{
+				"Content-Length": []string{strconv.Itoa(len(data))},
+			},
+		}
+
+		resp := &completeResponse{}
+		err := m.Bucket.S3.query(req, resp)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err == nil && resp.XMLName.Local == "Error" {
+			err = &Error{
+				StatusCode: 200,
+				Code:       resp.Code,
+				Message:    resp.Message,
+				RequestId:  resp.RequestId,
+				HostId:     resp.HostId,
+			}
+		}
+		return err
+	}
+	panic("unreachable")
+}
+
+// Abort deletes an unfinished multipart upload and any previously
+// uploaded parts for it.
+//
+// After a multipart upload is aborted, no additional parts can be
+// uploaded using it. However, if any part uploads are currently in
+// progress, those part uploads might or might not succeed. As a result,
+// it might be necessary to abort a given multipart upload multiple
+// times in order to completely free all storage consumed by all parts.
+//
+// NOTE: If the described scenario happens to you, please report back to
+// the goamz authors with details. In the future such retrying should be
+// handled internally, but it's not clear what happens precisely (Is an
+// error returned? Is the issue completely undetectable?).
+//
+// See http://goo.gl/dnyJw for details.
+func (m *Multi) Abort() error {
+	params := map[string][]string{
+		"uploadId": {m.UploadId},
+	}
+	for attempt := m.Bucket.S3.AttemptStrategy.Start(); attempt.Next(); {
+		req := &request{
+			method: "DELETE",
+			bucket: m.Bucket.Name,
+			path:   m.Key,
+			params: params,
+		}
+		err := m.Bucket.S3.query(req, nil)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		return err
+	}
+	panic("unreachable")
+}
diff --git a/transfersh-server/vendor/github.com/goamz/goamz/s3/s3.go b/transfersh-server/vendor/github.com/goamz/goamz/s3/s3.go
new file mode 100644
index 0000000..c659aa6
--- /dev/null
+++ b/transfersh-server/vendor/github.com/goamz/goamz/s3/s3.go
@@ -0,0 +1,1161 @@
+//
+// goamz - Go packages to interact with the Amazon Web Services.
+//
+//   https://wiki.ubuntu.com/goamz
+//
+// Copyright (c) 2011 Canonical Ltd.
+//
+// Written by Gustavo Niemeyer
+//
+
+package s3
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/md5"
+	"crypto/sha1"
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/goamz/goamz/aws"
+)
+
+const debug = false
+
+// The S3 type encapsulates operations with an S3 region.
+type S3 struct {
+	aws.Auth
+	aws.Region
+
+	// ConnectTimeout is the maximum time a request attempt will
+	// wait for a successful connection to be made.
+	//
+	// A value of zero means no timeout.
+ ConnectTimeout time.Duration + + // ReadTimeout is the maximum time a request attempt will wait + // for an individual read to complete. + // + // A value of zero means no timeout. + ReadTimeout time.Duration + + // WriteTimeout is the maximum time a request attempt will + // wait for an individual write to complete. + // + // A value of zero means no timeout. + WriteTimeout time.Duration + + // RequestTimeout is the maximum time a request attempt can + // take before operations return a timeout error. + // + // This includes connection time, any redirects, and reading + // the response body. The timer remains running after the request + // is made so it can interrupt reading of the response data. + // + // A Timeout of zero means no timeout. + RequestTimeout time.Duration + + // AttemptStrategy is the attempt strategy used for requests. + aws.AttemptStrategy + + // Reserve the right of using private data. + private byte + + // client used for requests + client *http.Client +} + +// The Bucket type encapsulates operations with an S3 bucket. +type Bucket struct { + *S3 + Name string +} + +// The Owner type represents the owner of the object in an S3 bucket. +type Owner struct { + ID string + DisplayName string +} + +// Fold options into an Options struct +// +type Options struct { + SSE bool + Meta map[string][]string + ContentEncoding string + CacheControl string + RedirectLocation string + ContentMD5 string + // What else? + // Content-Disposition string + //// The following become headers so they are []strings rather than strings... I think + // x-amz-storage-class []string +} + +type CopyOptions struct { + Options + MetadataDirective string + ContentType string +} + +// CopyObjectResult is the output from a Copy request +type CopyObjectResult struct { + ETag string + LastModified string +} + +// DefaultAttemptStrategy is the default AttemptStrategy used by S3 objects created by New. +var DefaultAttemptStrategy = aws.AttemptStrategy{ + Min: 5, + Total: 5 * time.Second, + Delay: 200 * time.Millisecond, +} + +// New creates a new S3. Optional client argument allows for custom http.clients to be used. +func New(auth aws.Auth, region aws.Region, client ...*http.Client) *S3 { + + var httpclient *http.Client + + if len(client) > 0 { + httpclient = client[0] + } + + return &S3{Auth: auth, Region: region, AttemptStrategy: DefaultAttemptStrategy, client: httpclient} +} + +// Bucket returns a Bucket with the given name. +func (s3 *S3) Bucket(name string) *Bucket { + if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket { + name = strings.ToLower(name) + } + return &Bucket{s3, name} +} + +var createBucketConfiguration = ` + %s +` + +// locationConstraint returns an io.Reader specifying a LocationConstraint if +// required for the region. +// +// See http://goo.gl/bh9Kq for details. +func (s3 *S3) locationConstraint() io.Reader { + constraint := "" + if s3.Region.S3LocationConstraint { + constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name) + } + return strings.NewReader(constraint) +} + +type ACL string + +const ( + Private = ACL("private") + PublicRead = ACL("public-read") + PublicReadWrite = ACL("public-read-write") + AuthenticatedRead = ACL("authenticated-read") + BucketOwnerRead = ACL("bucket-owner-read") + BucketOwnerFull = ACL("bucket-owner-full-control") +) + +// PutBucket creates a new bucket. +// +// See http://goo.gl/ndjnR for details. 
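+//
+// A minimal sketch (assuming existing aws.Auth and aws.Region values):
+//
+//	b := New(auth, region).Bucket("my-new-bucket")
+//	err := b.PutBucket(Private)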
+func (b *Bucket) PutBucket(perm ACL) error {
+	headers := map[string][]string{
+		"x-amz-acl": {string(perm)},
+	}
+	req := &request{
+		method:  "PUT",
+		bucket:  b.Name,
+		path:    "/",
+		headers: headers,
+		payload: b.locationConstraint(),
+	}
+	return b.S3.query(req, nil)
+}
+
+// DelBucket removes an existing S3 bucket. All objects in the bucket must
+// be removed before the bucket itself can be removed.
+//
+// See http://goo.gl/GoBrY for details.
+func (b *Bucket) DelBucket() (err error) {
+	req := &request{
+		method: "DELETE",
+		bucket: b.Name,
+		path:   "/",
+	}
+	for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+		err = b.S3.query(req, nil)
+		if !shouldRetry(err) {
+			break
+		}
+	}
+	return err
+}
+
+// Get retrieves an object from an S3 bucket.
+//
+// See http://goo.gl/isCO7 for details.
+func (b *Bucket) Get(path string) (data []byte, err error) {
+	body, err := b.GetReader(path)
+	defer func() {
+		if body != nil {
+			body.Close()
+		}
+	}()
+	if err != nil {
+		return nil, err
+	}
+	data, err = ioutil.ReadAll(body)
+	return data, err
+}
+
+// GetReader retrieves an object from an S3 bucket,
+// returning the body of the HTTP response.
+// It is the caller's responsibility to call Close on rc when
+// finished reading.
+func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) {
+	resp, err := b.GetResponse(path)
+	if resp != nil {
+		return resp.Body, err
+	}
+	return nil, err
+}
+
+// GetResponse retrieves an object from an S3 bucket,
+// returning the HTTP response.
+// It is the caller's responsibility to call Close on the response body when
+// finished reading.
+func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) {
+	return b.GetResponseWithHeaders(path, make(http.Header))
+}
+
+// GetResponseWithHeaders retrieves an object from an S3 bucket,
+// accepting custom headers to be sent as the second parameter and
+// returning the HTTP response.
+// It is the caller's responsibility to call Close on the response body when
+// finished reading.
+func (b *Bucket) GetResponseWithHeaders(path string, headers map[string][]string) (resp *http.Response, err error) {
+	req := &request{
+		bucket:  b.Name,
+		path:    path,
+		headers: headers,
+	}
+	err = b.S3.prepare(req)
+	if err != nil {
+		return nil, err
+	}
+	for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+		resp, err := b.S3.run(req, nil)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		return resp, nil
+	}
+	panic("unreachable")
+}
+
+// Exists checks whether or not an object exists on an S3 bucket using a HEAD request.
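+//
+// For example (sketch):
+//
+//	found, err := bucket.Exists("photos/2006/January/sample.jpg")
+//	if err == nil && !found {
+//		// object absent, or access to it was denied (403/404)
+//	}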
+func (b *Bucket) Exists(path string) (exists bool, err error) {
+	req := &request{
+		method: "HEAD",
+		bucket: b.Name,
+		path:   path,
+	}
+	err = b.S3.prepare(req)
+	if err != nil {
+		return
+	}
+	for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+		resp, err := b.S3.run(req, nil)
+
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+
+		if err != nil {
+			// We can treat a 403 or 404 as non-existence
+			if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
+				return false, nil
+			}
+			return false, err
+		}
+
+		if resp.StatusCode/100 == 2 {
+			exists = true
+		}
+		return exists, err
+	}
+	return false, fmt.Errorf("S3 Currently Unreachable")
+}
+
+// Head HEADs an object in the S3 bucket and returns the response with
+// no body; see http://bit.ly/17K1ylI
+func (b *Bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
+	req := &request{
+		method:  "HEAD",
+		bucket:  b.Name,
+		path:    path,
+		headers: headers,
+	}
+	err := b.S3.prepare(req)
+	if err != nil {
+		return nil, err
+	}
+
+	for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+		resp, err := b.S3.run(req, nil)
+		if shouldRetry(err) && attempt.HasNext() {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		return resp, err
+	}
+	return nil, fmt.Errorf("S3 Currently Unreachable")
+}
+
+// Put inserts an object into the S3 bucket.
+//
+// See http://goo.gl/FEBPD for details.
+func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error {
+	body := bytes.NewBuffer(data)
+	return b.PutReader(path, body, int64(len(data)), contType, perm, options)
+}
+
+// PutCopy puts a copy of the object given by source into bucket b,
+// using path as the target key.
+func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (result *CopyObjectResult, err error) {
+	headers := map[string][]string{
+		"x-amz-acl":         {string(perm)},
+		"x-amz-copy-source": {source},
+	}
+	options.addHeaders(headers)
+	req := &request{
+		method:  "PUT",
+		bucket:  b.Name,
+		path:    path,
+		headers: headers,
+	}
+	result = &CopyObjectResult{}
+	for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+		err = b.S3.query(req, result)
+		if !shouldRetry(err) {
+			break
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+/*
+PutHeader - like Put, inserts an object into the S3 bucket.
+Instead of a Content-Type string, pass in custom headers to override defaults.
+*/
+func (b *Bucket) PutHeader(path string, data []byte, customHeaders map[string][]string, perm ACL) error {
+	body := bytes.NewBuffer(data)
+	return b.PutReaderHeader(path, body, int64(len(data)), customHeaders, perm)
+}
+
+// PutReader inserts an object into the S3 bucket by consuming data
+// from r until EOF.
+func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error {
+	headers := map[string][]string{
+		"Content-Length": {strconv.FormatInt(length, 10)},
+		"Content-Type":   {contType},
+		"x-amz-acl":      {string(perm)},
+	}
+	options.addHeaders(headers)
+	req := &request{
+		method:  "PUT",
+		bucket:  b.Name,
+		path:    path,
+		headers: headers,
+		payload: r,
+	}
+	return b.S3.query(req, nil)
+}
+
+/*
+PutReaderHeader - like PutReader, inserts an object into S3 from a reader.
+Instead of a Content-Type string, pass in custom headers to override defaults.
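+
+A sketch (illustrative values; file and size are assumed to exist):
+
+	headers := map[string][]string{"Content-Type": {"image/png"}}
+	err := b.PutReaderHeader("pics/logo.png", file, size, headers, PublicRead)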
+*/ +func (b *Bucket) PutReaderHeader(path string, r io.Reader, length int64, customHeaders map[string][]string, perm ACL) error { + // Default headers + headers := map[string][]string{ + "Content-Length": {strconv.FormatInt(length, 10)}, + "Content-Type": {"application/text"}, + "x-amz-acl": {string(perm)}, + } + + // Override with custom headers + for key, value := range customHeaders { + headers[key] = value + } + + req := &request{ + method: "PUT", + bucket: b.Name, + path: path, + headers: headers, + payload: r, + } + return b.S3.query(req, nil) +} + +// addHeaders adds o's specified fields to headers +func (o Options) addHeaders(headers map[string][]string) { + if o.SSE { + headers["x-amz-server-side-encryption"] = []string{"AES256"} + } + if len(o.ContentEncoding) != 0 { + headers["Content-Encoding"] = []string{o.ContentEncoding} + } + if len(o.CacheControl) != 0 { + headers["Cache-Control"] = []string{o.CacheControl} + } + if len(o.ContentMD5) != 0 { + headers["Content-MD5"] = []string{o.ContentMD5} + } + if len(o.RedirectLocation) != 0 { + headers["x-amz-website-redirect-location"] = []string{o.RedirectLocation} + } + for k, v := range o.Meta { + headers["x-amz-meta-"+k] = v + } +} + +// addHeaders adds o's specified fields to headers +func (o CopyOptions) addHeaders(headers map[string][]string) { + o.Options.addHeaders(headers) + if len(o.MetadataDirective) != 0 { + headers["x-amz-metadata-directive"] = []string{o.MetadataDirective} + } + if len(o.ContentType) != 0 { + headers["Content-Type"] = []string{o.ContentType} + } +} + +func makeXmlBuffer(doc []byte) *bytes.Buffer { + buf := new(bytes.Buffer) + buf.WriteString(xml.Header) + buf.Write(doc) + return buf +} + +type RoutingRule struct { + ConditionKeyPrefixEquals string `xml:"Condition>KeyPrefixEquals"` + RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"` + RedirectReplaceKeyWith string `xml:"Redirect>ReplaceKeyWith,omitempty"` +} + +type WebsiteConfiguration struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ WebsiteConfiguration"` + IndexDocumentSuffix string `xml:"IndexDocument>Suffix"` + ErrorDocumentKey string `xml:"ErrorDocument>Key"` + RoutingRules *[]RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` +} + +func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error { + + doc, err := xml.Marshal(configuration) + if err != nil { + return err + } + + buf := makeXmlBuffer(doc) + + return b.PutBucketSubresource("website", buf, int64(buf.Len())) +} + +func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error { + headers := map[string][]string{ + "Content-Length": {strconv.FormatInt(length, 10)}, + } + req := &request{ + path: "/", + method: "PUT", + bucket: b.Name, + headers: headers, + payload: r, + params: url.Values{subresource: {""}}, + } + + return b.S3.query(req, nil) +} + +// Del removes an object from the S3 bucket. +// +// See http://goo.gl/APeTt for details. +func (b *Bucket) Del(path string) error { + req := &request{ + method: "DELETE", + bucket: b.Name, + path: path, + } + return b.S3.query(req, nil) +} + +type Delete struct { + Quiet bool `xml:"Quiet,omitempty"` + Objects []Object `xml:"Object"` +} + +type Object struct { + Key string `xml:"Key"` + VersionId string `xml:"VersionId,omitempty"` +} + +// DelMulti removes up to 1000 objects from the S3 bucket. +// +// See http://goo.gl/jx6cWK for details. 
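+//
+// For example (sketch):
+//
+//	del := Delete{Quiet: true, Objects: []Object{{Key: "a.txt"}, {Key: "b.txt"}}}
+//	err := bucket.DelMulti(del)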
+func (b *Bucket) DelMulti(objects Delete) error {
+	doc, err := xml.Marshal(objects)
+	if err != nil {
+		return err
+	}
+
+	buf := makeXmlBuffer(doc)
+	digest := md5.New()
+	size, err := digest.Write(buf.Bytes())
+	if err != nil {
+		return err
+	}
+
+	headers := map[string][]string{
+		"Content-Length": {strconv.FormatInt(int64(size), 10)},
+		"Content-MD5":    {base64.StdEncoding.EncodeToString(digest.Sum(nil))},
+		"Content-Type":   {"text/xml"},
+	}
+	req := &request{
+		path:    "/",
+		method:  "POST",
+		params:  url.Values{"delete": {""}},
+		bucket:  b.Name,
+		headers: headers,
+		payload: buf,
+	}
+
+	return b.S3.query(req, nil)
+}
+
+// The ListResp type holds the results of a List bucket operation.
+type ListResp struct {
+	Name       string
+	Prefix     string
+	Delimiter  string
+	Marker     string
+	NextMarker string
+	MaxKeys    int
+
+	// IsTruncated is true if the results have been truncated because
+	// there are more keys and prefixes than can fit in MaxKeys.
+	// N.B. this is the opposite sense to that documented (incorrectly) in
+	// http://goo.gl/YjQTc
+	IsTruncated    bool
+	Contents       []Key
+	CommonPrefixes []string `xml:">Prefix"`
+}
+
+// The Key type represents an item stored in an S3 bucket.
+type Key struct {
+	Key          string
+	LastModified string
+	Size         int64
+	// ETag gives the hex-encoded MD5 sum of the contents,
+	// surrounded with double-quotes.
+	ETag         string
+	StorageClass string
+	Owner        Owner
+}
+
+// List returns information about objects in an S3 bucket.
+//
+// The prefix parameter limits the response to keys that begin with the
+// specified prefix.
+//
+// The delim parameter causes the response to group all of the keys that
+// share a common prefix up to the next delimiter in a single entry within
+// the CommonPrefixes field. You can use delimiters to separate a bucket
+// into different groupings of keys, similar to how folders would work.
+//
+// The marker parameter specifies the key to start with when listing objects
+// in a bucket. Amazon S3 lists objects in alphabetical order and
+// will return keys alphabetically greater than the marker.
+//
+// The max parameter specifies how many keys + common prefixes to return in
+// the response. The default is 1000.
+//
+// For example, given these keys in a bucket:
+//
+//	index.html
+//	index2.html
+//	photos/2006/January/sample.jpg
+//	photos/2006/February/sample2.jpg
+//	photos/2006/February/sample3.jpg
+//	photos/2006/February/sample4.jpg
+//
+// Listing this bucket with delimiter set to "/" would yield the
+// following result:
+//
+//	&ListResp{
+//		Name:      "sample-bucket",
+//		MaxKeys:   1000,
+//		Delimiter: "/",
+//		Contents: []Key{
+//			{Key: "index.html"}, {Key: "index2.html"},
+//		},
+//		CommonPrefixes: []string{
+//			"photos/",
+//		},
+//	}
+//
+// Listing the same bucket with delimiter set to "/" and prefix set to
+// "photos/2006/" would yield the following result:
+//
+//	&ListResp{
+//		Name:      "sample-bucket",
+//		MaxKeys:   1000,
+//		Delimiter: "/",
+//		Prefix:    "photos/2006/",
+//		CommonPrefixes: []string{
+//			"photos/2006/February/",
+//			"photos/2006/January/",
+//		},
+//	}
+//
+// See http://goo.gl/YjQTc for details.
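+//
+// Because at most max keys and common prefixes are returned per call, callers
+// typically page with the marker (sketch, error handling elided):
+//
+//	marker := ""
+//	for {
+//		resp, _ := bucket.List("photos/", "/", marker, 1000)
+//		// ... use resp.Contents and resp.CommonPrefixes ...
+//		if !resp.IsTruncated {
+//			break
+//		}
+//		marker = resp.NextMarker
+//	}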
+func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) {
+	params := map[string][]string{
+		"prefix":    {prefix},
+		"delimiter": {delim},
+		"marker":    {marker},
+	}
+	if max != 0 {
+		params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
+	}
+	req := &request{
+		bucket: b.Name,
+		params: params,
+	}
+	result = &ListResp{}
+	for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+		err = b.S3.query(req, result)
+		if !shouldRetry(err) {
+			break
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// The VersionsResp type holds the results of a list bucket Versions operation.
+type VersionsResp struct {
+	Name            string
+	Prefix          string
+	KeyMarker       string
+	VersionIdMarker string
+	MaxKeys         int
+	Delimiter       string
+	IsTruncated     bool
+	Versions        []Version
+	CommonPrefixes  []string `xml:">Prefix"`
+}
+
+// The Version type represents an object version stored in an S3 bucket.
+type Version struct {
+	Key          string
+	VersionId    string
+	IsLatest     bool
+	LastModified string
+	// ETag gives the hex-encoded MD5 sum of the contents,
+	// surrounded with double-quotes.
+	ETag         string
+	Size         int64
+	Owner        Owner
+	StorageClass string
+}
+
+func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) {
+	params := map[string][]string{
+		"versions":  {""},
+		"prefix":    {prefix},
+		"delimiter": {delim},
+	}
+
+	if len(versionIdMarker) != 0 {
+		params["version-id-marker"] = []string{versionIdMarker}
+	}
+	if len(keyMarker) != 0 {
+		params["key-marker"] = []string{keyMarker}
+	}
+
+	if max != 0 {
+		params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
+	}
+	req := &request{
+		bucket: b.Name,
+		params: params,
+	}
+	result = &VersionsResp{}
+	for attempt := b.S3.AttemptStrategy.Start(); attempt.Next(); {
+		err = b.S3.query(req, result)
+		if !shouldRetry(err) {
+			break
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// GetBucketContents returns a mapping of all key names in this bucket to Key objects
+func (b *Bucket) GetBucketContents() (*map[string]Key, error) {
+	bucket_contents := map[string]Key{}
+	prefix := ""
+	path_separator := ""
+	marker := ""
+	for {
+		contents, err := b.List(prefix, path_separator, marker, 1000)
+		if err != nil {
+			return &bucket_contents, err
+		}
+		for _, key := range contents.Contents {
+			bucket_contents[key.Key] = key
+		}
+		if contents.IsTruncated {
+			marker = contents.NextMarker
+		} else {
+			break
+		}
+	}
+
+	return &bucket_contents, nil
+}
+
+// URL returns a non-signed URL that allows retrieving the
+// object at path. It only works if the object is publicly
+// readable (see SignedURL).
+func (b *Bucket) URL(path string) string {
+	req := &request{
+		bucket: b.Name,
+		path:   path,
+	}
+	err := b.S3.prepare(req)
+	if err != nil {
+		panic(err)
+	}
+	u, err := req.url()
+	if err != nil {
+		panic(err)
+	}
+	u.RawQuery = ""
+	return u.String()
+}
+
+// SignedURL returns a signed URL that allows anyone holding the URL
+// to retrieve the object at path. The signature is valid until expires.
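+//
+// For example (sketch, assuming the time package is imported):
+//
+//	u := bucket.SignedURL("private/report.pdf", time.Now().Add(15*time.Minute))
+//	// u may now be handed to a client for a time-limited GET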
+func (b *Bucket) SignedURL(path string, expires time.Time) string {
+	req := &request{
+		bucket: b.Name,
+		path:   path,
+		params: url.Values{"Expires": {strconv.FormatInt(expires.Unix(), 10)}},
+	}
+	err := b.S3.prepare(req)
+	if err != nil {
+		panic(err)
+	}
+	u, err := req.url()
+	if err != nil {
+		panic(err)
+	}
+	if b.S3.Auth.Token() != "" {
+		return u.String() + "&x-amz-security-token=" + url.QueryEscape(req.headers["X-Amz-Security-Token"][0])
+	} else {
+		return u.String()
+	}
+}
+
+// UploadSignedURL returns a signed URL that allows anyone holding the URL
+// to upload the object at path. The signature is valid until expires.
+// content_type is a string like image/png
+// path is the resource name in S3 terminology, like images/ali.png [obviously excluding the bucket name itself]
+func (b *Bucket) UploadSignedURL(path, method, content_type string, expires time.Time) string {
+	expire_date := expires.Unix()
+	if method != "POST" {
+		method = "PUT"
+	}
+	stringToSign := method + "\n\n" + content_type + "\n" + strconv.FormatInt(expire_date, 10) + "\n/" + b.Name + "/" + path
+	fmt.Println("String to sign:\n", stringToSign)
+	a := b.S3.Auth
+	secretKey := a.SecretKey
+	accessId := a.AccessKey
+	mac := hmac.New(sha1.New, []byte(secretKey))
+	mac.Write([]byte(stringToSign))
+	macsum := mac.Sum(nil)
+	signature := base64.StdEncoding.EncodeToString(macsum)
+	signature = strings.TrimSpace(signature)
+
+	signedurl, err := url.Parse("https://" + b.Name + ".s3.amazonaws.com/")
+	if err != nil {
+		log.Println("ERROR signing url for S3 upload", err)
+		return ""
+	}
+	signedurl.Path += path
+	params := url.Values{}
+	params.Add("AWSAccessKeyId", accessId)
+	params.Add("Expires", strconv.FormatInt(expire_date, 10))
+	params.Add("Signature", signature)
+	if a.Token() != "" {
+		params.Add("token", a.Token())
+	}
+
+	signedurl.RawQuery = params.Encode()
+	return signedurl.String()
+}
+
+// PostFormArgs returns the action and input fields needed to allow anonymous
+// uploads to a bucket within the expiration limit
+func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) {
+	conditions := make([]string, 0)
+	fields = map[string]string{
+		"AWSAccessKeyId": b.Auth.AccessKey,
+		"key":            path,
+	}
+
+	conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path))
+	conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name))
+	if redirect != "" {
+		conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect))
+		fields["success_action_redirect"] = redirect
+	}
+
+	vExpiration := expires.Format("2006-01-02T15:04:05Z")
+	vConditions := strings.Join(conditions, ",")
+	policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions)
+	policy64 := base64.StdEncoding.EncodeToString([]byte(policy))
+	fields["policy"] = policy64
+
+	signer := hmac.New(sha1.New, []byte(b.Auth.SecretKey))
+	signer.Write([]byte(policy64))
+	fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil))
+
+	action = fmt.Sprintf("%s/%s/", b.S3.Region.S3Endpoint, b.Name)
+	return
+}
+
+type request struct {
+	method   string
+	bucket   string
+	path     string
+	signpath string
+	params   url.Values
+	headers  http.Header
+	baseurl  string
+	payload  io.Reader
+	prepared bool
+}
+
+func (req *request) url() (*url.URL, error) {
+	u, err := url.Parse(req.baseurl)
+	if err != nil {
+		return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
+	}
+	u.RawQuery = req.params.Encode()
+	u.Path = req.path
+	return u, nil
+}
+
+// query prepares and runs the req request.
+// If resp is not nil, the XML data contained in the response
+// body will be unmarshalled on it.
+func (s3 *S3) query(req *request, resp interface{}) error {
+	err := s3.prepare(req)
+	if err == nil {
+		var httpResponse *http.Response
+		httpResponse, err = s3.run(req, resp)
+		if resp == nil && httpResponse != nil {
+			httpResponse.Body.Close()
+		}
+	}
+	return err
+}
+
+// prepare sets up req to be delivered to S3.
+func (s3 *S3) prepare(req *request) error {
+	var signpath = req.path
+
+	if !req.prepared {
+		req.prepared = true
+		if req.method == "" {
+			req.method = "GET"
+		}
+		// Copy so they can be mutated without affecting retries.
+		params := make(url.Values)
+		headers := make(http.Header)
+		for k, v := range req.params {
+			params[k] = v
+		}
+		for k, v := range req.headers {
+			headers[k] = v
+		}
+		req.params = params
+		req.headers = headers
+		if !strings.HasPrefix(req.path, "/") {
+			req.path = "/" + req.path
+		}
+		signpath = req.path
+		if req.bucket != "" {
+			req.baseurl = s3.Region.S3BucketEndpoint
+			if req.baseurl == "" {
+				// Use the path method to address the bucket.
+				req.baseurl = s3.Region.S3Endpoint
+				req.path = "/" + req.bucket + req.path
+			} else {
+				// Just in case, prevent injection.
+				if strings.IndexAny(req.bucket, "/:@") >= 0 {
+					return fmt.Errorf("bad S3 bucket: %q", req.bucket)
+				}
+				req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1)
+			}
+			signpath = "/" + req.bucket + signpath
+		}
+	}
+
+	// Always sign again as it's not clear how far the
+	// server has handled a previous attempt.
+	u, err := url.Parse(req.baseurl)
+	if err != nil {
+		return fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err)
+	}
+	reqSignpathSpaceFix := (&url.URL{Path: signpath}).String()
+	req.headers["Host"] = []string{u.Host}
+	req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)}
+	if s3.Auth.Token() != "" {
+		req.headers["X-Amz-Security-Token"] = []string{s3.Auth.Token()}
+	}
+	sign(s3.Auth, req.method, reqSignpathSpaceFix, req.params, req.headers)
+	return nil
+}
+
+// run sends req and returns the http response from the server.
+// If resp is not nil, the XML data contained in the response
+// body will be unmarshalled on it.
+func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) {
+	if debug {
+		log.Printf("Running S3 request: %#v", req)
+	}
+
+	u, err := req.url()
+	if err != nil {
+		return nil, err
+	}
+
+	hreq := http.Request{
+		URL:        u,
+		Method:     req.method,
+		ProtoMajor: 1,
+		ProtoMinor: 1,
+		Close:      true,
+		Header:     req.headers,
+	}
+
+	if v, ok := req.headers["Content-Length"]; ok {
+		hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64)
+		delete(req.headers, "Content-Length")
+	}
+	if req.payload != nil {
+		hreq.Body = ioutil.NopCloser(req.payload)
+	}
+
+	if s3.client == nil {
+		s3.client = &http.Client{
+			Transport: &http.Transport{
+				Dial: func(netw, addr string) (c net.Conn, err error) {
+					c, err = net.DialTimeout(netw, addr, s3.ConnectTimeout)
+					if err != nil {
+						return
+					}
+
+					var deadline time.Time
+					if s3.RequestTimeout > 0 {
+						deadline = time.Now().Add(s3.RequestTimeout)
+						c.SetDeadline(deadline)
+					}
+
+					if s3.ReadTimeout > 0 || s3.WriteTimeout > 0 {
+						c = &ioTimeoutConn{
+							TCPConn:         c.(*net.TCPConn),
+							readTimeout:     s3.ReadTimeout,
+							writeTimeout:    s3.WriteTimeout,
+							requestDeadline: deadline,
+						}
+					}
+					return
+				},
+			},
+		}
+	}
+
+	hresp, err := s3.client.Do(&hreq)
+	if err != nil {
+		return nil, err
+	}
+	if debug {
+		dump, _ := httputil.DumpResponse(hresp, true)
+		log.Printf("} -> %s\n", dump)
+	}
+	if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 {
+		defer hresp.Body.Close()
+		return nil, buildError(hresp)
+	}
+	if resp != nil {
+		err = xml.NewDecoder(hresp.Body).Decode(resp)
+		hresp.Body.Close()
+		if debug {
+			log.Printf("goamz.s3> decoded xml into %#v", resp)
+		}
+	}
+	return hresp, err
+}
+
+// Error represents an error in an operation with S3.
+type Error struct {
+	StatusCode int    // HTTP status code (200, 403, ...)
+	Code       string // S3 error code ("UnsupportedOperation", ...)
+	Message    string // The human-oriented error message
+	BucketName string
+	RequestId  string
+	HostId     string
+}
+
+func (e *Error) Error() string {
+	return e.Message
+}
+
+func buildError(r *http.Response) error {
+	if debug {
+		log.Printf("got error (status code %v)", r.StatusCode)
+		data, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			log.Printf("\tread error: %v", err)
+		} else {
+			log.Printf("\tdata:\n%s\n\n", data)
+		}
+		r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
+	}
+
+	err := Error{}
+	// TODO return error if Unmarshal fails?
+	xml.NewDecoder(r.Body).Decode(&err)
+	r.Body.Close()
+	err.StatusCode = r.StatusCode
+	if err.Message == "" {
+		err.Message = r.Status
+	}
+	if debug {
+		log.Printf("err: %#v\n", err)
+	}
+	return &err
+}
+
+func shouldRetry(err error) bool {
+	if err == nil {
+		return false
+	}
+	if e, ok := err.(*url.Error); ok {
+		// Transport returns this string if it detects a write on a connection which
+		// has already had an error
+		if e.Err.Error() == "http: can't write HTTP request on broken connection" {
+			return true
+		}
+		err = e.Err
+	}
+
+	switch err {
+	case io.ErrUnexpectedEOF, io.EOF:
+		return true
+	}
+	switch e := err.(type) {
+	case *net.DNSError:
+		return true
+	case *net.OpError:
+		switch e.Op {
+		case "read", "write", "WSARecv", "WSASend", "ConnectEx":
+			return true
+		}
+	case *Error:
+		switch e.Code {
+		case "InternalError", "NoSuchUpload", "NoSuchBucket", "RequestTimeout":
+			return true
+		}
+	// let's handle tls handshake timeout issues and similar temporary errors
+	case net.Error:
+		return e.Temporary()
+	}
+
+	return false
+}
+
+func hasCode(err error, code string) bool {
+	s3err, ok := err.(*Error)
+	return ok && s3err.Code == code
+}
+
+// ioTimeoutConn is a net.Conn which sets a deadline for each Read or Write operation
+type ioTimeoutConn struct {
+	*net.TCPConn
+	readTimeout     time.Duration
+	writeTimeout    time.Duration
+	requestDeadline time.Time
+}
+
+func (c *ioTimeoutConn) deadline(timeout time.Duration) time.Time {
+	dl := time.Now().Add(timeout)
+	if c.requestDeadline.IsZero() || dl.Before(c.requestDeadline) {
+		return dl
+	}
+
+	return c.requestDeadline
+}
+
+func (c *ioTimeoutConn) Read(b []byte) (int, error) {
+	if c.readTimeout > 0 {
+		err := c.TCPConn.SetReadDeadline(c.deadline(c.readTimeout))
+		if err != nil {
+			return 0, err
+		}
+	}
+	return c.TCPConn.Read(b)
+}
+
+func (c *ioTimeoutConn) Write(b []byte) (int, error) {
+	if c.writeTimeout > 0 {
+		err := c.TCPConn.SetWriteDeadline(c.deadline(c.writeTimeout))
+		if err != nil {
+			return 0, err
+		}
+	}
+	return c.TCPConn.Write(b)
+}
diff --git a/transfersh-server/vendor/github.com/goamz/goamz/s3/s3test/server.go b/transfersh-server/vendor/github.com/goamz/goamz/s3/s3test/server.go
new file mode 100644
index 0000000..bf4dd8a
--- /dev/null
+++ b/transfersh-server/vendor/github.com/goamz/goamz/s3/s3test/server.go
@@ -0,0 +1,640 @@
+package s3test
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/goamz/goamz/s3"
+)
+
+const debug = false
+
+type s3Error struct {
+	statusCode int
+	XMLName    struct{} `xml:"Error"`
+	Code       string
+	Message    string
+	BucketName string
+	RequestId  string
+	HostId     string
+}
+
+type action struct {
+	srv   *Server
+	w     http.ResponseWriter
+	req   *http.Request
+	reqId string
+}
+
+// Config controls the internal behaviour of the Server. A nil config is the default
+// and behaves as if all configurations assume their default behaviour. Once passed
+// to NewServer, the configuration must not be modified.
+type Config struct {
+	// Send409Conflict controls how the Server will respond to calls to PUT on a
+	// previously existing bucket. The default is false, and corresponds to the
+	// us-east-1 s3 endpoint. Setting this value to true emulates the behaviour of
+	// all other regions.
+	// http://docs.amazonwebservices.com/AmazonS3/latest/API/ErrorResponses.html
+	Send409Conflict bool
+	// Host is the host string on which the s3test server listens; it
+	// defaults to "localhost:0".
+	Host string
+}
+
+func (c *Config) send409Conflict() bool {
+	if c != nil {
+		return c.Send409Conflict
+	}
+	return false
+}
+
+// Server is a fake S3 server for testing purposes.
+// All of the data for the server is kept in memory.
+type Server struct {
+	url      string
+	reqId    int
+	listener net.Listener
+	mu       sync.Mutex
+	buckets  map[string]*bucket
+	config   *Config
+}
+
+type bucket struct {
+	name    string
+	acl     s3.ACL
+	ctime   time.Time
+	objects map[string]*object
+}
+
+type object struct {
+	name     string
+	mtime    time.Time
+	meta     http.Header // metadata to return with requests.
+	checksum []byte      // also held as Content-MD5 in meta.
+	data     []byte
+}
+
+// A resource encapsulates the subject of an HTTP request.
+// The resource referred to may or may not exist
+// when the request is made.
+type resource interface {
+	put(a *action) interface{}
+	get(a *action) interface{}
+	post(a *action) interface{}
+	delete(a *action) interface{}
+}
+
+func NewServer(config *Config) (*Server, error) {
+	// The documentation above allows a nil config, so guard against a nil
+	// dereference here.
+	if config == nil {
+		config = &Config{}
+	}
+	if config.Host == "" {
+		config.Host = "localhost:0"
+	}
+
+	l, err := net.Listen("tcp", config.Host)
+	if err != nil {
+		return nil, fmt.Errorf("cannot listen on %s: %v", config.Host, err)
+	}
+	srv := &Server{
+		listener: l,
+		url:      "http://" + l.Addr().String(),
+		buckets:  make(map[string]*bucket),
+		config:   config,
+	}
+	go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		srv.serveHTTP(w, req)
+	}))
+	return srv, nil
+}
+
+// Quit closes down the server.
+func (srv *Server) Quit() {
+	srv.listener.Close()
+}
+
+// URL returns a URL for the server.
+func (srv *Server) URL() string {
+	return srv.url
+}
+
+func fatalf(code int, codeStr string, errf string, a ...interface{}) {
+	panic(&s3Error{
+		statusCode: code,
+		Code:       codeStr,
+		Message:    fmt.Sprintf(errf, a...),
+	})
+}
+
+// serveHTTP serves the S3 protocol.
+func (srv *Server) serveHTTP(w http.ResponseWriter, req *http.Request) {
+	// ignore error from ParseForm as it's usually spurious.
+	req.ParseForm()
+
+	srv.mu.Lock()
+	defer srv.mu.Unlock()
+
+	if debug {
+		log.Printf("s3test %q %q", req.Method, req.URL)
+	}
+	a := &action{
+		srv:   srv,
+		w:     w,
+		req:   req,
+		reqId: fmt.Sprintf("%09X", srv.reqId),
+	}
+	srv.reqId++
+
+	var r resource
+	defer func() {
+		switch err := recover().(type) {
+		case *s3Error:
+			switch r := r.(type) {
+			case objectResource:
+				err.BucketName = r.bucket.name
+			case bucketResource:
+				err.BucketName = r.name
+			}
+			err.RequestId = a.reqId
+			// TODO HostId
+			w.Header().Set("Content-Type", "application/xml")
+			w.WriteHeader(err.statusCode)
+			xmlMarshal(w, err)
+		case nil:
+		default:
+			panic(err)
+		}
+	}()
+
+	r = srv.resourceForURL(req.URL)
+
+	var resp interface{}
+	switch req.Method {
+	case "PUT":
+		resp = r.put(a)
+	case "GET", "HEAD":
+		resp = r.get(a)
+	case "DELETE":
+		resp = r.delete(a)
+	case "POST":
+		resp = r.post(a)
+	default:
+		fatalf(400, "MethodNotAllowed", "unknown http request method %q", req.Method)
+	}
+	if resp != nil && req.Method != "HEAD" {
+		xmlMarshal(w, resp)
+	}
+}
+
+// xmlMarshal is the same as xml.Marshal except that
+// it panics on error. The marshalling should not fail,
+// but we want to know if it does.
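+// For the s3Error type above, the encoded body looks roughly like
+//
+//	<Error><Code>NoSuchKey</Code><Message>...</Message>...</Error>
+//
+// (unexported fields such as statusCode are skipped by encoding/xml).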
+func xmlMarshal(w io.Writer, x interface{}) {
+	if err := xml.NewEncoder(w).Encode(x); err != nil {
+		panic(fmt.Errorf("error marshalling %#v: %v", x, err))
+	}
+}
+
+// In a fully implemented test server, each of these would have
+// its own resource type.
+var unimplementedBucketResourceNames = map[string]bool{
+	"acl":            true,
+	"lifecycle":      true,
+	"policy":         true,
+	"location":       true,
+	"logging":        true,
+	"notification":   true,
+	"versions":       true,
+	"requestPayment": true,
+	"versioning":     true,
+	"website":        true,
+	"uploads":        true,
+}
+
+var unimplementedObjectResourceNames = map[string]bool{
+	"uploadId": true,
+	"acl":      true,
+	"torrent":  true,
+	"uploads":  true,
+}
+
+var pathRegexp = regexp.MustCompile("/(([^/]+)(/(.*))?)?")
+
+// resourceForURL returns a resource object for the given URL.
+func (srv *Server) resourceForURL(u *url.URL) (r resource) {
+	m := pathRegexp.FindStringSubmatch(u.Path)
+	if m == nil {
+		fatalf(404, "InvalidURI", "Couldn't parse the specified URI")
+	}
+	bucketName := m[2]
+	objectName := m[4]
+	if bucketName == "" {
+		return nullResource{} // root
+	}
+	b := bucketResource{
+		name:   bucketName,
+		bucket: srv.buckets[bucketName],
+	}
+	q := u.Query()
+	if objectName == "" {
+		for name := range q {
+			if unimplementedBucketResourceNames[name] {
+				return nullResource{}
+			}
+		}
+		return b
+	}
+	if b.bucket == nil {
+		fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
+	}
+	objr := objectResource{
+		name:    objectName,
+		version: q.Get("versionId"),
+		bucket:  b.bucket,
+	}
+	for name := range q {
+		if unimplementedObjectResourceNames[name] {
+			return nullResource{}
+		}
+	}
+	if obj := objr.bucket.objects[objr.name]; obj != nil {
+		objr.object = obj
+	}
+	return objr
+}
+
+// nullResource has error stubs for all resource methods.
+type nullResource struct{}
+
+func notAllowed() interface{} {
+	fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource")
+	return nil
+}
+
+func (nullResource) put(a *action) interface{}    { return notAllowed() }
+func (nullResource) get(a *action) interface{}    { return notAllowed() }
+func (nullResource) post(a *action) interface{}   { return notAllowed() }
+func (nullResource) delete(a *action) interface{} { return notAllowed() }
+
+const timeFormat = "2006-01-02T15:04:05.000Z07:00"
+
+type bucketResource struct {
+	name   string
+	bucket *bucket // non-nil if the bucket already exists.
+}
+
+// GET on a bucket lists the objects in the bucket.
+// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketGET.html
+func (r bucketResource) get(a *action) interface{} {
+	if r.bucket == nil {
+		fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
+	}
+	delimiter := a.req.Form.Get("delimiter")
+	marker := a.req.Form.Get("marker")
+	maxKeys := -1
+	if s := a.req.Form.Get("max-keys"); s != "" {
+		i, err := strconv.Atoi(s)
+		if err != nil || i < 0 {
+			fatalf(400, "InvalidArgument", "invalid value for max-keys: %q", s)
+		}
+		maxKeys = i
+	}
+	prefix := a.req.Form.Get("prefix")
+	a.w.Header().Set("Content-Type", "application/xml")
+
+	if a.req.Method == "HEAD" {
+		return nil
+	}
+
+	var objs orderedObjects
+
+	// first get all matching objects and arrange them in alphabetical order.
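+	// When a delimiter is supplied, keys sharing the same delimited segment
+	// after the prefix are collapsed into a single CommonPrefixes entry
+	// below, mimicking S3's directory-style listings: with prefix "" and
+	// delimiter "/", the keys "a/x", "a/y" and "b" list as the common
+	// prefix "a/" plus the object "b".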
+	for name, obj := range r.bucket.objects {
+		if strings.HasPrefix(name, prefix) {
+			objs = append(objs, obj)
+		}
+	}
+	sort.Sort(objs)
+
+	if maxKeys <= 0 {
+		maxKeys = 1000
+	}
+	resp := &s3.ListResp{
+		Name:      r.bucket.name,
+		Prefix:    prefix,
+		Delimiter: delimiter,
+		Marker:    marker,
+		MaxKeys:   maxKeys,
+	}
+
+	var prefixes []string
+	for _, obj := range objs {
+		if !strings.HasPrefix(obj.name, prefix) {
+			continue
+		}
+		name := obj.name
+		isPrefix := false
+		if delimiter != "" {
+			if i := strings.Index(obj.name[len(prefix):], delimiter); i >= 0 {
+				name = obj.name[:len(prefix)+i+len(delimiter)]
+				if prefixes != nil && prefixes[len(prefixes)-1] == name {
+					continue
+				}
+				isPrefix = true
+			}
+		}
+		if name <= marker {
+			continue
+		}
+		if len(resp.Contents)+len(prefixes) >= maxKeys {
+			resp.IsTruncated = true
+			break
+		}
+		if isPrefix {
+			prefixes = append(prefixes, name)
+		} else {
+			// Contents contains only keys not found in CommonPrefixes
+			resp.Contents = append(resp.Contents, obj.s3Key())
+		}
+	}
+	resp.CommonPrefixes = prefixes
+	return resp
+}
+
+// orderedObjects holds a slice of objects that can be sorted
+// by name.
+type orderedObjects []*object
+
+func (s orderedObjects) Len() int {
+	return len(s)
+}
+func (s orderedObjects) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+func (s orderedObjects) Less(i, j int) bool {
+	return s[i].name < s[j].name
+}
+
+func (obj *object) s3Key() s3.Key {
+	return s3.Key{
+		Key:          obj.name,
+		LastModified: obj.mtime.Format(timeFormat),
+		Size:         int64(len(obj.data)),
+		ETag:         fmt.Sprintf(`"%x"`, obj.checksum),
+		// TODO StorageClass
+		// TODO Owner
+	}
+}
+
+// DELETE on a bucket deletes the bucket if it's not empty.
+func (r bucketResource) delete(a *action) interface{} {
+	b := r.bucket
+	if b == nil {
+		fatalf(404, "NoSuchBucket", "The specified bucket does not exist")
+	}
+	if len(b.objects) > 0 {
+		fatalf(400, "BucketNotEmpty", "The bucket you tried to delete is not empty")
+	}
+	delete(a.srv.buckets, b.name)
+	return nil
+}
+
+// PUT on a bucket creates the bucket.
+// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTBucketPUT.html
+func (r bucketResource) put(a *action) interface{} {
+	var created bool
+	if r.bucket == nil {
+		if !validBucketName(r.name) {
+			fatalf(400, "InvalidBucketName", "The specified bucket is not valid")
+		}
+		if loc := locationConstraint(a); loc == "" {
+			fatalf(400, "InvalidRequest", "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.")
+		}
+		// TODO validate acl
+		r.bucket = &bucket{
+			name: r.name,
+			// TODO default acl
+			objects: make(map[string]*object),
+		}
+		a.srv.buckets[r.name] = r.bucket
+		created = true
+	}
+	if !created && a.srv.config.send409Conflict() {
+		fatalf(409, "BucketAlreadyOwnedByYou", "Your previous request to create the named bucket succeeded and you already own it.")
+	}
+	r.bucket.acl = s3.ACL(a.req.Header.Get("x-amz-acl"))
+	return nil
+}
+
+func (bucketResource) post(a *action) interface{} {
+	fatalf(400, "Method", "bucket POST method not available")
+	return nil
+}
+
+// validBucketName returns whether name is a valid bucket name.
+// Here are the rules, from:
+// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html
+//
+// Can contain lowercase letters, numbers, periods (.), underscores (_),
+// and dashes (-). You can use uppercase letters for buckets only in the
+// US Standard region.
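+// (This implementation accepts only the lowercase form: "my-bucket" is
+// accepted while "My-Bucket" is rejected.)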
+//
+// Must start with a number or letter
+//
+// Must be between 3 and 255 characters long
+//
+// There's one extra rule (must not be formatted as an IP address,
+// e.g. 192.168.5.4), but the real S3 server does not seem to check
+// that rule, so we will not check it either.
+func validBucketName(name string) bool {
+	if len(name) < 3 || len(name) > 255 {
+		return false
+	}
+	r := name[0]
+	if !(r >= '0' && r <= '9' || r >= 'a' && r <= 'z') {
+		return false
+	}
+	for _, r := range name {
+		switch {
+		case r >= '0' && r <= '9':
+		case r >= 'a' && r <= 'z':
+		case r == '_' || r == '-':
+		case r == '.':
+		default:
+			return false
+		}
+	}
+	return true
+}
+
+var responseParams = map[string]bool{
+	"content-type":        true,
+	"content-language":    true,
+	"expires":             true,
+	"cache-control":       true,
+	"content-disposition": true,
+	"content-encoding":    true,
+}
+
+type objectResource struct {
+	name    string
+	version string
+	bucket  *bucket // always non-nil.
+	object  *object // may be nil.
+}
+
+// GET on an object gets the contents of the object.
+// http://docs.amazonwebservices.com/AmazonS3/latest/API/RESTObjectGET.html
+func (objr objectResource) get(a *action) interface{} {
+	obj := objr.object
+	if obj == nil {
+		fatalf(404, "NoSuchKey", "The specified key does not exist.")
+	}
+	h := a.w.Header()
+	// add metadata
+	for name, d := range obj.meta {
+		h[name] = d
+	}
+	// override header values in response to request parameters.
+	for name, vals := range a.req.Form {
+		if strings.HasPrefix(name, "response-") {
+			name = name[len("response-"):]
+			if !responseParams[name] {
+				continue
+			}
+			h.Set(name, vals[0])
+		}
+	}
+	if r := a.req.Header.Get("Range"); r != "" {
+		fatalf(400, "NotImplemented", "range unimplemented")
+	}
+	// TODO Last-Modified-Since
+	// TODO If-Modified-Since
+	// TODO If-Unmodified-Since
+	// TODO If-Match
+	// TODO If-None-Match
+	// TODO Connection: close ??
+	// TODO x-amz-request-id
+	h.Set("Content-Length", fmt.Sprint(len(obj.data)))
+	h.Set("ETag", hex.EncodeToString(obj.checksum))
+	h.Set("Last-Modified", obj.mtime.Format(time.RFC1123))
+	if a.req.Method == "HEAD" {
+		return nil
+	}
+	// TODO avoid holding the lock when writing data.
+	_, err := a.w.Write(obj.data)
+	if err != nil {
+		// we can't do much except just log the fact.
+		log.Printf("error writing data: %v", err)
+	}
+	return nil
+}
+
+var metaHeaders = map[string]bool{
+	"Content-MD5":         true,
+	"x-amz-acl":           true,
+	"Content-Type":        true,
+	"Content-Encoding":    true,
+	"Content-Disposition": true,
+}
+
+// PUT on an object creates the object.
+func (objr objectResource) put(a *action) interface{} {
+	// TODO Cache-Control header
+	// TODO Expires header
+	// TODO x-amz-server-side-encryption
+	// TODO x-amz-storage-class
+
+	// TODO is this correct, or should we erase all previous metadata?
+	obj := objr.object
+	if obj == nil {
+		obj = &object{
+			name: objr.name,
+			meta: make(http.Header),
+		}
+	}
+
+	var expectHash []byte
+	if c := a.req.Header.Get("Content-MD5"); c != "" {
+		var err error
+		expectHash, err = base64.StdEncoding.DecodeString(c)
+		if err != nil || len(expectHash) != md5.Size {
+			fatalf(400, "InvalidDigest", "The Content-MD5 you specified was invalid")
+		}
+	}
+	sum := md5.New()
+	// TODO avoid holding lock while reading data.
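+	// The TeeReader below buffers the body while feeding the same bytes
+	// through the MD5 hash, so the checksum is computed in a single pass
+	// and then compared against any client-supplied Content-MD5 header.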
+ data, err := ioutil.ReadAll(io.TeeReader(a.req.Body, sum)) + if err != nil { + fatalf(400, "TODO", "read error") + } + gotHash := sum.Sum(nil) + if expectHash != nil && bytes.Compare(gotHash, expectHash) != 0 { + fatalf(400, "BadDigest", "The Content-MD5 you specified did not match what we received") + } + if a.req.ContentLength >= 0 && int64(len(data)) != a.req.ContentLength { + fatalf(400, "IncompleteBody", "You did not provide the number of bytes specified by the Content-Length HTTP header") + } + + // PUT request has been successful - save data and metadata + for key, values := range a.req.Header { + key = http.CanonicalHeaderKey(key) + if metaHeaders[key] || strings.HasPrefix(key, "X-Amz-Meta-") { + obj.meta[key] = values + } + } + obj.data = data + obj.checksum = gotHash + obj.mtime = time.Now() + objr.bucket.objects[objr.name] = obj + + h := a.w.Header() + h.Set("ETag", fmt.Sprintf(`"%s"`, hex.EncodeToString(obj.checksum))) + + return nil +} + +func (objr objectResource) delete(a *action) interface{} { + delete(objr.bucket.objects, objr.name) + return nil +} + +func (objr objectResource) post(a *action) interface{} { + fatalf(400, "MethodNotAllowed", "The specified method is not allowed against this resource") + return nil +} + +type CreateBucketConfiguration struct { + LocationConstraint string +} + +// locationConstraint parses the request body (if present). +// If there is no body, an empty string will be returned. +func locationConstraint(a *action) string { + var body bytes.Buffer + if _, err := io.Copy(&body, a.req.Body); err != nil { + fatalf(400, "InvalidRequest", err.Error()) + } + if body.Len() == 0 { + return "" + } + var loc CreateBucketConfiguration + if err := xml.NewDecoder(&body).Decode(&loc); err != nil { + fatalf(400, "InvalidRequest", err.Error()) + } + return loc.LocationConstraint +} diff --git a/transfersh-server/vendor/github.com/goamz/goamz/s3/sign.go b/transfersh-server/vendor/github.com/goamz/goamz/s3/sign.go new file mode 100644 index 0000000..722d97d --- /dev/null +++ b/transfersh-server/vendor/github.com/goamz/goamz/s3/sign.go @@ -0,0 +1,141 @@ +package s3 + +import ( + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "github.com/goamz/goamz/aws" + "log" + "sort" + "strings" +) + +var b64 = base64.StdEncoding + +// ---------------------------------------------------------------------------- +// S3 signing (http://goo.gl/G1LrK) + +var s3ParamsToSign = map[string]bool{ + "acl": true, + "location": true, + "logging": true, + "notification": true, + "partNumber": true, + "policy": true, + "requestPayment": true, + "torrent": true, + "uploadId": true, + "uploads": true, + "versionId": true, + "versioning": true, + "versions": true, + "response-content-type": true, + "response-content-language": true, + "response-expires": true, + "response-cache-control": true, + "response-content-disposition": true, + "response-content-encoding": true, + "website": true, + "delete": true, +} + +type keySortableTupleList []keySortableTuple + +type keySortableTuple struct { + Key string + TupleString string +} + +func (l keySortableTupleList) StringSlice() []string { + slice := make([]string, len(l)) + for i, v := range l { + slice[i] = v.TupleString + } + return slice +} + +func (l keySortableTupleList) Len() int { + return len(l) +} + +func (l keySortableTupleList) Less(i, j int) bool { + return l[i].Key < l[j].Key +} + +func (l keySortableTupleList) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func sign(auth aws.Auth, method, canonicalPath string, params, headers 
map[string][]string) { + var md5, ctype, date, xamz string + var xamzDate bool + var sarray keySortableTupleList + for k, v := range headers { + k = strings.ToLower(k) + switch k { + case "content-md5": + md5 = v[0] + case "content-type": + ctype = v[0] + case "date": + if !xamzDate { + date = v[0] + } + default: + if strings.HasPrefix(k, "x-amz-") { + vall := strings.Join(v, ",") + sarray = append(sarray, keySortableTuple{k, k + ":" + vall}) + if k == "x-amz-date" { + xamzDate = true + date = "" + } + } + } + } + if len(sarray) > 0 { + sort.Sort(sarray) + xamz = strings.Join(sarray.StringSlice(), "\n") + "\n" + } + + expires := false + if v, ok := params["Expires"]; ok { + // Query string request authentication alternative. + expires = true + date = v[0] + params["AWSAccessKeyId"] = []string{auth.AccessKey} + } + + sarray = sarray[0:0] + for k, v := range params { + if s3ParamsToSign[k] { + for _, vi := range v { + if vi == "" { + sarray = append(sarray, keySortableTuple{k, k}) + } else { + // "When signing you do not encode these values." + sarray = append(sarray, keySortableTuple{k, k + "=" + vi}) + } + } + } + } + if len(sarray) > 0 { + sort.Sort(sarray) + canonicalPath = canonicalPath + "?" + strings.Join(sarray.StringSlice(), "&") + } + + payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath + hash := hmac.New(sha1.New, []byte(auth.SecretKey)) + hash.Write([]byte(payload)) + signature := make([]byte, b64.EncodedLen(hash.Size())) + b64.Encode(signature, hash.Sum(nil)) + + if expires { + params["Signature"] = []string{string(signature)} + } else { + headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)} + } + if debug { + log.Printf("Signature payload: %q", payload) + log.Printf("Signature: %q", signature) + } +} diff --git a/transfersh-server/vendor/github.com/goamz/goamz/testutil/LICENSE b/transfersh-server/vendor/github.com/goamz/goamz/testutil/LICENSE new file mode 100644 index 0000000..53320c3 --- /dev/null +++ b/transfersh-server/vendor/github.com/goamz/goamz/testutil/LICENSE @@ -0,0 +1,185 @@ +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. 
+ + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. diff --git a/transfersh-server/vendor/github.com/goamz/goamz/testutil/http.go b/transfersh-server/vendor/github.com/goamz/goamz/testutil/http.go new file mode 100644 index 0000000..ccc570c --- /dev/null +++ b/transfersh-server/vendor/github.com/goamz/goamz/testutil/http.go @@ -0,0 +1,180 @@ +package testutil + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "time" +) + +type HTTPServer struct { + URL string + Timeout time.Duration + started bool + request chan *http.Request + response chan ResponseFunc +} + +type Response struct { + Status int + Headers map[string]string + Body string +} + +var DefaultClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + }, +} + +func NewHTTPServer() *HTTPServer { + return &HTTPServer{URL: "http://localhost:4444", Timeout: 5 * time.Second} +} + +type ResponseFunc func(path string) Response + +func (s *HTTPServer) Start() { + if s.started { + return + } + s.started = true + s.request = make(chan *http.Request, 1024) + s.response = make(chan ResponseFunc, 1024) + u, err := url.Parse(s.URL) + if err != nil { + panic(err) + } + l, err := net.Listen("tcp", u.Host) + if err != nil { + panic(err) + } + go http.Serve(l, s) + + s.Response(203, nil, "") + for { + // Wait for it to be up. + resp, err := http.Get(s.URL) + if err == nil && resp.StatusCode == 203 { + break + } + time.Sleep(1e8) + } + s.WaitRequest() // Consume dummy request. +} + +// Flush discards all pending requests and responses. +func (s *HTTPServer) Flush() { + for { + select { + case <-s.request: + case <-s.response: + default: + return + } + } +} + +func body(req *http.Request) string { + data, err := ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + return string(data) +} + +func (s *HTTPServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { + req.ParseMultipartForm(1e6) + data, err := ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + req.Body = ioutil.NopCloser(bytes.NewBuffer(data)) + s.request <- req + var resp Response + select { + case respFunc := <-s.response: + resp = respFunc(req.URL.Path) + case <-time.After(s.Timeout): + const msg = "ERROR: Timeout waiting for test to prepare a response\n" + fmt.Fprintf(os.Stderr, msg) + resp = Response{500, nil, msg} + } + if resp.Headers != nil { + h := w.Header() + for k, v := range resp.Headers { + h.Set(k, v) + } + } + if resp.Status != 0 { + w.WriteHeader(resp.Status) + } + w.Write([]byte(resp.Body)) +} + +// WaitRequests returns the next n requests made to the http server from +// the queue. If not enough requests were previously made, it waits until +// the timeout value for them to be made. 
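+// For example, a test that has just issued two client requests can collect
+// both of them with
+//
+//	reqs := s.WaitRequests(2)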
+func (s *HTTPServer) WaitRequests(n int) []*http.Request { + reqs := make([]*http.Request, 0, n) + for i := 0; i < n; i++ { + select { + case req := <-s.request: + reqs = append(reqs, req) + case <-time.After(s.Timeout): + panic("Timeout waiting for request") + } + } + return reqs +} + +// WaitRequest returns the next request made to the http server from +// the queue. If no requests were previously made, it waits until the +// timeout value for one to be made. +func (s *HTTPServer) WaitRequest() *http.Request { + return s.WaitRequests(1)[0] +} + +// ResponseFunc prepares the test server to respond the following n +// requests using f to build each response. +func (s *HTTPServer) ResponseFunc(n int, f ResponseFunc) { + for i := 0; i < n; i++ { + s.response <- f + } +} + +// ResponseMap maps request paths to responses. +type ResponseMap map[string]Response + +// ResponseMap prepares the test server to respond the following n +// requests using the m to obtain the responses. +func (s *HTTPServer) ResponseMap(n int, m ResponseMap) { + f := func(path string) Response { + for rpath, resp := range m { + if rpath == path { + return resp + } + } + body := "Path not found in response map: " + path + return Response{Status: 500, Body: body} + } + s.ResponseFunc(n, f) +} + +// Responses prepares the test server to respond the following n requests +// using the provided response parameters. +func (s *HTTPServer) Responses(n int, status int, headers map[string]string, body string) { + f := func(path string) Response { + return Response{status, headers, body} + } + s.ResponseFunc(n, f) +} + +// Response prepares the test server to respond the following request +// using the provided response parameters. +func (s *HTTPServer) Response(status int, headers map[string]string, body string) { + s.Responses(1, status, headers, body) +} diff --git a/transfersh-server/vendor/github.com/goamz/goamz/testutil/suite.go b/transfersh-server/vendor/github.com/goamz/goamz/testutil/suite.go new file mode 100644 index 0000000..f4519aa --- /dev/null +++ b/transfersh-server/vendor/github.com/goamz/goamz/testutil/suite.go @@ -0,0 +1,31 @@ +package testutil + +import ( + "flag" + + "github.com/goamz/goamz/aws" + . "gopkg.in/check.v1" +) + +// Amazon must be used by all tested packages to determine whether to +// run functional tests against the real AWS servers. +var Amazon bool + +func init() { + flag.BoolVar(&Amazon, "amazon", false, "Enable tests against amazon server") +} + +type LiveSuite struct { + auth aws.Auth +} + +func (s *LiveSuite) SetUpSuite(c *C) { + if !Amazon { + c.Skip("amazon tests not enabled (-amazon flag)") + } + auth, err := aws.EnvAuth() + if err != nil { + c.Fatal(err.Error()) + } + s.auth = auth +} diff --git a/transfersh-server/vendor/github.com/golang/gddo/httputil/header/LICENSE b/transfersh-server/vendor/github.com/golang/gddo/httputil/header/LICENSE new file mode 100644 index 0000000..65d761b --- /dev/null +++ b/transfersh-server/vendor/github.com/golang/gddo/httputil/header/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/transfersh-server/vendor/github.com/golang/gddo/httputil/header/header.go b/transfersh-server/vendor/github.com/golang/gddo/httputil/header/header.go
new file mode 100644
index 0000000..0f1572e
--- /dev/null
+++ b/transfersh-server/vendor/github.com/golang/gddo/httputil/header/header.go
@@ -0,0 +1,298 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package header provides functions for parsing HTTP headers.
+package header
+
+import (
+	"net/http"
+	"strings"
+	"time"
+)
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// Copy returns a shallow copy of the header.
+func Copy(header http.Header) http.Header {
+	h := make(http.Header)
+	for k, vs := range header {
+		h[k] = vs
+	}
+	return h
+}
+
+var timeLayouts = []string{"Mon, 02 Jan 2006 15:04:05 GMT", time.RFC850, time.ANSIC}
+
+// ParseTime parses the header as time. The zero value is returned if the
+// header is not present or there is an error parsing the header.
+func ParseTime(header http.Header, key string) time.Time {
+	if s := header.Get(key); s != "" {
+		for _, layout := range timeLayouts {
+			if t, err := time.Parse(layout, s); err == nil {
+				return t.UTC()
+			}
+		}
+	}
+	return time.Time{}
+}
+
+// ParseList parses a comma separated list of values. Commas are ignored in
+// quoted strings. Quoted values are not unescaped or unquoted. Whitespace is
+// trimmed.
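+// For example, the header value
+//
+//	foo, "bar, baz", qux
+//
+// parses to the three elements foo, "bar, baz" (quotes retained), and qux.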
+func ParseList(header http.Header, key string) []string { + var result []string + for _, s := range header[http.CanonicalHeaderKey(key)] { + begin := 0 + end := 0 + escape := false + quote := false + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + end = i + 1 + case quote: + switch b { + case '\\': + escape = true + case '"': + quote = false + } + end = i + 1 + case b == '"': + quote = true + end = i + 1 + case octetTypes[b]&isSpace != 0: + if begin == end { + begin = i + 1 + end = begin + } + case b == ',': + if begin < end { + result = append(result, s[begin:end]) + } + begin = i + 1 + end = begin + default: + end = i + 1 + } + } + if begin < end { + result = append(result, s[begin:end]) + } + } + return result +} + +// ParseValueAndParams parses a comma separated list of values with optional +// semicolon separated name-value pairs. Content-Type and Content-Disposition +// headers are in this format. +func ParseValueAndParams(header http.Header, key string) (value string, params map[string]string) { + params = make(map[string]string) + s := header.Get(key) + value, s = expectTokenSlash(s) + if value == "" { + return + } + value = strings.ToLower(value) + s = skipSpace(s) + for strings.HasPrefix(s, ";") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +// AcceptSpec describes an Accept* header. +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept parses Accept* headers. +func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := 
make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/transfersh-server/vendor/github.com/gorilla/context/LICENSE b/transfersh-server/vendor/github.com/gorilla/context/LICENSE new file mode 100644 index 0000000..0e5fb87 --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/context/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/transfersh-server/vendor/github.com/gorilla/context/context.go b/transfersh-server/vendor/github.com/gorilla/context/context.go new file mode 100644 index 0000000..81cb128 --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/context/context.go @@ -0,0 +1,143 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "net/http" + "sync" + "time" +) + +var ( + mutex sync.RWMutex + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) +) + +// Set stores a value for a given key in a given request. +func Set(r *http.Request, key, val interface{}) { + mutex.Lock() + if data[r] == nil { + data[r] = make(map[interface{}]interface{}) + datat[r] = time.Now().Unix() + } + data[r][key] = val + mutex.Unlock() +} + +// Get returns a value stored for a given key in a given request. +func Get(r *http.Request, key interface{}) interface{} { + mutex.RLock() + if ctx := data[r]; ctx != nil { + value := ctx[key] + mutex.RUnlock() + return value + } + mutex.RUnlock() + return nil +} + +// GetOk returns stored value and presence state like multi-value return of map access. 
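+// A typical use, where myKey is whatever key the caller stored:
+//
+//	if v, ok := GetOk(r, myKey); ok {
+//		// a value was set for this request; use v.
+//	}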
+func GetOk(r *http.Request, key interface{}) (interface{}, bool) { + mutex.RLock() + if _, ok := data[r]; ok { + value, ok := data[r][key] + mutex.RUnlock() + return value, ok + } + mutex.RUnlock() + return nil, false +} + +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. +func GetAll(r *http.Request) map[interface{}]interface{} { + mutex.RLock() + if context, ok := data[r]; ok { + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result + } + mutex.RUnlock() + return nil +} + +// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if +// the request was registered. +func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { + mutex.RLock() + context, ok := data[r] + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result, ok +} + +// Delete removes a value stored for a given key in a given request. +func Delete(r *http.Request, key interface{}) { + mutex.Lock() + if data[r] != nil { + delete(data[r], key) + } + mutex.Unlock() +} + +// Clear removes all values stored for a given request. +// +// This is usually called by a handler wrapper to clean up request +// variables at the end of a request lifetime. See ClearHandler(). +func Clear(r *http.Request) { + mutex.Lock() + clear(r) + mutex.Unlock() +} + +// clear is Clear without the lock. +func clear(r *http.Request) { + delete(data, r) + delete(datat, r) +} + +// Purge removes request data stored for longer than maxAge, in seconds. +// It returns the amount of requests removed. +// +// If maxAge <= 0, all request data is removed. +// +// This is only used for sanity check: in case context cleaning was not +// properly set some request data can be kept forever, consuming an increasing +// amount of memory. In case this is detected, Purge() must be called +// periodically until the problem is fixed. +func Purge(maxAge int) int { + mutex.Lock() + count := 0 + if maxAge <= 0 { + count = len(data) + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) + } else { + min := time.Now().Unix() - int64(maxAge) + for r := range data { + if datat[r] < min { + clear(r) + count++ + } + } + } + mutex.Unlock() + return count +} + +// ClearHandler wraps an http.Handler and clears request values at the end +// of a request lifetime. +func ClearHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer Clear(r) + h.ServeHTTP(w, r) + }) +} diff --git a/transfersh-server/vendor/github.com/gorilla/context/doc.go b/transfersh-server/vendor/github.com/gorilla/context/doc.go new file mode 100644 index 0000000..73c7400 --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/context/doc.go @@ -0,0 +1,82 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package context stores values shared during a request lifetime. + +For example, a router can set variables extracted from the URL and later +application handlers can access those values, or it can be used to store +sessions values to be saved at the end of a request. There are several +others common uses. 
+ +The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: + + http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 + +Here's the basic usage: first define the keys that you will need. The key +type is interface{} so a key can be of any type that supports equality. +Here we define a key using a custom int type to avoid name collisions: + + package foo + + import ( + "github.com/gorilla/context" + ) + + type key int + + const MyKey key = 0 + +Then set a variable. Variables are bound to an http.Request object, so you +need a request instance to set a value: + + context.Set(r, MyKey, "bar") + +The application can later access the variable using the same key you provided: + + func MyHandler(w http.ResponseWriter, r *http.Request) { + // val is "bar". + val := context.Get(r, foo.MyKey) + + // returns ("bar", true) + val, ok := context.GetOk(r, foo.MyKey) + // ... + } + +And that's all about the basic usage. We discuss some other ideas below. + +Any type can be stored in the context. To enforce a given type, make the key +private and wrap Get() and Set() to accept and return values of a specific +type: + + type key int + + const mykey key = 0 + + // GetMyKey returns a value for this package from the request values. + func GetMyKey(r *http.Request) SomeType { + if rv := context.Get(r, mykey); rv != nil { + return rv.(SomeType) + } + return nil + } + + // SetMyKey sets a value for this package in the request values. + func SetMyKey(r *http.Request, val SomeType) { + context.Set(r, mykey, val) + } + +Variables must be cleared at the end of a request, to remove all values +that were stored. This can be done in an http.Handler, after a request was +served. Just call Clear() passing the request: + + context.Clear(r) + +...or use ClearHandler(), which conveniently wraps an http.Handler to clear +variables at the end of a request lifetime. + +The Routers from the packages gorilla/mux and gorilla/pat call Clear() +so if you are using either of them you don't need to clear the context manually. +*/ +package context diff --git a/transfersh-server/vendor/github.com/gorilla/mux/LICENSE b/transfersh-server/vendor/github.com/gorilla/mux/LICENSE new file mode 100644 index 0000000..0e5fb87 --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/transfersh-server/vendor/github.com/gorilla/mux/context_gorilla.go b/transfersh-server/vendor/github.com/gorilla/mux/context_gorilla.go new file mode 100644 index 0000000..d7adaa8 --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/mux/context_gorilla.go @@ -0,0 +1,26 @@ +// +build !go1.7 + +package mux + +import ( + "net/http" + + "github.com/gorilla/context" +) + +func contextGet(r *http.Request, key interface{}) interface{} { + return context.Get(r, key) +} + +func contextSet(r *http.Request, key, val interface{}) *http.Request { + if val == nil { + return r + } + + context.Set(r, key, val) + return r +} + +func contextClear(r *http.Request) { + context.Clear(r) +} diff --git a/transfersh-server/vendor/github.com/gorilla/mux/context_native.go b/transfersh-server/vendor/github.com/gorilla/mux/context_native.go new file mode 100644 index 0000000..209cbea --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/mux/context_native.go @@ -0,0 +1,24 @@ +// +build go1.7 + +package mux + +import ( + "context" + "net/http" +) + +func contextGet(r *http.Request, key interface{}) interface{} { + return r.Context().Value(key) +} + +func contextSet(r *http.Request, key, val interface{}) *http.Request { + if val == nil { + return r + } + + return r.WithContext(context.WithValue(r.Context(), key, val)) +} + +func contextClear(r *http.Request) { + return +} diff --git a/transfersh-server/vendor/github.com/gorilla/mux/doc.go b/transfersh-server/vendor/github.com/gorilla/mux/doc.go new file mode 100644 index 0000000..835f534 --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/mux/doc.go @@ -0,0 +1,206 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts and paths can have variables with an optional regular + expression. + * Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. 
+
+Let's start registering a couple of URL paths and handlers:
+
+	func main() {
+		r := mux.NewRouter()
+		r.HandleFunc("/", HomeHandler)
+		r.HandleFunc("/products", ProductsHandler)
+		r.HandleFunc("/articles", ArticlesHandler)
+		http.Handle("/", r)
+	}
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/products/{key}", ProductHandler)
+	r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The names are used to create a map of route variables which can be retrieved
+calling mux.Vars():
+
+	vars := mux.Vars(request)
+	category := vars["category"]
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+	r := mux.NewRouter()
+	// Only matches if domain is "www.example.com".
+	r.Host("www.example.com")
+	// Matches a dynamic subdomain.
+	r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+	r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+	r.Methods("GET", "POST")
+
+...or URL schemes:
+
+	r.Schemes("https")
+
+...or header values:
+
+	r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+	r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+	r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+		return r.ProtoMajor == 0
+	})
+
+...and finally, it is possible to combine several matchers in a single route:
+
+	r.HandleFunc("/products", ProductsHandler).
+		Host("www.example.com").
+		Methods("GET").
+		Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes.
When a subrouter has a path prefix, +the inner routes use it as base for their paths: + + r := mux.NewRouter() + s := r.PathPrefix("/products").Subrouter() + // "/products/" + s.HandleFunc("/", ProductsHandler) + // "/products/{key}/" + s.HandleFunc("/{key}/", ProductHandler) + // "/products/{key}/details" + s.HandleFunc("/{key}/details", ProductDetailsHandler) + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, +or "reversed". We define a name calling Name() on a route. For example: + + r := mux.NewRouter() + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") + +To build a URL, get the route and call the URL() method, passing a sequence of +key/value pairs for the route variables. For the previous route, we would do: + + url, err := r.Get("article").URL("category", "technology", "id", "42") + +...and the result will be a url.URL with the following path: + + "/articles/technology/42" + +This also works for host variables: + + r := mux.NewRouter() + r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // url.String() will be "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +All variables defined in the route are required, and their values must +conform to the corresponding patterns. These requirements guarantee that a +generated URL will always match a registered route -- the only exception is +for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. For example, we could do: + + r.HeadersRegexp("Content-Type", "application/(text|json)") + +...and the route will match both requests with a Content-Type of `application/json` as well as +`application/text` + +There's also a way to build only the URL host or path for a route: +use the methods URLHost() or URLPath() instead. For the previous route, +we would do: + + // "http://news.domain.com/" + host, err := r.Get("article").URLHost("subdomain", "news") + + // "/articles/technology/42" + path, err := r.Get("article").URLPath("category", "technology", "id", "42") + +And if you use subrouters, host and path defined separately can be built +as well: + + r := mux.NewRouter() + s := r.Host("{subdomain}.domain.com").Subrouter() + s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +*/ +package mux diff --git a/transfersh-server/vendor/github.com/gorilla/mux/mux.go b/transfersh-server/vendor/github.com/gorilla/mux/mux.go new file mode 100644 index 0000000..f8c10f3 --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/mux/mux.go @@ -0,0 +1,495 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "path" + "regexp" +) + +// NewRouter returns a new router instance. +func NewRouter() *Router { + return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} +} + +// Router registers routes to be matched and dispatches a handler. 
+// +// It implements the http.Handler interface, so it can be registered to serve +// requests: +// +// var router = mux.NewRouter() +// +// func main() { +// http.Handle("/", router) +// } +// +// Or, for Google App Engine, register it in a init() function: +// +// func init() { +// http.Handle("/", router) +// } +// +// This will send all incoming requests to the router. +type Router struct { + // Configurable Handler to be used when no route matches. + NotFoundHandler http.Handler + // Parent route, if this is a subrouter. + parent parentRoute + // Routes to be matched, in order. + routes []*Route + // Routes by name for URL building. + namedRoutes map[string]*Route + // See Router.StrictSlash(). This defines the flag for new routes. + strictSlash bool + // See Router.SkipClean(). This defines the flag for new routes. + skipClean bool + // If true, do not clear the request context after handling the request. + // This has no effect when go1.7+ is used, since the context is stored + // on the request itself. + KeepContext bool +} + +// Match matches registered routes against the request. +func (r *Router) Match(req *http.Request, match *RouteMatch) bool { + for _, route := range r.routes { + if route.Match(req, match) { + return true + } + } + + // Closest match for a router (includes sub-routers) + if r.NotFoundHandler != nil { + match.Handler = r.NotFoundHandler + return true + } + return false +} + +// ServeHTTP dispatches the handler registered in the matched route. +// +// When there is a match, the route variables can be retrieved calling +// mux.Vars(request). +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if !r.skipClean { + // Clean path to canonical form and redirect. + if p := cleanPath(req.URL.Path); p != req.URL.Path { + + // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } + } + var match RouteMatch + var handler http.Handler + if r.Match(req, &match) { + handler = match.Handler + req = setVars(req, match.Vars) + req = setCurrentRoute(req, match.Route) + } + if handler == nil { + handler = http.NotFoundHandler() + } + if !r.KeepContext { + defer contextClear(req) + } + handler.ServeHTTP(w, req) +} + +// Get returns a route registered with the given name. +func (r *Router) Get(name string) *Route { + return r.getNamedRoutes()[name] +} + +// GetRoute returns a route registered with the given name. This method +// was renamed to Get() and remains here for backwards compatibility. +func (r *Router) GetRoute(name string) *Route { + return r.getNamedRoutes()[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. 
However, any subrouters created from that +// route inherit the original StrictSlash setting. +func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// SkipClean defines the path cleaning behaviour for new routes. The initial +// value is false. Users should be careful about which routes are not cleaned +// +// When true, if the route path is "/path//to", it will remain with the double +// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ +// +// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will +// become /fetch/http/xkcd.com/534 +func (r *Router) SkipClean(value bool) *Router { + r.skipClean = value + return r +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// getNamedRoutes returns the map where named routes are registered. +func (r *Router) getNamedRoutes() map[string]*Route { + if r.namedRoutes == nil { + if r.parent != nil { + r.namedRoutes = r.parent.getNamedRoutes() + } else { + r.namedRoutes = make(map[string]*Route) + } + } + return r.namedRoutes +} + +// getRegexpGroup returns regexp definitions from the parent route, if any. +func (r *Router) getRegexpGroup() *routeRegexpGroup { + if r.parent != nil { + return r.parent.getRegexpGroup() + } + return nil +} + +func (r *Router) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. +func (r *Router) NewRoute() *Route { + route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean} + r.routes = append(r.routes, route) + return route +} + +// Handle registers a new route with a matcher for the URL path. +// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). +func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. +// See Route.Headers(). +func (r *Router) Headers(pairs ...string) *Route { + return r.NewRoute().Headers(pairs...) +} + +// Host registers a new route with a matcher for the URL host. +// See Route.Host(). +func (r *Router) Host(tpl string) *Route { + return r.NewRoute().Host(tpl) +} + +// MatcherFunc registers a new route with a custom matcher function. +// See Route.MatcherFunc(). +func (r *Router) MatcherFunc(f MatcherFunc) *Route { + return r.NewRoute().MatcherFunc(f) +} + +// Methods registers a new route with a matcher for HTTP methods. +// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). 
+func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). +func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) +} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// BuildVarsFunc registers a new route with a custom function for modifying +// route variables before building a URL. +func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// Walk walks the router and all its sub-routers, calling walkFn for each route +// in the tree. The routes are walked in the order they were added. Sub-routers +// are explored depth-first. +func (r *Router) Walk(walkFn WalkFunc) error { + return r.walk(walkFn, []*Route{}) +} + +// SkipRouter is used as a return value from WalkFuncs to indicate that the +// router that walk is about to descend down to should be skipped. +var SkipRouter = errors.New("skip this router") + +// WalkFunc is the type of the function called for each route visited by Walk. +// At every invocation, it is given the current route, and the current router, +// and a list of ancestor routes that lead to the current route. +type WalkFunc func(route *Route, router *Router, ancestors []*Route) error + +func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { + for _, t := range r.routes { + if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" { + continue + } + + err := walkFn(t, r, ancestors) + if err == SkipRouter { + continue + } + for _, sr := range t.matchers { + if h, ok := sr.(*Router); ok { + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + } + } + if h, ok := t.handler.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + return nil +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. +type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := contextGet(r, varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. +// This only works when called inside the handler of the matched route +// because the matched route is stored in the request context which is cleared +// after the handler returns, unless the KeepContext option is set on the +// Router. 
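+// +// An illustrative sketch (the handler body and logging are hypothetical, not +// part of the vendored code): +// +// func handler(w http.ResponseWriter, r *http.Request) { +// if route := mux.CurrentRoute(r); route != nil { +// tpl, _ := route.GetPathTemplate() +// log.Printf("matched route template: %s", tpl) +// } +// }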
+ +func CurrentRoute(r *http.Request) *Route { + if rv := contextGet(r, routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func setVars(r *http.Request, val interface{}) *http.Request { + return contextSet(r, varsKey, val) +} + +func setCurrentRoute(r *http.Request, val interface{}) *http.Request { + return contextSet(r, routeKey, val) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. +func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +// checkPairs returns the count of strings passed in, and an error if +// the count is not an even number. +func checkPairs(pairs ...string) (int, error) { + length := len(pairs) + if length%2 != 0 { + return length, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + return length, nil +} + +// mapFromPairsToString converts variadic string parameters to a +// string to string map. +func mapFromPairsToString(pairs ...string) (map[string]string, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +// mapFromPairsToRegex converts variadic string parameters to a +// string to regex map. +func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]*regexp.Regexp, length/2) + for i := 0; i < length; i += 2 { + regex, err := regexp.Compile(pairs[i+1]) + if err != nil { + return nil, err + } + m[pairs[i]] = regex + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. +func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMapWithString returns true if the given key/value pairs exist in a given map. +func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. 
+ valueExists := false + for _, value := range values { + if v == value { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} + +// matchMapWithRegex returns true if the given key/value pairs exist in a given map, +// with each value matched against the corresponding compiled regexp. +func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != nil { + // If the regexp is nil we only check that the key exists. Otherwise + // we also check that at least one of the values matches the pattern. + valueExists := false + for _, value := range values { + if v.MatchString(value) { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} diff --git a/transfersh-server/vendor/github.com/gorilla/mux/regexp.go b/transfersh-server/vendor/github.com/gorilla/mux/regexp.go new file mode 100644 index 0000000..08710bc --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/mux/regexp.go @@ -0,0 +1,312 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" +) + +// newRouteRegexp parses a route template and returns a routeRegexp, +// used to match a host, a path or a query string. +// +// It will extract named variables, assemble a regexp to be matched, create +// a "reverse" template to build URLs and compile regexps to validate variable +// values used in URL building. +// +// Previously we accepted only Python-like identifiers for variable +// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that +// name and pattern can't be empty, and names can't contain a colon. +func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { + // Check if it is well-formed. + idxs, errBraces := braceIndices(tpl) + if errBraces != nil { + return nil, errBraces + } + // Backup the original. + template := tpl + // Now let's parse it. + defaultPattern := "[^/]+" + if matchQuery { + defaultPattern = "[^?&]*" + } else if matchHost { + defaultPattern = "[^.]+" + matchPrefix = false + } + // Strict slash only applies to plain path matching; disable it when + // matching a prefix, host or query. + if matchPrefix || matchHost || matchQuery { + strictSlash = false + } + // Set a flag for strictSlash. + endSlash := false + if strictSlash && strings.HasSuffix(tpl, "/") { + tpl = tpl[:len(tpl)-1] + endSlash = true + } + varsN := make([]string, len(idxs)/2) + varsR := make([]*regexp.Regexp, len(idxs)/2) + pattern := bytes.NewBufferString("") + pattern.WriteByte('^') + reverse := bytes.NewBufferString("") + var end int + var err error + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := tpl[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) + name := parts[0] + patt := defaultPattern + if len(parts) == 2 { + patt = parts[1] + } + // Name or pattern can't be empty. + if name == "" || patt == "" { + return nil, fmt.Errorf("mux: missing name or pattern in %q", + tpl[idxs[i]:end]) + } + // Build the regexp pattern. + fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) + + // Build the reverse template. 
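+ // (Each variable becomes a %s placeholder in the reverse template; + // routeRegexp.url later fills the placeholders in via fmt.Sprintf.)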
+ fmt.Fprintf(reverse, "%s%%s", raw) + + // Append variable name and compiled pattern. + varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + if err != nil { + return nil, err + } + } + // Add the remaining. + raw := tpl[end:] + pattern.WriteString(regexp.QuoteMeta(raw)) + if strictSlash { + pattern.WriteString("[/]?") + } + if matchQuery { + // Add the default pattern if the query value is empty + if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { + pattern.WriteString(defaultPattern) + } + } + if !matchPrefix { + pattern.WriteByte('$') + } + reverse.WriteString(raw) + if endSlash { + reverse.WriteByte('/') + } + // Compile full regexp. + reg, errCompile := regexp.Compile(pattern.String()) + if errCompile != nil { + return nil, errCompile + } + // Done! + return &routeRegexp{ + template: template, + matchHost: matchHost, + matchQuery: matchQuery, + strictSlash: strictSlash, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + }, nil +} + +// routeRegexp stores a regexp to match a host or path and information to +// collect and validate route variables. +type routeRegexp struct { + // The unmodified template. + template string + // True for host match, false for path or query string match. + matchHost bool + // True for query string match, false for path and host match. + matchQuery bool + // The strictSlash value defined on the route, but disabled if PathPrefix was used. + strictSlash bool + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp +} + +// Match matches the regexp against the URL host or path. +func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if !r.matchHost { + if r.matchQuery { + return r.matchQueryString(req) + } + + return r.regexp.MatchString(req.URL.Path) + } + + return r.regexp.MatchString(getHost(req)) +} + +// url builds a URL part using the given values. +func (r *routeRegexp) url(values map[string]string) (string, error) { + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// getURLQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. +func (r *routeRegexp) getURLQuery(req *http.Request) string { + if !r.matchQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + for key, vals := range req.URL.Query() { + if key == templateKey && len(vals) > 0 { + return key + "=" + vals[0] + } + } + return "" +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getURLQuery(req)) +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. 
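+// For example (illustrative): for "/{foo}/{id:[0-9]+}" it returns the index +// pairs delimiting "{foo}" and "{id:[0-9]+}".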
+func braceIndices(s string) ([]int, error) { + var level, idx int + var idxs []int + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// varGroupName builds a capturing group name for the indexed variable. +func varGroupName(idx int) string { + return "v" + strconv.Itoa(idx) +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. + if v.host != nil { + host := getHost(req) + matches := v.host.regexp.FindStringSubmatchIndex(host) + if len(matches) > 0 { + extractVars(host, matches, v.host.varsN, m.Vars) + } + } + // Store path variables. + if v.path != nil { + matches := v.path.regexp.FindStringSubmatchIndex(req.URL.Path) + if len(matches) > 0 { + extractVars(req.URL.Path, matches, v.path.varsN, m.Vars) + // Check if we should redirect. + if v.path.strictSlash { + p1 := strings.HasSuffix(req.URL.Path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), 301) + } + } + } + } + // Store query string variables. + for _, q := range v.queries { + queryURL := q.getURLQuery(req) + matches := q.regexp.FindStringSubmatchIndex(queryURL) + if len(matches) > 0 { + extractVars(queryURL, matches, q.varsN, m.Vars) + } + } +} + +// getHost tries its best to return the request host. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + host := r.Host + // Slice off any port information. + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + return host + +} + +func extractVars(input string, matches []int, names []string, output map[string]string) { + matchesCount := 0 + prevEnd := -1 + for i := 2; i < len(matches) && matchesCount < len(names); i += 2 { + if prevEnd < matches[i+1] { + value := input[matches[i]:matches[i+1]] + output[names[matchesCount]] = value + prevEnd = matches[i+1] + matchesCount++ + } + } +} diff --git a/transfersh-server/vendor/github.com/gorilla/mux/route.go b/transfersh-server/vendor/github.com/gorilla/mux/route.go new file mode 100644 index 0000000..6c53f9f --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/mux/route.go @@ -0,0 +1,634 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Parent where the route was registered (a Router). + parent parentRoute + // Request handler for the route. + handler http.Handler + // List of matchers. 
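+ // All matchers registered on the route must match, in order, for the + // route itself to match (see Route.Match below).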
+ matchers []matcher + // Manager for the variables from host and path. + regexp *routeRegexpGroup + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + // If true, when the path pattern is "/path//to", accessing "/path//to" + // will not redirect + skipClean bool + // If true, this route never matches: it is only used to build URLs. + buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error + + buildVarsFunc BuildVarsFunc +} + +func (r *Route) SkipClean() bool { + return r.skipClean +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + return false + } + } + // Yay, we have a match. Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + // Set variables. + if r.regexp != nil { + r.regexp.setMatch(req, match, r) + } + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. +func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. +func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. +func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs. +// If the name was registered already it will be overwritten. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.getNamedRoutes()[name] = r + } + return r +} + +// GetName returns the name for the route, if any. +func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. 
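+// The flags mirror the public API (see the call sites below): Host() passes +// matchHost, PathPrefix() passes matchPrefix, and Queries() passes matchQuery.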
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { + if r.err != nil { + return r.err + } + r.regexp = r.getRegexpGroup() + if !matchHost && !matchQuery { + if len(tpl) == 0 || tpl[0] != '/' { + return fmt.Errorf("mux: path must start with a slash, got %q", tpl) + } + if r.regexp.path != nil { + tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl + } + } + rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) + if err != nil { + return err + } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } + if matchHost { + if r.regexp.path != nil { + if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { + return err + } + } + r.regexp.host = rr + } else { + if r.regexp.host != nil { + if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { + return err + } + } + if matchQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } + } + r.addMatcher(rr) + return nil +} + +// Headers -------------------------------------------------------------------- + +// headerMatcher matches the request against header values. +type headerMatcher map[string]string + +func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithString(m, r.Header, true) +} + +// Headers adds a matcher for request header values. +// It accepts a sequence of key/value pairs to be matched. For example: +// +// r := mux.NewRouter() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both request header values match. +// If the value is an empty string, it will match any value if the key is set. +func (r *Route) Headers(pairs ...string) *Route { + if r.err == nil { + var headers map[string]string + headers, r.err = mapFromPairsToString(pairs...) + return r.addMatcher(headerMatcher(headers)) + } + return r +} + +// headerRegexMatcher matches the request against the route given a regex for the header +type headerRegexMatcher map[string]*regexp.Regexp + +func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithRegex(m, r.Header, true) +} + +// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex +// support. For example: +// +// r := mux.NewRouter() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both request headers match their +// regular expressions. +// If the value is an empty string, it will match any value if the key is set. +func (r *Route) HeadersRegexp(pairs ...string) *Route { + if r.err == nil { + var headers map[string]*regexp.Regexp + headers, r.err = mapFromPairsToRegex(pairs...) + return r.addMatcher(headerRegexMatcher(headers)) + } + return r +} + +// Host ----------------------------------------------------------------------- + +// Host adds a matcher for the URL host. +// It accepts a template with zero or more URL variables enclosed by {}. +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next dot. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Host("www.example.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") +// +// Variable names must be unique in a given route. 
They can be retrieved +// calling mux.Vars(request). +func (r *Route) Host(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, true, false, false) + return r +} + +// MatcherFunc ---------------------------------------------------------------- + +// MatcherFunc is the function signature used by custom matchers. +type MatcherFunc func(*http.Request, *RouteMatch) bool + +// Match returns the match for a given request. +func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { + return m(r, match) +} + +// MatcherFunc adds a custom function to be used as request matcher. +func (r *Route) MatcherFunc(f MatcherFunc) *Route { + return r.addMatcher(f) +} + +// Methods -------------------------------------------------------------------- + +// methodMatcher matches the request against HTTP methods. +type methodMatcher []string + +func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.Method) +} + +// Methods adds a matcher for HTTP methods. +// It accepts a sequence of one or more methods to be matched, e.g.: +// "GET", "POST", "PUT". +func (r *Route) Methods(methods ...string) *Route { + for k, v := range methods { + methods[k] = strings.ToUpper(v) + } + return r.addMatcher(methodMatcher(methods)) +} + +// Path ----------------------------------------------------------------------- + +// Path adds a matcher for the URL path. +// It accepts a template with zero or more URL variables enclosed by {}. The +// template must start with a "/". +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Path(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, false, false) + return r +} + +// PathPrefix ----------------------------------------------------------------- + +// PathPrefix adds a matcher for the URL path prefix. This matches if the given +// template is a prefix of the full URL path. See Route.Path() for details on +// the tpl argument. +// +// Note that it does not treat slashes specially ("/foobar/" will be matched by +// the prefix "/foo") so you may want to use a trailing slash here. +// +// Also note that the setting of Router.StrictSlash() has no effect on routes +// with a PathPrefix matcher. +func (r *Route) PathPrefix(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, true, false) + return r +} + +// Query ---------------------------------------------------------------------- + +// Queries adds a matcher for URL query values. +// It accepts a sequence of key/value pairs. Values may define variables. +// For example: +// +// r := mux.NewRouter() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// +// The above route will only match if the URL contains the defined query +// values, e.g.: ?foo=bar&id=42. +// +// If the value is an empty string, it will match any value if the key is set. +// +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. 
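+// +// Note that query variables use the default pattern [^?&]* (see +// newRouteRegexp in regexp.go), so a bare {name} may also match an empty +// value.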
+ +func (r *Route) Queries(pairs ...string) *Route { + length := len(pairs) + if length%2 != 0 { + r.err = fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + return nil + } + for i := 0; i < length; i += 2 { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil { + return r + } + } + + return r +} + +// Schemes -------------------------------------------------------------------- + +// schemeMatcher matches the request against URL schemes. +type schemeMatcher []string + +func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.URL.Scheme) +} + +// Schemes adds a matcher for URL schemes. +// It accepts a sequence of schemes to be matched, e.g.: "http", "https". +func (r *Route) Schemes(schemes ...string) *Route { + for k, v := range schemes { + schemes[k] = strings.ToLower(v) + } + return r.addMatcher(schemeMatcher(schemes)) +} + +// BuildVarsFunc -------------------------------------------------------------- + +// BuildVarsFunc is the function signature used by custom build variable +// functions (which can modify route variables before a route's URL is built). +type BuildVarsFunc func(map[string]string) map[string]string + +// BuildVarsFunc adds a custom function to be used to modify build variables +// before a route's URL is built. +func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { + r.buildVarsFunc = f + return r +} + +// Subrouter ------------------------------------------------------------------ + +// Subrouter creates a subrouter for the route. +// +// It will test the inner routes only if the parent route matched. For example: +// +// r := mux.NewRouter() +// s := r.Host("www.example.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +// +// Here, the routes registered in the subrouter won't be tested if the host +// doesn't match. +func (r *Route) Subrouter() *Router { + router := &Router{parent: r, strictSlash: r.strictSlash} + r.addMatcher(router) + return router +} + +// ---------------------------------------------------------------------------- +// URL building +// ---------------------------------------------------------------------------- + +// URL builds a URL for the route. +// +// It accepts a sequence of key/value pairs for the route variables. For +// example, given this route: +// +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// ...a URL for it can be built using: +// +// url, err := r.Get("article").URL("category", "technology", "id", "42") +// +// ...which will return an url.URL with the following path: +// +// "/articles/technology/42" +// +// This also works for host variables: +// +// r := mux.NewRouter() +// r.Host("{subdomain}.domain.com"). +// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") +// +// All variables defined in the route are required, and their values must +// conform to the corresponding patterns. 
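+// +// When the route defines a host, the returned url.URL uses "http" as its +// scheme by default (see the implementation below).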
+func (r *Route) URL(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil { + return nil, errors.New("mux: route doesn't have a host or path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + var scheme, host, path string + if r.regexp.host != nil { + // Set a default scheme. + scheme = "http" + if host, err = r.regexp.host.url(values); err != nil { + return nil, err + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(values); err != nil { + return nil, err + } + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). +// +// The route must have a host defined. +func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + host, err := r.regexp.host.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Scheme: "http", + Host: host, + }, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// GetPathTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp == nil || r.regexp.path == nil { + return "", errors.New("mux: route doesn't have a path") + } + return r.regexp.path.template, nil +} + +// GetHostTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a host. +func (r *Route) GetHostTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp == nil || r.regexp.host == nil { + return "", errors.New("mux: route doesn't have a host") + } + return r.regexp.host.template, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. +func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) + if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.parent != nil { + m = r.parent.buildVars(m) + } + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// parentRoute allows routes to know about parent host and path definitions. 
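+// Both Router and Route implement it, which is how subrouters inherit named +// routes and regexp definitions from their parents.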
+type parentRoute interface { + getNamedRoutes() map[string]*Route + getRegexpGroup() *routeRegexpGroup + buildVars(map[string]string) map[string]string +} + +// getNamedRoutes returns the map where named routes are registered. +func (r *Route) getNamedRoutes() map[string]*Route { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + return r.parent.getNamedRoutes() +} + +// getRegexpGroup returns regexp definitions from this route. +func (r *Route) getRegexpGroup() *routeRegexpGroup { + if r.regexp == nil { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + regexp := r.parent.getRegexpGroup() + if regexp == nil { + r.regexp = new(routeRegexpGroup) + } else { + // Copy. + r.regexp = &routeRegexpGroup{ + host: regexp.host, + path: regexp.path, + queries: regexp.queries, + } + } + } + return r.regexp +} diff --git a/transfersh-server/vendor/github.com/gorilla/securecookie/LICENSE b/transfersh-server/vendor/github.com/gorilla/securecookie/LICENSE new file mode 100644 index 0000000..0e5fb87 --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/securecookie/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/transfersh-server/vendor/github.com/gorilla/securecookie/doc.go b/transfersh-server/vendor/github.com/gorilla/securecookie/doc.go new file mode 100644 index 0000000..ae89408 --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/securecookie/doc.go @@ -0,0 +1,61 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package securecookie encodes and decodes authenticated and optionally +encrypted cookie values. + +Secure cookies can't be forged, because their values are validated using HMAC. +When encrypted, the content is also inaccessible to malicious eyes. 
+ +To use it, first create a new SecureCookie instance: + + var hashKey = []byte("very-secret") + var blockKey = []byte("a-lot-secret") + var s = securecookie.New(hashKey, blockKey) + +The hashKey is required, used to authenticate the cookie value using HMAC. +It is recommended to use a key with 32 or 64 bytes. + +The blockKey is optional, used to encrypt the cookie value -- set it to nil +to not use encryption. If set, the length must correspond to the block size +of the encryption algorithm. For AES, used by default, valid lengths are +16, 24, or 32 bytes to select AES-128, AES-192, or AES-256. + +Strong keys can be created using the convenience function GenerateRandomKey(). + +Once a SecureCookie instance is set, use it to encode a cookie value: + + func SetCookieHandler(w http.ResponseWriter, r *http.Request) { + value := map[string]string{ + "foo": "bar", + } + if encoded, err := s.Encode("cookie-name", value); err == nil { + cookie := &http.Cookie{ + Name: "cookie-name", + Value: encoded, + Path: "/", + } + http.SetCookie(w, cookie) + } + } + +Later, use the same SecureCookie instance to decode and validate a cookie +value: + + func ReadCookieHandler(w http.ResponseWriter, r *http.Request) { + if cookie, err := r.Cookie("cookie-name"); err == nil { + value := make(map[string]string) + if err = s2.Decode("cookie-name", cookie.Value, &value); err == nil { + fmt.Fprintf(w, "The value of foo is %q", value["foo"]) + } + } + } + +We stored a map[string]string, but secure cookies can hold any value that +can be encoded using encoding/gob. To store custom types, they must be +registered first using gob.Register(). For basic types this is not needed; +it works out of the box. +*/ +package securecookie diff --git a/transfersh-server/vendor/github.com/gorilla/securecookie/fuzz.go b/transfersh-server/vendor/github.com/gorilla/securecookie/fuzz.go new file mode 100644 index 0000000..e4d0534 --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/securecookie/fuzz.go @@ -0,0 +1,25 @@ +// +build gofuzz + +package securecookie + +var hashKey = []byte("very-secret12345") +var blockKey = []byte("a-lot-secret1234") +var s = New(hashKey, blockKey) + +type Cookie struct { + B bool + I int + S string +} + +func Fuzz(data []byte) int { + datas := string(data) + var c Cookie + if err := s.Decode("fuzz", datas, &c); err != nil { + return 0 + } + if _, err := s.Encode("fuzz", c); err != nil { + panic(err) + } + return 1 +} diff --git a/transfersh-server/vendor/github.com/gorilla/securecookie/fuzz/gencorpus.go b/transfersh-server/vendor/github.com/gorilla/securecookie/fuzz/gencorpus.go new file mode 100644 index 0000000..368192b --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/securecookie/fuzz/gencorpus.go @@ -0,0 +1,47 @@ +package main + +import ( + "fmt" + "io" + "math/rand" + "os" + "reflect" + "testing/quick" + + "github.com/gorilla/securecookie" +) + +var hashKey = []byte("very-secret12345") +var blockKey = []byte("a-lot-secret1234") +var s = securecookie.New(hashKey, blockKey) + +type Cookie struct { + B bool + I int + S string +} + +func main() { + var c Cookie + t := reflect.TypeOf(c) + rnd := rand.New(rand.NewSource(0)) + for i := 0; i < 100; i++ { + v, ok := quick.Value(t, rnd) + if !ok { + panic("couldn't generate value") + } + encoded, err := s.Encode("fuzz", v.Interface()) + if err != nil { + panic(err) + } + f, err := os.Create(fmt.Sprintf("corpus/%d.sc", i)) + if err != nil { + panic(err) + } + _, err = io.WriteString(f, encoded) + if err != nil { + panic(err) + } + 
f.Close() + } +} diff --git a/transfersh-server/vendor/github.com/gorilla/securecookie/securecookie.go b/transfersh-server/vendor/github.com/gorilla/securecookie/securecookie.go new file mode 100644 index 0000000..83dd606 --- /dev/null +++ b/transfersh-server/vendor/github.com/gorilla/securecookie/securecookie.go @@ -0,0 +1,646 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securecookie + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "crypto/subtle" + "encoding/base64" + "encoding/gob" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + "strings" + "time" +) + +// Error is the interface of all errors returned by functions in this library. +type Error interface { + error + + // IsUsage returns true for errors indicating the client code probably + // uses this library incorrectly. For example, the client may have + // failed to provide a valid hash key, or may have failed to configure + // the Serializer adequately for encoding the value. + IsUsage() bool + + // IsDecode returns true for errors indicating that a cookie could not + // be decoded and validated. Since cookies are usually untrusted + // user-provided input, errors of this type should be expected. + // Usually, the proper action is simply to reject the request. + IsDecode() bool + + // IsInternal returns true for unexpected errors occurring in the + // securecookie implementation. + IsInternal() bool + + // Cause, if it returns a non-nil value, indicates that this error was + // propagated from some underlying library. If this method returns nil, + // this error was raised directly by this library. + // + // Cause is provided principally for debugging/logging purposes; it is + // rare that application logic should perform meaningfully different + // logic based on Cause. See, for example, the caveats described on + // (MultiError).Cause(). + Cause() error +} + +// errorType is a bitmask giving the error type(s) of a cookieError value. 
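+// (For example -- illustrative only -- a decode failure caused by an internal +// fault could carry both the decodeError and internalError bits.)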
+type errorType int + +const ( + usageError = errorType(1 << iota) + decodeError + internalError +) + +type cookieError struct { + typ errorType + msg string + cause error +} + +func (e cookieError) IsUsage() bool { return (e.typ & usageError) != 0 } +func (e cookieError) IsDecode() bool { return (e.typ & decodeError) != 0 } +func (e cookieError) IsInternal() bool { return (e.typ & internalError) != 0 } + +func (e cookieError) Cause() error { return e.cause } + +func (e cookieError) Error() string { + parts := []string{"securecookie: "} + if e.msg == "" { + parts = append(parts, "error") + } else { + parts = append(parts, e.msg) + } + if c := e.Cause(); c != nil { + parts = append(parts, " - caused by: ", c.Error()) + } + return strings.Join(parts, "") +} + +var ( + errGeneratingIV = cookieError{typ: internalError, msg: "failed to generate random iv"} + + errNoCodecs = cookieError{typ: usageError, msg: "no codecs provided"} + errHashKeyNotSet = cookieError{typ: usageError, msg: "hash key is not set"} + errBlockKeyNotSet = cookieError{typ: usageError, msg: "block key is not set"} + errEncodedValueTooLong = cookieError{typ: usageError, msg: "the value is too long"} + + errValueToDecodeTooLong = cookieError{typ: decodeError, msg: "the value is too long"} + errTimestampInvalid = cookieError{typ: decodeError, msg: "invalid timestamp"} + errTimestampTooNew = cookieError{typ: decodeError, msg: "timestamp is too new"} + errTimestampExpired = cookieError{typ: decodeError, msg: "expired timestamp"} + errDecryptionFailed = cookieError{typ: decodeError, msg: "the value could not be decrypted"} + errValueNotByte = cookieError{typ: decodeError, msg: "value not a []byte."} + + // ErrMacInvalid indicates that cookie decoding failed because the HMAC + // could not be extracted and verified. Direct use of this error + // variable is deprecated; it is public only for legacy compatibility, + // and may be privatized in the future, as it is rarely useful to + // distinguish between this error and other Error implementations. + ErrMacInvalid = cookieError{typ: decodeError, msg: "the value is not valid"} +) + +// Codec defines an interface to encode and decode cookie values. +type Codec interface { + Encode(name string, value interface{}) (string, error) + Decode(name, value string, dst interface{}) error +} + +// New returns a new SecureCookie. +// +// hashKey is required, used to authenticate values using HMAC. Create it using +// GenerateRandomKey(). It is recommended to use a key with 32 or 64 bytes. +// +// blockKey is optional, used to encrypt values. Create it using +// GenerateRandomKey(). The key length must correspond to the block size +// of the encryption algorithm. For AES, used by default, valid lengths are +// 16, 24, or 32 bytes to select AES-128, AES-192, or AES-256. +// The default encoder used for cookie serialization is encoding/gob. +// +// Note that keys created using GenerateRandomKey() are not automatically +// persisted. New keys will be created when the application is restarted, and +// previously issued cookies will not be able to be decoded. +func New(hashKey, blockKey []byte) *SecureCookie { + s := &SecureCookie{ + hashKey: hashKey, + blockKey: blockKey, + hashFunc: sha256.New, + maxAge: 86400 * 30, + maxLength: 4096, + sz: GobEncoder{}, + } + if hashKey == nil { + s.err = errHashKeyNotSet + } + if blockKey != nil { + s.BlockFunc(aes.NewCipher) + } + return s +} + +// SecureCookie encodes and decodes authenticated and optionally encrypted +// cookie values. 
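+// +// Instances returned by New default to HMAC-SHA256, a maximum age of +// 86400*30 seconds and a 4096-byte maximum length (see New above).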
+type SecureCookie struct {
+	hashKey   []byte
+	hashFunc  func() hash.Hash
+	blockKey  []byte
+	block     cipher.Block
+	maxLength int
+	maxAge    int64
+	minAge    int64
+	err       error
+	sz        Serializer
+	// For testing purposes, the function that returns the current timestamp.
+	// If not set, it will use time.Now().UTC().Unix().
+	timeFunc func() int64
+}
+
+// Serializer provides an interface for providing custom serializers for cookie
+// values.
+type Serializer interface {
+	Serialize(src interface{}) ([]byte, error)
+	Deserialize(src []byte, dst interface{}) error
+}
+
+// GobEncoder encodes cookie values using encoding/gob. This is the simplest
+// encoder and can handle complex types via gob.Register.
+type GobEncoder struct{}
+
+// JSONEncoder encodes cookie values using encoding/json. Users who wish to
+// encode complex types need to satisfy the json.Marshaller and
+// json.Unmarshaller interfaces.
+type JSONEncoder struct{}
+
+// NopEncoder does not encode cookie values, and instead simply accepts a []byte
+// (as an interface{}) and returns a []byte. This is particularly useful when
+// you are encoding an object upstream and do not wish to re-encode it.
+type NopEncoder struct{}
+
+// MaxLength restricts the maximum length, in bytes, for the cookie value.
+//
+// Default is 4096, which is the maximum value accepted by Internet Explorer.
+func (s *SecureCookie) MaxLength(value int) *SecureCookie {
+	s.maxLength = value
+	return s
+}
+
+// MaxAge restricts the maximum age, in seconds, for the cookie value.
+//
+// Default is 86400 * 30. Set it to 0 for no restriction.
+func (s *SecureCookie) MaxAge(value int) *SecureCookie {
+	s.maxAge = int64(value)
+	return s
+}
+
+// MinAge restricts the minimum age, in seconds, for the cookie value.
+//
+// Default is 0 (no restriction).
+func (s *SecureCookie) MinAge(value int) *SecureCookie {
+	s.minAge = int64(value)
+	return s
+}
+
+// HashFunc sets the hash function used to create HMAC.
+//
+// Default is crypto/sha256.New.
+func (s *SecureCookie) HashFunc(f func() hash.Hash) *SecureCookie {
+	s.hashFunc = f
+	return s
+}
+
+// BlockFunc sets the encryption function used to create a cipher.Block.
+//
+// Default is crypto/aes.NewCipher.
+func (s *SecureCookie) BlockFunc(f func([]byte) (cipher.Block, error)) *SecureCookie {
+	if s.blockKey == nil {
+		s.err = errBlockKeyNotSet
+	} else if block, err := f(s.blockKey); err == nil {
+		s.block = block
+	} else {
+		s.err = cookieError{cause: err, typ: usageError}
+	}
+	return s
+}
+
+// SetSerializer sets the encoding/serialization method for cookies.
+//
+// Default is encoding/gob. To encode special structures using encoding/gob,
+// they must be registered first using gob.Register().
+func (s *SecureCookie) SetSerializer(sz Serializer) *SecureCookie {
+	s.sz = sz
+
+	return s
+}
+
+// Encode encodes a cookie value.
+//
+// It serializes, optionally encrypts, signs with a message authentication code,
+// and finally encodes the value.
+//
+// The name argument is the cookie name. It is stored with the encoded value.
+// The value argument is the value to be encoded. It can be any value that can
+// be encoded using the currently selected serializer; see SetSerializer().
+//
+// It is the client's responsibility to ensure that value, when encoded using
+// the current serialization/encryption settings on s and then base64-encoded,
+// is shorter than the maximum permissible length.
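+//
+// A usage sketch inside an http.Handler (cookie name and payload are
+// illustrative):
+//
+//	if encoded, err := s.Encode("session", map[string]string{"user": "alice"}); err == nil {
+//		http.SetCookie(w, &http.Cookie{Name: "session", Value: encoded, Path: "/"})
+//	}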
+func (s *SecureCookie) Encode(name string, value interface{}) (string, error) { + if s.err != nil { + return "", s.err + } + if s.hashKey == nil { + s.err = errHashKeyNotSet + return "", s.err + } + var err error + var b []byte + // 1. Serialize. + if b, err = s.sz.Serialize(value); err != nil { + return "", cookieError{cause: err, typ: usageError} + } + // 2. Encrypt (optional). + if s.block != nil { + if b, err = encrypt(s.block, b); err != nil { + return "", cookieError{cause: err, typ: usageError} + } + } + b = encode(b) + // 3. Create MAC for "name|date|value". Extra pipe to be used later. + b = []byte(fmt.Sprintf("%s|%d|%s|", name, s.timestamp(), b)) + mac := createMac(hmac.New(s.hashFunc, s.hashKey), b[:len(b)-1]) + // Append mac, remove name. + b = append(b, mac...)[len(name)+1:] + // 4. Encode to base64. + b = encode(b) + // 5. Check length. + if s.maxLength != 0 && len(b) > s.maxLength { + return "", errEncodedValueTooLong + } + // Done. + return string(b), nil +} + +// Decode decodes a cookie value. +// +// It decodes, verifies a message authentication code, optionally decrypts and +// finally deserializes the value. +// +// The name argument is the cookie name. It must be the same name used when +// it was stored. The value argument is the encoded cookie value. The dst +// argument is where the cookie will be decoded. It must be a pointer. +func (s *SecureCookie) Decode(name, value string, dst interface{}) error { + if s.err != nil { + return s.err + } + if s.hashKey == nil { + s.err = errHashKeyNotSet + return s.err + } + // 1. Check length. + if s.maxLength != 0 && len(value) > s.maxLength { + return errValueToDecodeTooLong + } + // 2. Decode from base64. + b, err := decode([]byte(value)) + if err != nil { + return err + } + // 3. Verify MAC. Value is "date|value|mac". + parts := bytes.SplitN(b, []byte("|"), 3) + if len(parts) != 3 { + return ErrMacInvalid + } + h := hmac.New(s.hashFunc, s.hashKey) + b = append([]byte(name+"|"), b[:len(b)-len(parts[2])-1]...) + if err = verifyMac(h, b, parts[2]); err != nil { + return err + } + // 4. Verify date ranges. + var t1 int64 + if t1, err = strconv.ParseInt(string(parts[0]), 10, 64); err != nil { + return errTimestampInvalid + } + t2 := s.timestamp() + if s.minAge != 0 && t1 > t2-s.minAge { + return errTimestampTooNew + } + if s.maxAge != 0 && t1 < t2-s.maxAge { + return errTimestampExpired + } + // 5. Decrypt (optional). + b, err = decode(parts[1]) + if err != nil { + return err + } + if s.block != nil { + if b, err = decrypt(s.block, b); err != nil { + return err + } + } + // 6. Deserialize. + if err = s.sz.Deserialize(b, dst); err != nil { + return cookieError{cause: err, typ: decodeError} + } + // Done. + return nil +} + +// timestamp returns the current timestamp, in seconds. +// +// For testing purposes, the function that generates the timestamp can be +// overridden. If not set, it will return time.Now().UTC().Unix(). +func (s *SecureCookie) timestamp() int64 { + if s.timeFunc == nil { + return time.Now().UTC().Unix() + } + return s.timeFunc() +} + +// Authentication ------------------------------------------------------------- + +// createMac creates a message authentication code (MAC). +func createMac(h hash.Hash, value []byte) []byte { + h.Write(value) + return h.Sum(nil) +} + +// verifyMac verifies that a message authentication code (MAC) is valid. 
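+// Note that a fresh hash.Hash is required per call, since createMac writes
+// into it without resetting. Sketch of the round-trip invariant (key and msg
+// are placeholders):
+//
+//	mac := createMac(hmac.New(sha256.New, key), msg)
+//	err := verifyMac(hmac.New(sha256.New, key), msg, mac) // err == nil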
+func verifyMac(h hash.Hash, value []byte, mac []byte) error { + mac2 := createMac(h, value) + // Check that both MACs are of equal length, as subtle.ConstantTimeCompare + // does not do this prior to Go 1.4. + if len(mac) == len(mac2) && subtle.ConstantTimeCompare(mac, mac2) == 1 { + return nil + } + return ErrMacInvalid +} + +// Encryption ----------------------------------------------------------------- + +// encrypt encrypts a value using the given block in counter mode. +// +// A random initialization vector (http://goo.gl/zF67k) with the length of the +// block size is prepended to the resulting ciphertext. +func encrypt(block cipher.Block, value []byte) ([]byte, error) { + iv := GenerateRandomKey(block.BlockSize()) + if iv == nil { + return nil, errGeneratingIV + } + // Encrypt it. + stream := cipher.NewCTR(block, iv) + stream.XORKeyStream(value, value) + // Return iv + ciphertext. + return append(iv, value...), nil +} + +// decrypt decrypts a value using the given block in counter mode. +// +// The value to be decrypted must be prepended by a initialization vector +// (http://goo.gl/zF67k) with the length of the block size. +func decrypt(block cipher.Block, value []byte) ([]byte, error) { + size := block.BlockSize() + if len(value) > size { + // Extract iv. + iv := value[:size] + // Extract ciphertext. + value = value[size:] + // Decrypt it. + stream := cipher.NewCTR(block, iv) + stream.XORKeyStream(value, value) + return value, nil + } + return nil, errDecryptionFailed +} + +// Serialization -------------------------------------------------------------- + +// Serialize encodes a value using gob. +func (e GobEncoder) Serialize(src interface{}) ([]byte, error) { + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + if err := enc.Encode(src); err != nil { + return nil, cookieError{cause: err, typ: usageError} + } + return buf.Bytes(), nil +} + +// Deserialize decodes a value using gob. +func (e GobEncoder) Deserialize(src []byte, dst interface{}) error { + dec := gob.NewDecoder(bytes.NewBuffer(src)) + if err := dec.Decode(dst); err != nil { + return cookieError{cause: err, typ: decodeError} + } + return nil +} + +// Serialize encodes a value using encoding/json. +func (e JSONEncoder) Serialize(src interface{}) ([]byte, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + if err := enc.Encode(src); err != nil { + return nil, cookieError{cause: err, typ: usageError} + } + return buf.Bytes(), nil +} + +// Deserialize decodes a value using encoding/json. +func (e JSONEncoder) Deserialize(src []byte, dst interface{}) error { + dec := json.NewDecoder(bytes.NewReader(src)) + if err := dec.Decode(dst); err != nil { + return cookieError{cause: err, typ: decodeError} + } + return nil +} + +// Serialize passes a []byte through as-is. +func (e NopEncoder) Serialize(src interface{}) ([]byte, error) { + if b, ok := src.([]byte); ok { + return b, nil + } + + return nil, errValueNotByte +} + +// Deserialize passes a []byte through as-is. +func (e NopEncoder) Deserialize(src []byte, dst interface{}) error { + if _, ok := dst.([]byte); ok { + dst = src + return nil + } + + return errValueNotByte +} + +// Encoding ------------------------------------------------------------------- + +// encode encodes a value using base64. +func encode(value []byte) []byte { + encoded := make([]byte, base64.URLEncoding.EncodedLen(len(value))) + base64.URLEncoding.Encode(encoded, value) + return encoded +} + +// decode decodes a cookie using base64. 
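+// URL-safe base64 (base64.URLEncoding) is used in both directions, so encoded
+// values need no further escaping inside a Set-Cookie header. For instance,
+// decode([]byte("aGk=")) returns []byte("hi").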
+func decode(value []byte) ([]byte, error) { + decoded := make([]byte, base64.URLEncoding.DecodedLen(len(value))) + b, err := base64.URLEncoding.Decode(decoded, value) + if err != nil { + return nil, cookieError{cause: err, typ: decodeError, msg: "base64 decode failed"} + } + return decoded[:b], nil +} + +// Helpers -------------------------------------------------------------------- + +// GenerateRandomKey creates a random key with the given length in bytes. +// On failure, returns nil. +// +// Callers should explicitly check for the possibility of a nil return, treat +// it as a failure of the system random number generator, and not continue. +func GenerateRandomKey(length int) []byte { + k := make([]byte, length) + if _, err := io.ReadFull(rand.Reader, k); err != nil { + return nil + } + return k +} + +// CodecsFromPairs returns a slice of SecureCookie instances. +// +// It is a convenience function to create a list of codecs for key rotation. Note +// that the generated Codecs will have the default options applied: callers +// should iterate over each Codec and type-assert the underlying *SecureCookie to +// change these. +// +// Example: +// +// codecs := securecookie.CodecsFromPairs( +// []byte("new-hash-key"), +// []byte("new-block-key"), +// []byte("old-hash-key"), +// []byte("old-block-key"), +// ) +// +// // Modify each instance. +// for _, s := range codecs { +// if cookie, ok := s.(*securecookie.SecureCookie); ok { +// cookie.MaxAge(86400 * 7) +// cookie.SetSerializer(securecookie.JSONEncoder{}) +// cookie.HashFunc(sha512.New512_256) +// } +// } +// +func CodecsFromPairs(keyPairs ...[]byte) []Codec { + codecs := make([]Codec, len(keyPairs)/2+len(keyPairs)%2) + for i := 0; i < len(keyPairs); i += 2 { + var blockKey []byte + if i+1 < len(keyPairs) { + blockKey = keyPairs[i+1] + } + codecs[i/2] = New(keyPairs[i], blockKey) + } + return codecs +} + +// EncodeMulti encodes a cookie value using a group of codecs. +// +// The codecs are tried in order. Multiple codecs are accepted to allow +// key rotation. +// +// On error, may return a MultiError. +func EncodeMulti(name string, value interface{}, codecs ...Codec) (string, error) { + if len(codecs) == 0 { + return "", errNoCodecs + } + + var errors MultiError + for _, codec := range codecs { + encoded, err := codec.Encode(name, value) + if err == nil { + return encoded, nil + } + errors = append(errors, err) + } + return "", errors +} + +// DecodeMulti decodes a cookie value using a group of codecs. +// +// The codecs are tried in order. Multiple codecs are accepted to allow +// key rotation. +// +// On error, may return a MultiError. +func DecodeMulti(name string, value string, dst interface{}, codecs ...Codec) error { + if len(codecs) == 0 { + return errNoCodecs + } + + var errors MultiError + for _, codec := range codecs { + err := codec.Decode(name, value, dst) + if err == nil { + return nil + } + errors = append(errors, err) + } + return errors +} + +// MultiError groups multiple errors. +type MultiError []error + +func (m MultiError) IsUsage() bool { return m.any(func(e Error) bool { return e.IsUsage() }) } +func (m MultiError) IsDecode() bool { return m.any(func(e Error) bool { return e.IsDecode() }) } +func (m MultiError) IsInternal() bool { return m.any(func(e Error) bool { return e.IsInternal() }) } + +// Cause returns nil for MultiError; there is no unique underlying cause in the +// general case. +// +// Note: we could conceivably return a non-nil Cause only when there is exactly +// one child error with a Cause. 
However, it would be brittle for client code +// to rely on the arity of causes inside a MultiError, so we have opted not to +// provide this functionality. Clients which really wish to access the Causes +// of the underlying errors are free to iterate through the errors themselves. +func (m MultiError) Cause() error { return nil } + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} + +// any returns true if any element of m is an Error for which pred returns true. +func (m MultiError) any(pred func(Error) bool) bool { + for _, e := range m { + if ourErr, ok := e.(Error); ok && pred(ourErr) { + return true + } + } + return false +} diff --git a/transfersh-server/vendor/github.com/kennygrant/sanitize/sanitize.go b/transfersh-server/vendor/github.com/kennygrant/sanitize/sanitize.go new file mode 100644 index 0000000..86d9e7d --- /dev/null +++ b/transfersh-server/vendor/github.com/kennygrant/sanitize/sanitize.go @@ -0,0 +1,384 @@ +// Package sanitize provides functions for sanitizing text. +package sanitize + +import ( + "bytes" + "html" + "html/template" + "io" + "path" + "regexp" + "strings" + + parser "golang.org/x/net/html" +) + +var ( + ignoreTags = []string{"title", "script", "style", "iframe", "frame", "frameset", "noframes", "noembed", "embed", "applet", "object", "base"} + + defaultTags = []string{"h1", "h2", "h3", "h4", "h5", "h6", "div", "span", "hr", "p", "br", "b", "i", "strong", "em", "ol", "ul", "li", "a", "img", "pre", "code", "blockquote"} + + defaultAttributes = []string{"id", "class", "src", "href", "title", "alt", "name", "rel"} +) + +// HTMLAllowing sanitizes html, allowing some tags. +// Arrays of allowed tags and allowed attributes may optionally be passed as the second and third arguments. 
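+//
+// A sketch of the default behaviour (the output follows from the default
+// whitelists above):
+//
+//	out, err := sanitize.HTMLAllowing(`<p onclick="x()">hi <script>bad()</script></p>`)
+//	// out == "<p>hi </p>", err == nil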
+func HTMLAllowing(s string, args ...[]string) (string, error) {
+
+	allowedTags := defaultTags
+	if len(args) > 0 {
+		allowedTags = args[0]
+	}
+	allowedAttributes := defaultAttributes
+	if len(args) > 1 {
+		allowedAttributes = args[1]
+	}
+
+	// Parse the html
+	tokenizer := parser.NewTokenizer(strings.NewReader(s))
+
+	buffer := bytes.NewBufferString("")
+	ignore := ""
+
+	for {
+		tokenType := tokenizer.Next()
+		token := tokenizer.Token()
+
+		switch tokenType {
+
+		case parser.ErrorToken:
+			err := tokenizer.Err()
+			if err == io.EOF {
+				return buffer.String(), nil
+			}
+			return "", err
+
+		case parser.StartTagToken:
+
+			if len(ignore) == 0 && includes(allowedTags, token.Data) {
+				token.Attr = cleanAttributes(token.Attr, allowedAttributes)
+				buffer.WriteString(token.String())
+			} else if includes(ignoreTags, token.Data) {
+				ignore = token.Data
+			}
+
+		case parser.SelfClosingTagToken:
+
+			if len(ignore) == 0 && includes(allowedTags, token.Data) {
+				token.Attr = cleanAttributes(token.Attr, allowedAttributes)
+				buffer.WriteString(token.String())
+			} else if token.Data == ignore {
+				ignore = ""
+			}
+
+		case parser.EndTagToken:
+			if len(ignore) == 0 && includes(allowedTags, token.Data) {
+				token.Attr = []parser.Attribute{}
+				buffer.WriteString(token.String())
+			} else if token.Data == ignore {
+				ignore = ""
+			}
+
+		case parser.TextToken:
+			// We allow text content through, unless ignoring this entire tag and its contents (including other tags)
+			if ignore == "" {
+				buffer.WriteString(token.String())
+			}
+		case parser.CommentToken:
+			// We ignore comments by default
+		case parser.DoctypeToken:
+			// We ignore doctypes by default - html5 does not require them and this is intended for sanitizing snippets of text
+		default:
+			// We ignore unknown token types by default
+
+		}
+
+	}
+
+}
+
+// HTML strips html tags, replaces common entities, and escapes <>&;'" in the result.
+// Note the returned text may contain entities as it is escaped by HTMLEscapeString, and most entities are not translated.
+func HTML(s string) string {
+
+	output := ""
+
+	// Shortcut strings with no tags in them
+	if !strings.ContainsAny(s, "<>") {
+		output = s
+	} else {
+
+		// First remove line breaks etc as these have no meaning outside html tags (except pre)
+		// this means pre sections will lose formatting... but will result in less unintentional paras.
+		s = strings.Replace(s, "\n", "", -1)
+
+		// Then replace line breaks with newlines, to preserve that formatting
+		s = strings.Replace(s, "</p>", "\n", -1)
+		s = strings.Replace(s, "<br>", "\n", -1)
+		s = strings.Replace(s, "</br>", "\n", -1)
+		s = strings.Replace(s, "<br/>", "\n", -1)
+
+		// Walk through the string removing all tags
+		b := bytes.NewBufferString("")
+		inTag := false
+		for _, r := range s {
+			switch r {
+			case '<':
+				inTag = true
+			case '>':
+				inTag = false
+			default:
+				if !inTag {
+					b.WriteRune(r)
+				}
+			}
+		}
+		output = b.String()
+	}
+
+	// Remove a few common harmless entities, to arrive at something more like plain text
+	output = strings.Replace(output, "&#8216;", "'", -1)
+	output = strings.Replace(output, "&#8217;", "'", -1)
+	output = strings.Replace(output, "&#8220;", "\"", -1)
+	output = strings.Replace(output, "&#8221;", "\"", -1)
+	output = strings.Replace(output, "&nbsp;", " ", -1)
+	output = strings.Replace(output, "&quot;", "\"", -1)
+	output = strings.Replace(output, "&apos;", "'", -1)
+
+	// Translate some entities into their plain text equivalent (for example accents, if encoded as entities)
+	output = html.UnescapeString(output)
+
+	// In case we have missed any tags above, escape the text - removes <, >, &, ' and ".
+	output = template.HTMLEscapeString(output)
+
+	// After processing, remove some harmless entities &, ' and " which are encoded by HTMLEscapeString
+	output = strings.Replace(output, "&#34;", "\"", -1)
+	output = strings.Replace(output, "&#39;", "'", -1)
+	output = strings.Replace(output, "&amp; ", "& ", -1)     // NB space after
+	output = strings.Replace(output, "&amp;amp; ", "& ", -1) // NB space after
+
+	return output
+}
+
+// We are very restrictive as this is intended for ascii url slugs
+var illegalPath = regexp.MustCompile(`[^[:alnum:]\~\-\./]`)
+
+// Path makes a string safe to use as an url path.
+func Path(s string) string {
+	// Start with lowercase string
+	filePath := strings.ToLower(s)
+	filePath = strings.Replace(filePath, "..", "", -1)
+	filePath = path.Clean(filePath)
+
+	// Remove illegal characters for paths, flattening accents and replacing some common separators with -
+	filePath = cleanString(filePath, illegalPath)
+
+	// NB this may be of length 0, caller must check
+	return filePath
+}
+
+// Remove all other unrecognised characters apart from alphanumerics, dash and dot.
+var illegalName = regexp.MustCompile(`[^[:alnum:]-.]`)
+
+// Name makes a string safe to use in a file name by first finding the path basename, then replacing non-ascii characters.
+func Name(s string) string {
+	// Start with lowercase string
+	fileName := strings.ToLower(s)
+	fileName = path.Clean(path.Base(fileName))
+
+	// Remove illegal characters for names, replacing some common separators with -
+	fileName = cleanString(fileName, illegalName)
+
+	// NB this may be of length 0, caller must check
+	return fileName
+}
+
+// Replace these separators with -
+var baseNameSeparators = regexp.MustCompile(`[./]`)
+
+// BaseName makes a string safe to use in a file name, producing a sanitized basename replacing . or / with -.
+// No attempt is made to normalise a path or normalise case.
+func BaseName(s string) string {
+
+	// Replace certain joining characters with a dash
+	baseName := baseNameSeparators.ReplaceAllString(s, "-")
+
+	// Remove illegal characters for names, replacing some common separators with -
+	baseName = cleanString(baseName, illegalName)
+
+	// NB this may be of length 0, caller must check
+	return baseName
+}
+
+// A very limited list of transliterations to catch common european names translated to urls.
+// This set could be expanded with at least caps and many more characters.
+var transliterations = map[rune]string{ + 'À': "A", + 'Á': "A", + 'Â': "A", + 'Ã': "A", + 'Ä': "A", + 'Å': "AA", + 'Æ': "AE", + 'Ç': "C", + 'È': "E", + 'É': "E", + 'Ê': "E", + 'Ë': "E", + 'Ì': "I", + 'Í': "I", + 'Î': "I", + 'Ï': "I", + 'Ð': "D", + 'Ł': "L", + 'Ñ': "N", + 'Ò': "O", + 'Ó': "O", + 'Ô': "O", + 'Õ': "O", + 'Ö': "O", + 'Ø': "OE", + 'Ù': "U", + 'Ú': "U", + 'Ü': "U", + 'Û': "U", + 'Ý': "Y", + 'Þ': "Th", + 'ß': "ss", + 'à': "a", + 'á': "a", + 'â': "a", + 'ã': "a", + 'ä': "a", + 'å': "aa", + 'æ': "ae", + 'ç': "c", + 'è': "e", + 'é': "e", + 'ê': "e", + 'ë': "e", + 'ì': "i", + 'í': "i", + 'î': "i", + 'ï': "i", + 'ð': "d", + 'ł': "l", + 'ñ': "n", + 'ń': "n", + 'ò': "o", + 'ó': "o", + 'ô': "o", + 'õ': "o", + 'ō': "o", + 'ö': "o", + 'ø': "oe", + 'ś': "s", + 'ù': "u", + 'ú': "u", + 'û': "u", + 'ū': "u", + 'ü': "u", + 'ý': "y", + 'þ': "th", + 'ÿ': "y", + 'ż': "z", + 'Œ': "OE", + 'œ': "oe", +} + +// Accents replaces a set of accented characters with ascii equivalents. +func Accents(s string) string { + // Replace some common accent characters + b := bytes.NewBufferString("") + for _, c := range s { + // Check transliterations first + if val, ok := transliterations[c]; ok { + b.WriteString(val) + } else { + b.WriteRune(c) + } + } + return b.String() +} + +var ( + // If the attribute contains data: or javascript: anywhere, ignore it + // we don't allow this in attributes as it is so frequently used for xss + // NB we allow spaces in the value, and lowercase. + illegalAttr = regexp.MustCompile(`(d\s*a\s*t\s*a|j\s*a\s*v\s*a\s*s\s*c\s*r\s*i\s*p\s*t\s*)\s*:`) + + // We are far more restrictive with href attributes. + legalHrefAttr = regexp.MustCompile(`\A[/#][^/\\]?|mailto://|http://|https://`) +) + +// cleanAttributes returns an array of attributes after removing malicious ones. +func cleanAttributes(a []parser.Attribute, allowed []string) []parser.Attribute { + if len(a) == 0 { + return a + } + + var cleaned []parser.Attribute + for _, attr := range a { + if includes(allowed, attr.Key) { + + val := strings.ToLower(attr.Val) + + // Check for illegal attribute values + if illegalAttr.FindString(val) != "" { + attr.Val = "" + } + + // Check for legal href values - / mailto:// http:// or https:// + if attr.Key == "href" { + if legalHrefAttr.FindString(val) == "" { + attr.Val = "" + } + } + + // If we still have an attribute, append it to the array + if attr.Val != "" { + cleaned = append(cleaned, attr) + } + } + } + return cleaned +} + +// A list of characters we consider separators in normal strings and replace with our canonical separator - rather than removing. +var ( + separators = regexp.MustCompile(`[ &_=+:]`) + + dashes = regexp.MustCompile(`[\-]+`) +) + +// cleanString replaces separators with - and removes characters listed in the regexp provided from string. +// Accents, spaces, and all characters not in A-Za-z0-9 are replaced. +func cleanString(s string, r *regexp.Regexp) string { + + // Remove any trailing space to avoid ending on - + s = strings.Trim(s, " ") + + // Flatten accents first so that if we remove non-ascii we still get a legible name + s = Accents(s) + + // Replace certain joining characters with a dash + s = separators.ReplaceAllString(s, "-") + + // Remove all other unrecognised characters - NB we do allow any printable characters + s = r.ReplaceAllString(s, "") + + // Remove any multiple dashes caused by replacements above + s = dashes.ReplaceAllString(s, "-") + + return s +} + +// includes checks for inclusion of a string in a []string. 
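+// For example, includes(defaultTags, "script") == false, which is why script
+// elements never survive HTMLAllowing under the default whitelist.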
+func includes(a []string, s string) bool { + for _, as := range a { + if as == s { + return true + } + } + return false +} diff --git a/transfersh-server/vendor/github.com/nu7hatch/gouuid/COPYING b/transfersh-server/vendor/github.com/nu7hatch/gouuid/COPYING new file mode 100644 index 0000000..d7849fd --- /dev/null +++ b/transfersh-server/vendor/github.com/nu7hatch/gouuid/COPYING @@ -0,0 +1,19 @@ +Copyright (C) 2011 by Krzysztof Kowalik + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/transfersh-server/vendor/github.com/nu7hatch/gouuid/uuid.go b/transfersh-server/vendor/github.com/nu7hatch/gouuid/uuid.go new file mode 100644 index 0000000..ac9623b --- /dev/null +++ b/transfersh-server/vendor/github.com/nu7hatch/gouuid/uuid.go @@ -0,0 +1,173 @@ +// This package provides immutable UUID structs and the functions +// NewV3, NewV4, NewV5 and Parse() for generating versions 3, 4 +// and 5 UUIDs as specified in RFC 4122. +// +// Copyright (C) 2011 by Krzysztof Kowalik +package uuid + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/hex" + "errors" + "fmt" + "hash" + "regexp" +) + +// The UUID reserved variants. +const ( + ReservedNCS byte = 0x80 + ReservedRFC4122 byte = 0x40 + ReservedMicrosoft byte = 0x20 + ReservedFuture byte = 0x00 +) + +// The following standard UUIDs are for use with NewV3() or NewV5(). +var ( + NamespaceDNS, _ = ParseHex("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NamespaceURL, _ = ParseHex("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NamespaceOID, _ = ParseHex("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NamespaceX500, _ = ParseHex("6ba7b814-9dad-11d1-80b4-00c04fd430c8") +) + +// Pattern used to parse hex string representation of the UUID. +// FIXME: do something to consider both brackets at one time, +// current one allows to parse string with only one opening +// or closing bracket. +const hexPattern = "^(urn\\:uuid\\:)?\\{?([a-z0-9]{8})-([a-z0-9]{4})-" + + "([1-5][a-z0-9]{3})-([a-z0-9]{4})-([a-z0-9]{12})\\}?$" + +var re = regexp.MustCompile(hexPattern) + +// A UUID representation compliant with specification in +// RFC 4122 document. +type UUID [16]byte + +// ParseHex creates a UUID object from given hex string +// representation. 
+// Function accepts UUID string in following
+// formats:
+//
+//	uuid.ParseHex("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+//	uuid.ParseHex("{6ba7b814-9dad-11d1-80b4-00c04fd430c8}")
+//	uuid.ParseHex("urn:uuid:6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+//
+func ParseHex(s string) (u *UUID, err error) {
+	md := re.FindStringSubmatch(s)
+	if md == nil {
+		err = errors.New("Invalid UUID string")
+		return
+	}
+	hash := md[2] + md[3] + md[4] + md[5] + md[6]
+	b, err := hex.DecodeString(hash)
+	if err != nil {
+		return
+	}
+	u = new(UUID)
+	copy(u[:], b)
+	return
+}
+
+// Parse creates a UUID object from given bytes slice.
+func Parse(b []byte) (u *UUID, err error) {
+	if len(b) != 16 {
+		err = errors.New("Given slice is not valid UUID sequence")
+		return
+	}
+	u = new(UUID)
+	copy(u[:], b)
+	return
+}
+
+// Generate a UUID based on the MD5 hash of a namespace identifier
+// and a name.
+func NewV3(ns *UUID, name []byte) (u *UUID, err error) {
+	if ns == nil {
+		err = errors.New("Invalid namespace UUID")
+		return
+	}
+	u = new(UUID)
+	// Set all bits to MD5 hash generated from namespace and name.
+	u.setBytesFromHash(md5.New(), ns[:], name)
+	u.setVariant(ReservedRFC4122)
+	u.setVersion(3)
+	return
+}
+
+// Generate a random UUID.
+func NewV4() (u *UUID, err error) {
+	u = new(UUID)
+	// Set all bits to randomly (or pseudo-randomly) chosen values.
+	_, err = rand.Read(u[:])
+	if err != nil {
+		return
+	}
+	u.setVariant(ReservedRFC4122)
+	u.setVersion(4)
+	return
+}
+
+// Generate a UUID based on the SHA-1 hash of a namespace identifier
+// and a name.
+func NewV5(ns *UUID, name []byte) (u *UUID, err error) {
+	u = new(UUID)
+	// Set all bits to truncated SHA1 hash generated from namespace
+	// and name.
+	u.setBytesFromHash(sha1.New(), ns[:], name)
+	u.setVariant(ReservedRFC4122)
+	u.setVersion(5)
+	return
+}
+
+// Generate a MD5 hash of a namespace and a name, and copy it to the
+// UUID slice.
+func (u *UUID) setBytesFromHash(hash hash.Hash, ns, name []byte) {
+	hash.Write(ns[:])
+	hash.Write(name)
+	copy(u[:], hash.Sum([]byte{})[:16])
+}
+
+// Set the two most significant bits (bits 6 and 7) of the
+// clock_seq_hi_and_reserved to zero and one, respectively.
+func (u *UUID) setVariant(v byte) {
+	switch v {
+	case ReservedNCS:
+		u[8] = (u[8] | ReservedNCS) & 0xBF
+	case ReservedRFC4122:
+		u[8] = (u[8] | ReservedRFC4122) & 0x7F
+	case ReservedMicrosoft:
+		u[8] = (u[8] | ReservedMicrosoft) & 0x3F
+	}
+}
+
+// Variant returns the UUID Variant, which determines the internal
+// layout of the UUID. This will be one of the constants: ReservedNCS,
+// ReservedRFC4122, ReservedMicrosoft, ReservedFuture.
+func (u *UUID) Variant() byte {
+	if u[8]&ReservedNCS == ReservedNCS {
+		return ReservedNCS
+	} else if u[8]&ReservedRFC4122 == ReservedRFC4122 {
+		return ReservedRFC4122
+	} else if u[8]&ReservedMicrosoft == ReservedMicrosoft {
+		return ReservedMicrosoft
+	}
+	return ReservedFuture
+}
+
+// Set the four most significant bits (bits 12 through 15) of the
+// time_hi_and_version field to the 4-bit version number.
+func (u *UUID) setVersion(v byte) {
+	u[6] = (u[6] & 0xF) | (v << 4)
+}
+
+// Version returns a version number of the algorithm used to
+// generate the UUID sequence.
+func (u *UUID) Version() uint {
+	return uint(u[6] >> 4)
+}
+
+// String returns the unparsed version of the generated UUID sequence.
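+//
+// A usage sketch (the printed value of a V4 UUID is random; the one shown is
+// illustrative):
+//
+//	u, err := uuid.NewV4()
+//	if err == nil {
+//		fmt.Println(u.String()) // e.g. "1f0e6f2a-3c76-4f0c-9b14-5d9f2f6c2a1e"
+//	}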
+func (u *UUID) String() string {
+	return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
diff --git a/transfersh-server/vendor/github.com/russross/blackfriday/LICENSE.txt b/transfersh-server/vendor/github.com/russross/blackfriday/LICENSE.txt
new file mode 100644
index 0000000..2885af3
--- /dev/null
+++ b/transfersh-server/vendor/github.com/russross/blackfriday/LICENSE.txt
@@ -0,0 +1,29 @@
+Blackfriday is distributed under the Simplified BSD License:
+
+> Copyright © 2011 Russ Ross
+> All rights reserved.
+>
+> Redistribution and use in source and binary forms, with or without
+> modification, are permitted provided that the following conditions
+> are met:
+>
+> 1. Redistributions of source code must retain the above copyright
+>    notice, this list of conditions and the following disclaimer.
+>
+> 2. Redistributions in binary form must reproduce the above
+>    copyright notice, this list of conditions and the following
+>    disclaimer in the documentation and/or other materials provided with
+>    the distribution.
+>
+> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+> POSSIBILITY OF SUCH DAMAGE.
diff --git a/transfersh-server/vendor/github.com/russross/blackfriday/block.go b/transfersh-server/vendor/github.com/russross/blackfriday/block.go
new file mode 100644
index 0000000..3f4af78
--- /dev/null
+++ b/transfersh-server/vendor/github.com/russross/blackfriday/block.go
@@ -0,0 +1,1420 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse block-level elements.
+//
+
+package blackfriday
+
+import (
+	"bytes"
+
+	"github.com/shurcooL/sanitized_anchor_name"
+)
+
+// Parse block-level data.
+// Note: this function and many that it calls assume that
+// the input buffer ends with a newline.
+func (p *parser) block(out *bytes.Buffer, data []byte) {
+	if len(data) == 0 || data[len(data)-1] != '\n' {
+		panic("block input is missing terminating newline")
+	}
+
+	// this is called recursively: enforce a maximum depth
+	if p.nesting >= p.maxNesting {
+		return
+	}
+	p.nesting++
+
+	// parse out one block-level construct at a time
+	for len(data) > 0 {
+		// prefixed header:
+		//
+		// # Header 1
+		// ## Header 2
+		// ...
+		// ###### Header 6
+		if p.isPrefixHeader(data) {
+			data = data[p.prefixHeader(out, data):]
+			continue
+		}
+
+		// block of preformatted HTML:
+		//
+		// <div>
+		//     ...
+		// </div>
+ if data[0] == '<' { + if i := p.html(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.flags&EXTENSION_TITLEBLOCK != 0 { + if data[0] == '%' { + if i := p.titleBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(out, data):] + continue + } + + // fenced code block: + // + // ``` go + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCode(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.r.HRule(out) + var i int + for i = 0; data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(out, data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.flags&EXTENSION_TABLES != 0 { + if i := p.table(out, data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(out, data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_ORDERED):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_DEFINITION):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headers, too + data = data[p.paragraph(out, data):] + } + + p.nesting-- +} + +func (p *parser) isPrefixHeader(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.flags&EXTENSION_SPACE_HEADERS != 0 { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + if data[level] != ' ' { + return false + } + } + return true +} + +func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.flags&EXTENSION_HEADER_IDS != 0 { + j, k := 0, 0 + // find start/end of header id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract header id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = sanitized_anchor_name.Create(string(data[i:end])) + } + work := func() bool { + p.inline(out, data[i:end]) + return true + } + p.r.Header(out, work, level, id) + } + return skip +} + +func (p *parser) isUnderlinedHeader(data []byte) int { + // test of level 1 header + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 1 + } else { + return 0 + } + } + + // test of level 2 header + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 2 + } else { + return 0 + } + } + + return 0 +} + +func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + p.r.TitleBlock(out, data) + + return len(data) +} + +func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(out, data, doRender); size > 0 { + return size + } + + // check for an
tag + if size := p.htmlHr(out, data, doRender); size > 0 { + return size + } + + // check for HTML CDATA + if size := p.htmlCDATA(out, data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + + return i +} + +func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int { + // html block needs to end with a blank line + if i := p.isEmpty(data[start:]); i > 0 { + size := start + i + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + return size + } + return 0 +} + +// HTML comment, lax form +func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int { + i := p.inlineHTMLComment(out, data) + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HTML CDATA section +func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int { + const cdataTag = "') { + i++ + } + i++ + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HR, which is the only self-closing block tag considered +func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
tag after all; at least not a valid one + return 0 + } + + i := 3 + for data[i] != '>' && data[i] != '\n' { + i++ + } + + if data[i] == '>' { + return p.renderHTMLBlock(out, data, i+1, doRender) + } + + return 0 +} + +func (p *parser) htmlFindTag(data []byte) (string, bool) { + i := 0 + for isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *parser) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (p *parser) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + return i + 1 +} + +func (p *parser) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +func (p *parser) isFencedCode(data []byte, syntax **string, oldmarker string) (skip int, marker string) { + i, size := 0, 0 + skip = 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + if i >= len(data) { + return + } + + // check for the marker characters: ~ or ` + if data[i] != '~' && data[i] != '`' { + return + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + if i >= len(data) { + return + } + + // the marker char must occur at least 3 times + if size < 3 { + return + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return + } + + if syntax != nil { + syn := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + return + } + + syntaxStart := i + + if data[i] == '{' { + i++ + syntaxStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + syn++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return + } + + // strip all whitespace at the beginning and the end + // of the {} block + for syn > 0 && isspace(data[syntaxStart]) { + syntaxStart++ + syn-- + } + + for syn > 0 && isspace(data[syntaxStart+syn-1]) { + syn-- + } + + i++ + } else { + for i < len(data) && !isspace(data[i]) { + syn++ + i++ + } + } + + language := string(data[syntaxStart : syntaxStart+syn]) + *syntax = &language + } + + i = skipChar(data, i, ' ') + if i >= len(data) || data[i] != '\n' { + return + } + + skip = i + 1 + return +} + +func (p *parser) fencedCode(out *bytes.Buffer, data []byte, doRender bool) int { + var lang *string + beg, marker := p.isFencedCode(data, &lang, "") + if beg == 0 || beg >= len(data) { + return 0 + } 
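+
+	// The loop below copies lines verbatim until isFencedCode recognizes a
+	// closing fence with the same marker; if the buffer runs out first, the
+	// function returns 0 and the text is reparsed as ordinary paragraph text.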
+ + var work bytes.Buffer + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + fenceEnd, _ := p.isFencedCode(data[beg:], nil, marker) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? + if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + syntax := "" + if lang != nil { + syntax = *lang + } + + if doRender { + p.r.BlockCode(out, work.Bytes(), syntax) + } + + return beg +} + +func (p *parser) table(out *bytes.Buffer, data []byte) int { + var header bytes.Buffer + i, columns := p.tableHeader(&header, data) + if i == 0 { + return 0 + } + + var body bytes.Buffer + + for i < len(data) { + pipes, rowStart := 0, i + for ; data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + i++ + p.tableRow(&body, data[rowStart:i], columns, false) + } + + p.r.Table(out, header.Bytes(), body.Bytes(), columns) + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { + i := 0 + colCount := 1 + for i = 0; data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + header := data[:i+1] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]int, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_LEFT + dashes++ + } + for data[i] == '-' { + i++ + dashes++ + } + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_RIGHT + dashes++ + } + for data[i] == ' ' { + i++ + } + + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.tableRow(out, header, columns, true) + size = i + 1 + return +} + +func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { + i, col := 0, 0 + var rowWork bytes.Buffer + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for data[i] == ' ' { + i++ + } + + cellStart := i + + for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && data[cellEnd-1] == ' ' { + cellEnd-- + } + + var cellWork bytes.Buffer + p.inline(&cellWork, data[cellStart:cellEnd]) + + if header { + p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) + } else { + p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) + } + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + if header { + p.r.TableHeaderCell(&rowWork, nil, columns[col]) + } else { + p.r.TableCell(&rowWork, nil, columns[col]) + } + } + + // silently ignore rows with too many cells + + p.r.TableRow(out, rowWork.Bytes()) +} + +// returns blockquote prefix length +func (p *parser) quotePrefix(data []byte) int { + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + if data[i] == '>' { + if data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *parser) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *parser) quote(out *bytes.Buffer, data []byte) int { + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
+		// While doing that, check for
+		// fenced code and if one's found, incorporate it altogether,
+		// regardless of any contents inside it
+		for data[end] != '\n' {
+			if p.flags&EXTENSION_FENCED_CODE != 0 {
+				if i := p.fencedCode(out, data[end:], false); i > 0 {
+					// -1 to compensate for the extra end++ after the loop:
+					end += i - 1
+					break
+				}
+			}
+			end++
+		}
+		end++
+
+		if pre := p.quotePrefix(data[beg:]); pre > 0 {
+			// skip the prefix
+			beg += pre
+		} else if p.terminateBlockquote(data, beg, end) {
+			break
+		}
+
+		// this line is part of the blockquote
+		raw.Write(data[beg:end])
+		beg = end
+	}
+
+	var cooked bytes.Buffer
+	p.block(&cooked, raw.Bytes())
+	p.r.BlockQuote(out, cooked.Bytes())
+	return end
+}
+
+// returns prefix length for block code
+func (p *parser) codePrefix(data []byte) int {
+	if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
+		return 4
+	}
+	return 0
+}
+
+func (p *parser) code(out *bytes.Buffer, data []byte) int {
+	var work bytes.Buffer
+
+	i := 0
+	for i < len(data) {
+		beg := i
+		for data[i] != '\n' {
+			i++
+		}
+		i++
+
+		blankline := p.isEmpty(data[beg:i]) > 0
+		if pre := p.codePrefix(data[beg:i]); pre > 0 {
+			beg += pre
+		} else if !blankline {
+			// non-empty, non-prefixed line breaks the pre
+			i = beg
+			break
+		}
+
+		// verbatim copy to the working buffer
+		if blankline {
+			work.WriteByte('\n')
+		} else {
+			work.Write(data[beg:i])
+		}
+	}
+
+	// trim all the \n off the end of work
+	workbytes := work.Bytes()
+	eol := len(workbytes)
+	for eol > 0 && workbytes[eol-1] == '\n' {
+		eol--
+	}
+	if eol != len(workbytes) {
+		work.Truncate(eol)
+	}
+
+	work.WriteByte('\n')
+
+	p.r.BlockCode(out, work.Bytes(), "")
+
+	return i
+}
+
+// returns unordered list item prefix
+func (p *parser) uliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// need a *, +, or - followed by a space
+	if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
+		data[i+1] != ' ' {
+		return 0
+	}
+	return i + 2
+}
+
+// returns ordered list item prefix
+func (p *parser) oliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// count the digits
+	start := i
+	for data[i] >= '0' && data[i] <= '9' {
+		i++
+	}
+
+	// we need >= 1 digits followed by a dot and a space
+	if start == i || data[i] != '.' || data[i+1] != ' ' {
+		return 0
+	}
+	return i + 2
+}
+
+// returns definition list item prefix
+func (p *parser) dliPrefix(data []byte) int {
+	i := 0
+
+	// need a : followed by a space
+	if data[i] != ':' || data[i+1] != ' ' {
+		return 0
+	}
+	for data[i] == ' ' {
+		i++
+	}
+	return i + 2
+}
+
+// parse ordered or unordered list block
+func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
+	i := 0
+	flags |= LIST_ITEM_BEGINNING_OF_LIST
+	work := func() bool {
+		for i < len(data) {
+			skip := p.listItem(out, data[i:], &flags)
+			i += skip
+
+			if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
+				break
+			}
+			flags &= ^LIST_ITEM_BEGINNING_OF_LIST
+		}
+		return true
+	}
+
+	p.r.List(out, work, flags)
+	return i
+}
+
+// Parse a single list item.
+// Assumes initial prefix is already removed if this is a sublist.
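+// The *flags argument is shared state: bits such as LIST_ITEM_END_OF_LIST set
+// while parsing one item are read by the enclosing list() loop to decide when
+// to stop consuming items.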
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { + // keep track of the indentation of the first line + itemIndent := 0 + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^LIST_TYPE_TERM + } + } + if i == 0 { + // if in defnition list, set term flag and continue + if *flags&LIST_TYPE_DEFINITION != 0 { + *flags |= LIST_TYPE_TERM + } else { + return 0 + } + } + + // skip leading whitespace on first line + for data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + raw.Write(data[line:i]) + line = i + continue + } + + // calculate the indentation + indent := 0 + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + } + + chunk := data[line+indent : i] + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + if containsBlankLine { + // end the list if the type changed after a blank line + if indent <= itemIndent && + ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) || + (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) { + + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + // to be a nested list, it must be indented more + // if not, it is the next item in the same list + if indent <= itemIndent { + break gatherlines + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix header? + case p.isPrefixHeader(chunk): + // if the header is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
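+				// (it is, if either this line or the next non-blank
+				// line begins with the ":" definition marker)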
+				next := i
+				for data[next] != '\n' {
+					next++
+				}
+				for next < len(data)-1 && data[next] == '\n' {
+					next++
+				}
+				if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
+					*flags |= LIST_ITEM_END_OF_LIST
+				}
+			} else {
+				*flags |= LIST_ITEM_END_OF_LIST
+			}
+			break gatherlines
+
+		// a blank line means this should be parsed as a block
+		case containsBlankLine:
+			*flags |= LIST_ITEM_CONTAINS_BLOCK
+		}
+
+		containsBlankLine = false
+
+		// add the line into the working buffer without prefix
+		raw.Write(data[line+indent : i])
+
+		line = i
+	}
+
+	rawBytes := raw.Bytes()
+
+	// render the contents of the list item
+	var cooked bytes.Buffer
+	if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 {
+		// intermediate render of block item, except for definition term
+		if sublist > 0 {
+			p.block(&cooked, rawBytes[:sublist])
+			p.block(&cooked, rawBytes[sublist:])
+		} else {
+			p.block(&cooked, rawBytes)
+		}
+	} else {
+		// intermediate render of inline item
+		if sublist > 0 {
+			p.inline(&cooked, rawBytes[:sublist])
+			p.block(&cooked, rawBytes[sublist:])
+		} else {
+			p.inline(&cooked, rawBytes)
+		}
+	}
+
+	// render the actual list item
+	cookedBytes := cooked.Bytes()
+	parsedEnd := len(cookedBytes)
+
+	// strip trailing newlines
+	for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' {
+		parsedEnd--
+	}
+	p.r.ListItem(out, cookedBytes[:parsedEnd], *flags)
+
+	return line
+}
+
+// render a single paragraph that has already been parsed out
+func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) {
+	if len(data) == 0 {
+		return
+	}
+
+	// trim leading spaces
+	beg := 0
+	for data[beg] == ' ' {
+		beg++
+	}
+
+	// trim trailing newline
+	end := len(data) - 1
+
+	// trim trailing spaces
+	for end > beg && data[end-1] == ' ' {
+		end--
+	}
+
+	work := func() bool {
+		p.inline(out, data[beg:end])
+		return true
+	}
+	p.r.Paragraph(out, work)
+}
+
+func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
+	// prev: index of 1st char of previous line
+	// line: index of 1st char of current line
+	// i: index of cursor/end of current line
+	var prev, line, i int
+
+	// keep going until we find something to mark the end of the paragraph
+	for i < len(data) {
+		// mark the beginning of the current line
+		prev = line
+		current := data[i:]
+		line = i
+
+		// did we find a blank line marking the end of the paragraph?
+		if n := p.isEmpty(current); n > 0 {
+			// is this blank line followed by a definition list item?
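+			// If so, data[prev:] (the paragraph's last line plus everything
+			// after it) is re-parsed as a definition list, and that last
+			// line becomes the term.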
+			if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+				if i < len(data)-1 && data[i+1] == ':' {
+					return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+				}
+			}
+
+			p.renderParagraph(out, data[:i])
+			return i + n
+		}
+
+		// an underline under some text marks a header, so our paragraph ended on prev line
+		if i > 0 {
+			if level := p.isUnderlinedHeader(current); level > 0 {
+				// render the paragraph
+				p.renderParagraph(out, data[:prev])
+
+				// ignore leading and trailing whitespace
+				eol := i - 1
+				for prev < eol && data[prev] == ' ' {
+					prev++
+				}
+				for eol > prev && data[eol-1] == ' ' {
+					eol--
+				}
+
+				// render the header
+				// this ugly double closure avoids forcing variables onto the heap
+				work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool {
+					return func() bool {
+						pp.inline(o, d)
+						return true
+					}
+				}(out, p, data[prev:eol])
+
+				id := ""
+				if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
+					id = sanitized_anchor_name.Create(string(data[prev:eol]))
+				}
+
+				p.r.Header(out, work, level, id)
+
+				// find the end of the underline
+				for data[i] != '\n' {
+					i++
+				}
+				return i
+			}
+		}
+
+		// if the next line starts a block of HTML, then the paragraph ends here
+		if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+			if data[i] == '<' && p.html(out, current, false) > 0 {
+				// rewind to before the HTML block
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// if there's a prefixed header or a horizontal rule after this, paragraph is over
+		if p.isPrefixHeader(current) || p.isHRule(current) {
+			p.renderParagraph(out, data[:i])
+			return i
+		}
+
+		// if there's a fenced code block, paragraph is over
+		if p.flags&EXTENSION_FENCED_CODE != 0 {
+			if p.fencedCode(out, current, false) > 0 {
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// if there's a definition list item, prev line is a definition term
+		if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+			if p.dliPrefix(current) != 0 {
+				return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+			}
+		}
+
+		// if there's a list after this, paragraph is over
+		if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 {
+			if p.uliPrefix(current) != 0 ||
+				p.oliPrefix(current) != 0 ||
+				p.quotePrefix(current) != 0 ||
+				p.codePrefix(current) != 0 {
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// otherwise, scan to the beginning of the next line
+		for data[i] != '\n' {
+			i++
+		}
+		i++
+	}
+
+	p.renderParagraph(out, data[:i])
+	return i
+}
diff --git a/transfersh-server/vendor/github.com/russross/blackfriday/html.go b/transfersh-server/vendor/github.com/russross/blackfriday/html.go
new file mode 100644
index 0000000..74e67ee
--- /dev/null
+++ b/transfersh-server/vendor/github.com/russross/blackfriday/html.go
@@ -0,0 +1,949 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// HTML rendering backend
+//
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// Html renderer configuration options.
+const (
+	HTML_SKIP_HTML  = 1 << iota // skip preformatted HTML blocks
+	HTML_SKIP_STYLE             // skip embedded