* GDrive provider support * More reliable basedir ownership * Fix mimetypetags/v1.0.0
@@ -2,7 +2,7 @@ | |||||
Easy and fast file sharing from the command-line. This code contains the server with everything you need to create your own instance. | Easy and fast file sharing from the command-line. This code contains the server with everything you need to create your own instance. | ||||
Transfer.sh support currently the s3 (Amazon S3) provider and local file system (local). | |||||
Transfer.sh currently supports the s3 (Amazon S3) and gdrive (Google Drive) providers, and the local file system (local). | |||||
## Usage | ## Usage | ||||
@@ -46,12 +46,14 @@ tls-cert-file | path to tls certificate | | | |||||
tls-private-key | path to tls private key | | | tls-private-key | path to tls private key | | | ||||
temp-path | path to temp folder | system temp | | temp-path | path to temp folder | system temp | | ||||
web-path | path to static web files (for development) | | | web-path | path to static web files (for development) | | | ||||
provider | which storage provider to use | (s3 or local) | | |||||
provider | which storage provider to use | (s3, gdrive or local) | |||||
aws-access-key | aws access key | | AWS_ACCESS_KEY | aws-access-key | aws access key | | AWS_ACCESS_KEY | ||||
aws-secret-key | aws secret key | | AWS_SECRET_KEY | aws-secret-key | aws secret key | | AWS_SECRET_KEY | ||||
bucket | aws bucket | | BUCKET | bucket | aws bucket | | BUCKET | ||||
basedir | path storage for local provider| | | |||||
lets-encrypt-hosts | hosts to use for lets encrypt certificates (comma seperated) | | | |||||
basedir | path storage for local/gdrive provider| | | |||||
gdrive-client-json-filepath | path to client json config for gdrive provider| | | |||||
gdrive-local-config-path | path to local transfer.sh config cache for gdrive provider| | | |||||
lets-encrypt-hosts | hosts to use for lets encrypt certificates (comma separated) | | |||||
log | path to log file| | | log | path to log file| | | ||||
If you want to use TLS using lets encrypt certificates, set lets-encrypt-hosts to your domain, set tls-listener to :443 and enable force-https. | If you want to use TLS using lets encrypt certificates, set lets-encrypt-hosts to your domain, set tls-listener to :443 and enable force-https. | ||||
@@ -74,7 +74,7 @@ var globalFlags = []cli.Flag{ | |||||
}, | }, | ||||
cli.StringFlag{ | cli.StringFlag{ | ||||
Name: "provider", | Name: "provider", | ||||
Usage: "s3|local", | |||||
Usage: "s3|gdrive|local", | |||||
Value: "", | Value: "", | ||||
}, | }, | ||||
cli.StringFlag{ | cli.StringFlag{ | ||||
@@ -101,6 +101,18 @@ var globalFlags = []cli.Flag{ | |||||
Value: "", | Value: "", | ||||
EnvVar: "BUCKET", | EnvVar: "BUCKET", | ||||
}, | }, | ||||
cli.StringFlag{ | |||||
Name: "gdrive-client-json-filepath", | |||||
Usage: "", | |||||
Value: "", | |||||
EnvVar: "", | |||||
}, | |||||
cli.StringFlag{ | |||||
Name: "gdrive-local-config-path", | |||||
Usage: "", | |||||
Value: "", | |||||
EnvVar: "", | |||||
}, | |||||
cli.IntFlag{ | cli.IntFlag{ | ||||
Name: "rate-limit", | Name: "rate-limit", | ||||
Usage: "requests per minute", | Usage: "requests per minute", | ||||
@@ -233,6 +245,18 @@ func New() *Cmd { | |||||
} else { | } else { | ||||
options = append(options, server.UseStorage(storage)) | options = append(options, server.UseStorage(storage)) | ||||
} | } | ||||
case "gdrive": | |||||
if clientJsonFilepath := c.String("gdrive-client-json-filepath"); clientJsonFilepath == "" { | |||||
panic("client-json-filepath not set.") | |||||
} else if localConfigPath := c.String("gdrive-local-config-path"); localConfigPath == "" { | |||||
panic("local-config-path not set.") | |||||
} else if basedir := c.String("basedir"); basedir == "" { | |||||
panic("basedir not set.") | |||||
} else if storage, err := server.NewGDriveStorage(clientJsonFilepath, localConfigPath, basedir); err != nil { | |||||
panic(err) | |||||
} else { | |||||
options = append(options, server.UseStorage(storage)) | |||||
} | |||||
case "local": | case "local": | ||||
if v := c.String("basedir"); v == "" { | if v := c.String("basedir"); v == "" { | ||||
panic("basedir not set.") | panic("basedir not set.") | ||||
@@ -12,6 +12,16 @@ import ( | |||||
"sync" | "sync" | ||||
"github.com/goamz/goamz/s3" | "github.com/goamz/goamz/s3" | ||||
"encoding/json" | |||||
"golang.org/x/oauth2" | |||||
"golang.org/x/net/context" | |||||
"golang.org/x/oauth2/google" | |||||
"google.golang.org/api/drive/v3" | |||||
"google.golang.org/api/googleapi" | |||||
"net/http" | |||||
"io/ioutil" | |||||
"time" | |||||
) | ) | ||||
type Storage interface { | type Storage interface { | ||||
@@ -284,3 +294,395 @@ func (s *S3Storage) Put(token string, filename string, reader io.Reader, content | |||||
return | return | ||||
} | } | ||||
// GDrive is a storage backend that keeps uploads in a Google Drive
// folder tree rooted at a single base directory (rootId).
type GDrive struct {
	service         *drive.Service // authenticated Drive API client
	rootId          string         // Drive file id of the base directory
	basedir         string         // name of the base directory in Drive
	localConfigPath string         // local dir caching root_id.conf between runs
}
func NewGDriveStorage(clientJsonFilepath string, localConfigPath string, basedir string) (*GDrive, error) { | |||||
b, err := ioutil.ReadFile(clientJsonFilepath) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
// If modifying these scopes, delete your previously saved client_secret.json. | |||||
config, err := google.ConfigFromJSON(b, drive.DriveScope, drive.DriveMetadataScope) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
srv, err := drive.New(getGDriveClient(config)) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
storage := &GDrive{service: srv, basedir: basedir, rootId: "", localConfigPath:localConfigPath} | |||||
err = storage.setupRoot() | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return storage, nil | |||||
} | |||||
// GDriveRootConfigFile is the file (under localConfigPath) that caches the
// Drive id of the base directory between runs.
const GDriveRootConfigFile = "root_id.conf"

// GDriveTimeoutTimerInterval is both the idle timeout for Drive transfers
// and the interval at which the watchdog timer re-checks for activity.
const GDriveTimeoutTimerInterval = time.Second * 10

// GDriveDirectoryMimeType is the MIME type Drive uses for folders.
const GDriveDirectoryMimeType = "application/vnd.google-apps.folder"

// gDriveTimeoutReaderWrapper optionally wraps a reader with idle-timeout
// cancellation (a zero timeout returns the reader untouched).
type gDriveTimeoutReaderWrapper func(io.Reader) io.Reader
func (s *GDrive) setupRoot() error { | |||||
rootFileConfig := filepath.Join(s.localConfigPath, GDriveRootConfigFile) | |||||
rootId, err := ioutil.ReadFile(rootFileConfig) | |||||
if err != nil && !os.IsNotExist(err) { | |||||
return err | |||||
} | |||||
if string(rootId) != "" { | |||||
s.rootId = string(rootId) | |||||
return nil | |||||
} | |||||
dir := &drive.File{ | |||||
Name: s.basedir, | |||||
MimeType: GDriveDirectoryMimeType, | |||||
} | |||||
di, err := s.service.Files.Create(dir).Fields("id").Do() | |||||
if err != nil { | |||||
return err | |||||
} | |||||
s.rootId = di.Id | |||||
err = ioutil.WriteFile(rootFileConfig, []byte(s.rootId), os.FileMode(0600)) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
return nil | |||||
} | |||||
func (s *GDrive) getTimeoutReader(r io.Reader, cancel context.CancelFunc, timeout time.Duration) io.Reader { | |||||
return &GDriveTimeoutReader{ | |||||
reader: r, | |||||
cancel: cancel, | |||||
mutex: &sync.Mutex{}, | |||||
maxIdleTimeout: timeout, | |||||
} | |||||
} | |||||
// GDriveTimeoutReader is an io.Reader wrapper that cancels the associated
// request context when the reader sits idle (no Read call completing) for
// longer than maxIdleTimeout.
type GDriveTimeoutReader struct {
	reader         io.Reader          // underlying stream
	cancel         context.CancelFunc // cancels the Drive request on timeout
	lastActivity   time.Time          // time of the most recent Read
	timer          *time.Timer        // watchdog timer; re-armed by timeout()
	mutex          *sync.Mutex        // guards the fields above
	maxIdleTimeout time.Duration      // idle duration that triggers cancel
	done           bool               // set once Read returns any error (incl. EOF)
}
func (r *GDriveTimeoutReader) Read(p []byte) (int, error) { | |||||
if r.timer == nil { | |||||
r.startTimer() | |||||
} | |||||
r.mutex.Lock() | |||||
// Read | |||||
n, err := r.reader.Read(p) | |||||
r.lastActivity = time.Now() | |||||
r.done = (err != nil) | |||||
r.mutex.Unlock() | |||||
if r.done { | |||||
r.stopTimer() | |||||
} | |||||
return n, err | |||||
} | |||||
func (r *GDriveTimeoutReader) Close() error { | |||||
return r.reader.(io.ReadCloser).Close() | |||||
} | |||||
// startTimer arms the watchdog to call timeout() after
// GDriveTimeoutTimerInterval, unless the stream has already finished.
func (r *GDriveTimeoutReader) startTimer() {
	r.mutex.Lock()
	defer r.mutex.Unlock()
	if !r.done {
		// timeout() re-arms itself via startTimer while activity continues.
		r.timer = time.AfterFunc(GDriveTimeoutTimerInterval, r.timeout)
	}
}
func (r *GDriveTimeoutReader) stopTimer() { | |||||
r.mutex.Lock() | |||||
defer r.mutex.Unlock() | |||||
if r.timer != nil { | |||||
r.timer.Stop() | |||||
} | |||||
} | |||||
// timeout is the watchdog callback: if the reader has been idle longer than
// maxIdleTimeout it cancels the request context; otherwise it re-arms the
// timer for another interval. Once done is set it returns without re-arming.
func (r *GDriveTimeoutReader) timeout() {
	r.mutex.Lock()
	if r.done {
		r.mutex.Unlock()
		return
	}
	if time.Since(r.lastActivity) > r.maxIdleTimeout {
		// Idle too long: abort the in-flight Drive request.
		r.cancel()
		r.mutex.Unlock()
		return
	}
	r.mutex.Unlock()
	// Still active: schedule the next check (startTimer locks internally).
	r.startTimer()
}
func (s *GDrive) getTimeoutReaderWrapperContext(timeout time.Duration) (gDriveTimeoutReaderWrapper, context.Context) { | |||||
ctx, cancel := context.WithCancel(context.TODO()) | |||||
wrapper := func(r io.Reader) io.Reader { | |||||
// Return untouched reader if timeout is 0 | |||||
if timeout == 0 { | |||||
return r | |||||
} | |||||
return s.getTimeoutReader(r, cancel, timeout) | |||||
} | |||||
return wrapper, ctx | |||||
} | |||||
func (s *GDrive) hasChecksum(f *drive.File) bool { | |||||
return f.Md5Checksum != "" | |||||
} | |||||
// list runs a Drive files.list query q, resuming at nextPageToken ("" for
// the first page) and requesting only id, name and mimeType per file.
func (s *GDrive) list(nextPageToken string, q string) (*drive.FileList, error) {
	return s.service.Files.List().Fields("nextPageToken, files(id, name, mimeType)").Q(q).PageToken(nextPageToken).Do()
}
func (s *GDrive) findId(filename string, token string) (string, error) { | |||||
fileId, tokenId, nextPageToken := "", "", "" | |||||
q := fmt.Sprintf("'%s' in parents and name='%s' and mimeType='%s' and trashed=false", s.rootId, token, GDriveDirectoryMimeType) | |||||
l, err := s.list(nextPageToken, q) | |||||
for 0 < len(l.Files) { | |||||
if err != nil { | |||||
return "", err | |||||
} | |||||
for _, fi := range l.Files { | |||||
tokenId = fi.Id | |||||
break | |||||
} | |||||
if l.NextPageToken == "" { | |||||
break | |||||
} | |||||
l, err = s.list(l.NextPageToken, q) | |||||
} | |||||
if filename == "" { | |||||
return tokenId, nil | |||||
} else if tokenId == "" { | |||||
return "", fmt.Errorf("Cannot find file %s/%s", token, filename) | |||||
} | |||||
q = fmt.Sprintf("'%s' in parents and name='%s' and mimeType!='%s' and trashed=false", tokenId, filename, GDriveDirectoryMimeType) | |||||
l, err = s.list(nextPageToken, q) | |||||
for 0 < len(l.Files) { | |||||
if err != nil { | |||||
return "", err | |||||
} | |||||
for _, fi := range l.Files { | |||||
fileId = fi.Id | |||||
break | |||||
} | |||||
if l.NextPageToken == "" { | |||||
break | |||||
} | |||||
l, err = s.list(l.NextPageToken, q) | |||||
} | |||||
if fileId == "" { | |||||
return "", fmt.Errorf("Cannot find file %s/%s", token, filename) | |||||
} | |||||
return fileId, nil | |||||
} | |||||
func (s *GDrive) Type() string { | |||||
return "gdrive" | |||||
} | |||||
func (s *GDrive) Head(token string, filename string) (contentType string, contentLength uint64, err error) { | |||||
var fileId string | |||||
fileId, err = s.findId(filename, token) | |||||
if err != nil { | |||||
return | |||||
} | |||||
var fi *drive.File | |||||
if fi, err = s.service.Files.Get(fileId).Fields("mimeType", "size").Do(); err != nil { | |||||
return | |||||
} | |||||
contentLength = uint64(fi.Size) | |||||
contentType = fi.MimeType | |||||
return | |||||
} | |||||
func (s *GDrive) Get(token string, filename string) (reader io.ReadCloser, contentType string, contentLength uint64, err error) { | |||||
var fileId string | |||||
fileId, err = s.findId(filename, token) | |||||
if err != nil { | |||||
return | |||||
} | |||||
var fi *drive.File | |||||
fi, err = s.service.Files.Get(fileId).Fields("mimeType", "size", "md5Checksum").Do() | |||||
if !s.hasChecksum(fi) { | |||||
err = fmt.Errorf("Cannot find file %s/%s", token, filename) | |||||
return | |||||
} | |||||
contentLength = uint64(fi.Size) | |||||
contentType = fi.MimeType | |||||
// Get timeout reader wrapper and context | |||||
timeoutReaderWrapper, ctx := s.getTimeoutReaderWrapperContext(time.Duration(GDriveTimeoutTimerInterval)) | |||||
var res *http.Response | |||||
res, err = s.service.Files.Get(fileId).Context(ctx).Download() | |||||
if err != nil { | |||||
return | |||||
} | |||||
reader = timeoutReaderWrapper(res.Body).(io.ReadCloser) | |||||
return | |||||
} | |||||
func (s *GDrive) IsNotExist(err error) bool { | |||||
if err == nil { | |||||
return false | |||||
} | |||||
if err != nil { | |||||
if e, ok := err.(*googleapi.Error); ok { | |||||
return e.Code == http.StatusNotFound | |||||
} | |||||
} | |||||
return false | |||||
} | |||||
func (s *GDrive) Put(token string, filename string, reader io.Reader, contentType string, contentLength uint64) error { | |||||
dirId, err := s.findId("", token) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
if dirId == "" { | |||||
dir := &drive.File{ | |||||
Name: token, | |||||
Parents: []string{s.rootId}, | |||||
MimeType: GDriveDirectoryMimeType, | |||||
} | |||||
di, err := s.service.Files.Create(dir).Fields("id").Do() | |||||
if err != nil { | |||||
return err | |||||
} | |||||
dirId = di.Id | |||||
} | |||||
// Wrap reader in timeout reader | |||||
timeoutReaderWrapper, ctx := s.getTimeoutReaderWrapperContext(time.Duration(GDriveTimeoutTimerInterval)) | |||||
// Instantiate empty drive file | |||||
dst := &drive.File{ | |||||
Name: filename, | |||||
Parents: []string{dirId}, | |||||
MimeType: contentType, | |||||
} | |||||
_, err = s.service.Files.Create(dst).Context(ctx).Media(timeoutReaderWrapper(reader)).Do() | |||||
if err != nil { | |||||
return err | |||||
} | |||||
return nil | |||||
} | |||||
// Retrieve a token, saves the token, then returns the generated client.
// On first use (or when the cached token cannot be read) this falls back
// to an interactive browser-based OAuth flow on stdin/stdout.
func getGDriveClient(config *oauth2.Config) *http.Client {
	// NOTE(review): the token cache is written to the process working
	// directory, not under localConfigPath — confirm this is intended.
	tokenFile := "token.json"
	tok, err := gDriveTokenFromFile(tokenFile)
	if err != nil {
		tok = getGDriveTokenFromWeb(config)
		saveGDriveToken(tokenFile, tok)
	}
	return config.Client(context.Background(), tok)
}
// Request a token from the web, then returns the retrieved token.
// This is interactive: it prints an authorization URL, reads the code from
// stdin, and terminates the process (log.Fatalf) on any failure.
func getGDriveTokenFromWeb(config *oauth2.Config) *oauth2.Token {
	authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
	fmt.Printf("Go to the following link in your browser then type the "+
		"authorization code: \n%v\n", authURL)
	var authCode string
	if _, err := fmt.Scan(&authCode); err != nil {
		log.Fatalf("Unable to read authorization code %v", err)
	}
	tok, err := config.Exchange(oauth2.NoContext, authCode)
	if err != nil {
		log.Fatalf("Unable to retrieve token from web %v", err)
	}
	return tok
}
// Retrieves a token from a local file. | |||||
func gDriveTokenFromFile(file string) (*oauth2.Token, error) { | |||||
f, err := os.Open(file) | |||||
defer f.Close() | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
tok := &oauth2.Token{} | |||||
err = json.NewDecoder(f).Decode(tok) | |||||
return tok, err | |||||
} | |||||
// Saves a token to a file path. | |||||
func saveGDriveToken(path string, token *oauth2.Token) { | |||||
fmt.Printf("Saving credential file to: %s\n", path) | |||||
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) | |||||
defer f.Close() | |||||
if err != nil { | |||||
log.Fatalf("Unable to cache oauth token: %v", err) | |||||
} | |||||
json.NewEncoder(f).Encode(token) | |||||
} |
@@ -0,0 +1,23 @@ | |||||
sudo: false | |||||
language: go | |||||
go: | |||||
- 1.6.x | |||||
- 1.7.x | |||||
- 1.8.x | |||||
- 1.9.x | |||||
- 1.10.x | |||||
install: | |||||
- go get -v cloud.google.com/go/... | |||||
script: | |||||
- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in keys.tar.enc -out keys.tar -d | |||||
- tar xvf keys.tar | |||||
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" | |||||
GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json" | |||||
GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests" | |||||
GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json" | |||||
GCLOUD_TESTS_GOLANG_KEYRING="projects/dulcet-port-762/locations/us/keyRings/go-integration-test" | |||||
./run-tests.sh $TRAVIS_COMMIT | |||||
env: | |||||
matrix: | |||||
# The GCLOUD_TESTS_API_KEY environment variable. | |||||
secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI= |
@@ -0,0 +1,15 @@ | |||||
# This is the official list of cloud authors for copyright purposes. | |||||
# This file is distinct from the CONTRIBUTORS files. | |||||
# See the latter for an explanation. | |||||
# Names should be added to this file as: | |||||
# Name or Organization <email address> | |||||
# The email address is not required for organizations. | |||||
Filippo Valsorda <hi@filippo.io> | |||||
Google Inc. | |||||
Ingo Oeser <nightlyone@googlemail.com> | |||||
Palm Stone Games, Inc. | |||||
Paweł Knap <pawelknap88@gmail.com> | |||||
Péter Szilágyi <peterke@gmail.com> | |||||
Tyler Treat <ttreat31@gmail.com> |
@@ -0,0 +1,167 @@ | |||||
# Contributing | |||||
1. Sign one of the contributor license agreements below. | |||||
1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. | |||||
1. You will need to ensure that your `GOBIN` directory (by default | |||||
`$GOPATH/bin`) is in your `PATH` so that git can find the command. | |||||
1. If you would like, you may want to set up aliases for git-codereview, | |||||
such that `git codereview change` becomes `git change`. See the | |||||
[godoc](https://godoc.org/golang.org/x/review/git-codereview) for details. | |||||
1. Should you run into issues with the git-codereview tool, please note | |||||
that all error messages will assume that you have set up these | |||||
aliases. | |||||
1. Get the cloud package by running `go get -d cloud.google.com/go`. | |||||
1. If you have already checked out the source, make sure that the remote git | |||||
origin is https://code.googlesource.com/gocloud: | |||||
git remote set-url origin https://code.googlesource.com/gocloud | |||||
1. Make sure your auth is configured correctly by visiting | |||||
https://code.googlesource.com, clicking "Generate Password", and following | |||||
the directions. | |||||
1. Make changes and create a change by running `git codereview change <name>`, | |||||
provide a commit message, and use `git codereview mail` to create a Gerrit CL. | |||||
1. Keep amending to the change with `git codereview change` and mail as your receive | |||||
feedback. Each new mailed amendment will create a new patch set for your change in Gerrit. | |||||
## Integration Tests | |||||
In addition to the unit tests, you may run the integration test suite. | |||||
To run the integration tests, creating and configuring a project in the | |||||
Google Developers Console is required. | |||||
After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount). | |||||
Ensure the project-level **Owner** | |||||
[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to the | |||||
service account. Alternatively, the account can be granted all of the following roles: | |||||
- **Editor** | |||||
- **Logs Configuration Writer** | |||||
- **PubSub Admin** | |||||
Once you create a project, set the following environment variables to be able to | |||||
run against the actual APIs. | |||||
- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) | |||||
- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. | |||||
Some packages require additional environment variables to be set: | |||||
- firestore | |||||
- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: project ID for Firestore. | |||||
- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file. | |||||
- storage | |||||
- **GCLOUD_TESTS_GOLANG_KEYRING**: The full name of the keyring for the tests, in the | |||||
form "projects/P/locations/L/keyRings/R". | |||||
- translate | |||||
- **GCLOUD_TESTS_API_KEY**: API key for using the Translate API. | |||||
- profiler | |||||
- **GCLOUD_TESTS_GOLANG_ZONE**: Compute Engine zone. | |||||
Install the [gcloud command-line tool][gcloudcli] to your machine and use it | |||||
to create some resources used in integration tests. | |||||
From the project's root directory: | |||||
``` sh | |||||
# Set the default project in your env. | |||||
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID | |||||
# Authenticate the gcloud tool with your account. | |||||
$ gcloud auth login | |||||
# Create the indexes used in the datastore integration tests. | |||||
$ gcloud preview datastore create-indexes datastore/testdata/index.yaml | |||||
# Create a Google Cloud storage bucket with the same name as your test project, | |||||
# and with the Stackdriver Logging service account as owner, for the sink | |||||
# integration tests in logging. | |||||
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID | |||||
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID | |||||
# Create a PubSub topic for integration tests of storage notifications. | |||||
$ gcloud beta pubsub topics create go-storage-notification-test | |||||
# Create a Spanner instance for the spanner integration tests. | |||||
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test' | |||||
# NOTE: Spanner instances are priced by the node-hour, so you may want to delete | |||||
# the instance after testing with 'gcloud beta spanner instances delete'. | |||||
# For Storage integration tests: | |||||
# Enable KMS for your project in the Cloud Console. | |||||
# Create a KMS keyring, in the same location as the default location for your project's buckets. | |||||
$ gcloud kms keyrings create MY_KEYRING --location MY_LOCATION | |||||
# Create two keys in the keyring, named key1 and key2. | |||||
$ gcloud kms keys create key1 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption | |||||
$ gcloud kms keys create key2 --keyring MY_KEYRING --location MY_LOCATION --purpose encryption | |||||
# As mentioned above, set the GCLOUD_TESTS_GOLANG_KEYRING environment variable. | |||||
$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/MY_LOCATION/keyRings/MY_KEYRING | |||||
# Authorize Google Cloud Storage to encrypt and decrypt using key1. | |||||
gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1 | |||||
``` | |||||
Once you've done the necessary setup, you can run the integration tests by running: | |||||
``` sh | |||||
$ go test -v cloud.google.com/go/... | |||||
``` | |||||
## Contributor License Agreements | |||||
Before we can accept your pull requests you'll need to sign a Contributor | |||||
License Agreement (CLA): | |||||
- **If you are an individual writing original source code** and **you own the | |||||
intellectual property**, then you'll need to sign an [individual CLA][indvcla]. | |||||
- **If you work for a company that wants to allow you to contribute your | |||||
work**, then you'll need to sign a [corporate CLA][corpcla]. | |||||
You can sign these electronically (just scroll to the bottom). After that, | |||||
we'll be able to accept your pull requests. | |||||
## Contributor Code of Conduct | |||||
As contributors and maintainers of this project, | |||||
and in the interest of fostering an open and welcoming community, | |||||
we pledge to respect all people who contribute through reporting issues, | |||||
posting feature requests, updating documentation, | |||||
submitting pull requests or patches, and other activities. | |||||
We are committed to making participation in this project | |||||
a harassment-free experience for everyone, | |||||
regardless of level of experience, gender, gender identity and expression, | |||||
sexual orientation, disability, personal appearance, | |||||
body size, race, ethnicity, age, religion, or nationality. | |||||
Examples of unacceptable behavior by participants include: | |||||
* The use of sexualized language or imagery | |||||
* Personal attacks | |||||
* Trolling or insulting/derogatory comments | |||||
* Public or private harassment | |||||
* Publishing other's private information, | |||||
such as physical or electronic | |||||
addresses, without explicit permission | |||||
* Other unethical or unprofessional conduct. | |||||
Project maintainers have the right and responsibility to remove, edit, or reject | |||||
comments, commits, code, wiki edits, issues, and other contributions | |||||
that are not aligned to this Code of Conduct. | |||||
By adopting this Code of Conduct, | |||||
project maintainers commit themselves to fairly and consistently | |||||
applying these principles to every aspect of managing this project. | |||||
Project maintainers who do not follow or enforce the Code of Conduct | |||||
may be permanently removed from the project team. | |||||
This code of conduct applies both within project spaces and in public spaces | |||||
when an individual is representing the project or its community. | |||||
Instances of abusive, harassing, or otherwise unacceptable behavior | |||||
may be reported by opening an issue | |||||
or contacting one or more of the project maintainers. | |||||
This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, | |||||
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) | |||||
[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ | |||||
[indvcla]: https://developers.google.com/open-source/cla/individual | |||||
[corpcla]: https://developers.google.com/open-source/cla/corporate |
@@ -0,0 +1,40 @@ | |||||
# People who have agreed to one of the CLAs and can contribute patches. | |||||
# The AUTHORS file lists the copyright holders; this file | |||||
# lists people. For example, Google employees are listed here | |||||
# but not in AUTHORS, because Google holds the copyright. | |||||
# | |||||
# https://developers.google.com/open-source/cla/individual | |||||
# https://developers.google.com/open-source/cla/corporate | |||||
# | |||||
# Names should be added to this file as: | |||||
# Name <email address> | |||||
# Keep the list alphabetically sorted. | |||||
Alexis Hunt <lexer@google.com> | |||||
Andreas Litt <andreas.litt@gmail.com> | |||||
Andrew Gerrand <adg@golang.org> | |||||
Brad Fitzpatrick <bradfitz@golang.org> | |||||
Burcu Dogan <jbd@google.com> | |||||
Dave Day <djd@golang.org> | |||||
David Sansome <me@davidsansome.com> | |||||
David Symonds <dsymonds@golang.org> | |||||
Filippo Valsorda <hi@filippo.io> | |||||
Glenn Lewis <gmlewis@google.com> | |||||
Ingo Oeser <nightlyone@googlemail.com> | |||||
James Hall <james.hall@shopify.com> | |||||
Johan Euphrosine <proppy@google.com> | |||||
Jonathan Amsterdam <jba@google.com> | |||||
Kunpei Sakai <namusyaka@gmail.com> | |||||
Luna Duclos <luna.duclos@palmstonegames.com> | |||||
Magnus Hiie <magnus.hiie@gmail.com> | |||||
Mario Castro <mariocaster@gmail.com> | |||||
Michael McGreevy <mcgreevy@golang.org> | |||||
Omar Jarjur <ojarjur@google.com> | |||||
Paweł Knap <pawelknap88@gmail.com> | |||||
Péter Szilágyi <peterke@gmail.com> | |||||
Sarah Adams <shadams@google.com> | |||||
Thanatat Tamtan <acoshift@gmail.com> | |||||
Toby Burress <kurin@google.com> | |||||
Tuo Shan <shantuo@google.com> | |||||
Tyler Treat <ttreat31@gmail.com> |
@@ -0,0 +1,202 @@ | |||||
Apache License | |||||
Version 2.0, January 2004 | |||||
http://www.apache.org/licenses/ | |||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||||
1. Definitions. | |||||
"License" shall mean the terms and conditions for use, reproduction, | |||||
and distribution as defined by Sections 1 through 9 of this document. | |||||
"Licensor" shall mean the copyright owner or entity authorized by | |||||
the copyright owner that is granting the License. | |||||
"Legal Entity" shall mean the union of the acting entity and all | |||||
other entities that control, are controlled by, or are under common | |||||
control with that entity. For the purposes of this definition, | |||||
"control" means (i) the power, direct or indirect, to cause the | |||||
direction or management of such entity, whether by contract or | |||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||||
outstanding shares, or (iii) beneficial ownership of such entity. | |||||
"You" (or "Your") shall mean an individual or Legal Entity | |||||
exercising permissions granted by this License. | |||||
"Source" form shall mean the preferred form for making modifications, | |||||
including but not limited to software source code, documentation | |||||
source, and configuration files. | |||||
"Object" form shall mean any form resulting from mechanical | |||||
transformation or translation of a Source form, including but | |||||
not limited to compiled object code, generated documentation, | |||||
and conversions to other media types. | |||||
"Work" shall mean the work of authorship, whether in Source or | |||||
Object form, made available under the License, as indicated by a | |||||
copyright notice that is included in or attached to the work | |||||
(an example is provided in the Appendix below). | |||||
"Derivative Works" shall mean any work, whether in Source or Object | |||||
form, that is based on (or derived from) the Work and for which the | |||||
editorial revisions, annotations, elaborations, or other modifications | |||||
represent, as a whole, an original work of authorship. For the purposes | |||||
of this License, Derivative Works shall not include works that remain | |||||
separable from, or merely link (or bind by name) to the interfaces of, | |||||
the Work and Derivative Works thereof. | |||||
"Contribution" shall mean any work of authorship, including | |||||
the original version of the Work and any modifications or additions | |||||
to that Work or Derivative Works thereof, that is intentionally | |||||
submitted to Licensor for inclusion in the Work by the copyright owner | |||||
or by an individual or Legal Entity authorized to submit on behalf of | |||||
the copyright owner. For the purposes of this definition, "submitted" | |||||
means any form of electronic, verbal, or written communication sent | |||||
to the Licensor or its representatives, including but not limited to | |||||
communication on electronic mailing lists, source code control systems, | |||||
and issue tracking systems that are managed by, or on behalf of, the | |||||
Licensor for the purpose of discussing and improving the Work, but | |||||
excluding communication that is conspicuously marked or otherwise | |||||
designated in writing by the copyright owner as "Not a Contribution." | |||||
"Contributor" shall mean Licensor and any individual or Legal Entity | |||||
on behalf of whom a Contribution has been received by Licensor and | |||||
subsequently incorporated within the Work. | |||||
2. Grant of Copyright License. Subject to the terms and conditions of | |||||
this License, each Contributor hereby grants to You a perpetual, | |||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||||
copyright license to reproduce, prepare Derivative Works of, | |||||
publicly display, publicly perform, sublicense, and distribute the | |||||
Work and such Derivative Works in Source or Object form. | |||||
3. Grant of Patent License. Subject to the terms and conditions of | |||||
this License, each Contributor hereby grants to You a perpetual, | |||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||||
(except as stated in this section) patent license to make, have made, | |||||
use, offer to sell, sell, import, and otherwise transfer the Work, | |||||
where such license applies only to those patent claims licensable | |||||
by such Contributor that are necessarily infringed by their | |||||
Contribution(s) alone or by combination of their Contribution(s) | |||||
with the Work to which such Contribution(s) was submitted. If You | |||||
institute patent litigation against any entity (including a | |||||
cross-claim or counterclaim in a lawsuit) alleging that the Work | |||||
or a Contribution incorporated within the Work constitutes direct | |||||
or contributory patent infringement, then any patent licenses | |||||
granted to You under this License for that Work shall terminate | |||||
as of the date such litigation is filed. | |||||
4. Redistribution. You may reproduce and distribute copies of the | |||||
Work or Derivative Works thereof in any medium, with or without | |||||
modifications, and in Source or Object form, provided that You | |||||
meet the following conditions: | |||||
(a) You must give any other recipients of the Work or | |||||
Derivative Works a copy of this License; and | |||||
(b) You must cause any modified files to carry prominent notices | |||||
stating that You changed the files; and | |||||
(c) You must retain, in the Source form of any Derivative Works | |||||
that You distribute, all copyright, patent, trademark, and | |||||
attribution notices from the Source form of the Work, | |||||
excluding those notices that do not pertain to any part of | |||||
the Derivative Works; and | |||||
(d) If the Work includes a "NOTICE" text file as part of its | |||||
distribution, then any Derivative Works that You distribute must | |||||
include a readable copy of the attribution notices contained | |||||
within such NOTICE file, excluding those notices that do not | |||||
pertain to any part of the Derivative Works, in at least one | |||||
of the following places: within a NOTICE text file distributed | |||||
as part of the Derivative Works; within the Source form or | |||||
documentation, if provided along with the Derivative Works; or, | |||||
within a display generated by the Derivative Works, if and | |||||
wherever such third-party notices normally appear. The contents | |||||
of the NOTICE file are for informational purposes only and | |||||
do not modify the License. You may add Your own attribution | |||||
notices within Derivative Works that You distribute, alongside | |||||
or as an addendum to the NOTICE text from the Work, provided | |||||
that such additional attribution notices cannot be construed | |||||
as modifying the License. | |||||
You may add Your own copyright statement to Your modifications and | |||||
may provide additional or different license terms and conditions | |||||
for use, reproduction, or distribution of Your modifications, or | |||||
for any such Derivative Works as a whole, provided Your use, | |||||
reproduction, and distribution of the Work otherwise complies with | |||||
the conditions stated in this License. | |||||
5. Submission of Contributions. Unless You explicitly state otherwise, | |||||
any Contribution intentionally submitted for inclusion in the Work | |||||
by You to the Licensor shall be under the terms and conditions of | |||||
this License, without any additional terms or conditions. | |||||
Notwithstanding the above, nothing herein shall supersede or modify | |||||
the terms of any separate license agreement you may have executed | |||||
with Licensor regarding such Contributions. | |||||
6. Trademarks. This License does not grant permission to use the trade | |||||
names, trademarks, service marks, or product names of the Licensor, | |||||
except as required for reasonable and customary use in describing the | |||||
origin of the Work and reproducing the content of the NOTICE file. | |||||
7. Disclaimer of Warranty. Unless required by applicable law or | |||||
agreed to in writing, Licensor provides the Work (and each | |||||
Contributor provides its Contributions) on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||||
implied, including, without limitation, any warranties or conditions | |||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||||
PARTICULAR PURPOSE. You are solely responsible for determining the | |||||
appropriateness of using or redistributing the Work and assume any | |||||
risks associated with Your exercise of permissions under this License. | |||||
8. Limitation of Liability. In no event and under no legal theory, | |||||
whether in tort (including negligence), contract, or otherwise, | |||||
unless required by applicable law (such as deliberate and grossly | |||||
negligent acts) or agreed to in writing, shall any Contributor be | |||||
liable to You for damages, including any direct, indirect, special, | |||||
incidental, or consequential damages of any character arising as a | |||||
result of this License or out of the use or inability to use the | |||||
Work (including but not limited to damages for loss of goodwill, | |||||
work stoppage, computer failure or malfunction, or any and all | |||||
other commercial damages or losses), even if such Contributor | |||||
has been advised of the possibility of such damages. | |||||
9. Accepting Warranty or Additional Liability. While redistributing | |||||
the Work or Derivative Works thereof, You may choose to offer, | |||||
and charge a fee for, acceptance of support, warranty, indemnity, | |||||
or other liability obligations and/or rights consistent with this | |||||
License. However, in accepting such obligations, You may act only | |||||
on Your own behalf and on Your sole responsibility, not on behalf | |||||
of any other Contributor, and only if You agree to indemnify, | |||||
defend, and hold each Contributor harmless for any liability | |||||
incurred by, or claims asserted against, such Contributor by reason | |||||
of your accepting any such warranty or additional liability. | |||||
END OF TERMS AND CONDITIONS | |||||
APPENDIX: How to apply the Apache License to your work. | |||||
To apply the Apache License to your work, attach the following | |||||
boilerplate notice, with the fields enclosed by brackets "[]" | |||||
replaced with your own identifying information. (Don't include | |||||
the brackets!) The text should be enclosed in the appropriate | |||||
comment syntax for the file format. We also recommend that a | |||||
file or class name and description of purpose be included on the | |||||
same "printed page" as the copyright notice for easier | |||||
identification within third-party archives. | |||||
Copyright [yyyy] [name of copyright owner] | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. |
@@ -0,0 +1,54 @@ | |||||
# Code Changes | |||||
## v0.10.0 | |||||
- pubsub: Replace | |||||
``` | |||||
sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"}) | |||||
``` | |||||
with | |||||
``` | |||||
sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{ | |||||
PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"}, | |||||
}) | |||||
``` | |||||
- trace: trace.GRPCServerInterceptor will be provided from *trace.Client.
Given an initialized `*trace.Client` named `tc`, instead of | |||||
``` | |||||
s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc))) | |||||
``` | |||||
write | |||||
``` | |||||
s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor())) | |||||
``` | |||||
- trace: trace.GRPCClientInterceptor will also be provided from *trace.Client.
Instead of | |||||
``` | |||||
conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor())) | |||||
``` | |||||
write | |||||
``` | |||||
conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())) | |||||
``` | |||||
- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC | |||||
interceptor as a dial option as shown below when initializing Cloud package | |||||
clients: | |||||
``` | |||||
c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))) | |||||
if err != nil { | |||||
... | |||||
} | |||||
``` |
@@ -0,0 +1,660 @@ | |||||
# Google Cloud Client Libraries for Go | |||||
[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go) | |||||
Go packages for [Google Cloud Platform](https://cloud.google.com) services. | |||||
``` go | |||||
import "cloud.google.com/go" | |||||
``` | |||||
To install the packages on your system, *do not clone the repo*. Instead use | |||||
``` | |||||
$ go get -u cloud.google.com/go/... | |||||
``` | |||||
**NOTE:** Some of these packages are under development, and may occasionally | |||||
make backwards-incompatible changes. | |||||
**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). | |||||
* [News](#news) | |||||
* [Supported APIs](#supported-apis) | |||||
* [Go Versions Supported](#go-versions-supported) | |||||
* [Authorization](#authorization) | |||||
* [Cloud Datastore](#cloud-datastore-) | |||||
* [Cloud Storage](#cloud-storage-) | |||||
* [Cloud Pub/Sub](#cloud-pub-sub-) | |||||
* [Cloud BigQuery](#cloud-bigquery-) | |||||
* [Stackdriver Logging](#stackdriver-logging-) | |||||
* [Cloud Spanner](#cloud-spanner-) | |||||
## News | |||||
_May 18, 2018_ | |||||
*v0.23.0* | |||||
- bigquery: Add DDL stats to query statistics. | |||||
- bigtable: | |||||
- cbt: Add cells-per-column limit for row lookup. | |||||
- cbt: Make it possible to combine read filters. | |||||
- dlp: v2beta2 client removed. Use the v2 client instead. | |||||
- firestore, spanner: Fix compilation errors due to protobuf changes. | |||||
_May 8, 2018_ | |||||
*v0.22.0* | |||||
- bigtable: | |||||
- cbt: Support cells per column limit for row read. | |||||
- bttest: Correctly handle empty RowSet. | |||||
- Fix ReadModifyWrite operation in emulator. | |||||
- Fix API path in GetCluster. | |||||
- bigquery: | |||||
- BEHAVIOR CHANGE: Retry on 503 status code. | |||||
- Add dataset.DeleteWithContents. | |||||
- Add SchemaUpdateOptions for query jobs. | |||||
- Add Timeline to QueryStatistics. | |||||
- Add more stats to ExplainQueryStage. | |||||
- Support Parquet data format. | |||||
- datastore: | |||||
- Support omitempty for times. | |||||
- dlp: | |||||
- **BREAKING CHANGE:** Remove v1beta1 client. Please migrate to the v2 client, | |||||
which is now out of beta. | |||||
- Add v2 client. | |||||
- firestore: | |||||
- BEHAVIOR CHANGE: Treat set({}, MergeAll) as valid. | |||||
- iam: | |||||
- Support JWT signing via SignJwt callopt. | |||||
- profiler: | |||||
- BEHAVIOR CHANGE: PollForSerialOutput returns an error when context.Done. | |||||
- BEHAVIOR CHANGE: Increase the initial backoff to 1 minute. | |||||
- Avoid returning empty serial port output. | |||||
- pubsub: | |||||
- BEHAVIOR CHANGE: Don't backoff during next retryable error once stream is healthy. | |||||
- BEHAVIOR CHANGE: Don't backoff on EOF. | |||||
- pstest: Support Acknowledge and ModifyAckDeadline RPCs. | |||||
- redis: | |||||
- Add v1 beta Redis client. | |||||
- spanner: | |||||
- Support SessionLabels. | |||||
- speech: | |||||
- Add api v1 beta1 client. | |||||
- storage: | |||||
- BEHAVIOR CHANGE: Retry reads when retryable error occurs. | |||||
- Fix delete of object in requester-pays bucket. | |||||
- Support KMS integration. | |||||
_April 9, 2018_ | |||||
*v0.21.0* | |||||
- bigquery: | |||||
- Add OpenCensus tracing. | |||||
- firestore: | |||||
- **BREAKING CHANGE:** If a document does not exist, return a DocumentSnapshot | |||||
whose Exists method returns false. DocumentRef.Get and Transaction.Get | |||||
return the non-nil DocumentSnapshot in addition to a NotFound error. | |||||
**DocumentRef.GetAll and Transaction.GetAll return a non-nil | |||||
DocumentSnapshot instead of nil.** | |||||
- Add DocumentIterator.Stop. **Call Stop whenever you are done with a | |||||
DocumentIterator.** | |||||
- Added Query.Snapshots and DocumentRef.Snapshots, which provide realtime | |||||
notification of updates. See https://cloud.google.com/firestore/docs/query-data/listen. | |||||
- Canceling an RPC now always returns a grpc.Status with codes.Canceled. | |||||
- spanner: | |||||
- Add `CommitTimestamp`, which supports inserting the commit timestamp of a | |||||
transaction into a column. | |||||
_March 22, 2018_ | |||||
*v0.20.0* | |||||
- bigquery: Support SchemaUpdateOptions for load jobs. | |||||
- bigtable: | |||||
- Add SampleRowKeys. | |||||
- cbt: Support union, intersection GCPolicy. | |||||
- Retry admin RPCS. | |||||
- Add trace spans to retries. | |||||
- datastore: Add OpenCensus tracing. | |||||
- firestore: | |||||
- Fix queries involving Null and NaN. | |||||
- Allow Timestamp protobuffers for time values. | |||||
- logging: Add a WriteTimeout option. | |||||
- spanner: Support Batch API. | |||||
- storage: Add OpenCensus tracing. | |||||
_February 26, 2018_ | |||||
*v0.19.0* | |||||
- bigquery: | |||||
- Support customer-managed encryption keys. | |||||
- bigtable: | |||||
- Improved emulator support. | |||||
- Support GetCluster. | |||||
- datastore: | |||||
- Add general mutations. | |||||
- Support pointer struct fields. | |||||
- Support transaction options. | |||||
- firestore: | |||||
- Add Transaction.GetAll. | |||||
- Support document cursors. | |||||
- logging: | |||||
- Support concurrent RPCs to the service. | |||||
- Support per-entry resources. | |||||
- profiler: | |||||
- Add config options to disable heap and thread profiling. | |||||
- Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set. | |||||
- pubsub: | |||||
- BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the | |||||
callback returns). | |||||
- Add SubscriptionInProject. | |||||
- Add OpenCensus instrumentation for streaming pull. | |||||
- storage: | |||||
- Support CORS. | |||||
_January 18, 2018_ | |||||
*v0.18.0* | |||||
- bigquery: | |||||
- Marked stable. | |||||
- Schema inference of nullable fields supported. | |||||
- Added TimePartitioning to QueryConfig. | |||||
- firestore: Data provided to DocumentRef.Set with a Merge option can contain | |||||
Delete sentinels. | |||||
- logging: Clients can accept parent resources other than projects. | |||||
- pubsub: | |||||
  - pubsub/pstest: A lightweight fake for pubsub. Experimental; feedback welcome.
- Support updating more subscription metadata: AckDeadline, | |||||
RetainAckedMessages and RetentionDuration. | |||||
- oslogin/apiv1beta: New client for the Cloud OS Login API. | |||||
- rpcreplay: A package for recording and replaying gRPC traffic. | |||||
- spanner: | |||||
- Add a ReadWithOptions that supports a row limit, as well as an index. | |||||
- Support query plan and execution statistics. | |||||
- Added [OpenCensus](http://opencensus.io) support. | |||||
- storage: Clarify checksum validation for gzipped files (it is not validated | |||||
when the file is served uncompressed). | |||||
_December 11, 2017_ | |||||
*v0.17.0* | |||||
- firestore BREAKING CHANGES: | |||||
- Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update. | |||||
Change | |||||
`docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})` | |||||
to | |||||
`docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})` | |||||
Change | |||||
`docref.UpdateStruct(ctx, []string{"Field"}, aStruct)` | |||||
to | |||||
`docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})` | |||||
- Rename MergePaths to Merge; require args to be FieldPaths | |||||
- A value stored as an integer can be read into a floating-point field, and vice versa. | |||||
- bigtable/cmd/cbt: | |||||
- Support deleting a column. | |||||
- Add regex option for row read. | |||||
- spanner: Mark stable. | |||||
- storage: | |||||
- Add Reader.ContentEncoding method. | |||||
- Fix handling of SignedURL headers. | |||||
- bigquery: | |||||
- If Uploader.Put is called with no rows, it returns nil without making a | |||||
call. | |||||
- Schema inference supports the "nullable" option in struct tags for | |||||
non-required fields. | |||||
- TimePartitioning supports "Field". | |||||
[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md) | |||||
## Supported APIs | |||||
Google API | Status | Package | |||||
---------------------------------|--------------|----------------------------------------------------------- | |||||
[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] | |||||
[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] | |||||
[Container][cloud-container] | alpha | [`cloud.google.com/go/container/apiv1`][cloud-container-ref] | |||||
[Data Loss Prevention][cloud-dlp]| alpha | [`cloud.google.com/go/dlp/apiv2beta1`][cloud-dlp-ref] | |||||
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref] | |||||
[Debugger][cloud-debugger] | alpha | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref] | |||||
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref] | |||||
[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref] | |||||
[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref] | |||||
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref] | |||||
[Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref] | |||||
[OS Login][cloud-oslogin] | alpha | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref] | |||||
[Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] | |||||
[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref] | |||||
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref] | |||||
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref] | |||||
[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref] | |||||
[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref] | |||||
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref] | |||||
> **Alpha status**: the API is still being actively developed. As a | |||||
> result, it might change in backward-incompatible ways and is not recommended | |||||
> for production use. | |||||
> | |||||
> **Beta status**: the API is largely complete, but still has outstanding | |||||
> features and bugs to be addressed. There may be minor backwards-incompatible | |||||
> changes where necessary. | |||||
> | |||||
> **Stable status**: the API is mature and ready for production use. We will | |||||
> continue addressing bugs and feature requests. | |||||
Documentation and examples are available at | |||||
https://godoc.org/cloud.google.com/go | |||||
Visit or join the | |||||
[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce) | |||||
for updates on these packages. | |||||
## Go Versions Supported | |||||
We support the two most recent major versions of Go. If Google App Engine uses | |||||
an older version, we support that as well. You can see which versions are | |||||
currently supported by looking at the lines following `go:` in | |||||
[`.travis.yml`](.travis.yml). | |||||
## Authorization | |||||
By default, each API will use [Google Application Default Credentials][default-creds] | |||||
for authorization credentials used in calling the API endpoints. This will allow your | |||||
application to run in many environments without requiring explicit configuration. | |||||
[snip]:# (auth) | |||||
```go | |||||
client, err := storage.NewClient(ctx) | |||||
``` | |||||
To authorize using a | |||||
[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), | |||||
pass | |||||
[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile) | |||||
to the `NewClient` function of the desired package. For example: | |||||
[snip]:# (auth-JSON) | |||||
```go | |||||
client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json")) | |||||
``` | |||||
You can exert more control over authorization by using the | |||||
[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to | |||||
create an `oauth2.TokenSource`. Then pass | |||||
[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource) | |||||
to the `NewClient` function: | |||||
[snip]:# (auth-ts) | |||||
```go | |||||
tokenSource := ... | |||||
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) | |||||
``` | |||||
## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore) | |||||
- [About Cloud Datastore][cloud-datastore] | |||||
- [Activating the API for your project][cloud-datastore-activation] | |||||
- [API documentation][cloud-datastore-docs] | |||||
- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore) | |||||
- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks) | |||||
### Example Usage | |||||
First create a `datastore.Client` to use throughout your application: | |||||
[snip]:# (datastore-1) | |||||
```go | |||||
client, err := datastore.NewClient(ctx, "my-project-id") | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
``` | |||||
Then use that client to interact with the API: | |||||
[snip]:# (datastore-2) | |||||
```go | |||||
type Post struct { | |||||
Title string | |||||
Body string `datastore:",noindex"` | |||||
PublishedAt time.Time | |||||
} | |||||
keys := []*datastore.Key{ | |||||
datastore.NameKey("Post", "post1", nil), | |||||
datastore.NameKey("Post", "post2", nil), | |||||
} | |||||
posts := []*Post{ | |||||
{Title: "Post 1", Body: "...", PublishedAt: time.Now()}, | |||||
{Title: "Post 2", Body: "...", PublishedAt: time.Now()}, | |||||
} | |||||
if _, err := client.PutMulti(ctx, keys, posts); err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
``` | |||||
## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) | |||||
- [About Cloud Storage][cloud-storage] | |||||
- [API documentation][cloud-storage-docs] | |||||
- [Go client documentation](https://godoc.org/cloud.google.com/go/storage) | |||||
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage) | |||||
### Example Usage | |||||
First create a `storage.Client` to use throughout your application: | |||||
[snip]:# (storage-1) | |||||
```go | |||||
client, err := storage.NewClient(ctx) | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
``` | |||||
[snip]:# (storage-2) | |||||
```go | |||||
// Read the object1 from bucket. | |||||
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
defer rc.Close() | |||||
body, err := ioutil.ReadAll(rc) | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
``` | |||||
## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) | |||||
- [About Cloud Pub/Sub][cloud-pubsub]
- [API documentation][cloud-pubsub-docs] | |||||
- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub) | |||||
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub) | |||||
### Example Usage | |||||
First create a `pubsub.Client` to use throughout your application: | |||||
[snip]:# (pubsub-1) | |||||
```go | |||||
client, err := pubsub.NewClient(ctx, "project-id") | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
``` | |||||
Then use the client to publish and subscribe: | |||||
[snip]:# (pubsub-2) | |||||
```go | |||||
// Publish "hello world" on topic1. | |||||
topic := client.Topic("topic1") | |||||
res := topic.Publish(ctx, &pubsub.Message{ | |||||
Data: []byte("hello world"), | |||||
}) | |||||
// The publish happens asynchronously. | |||||
// Later, you can get the result from res: | |||||
... | |||||
msgID, err := res.Get(ctx) | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
// Use a callback to receive messages via subscription1. | |||||
sub := client.Subscription("subscription1") | |||||
err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { | |||||
fmt.Println(m.Data) | |||||
m.Ack() // Acknowledge that we've consumed the message. | |||||
}) | |||||
if err != nil { | |||||
log.Println(err) | |||||
} | |||||
``` | |||||
## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery) | |||||
- [About Cloud BigQuery][cloud-bigquery] | |||||
- [API documentation][cloud-bigquery-docs] | |||||
- [Go client documentation][cloud-bigquery-ref] | |||||
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery) | |||||
### Example Usage | |||||
First create a `bigquery.Client` to use throughout your application: | |||||
[snip]:# (bq-1) | |||||
```go | |||||
c, err := bigquery.NewClient(ctx, "my-project-ID") | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
``` | |||||
Then use that client to interact with the API: | |||||
[snip]:# (bq-2) | |||||
```go | |||||
// Construct a query. | |||||
q := c.Query(` | |||||
SELECT year, SUM(number) | |||||
FROM [bigquery-public-data:usa_names.usa_1910_2013] | |||||
WHERE name = "William" | |||||
GROUP BY year | |||||
ORDER BY year | |||||
`) | |||||
// Execute the query. | |||||
it, err := q.Read(ctx) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
// Iterate through the results. | |||||
for { | |||||
var values []bigquery.Value | |||||
err := it.Next(&values) | |||||
if err == iterator.Done { | |||||
break | |||||
} | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
fmt.Println(values) | |||||
} | |||||
``` | |||||
## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging) | |||||
- [About Stackdriver Logging][cloud-logging] | |||||
- [API documentation][cloud-logging-docs] | |||||
- [Go client documentation][cloud-logging-ref] | |||||
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging) | |||||
### Example Usage | |||||
First create a `logging.Client` to use throughout your application: | |||||
[snip]:# (logging-1) | |||||
```go | |||||
ctx := context.Background() | |||||
client, err := logging.NewClient(ctx, "my-project") | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
``` | |||||
Usually, you'll want to add log entries to a buffer to be periodically flushed | |||||
(automatically and asynchronously) to the Stackdriver Logging service. | |||||
[snip]:# (logging-2) | |||||
```go | |||||
logger := client.Logger("my-log") | |||||
logger.Log(logging.Entry{Payload: "something happened!"}) | |||||
``` | |||||
Close your client before your program exits, to flush any buffered log entries. | |||||
[snip]:# (logging-3) | |||||
```go | |||||
err = client.Close() | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
``` | |||||
## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner) | |||||
- [About Cloud Spanner][cloud-spanner] | |||||
- [API documentation][cloud-spanner-docs] | |||||
- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner) | |||||
### Example Usage | |||||
First create a `spanner.Client` to use throughout your application: | |||||
[snip]:# (spanner-1) | |||||
```go | |||||
client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
``` | |||||
[snip]:# (spanner-2) | |||||
```go | |||||
// Simple Reads And Writes | |||||
_, err = client.Apply(ctx, []*spanner.Mutation{ | |||||
spanner.Insert("Users", | |||||
[]string{"name", "email"}, | |||||
[]interface{}{"alice", "a@example.com"})}) | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
row, err := client.Single().ReadRow(ctx, "Users", | |||||
spanner.Key{"alice"}, []string{"email"}) | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
``` | |||||
## Contributing | |||||
Contributions are welcome. Please see the
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md) | |||||
document for details. We're using Gerrit for our code reviews. Please don't open pull
requests against this repo; any new pull requests will be automatically closed.
Please note that this project is released with a Contributor Code of Conduct. | |||||
By participating in this project you agree to abide by its terms. | |||||
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct) | |||||
for more information. | |||||
[cloud-datastore]: https://cloud.google.com/datastore/ | |||||
[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore | |||||
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs | |||||
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate | |||||
[cloud-firestore]: https://cloud.google.com/firestore/ | |||||
[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore | |||||
[cloud-firestore-docs]: https://cloud.google.com/firestore/docs | |||||
[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate | |||||
[cloud-pubsub]: https://cloud.google.com/pubsub/ | |||||
[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub | |||||
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs | |||||
[cloud-storage]: https://cloud.google.com/storage/ | |||||
[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage | |||||
[cloud-storage-docs]: https://cloud.google.com/storage/docs | |||||
[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets | |||||
[cloud-bigtable]: https://cloud.google.com/bigtable/ | |||||
[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable | |||||
[cloud-bigquery]: https://cloud.google.com/bigquery/ | |||||
[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs | |||||
[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery | |||||
[cloud-logging]: https://cloud.google.com/logging/ | |||||
[cloud-logging-docs]: https://cloud.google.com/logging/docs | |||||
[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging | |||||
[cloud-monitoring]: https://cloud.google.com/monitoring/ | |||||
[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3 | |||||
[cloud-vision]: https://cloud.google.com/vision | |||||
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1 | |||||
[cloud-language]: https://cloud.google.com/natural-language | |||||
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1 | |||||
[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest | |||||
[cloud-oslogin-ref]: https://cloud.google.com/compute/docs/oslogin/rest | |||||
[cloud-speech]: https://cloud.google.com/speech | |||||
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1 | |||||
[cloud-spanner]: https://cloud.google.com/spanner/ | |||||
[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner | |||||
[cloud-spanner-docs]: https://cloud.google.com/spanner/docs | |||||
[cloud-translation]: https://cloud.google.com/translation | |||||
[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation | |||||
[cloud-video]: https://cloud.google.com/video-intelligence/ | |||||
[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1 | |||||
[cloud-errors]: https://cloud.google.com/error-reporting/ | |||||
[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting | |||||
[cloud-container]: https://cloud.google.com/containers/ | |||||
[cloud-container-ref]: https://godoc.org/cloud.google.com/go/container/apiv1 | |||||
[cloud-debugger]: https://cloud.google.com/debugger/ | |||||
[cloud-debugger-ref]: https://godoc.org/cloud.google.com/go/debugger/apiv2 | |||||
[cloud-dlp]: https://cloud.google.com/dlp/ | |||||
[cloud-dlp-ref]: https://godoc.org/cloud.google.com/go/dlp/apiv2beta1 | |||||
[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials |
@@ -0,0 +1,13 @@ | |||||
# How to Release this Repo | |||||
1. Determine the current release version with `git tag -l`. It should look | |||||
something like `vX.Y.Z`. We'll call the current | |||||
version `$CV` and the new version `$NV`. | |||||
1. On master, run `git log $CV..` to list all the changes since the last | |||||
release. | |||||
1. Edit the News section of `README.md` to include a summary of the changes. | |||||
1. Mail the CL containing the `README.md` changes. When the CL is approved, submit it. | |||||
1. Without submitting any other CLs: | |||||
a. Switch to master. | |||||
b. Tag the repo with the next version: `git tag $NV`. | |||||
c. Push the tag: `git push origin $NV`. |
@@ -0,0 +1,32 @@ | |||||
# This file configures AppVeyor (http://www.appveyor.com), | |||||
# a Windows-based CI service similar to Travis. | |||||
# Identifier for this run | |||||
version: "{build}" | |||||
# Clone the repo into this path, which conforms to the standard | |||||
# Go workspace structure. | |||||
clone_folder: c:\gopath\src\cloud.google.com\go | |||||
environment: | |||||
GOPATH: c:\gopath | |||||
GCLOUD_TESTS_GOLANG_PROJECT_ID: dulcet-port-762 | |||||
GCLOUD_TESTS_GOLANG_KEY: c:\gopath\src\cloud.google.com\go\key.json | |||||
KEYFILE_CONTENTS: | |||||
secure: IvRbDAhM2PIQqzVkjzJ4FjizUvoQ+c3vG/qhJQG+HlZ/L5KEkqLu+x6WjLrExrNMyGku4znB2jmbTrUW3Ob4sGG+R5vvqeQ3YMHCVIkw5CxY+/bUDkW5RZWsVbuCnNa/vKsWmCP+/sZW6ICe29yKJ2ZOb6QaauI4s9R6j+cqBbU9pumMGYFRb0Rw3uUU7DKmVFCy+NjTENZIlDP9rmjANgAzigowJJEb2Tg9sLlQKmQeKiBSRN8lKc5Nq60a+fIzHGKvql4eIitDDDpOpyHv15/Xr1BzFw2yDoiR4X1lng0u7q0X9RgX4VIYa6gT16NXBEmQgbuX8gh7SfPMp9RhiZD9sVUaV+yogEabYpyPnmUURo0hXwkctKaBkQlEmKvjHwF5dvbg8+yqGhwtjAgFNimXG3INrwQsfQsZskkQWanutbJf9xy50GyWWFZZdi0uT4oXP/b5P7aklPXKXsvrJKBh7RjEaqBrhi86IJwOjBspvoR4l2WmcQyxb2xzQS1pjbBJFQfYJJ8+JgsstTL8PBO9d4ybJC0li1Om1qnWxkaewvPxxuoHJ9LpRKof19yRYWBmhTXb2tTASKG/zslvl4fgG4DmQBS93WC7dsiGOhAraGw2eCTgd0lYZOhk1FjWl9TS80aktXxzH/7nTvem5ohm+eDl6O0wnTL4KXjQVNSQ1PyLn4lGRJ5MNGzBTRFWIr2API2rca4Fysyfh/UdmazPGlNbY9JPGqb9+F04QzLfqm+Zz/cHy59E7lOSMBlUI4KD6d6ZNNKNRH+/g9i+fSiyiXKugTfda8KBnWGyPwprxuWGYaiQUGUYOwJY5R6x5c4mjImAB310V+Wo33UbWFJiwxEDsiCNqW1meVkBzt2er26vh4qbgCUIQ3iM3gFPfHgy+QxkmIhic7Q1HYacQElt8AAP41M7cCKWCuZidegP37MBB//mjjiNt047ZSQEvB4tqsX/OvfbByVef+cbtVw9T0yjHvmCdPW1XrhyrCCgclu6oYYdbmc5D7BBDRbjjMWGv6YvceAbfGf6ukdB5PuV+TGEN/FoQ1QTRA6Aqf+3fLMg4mS4oyTfw5xyYNbv3qoyLPrp+BnxI53WB9p0hfMg4n9FD6NntBxjDq+Q3Lk/bjC/Y4MaRWdzbMzF9a0lgGfcw9DURlK5p7uGJC9vg34feNoQprxVEZRQ01cHLeob6eGkYm4HxSRx8JY39Mh+9wzJo+k/aIvFleNC3e35NOrkXr6wb5e42n2DwBdPqdNolTLtLFRglAL1LTpp27UjvjieWJAKfoDTR5CKl01sZqt0wPdLLcvsMj6CiPFmccUIOYeZMe86kLBD61Qa5F1EwkgO3Om2qSjW96FzL4skRc+BmU5RrHlAFSldR1wpUgtkUMv9vH5Cy+UJdcvpZ8KbmhZ2PsjF7ddJ1ve9RAw3cP325AyIMwZ77Ef1mgTM0NJze6eSW1qKlEsgt1FADPyeUu1NQTA2H2dueMPGlArWTSUgyWR9AdfpqouT7eg0JWI5w+yUZZC+/rPglYbt84oLmYpwuli0z8FyEQRPIc3EtkfWIv/yYgDr2TZ0N2KvGfpi/MAUWgxI1gleC2uKgEOEtuJthd3XZjF2NoE7IBqjQOINybcJOjyeB5vRLDY1FLuxYzdg1y1etkV4XQig/vje | |||||
install: | |||||
# Info for debugging. | |||||
- echo %PATH% | |||||
- go version | |||||
- go env | |||||
- go get -v -d -t ./... | |||||
# Provide a build script, or AppVeyor will call msbuild. | |||||
build_script: | |||||
- go install -v ./... | |||||
- echo %KEYFILE_CONTENTS% > %GCLOUD_TESTS_GOLANG_KEY% | |||||
test_script: | |||||
- go test -v ./... |
@@ -0,0 +1,72 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package cloud_test | |||||
import ( | |||||
"cloud.google.com/go/datastore" | |||||
"cloud.google.com/go/pubsub" | |||||
"golang.org/x/net/context" | |||||
"golang.org/x/oauth2/google" | |||||
"google.golang.org/api/option" | |||||
) | |||||
// Google Application Default Credentials is the recommended way to authorize
// and authenticate clients.
//
// For information on how to create and obtain Application Default Credentials, see
// https://developers.google.com/identity/protocols/application-default-credentials.
func Example_applicationDefaultCredentials() {
	// With no explicit credential option, NewClient falls back to
	// Application Default Credentials.
	client, err := datastore.NewClient(context.Background(), "project-id")
	if err != nil {
		// TODO: handle error.
	}
	_ = client // Use the client.
}
// You can use a file with credentials to authenticate and authorize, such as a JSON
// key file associated with a Google service account. Service Account keys can be
// created and downloaded from
// https://console.developers.google.com/permissions/serviceaccounts.
//
// This example uses the Datastore client, but the same steps apply to
// the other client libraries underneath this package.
func Example_credentialsFile() {
	// option.WithCredentialsFile points the client at a service-account key
	// file on disk.
	client, err := datastore.NewClient(context.Background(),
		"project-id", option.WithCredentialsFile("/path/to/service-account-key.json"))
	if err != nil {
		// TODO: handle error.
	}
	_ = client // Use the client.
}
// In some cases (for instance, you don't want to store secrets on disk), you can
// create credentials from in-memory JSON and use the WithCredentials option.
//
// The google package in this example is at golang.org/x/oauth2/google.
//
// This example uses the PubSub client, but the same steps apply to
// the other client libraries underneath this package.
func Example_credentialsFromJSON() {
	ctx := context.Background()
	// The second argument is the raw JSON key material; the remaining
	// arguments are the OAuth scopes the credentials should be granted.
	creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), pubsub.ScopePubSub)
	if err != nil {
		// TODO: handle error.
	}
	client, err := pubsub.NewClient(ctx, "project-id", option.WithCredentials(creds))
	if err != nil {
		// TODO: handle error.
	}
	_ = client // Use the client.
}
@@ -0,0 +1,8 @@ | |||||
# BigQuery Benchmark | |||||
This directory contains benchmarks for the BigQuery client.
## Usage | |||||
`go run bench.go -- <your project id> queries.json` | |||||
The BigQuery service caches query results, so run the benchmark
at least twice and disregard the first result.
@@ -0,0 +1,85 @@ | |||||
// Copyright 2017 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
//+build ignore | |||||
package main | |||||
import ( | |||||
"encoding/json" | |||||
"flag" | |||||
"io/ioutil" | |||||
"log" | |||||
"time" | |||||
"cloud.google.com/go/bigquery" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/iterator" | |||||
) | |||||
func main() { | |||||
flag.Parse() | |||||
ctx := context.Background() | |||||
c, err := bigquery.NewClient(ctx, flag.Arg(0)) | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
queriesJSON, err := ioutil.ReadFile(flag.Arg(1)) | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
var queries []string | |||||
if err := json.Unmarshal(queriesJSON, &queries); err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
for _, q := range queries { | |||||
doQuery(ctx, c, q) | |||||
} | |||||
} | |||||
func doQuery(ctx context.Context, c *bigquery.Client, qt string) { | |||||
startTime := time.Now() | |||||
q := c.Query(qt) | |||||
it, err := q.Read(ctx) | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
numRows, numCols := 0, 0 | |||||
var firstByte time.Duration | |||||
for { | |||||
var values []bigquery.Value | |||||
err := it.Next(&values) | |||||
if err == iterator.Done { | |||||
break | |||||
} | |||||
if err != nil { | |||||
log.Fatal(err) | |||||
} | |||||
if numRows == 0 { | |||||
numCols = len(values) | |||||
firstByte = time.Since(startTime) | |||||
} else if numCols != len(values) { | |||||
log.Fatalf("got %d columns, want %d", len(values), numCols) | |||||
} | |||||
numRows++ | |||||
} | |||||
log.Printf("query %q: %d rows, %d cols, first byte %f sec, total %f sec", | |||||
qt, numRows, numCols, firstByte.Seconds(), time.Since(startTime).Seconds()) | |||||
} |
@@ -0,0 +1,10 @@ | |||||
[ | |||||
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000", | |||||
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 100000", | |||||
"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 1000000", | |||||
"SELECT title FROM `bigquery-public-data.samples.wikipedia` ORDER BY title LIMIT 1000", | |||||
"SELECT title, id, timestamp, contributor_ip FROM `bigquery-public-data.samples.wikipedia` WHERE title like 'Blo%' ORDER BY id", | |||||
"SELECT * FROM `bigquery-public-data.baseball.games_post_wide` ORDER BY gameId", | |||||
"SELECT * FROM `bigquery-public-data.samples.github_nested` WHERE repository.has_downloads ORDER BY repository.created_at LIMIT 10000", | |||||
"SELECT repo_name, path FROM `bigquery-public-data.github_repos.files` WHERE path LIKE '%.java' ORDER BY id LIMIT 1000000" | |||||
] |
@@ -0,0 +1,164 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"fmt" | |||||
"io" | |||||
"net/http" | |||||
"time" | |||||
gax "github.com/googleapis/gax-go" | |||||
"cloud.google.com/go/internal" | |||||
"cloud.google.com/go/internal/version" | |||||
"google.golang.org/api/googleapi" | |||||
"google.golang.org/api/option" | |||||
htransport "google.golang.org/api/transport/http" | |||||
"golang.org/x/net/context" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
const (
	// prodAddr is the base URL of the production BigQuery REST API.
	prodAddr = "https://www.googleapis.com/bigquery/v2/"
	// Scope is the OAuth2 scope requested for BigQuery operations.
	Scope = "https://www.googleapis.com/auth/bigquery"
	// userAgent identifies this client library in outgoing requests.
	userAgent = "gcloud-golang-bigquery/20160429"
)
// xGoogHeader is the value sent in the x-goog-api-client header: the Go
// runtime version and this library's version.
var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
// setClientHeader attaches the standard x-goog-api-client header to a request.
func setClientHeader(headers http.Header) {
	headers.Set("x-goog-api-client", xGoogHeader)
}
// Client may be used to perform BigQuery operations.
type Client struct {
	// Location, if set, will be used as the default location for all subsequent
	// dataset creation and job operations. A location specified directly in one of
	// those operations will override this value.
	Location string
	projectID string // GCP project to which operations are billed
	bqs *bq.Service // generated BigQuery API service used for all RPCs
}
// NewClient constructs a new Client which can perform BigQuery operations. | |||||
// Operations performed via the client are billed to the specified GCP project. | |||||
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { | |||||
o := []option.ClientOption{ | |||||
option.WithEndpoint(prodAddr), | |||||
option.WithScopes(Scope), | |||||
option.WithUserAgent(userAgent), | |||||
} | |||||
o = append(o, opts...) | |||||
httpClient, endpoint, err := htransport.NewClient(ctx, o...) | |||||
if err != nil { | |||||
return nil, fmt.Errorf("bigquery: dialing: %v", err) | |||||
} | |||||
bqs, err := bq.New(httpClient) | |||||
if err != nil { | |||||
return nil, fmt.Errorf("bigquery: constructing client: %v", err) | |||||
} | |||||
bqs.BasePath = endpoint | |||||
c := &Client{ | |||||
projectID: projectID, | |||||
bqs: bqs, | |||||
} | |||||
return c, nil | |||||
} | |||||
// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
	// Currently a no-op: the client holds no resources that require release.
	return nil
}
// Calls the Jobs.Insert RPC and returns a Job. | |||||
func (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) { | |||||
call := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx) | |||||
setClientHeader(call.Header()) | |||||
if media != nil { | |||||
call.Media(media) | |||||
} | |||||
var res *bq.Job | |||||
var err error | |||||
invoke := func() error { | |||||
res, err = call.Do() | |||||
return err | |||||
} | |||||
// A job with a client-generated ID can be retried; the presence of the | |||||
// ID makes the insert operation idempotent. | |||||
// We don't retry if there is media, because it is an io.Reader. We'd | |||||
// have to read the contents and keep it in memory, and that could be expensive. | |||||
// TODO(jba): Look into retrying if media != nil. | |||||
if job.JobReference != nil && media == nil { | |||||
err = runWithRetry(ctx, invoke) | |||||
} else { | |||||
err = invoke() | |||||
} | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return bqToJob(res, c) | |||||
} | |||||
// unixMillisToTime converts a count of milliseconds since the Unix epoch into
// a time.Time. An input of zero is treated specially: it yields the zero
// time.Time rather than the start of the epoch.
func unixMillisToTime(m int64) time.Time {
	if m == 0 {
		return time.Time{}
	}
	// Split into whole seconds and the nanosecond remainder; time.Unix
	// normalizes the pair, so this matches time.Unix(0, m*1e6) exactly.
	return time.Unix(m/1e3, (m%1e3)*int64(time.Millisecond))
}
// runWithRetry calls the function until it returns nil or a non-retryable error, or | |||||
// the context is done. | |||||
// See the similar function in ../storage/invoke.go. The main difference is the | |||||
// reason for retrying. | |||||
func runWithRetry(ctx context.Context, call func() error) error { | |||||
// These parameters match the suggestions in https://cloud.google.com/bigquery/sla. | |||||
backoff := gax.Backoff{ | |||||
Initial: 1 * time.Second, | |||||
Max: 32 * time.Second, | |||||
Multiplier: 2, | |||||
} | |||||
return internal.Retry(ctx, backoff, func() (stop bool, err error) { | |||||
err = call() | |||||
if err == nil { | |||||
return true, nil | |||||
} | |||||
return !retryableError(err), err | |||||
}) | |||||
} | |||||
// This is the correct definition of retryable according to the BigQuery team. It | |||||
// also considers 502 ("Bad Gateway") and 503 ("Service Unavailable") errors | |||||
// retryable; these are returned by systems between the client and the BigQuery | |||||
// service. | |||||
func retryableError(err error) bool { | |||||
e, ok := err.(*googleapi.Error) | |||||
if !ok { | |||||
return false | |||||
} | |||||
var reason string | |||||
if len(e.Errors) > 0 { | |||||
reason = e.Errors[0].Reason | |||||
} | |||||
return e.Code == http.StatusServiceUnavailable || e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded" | |||||
} |
@@ -0,0 +1,106 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"golang.org/x/net/context" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// CopyConfig holds the configuration for a copy job.
type CopyConfig struct {
	// Srcs are the tables from which data will be copied.
	Srcs []*Table
	// Dst is the table into which the data will be copied.
	Dst *Table
	// CreateDisposition specifies the circumstances under which the destination table will be created.
	// The default is CreateIfNeeded.
	CreateDisposition TableCreateDisposition
	// WriteDisposition specifies how existing data in the destination table is treated.
	// The default is WriteEmpty.
	WriteDisposition TableWriteDisposition
	// The labels associated with this job.
	Labels map[string]string
	// Custom encryption configuration (e.g., Cloud KMS keys).
	// Applied to the destination table; nil means no custom encryption.
	DestinationEncryptionConfig *EncryptionConfig
}
func (c *CopyConfig) toBQ() *bq.JobConfiguration { | |||||
var ts []*bq.TableReference | |||||
for _, t := range c.Srcs { | |||||
ts = append(ts, t.toBQ()) | |||||
} | |||||
return &bq.JobConfiguration{ | |||||
Labels: c.Labels, | |||||
Copy: &bq.JobConfigurationTableCopy{ | |||||
CreateDisposition: string(c.CreateDisposition), | |||||
WriteDisposition: string(c.WriteDisposition), | |||||
DestinationTable: c.Dst.toBQ(), | |||||
DestinationEncryptionConfiguration: c.DestinationEncryptionConfig.toBQ(), | |||||
SourceTables: ts, | |||||
}, | |||||
} | |||||
} | |||||
func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig { | |||||
cc := &CopyConfig{ | |||||
Labels: q.Labels, | |||||
CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition), | |||||
WriteDisposition: TableWriteDisposition(q.Copy.WriteDisposition), | |||||
Dst: bqToTable(q.Copy.DestinationTable, c), | |||||
DestinationEncryptionConfig: bqToEncryptionConfig(q.Copy.DestinationEncryptionConfiguration), | |||||
} | |||||
for _, t := range q.Copy.SourceTables { | |||||
cc.Srcs = append(cc.Srcs, bqToTable(t, c)) | |||||
} | |||||
return cc | |||||
} | |||||
// A Copier copies data into a BigQuery table from one or more BigQuery tables.
// JobIDConfig controls the ID of the job; CopyConfig holds the copy settings.
type Copier struct {
	JobIDConfig
	CopyConfig
	c *Client // client that created this Copier; used to run the job
}
// CopierFrom returns a Copier which can be used to copy data into a | |||||
// BigQuery table from one or more BigQuery tables. | |||||
// The returned Copier may optionally be further configured before its Run method is called. | |||||
func (t *Table) CopierFrom(srcs ...*Table) *Copier { | |||||
return &Copier{ | |||||
c: t.c, | |||||
CopyConfig: CopyConfig{ | |||||
Srcs: srcs, | |||||
Dst: t, | |||||
}, | |||||
} | |||||
} | |||||
// Run initiates a copy job.
// The job is submitted via the Jobs.Insert RPC with no media attached.
func (c *Copier) Run(ctx context.Context) (*Job, error) {
	return c.c.insertJob(ctx, c.newJob(), nil)
}
func (c *Copier) newJob() *bq.Job { | |||||
return &bq.Job{ | |||||
JobReference: c.JobIDConfig.createJobRef(c.c), | |||||
Configuration: c.CopyConfig.toBQ(), | |||||
} | |||||
} |
@@ -0,0 +1,165 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"testing" | |||||
"github.com/google/go-cmp/cmp/cmpopts" | |||||
"cloud.google.com/go/internal/testutil" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// defaultCopyJob returns the baseline bq.Job proto used by the copy tests:
// a single source table, the fixed "RANDOM" job ID, and no optional settings.
// Each call returns a fresh value so test cases may mutate it freely.
func defaultCopyJob() *bq.Job {
	return &bq.Job{
		JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
		Configuration: &bq.JobConfiguration{
			Copy: &bq.JobConfigurationTableCopy{
				DestinationTable: &bq.TableReference{
					ProjectId: "d-project-id",
					DatasetId: "d-dataset-id",
					TableId:   "d-table-id",
				},
				SourceTables: []*bq.TableReference{
					{
						ProjectId: "s-project-id",
						DatasetId: "s-dataset-id",
						TableId:   "s-table-id",
					},
				},
			},
		},
	}
}
func TestCopy(t *testing.T) { | |||||
defer fixRandomID("RANDOM")() | |||||
testCases := []struct { | |||||
dst *Table | |||||
srcs []*Table | |||||
jobID string | |||||
location string | |||||
config CopyConfig | |||||
want *bq.Job | |||||
}{ | |||||
{ | |||||
dst: &Table{ | |||||
ProjectID: "d-project-id", | |||||
DatasetID: "d-dataset-id", | |||||
TableID: "d-table-id", | |||||
}, | |||||
srcs: []*Table{ | |||||
{ | |||||
ProjectID: "s-project-id", | |||||
DatasetID: "s-dataset-id", | |||||
TableID: "s-table-id", | |||||
}, | |||||
}, | |||||
want: defaultCopyJob(), | |||||
}, | |||||
{ | |||||
dst: &Table{ | |||||
ProjectID: "d-project-id", | |||||
DatasetID: "d-dataset-id", | |||||
TableID: "d-table-id", | |||||
}, | |||||
srcs: []*Table{ | |||||
{ | |||||
ProjectID: "s-project-id", | |||||
DatasetID: "s-dataset-id", | |||||
TableID: "s-table-id", | |||||
}, | |||||
}, | |||||
config: CopyConfig{ | |||||
CreateDisposition: CreateNever, | |||||
WriteDisposition: WriteTruncate, | |||||
DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"}, | |||||
Labels: map[string]string{"a": "b"}, | |||||
}, | |||||
want: func() *bq.Job { | |||||
j := defaultCopyJob() | |||||
j.Configuration.Labels = map[string]string{"a": "b"} | |||||
j.Configuration.Copy.CreateDisposition = "CREATE_NEVER" | |||||
j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE" | |||||
j.Configuration.Copy.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"} | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: &Table{ | |||||
ProjectID: "d-project-id", | |||||
DatasetID: "d-dataset-id", | |||||
TableID: "d-table-id", | |||||
}, | |||||
srcs: []*Table{ | |||||
{ | |||||
ProjectID: "s-project-id", | |||||
DatasetID: "s-dataset-id", | |||||
TableID: "s-table-id", | |||||
}, | |||||
}, | |||||
jobID: "job-id", | |||||
want: func() *bq.Job { | |||||
j := defaultCopyJob() | |||||
j.JobReference.JobId = "job-id" | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: &Table{ | |||||
ProjectID: "d-project-id", | |||||
DatasetID: "d-dataset-id", | |||||
TableID: "d-table-id", | |||||
}, | |||||
srcs: []*Table{ | |||||
{ | |||||
ProjectID: "s-project-id", | |||||
DatasetID: "s-dataset-id", | |||||
TableID: "s-table-id", | |||||
}, | |||||
}, | |||||
location: "asia-northeast1", | |||||
want: func() *bq.Job { | |||||
j := defaultCopyJob() | |||||
j.JobReference.Location = "asia-northeast1" | |||||
return j | |||||
}(), | |||||
}, | |||||
} | |||||
c := &Client{projectID: "client-project-id"} | |||||
for i, tc := range testCases { | |||||
tc.dst.c = c | |||||
copier := tc.dst.CopierFrom(tc.srcs...) | |||||
copier.JobID = tc.jobID | |||||
copier.Location = tc.location | |||||
tc.config.Srcs = tc.srcs | |||||
tc.config.Dst = tc.dst | |||||
copier.CopyConfig = tc.config | |||||
got := copier.newJob() | |||||
checkJob(t, i, got, tc.want) | |||||
jc, err := bqToJobConfig(got.Configuration, c) | |||||
if err != nil { | |||||
t.Fatalf("#%d: %v", i, err) | |||||
} | |||||
diff := testutil.Diff(jc.(*CopyConfig), &copier.CopyConfig, | |||||
cmpopts.IgnoreUnexported(Table{})) | |||||
if diff != "" { | |||||
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff) | |||||
} | |||||
} | |||||
} |
@@ -0,0 +1,530 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"errors" | |||||
"fmt" | |||||
"time" | |||||
"cloud.google.com/go/internal/optional" | |||||
"cloud.google.com/go/internal/trace" | |||||
"golang.org/x/net/context" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
"google.golang.org/api/iterator" | |||||
) | |||||
// Dataset is a reference to a BigQuery dataset.
type Dataset struct {
	ProjectID string
	DatasetID string
	c *Client // client used to issue API calls for this dataset
}
// DatasetMetadata contains information about a BigQuery dataset.
type DatasetMetadata struct {
	// These fields can be set when creating a dataset.
	Name string // The user-friendly name for this dataset.
	Description string // The user-friendly description of this dataset.
	Location string // The geo location of the dataset.
	DefaultTableExpiration time.Duration // The default expiration time for new tables.
	Labels map[string]string // User-provided labels.
	Access []*AccessEntry // Access permissions.
	// These fields are read-only.
	CreationTime time.Time // When the dataset was created.
	LastModifiedTime time.Time // When the dataset or any of its tables were modified.
	FullID string // The full dataset ID in the form projectID:datasetID.
	// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
	// ensure that the metadata hasn't changed since it was read.
	ETag string
}
// DatasetMetadataToUpdate is used when updating a dataset's metadata.
// Only non-nil fields will be updated.
type DatasetMetadataToUpdate struct {
	Description optional.String // The user-friendly description of this dataset.
	Name        optional.String // The user-friendly name for this dataset.

	// DefaultTableExpiration is the default expiration time for new tables.
	// If set to time.Duration(0), new tables never expire.
	DefaultTableExpiration optional.Duration

	// The entire access list. It is not possible to replace individual entries.
	Access []*AccessEntry

	// labelUpdater supplies the SetLabel/DeleteLabel methods used to stage
	// label changes (see the update call in toBQ).
	labelUpdater
}
// Dataset creates a handle to a BigQuery dataset in the client's project. | |||||
func (c *Client) Dataset(id string) *Dataset { | |||||
return c.DatasetInProject(c.projectID, id) | |||||
} | |||||
// DatasetInProject creates a handle to a BigQuery dataset in the specified project. | |||||
func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset { | |||||
return &Dataset{ | |||||
ProjectID: projectID, | |||||
DatasetID: datasetID, | |||||
c: c, | |||||
} | |||||
} | |||||
// Create creates a dataset in the BigQuery service. An error will be returned if the
// dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Create")
	// Record the final error (if any) on the trace span when we return.
	defer func() { trace.EndSpan(ctx, err) }()
	ds, err := md.toBQ()
	if err != nil {
		return err
	}
	ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
	// Use Client.Location as a default.
	if ds.Location == "" {
		ds.Location = d.c.Location
	}
	call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx)
	// Stamp the request with the standard client identification header.
	setClientHeader(call.Header())
	_, err = call.Do()
	return err
}
// toBQ converts DatasetMetadata to its BigQuery API representation.
// A nil receiver yields an empty bq.Dataset. It returns an error if any
// read-only field (CreationTime, LastModifiedTime, FullID, ETag) is set.
func (dm *DatasetMetadata) toBQ() (*bq.Dataset, error) {
	ds := &bq.Dataset{}
	if dm == nil {
		return ds, nil
	}
	ds.FriendlyName = dm.Name
	ds.Description = dm.Description
	ds.Location = dm.Location
	ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
	ds.Labels = dm.Labels
	var err error
	ds.Access, err = accessListToBQ(dm.Access)
	if err != nil {
		return nil, err
	}
	// Reject attempts to write server-assigned fields.
	if !dm.CreationTime.IsZero() {
		return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
	}
	if !dm.LastModifiedTime.IsZero() {
		return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
	}
	if dm.FullID != "" {
		return nil, errors.New("bigquery: Dataset.FullID is not writable")
	}
	if dm.ETag != "" {
		return nil, errors.New("bigquery: Dataset.ETag is not writable")
	}
	return ds, nil
}
func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) { | |||||
var q []*bq.DatasetAccess | |||||
for _, e := range a { | |||||
a, err := e.toBQ() | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
q = append(q, a) | |||||
} | |||||
return q, nil | |||||
} | |||||
// Delete deletes the dataset.  Delete will fail if the dataset is not empty.
func (d *Dataset) Delete(ctx context.Context) (err error) {
	// deleteContents=false: the service refuses to delete a non-empty dataset.
	return d.deleteInternal(ctx, false)
}
// DeleteWithContents deletes the dataset, as well as contained resources.
func (d *Dataset) DeleteWithContents(ctx context.Context) (err error) {
	// deleteContents=true: tables inside the dataset are removed too.
	return d.deleteInternal(ctx, true)
}
// deleteInternal issues the Datasets.Delete call shared by Delete and
// DeleteWithContents; deleteContents controls whether contained tables
// are removed as well.
func (d *Dataset) deleteInternal(ctx context.Context, deleteContents bool) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Delete")
	defer func() { trace.EndSpan(ctx, err) }()
	call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx).DeleteContents(deleteContents)
	setClientHeader(call.Header())
	return call.Do()
}
// Metadata fetches the metadata for the dataset.
func (d *Dataset) Metadata(ctx context.Context) (md *DatasetMetadata, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Metadata")
	defer func() { trace.EndSpan(ctx, err) }()
	call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx)
	setClientHeader(call.Header())
	var ds *bq.Dataset
	// Issue the Get with retry; the closure captures the result for use below.
	if err := runWithRetry(ctx, func() (err error) {
		ds, err = call.Do()
		return err
	}); err != nil {
		return nil, err
	}
	return bqToDatasetMetadata(ds)
}
// bqToDatasetMetadata converts a BigQuery API dataset into a DatasetMetadata
// value, including converting each access entry. It fails if any access
// entry is not recognized by bqToAccessEntry.
func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) {
	dm := &DatasetMetadata{
		CreationTime:           unixMillisToTime(d.CreationTime),
		LastModifiedTime:       unixMillisToTime(d.LastModifiedTime),
		DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
		Description:            d.Description,
		Name:                   d.FriendlyName,
		FullID:                 d.Id,
		Location:               d.Location,
		Labels:                 d.Labels,
		ETag:                   d.Etag,
	}
	for _, a := range d.Access {
		// nil client: access entries read from metadata carry no client handle.
		e, err := bqToAccessEntry(a, nil)
		if err != nil {
			return nil, err
		}
		dm.Access = append(dm.Access, e)
	}
	return dm, nil
}
// Update modifies specific Dataset metadata fields.
// To perform a read-modify-write that protects against intervening reads,
// set the etag argument to the DatasetMetadata.ETag field from the read.
// Pass the empty string for etag for a "blind write" that will always succeed.
func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (md *DatasetMetadata, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Dataset.Update")
	defer func() { trace.EndSpan(ctx, err) }()
	ds, err := dm.toBQ()
	if err != nil {
		return nil, err
	}
	call := d.c.bqs.Datasets.Patch(d.ProjectID, d.DatasetID, ds).Context(ctx)
	setClientHeader(call.Header())
	// An If-Match header makes the patch conditional on the etag, so the
	// write fails if the metadata changed since it was read.
	if etag != "" {
		call.Header().Set("If-Match", etag)
	}
	var ds2 *bq.Dataset
	if err := runWithRetry(ctx, func() (err error) {
		ds2, err = call.Do()
		return err
	}); err != nil {
		return nil, err
	}
	// The service returns the post-update dataset; convert and return it.
	return bqToDatasetMetadata(ds2)
}
// toBQ converts the update request into a bq.Dataset patch, recording which
// fields must be force-sent (so empty values are serialized) and which must
// be sent as JSON null (to delete them server-side).
func (dm *DatasetMetadataToUpdate) toBQ() (*bq.Dataset, error) {
	ds := &bq.Dataset{}
	forceSend := func(field string) {
		ds.ForceSendFields = append(ds.ForceSendFields, field)
	}

	if dm.Description != nil {
		ds.Description = optional.ToString(dm.Description)
		forceSend("Description")
	}
	if dm.Name != nil {
		ds.FriendlyName = optional.ToString(dm.Name)
		forceSend("FriendlyName")
	}
	if dm.DefaultTableExpiration != nil {
		dur := optional.ToDuration(dm.DefaultTableExpiration)
		if dur == 0 {
			// Send a null to delete the field.
			ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
		} else {
			ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
		}
	}
	if dm.Access != nil {
		var err error
		ds.Access, err = accessListToBQ(dm.Access)
		if err != nil {
			return nil, err
		}
		// An explicitly empty access list clears the server-side list.
		if len(ds.Access) == 0 {
			ds.NullFields = append(ds.NullFields, "Access")
		}
	}
	// Merge in any staged label changes from the embedded labelUpdater.
	labels, forces, nulls := dm.update()
	ds.Labels = labels
	ds.ForceSendFields = append(ds.ForceSendFields, forces...)
	ds.NullFields = append(ds.NullFields, nulls...)
	return ds, nil
}
// Table creates a handle to a BigQuery table in the dataset. | |||||
// To determine if a table exists, call Table.Metadata. | |||||
// If the table does not already exist, use Table.Create to create it. | |||||
func (d *Dataset) Table(tableID string) *Table { | |||||
return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c} | |||||
} | |||||
// Tables returns an iterator over the tables in the Dataset. | |||||
func (d *Dataset) Tables(ctx context.Context) *TableIterator { | |||||
it := &TableIterator{ | |||||
ctx: ctx, | |||||
dataset: d, | |||||
} | |||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo( | |||||
it.fetch, | |||||
func() int { return len(it.tables) }, | |||||
func() interface{} { b := it.tables; it.tables = nil; return b }) | |||||
return it | |||||
} | |||||
// A TableIterator is an iterator over Tables.
type TableIterator struct {
	ctx      context.Context    // context for all list calls made by this iterator
	dataset  *Dataset           // the dataset whose tables are being listed
	tables   []*Table           // buffered results not yet returned by Next
	pageInfo *iterator.PageInfo // pagination state
	nextFunc func() error       // advances the iterator; set by iterator.NewPageInfo
}
// Next returns the next result. Its second return value is Done if there are | |||||
// no more results. Once Next returns Done, all subsequent calls will return | |||||
// Done. | |||||
func (it *TableIterator) Next() (*Table, error) { | |||||
if err := it.nextFunc(); err != nil { | |||||
return nil, err | |||||
} | |||||
t := it.tables[0] | |||||
it.tables = it.tables[1:] | |||||
return t, nil | |||||
} | |||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// for testing: declared as a variable so tests can substitute a stub.
var listTables = func(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
	call := it.dataset.c.bqs.Tables.List(it.dataset.ProjectID, it.dataset.DatasetID).
		PageToken(pageToken).
		Context(it.ctx)
	setClientHeader(call.Header())
	// Only constrain the page size when the caller requested one.
	if pageSize > 0 {
		call.MaxResults(int64(pageSize))
	}
	var res *bq.TableList
	// Issue the list call with retry, capturing the result for the caller.
	err := runWithRetry(it.ctx, func() (err error) {
		res, err = call.Do()
		return err
	})
	return res, err
}
// fetch retrieves one page of tables, appends them to the iterator's buffer,
// and returns the token for the next page ("" when there are no more pages).
func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
	res, err := listTables(it, pageSize, pageToken)
	if err != nil {
		return "", err
	}
	for _, t := range res.Tables {
		it.tables = append(it.tables, bqToTable(t.TableReference, it.dataset.c))
	}
	return res.NextPageToken, nil
}
func bqToTable(tr *bq.TableReference, c *Client) *Table { | |||||
if tr == nil { | |||||
return nil | |||||
} | |||||
return &Table{ | |||||
ProjectID: tr.ProjectId, | |||||
DatasetID: tr.DatasetId, | |||||
TableID: tr.TableId, | |||||
c: c, | |||||
} | |||||
} | |||||
// Datasets returns an iterator over the datasets in a project.
// The Client's project is used by default, but that can be
// changed by setting ProjectID on the returned iterator before calling Next.
func (c *Client) Datasets(ctx context.Context) *DatasetIterator {
	return c.DatasetsInProject(ctx, c.projectID)
}
// DatasetsInProject returns an iterator over the datasets in the provided project. | |||||
// | |||||
// Deprecated: call Client.Datasets, then set ProjectID on the returned iterator. | |||||
func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator { | |||||
it := &DatasetIterator{ | |||||
ctx: ctx, | |||||
c: c, | |||||
ProjectID: projectID, | |||||
} | |||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo( | |||||
it.fetch, | |||||
func() int { return len(it.items) }, | |||||
func() interface{} { b := it.items; it.items = nil; return b }) | |||||
return it | |||||
} | |||||
// DatasetIterator iterates over the datasets in a project.
type DatasetIterator struct {
	// ListHidden causes hidden datasets to be listed when set to true.
	// Set before the first call to Next.
	ListHidden bool

	// Filter restricts the datasets returned by label. The filter syntax is described in
	// https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
	// Set before the first call to Next.
	Filter string

	// The project ID of the listed datasets.
	// Set before the first call to Next.
	ProjectID string

	ctx      context.Context    // context for all list calls made by this iterator
	c        *Client            // client used to issue the list calls
	pageInfo *iterator.PageInfo // pagination state
	nextFunc func() error       // advances the iterator; set by iterator.NewPageInfo
	items    []*Dataset         // buffered results not yet returned by Next
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// Next returns the next Dataset. Its second return value is iterator.Done if
// there are no more results. Once Next returns Done, all subsequent calls
// will return Done.
func (it *DatasetIterator) Next() (*Dataset, error) {
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	// Pop the first buffered dataset.
	item := it.items[0]
	it.items = it.items[1:]
	return item, nil
}
// for testing: declared as a variable so tests can substitute a stub.
var listDatasets = func(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
	call := it.c.bqs.Datasets.List(it.ProjectID).
		Context(it.ctx).
		PageToken(pageToken).
		All(it.ListHidden)
	setClientHeader(call.Header())
	if pageSize > 0 {
		call.MaxResults(int64(pageSize))
	}
	// Apply the label filter only when the caller set one.
	if it.Filter != "" {
		call.Filter(it.Filter)
	}
	var res *bq.DatasetList
	// Issue the list call with retry, capturing the result for the caller.
	err := runWithRetry(it.ctx, func() (err error) {
		res, err = call.Do()
		return err
	})
	return res, err
}
// fetch retrieves one page of datasets, appends handles for them to the
// iterator's buffer, and returns the token for the next page ("" when done).
func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
	res, err := listDatasets(it, pageSize, pageToken)
	if err != nil {
		return "", err
	}
	for _, d := range res.Datasets {
		it.items = append(it.items, &Dataset{
			ProjectID: d.DatasetReference.ProjectId,
			DatasetID: d.DatasetReference.DatasetId,
			c:         it.c,
		})
	}
	return res.NextPageToken, nil
}
// An AccessEntry describes the permissions that an entity has on a dataset.
type AccessEntry struct {
	Role       AccessRole // The role of the entity
	EntityType EntityType // The type of entity
	Entity     string     // The entity (individual or group) granted access
	View       *Table     // The view granted access (EntityType must be ViewEntity)
}
// AccessRole is the level of access to grant to a dataset.
type AccessRole string

const (
	// OwnerRole grants full ownership of the dataset.
	OwnerRole AccessRole = "OWNER"
	// ReaderRole grants read-only access to the dataset.
	ReaderRole AccessRole = "READER"
	// WriterRole grants read and write access to the dataset.
	WriterRole AccessRole = "WRITER"
)
// EntityType is the type of entity in an AccessEntry.
type EntityType int

const (
	// DomainEntity is a domain (e.g. "example.com").
	DomainEntity EntityType = iota + 1

	// GroupEmailEntity is the email address of a Google Group.
	GroupEmailEntity

	// UserEmailEntity is the email address of an individual user.
	UserEmailEntity

	// SpecialGroupEntity is a special group: one of projectOwners,
	// projectReaders, projectWriters or allAuthenticatedUsers.
	SpecialGroupEntity

	// ViewEntity is a BigQuery view.
	ViewEntity
)
// toBQ converts an AccessEntry to its BigQuery API representation, storing
// the entity into the field selected by the entity type. It returns an error
// for an unknown entity type.
func (e *AccessEntry) toBQ() (*bq.DatasetAccess, error) {
	q := &bq.DatasetAccess{Role: string(e.Role)}
	switch e.EntityType {
	case DomainEntity:
		q.Domain = e.Entity
	case GroupEmailEntity:
		q.GroupByEmail = e.Entity
	case UserEmailEntity:
		q.UserByEmail = e.Entity
	case SpecialGroupEntity:
		q.SpecialGroup = e.Entity
	case ViewEntity:
		// Views are referenced by table, not by the Entity string.
		q.View = e.View.toBQ()
	default:
		return nil, fmt.Errorf("bigquery: unknown entity type %d", e.EntityType)
	}
	return q, nil
}
// bqToAccessEntry converts a BigQuery API access entry back into an
// AccessEntry, inferring the entity type from whichever field is populated.
// The case order matters: the first non-empty field wins. An entry with no
// recognized field set is an error.
func bqToAccessEntry(q *bq.DatasetAccess, c *Client) (*AccessEntry, error) {
	e := &AccessEntry{Role: AccessRole(q.Role)}
	switch {
	case q.Domain != "":
		e.Entity = q.Domain
		e.EntityType = DomainEntity
	case q.GroupByEmail != "":
		e.Entity = q.GroupByEmail
		e.EntityType = GroupEmailEntity
	case q.UserByEmail != "":
		e.Entity = q.UserByEmail
		e.EntityType = UserEmailEntity
	case q.SpecialGroup != "":
		e.Entity = q.SpecialGroup
		e.EntityType = SpecialGroupEntity
	case q.View != nil:
		// Rebuild a Table handle for the view; c may be nil (see bqToDatasetMetadata).
		e.View = c.DatasetInProject(q.View.ProjectId, q.View.DatasetId).Table(q.View.TableId)
		e.EntityType = ViewEntity
	default:
		return nil, errors.New("bigquery: invalid access value")
	}
	return e, nil
}
@@ -0,0 +1,328 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"errors" | |||||
"strconv" | |||||
"testing" | |||||
"time" | |||||
"github.com/google/go-cmp/cmp" | |||||
"cloud.google.com/go/internal/testutil" | |||||
"golang.org/x/net/context" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
itest "google.golang.org/api/iterator/testing" | |||||
) | |||||
// listTablesStub services table-list requests by returning data from an
// in-memory list of values. (Comment fixed: it previously referred to
// "readServiceStub".)
type listTablesStub struct {
	expectedProject, expectedDataset string // identifiers requests must match
	tables                           []*bq.TableListTables
}
// listTables is the stub implementation substituted for the package-level
// listTables variable. It pages through s.tables using a numeric page token
// that encodes the start index of the next page.
func (s *listTablesStub) listTables(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
	if it.dataset.ProjectID != s.expectedProject {
		return nil, errors.New("wrong project id")
	}
	if it.dataset.DatasetID != s.expectedDataset {
		return nil, errors.New("wrong dataset id")
	}
	// Cap the page size so the test exercises multi-page iteration.
	const maxPageSize = 2
	if pageSize <= 0 || pageSize > maxPageSize {
		pageSize = maxPageSize
	}
	start := 0
	if pageToken != "" {
		var err error
		start, err = strconv.Atoi(pageToken)
		if err != nil {
			return nil, err
		}
	}
	end := start + pageSize
	if end > len(s.tables) {
		end = len(s.tables)
	}
	// An empty token signals the final page.
	nextPageToken := ""
	if end < len(s.tables) {
		nextPageToken = strconv.Itoa(end)
	}
	return &bq.TableList{
		Tables:        s.tables[start:end],
		NextPageToken: nextPageToken,
	}, nil
}
// TestTables verifies TableIterator behavior by swapping the package-level
// listTables variable for an in-memory stub and driving the iterator through
// the shared iterator conformance tests.
func TestTables(t *testing.T) {
	c := &Client{projectID: "p1"}
	inTables := []*bq.TableListTables{
		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t1"}},
		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t2"}},
		{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t3"}},
	}
	outTables := []*Table{
		{ProjectID: "p1", DatasetID: "d1", TableID: "t1", c: c},
		{ProjectID: "p1", DatasetID: "d1", TableID: "t2", c: c},
		{ProjectID: "p1", DatasetID: "d1", TableID: "t3", c: c},
	}

	lts := &listTablesStub{
		expectedProject: "p1",
		expectedDataset: "d1",
		tables:          inTables,
	}
	// Swap in the stub for the duration of the test; mutating package state
	// is why this test cannot run in parallel.
	old := listTables
	listTables = lts.listTables // cannot use t.Parallel with this test
	defer func() { listTables = old }()

	msg, ok := itest.TestIterator(outTables,
		func() interface{} { return c.Dataset("d1").Tables(context.Background()) },
		func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
	if !ok {
		t.Error(msg)
	}
}
// listDatasetsStub services dataset-list requests from an in-memory list,
// optionally hiding entries to exercise the ListHidden flag.
type listDatasetsStub struct {
	expectedProject string                          // project ID requests must match
	datasets        []*bq.DatasetListDatasets       // all datasets, hidden or not
	hidden          map[*bq.DatasetListDatasets]bool // which entries are hidden
}
// listDatasets is the stub implementation substituted for the package-level
// listDatasets variable. It pages through s.datasets with a numeric page
// token, skipping hidden entries unless the iterator requests them.
func (s *listDatasetsStub) listDatasets(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
	const maxPageSize = 2
	if pageSize <= 0 || pageSize > maxPageSize {
		pageSize = maxPageSize
	}
	if it.Filter != "" {
		return nil, errors.New("filter not supported")
	}
	if it.ProjectID != s.expectedProject {
		return nil, errors.New("bad project ID")
	}
	start := 0
	if pageToken != "" {
		var err error
		start, err = strconv.Atoi(pageToken)
		if err != nil {
			return nil, err
		}
	}
	var (
		i             int
		result        []*bq.DatasetListDatasets
		nextPageToken string
	)
	for i = start; len(result) < pageSize && i < len(s.datasets); i++ {
		// Hidden datasets are only included when ListHidden is set.
		if s.hidden[s.datasets[i]] && !it.ListHidden {
			continue
		}
		result = append(result, s.datasets[i])
	}
	// A token is returned only if the scan stopped before the end of the list.
	if i < len(s.datasets) {
		nextPageToken = strconv.Itoa(i)
	}
	return &bq.DatasetList{
		Datasets:      result,
		NextPageToken: nextPageToken,
	}, nil
}
// TestDatasets verifies DatasetIterator behavior with and without hidden
// datasets, using a stubbed listDatasets and the shared iterator tests.
func TestDatasets(t *testing.T) {
	client := &Client{projectID: "p"}
	inDatasets := []*bq.DatasetListDatasets{
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "a"}},
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "b"}},
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "hidden"}},
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "c"}},
	}
	outDatasets := []*Dataset{
		{"p", "a", client},
		{"p", "b", client},
		{"p", "hidden", client},
		{"p", "c", client},
	}
	lds := &listDatasetsStub{
		expectedProject: "p",
		datasets:        inDatasets,
		hidden:          map[*bq.DatasetListDatasets]bool{inDatasets[2]: true},
	}
	// Swap in the stub; restoring it in a defer keeps other tests unaffected.
	old := listDatasets
	listDatasets = lds.listDatasets // cannot use t.Parallel with this test
	defer func() { listDatasets = old }()

	// With ListHidden=true, every dataset (including "hidden") is returned.
	msg, ok := itest.TestIterator(outDatasets,
		func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = true; return it },
		func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
	if !ok {
		t.Fatalf("ListHidden=true: %s", msg)
	}

	// With ListHidden=false, the hidden dataset is skipped.
	msg, ok = itest.TestIterator([]*Dataset{outDatasets[0], outDatasets[1], outDatasets[3]},
		func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = false; return it },
		func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
	if !ok {
		t.Fatalf("ListHidden=false: %s", msg)
	}
}
// TestDatasetToBQ checks DatasetMetadata.toBQ round-trips writable fields
// and rejects metadata that sets read-only fields.
func TestDatasetToBQ(t *testing.T) {
	for _, test := range []struct {
		in   *DatasetMetadata
		want *bq.Dataset
	}{
		{nil, &bq.Dataset{}}, // nil metadata is legal and yields the zero value
		{&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}},
		{&DatasetMetadata{
			Name:                   "name",
			Description:            "desc",
			DefaultTableExpiration: time.Hour,
			Location:               "EU",
			Labels:                 map[string]string{"x": "y"},
			Access:                 []*AccessEntry{{Role: OwnerRole, Entity: "example.com", EntityType: DomainEntity}},
		}, &bq.Dataset{
			FriendlyName:             "name",
			Description:              "desc",
			DefaultTableExpirationMs: 60 * 60 * 1000,
			Location:                 "EU",
			Labels:                   map[string]string{"x": "y"},
			Access:                   []*bq.DatasetAccess{{Role: "OWNER", Domain: "example.com"}},
		}},
	} {
		got, err := test.in.toBQ()
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%v:\ngot  %+v\nwant %+v", test.in, got, test.want)
		}
	}

	// Check that non-writeable fields are unset.
	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	for _, dm := range []*DatasetMetadata{
		{CreationTime: aTime},
		{LastModifiedTime: aTime},
		{FullID: "x"},
		{ETag: "e"},
	} {
		if _, err := dm.toBQ(); err == nil {
			t.Errorf("%+v: got nil, want error", dm)
		}
	}
}
// TestBQToDatasetMetadata checks that a fully-populated bq.Dataset converts
// to the expected DatasetMetadata, including millisecond timestamps and the
// access list.
func TestBQToDatasetMetadata(t *testing.T) {
	cTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	cMillis := cTime.UnixNano() / 1e6
	mTime := time.Date(2017, 10, 31, 0, 0, 0, 0, time.Local)
	mMillis := mTime.UnixNano() / 1e6
	q := &bq.Dataset{
		CreationTime:             cMillis,
		LastModifiedTime:         mMillis,
		FriendlyName:             "name",
		Description:              "desc",
		DefaultTableExpirationMs: 60 * 60 * 1000,
		Location:                 "EU",
		Labels:                   map[string]string{"x": "y"},
		Access: []*bq.DatasetAccess{
			{Role: "READER", UserByEmail: "joe@example.com"},
			{Role: "WRITER", GroupByEmail: "users@example.com"},
		},
		Etag: "etag",
	}
	want := &DatasetMetadata{
		CreationTime:           cTime,
		LastModifiedTime:       mTime,
		Name:                   "name",
		Description:            "desc",
		DefaultTableExpiration: time.Hour,
		Location:               "EU",
		Labels:                 map[string]string{"x": "y"},
		Access: []*AccessEntry{
			{Role: ReaderRole, Entity: "joe@example.com", EntityType: UserEmailEntity},
			{Role: WriterRole, Entity: "users@example.com", EntityType: GroupEmailEntity},
		},
		ETag: "etag",
	}
	got, err := bqToDatasetMetadata(q)
	if err != nil {
		t.Fatal(err)
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("-got, +want:\n%s", diff)
	}
}
// TestDatasetMetadataToUpdateToBQ checks that an update request produces the
// expected patch, including ForceSendFields for set fields and NullFields
// for deleted labels.
func TestDatasetMetadataToUpdateToBQ(t *testing.T) {
	dm := DatasetMetadataToUpdate{
		Description:            "desc",
		Name:                   "name",
		DefaultTableExpiration: time.Hour,
	}
	dm.SetLabel("label", "value")
	dm.DeleteLabel("del")

	got, err := dm.toBQ()
	if err != nil {
		t.Fatal(err)
	}
	want := &bq.Dataset{
		Description:              "desc",
		FriendlyName:             "name",
		DefaultTableExpirationMs: 60 * 60 * 1000,
		Labels:                   map[string]string{"label": "value"},
		ForceSendFields:          []string{"Description", "FriendlyName"},
		NullFields:               []string{"Labels.del"},
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("-got, +want:\n%s", diff)
	}
}
// TestConvertAccessEntry round-trips access entries through toBQ and
// bqToAccessEntry for every entity type, then checks the error paths
// (missing entity type, access value with no recognized field).
func TestConvertAccessEntry(t *testing.T) {
	c := &Client{projectID: "pid"}
	for _, e := range []*AccessEntry{
		{Role: ReaderRole, Entity: "e", EntityType: DomainEntity},
		{Role: WriterRole, Entity: "e", EntityType: GroupEmailEntity},
		{Role: OwnerRole, Entity: "e", EntityType: UserEmailEntity},
		{Role: ReaderRole, Entity: "e", EntityType: SpecialGroupEntity},
		{Role: ReaderRole, EntityType: ViewEntity,
			View: &Table{ProjectID: "p", DatasetID: "d", TableID: "t", c: c}},
	} {
		q, err := e.toBQ()
		if err != nil {
			t.Fatal(err)
		}
		got, err := bqToAccessEntry(q, c)
		if err != nil {
			t.Fatal(err)
		}
		if diff := testutil.Diff(got, e, cmp.AllowUnexported(Table{}, Client{})); diff != "" {
			t.Errorf("got=-, want=+:\n%s", diff)
		}
	}

	// Zero EntityType is rejected by toBQ.
	e := &AccessEntry{Role: ReaderRole, Entity: "e"}
	if _, err := e.toBQ(); err == nil {
		t.Error("got nil, want error")
	}
	// An access value with no entity field set is rejected by bqToAccessEntry.
	if _, err := bqToAccessEntry(&bq.DatasetAccess{Role: "WRITER"}, nil); err == nil {
		t.Error("got nil, want error")
	}
}
@@ -0,0 +1,67 @@ | |||||
// Copyright 2018 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// https://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// AUTO-GENERATED CODE. DO NOT EDIT. | |||||
package datatransfer | |||||
import ( | |||||
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1" | |||||
) | |||||
import ( | |||||
"fmt" | |||||
"strconv" | |||||
"testing" | |||||
"time" | |||||
"cloud.google.com/go/internal/testutil" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/iterator" | |||||
"google.golang.org/api/option" | |||||
) | |||||
// Blank references keep the generated imports above from triggering
// "imported and not used" compile errors.
var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
// TestDataTransferServiceSmoke is a generated smoke test: it dials the real
// Data Transfer service and lists data sources for the test project. It is
// skipped in short mode or when integration credentials are unavailable.
func TestDataTransferServiceSmoke(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping smoke test in short mode")
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
	if ts == nil {
		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
	}

	projectId := testutil.ProjID()
	_ = projectId

	c, err := NewClient(ctx, option.WithTokenSource(ts))
	if err != nil {
		t.Fatal(err)
	}

	var formattedParent string = fmt.Sprintf("projects/%s", projectId)
	var request = &datatransferpb.ListDataSourcesRequest{
		Parent: formattedParent,
	}

	iter := c.ListDataSources(ctx, request)
	// One successful Next (or a clean Done) is enough to prove connectivity.
	if _, err := iter.Next(); err != nil && err != iterator.Done {
		t.Error(err)
	}
}
@@ -0,0 +1,603 @@ | |||||
// Copyright 2018 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// https://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// AUTO-GENERATED CODE. DO NOT EDIT. | |||||
package datatransfer | |||||
import ( | |||||
"math" | |||||
"time" | |||||
"cloud.google.com/go/internal/version" | |||||
gax "github.com/googleapis/gax-go" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/iterator" | |||||
"google.golang.org/api/option" | |||||
"google.golang.org/api/transport" | |||||
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1" | |||||
"google.golang.org/grpc" | |||||
"google.golang.org/grpc/codes" | |||||
"google.golang.org/grpc/metadata" | |||||
) | |||||
// CallOptions contains the retry settings for each method of Client.
// Each field holds the per-RPC gax options (retry/backoff policy) applied
// by the corresponding Client method; see defaultCallOptions for defaults.
type CallOptions struct {
	GetDataSource        []gax.CallOption
	ListDataSources      []gax.CallOption
	CreateTransferConfig []gax.CallOption
	UpdateTransferConfig []gax.CallOption
	DeleteTransferConfig []gax.CallOption
	GetTransferConfig    []gax.CallOption
	ListTransferConfigs  []gax.CallOption
	ScheduleTransferRuns []gax.CallOption
	GetTransferRun       []gax.CallOption
	DeleteTransferRun    []gax.CallOption
	ListTransferRuns     []gax.CallOption
	ListTransferLogs     []gax.CallOption
	CheckValidCreds      []gax.CallOption
}
// defaultClientOptions returns the base dial options for the service:
// the production endpoint and the service's default OAuth scopes. Caller
// options passed to NewClient are appended after these and so take priority.
func defaultClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("bigquerydatatransfer.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}
// defaultCallOptions builds the per-method retry configuration.
// Idempotent methods retry on DeadlineExceeded/Unavailable with exponential
// backoff; non-idempotent methods look up a key that is absent from the map,
// yielding a nil option slice — i.e. no retry. That nil lookup is deliberate
// generator output, not a bug.
func defaultCallOptions() *CallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &CallOptions{
		GetDataSource:        retry[[2]string{"default", "idempotent"}],
		ListDataSources:      retry[[2]string{"default", "idempotent"}],
		CreateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
		UpdateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
		DeleteTransferConfig: retry[[2]string{"default", "idempotent"}],
		GetTransferConfig:    retry[[2]string{"default", "idempotent"}],
		ListTransferConfigs:  retry[[2]string{"default", "idempotent"}],
		ScheduleTransferRuns: retry[[2]string{"default", "non_idempotent"}],
		GetTransferRun:       retry[[2]string{"default", "idempotent"}],
		DeleteTransferRun:    retry[[2]string{"default", "idempotent"}],
		ListTransferRuns:     retry[[2]string{"default", "idempotent"}],
		ListTransferLogs:     retry[[2]string{"default", "idempotent"}],
		CheckValidCreds:      retry[[2]string{"default", "idempotent"}],
	}
}
// Client is a client for interacting with BigQuery Data Transfer API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
	// The connection to the service.
	conn *grpc.ClientConn
	// The gRPC API client.
	client datatransferpb.DataTransferServiceClient
	// The call options for this service. Exported so callers may replace
	// the retry policy; must not be mutated while calls are in flight.
	CallOptions *CallOptions
	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}
// NewClient creates a new data transfer service client.
//
// The Google BigQuery Data Transfer Service API enables BigQuery users to
// configure the transfer of their data from other Google Products into BigQuery.
// This service contains methods that are end user exposed. It backs up the
// frontend.
//
// Caller-supplied opts are appended after defaultClientOptions, so they
// override the default endpoint and scopes.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &Client{
		conn:        conn,
		CallOptions: defaultCallOptions(),
		client:      datatransferpb.NewDataTransferServiceClient(conn),
	}
	// Populate the x-goog-api-client header sent with every request.
	c.setGoogleClientInfo()
	return c, nil
}
// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
	return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
//
// keyval holds extra key/value pairs appended after the Go runtime version
// and before the gapic/gax/grpc library versions.
func (c *Client) setGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// GetDataSource retrieves a supported data source and returns its settings,
// which can be used for UI rendering.
func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// The full-slice expression caps capacity so append copies instead of
	// mutating the shared CallOptions default slice.
	opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...)
	var resp *datatransferpb.DataSource
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetDataSource(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListDataSources lists supported data sources and returns their settings,
// which can be used for UI rendering.
//
// The returned iterator fetches pages lazily; RPC errors surface from
// its Next method. Note that req is mutated (PageToken/PageSize) as
// pages are fetched.
func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Cap capacity so append copies rather than mutating the shared defaults.
	opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...)
	it := &DataSourceIterator{}
	// InternalFetch performs one ListDataSources RPC for a single page.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) {
		var resp *datatransferpb.ListDataSourcesResponse
		req.PageToken = pageToken
		// Clamp: the proto page_size field is int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListDataSources(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.DataSources, resp.NextPageToken, nil
	}
	// fetch buffers one page of items into the iterator and reports the
	// token for the following page; driven by iterator.NewPageInfo.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// CreateTransferConfig creates a new data transfer configuration.
func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Cap capacity so append copies rather than mutating the shared defaults.
	opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// UpdateTransferConfig updates a data transfer configuration.
// All fields must be set, even if they are not updated.
func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Cap capacity so append copies rather than mutating the shared defaults.
	opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.UpdateTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteTransferConfig deletes a data transfer configuration,
// including any associated transfer runs and logs.
func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Cap capacity so append copies rather than mutating the shared defaults.
	opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// Response is google.protobuf.Empty; only the error matters.
		_, err = c.client.DeleteTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
// GetTransferConfig returns information about a data transfer config.
func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Cap capacity so append copies rather than mutating the shared defaults.
	opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListTransferConfigs returns information about all data transfers in the project.
//
// Pages are fetched lazily through the returned iterator; req is mutated
// (PageToken/PageSize) as pages are fetched.
func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Cap capacity so append copies rather than mutating the shared defaults.
	opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...)
	it := &TransferConfigIterator{}
	// InternalFetch performs one ListTransferConfigs RPC for a single page.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) {
		var resp *datatransferpb.ListTransferConfigsResponse
		req.PageToken = pageToken
		// Clamp: the proto page_size field is int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferConfigs(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferConfigs, resp.NextPageToken, nil
	}
	// fetch buffers one page into the iterator and reports the next token.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// ScheduleTransferRuns creates transfer runs for a time range [start_time, end_time].
// For each date - or whatever granularity the data source supports - in the
// range, one transfer run is created.
// Note that runs are created per UTC time in the time range.
func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Cap capacity so append copies rather than mutating the shared defaults.
	opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...)
	var resp *datatransferpb.ScheduleTransferRunsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.ScheduleTransferRuns(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// GetTransferRun returns information about the particular transfer run.
func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Cap capacity so append copies rather than mutating the shared defaults.
	opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...)
	var resp *datatransferpb.TransferRun
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetTransferRun(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteTransferRun deletes the specified transfer run.
func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Cap capacity so append copies rather than mutating the shared defaults.
	opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// Response is google.protobuf.Empty; only the error matters.
		_, err = c.client.DeleteTransferRun(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
// ListTransferRuns returns information about running and completed jobs.
//
// Pages are fetched lazily through the returned iterator; req is mutated
// (PageToken/PageSize) as pages are fetched.
func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Cap capacity so append copies rather than mutating the shared defaults.
	opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...)
	it := &TransferRunIterator{}
	// InternalFetch performs one ListTransferRuns RPC for a single page.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) {
		var resp *datatransferpb.ListTransferRunsResponse
		req.PageToken = pageToken
		// Clamp: the proto page_size field is int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferRuns(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferRuns, resp.NextPageToken, nil
	}
	// fetch buffers one page into the iterator and reports the next token.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// ListTransferLogs returns user facing log messages for the data transfer run.
//
// Pages are fetched lazily through the returned iterator; req is mutated
// (PageToken/PageSize) as pages are fetched.
func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Cap capacity so append copies rather than mutating the shared defaults.
	opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...)
	it := &TransferMessageIterator{}
	// InternalFetch performs one ListTransferLogs RPC for a single page.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
		var resp *datatransferpb.ListTransferLogsResponse
		req.PageToken = pageToken
		// Clamp: the proto page_size field is int32.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferLogs(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferMessages, resp.NextPageToken, nil
	}
	// fetch buffers one page into the iterator and reports the next token.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// CheckValidCreds returns true if valid credentials exist for the given data source and
// requesting user.
// Some data sources don't support service accounts, so we need to talk to
// them on behalf of the end user. This API just checks whether we have an OAuth
// token for the particular user, which is a pre-requisite before the user can
// create a transfer config.
func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	// Cap capacity so append copies rather than mutating the shared defaults.
	opts = append(c.CallOptions.CheckValidCreds[0:len(c.CallOptions.CheckValidCreds):len(c.CallOptions.CheckValidCreds)], opts...)
	var resp *datatransferpb.CheckValidCredsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CheckValidCreds(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DataSourceIterator manages a stream of *datatransferpb.DataSource.
type DataSourceIterator struct {
	items    []*datatransferpb.DataSource // buffered items not yet handed out by Next
	pageInfo *iterator.PageInfo
	nextFunc func() error // refills items via the fetch closure; set by ListDataSources
	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.DataSource, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DataSourceIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *DataSourceIterator) Next() (*datatransferpb.DataSource, error) {
	var item *datatransferpb.DataSource
	// nextFunc guarantees items is non-empty on nil error.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}
// bufLen reports how many fetched items remain buffered.
func (it *DataSourceIterator) bufLen() int {
	return len(it.items)
}
// takeBuf surrenders the buffered items to the pagination machinery.
func (it *DataSourceIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
// TransferConfigIterator manages a stream of *datatransferpb.TransferConfig.
type TransferConfigIterator struct {
	items    []*datatransferpb.TransferConfig // buffered items not yet handed out by Next
	pageInfo *iterator.PageInfo
	nextFunc func() error // refills items via the fetch closure; set by ListTransferConfigs
	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferConfig, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferConfigIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferConfigIterator) Next() (*datatransferpb.TransferConfig, error) {
	var item *datatransferpb.TransferConfig
	// nextFunc guarantees items is non-empty on nil error.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}
// bufLen reports how many fetched items remain buffered.
func (it *TransferConfigIterator) bufLen() int {
	return len(it.items)
}
// takeBuf surrenders the buffered items to the pagination machinery.
func (it *TransferConfigIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
// TransferMessageIterator manages a stream of *datatransferpb.TransferMessage.
type TransferMessageIterator struct {
	items    []*datatransferpb.TransferMessage // buffered items not yet handed out by Next
	pageInfo *iterator.PageInfo
	nextFunc func() error // refills items via the fetch closure; set by ListTransferLogs
	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferMessage, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferMessageIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferMessageIterator) Next() (*datatransferpb.TransferMessage, error) {
	var item *datatransferpb.TransferMessage
	// nextFunc guarantees items is non-empty on nil error.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}
// bufLen reports how many fetched items remain buffered.
func (it *TransferMessageIterator) bufLen() int {
	return len(it.items)
}
// takeBuf surrenders the buffered items to the pagination machinery.
func (it *TransferMessageIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
// TransferRunIterator manages a stream of *datatransferpb.TransferRun.
type TransferRunIterator struct {
	items    []*datatransferpb.TransferRun // buffered items not yet handed out by Next
	pageInfo *iterator.PageInfo
	nextFunc func() error // refills items via the fetch closure; set by ListTransferRuns
	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferRun, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferRunIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}
// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferRunIterator) Next() (*datatransferpb.TransferRun, error) {
	var item *datatransferpb.TransferRun
	// nextFunc guarantees items is non-empty on nil error.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}
// bufLen reports how many fetched items remain buffered.
func (it *TransferRunIterator) bufLen() int {
	return len(it.items)
}
// takeBuf surrenders the buffered items to the pagination machinery.
func (it *TransferRunIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
@@ -0,0 +1,288 @@ | |||||
// Copyright 2018 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// https://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// AUTO-GENERATED CODE. DO NOT EDIT. | |||||
package datatransfer_test | |||||
import ( | |||||
"cloud.google.com/go/bigquery/datatransfer/apiv1" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/iterator" | |||||
datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1" | |||||
) | |||||
// ExampleNewClient demonstrates constructing a data transfer client.
func ExampleNewClient() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
// ExampleClient_GetDataSource demonstrates fetching one data source's settings.
func ExampleClient_GetDataSource() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.GetDataSourceRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetDataSource(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_ListDataSources demonstrates iterating all data sources.
func ExampleClient_ListDataSources() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ListDataSourcesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListDataSources(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleClient_CreateTransferConfig demonstrates creating a transfer config.
func ExampleClient_CreateTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.CreateTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_UpdateTransferConfig demonstrates updating a transfer config.
func ExampleClient_UpdateTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.UpdateTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_DeleteTransferConfig demonstrates deleting a transfer config.
func ExampleClient_DeleteTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.DeleteTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
// ExampleClient_GetTransferConfig demonstrates fetching a transfer config.
func ExampleClient_GetTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.GetTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_ListTransferConfigs demonstrates iterating transfer configs.
func ExampleClient_ListTransferConfigs() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ListTransferConfigsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferConfigs(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleClient_ScheduleTransferRuns demonstrates scheduling transfer runs.
func ExampleClient_ScheduleTransferRuns() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.ScheduleTransferRunsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ScheduleTransferRuns(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_GetTransferRun demonstrates fetching one transfer run.
func ExampleClient_GetTransferRun() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.GetTransferRunRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTransferRun(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_DeleteTransferRun demonstrates deleting a transfer run.
func ExampleClient_DeleteTransferRun() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &datatransferpb.DeleteTransferRunRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteTransferRun(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
func ExampleClient_ListTransferRuns() { | |||||
ctx := context.Background() | |||||
c, err := datatransfer.NewClient(ctx) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
req := &datatransferpb.ListTransferRunsRequest{ | |||||
// TODO: Fill request struct fields. | |||||
} | |||||
it := c.ListTransferRuns(ctx, req) | |||||
for { | |||||
resp, err := it.Next() | |||||
if err == iterator.Done { | |||||
break | |||||
} | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
// TODO: Use resp. | |||||
_ = resp | |||||
} | |||||
} | |||||
func ExampleClient_ListTransferLogs() { | |||||
ctx := context.Background() | |||||
c, err := datatransfer.NewClient(ctx) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
req := &datatransferpb.ListTransferLogsRequest{ | |||||
// TODO: Fill request struct fields. | |||||
} | |||||
it := c.ListTransferLogs(ctx, req) | |||||
for { | |||||
resp, err := it.Next() | |||||
if err == iterator.Done { | |||||
break | |||||
} | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
// TODO: Use resp. | |||||
_ = resp | |||||
} | |||||
} | |||||
func ExampleClient_CheckValidCreds() { | |||||
ctx := context.Background() | |||||
c, err := datatransfer.NewClient(ctx) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
req := &datatransferpb.CheckValidCredsRequest{ | |||||
// TODO: Fill request struct fields. | |||||
} | |||||
resp, err := c.CheckValidCreds(ctx, req) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
// TODO: Use resp. | |||||
_ = resp | |||||
} |
@@ -0,0 +1,47 @@ | |||||
// Copyright 2018 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// https://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// AUTO-GENERATED CODE. DO NOT EDIT. | |||||
// Package datatransfer is an auto-generated package for the | |||||
// BigQuery Data Transfer API. | |||||
// | |||||
// NOTE: This package is in alpha. It is not stable, and is likely to change. | |||||
// | |||||
// Transfers data from partner SaaS applications to Google BigQuery on a | |||||
// scheduled, managed basis. | |||||
package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1" | |||||
import ( | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/grpc/metadata" | |||||
) | |||||
// insertMetadata returns a copy of ctx whose outgoing gRPC metadata is the
// context's existing outgoing metadata merged with every MD in mds. Values
// for keys present in both are appended after the existing values, not
// replaced.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	// The ok result is deliberately ignored: Copy is called on the (possibly
	// absent) metadata so the original map attached to ctx is never mutated.
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	scopes := []string{
		"https://www.googleapis.com/auth/cloud-platform",
	}
	return scopes
}
@@ -0,0 +1,135 @@ | |||||
// Copyright 2018 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// https://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package datatransfer | |||||
// ProjectPath returns the path for the project resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s", project)
// instead.
func ProjectPath(project string) string {
	return "projects/" + project
}
// LocationPath returns the path for the location resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s", project, location)
// instead.
func LocationPath(project, location string) string {
	return "projects/" + project + "/locations/" + location
}
// LocationDataSourcePath returns the path for the location data source resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", project, location, dataSource)
// instead.
func LocationDataSourcePath(project, location, dataSource string) string {
	return "projects/" + project +
		"/locations/" + location +
		"/dataSources/" + dataSource
}
// LocationTransferConfigPath returns the path for the location transfer config resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", project, location, transferConfig)
// instead.
func LocationTransferConfigPath(project, location, transferConfig string) string {
	return "projects/" + project +
		"/locations/" + location +
		"/transferConfigs/" + transferConfig
}
// LocationRunPath returns the path for the location run resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", project, location, transferConfig, run)
// instead.
func LocationRunPath(project, location, transferConfig, run string) string {
	return "projects/" + project +
		"/locations/" + location +
		"/transferConfigs/" + transferConfig +
		"/runs/" + run
}
// DataSourcePath returns the path for the data source resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/dataSources/%s", project, dataSource)
// instead.
func DataSourcePath(project, dataSource string) string {
	return "projects/" + project + "/dataSources/" + dataSource
}
// TransferConfigPath returns the path for the transfer config resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/transferConfigs/%s", project, transferConfig)
// instead.
func TransferConfigPath(project, transferConfig string) string {
	return "projects/" + project + "/transferConfigs/" + transferConfig
}
// RunPath returns the path for the run resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", project, transferConfig, run)
// instead.
func RunPath(project, transferConfig, run string) string {
	return "projects/" + project +
		"/transferConfigs/" + transferConfig +
		"/runs/" + run
}
@@ -0,0 +1,301 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
/* | |||||
Package bigquery provides a client for the BigQuery service. | |||||
Note: This package is in beta. Some backwards-incompatible changes may occur. | |||||
The following assumes a basic familiarity with BigQuery concepts. | |||||
See https://cloud.google.com/bigquery/docs. | |||||
See https://godoc.org/cloud.google.com/go for authentication, timeouts, | |||||
connection pooling and similar aspects of this package. | |||||
Creating a Client | |||||
To start working with this package, create a client: | |||||
ctx := context.Background() | |||||
client, err := bigquery.NewClient(ctx, projectID) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
Querying | |||||
To query existing tables, create a Query and call its Read method: | |||||
q := client.Query(` | |||||
SELECT year, SUM(number) as num | |||||
FROM [bigquery-public-data:usa_names.usa_1910_2013] | |||||
WHERE name = "William" | |||||
GROUP BY year | |||||
ORDER BY year | |||||
`) | |||||
it, err := q.Read(ctx) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
Then iterate through the resulting rows. You can store a row using | |||||
anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value. | |||||
A slice is simplest: | |||||
for { | |||||
var values []bigquery.Value | |||||
err := it.Next(&values) | |||||
if err == iterator.Done { | |||||
break | |||||
} | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
fmt.Println(values) | |||||
} | |||||
You can also use a struct whose exported fields match the query: | |||||
type Count struct { | |||||
Year int | |||||
Num int | |||||
} | |||||
for { | |||||
var c Count | |||||
err := it.Next(&c) | |||||
if err == iterator.Done { | |||||
break | |||||
} | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
fmt.Println(c) | |||||
} | |||||
You can also start the query running and get the results later. | |||||
Create the query as above, but call Run instead of Read. This returns a Job, | |||||
which represents an asynchronous operation. | |||||
job, err := q.Run(ctx) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
Get the job's ID, a printable string. You can save this string to retrieve | |||||
the results at a later time, even in another process. | |||||
jobID := job.ID() | |||||
fmt.Printf("The job ID is %s\n", jobID) | |||||
To retrieve the job's results from the ID, first look up the Job: | |||||
job, err = client.JobFromID(ctx, jobID) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
Use the Job.Read method to obtain an iterator, and loop over the rows. | |||||
Query.Read is just a convenience method that combines Query.Run and Job.Read. | |||||
it, err = job.Read(ctx) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
// Proceed with iteration as above. | |||||
Datasets and Tables | |||||
You can refer to datasets in the client's project with the Dataset method, and | |||||
in other projects with the DatasetInProject method: | |||||
myDataset := client.Dataset("my_dataset") | |||||
yourDataset := client.DatasetInProject("your-project-id", "your_dataset") | |||||
These methods create references to datasets, not the datasets themselves. You can have | |||||
a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to | |||||
create a dataset from a reference: | |||||
if err := myDataset.Create(ctx, nil); err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference | |||||
to an object in BigQuery that may or may not exist. | |||||
table := myDataset.Table("my_table") | |||||
You can create, delete and update the metadata of tables with methods on Table. | |||||
For instance, you could create a temporary table with: | |||||
err = myDataset.Table("temp").Create(ctx, &bigquery.TableMetadata{ | |||||
ExpirationTime: time.Now().Add(1*time.Hour)}) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
We'll see how to create a table with a schema in the next section. | |||||
Schemas | |||||
There are two ways to construct schemas with this package. | |||||
You can build a schema by hand, like so: | |||||
schema1 := bigquery.Schema{ | |||||
{Name: "Name", Required: true, Type: bigquery.StringFieldType}, | |||||
{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType}, | |||||
{Name: "Optional", Required: false, Type: bigquery.IntegerFieldType}, | |||||
} | |||||
Or you can infer the schema from a struct: | |||||
type student struct { | |||||
Name string | |||||
Grades []int | |||||
Optional bigquery.NullInt64 | |||||
} | |||||
schema2, err := bigquery.InferSchema(student{}) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
// schema1 and schema2 are identical. | |||||
Struct inference supports tags like those of the encoding/json package, so you can | |||||
change names, ignore fields, or mark a field as nullable (non-required). Fields | |||||
declared as one of the Null types (NullInt64, NullFloat64, NullString, NullBool, | |||||
NullTimestamp, NullDate, NullTime and NullDateTime) are automatically inferred as | |||||
nullable, so the "nullable" tag is only needed for []byte, *big.Rat and | |||||
pointer-to-struct fields. | |||||
type student2 struct { | |||||
Name string `bigquery:"full_name"` | |||||
Grades []int | |||||
Secret string `bigquery:"-"` | |||||
    Optional []byte `bigquery:",nullable"`
} | |||||
schema3, err := bigquery.InferSchema(student2{}) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
// schema3 has required fields "full_name" and "Grades", and nullable BYTES field "Optional".
Having constructed a schema, you can create a table with it like so: | |||||
if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema1}); err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
Copying | |||||
You can copy one or more tables to another table. Begin by constructing a Copier | |||||
describing the copy. Then set any desired copy options, and finally call Run to get a Job: | |||||
copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src")) | |||||
copier.WriteDisposition = bigquery.WriteTruncate | |||||
job, err = copier.Run(ctx) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
You can chain the call to Run if you don't want to set options: | |||||
job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
You can wait for your job to complete: | |||||
status, err := job.Wait(ctx) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
Job.Wait polls with exponential backoff. You can also poll yourself, if you | |||||
wish: | |||||
for { | |||||
status, err := job.Status(ctx) | |||||
if err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
if status.Done() { | |||||
if status.Err() != nil { | |||||
log.Fatalf("Job failed with error %v", status.Err()) | |||||
} | |||||
break | |||||
} | |||||
time.Sleep(pollInterval) | |||||
} | |||||
Loading and Uploading | |||||
There are two ways to populate a table with this package: load the data from a Google Cloud Storage | |||||
object, or upload rows directly from your program. | |||||
For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure | |||||
it as well, and call its Run method. | |||||
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") | |||||
gcsRef.AllowJaggedRows = true | |||||
loader := myDataset.Table("dest").LoaderFrom(gcsRef) | |||||
loader.CreateDisposition = bigquery.CreateNever | |||||
job, err = loader.Run(ctx) | |||||
// Poll the job for completion if desired, as above. | |||||
To upload, first define a type that implements the ValueSaver interface, which has a single method named Save. | |||||
Then create an Uploader, and call its Put method with a slice of values. | |||||
u := table.Uploader() | |||||
// Item implements the ValueSaver interface. | |||||
items := []*Item{ | |||||
{Name: "n1", Size: 32.6, Count: 7}, | |||||
{Name: "n2", Size: 4, Count: 2}, | |||||
{Name: "n3", Size: 101.5, Count: 1}, | |||||
} | |||||
if err := u.Put(ctx, items); err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type | |||||
to specify the schema and insert ID by hand, or just supply the struct or struct pointer | |||||
directly and the schema will be inferred: | |||||
type Item2 struct { | |||||
Name string | |||||
Size float64 | |||||
Count int | |||||
} | |||||
// Item implements the ValueSaver interface. | |||||
items2 := []*Item2{ | |||||
{Name: "n1", Size: 32.6, Count: 7}, | |||||
{Name: "n2", Size: 4, Count: 2}, | |||||
{Name: "n3", Size: 101.5, Count: 1}, | |||||
} | |||||
if err := u.Put(ctx, items2); err != nil { | |||||
// TODO: Handle error. | |||||
} | |||||
Extracting | |||||
If you've been following so far, extracting data from a BigQuery table | |||||
into a Google Cloud Storage object will feel familiar. First create an | |||||
Extractor, then optionally configure it, and lastly call its Run method. | |||||
extractor := table.ExtractorTo(gcsRef) | |||||
extractor.DisableHeader = true | |||||
job, err = extractor.Run(ctx) | |||||
// Poll the job for completion if desired, as above. | |||||
*/ | |||||
package bigquery // import "cloud.google.com/go/bigquery" |
@@ -0,0 +1,82 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"fmt" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// An Error contains detailed information about a failed bigquery operation.
type Error struct {
	// Mirrors bq.ErrorProto, but drops DebugInfo
	Location, Message, Reason string
}

// Error renders the three fields as a quoted, brace-delimited summary.
func (e Error) Error() string {
	const format = "{Location: %q; Message: %q; Reason: %q}"
	return fmt.Sprintf(format, e.Location, e.Message, e.Reason)
}
func bqToError(ep *bq.ErrorProto) *Error { | |||||
if ep == nil { | |||||
return nil | |||||
} | |||||
return &Error{ | |||||
Location: ep.Location, | |||||
Message: ep.Message, | |||||
Reason: ep.Reason, | |||||
} | |||||
} | |||||
// A MultiError contains multiple related errors.
type MultiError []error

// Error summarizes the collection: the first error's text, plus a count of
// any remaining errors. An empty MultiError reads "(0 errors)".
func (m MultiError) Error() string {
	n := len(m)
	if n == 0 {
		return "(0 errors)"
	}
	first := m[0].Error()
	if n == 1 {
		return first
	}
	if n == 2 {
		return first + " (and 1 other error)"
	}
	return fmt.Sprintf("%s (and %d other errors)", first, n-1)
}
// RowInsertionError contains all errors that occurred when attempting to insert a row. | |||||
type RowInsertionError struct { | |||||
InsertID string // The InsertID associated with the affected row. | |||||
RowIndex int // The 0-based index of the affected row in the batch of rows being inserted. | |||||
Errors MultiError | |||||
} | |||||
func (e *RowInsertionError) Error() string { | |||||
errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s" | |||||
return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error()) | |||||
} | |||||
// PutMultiError contains an error for each row which was not successfully inserted | |||||
// into a BigQuery table. | |||||
type PutMultiError []RowInsertionError | |||||
func (pme PutMultiError) Error() string { | |||||
plural := "s" | |||||
if len(pme) == 1 { | |||||
plural = "" | |||||
} | |||||
return fmt.Sprintf("%v row insertion%s failed", len(pme), plural) | |||||
} |
@@ -0,0 +1,110 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"errors" | |||||
"strings" | |||||
"testing" | |||||
"cloud.google.com/go/internal/testutil" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// rowInsertionError builds a RowInsertionError wrapping a single error with
// the given message; test helper.
func rowInsertionError(msg string) RowInsertionError {
	return RowInsertionError{Errors: []error{errors.New(msg)}}
}
// TestPutMultiErrorString verifies PutMultiError's pluralized summary for
// zero, one, and multiple failed row insertions.
func TestPutMultiErrorString(t *testing.T) {
	testCases := []struct {
		errs PutMultiError
		want string
	}{
		{
			errs: PutMultiError{},
			want: "0 row insertions failed",
		},
		{
			errs: PutMultiError{rowInsertionError("a")},
			want: "1 row insertion failed",
		},
		{
			errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")},
			want: "2 row insertions failed",
		},
	}
	for _, tc := range testCases {
		if tc.errs.Error() != tc.want {
			t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
		}
	}
}
func TestMultiErrorString(t *testing.T) { | |||||
testCases := []struct { | |||||
errs MultiError | |||||
want string | |||||
}{ | |||||
{ | |||||
errs: MultiError{}, | |||||
want: "(0 errors)", | |||||
}, | |||||
{ | |||||
errs: MultiError{errors.New("a")}, | |||||
want: "a", | |||||
}, | |||||
{ | |||||
errs: MultiError{errors.New("a"), errors.New("b")}, | |||||
want: "a (and 1 other error)", | |||||
}, | |||||
{ | |||||
errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")}, | |||||
want: "a (and 2 other errors)", | |||||
}, | |||||
} | |||||
for _, tc := range testCases { | |||||
if tc.errs.Error() != tc.want { | |||||
t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want) | |||||
} | |||||
} | |||||
} | |||||
// TestErrorFromErrorProto verifies bqToError's field mapping and its
// nil-in/nil-out behavior.
func TestErrorFromErrorProto(t *testing.T) {
	for _, test := range []struct {
		in   *bq.ErrorProto
		want *Error
	}{
		{nil, nil},
		{
			in:   &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"},
			want: &Error{Location: "L", Message: "M", Reason: "R"},
		},
	} {
		if got := bqToError(test.in); !testutil.Equal(got, test.want) {
			t.Errorf("%v: got %v, want %v", test.in, got, test.want)
		}
	}
}
// TestErrorString checks that Error.Error mentions all three fields; it
// deliberately does not pin the exact output format.
func TestErrorString(t *testing.T) {
	e := &Error{Location: "<L>", Message: "<M>", Reason: "<R>"}
	got := e.Error()
	if !strings.Contains(got, "<L>") || !strings.Contains(got, "<M>") || !strings.Contains(got, "<R>") {
		t.Errorf(`got %q, expected to see "<L>", "<M>" and "<R>"`, got)
	}
}
@@ -0,0 +1,829 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery_test | |||||
import ( | |||||
"fmt" | |||||
"os" | |||||
"time" | |||||
"cloud.google.com/go/bigquery" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/iterator" | |||||
) | |||||
// ExampleNewClient constructs a BigQuery client for a project.
func ExampleNewClient() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	_ = client // TODO: Use client.
}

// ExampleClient_Dataset obtains a reference to a dataset in the client's project.
func ExampleClient_Dataset() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	fmt.Println(ds)
}

// ExampleClient_DatasetInProject obtains a reference to a dataset in another project.
func ExampleClient_DatasetInProject() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.DatasetInProject("their-project-id", "their-dataset")
	fmt.Println(ds)
}

// ExampleClient_Datasets lists datasets in the client's project via an iterator.
func ExampleClient_Datasets() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Datasets(ctx)
	_ = it // TODO: iterate using Next or iterator.Pager.
}

// ExampleClient_DatasetsInProject lists datasets in another project via an iterator.
func ExampleClient_DatasetsInProject() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.DatasetsInProject(ctx, "their-project-id")
	_ = it // TODO: iterate using Next or iterator.Pager.
}

// getJobID is a placeholder for obtaining a job ID in the examples below.
func getJobID() string { return "" }

// ExampleClient_JobFromID looks a job up by its ID and prints its last status.
func ExampleClient_JobFromID() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere.
	job, err := client.JobFromID(ctx, jobID)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(job.LastStatus()) // Display the job's status.
}

// ExampleClient_Jobs lists jobs, filtered here to running jobs only.
func ExampleClient_Jobs() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Jobs(ctx)
	it.State = bigquery.Running // list only running jobs.
	_ = it                      // TODO: iterate using Next or iterator.Pager.
}

// ExampleNewGCSReference creates a reference to a Google Cloud Storage object.
func ExampleNewGCSReference() {
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
	fmt.Println(gcsRef)
}
// ExampleClient_Query builds a query and sets an option before running it.
func ExampleClient_Query() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	q.DefaultProjectID = "project-id"
	// TODO: set other options on the Query.
	// TODO: Call Query.Run or Query.Read.
}

// ExampleClient_Query_parameters binds a named query parameter (@user).
func ExampleClient_Query_parameters() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select num from t1 where name = @user")
	q.Parameters = []bigquery.QueryParameter{
		{Name: "user", Value: "Elizabeth"},
	}
	// TODO: set other options on the Query.
	// TODO: Call Query.Run or Query.Read.
}

// This example demonstrates how to run a query job on a table
// with a customer-managed encryption key. The same
// applies to load and copy jobs as well.
func ExampleClient_Query_encryptionKey() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	// TODO: Replace this key with a key you have created in Cloud KMS.
	keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
	q.DestinationEncryptionConfig = &bigquery.EncryptionConfig{KMSKeyName: keyName}
	// TODO: set other options on the Query.
	// TODO: Call Query.Run or Query.Read.
}

// ExampleQuery_Read runs a query and obtains a row iterator in one call.
func ExampleQuery_Read() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = it // TODO: iterate using Next or iterator.Pager.
}
// ExampleRowIterator_Next iterates query results into a []bigquery.Value
// per row, stopping at iterator.Done.
func ExampleRowIterator_Next() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(row)
	}
}

// ExampleRowIterator_Next_struct iterates query results into a struct whose
// exported fields match the selected columns.
func ExampleRowIterator_Next_struct() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	type score struct {
		Name string
		Num  int
	}
	q := client.Query("select name, num from t1")
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	for {
		var s score
		err := it.Next(&s)
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(s)
	}
}

// ExampleJob_Read runs a query via Query.Run and then reads results from the
// resulting Job; Query.Read is shorthand for this two-step flow.
func ExampleJob_Read() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	// Call Query.Run to get a Job, then call Read on the job.
	// Note: Query.Read is a shorthand for this.
	job, err := q.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	it, err := job.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = it // TODO: iterate using Next or iterator.Pager.
}
// ExampleJob_Wait blocks until a copy job finishes, then checks both the
// call error and the job's own status error.
func ExampleJob_Wait() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// Wait succeeding only means the job completed; the job itself may
	// still have failed, which is reported via status.Err.
	if status.Err() != nil {
		// TODO: Handle error.
	}
}
// ExampleJob_Config retrieves a job's configuration and type-asserts it to
// the concrete config kind (here, a copy job).
func ExampleJob_Config() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	jc, err := job.Config()
	if err != nil {
		// TODO: Handle error.
	}
	copyConfig := jc.(*bigquery.CopyConfig)
	fmt.Println(copyConfig.Dst, copyConfig.CreateDisposition)
}
// ExampleDataset_Create creates a dataset with an explicit location.
func ExampleDataset_Create() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	if err := ds.Create(ctx, &bigquery.DatasetMetadata{Location: "EU"}); err != nil {
		// TODO: Handle error.
	}
}
// ExampleDataset_Delete deletes a dataset.
func ExampleDataset_Delete() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	if err := client.Dataset("my_dataset").Delete(ctx); err != nil {
		// TODO: Handle error.
	}
}
// ExampleDataset_Metadata fetches and prints a dataset's metadata.
func ExampleDataset_Metadata() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	md, err := client.Dataset("my_dataset").Metadata(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md)
}
// This example illustrates how to perform a read-modify-write sequence on dataset
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleDataset_Update_readModifyWrite() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	md, err := ds.Metadata(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// The ETag from the read makes this update conditional (optimistic
	// concurrency): it fails if another writer changed the metadata.
	md2, err := ds.Update(ctx,
		bigquery.DatasetMetadataToUpdate{Name: "new " + md.Name},
		md.ETag)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md2)
}
// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleDataset_Update_blindWrite() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	md, err := client.Dataset("my_dataset").Update(ctx, bigquery.DatasetMetadataToUpdate{Name: "blind"}, "")
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md)
}
// ExampleDataset_Table obtains a local handle to a table.
func ExampleDataset_Table() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	// Table creates a reference to the table. It does not create the actual
	// table in BigQuery; to do so, use Table.Create.
	t := client.Dataset("my_dataset").Table("my_table")
	fmt.Println(t)
}
// ExampleDataset_Tables lists the tables of a dataset via an iterator.
func ExampleDataset_Tables() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Dataset("my_dataset").Tables(ctx)
	_ = it // TODO: iterate using Next or iterator.Pager.
}
// ExampleDatasetIterator_Next iterates over all datasets in the project.
func ExampleDatasetIterator_Next() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Datasets(ctx)
	for {
		ds, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(ds)
	}
}
// ExampleInferSchema derives a BigQuery schema from a plain Go struct type.
func ExampleInferSchema() {
	type Item struct {
		Name  string
		Size  float64
		Count int
	}
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		fmt.Println(err)
		// TODO: Handle error.
	}
	for _, fs := range schema {
		fmt.Println(fs.Name, fs.Type)
	}
	// Output:
	// Name STRING
	// Size FLOAT
	// Count INTEGER
}
// ExampleInferSchema_tags shows how struct tags control schema inference:
// renaming a field, omitting one with "-", and marking fields nullable.
func ExampleInferSchema_tags() {
	type Item struct {
		Name     string
		Size     float64
		Count    int    `bigquery:"number"`
		Secret   []byte `bigquery:"-"`
		Optional bigquery.NullBool
		OptBytes []byte `bigquery:",nullable"`
	}
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		fmt.Println(err)
		// TODO: Handle error.
	}
	for _, fs := range schema {
		fmt.Println(fs.Name, fs.Type, fs.Required)
	}
	// Output:
	// Name STRING true
	// Size FLOAT true
	// number INTEGER true
	// Optional BOOLEAN false
	// OptBytes BYTES false
}
// ExampleTable_Create creates a table with default (nil) metadata.
func ExampleTable_Create() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("new-table")
	if err := t.Create(ctx, nil); err != nil {
		// TODO: Handle error.
	}
}
// Initialize a new table by passing TableMetadata to Table.Create.
func ExampleTable_Create_initialize() {
	ctx := context.Background()
	// Infer table schema from a Go type.
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		// TODO: Handle error.
	}
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("new-table")
	if err := t.Create(ctx,
		&bigquery.TableMetadata{
			Name:           "My New Table",
			Schema:         schema,
			ExpirationTime: time.Now().Add(24 * time.Hour),
		}); err != nil {
		// TODO: Handle error.
	}
}
// This example demonstrates how to create a table with
// a customer-managed encryption key.
func ExampleTable_Create_encryptionKey() {
	ctx := context.Background()
	// Infer table schema from a Go type.
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		// TODO: Handle error.
	}
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("new-table")
	// TODO: Replace this key with a key you have created in Cloud KMS.
	keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
	if err := t.Create(ctx,
		&bigquery.TableMetadata{
			Name:             "My New Table",
			Schema:           schema,
			EncryptionConfig: &bigquery.EncryptionConfig{KMSKeyName: keyName},
		}); err != nil {
		// TODO: Handle error.
	}
}
// ExampleTable_Delete deletes a table.
func ExampleTable_Delete() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	if err := client.Dataset("my_dataset").Table("my_table").Delete(ctx); err != nil {
		// TODO: Handle error.
	}
}
// ExampleTable_Metadata fetches and prints a table's metadata.
func ExampleTable_Metadata() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md)
}
// ExampleTable_Uploader obtains an Uploader for streaming rows into a table.
func ExampleTable_Uploader() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	_ = u // TODO: Use u.
}
// ExampleTable_Uploader_options configures an Uploader to tolerate invalid
// rows and unknown values instead of rejecting the insert.
func ExampleTable_Uploader_options() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	u.SkipInvalidRows = true
	u.IgnoreUnknownValues = true
	_ = u // TODO: Use u.
}
// ExampleTable_CopierFrom copies two source tables into a destination table,
// truncating the destination first, and waits for the job to finish.
func ExampleTable_CopierFrom() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	c := ds.Table("combined").CopierFrom(ds.Table("t1"), ds.Table("t2"))
	c.WriteDisposition = bigquery.WriteTruncate
	// TODO: set other options on the Copier.
	job, err := c.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// status.Err reports failures of the job itself, distinct from the
	// errors returned by Run/Wait.
	if status.Err() != nil {
		// TODO: Handle error.
	}
}
// ExampleTable_ExtractorTo exports a table to a Google Cloud Storage object
// and waits for the extract job to finish.
func ExampleTable_ExtractorTo() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
	gcsRef.FieldDelimiter = ":"
	// TODO: set other options on the GCSReference.
	ds := client.Dataset("my_dataset")
	extractor := ds.Table("my_table").ExtractorTo(gcsRef)
	extractor.DisableHeader = true
	// TODO: set other options on the Extractor.
	job, err := extractor.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}
// ExampleTable_LoaderFrom loads a table from a Google Cloud Storage object
// and waits for the load job to finish.
func ExampleTable_LoaderFrom() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
	gcsRef.AllowJaggedRows = true
	gcsRef.MaxBadRecords = 5
	gcsRef.Schema = schema
	// TODO: set other options on the GCSReference.
	ds := client.Dataset("my_dataset")
	loader := ds.Table("my_table").LoaderFrom(gcsRef)
	loader.CreateDisposition = bigquery.CreateNever
	// TODO: set other options on the Loader.
	job, err := loader.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}
// ExampleTable_LoaderFrom_reader loads a table from a local file through an
// io.Reader source, and waits for the load job to finish.
func ExampleTable_LoaderFrom_reader() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	f, err := os.Open("data.csv")
	if err != nil {
		// TODO: Handle error.
	}
	rs := bigquery.NewReaderSource(f)
	rs.AllowJaggedRows = true
	rs.MaxBadRecords = 5
	rs.Schema = schema
	// TODO: set other options on the GCSReference.
	ds := client.Dataset("my_dataset")
	loader := ds.Table("my_table").LoaderFrom(rs)
	loader.CreateDisposition = bigquery.CreateNever
	// TODO: set other options on the Loader.
	job, err := loader.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}
// ExampleTable_Read reads an entire table's rows via an iterator.
func ExampleTable_Read() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Dataset("my_dataset").Table("my_table").Read(ctx)
	_ = it // TODO: iterate using Next or iterator.Pager.
}
// This example illustrates how to perform a read-modify-write sequence on table
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleTable_Update_readModifyWrite() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("my_table")
	md, err := t.Metadata(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// The ETag from the read makes this update conditional (optimistic
	// concurrency): it fails if another writer changed the metadata.
	md2, err := t.Update(ctx,
		bigquery.TableMetadataToUpdate{Name: "new " + md.Name},
		md.ETag)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md2)
}
// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleTable_Update_blindWrite() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("my_table")
	tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{
		Description: "my favorite table",
	}, "")
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(tm)
}
// ExampleTableIterator_Next iterates over all tables in a dataset.
func ExampleTableIterator_Next() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Dataset("my_dataset").Tables(ctx)
	for {
		t, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(t)
	}
}
// Item is a sample row type shared by the examples in this file.
type Item struct {
	Name  string
	Size  float64
	Count int
}
// Save implements the ValueSaver interface. | |||||
func (i *Item) Save() (map[string]bigquery.Value, string, error) { | |||||
return map[string]bigquery.Value{ | |||||
"Name": i.Name, | |||||
"Size": i.Size, | |||||
"Count": i.Count, | |||||
}, "", nil | |||||
} | |||||
// ExampleUploader_Put streams rows that implement ValueSaver into a table.
func ExampleUploader_Put() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	// Item implements the ValueSaver interface.
	items := []*Item{
		{Name: "n1", Size: 32.6, Count: 7},
		{Name: "n2", Size: 4, Count: 2},
		{Name: "n3", Size: 101.5, Count: 1},
	}
	if err := u.Put(ctx, items); err != nil {
		// TODO: Handle error.
	}
}
// schema stands in for a table schema assumed to be initialized elsewhere;
// the examples below reference it as "the table's schema".
var schema bigquery.Schema
// ExampleUploader_Put_structSaver streams rows using StructSaver, which
// allows specifying an explicit schema and per-row insert IDs.
func ExampleUploader_Put_structSaver() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	type score struct {
		Name string
		Num  int
	}
	// Assume schema holds the table's schema.
	savers := []*bigquery.StructSaver{
		{Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"},
		{Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"},
		{Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"},
	}
	if err := u.Put(ctx, savers); err != nil {
		// TODO: Handle error.
	}
}
// ExampleUploader_Put_struct streams plain structs; the schema is inferred
// from the struct type.
func ExampleUploader_Put_struct() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	type score struct {
		Name string
		Num  int
	}
	scores := []score{
		{Name: "n1", Num: 12},
		{Name: "n2", Num: 31},
		{Name: "n3", Num: 7},
	}
	// Schema is inferred from the score type.
	if err := u.Put(ctx, scores); err != nil {
		// TODO: Handle error.
	}
}
// ExampleUploader_Put_valuesSaver streams rows built as raw value slices via
// ValuesSaver, pairing each row with a schema and an insert ID.
func ExampleUploader_Put_valuesSaver() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	var vss []*bigquery.ValuesSaver
	for i, name := range []string{"n1", "n2", "n3"} {
		// Assume schema holds the table's schema.
		vss = append(vss, &bigquery.ValuesSaver{
			Schema:   schema,
			InsertID: name,
			Row:      []bigquery.Value{name, int64(i)},
		})
	}
	if err := u.Put(ctx, vss); err != nil {
		// TODO: Handle error.
	}
}
@@ -0,0 +1,399 @@ | |||||
// Copyright 2017 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"encoding/base64" | |||||
"unicode/utf8" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// DataFormat describes the format of BigQuery table data.
type DataFormat string

// Constants describing the format of BigQuery table data.
// The string values are the identifiers the BigQuery API expects.
const (
	CSV             DataFormat = "CSV"
	Avro            DataFormat = "AVRO"
	JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
	DatastoreBackup DataFormat = "DATASTORE_BACKUP"
	GoogleSheets    DataFormat = "GOOGLE_SHEETS"
	Bigtable        DataFormat = "BIGTABLE"
	Parquet         DataFormat = "PARQUET"
)
// ExternalData is a table which is stored outside of BigQuery. It is implemented by
// *ExternalDataConfig.
// GCSReference also implements it, for backwards compatibility.
type ExternalData interface {
	// toBQ converts the value to the BigQuery API representation.
	toBQ() bq.ExternalDataConfiguration
}
// ExternalDataConfig describes data external to BigQuery that can be used
// in queries and to create external tables.
type ExternalDataConfig struct {
	// The format of the data. Required.
	SourceFormat DataFormat

	// The fully-qualified URIs that point to your
	// data in Google Cloud. Required.
	//
	// For Google Cloud Storage URIs, each URI can contain one '*' wildcard character
	// and it must come after the 'bucket' name. Size limits related to load jobs
	// apply to external data sources.
	//
	// For Google Cloud Bigtable URIs, exactly one URI can be specified and it has be
	// a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
	//
	// For Google Cloud Datastore backups, exactly one URI can be specified. Also,
	// the '*' wildcard character is not allowed.
	SourceURIs []string

	// The schema of the data. Required for CSV and JSON; disallowed for the
	// other formats.
	Schema Schema

	// Try to detect schema and format options automatically.
	// Any option specified explicitly will be honored.
	AutoDetect bool

	// The compression type of the data.
	Compression Compression

	// IgnoreUnknownValues causes values not matching the schema to be
	// tolerated. Unknown values are ignored. For CSV this ignores extra values
	// at the end of a line. For JSON this ignores named values that do not
	// match any column name. If this field is not set, records containing
	// unknown values are treated as bad records. The MaxBadRecords field can
	// be used to customize how bad records are handled.
	IgnoreUnknownValues bool

	// MaxBadRecords is the maximum number of bad records that will be ignored
	// when reading data.
	MaxBadRecords int64

	// Additional options for CSV, GoogleSheets and Bigtable formats.
	Options ExternalDataConfigOptions
}
func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration { | |||||
q := bq.ExternalDataConfiguration{ | |||||
SourceFormat: string(e.SourceFormat), | |||||
SourceUris: e.SourceURIs, | |||||
Autodetect: e.AutoDetect, | |||||
Compression: string(e.Compression), | |||||
IgnoreUnknownValues: e.IgnoreUnknownValues, | |||||
MaxBadRecords: e.MaxBadRecords, | |||||
} | |||||
if e.Schema != nil { | |||||
q.Schema = e.Schema.toBQ() | |||||
} | |||||
if e.Options != nil { | |||||
e.Options.populateExternalDataConfig(&q) | |||||
} | |||||
return q | |||||
} | |||||
func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) { | |||||
e := &ExternalDataConfig{ | |||||
SourceFormat: DataFormat(q.SourceFormat), | |||||
SourceURIs: q.SourceUris, | |||||
AutoDetect: q.Autodetect, | |||||
Compression: Compression(q.Compression), | |||||
IgnoreUnknownValues: q.IgnoreUnknownValues, | |||||
MaxBadRecords: q.MaxBadRecords, | |||||
Schema: bqToSchema(q.Schema), | |||||
} | |||||
switch { | |||||
case q.CsvOptions != nil: | |||||
e.Options = bqToCSVOptions(q.CsvOptions) | |||||
case q.GoogleSheetsOptions != nil: | |||||
e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions) | |||||
case q.BigtableOptions != nil: | |||||
var err error | |||||
e.Options, err = bqToBigtableOptions(q.BigtableOptions) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
} | |||||
return e, nil | |||||
} | |||||
// ExternalDataConfigOptions are additional options for external data configurations.
// This interface is implemented by CSVOptions, GoogleSheetsOptions and BigtableOptions.
type ExternalDataConfigOptions interface {
	// populateExternalDataConfig writes the options into the appropriate
	// sub-message of the given API configuration.
	populateExternalDataConfig(*bq.ExternalDataConfiguration)
}
// CSVOptions are additional options for CSV external data sources.
type CSVOptions struct {
	// AllowJaggedRows causes missing trailing optional columns to be tolerated
	// when reading CSV data. Missing values are treated as nulls.
	AllowJaggedRows bool

	// AllowQuotedNewlines sets whether quoted data sections containing
	// newlines are allowed when reading CSV data.
	AllowQuotedNewlines bool

	// Encoding is the character encoding of data to be read.
	Encoding Encoding

	// FieldDelimiter is the separator for fields in a CSV file, used when
	// reading or exporting data. The default is ",".
	FieldDelimiter string

	// Quote is the value used to quote data sections in a CSV file. The
	// default quotation character is the double quote ("), which is used if
	// both Quote and ForceZeroQuote are unset.
	// To specify that no character should be interpreted as a quotation
	// character, set ForceZeroQuote to true.
	// Only used when reading data.
	Quote          string
	ForceZeroQuote bool

	// The number of rows at the top of a CSV file that BigQuery will skip when
	// reading data.
	SkipLeadingRows int64
}
func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) { | |||||
c.CsvOptions = &bq.CsvOptions{ | |||||
AllowJaggedRows: o.AllowJaggedRows, | |||||
AllowQuotedNewlines: o.AllowQuotedNewlines, | |||||
Encoding: string(o.Encoding), | |||||
FieldDelimiter: o.FieldDelimiter, | |||||
Quote: o.quote(), | |||||
SkipLeadingRows: o.SkipLeadingRows, | |||||
} | |||||
} | |||||
// quote returns the CSV quote character, or nil if unset. | |||||
func (o *CSVOptions) quote() *string { | |||||
if o.ForceZeroQuote { | |||||
quote := "" | |||||
return "e | |||||
} | |||||
if o.Quote == "" { | |||||
return nil | |||||
} | |||||
return &o.Quote | |||||
} | |||||
func (o *CSVOptions) setQuote(ps *string) { | |||||
if ps != nil { | |||||
o.Quote = *ps | |||||
if o.Quote == "" { | |||||
o.ForceZeroQuote = true | |||||
} | |||||
} | |||||
} | |||||
func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions { | |||||
o := &CSVOptions{ | |||||
AllowJaggedRows: q.AllowJaggedRows, | |||||
AllowQuotedNewlines: q.AllowQuotedNewlines, | |||||
Encoding: Encoding(q.Encoding), | |||||
FieldDelimiter: q.FieldDelimiter, | |||||
SkipLeadingRows: q.SkipLeadingRows, | |||||
} | |||||
o.setQuote(q.Quote) | |||||
return o | |||||
} | |||||
// GoogleSheetsOptions are additional options for GoogleSheets external data sources.
type GoogleSheetsOptions struct {
	// The number of rows at the top of a sheet that BigQuery will skip when
	// reading data.
	SkipLeadingRows int64
}
func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) { | |||||
c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{ | |||||
SkipLeadingRows: o.SkipLeadingRows, | |||||
} | |||||
} | |||||
func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions { | |||||
return &GoogleSheetsOptions{ | |||||
SkipLeadingRows: q.SkipLeadingRows, | |||||
} | |||||
} | |||||
// BigtableOptions are additional options for Bigtable external data sources.
type BigtableOptions struct {
	// A list of column families to expose in the table schema along with their
	// types. If omitted, all column families are present in the table schema and
	// their values are read as BYTES.
	ColumnFamilies []*BigtableColumnFamily

	// If true, then the column families that are not specified in columnFamilies
	// list are not exposed in the table schema. Otherwise, they are read with BYTES
	// type values. The default is false.
	IgnoreUnspecifiedColumnFamilies bool

	// If true, then the rowkey column families will be read and converted to string.
	// Otherwise they are read with BYTES type values and users need to manually cast
	// them with CAST if necessary. The default is false.
	ReadRowkeyAsString bool
}
func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) { | |||||
q := &bq.BigtableOptions{ | |||||
IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies, | |||||
ReadRowkeyAsString: o.ReadRowkeyAsString, | |||||
} | |||||
for _, f := range o.ColumnFamilies { | |||||
q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ()) | |||||
} | |||||
c.BigtableOptions = q | |||||
} | |||||
func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) { | |||||
b := &BigtableOptions{ | |||||
IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies, | |||||
ReadRowkeyAsString: q.ReadRowkeyAsString, | |||||
} | |||||
for _, f := range q.ColumnFamilies { | |||||
f2, err := bqToBigtableColumnFamily(f) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
b.ColumnFamilies = append(b.ColumnFamilies, f2) | |||||
} | |||||
return b, nil | |||||
} | |||||
// BigtableColumnFamily describes how BigQuery should access a Bigtable column family.
type BigtableColumnFamily struct {
	// Identifier of the column family.
	FamilyID string

	// Lists of columns that should be exposed as individual fields as opposed to a
	// list of (column name, value) pairs. All columns whose qualifier matches a
	// qualifier in this list can be accessed as .. Other columns can be accessed as
	// a list through .Column field.
	Columns []*BigtableColumn

	// The encoding of the values when the type is not STRING. Acceptable encoding values are:
	// - TEXT - indicates values are alphanumeric text strings.
	// - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions.
	// This can be overridden for a specific column by listing that column in 'columns' and
	// specifying an encoding for it.
	Encoding string

	// If true, only the latest version of values are exposed for all columns in this
	// column family. This can be overridden for a specific column by listing that
	// column in 'columns' and specifying a different setting for that column.
	OnlyReadLatest bool

	// The type to convert the value in cells of this
	// column family. The values are expected to be encoded using HBase
	// Bytes.toBytes function when using the BINARY encoding value.
	// Following BigQuery types are allowed (case-sensitive):
	// BYTES STRING INTEGER FLOAT BOOLEAN.
	// The default type is BYTES. This can be overridden for a specific column by
	// listing that column in 'columns' and specifying a type for it.
	Type string
}
func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily { | |||||
q := &bq.BigtableColumnFamily{ | |||||
FamilyId: b.FamilyID, | |||||
Encoding: b.Encoding, | |||||
OnlyReadLatest: b.OnlyReadLatest, | |||||
Type: b.Type, | |||||
} | |||||
for _, col := range b.Columns { | |||||
q.Columns = append(q.Columns, col.toBQ()) | |||||
} | |||||
return q | |||||
} | |||||
func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) { | |||||
b := &BigtableColumnFamily{ | |||||
FamilyID: q.FamilyId, | |||||
Encoding: q.Encoding, | |||||
OnlyReadLatest: q.OnlyReadLatest, | |||||
Type: q.Type, | |||||
} | |||||
for _, col := range q.Columns { | |||||
c, err := bqToBigtableColumn(col) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
b.Columns = append(b.Columns, c) | |||||
} | |||||
return b, nil | |||||
} | |||||
// BigtableColumn describes how BigQuery should access a Bigtable column.
type BigtableColumn struct {
	// Qualifier of the column. Columns in the parent column family that have this
	// exact qualifier are exposed as . field. The column field name is the
	// same as the column qualifier.
	Qualifier string

	// If the qualifier is not a valid BigQuery field identifier i.e. does not match
	// [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field
	// name and is used as field name in queries.
	FieldName string

	// If true, only the latest version of values are exposed for this column.
	// See BigtableColumnFamily.OnlyReadLatest.
	OnlyReadLatest bool

	// The encoding of the values when the type is not STRING.
	// See BigtableColumnFamily.Encoding
	Encoding string

	// The type to convert the value in cells of this column.
	// See BigtableColumnFamily.Type
	Type string
}
func (b *BigtableColumn) toBQ() *bq.BigtableColumn { | |||||
q := &bq.BigtableColumn{ | |||||
FieldName: b.FieldName, | |||||
OnlyReadLatest: b.OnlyReadLatest, | |||||
Encoding: b.Encoding, | |||||
Type: b.Type, | |||||
} | |||||
if utf8.ValidString(b.Qualifier) { | |||||
q.QualifierString = b.Qualifier | |||||
} else { | |||||
q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier)) | |||||
} | |||||
return q | |||||
} | |||||
func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) { | |||||
b := &BigtableColumn{ | |||||
FieldName: q.FieldName, | |||||
OnlyReadLatest: q.OnlyReadLatest, | |||||
Encoding: q.Encoding, | |||||
Type: q.Type, | |||||
} | |||||
if q.QualifierString != "" { | |||||
b.Qualifier = q.QualifierString | |||||
} else { | |||||
bytes, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
b.Qualifier = string(bytes) | |||||
} | |||||
return b, nil | |||||
} |
@@ -0,0 +1,143 @@ | |||||
// Copyright 2017 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"testing" | |||||
"cloud.google.com/go/internal/pretty" | |||||
"cloud.google.com/go/internal/testutil" | |||||
) | |||||
// TestExternalDataConfig verifies that an ExternalDataConfig survives a
// round trip through the BigQuery service representation (toBQ followed by
// bqToExternalDataConfig) for CSV, Google Sheets, and Bigtable sources.
func TestExternalDataConfig(t *testing.T) {
	// Round-trip of ExternalDataConfig to underlying representation.
	for i, want := range []*ExternalDataConfig{
		// CSV source exercising every top-level field plus CSVOptions.
		{
			SourceFormat:        CSV,
			SourceURIs:          []string{"uri"},
			Schema:              Schema{{Name: "n", Type: IntegerFieldType}},
			AutoDetect:          true,
			Compression:         Gzip,
			IgnoreUnknownValues: true,
			MaxBadRecords:       17,
			Options: &CSVOptions{
				AllowJaggedRows:     true,
				AllowQuotedNewlines: true,
				Encoding:            UTF_8,
				FieldDelimiter:      "f",
				Quote:               "q",
				SkipLeadingRows:     3,
			},
		},
		// Google Sheets source with format-specific options only.
		{
			SourceFormat: GoogleSheets,
			Options:      &GoogleSheetsOptions{SkipLeadingRows: 4},
		},
		// Bigtable source with a nested column family and column.
		{
			SourceFormat: Bigtable,
			Options: &BigtableOptions{
				IgnoreUnspecifiedColumnFamilies: true,
				ReadRowkeyAsString:              true,
				ColumnFamilies: []*BigtableColumnFamily{
					{
						FamilyID:       "f1",
						Encoding:       "TEXT",
						OnlyReadLatest: true,
						Type:           "FLOAT",
						Columns: []*BigtableColumn{
							{
								Qualifier:      "valid-utf-8",
								FieldName:      "fn",
								OnlyReadLatest: true,
								Encoding:       "BINARY",
								Type:           "STRING",
							},
						},
					},
				},
			},
		},
	} {
		q := want.toBQ()
		got, err := bqToExternalDataConfig(&q)
		if err != nil {
			t.Fatal(err)
		}
		if diff := testutil.Diff(got, want); diff != "" {
			t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
		}
	}
}
func TestQuote(t *testing.T) { | |||||
ptr := func(s string) *string { return &s } | |||||
for _, test := range []struct { | |||||
quote string | |||||
force bool | |||||
want *string | |||||
}{ | |||||
{"", false, nil}, | |||||
{"", true, ptr("")}, | |||||
{"-", false, ptr("-")}, | |||||
{"-", true, ptr("")}, | |||||
} { | |||||
o := CSVOptions{ | |||||
Quote: test.quote, | |||||
ForceZeroQuote: test.force, | |||||
} | |||||
got := o.quote() | |||||
if (got == nil) != (test.want == nil) { | |||||
t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want)) | |||||
} | |||||
if got != nil && test.want != nil && *got != *test.want { | |||||
t.Errorf("%+v: got %q, want %q", test, *got, *test.want) | |||||
} | |||||
} | |||||
} | |||||
func TestQualifier(t *testing.T) { | |||||
b := BigtableColumn{Qualifier: "a"} | |||||
q := b.toBQ() | |||||
if q.QualifierString != b.Qualifier || q.QualifierEncoded != "" { | |||||
t.Errorf("got (%q, %q), want (%q, %q)", | |||||
q.QualifierString, q.QualifierEncoded, b.Qualifier, "") | |||||
} | |||||
b2, err := bqToBigtableColumn(q) | |||||
if err != nil { | |||||
t.Fatal(err) | |||||
} | |||||
if got, want := b2.Qualifier, b.Qualifier; got != want { | |||||
t.Errorf("got %q, want %q", got, want) | |||||
} | |||||
const ( | |||||
invalidUTF8 = "\xDF\xFF" | |||||
invalidEncoded = "3/8" | |||||
) | |||||
b = BigtableColumn{Qualifier: invalidUTF8} | |||||
q = b.toBQ() | |||||
if q.QualifierString != "" || q.QualifierEncoded != invalidEncoded { | |||||
t.Errorf("got (%q, %q), want (%q, %q)", | |||||
q.QualifierString, "", b.Qualifier, invalidEncoded) | |||||
} | |||||
b2, err = bqToBigtableColumn(q) | |||||
if err != nil { | |||||
t.Fatal(err) | |||||
} | |||||
if got, want := b2.Qualifier, b.Qualifier; got != want { | |||||
t.Errorf("got %q, want %q", got, want) | |||||
} | |||||
} |
@@ -0,0 +1,109 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"cloud.google.com/go/internal/trace" | |||||
"golang.org/x/net/context" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// ExtractConfig holds the configuration for an extract job.
type ExtractConfig struct {
	// Src is the table from which data will be extracted.
	// Must be non-nil: toBQ dereferences it unconditionally.
	Src *Table
	// Dst is the destination into which the data will be extracted.
	// Must be non-nil: toBQ dereferences it unconditionally.
	Dst *GCSReference
	// DisableHeader disables the printing of a header row in exported data.
	DisableHeader bool
	// The labels associated with this job.
	Labels map[string]string
}
func (e *ExtractConfig) toBQ() *bq.JobConfiguration { | |||||
var printHeader *bool | |||||
if e.DisableHeader { | |||||
f := false | |||||
printHeader = &f | |||||
} | |||||
return &bq.JobConfiguration{ | |||||
Labels: e.Labels, | |||||
Extract: &bq.JobConfigurationExtract{ | |||||
DestinationUris: append([]string{}, e.Dst.URIs...), | |||||
Compression: string(e.Dst.Compression), | |||||
DestinationFormat: string(e.Dst.DestinationFormat), | |||||
FieldDelimiter: e.Dst.FieldDelimiter, | |||||
SourceTable: e.Src.toBQ(), | |||||
PrintHeader: printHeader, | |||||
}, | |||||
} | |||||
} | |||||
func bqToExtractConfig(q *bq.JobConfiguration, c *Client) *ExtractConfig { | |||||
qe := q.Extract | |||||
return &ExtractConfig{ | |||||
Labels: q.Labels, | |||||
Dst: &GCSReference{ | |||||
URIs: qe.DestinationUris, | |||||
Compression: Compression(qe.Compression), | |||||
DestinationFormat: DataFormat(qe.DestinationFormat), | |||||
FileConfig: FileConfig{ | |||||
CSVOptions: CSVOptions{ | |||||
FieldDelimiter: qe.FieldDelimiter, | |||||
}, | |||||
}, | |||||
}, | |||||
DisableHeader: qe.PrintHeader != nil && !*qe.PrintHeader, | |||||
Src: bqToTable(qe.SourceTable, c), | |||||
} | |||||
} | |||||
// An Extractor extracts data from a BigQuery table into Google Cloud Storage.
type Extractor struct {
	JobIDConfig   // embedded job-ID settings used when creating the job reference
	ExtractConfig // embedded extract settings (source table, destination, labels)
	c             *Client
}
// ExtractorTo returns an Extractor which can be used to extract data from a | |||||
// BigQuery table into Google Cloud Storage. | |||||
// The returned Extractor may optionally be further configured before its Run method is called. | |||||
func (t *Table) ExtractorTo(dst *GCSReference) *Extractor { | |||||
return &Extractor{ | |||||
c: t.c, | |||||
ExtractConfig: ExtractConfig{ | |||||
Src: t, | |||||
Dst: dst, | |||||
}, | |||||
} | |||||
} | |||||
// Run initiates an extract job.
// The named return err is captured by the deferred EndSpan call so the trace
// records the job-insertion outcome.
func (e *Extractor) Run(ctx context.Context) (j *Job, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Extractor.Run")
	defer func() { trace.EndSpan(ctx, err) }()
	return e.c.insertJob(ctx, e.newJob(), nil)
}
func (e *Extractor) newJob() *bq.Job { | |||||
return &bq.Job{ | |||||
JobReference: e.JobIDConfig.createJobRef(e.c), | |||||
Configuration: e.ExtractConfig.toBQ(), | |||||
} | |||||
} |
@@ -0,0 +1,118 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"testing" | |||||
"github.com/google/go-cmp/cmp" | |||||
"cloud.google.com/go/internal/testutil" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// defaultExtractJob returns the baseline service job that TestExtract expects
// for an extract with no optional settings; test cases mutate a fresh copy.
func defaultExtractJob() *bq.Job {
	return &bq.Job{
		// "RANDOM" matches the ID pinned by fixRandomID in the test.
		JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
		Configuration: &bq.JobConfiguration{
			Extract: &bq.JobConfigurationExtract{
				SourceTable: &bq.TableReference{
					ProjectId: "client-project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
				DestinationUris: []string{"uri"},
			},
		},
	}
}
func defaultGCS() *GCSReference { | |||||
return &GCSReference{ | |||||
URIs: []string{"uri"}, | |||||
} | |||||
} | |||||
// TestExtract checks that Extractor.newJob produces the expected service job
// for several configurations, and that the job configuration converts back
// (via bqToJobConfig) to the original ExtractConfig.
func TestExtract(t *testing.T) {
	// Pin job-ID generation so JobReference comparisons are deterministic.
	defer fixRandomID("RANDOM")()
	c := &Client{
		projectID: "client-project-id",
	}
	testCases := []struct {
		dst    *GCSReference
		src    *Table
		config ExtractConfig
		want   *bq.Job
	}{
		// Defaults only.
		{
			dst:  defaultGCS(),
			src:  c.Dataset("dataset-id").Table("table-id"),
			want: defaultExtractJob(),
		},
		// Header disabled and labels set.
		{
			dst: defaultGCS(),
			src: c.Dataset("dataset-id").Table("table-id"),
			config: ExtractConfig{
				DisableHeader: true,
				Labels:        map[string]string{"a": "b"},
			},
			want: func() *bq.Job {
				j := defaultExtractJob()
				j.Configuration.Labels = map[string]string{"a": "b"}
				f := false
				j.Configuration.Extract.PrintHeader = &f
				return j
			}(),
		},
		// Destination options (compression, format, delimiter) propagated.
		{
			dst: func() *GCSReference {
				g := NewGCSReference("uri")
				g.Compression = Gzip
				g.DestinationFormat = JSON
				g.FieldDelimiter = "\t"
				return g
			}(),
			src: c.Dataset("dataset-id").Table("table-id"),
			want: func() *bq.Job {
				j := defaultExtractJob()
				j.Configuration.Extract.Compression = "GZIP"
				j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON"
				j.Configuration.Extract.FieldDelimiter = "\t"
				return j
			}(),
		},
	}
	for i, tc := range testCases {
		ext := tc.src.ExtractorTo(tc.dst)
		// Carry Src/Dst into the per-case config before overwriting the
		// extractor's config wholesale.
		tc.config.Src = ext.Src
		tc.config.Dst = ext.Dst
		ext.ExtractConfig = tc.config
		got := ext.newJob()
		checkJob(t, i, got, tc.want)
		// Round-trip: the service job must convert back to the same config.
		jc, err := bqToJobConfig(got.Configuration, c)
		if err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		diff := testutil.Diff(jc, &ext.ExtractConfig,
			cmp.AllowUnexported(Table{}, Client{}))
		if diff != "" {
			t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
		}
	}
}
@@ -0,0 +1,135 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"io" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// A ReaderSource is a source for a load operation that gets
// data from an io.Reader.
//
// When a ReaderSource is part of a LoadConfig obtained via Job.Config,
// its internal io.Reader will be nil, so it cannot be used for a
// subsequent load operation.
type ReaderSource struct {
	r io.Reader // the data to load; consumed by the load operation
	FileConfig  // format options describing the data being read
}
// NewReaderSource creates a ReaderSource from an io.Reader. You may | |||||
// optionally configure properties on the ReaderSource that describe the | |||||
// data being read, before passing it to Table.LoaderFrom. | |||||
func NewReaderSource(r io.Reader) *ReaderSource { | |||||
return &ReaderSource{r: r} | |||||
} | |||||
func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader { | |||||
r.FileConfig.populateLoadConfig(lc) | |||||
return r.r | |||||
} | |||||
// FileConfig contains configuration options that pertain to files, typically
// text files that require interpretation to be used as a BigQuery table. A
// file may live in Google Cloud Storage (see GCSReference), or it may be
// loaded into a table via the Table.LoaderFromReader.
type FileConfig struct {
	// SourceFormat is the format of the data to be read.
	// Allowed values are: CSV, Avro, Parquet, JSON, DatastoreBackup. The default is CSV.
	SourceFormat DataFormat

	// Indicates if we should automatically infer the options and
	// schema for CSV and JSON sources.
	AutoDetect bool

	// MaxBadRecords is the maximum number of bad records that will be ignored
	// when reading data.
	MaxBadRecords int64

	// IgnoreUnknownValues causes values not matching the schema to be
	// tolerated. Unknown values are ignored. For CSV this ignores extra values
	// at the end of a line. For JSON this ignores named values that do not
	// match any column name. If this field is not set, records containing
	// unknown values are treated as bad records. The MaxBadRecords field can
	// be used to customize how bad records are handled.
	IgnoreUnknownValues bool

	// Schema describes the data. It is required when reading CSV or JSON data,
	// unless the data is being loaded into a table that already exists.
	Schema Schema

	// Additional options for CSV files. Embedded, so fields such as
	// FieldDelimiter and Quote are promoted onto FileConfig.
	CSVOptions
}
func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) { | |||||
conf.SkipLeadingRows = fc.SkipLeadingRows | |||||
conf.SourceFormat = string(fc.SourceFormat) | |||||
conf.Autodetect = fc.AutoDetect | |||||
conf.AllowJaggedRows = fc.AllowJaggedRows | |||||
conf.AllowQuotedNewlines = fc.AllowQuotedNewlines | |||||
conf.Encoding = string(fc.Encoding) | |||||
conf.FieldDelimiter = fc.FieldDelimiter | |||||
conf.IgnoreUnknownValues = fc.IgnoreUnknownValues | |||||
conf.MaxBadRecords = fc.MaxBadRecords | |||||
if fc.Schema != nil { | |||||
conf.Schema = fc.Schema.toBQ() | |||||
} | |||||
conf.Quote = fc.quote() | |||||
} | |||||
func bqPopulateFileConfig(conf *bq.JobConfigurationLoad, fc *FileConfig) { | |||||
fc.SourceFormat = DataFormat(conf.SourceFormat) | |||||
fc.AutoDetect = conf.Autodetect | |||||
fc.MaxBadRecords = conf.MaxBadRecords | |||||
fc.IgnoreUnknownValues = conf.IgnoreUnknownValues | |||||
fc.Schema = bqToSchema(conf.Schema) | |||||
fc.SkipLeadingRows = conf.SkipLeadingRows | |||||
fc.AllowJaggedRows = conf.AllowJaggedRows | |||||
fc.AllowQuotedNewlines = conf.AllowQuotedNewlines | |||||
fc.Encoding = Encoding(conf.Encoding) | |||||
fc.FieldDelimiter = conf.FieldDelimiter | |||||
fc.CSVOptions.setQuote(conf.Quote) | |||||
} | |||||
func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) { | |||||
format := fc.SourceFormat | |||||
if format == "" { | |||||
// Format must be explicitly set for external data sources. | |||||
format = CSV | |||||
} | |||||
conf.Autodetect = fc.AutoDetect | |||||
conf.IgnoreUnknownValues = fc.IgnoreUnknownValues | |||||
conf.MaxBadRecords = fc.MaxBadRecords | |||||
conf.SourceFormat = string(format) | |||||
if fc.Schema != nil { | |||||
conf.Schema = fc.Schema.toBQ() | |||||
} | |||||
if format == CSV { | |||||
fc.CSVOptions.populateExternalDataConfig(conf) | |||||
} | |||||
} | |||||
// Encoding specifies the character encoding of data to be loaded into BigQuery.
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
// for more details about how this is used.
type Encoding string

const (
	// UTF_8 specifies the UTF-8 encoding type.
	UTF_8 Encoding = "UTF-8"
	// ISO_8859_1 specifies the ISO-8859-1 encoding type.
	ISO_8859_1 Encoding = "ISO-8859-1"
)
@@ -0,0 +1,98 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"testing" | |||||
"cloud.google.com/go/internal/pretty" | |||||
"cloud.google.com/go/internal/testutil" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// Shared fixtures for the FileConfig population tests below.
var (
	// hyphen is taken by address to build the expected *string Quote values.
	hyphen = "-"

	// fc is a fully-populated FileConfig exercised by both tests.
	fc = FileConfig{
		SourceFormat:        CSV,
		AutoDetect:          true,
		MaxBadRecords:       7,
		IgnoreUnknownValues: true,
		Schema: Schema{
			stringFieldSchema(),
			nestedFieldSchema(),
		},
		CSVOptions: CSVOptions{
			Quote:               hyphen,
			FieldDelimiter:      "\t",
			SkipLeadingRows:     8,
			AllowJaggedRows:     true,
			AllowQuotedNewlines: true,
			Encoding:            UTF_8,
		},
	}
)
// TestFileConfigPopulateLoadConfig checks that populateLoadConfig copies
// every field of the shared fc fixture into the service load configuration.
func TestFileConfigPopulateLoadConfig(t *testing.T) {
	want := &bq.JobConfigurationLoad{
		SourceFormat:        "CSV",
		FieldDelimiter:      "\t",
		SkipLeadingRows:     8,
		AllowJaggedRows:     true,
		AllowQuotedNewlines: true,
		Autodetect:          true,
		Encoding:            "UTF-8",
		MaxBadRecords:       7,
		IgnoreUnknownValues: true,
		Schema: &bq.TableSchema{
			Fields: []*bq.TableFieldSchema{
				bqStringFieldSchema(),
				bqNestedFieldSchema(),
			}},
		Quote: &hyphen,
	}
	got := &bq.JobConfigurationLoad{}
	fc.populateLoadConfig(got)
	if !testutil.Equal(got, want) {
		t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
	}
}
// TestFileConfigPopulateExternalDataConfig checks that
// populateExternalDataConfig copies the shared fc fixture into the service
// external data configuration, with CSV options nested under CsvOptions.
func TestFileConfigPopulateExternalDataConfig(t *testing.T) {
	got := &bq.ExternalDataConfiguration{}
	fc.populateExternalDataConfig(got)
	want := &bq.ExternalDataConfiguration{
		SourceFormat:        "CSV",
		Autodetect:          true,
		MaxBadRecords:       7,
		IgnoreUnknownValues: true,
		Schema: &bq.TableSchema{
			Fields: []*bq.TableFieldSchema{
				bqStringFieldSchema(),
				bqNestedFieldSchema(),
			}},
		CsvOptions: &bq.CsvOptions{
			AllowJaggedRows:     true,
			AllowQuotedNewlines: true,
			Encoding:            "UTF-8",
			FieldDelimiter:      "\t",
			Quote:               &hyphen,
			SkipLeadingRows:     8,
		},
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("got=-, want=+:\n%s", diff)
	}
}
@@ -0,0 +1,73 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"io" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
// an input or output to a BigQuery operation.
type GCSReference struct {
	// URIs refer to Google Cloud Storage objects.
	URIs []string

	// Format options for the referenced data (schema, CSV options, etc.).
	FileConfig

	// DestinationFormat is the format to use when writing exported files.
	// Allowed values are: CSV, Avro, JSON. The default is CSV.
	// CSV is not supported for tables with nested or repeated fields.
	DestinationFormat DataFormat

	// Compression specifies the type of compression to apply when writing data
	// to Google Cloud Storage, or using this GCSReference as an ExternalData
	// source with CSV or JSON SourceFormat. Default is None.
	Compression Compression
}
// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination. | |||||
// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object. | |||||
// Data may also be split into mutiple files, if multiple URIs or URIs containing wildcards are provided. | |||||
// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name. | |||||
// For more information about the treatment of wildcards and multiple URIs, | |||||
// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple | |||||
func NewGCSReference(uri ...string) *GCSReference { | |||||
return &GCSReference{URIs: uri} | |||||
} | |||||
// Compression is the type of compression to apply when writing data to Google Cloud Storage.
type Compression string

const (
	// None specifies no compression.
	None Compression = "NONE"
	// Gzip specifies gzip compression.
	Gzip Compression = "GZIP"
)
func (gcs *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader { | |||||
lc.SourceUris = gcs.URIs | |||||
gcs.FileConfig.populateLoadConfig(lc) | |||||
return nil | |||||
} | |||||
func (gcs *GCSReference) toBQ() bq.ExternalDataConfiguration { | |||||
conf := bq.ExternalDataConfiguration{ | |||||
Compression: string(gcs.Compression), | |||||
SourceUris: append([]string{}, gcs.URIs...), | |||||
} | |||||
gcs.FileConfig.populateExternalDataConfig(&conf) | |||||
return conf | |||||
} |
@@ -0,0 +1,215 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"fmt" | |||||
"reflect" | |||||
"golang.org/x/net/context" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
"google.golang.org/api/iterator" | |||||
) | |||||
func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator { | |||||
it := &RowIterator{ | |||||
ctx: ctx, | |||||
table: t, | |||||
pf: pf, | |||||
} | |||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo( | |||||
it.fetch, | |||||
func() int { return len(it.rows) }, | |||||
func() interface{} { r := it.rows; it.rows = nil; return r }) | |||||
return it | |||||
} | |||||
// A RowIterator provides access to the result of a BigQuery lookup.
type RowIterator struct {
	ctx      context.Context
	table    *Table
	pf       pageFetcher // fetches one page of rows; see fetchPage
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// StartIndex can be set before the first call to Next. If PageInfo().Token
	// is also set, StartIndex is ignored.
	StartIndex uint64

	// The schema of the table. Available after the first call to Next.
	Schema Schema

	// The total number of rows in the result. Available after the first call to Next.
	// May be zero just after rows were inserted.
	TotalRows uint64

	rows [][]Value // buffered rows not yet returned by Next

	structLoader structLoader // used to populate a pointer to a struct
}
// Next loads the next row into dst. Its return value is iterator.Done if there
// are no more results. Once Next returns iterator.Done, all subsequent calls
// will return iterator.Done.
//
// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
//
// If dst is a *[]Value, it will be set to a new []Value whose i'th element
// will be populated with the i'th column of the row.
//
// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
// for each schema column name, the map key of that name will be set to the column's
// value. STRUCT types (RECORD types or nested schemas) become nested maps.
//
// If dst is pointer to a struct, each column in the schema will be matched
// with an exported field of the struct that has the same name, ignoring case.
// Unmatched schema columns and struct fields will be ignored.
//
// Each BigQuery column type corresponds to one or more Go types; a matching struct
// field must be of the correct type. The correspondences are:
//
//   STRING      string
//   BOOL        bool
//   INTEGER     int, int8, int16, int32, int64, uint8, uint16, uint32
//   FLOAT       float32, float64
//   BYTES       []byte
//   TIMESTAMP   time.Time
//   DATE        civil.Date
//   TIME        civil.Time
//   DATETIME    civil.DateTime
//
// A repeated field corresponds to a slice or array of the element type. A STRUCT
// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
// All calls to Next on the same iterator must use the same struct type.
//
// It is an error to attempt to read a BigQuery NULL value into a struct field,
// unless the field is of type []byte or is one of the special Null types: NullInt64,
// NullFloat64, NullBool, NullString, NullTimestamp, NullDate, NullTime or
// NullDateTime. You can also use a *[]Value or *map[string]Value to read from a
// table with NULLs.
func (it *RowIterator) Next(dst interface{}) error {
	// Resolve the destination kind up front; struct pointers are deferred
	// until after the fetch because the struct loader needs the schema.
	var vl ValueLoader
	switch dst := dst.(type) {
	case ValueLoader:
		vl = dst
	case *[]Value:
		vl = (*valueList)(dst)
	case *map[string]Value:
		vl = (*valueMap)(dst)
	default:
		if !isStructPtr(dst) {
			return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst)
		}
	}
	// Advance the iterator; this may fetch a page and populate it.Schema.
	if err := it.nextFunc(); err != nil {
		return err
	}
	// Pop the next buffered row.
	row := it.rows[0]
	it.rows = it.rows[1:]

	if vl == nil {
		// This can only happen if dst is a pointer to a struct. We couldn't
		// set vl above because we need the schema.
		if err := it.structLoader.set(dst, it.Schema); err != nil {
			return err
		}
		vl = &it.structLoader
	}
	return vl.Load(row, it.Schema)
}
// isStructPtr reports whether x is a non-nil pointer to a struct.
//
// A nil interface value is handled explicitly: reflect.TypeOf(nil) returns
// nil, and calling Kind on a nil Type panics, which would crash
// RowIterator.Next when passed a nil dst instead of returning its
// descriptive conversion error.
func isStructPtr(x interface{}) bool {
	t := reflect.TypeOf(x)
	return t != nil && t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) { | |||||
res, err := it.pf(it.ctx, it.table, it.Schema, it.StartIndex, int64(pageSize), pageToken) | |||||
if err != nil { | |||||
return "", err | |||||
} | |||||
it.rows = append(it.rows, res.rows...) | |||||
it.Schema = res.schema | |||||
it.TotalRows = res.totalRows | |||||
return res.pageToken, nil | |||||
} | |||||
// A pageFetcher returns a page of rows from a destination table.
type pageFetcher func(ctx context.Context, _ *Table, _ Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error)

// fetchPageResult holds one page of rows plus the metadata needed to
// continue iteration.
type fetchPageResult struct {
	pageToken string   // token for the next page; empty when exhausted
	rows      [][]Value
	totalRows uint64
	schema    Schema
}
// fetchPage gets a page of rows from t.
//
// If no schema is supplied, the table schema is fetched concurrently with
// the row listing; errc carries the outcome of that fetch (buffered so the
// goroutine never blocks, and pre-filled with nil when no fetch is needed).
func fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
	// Fetch the table schema in the background, if necessary.
	errc := make(chan error, 1)
	if schema != nil {
		errc <- nil
	} else {
		go func() {
			var bqt *bq.Table
			err := runWithRetry(ctx, func() (err error) {
				// Request only the schema field to keep the response small.
				bqt, err = t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).
					Fields("schema").
					Context(ctx).
					Do()
				return err
			})
			if err == nil && bqt.Schema != nil {
				schema = bqToSchema(bqt.Schema)
			}
			errc <- err
		}()
	}
	call := t.c.bqs.Tabledata.List(t.ProjectID, t.DatasetID, t.TableID)
	setClientHeader(call.Header())
	// A page token takes precedence over a start index (see RowIterator.StartIndex).
	if pageToken != "" {
		call.PageToken(pageToken)
	} else {
		call.StartIndex(startIndex)
	}
	if pageSize > 0 {
		call.MaxResults(pageSize)
	}
	var res *bq.TableDataList
	err := runWithRetry(ctx, func() (err error) {
		res, err = call.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	// Wait for the background schema fetch (or the pre-filled nil) before
	// converting rows, which requires the schema.
	err = <-errc
	if err != nil {
		return nil, err
	}
	rows, err := convertRows(res.Rows, schema)
	if err != nil {
		return nil, err
	}
	return &fetchPageResult{
		pageToken: res.PageToken,
		rows:      rows,
		totalRows: uint64(res.TotalRows),
		schema:    schema,
	}, nil
}
@@ -0,0 +1,363 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"errors" | |||||
"fmt" | |||||
"testing" | |||||
"cloud.google.com/go/internal/testutil" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/iterator" | |||||
) | |||||
// fetchResponse is a canned reply for a single fetchPage call in tests.
type fetchResponse struct {
	result *fetchPageResult // The result to return.
	err    error            // The error to return.
}
// pageFetcherStub services fetch requests by returning data from an in-memory list of values.
type pageFetcherStub struct {
	fetchResponses map[string]fetchResponse // canned responses keyed by page token
	err            error                    // records a request for an unregistered page token
}
func (pf *pageFetcherStub) fetchPage(ctx context.Context, _ *Table, _ Schema, _ uint64, _ int64, pageToken string) (*fetchPageResult, error) { | |||||
call, ok := pf.fetchResponses[pageToken] | |||||
if !ok { | |||||
pf.err = fmt.Errorf("Unexpected page token: %q", pageToken) | |||||
} | |||||
return call.result, call.err | |||||
} | |||||
// TestIterator exercises RowIterator against a stubbed page fetcher:
// single and multiple pages, empty pages, schema propagation, total-row
// counts, fetch errors, and skipping ahead via a starting page token.
func TestIterator(t *testing.T) {
	var (
		iiSchema = Schema{
			{Type: IntegerFieldType},
			{Type: IntegerFieldType},
		}
		siSchema = Schema{
			{Type: StringFieldType},
			{Type: IntegerFieldType},
		}
	)
	fetchFailure := errors.New("fetch failure")
	testCases := []struct {
		desc           string
		pageToken      string // starting token; set on PageInfo before iterating
		fetchResponses map[string]fetchResponse
		want           [][]Value
		wantErr        error
		wantSchema     Schema
		wantTotalRows  uint64
	}{
		{
			desc: "Iteration over single empty page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{},
						schema:    Schema{},
					},
				},
			},
			want:       [][]Value{},
			wantSchema: Schema{},
		},
		{
			desc: "Iteration over single page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
						totalRows: 4,
					},
				},
			},
			want:          [][]Value{{1, 2}, {11, 12}},
			wantSchema:    iiSchema,
			wantTotalRows: 4,
		},
		{
			desc: "Iteration over single page with different schema",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{{"1", 2}, {"11", 12}},
						schema:    siSchema,
					},
				},
			},
			want:       [][]Value{{"1", 2}, {"11", 12}},
			wantSchema: siSchema,
		},
		{
			desc: "Iteration over two pages",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
						totalRows: 4,
					},
				},
				"a": {
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,
						totalRows: 4,
					},
				},
			},
			want:          [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
			wantSchema:    iiSchema,
			wantTotalRows: 4,
		},
		{
			desc: "Server response includes empty page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
				"a": {
					result: &fetchPageResult{
						pageToken: "b",
						rows:      [][]Value{},
						schema:    iiSchema,
					},
				},
				"b": {
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,
					},
				},
			},
			want:       [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
			wantSchema: iiSchema,
		},
		{
			desc: "Fetch error",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
				"a": {
					// We return some data from this fetch, but also an error.
					// So the end result should include only data from the previous fetch.
					err: fetchFailure,
					result: &fetchPageResult{
						pageToken: "b",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,
					},
				},
			},
			want:       [][]Value{{1, 2}, {11, 12}},
			wantErr:    fetchFailure,
			wantSchema: iiSchema,
		},
		{
			desc:      "Skip over an entire page",
			pageToken: "a",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
				"a": {
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,
					},
				},
			},
			want:       [][]Value{{101, 102}, {111, 112}},
			wantSchema: iiSchema,
		},
		{
			desc:      "Skip beyond all data",
			pageToken: "b",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
				"a": {
					result: &fetchPageResult{
						pageToken: "b",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,
					},
				},
				"b": {
					result: &fetchPageResult{},
				},
			},
			// In this test case, Next will return iterator.Done on its first
			// call, so no rows are read at all.
			want:       [][]Value{},
			wantSchema: Schema{},
		},
	}
	for _, tc := range testCases {
		pf := &pageFetcherStub{
			fetchResponses: tc.fetchResponses,
		}
		it := newRowIterator(context.Background(), nil, pf.fetchPage)
		it.PageInfo().Token = tc.pageToken
		values, schema, totalRows, err := consumeRowIterator(it)
		if err != tc.wantErr {
			t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
		}
		// Compare only when at least one side is non-empty, so nil vs. empty
		// slice differences don't cause spurious failures.
		if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
			t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
		}
		if (len(schema) != 0 || len(tc.wantSchema) != 0) && !testutil.Equal(schema, tc.wantSchema) {
			t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
		}
		if totalRows != tc.wantTotalRows {
			t.Errorf("%s: totalRows: got %d, want %d", tc.desc, totalRows, tc.wantTotalRows)
		}
	}
}
// consumeRowIterator reads the schema and all values from a RowIterator and returns them. | |||||
func consumeRowIterator(it *RowIterator) ([][]Value, Schema, uint64, error) { | |||||
var ( | |||||
got [][]Value | |||||
schema Schema | |||||
totalRows uint64 | |||||
) | |||||
for { | |||||
var vls []Value | |||||
err := it.Next(&vls) | |||||
if err == iterator.Done { | |||||
return got, schema, totalRows, nil | |||||
} | |||||
if err != nil { | |||||
return got, schema, totalRows, err | |||||
} | |||||
got = append(got, vls) | |||||
schema = it.Schema | |||||
totalRows = it.TotalRows | |||||
} | |||||
} | |||||
func TestNextDuringErrorState(t *testing.T) { | |||||
pf := &pageFetcherStub{ | |||||
fetchResponses: map[string]fetchResponse{ | |||||
"": {err: errors.New("bang")}, | |||||
}, | |||||
} | |||||
it := newRowIterator(context.Background(), nil, pf.fetchPage) | |||||
var vals []Value | |||||
if err := it.Next(&vals); err == nil { | |||||
t.Errorf("Expected error after calling Next") | |||||
} | |||||
if err := it.Next(&vals); err == nil { | |||||
t.Errorf("Expected error calling Next again when iterator has a non-nil error.") | |||||
} | |||||
} | |||||
// TestNextAfterFinished verifies that once an iterator is exhausted, further
// Next calls keep returning iterator.Done.
func TestNextAfterFinished(t *testing.T) {
	testCases := []struct {
		fetchResponses map[string]fetchResponse
		want           [][]Value
	}{
		{
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{{1, 2}, {11, 12}},
					},
				},
			},
			want: [][]Value{{1, 2}, {11, 12}},
		},
		{
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{},
					},
				},
			},
			want: [][]Value{},
		},
	}
	for _, tc := range testCases {
		pf := &pageFetcherStub{
			fetchResponses: tc.fetchResponses,
		}
		it := newRowIterator(context.Background(), nil, pf.fetchPage)
		values, _, _, err := consumeRowIterator(it)
		if err != nil {
			t.Fatal(err)
		}
		if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
			t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
		}
		// Try calling Next again; it should keep reporting iterator.Done.
		var vals []Value
		if err := it.Next(&vals); err != iterator.Done {
			t.Errorf("Expected Done calling Next when there are no more values")
		}
	}
}
func TestIteratorNextTypes(t *testing.T) { | |||||
it := newRowIterator(context.Background(), nil, nil) | |||||
for _, v := range []interface{}{3, "s", []int{}, &[]int{}, | |||||
map[string]Value{}, &map[string]interface{}{}, | |||||
struct{}{}, | |||||
} { | |||||
if err := it.Next(v); err == nil { | |||||
t.Errorf("%v: want error, got nil", v) | |||||
} | |||||
} | |||||
} |
@@ -0,0 +1,822 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"errors" | |||||
"fmt" | |||||
"math/rand" | |||||
"os" | |||||
"sync" | |||||
"time" | |||||
"cloud.google.com/go/internal" | |||||
"cloud.google.com/go/internal/trace" | |||||
gax "github.com/googleapis/gax-go" | |||||
"golang.org/x/net/context" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
"google.golang.org/api/googleapi" | |||||
"google.golang.org/api/iterator" | |||||
) | |||||
// A Job represents an operation which has been submitted to BigQuery for processing.
type Job struct {
	c          *Client
	projectID  string
	jobID      string
	location   string               // the job's location; see Location
	config     *bq.JobConfiguration // raw configuration; exposed via Config
	lastStatus *JobStatus           // most recently retrieved status; see LastStatus
}
// JobFromID creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package. For example, the job may have
// been created in the BigQuery console.
//
// For jobs whose location is other than "US" or "EU", set Client.Location or use
// JobFromIDLocation.
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
	// Delegate to JobFromIDLocation with the client's default location.
	return c.JobFromIDLocation(ctx, id, c.Location)
}
// JobFromIDLocation creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package (for example, it may have
// been created in the BigQuery console), but it must exist in the specified location.
func (c *Client) JobFromIDLocation(ctx context.Context, id, location string) (j *Job, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.JobFromIDLocation")
	defer func() { trace.EndSpan(ctx, err) }()
	// Request only the fields needed to reconstruct the Job locally.
	bqjob, err := c.getJobInternal(ctx, id, location, "configuration", "jobReference", "status", "statistics")
	if err != nil {
		return nil, err
	}
	return bqToJob(bqjob, c)
}
// ID returns the job's ID.
func (j *Job) ID() string {
	return j.jobID
}
// Location returns the location (e.g. "US") the job was created in.
func (j *Job) Location() string {
	return j.location
}
// State is one of a sequence of states that a Job progresses through as it is processed.
type State int
const (
	StateUnspecified State = iota // used only as a default in JobIterator
	Pending                       // the job has been created but has not started running
	Running                       // the job is executing
	Done                          // the job has finished (successfully or not; see JobStatus.Err)
)
// JobStatus contains the current State of a job, and errors encountered while processing that job.
type JobStatus struct {
	State State
	// err is the fatal error that caused the job to complete unsuccessfully,
	// if any; it is exposed via the Err method.
	err error
	// All errors encountered during the running of the job.
	// Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
	Errors []*Error
	// Statistics about the job.
	Statistics *JobStatistics
}
// JobConfig contains configuration information for a job. It is implemented by
// *CopyConfig, *ExtractConfig, *LoadConfig and *QueryConfig.
type JobConfig interface {
	isJobConfig()
}
// isJobConfig is an unexported marker method that restricts JobConfig to the
// four configuration types below.
func (*CopyConfig) isJobConfig()    {}
func (*ExtractConfig) isJobConfig() {}
func (*LoadConfig) isJobConfig()    {}
func (*QueryConfig) isJobConfig()   {}
// Config returns the configuration information for j.
// The concrete type is one of *CopyConfig, *ExtractConfig, *LoadConfig or
// *QueryConfig; see JobConfig.
func (j *Job) Config() (JobConfig, error) {
	return bqToJobConfig(j.config, j.c)
}
func bqToJobConfig(q *bq.JobConfiguration, c *Client) (JobConfig, error) { | |||||
switch { | |||||
case q == nil: | |||||
return nil, nil | |||||
case q.Copy != nil: | |||||
return bqToCopyConfig(q, c), nil | |||||
case q.Extract != nil: | |||||
return bqToExtractConfig(q, c), nil | |||||
case q.Load != nil: | |||||
return bqToLoadConfig(q, c), nil | |||||
case q.Query != nil: | |||||
return bqToQueryConfig(q, c) | |||||
default: | |||||
return nil, nil | |||||
} | |||||
} | |||||
// JobIDConfig describes how to create an ID for a job.
type JobIDConfig struct {
	// JobID is the ID to use for the job. If empty, a random job ID will be generated.
	JobID string
	// If AddJobIDSuffix is true, then a random string will be appended to JobID.
	AddJobIDSuffix bool
	// Location is the location for the job. If empty, the client's Location is used.
	Location string
}
// createJobRef creates a JobReference. | |||||
func (j *JobIDConfig) createJobRef(c *Client) *bq.JobReference { | |||||
// We don't check whether projectID is empty; the server will return an | |||||
// error when it encounters the resulting JobReference. | |||||
loc := j.Location | |||||
if loc == "" { // Use Client.Location as a default. | |||||
loc = c.Location | |||||
} | |||||
jr := &bq.JobReference{ProjectId: c.projectID, Location: loc} | |||||
if j.JobID == "" { | |||||
jr.JobId = randomIDFn() | |||||
} else if j.AddJobIDSuffix { | |||||
jr.JobId = j.JobID + "-" + randomIDFn() | |||||
} else { | |||||
jr.JobId = j.JobID | |||||
} | |||||
return jr | |||||
} | |||||
// alphanum is the character set from which random IDs are drawn.
const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
var (
	rngMu sync.Mutex // guards rng, which is not safe for concurrent use
	rng   = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
)
// For testing.
var randomIDFn = randomID
// As of August 2017, the BigQuery service uses 27 alphanumeric characters for
// suffixes.
const randomIDLen = 27
// randomID returns a fresh random alphanumeric string of randomIDLen bytes.
// It is used for both job IDs and insert IDs.
func randomID() string {
	var buf [randomIDLen]byte
	rngMu.Lock()
	for i := range buf {
		buf[i] = alphanum[rng.Intn(len(alphanum))]
	}
	rngMu.Unlock()
	return string(buf[:])
}
// Done reports whether the job has completed.
// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
func (s *JobStatus) Done() bool {
	return s.State == Done
}
// Err returns the error that caused the job to complete unsuccessfully (if any).
// Check Done first to see whether the job has completed at all.
func (s *JobStatus) Err() error {
	return s.err
}
// Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (js *JobStatus, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Status")
	defer func() { trace.EndSpan(ctx, err) }()
	// Only the status and statistics fields are needed here.
	bqjob, err := j.c.getJobInternal(ctx, j.jobID, j.location, "status", "statistics")
	if err != nil {
		return nil, err
	}
	// setStatus/setStatistics update j.lastStatus, which is both returned
	// here and later available via LastStatus without another RPC.
	if err := j.setStatus(bqjob.Status); err != nil {
		return nil, err
	}
	j.setStatistics(bqjob.Statistics, j.c)
	return j.lastStatus, nil
}
// LastStatus returns the most recently retrieved status of the job. The status is
// retrieved when a new job is created, or when JobFromID or Job.Status is called.
// Call Job.Status to get the most up-to-date information about a job.
// LastStatus itself makes no RPC.
func (j *Job) LastStatus() *JobStatus {
	return j.lastStatus
}
// Cancel requests that a job be cancelled. This method returns without waiting for
// cancellation to take effect. To check whether the job has terminated, use Job.Status.
// Cancelled jobs may still incur costs.
func (j *Job) Cancel(ctx context.Context) error {
	// Jobs.Cancel returns a job entity, but the only relevant piece of
	// data it may contain (the status of the job) is unreliable. From the
	// docs: "This call will return immediately, and the client will need
	// to poll for the job status to see if the cancel completed
	// successfully". So it would be misleading to return a status.
	call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
		Location(j.location).
		Fields(). // We don't need any of the response data.
		Context(ctx)
	setClientHeader(call.Header())
	// Retry the cancel RPC on retryable errors.
	return runWithRetry(ctx, func() error {
		_, err := call.Do()
		return err
	})
}
// Wait blocks until the job or the context is done. It returns the final status
// of the job.
// If an error occurs while retrieving the status, Wait returns that error. But
// Wait returns nil if the status was retrieved successfully, even if
// status.Err() != nil. So callers must check both errors. See the example.
func (j *Job) Wait(ctx context.Context) (js *JobStatus, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Wait")
	defer func() { trace.EndSpan(ctx, err) }()
	if j.isQuery() {
		// We can avoid polling for query jobs.
		if _, err := j.waitForQuery(ctx, j.projectID); err != nil {
			return nil, err
		}
		// Note: extra RPC even if you just want to wait for the query to finish.
		js, err := j.Status(ctx)
		if err != nil {
			return nil, err
		}
		return js, nil
	}
	// Non-query jobs must poll. Retry with backoff until the job reports Done
	// or the context is cancelled.
	err = internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
		js, err = j.Status(ctx)
		if err != nil {
			return true, err
		}
		if js.Done() {
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		return nil, err
	}
	return js, nil
}
// Read fetches the results of a query job.
// If j is not a query job, Read returns an error.
func (j *Job) Read(ctx context.Context) (ri *RowIterator, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Job.Read")
	defer func() { trace.EndSpan(ctx, err) }()
	// Delegate to read with the production wait and page-fetch functions;
	// tests can call read directly with stubs.
	return j.read(ctx, j.waitForQuery, fetchPage)
}
// read waits for the query job to complete via waitForQuery and returns a
// RowIterator over the job's destination table using pf to fetch pages.
// It fails for non-query jobs and for jobs with no destination table.
func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, error), pf pageFetcher) (*RowIterator, error) {
	if !j.isQuery() {
		return nil, errors.New("bigquery: cannot read from a non-query job")
	}
	destTable := j.config.Query.DestinationTable
	// The destination table should only be nil if there was a query error.
	projectID := j.projectID
	if destTable != nil && projectID != destTable.ProjectId {
		return nil, fmt.Errorf("bigquery: job project ID is %q, but destination table's is %q", projectID, destTable.ProjectId)
	}
	schema, err := waitForQuery(ctx, projectID)
	if err != nil {
		return nil, err
	}
	// Checked after waiting so that a query error surfaces via waitForQuery
	// first; a nil destination table here still indicates a failed query.
	if destTable == nil {
		return nil, errors.New("bigquery: query job missing destination table")
	}
	dt := bqToTable(destTable, j.c)
	it := newRowIterator(ctx, dt, pf)
	// Seed the iterator with the schema so the first page fetch need not
	// retrieve it again (see fetchPage).
	it.Schema = schema
	return it, nil
}
// waitForQuery waits for the query job to complete and returns its schema.
// It polls GetQueryResults with exponential backoff until the service reports
// the job complete, the context is cancelled, or a non-retryable error occurs.
func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, error) {
	// Use GetQueryResults only to wait for completion, not to read results.
	call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Location(j.location).Context(ctx).MaxResults(0)
	setClientHeader(call.Header())
	backoff := gax.Backoff{
		Initial:    1 * time.Second,
		Multiplier: 2,
		Max:        60 * time.Second,
	}
	var res *bq.GetQueryResultsResponse
	err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
		res, err = call.Do()
		if err != nil {
			// Stop only on errors that are not worth retrying.
			return !retryableError(err), err
		}
		if !res.JobComplete { // GetQueryResults may return early without error; retry.
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return nil, err
	}
	return bqToSchema(res.Schema), nil
}
// JobStatistics contains statistics about a job.
type JobStatistics struct {
	CreationTime        time.Time
	StartTime           time.Time
	EndTime             time.Time
	TotalBytesProcessed int64
	// Details holds job-type-specific statistics; see Statistics.
	Details Statistics
}
// Statistics is one of ExtractStatistics, LoadStatistics or QueryStatistics.
type Statistics interface {
	implementsStatistics()
}
// ExtractStatistics contains statistics about an extract job.
// It appears as the Details of a JobStatistics.
type ExtractStatistics struct {
	// The number of files per destination URI or URI pattern specified in the
	// extract configuration. These values will be in the same order as the
	// URIs specified in the 'destinationUris' field.
	DestinationURIFileCounts []int64
}
// LoadStatistics contains statistics about a load job.
// It appears as the Details of a JobStatistics.
type LoadStatistics struct {
	// The number of bytes of source data in a load job.
	InputFileBytes int64
	// The number of source files in a load job.
	InputFiles int64
	// Size of the loaded data in bytes. Note that while a load job is in the
	// running state, this value may change.
	OutputBytes int64
	// The number of rows imported in a load job. Note that while an import job is
	// in the running state, this value may change.
	OutputRows int64
}
// QueryStatistics contains statistics about a query job.
// It appears as the Details of a JobStatistics.
type QueryStatistics struct {
	// Billing tier for the job.
	BillingTier int64
	// Whether the query result was fetched from the query cache.
	CacheHit bool
	// The type of query statement, if valid.
	StatementType string
	// Total bytes billed for the job.
	TotalBytesBilled int64
	// Total bytes processed for the job.
	TotalBytesProcessed int64
	// Describes execution plan for the query.
	QueryPlan []*ExplainQueryStage
	// The number of rows affected by a DML statement. Present only for DML
	// statements INSERT, UPDATE or DELETE.
	NumDMLAffectedRows int64
	// Describes a timeline of job execution.
	Timeline []*QueryTimelineSample
	// ReferencedTables: [Output-only, Experimental] Referenced tables for
	// the job. Queries that reference more than 50 tables will not have a
	// complete list.
	ReferencedTables []*Table
	// The schema of the results. Present only for successful dry run of
	// non-legacy SQL queries.
	Schema Schema
	// Slot-milliseconds consumed by this query job.
	SlotMillis int64
	// Standard SQL: list of undeclared query parameter names detected during a
	// dry run validation.
	UndeclaredQueryParameterNames []string
	// DDL target table.
	DDLTargetTable *Table
	// DDL Operation performed on the target table. Used to report how the
	// query impacted the DDL target table.
	DDLOperationPerformed string
}
// ExplainQueryStage describes one stage of a query.
// It appears in QueryStatistics.QueryPlan.
type ExplainQueryStage struct {
	// CompletedParallelInputs: Number of parallel input segments completed.
	CompletedParallelInputs int64
	// ComputeAvg: Duration the average shard spent on CPU-bound tasks.
	ComputeAvg time.Duration
	// ComputeMax: Duration the slowest shard spent on CPU-bound tasks.
	ComputeMax time.Duration
	// Relative amount of the total time the average shard spent on CPU-bound tasks.
	ComputeRatioAvg float64
	// Relative amount of the total time the slowest shard spent on CPU-bound tasks.
	ComputeRatioMax float64
	// EndTime: Stage end time.
	EndTime time.Time
	// Unique ID for stage within plan.
	ID int64
	// InputStages: IDs for stages that are inputs to this stage.
	InputStages []int64
	// Human-readable name for stage.
	Name string
	// ParallelInputs: Number of parallel input segments to be processed.
	ParallelInputs int64
	// ReadAvg: Duration the average shard spent reading input.
	ReadAvg time.Duration
	// ReadMax: Duration the slowest shard spent reading input.
	ReadMax time.Duration
	// Relative amount of the total time the average shard spent reading input.
	ReadRatioAvg float64
	// Relative amount of the total time the slowest shard spent reading input.
	ReadRatioMax float64
	// Number of records read into the stage.
	RecordsRead int64
	// Number of records written by the stage.
	RecordsWritten int64
	// ShuffleOutputBytes: Total number of bytes written to shuffle.
	ShuffleOutputBytes int64
	// ShuffleOutputBytesSpilled: Total number of bytes written to shuffle
	// and spilled to disk.
	ShuffleOutputBytesSpilled int64
	// StartTime: Stage start time.
	StartTime time.Time
	// Current status for the stage.
	Status string
	// List of operations within the stage in dependency order (approximately
	// chronological).
	Steps []*ExplainQueryStep
	// WaitAvg: Duration the average shard spent waiting to be scheduled.
	WaitAvg time.Duration
	// WaitMax: Duration the slowest shard spent waiting to be scheduled.
	WaitMax time.Duration
	// Relative amount of the total time the average shard spent waiting to be scheduled.
	WaitRatioAvg float64
	// Relative amount of the total time the slowest shard spent waiting to be scheduled.
	WaitRatioMax float64
	// WriteAvg: Duration the average shard spent on writing output.
	WriteAvg time.Duration
	// WriteMax: Duration the slowest shard spent on writing output.
	WriteMax time.Duration
	// Relative amount of the total time the average shard spent on writing output.
	WriteRatioAvg float64
	// Relative amount of the total time the slowest shard spent on writing output.
	WriteRatioMax float64
}
// ExplainQueryStep describes one step of a query stage.
// It appears in ExplainQueryStage.Steps.
type ExplainQueryStep struct {
	// Machine-readable operation type.
	Kind string
	// Substeps lists human-readable descriptions of the operations within this step.
	Substeps []string
}
// QueryTimelineSample represents a sample of execution statistics at a point in time.
// It appears in QueryStatistics.Timeline.
type QueryTimelineSample struct {
	// Total number of units currently being processed by workers, represented as largest value since last sample.
	ActiveUnits int64
	// Total parallel units of work completed by this query.
	CompletedUnits int64
	// Time elapsed since start of query execution.
	Elapsed time.Duration
	// Total parallel units of work remaining for the active stages.
	PendingUnits int64
	// Cumulative slot-milliseconds consumed by the query.
	SlotMillis int64
}
// implementsStatistics marks the three statistics types that satisfy the
// Statistics interface.
func (*ExtractStatistics) implementsStatistics() {}
func (*LoadStatistics) implementsStatistics()    {}
func (*QueryStatistics) implementsStatistics()   {}
// Jobs lists jobs within a project.
// The returned iterator defaults to the client's project; callers may adjust
// its ProjectID, AllUsers and State fields before the first call to Next.
func (c *Client) Jobs(ctx context.Context) *JobIterator {
	it := &JobIterator{
		ctx:       ctx,
		c:         c,
		ProjectID: c.projectID,
	}
	// Wire up the generic paging machinery: fetch retrieves a page, and the
	// two closures report and hand off the buffered items.
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.items) },
		func() interface{} { b := it.items; it.items = nil; return b })
	return it
}
// JobIterator iterates over jobs in a project.
type JobIterator struct {
	ProjectID string // Project ID of the jobs to list. Default is the client's project.
	AllUsers  bool   // Whether to list jobs owned by all users in the project, or just the current caller.
	State     State  // List only jobs in the given state. Defaults to all states.

	ctx      context.Context
	c        *Client
	pageInfo *iterator.PageInfo
	nextFunc func() error
	items    []*Job // buffered page of results, consumed by Next
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
func (it *JobIterator) Next() (*Job, error) { | |||||
if err := it.nextFunc(); err != nil { | |||||
return nil, err | |||||
} | |||||
item := it.items[0] | |||||
it.items = it.items[1:] | |||||
return item, nil | |||||
} | |||||
func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) { | |||||
var st string | |||||
switch it.State { | |||||
case StateUnspecified: | |||||
st = "" | |||||
case Pending: | |||||
st = "pending" | |||||
case Running: | |||||
st = "running" | |||||
case Done: | |||||
st = "done" | |||||
default: | |||||
return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State) | |||||
} | |||||
req := it.c.bqs.Jobs.List(it.ProjectID). | |||||
Context(it.ctx). | |||||
PageToken(pageToken). | |||||
Projection("full"). | |||||
AllUsers(it.AllUsers) | |||||
if st != "" { | |||||
req.StateFilter(st) | |||||
} | |||||
setClientHeader(req.Header()) | |||||
if pageSize > 0 { | |||||
req.MaxResults(int64(pageSize)) | |||||
} | |||||
res, err := req.Do() | |||||
if err != nil { | |||||
return "", err | |||||
} | |||||
for _, j := range res.Jobs { | |||||
job, err := convertListedJob(j, it.c) | |||||
if err != nil { | |||||
return "", err | |||||
} | |||||
it.items = append(it.items, job) | |||||
} | |||||
return res.NextPageToken, nil | |||||
} | |||||
// convertListedJob converts a job returned by a jobs.list call into a *Job.
// List results carry the same reference/config/status/statistics fields as a
// full job resource, so the shared bqToJob2 conversion applies.
func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
	return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, c)
}
func (c *Client) getJobInternal(ctx context.Context, jobID, location string, fields ...googleapi.Field) (*bq.Job, error) { | |||||
var job *bq.Job | |||||
call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx) | |||||
if location != "" { | |||||
call = call.Location(location) | |||||
} | |||||
if len(fields) > 0 { | |||||
call = call.Fields(fields...) | |||||
} | |||||
setClientHeader(call.Header()) | |||||
err := runWithRetry(ctx, func() (err error) { | |||||
job, err = call.Do() | |||||
return err | |||||
}) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return job, nil | |||||
} | |||||
// bqToJob converts a full job proto into a *Job.
func bqToJob(q *bq.Job, c *Client) (*Job, error) {
	return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, c)
}

// bqToJob2 builds a *Job from the individual proto pieces shared by full job
// resources and jobs.list entries. It returns an error only if the job status
// contains an unrecognized state.
func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, c *Client) (*Job, error) {
	j := &Job{
		projectID: qr.ProjectId,
		jobID:     qr.JobId,
		location:  qr.Location,
		c:         c,
	}
	j.setConfig(qc)
	if err := j.setStatus(qs); err != nil {
		return nil, err
	}
	// Statistics must be set after status, since they attach to j.lastStatus.
	j.setStatistics(qt, c)
	return j, nil
}
func (j *Job) setConfig(config *bq.JobConfiguration) { | |||||
if config == nil { | |||||
return | |||||
} | |||||
j.config = config | |||||
} | |||||
func (j *Job) isQuery() bool { | |||||
return j.config != nil && j.config.Query != nil | |||||
} | |||||
// stateMap translates the REST API's job-state strings into State values.
var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
func (j *Job) setStatus(qs *bq.JobStatus) error { | |||||
if qs == nil { | |||||
return nil | |||||
} | |||||
state, ok := stateMap[qs.State] | |||||
if !ok { | |||||
return fmt.Errorf("unexpected job state: %v", qs.State) | |||||
} | |||||
j.lastStatus = &JobStatus{ | |||||
State: state, | |||||
err: nil, | |||||
} | |||||
if err := bqToError(qs.ErrorResult); state == Done && err != nil { | |||||
j.lastStatus.err = err | |||||
} | |||||
for _, ep := range qs.Errors { | |||||
j.lastStatus.Errors = append(j.lastStatus.Errors, bqToError(ep)) | |||||
} | |||||
return nil | |||||
} | |||||
// setStatistics converts the raw proto job statistics into the exported
// JobStatistics form and attaches it to the job's last observed status.
// It is a no-op when there are no statistics or no status to attach them to.
func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) {
	if s == nil || j.lastStatus == nil {
		return
	}
	js := &JobStatistics{
		CreationTime:        unixMillisToTime(s.CreationTime),
		StartTime:           unixMillisToTime(s.StartTime),
		EndTime:             unixMillisToTime(s.EndTime),
		TotalBytesProcessed: s.TotalBytesProcessed,
	}
	// Fill Details from whichever job-type-specific statistics block is
	// present (extract, load, or query). If none is set, Details stays nil.
	switch {
	case s.Extract != nil:
		js.Details = &ExtractStatistics{
			DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
		}
	case s.Load != nil:
		js.Details = &LoadStatistics{
			InputFileBytes: s.Load.InputFileBytes,
			InputFiles:     s.Load.InputFiles,
			OutputBytes:    s.Load.OutputBytes,
			OutputRows:     s.Load.OutputRows,
		}
	case s.Query != nil:
		var names []string
		for _, qp := range s.Query.UndeclaredQueryParameters {
			names = append(names, qp.Name)
		}
		var tables []*Table
		for _, tr := range s.Query.ReferencedTables {
			tables = append(tables, bqToTable(tr, c))
		}
		js.Details = &QueryStatistics{
			BillingTier:                   s.Query.BillingTier,
			CacheHit:                      s.Query.CacheHit,
			DDLTargetTable:                bqToTable(s.Query.DdlTargetTable, c),
			DDLOperationPerformed:         s.Query.DdlOperationPerformed,
			StatementType:                 s.Query.StatementType,
			TotalBytesBilled:              s.Query.TotalBytesBilled,
			TotalBytesProcessed:           s.Query.TotalBytesProcessed,
			NumDMLAffectedRows:            s.Query.NumDmlAffectedRows,
			QueryPlan:                     queryPlanFromProto(s.Query.QueryPlan),
			Schema:                        bqToSchema(s.Query.Schema),
			SlotMillis:                    s.Query.TotalSlotMs,
			Timeline:                      timelineFromProto(s.Query.Timeline),
			ReferencedTables:              tables,
			UndeclaredQueryParameterNames: names,
		}
	}
	j.lastStatus.Statistics = js
}
// queryPlanFromProto converts the proto representation of a query plan into
// ExplainQueryStage values. Millisecond counts become time.Duration and
// Unix-millisecond timestamps become time.Time.
func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
	var res []*ExplainQueryStage
	for _, s := range stages {
		var steps []*ExplainQueryStep
		for _, p := range s.Steps {
			steps = append(steps, &ExplainQueryStep{
				Kind:     p.Kind,
				Substeps: p.Substeps,
			})
		}
		res = append(res, &ExplainQueryStage{
			CompletedParallelInputs:   s.CompletedParallelInputs,
			ComputeAvg:                time.Duration(s.ComputeMsAvg) * time.Millisecond,
			ComputeMax:                time.Duration(s.ComputeMsMax) * time.Millisecond,
			ComputeRatioAvg:           s.ComputeRatioAvg,
			ComputeRatioMax:           s.ComputeRatioMax,
			EndTime:                   time.Unix(0, s.EndMs*1e6), // ms -> ns
			ID:                        s.Id,
			InputStages:               s.InputStages,
			Name:                      s.Name,
			ParallelInputs:            s.ParallelInputs,
			ReadAvg:                   time.Duration(s.ReadMsAvg) * time.Millisecond,
			ReadMax:                   time.Duration(s.ReadMsMax) * time.Millisecond,
			ReadRatioAvg:              s.ReadRatioAvg,
			ReadRatioMax:              s.ReadRatioMax,
			RecordsRead:               s.RecordsRead,
			RecordsWritten:            s.RecordsWritten,
			ShuffleOutputBytes:        s.ShuffleOutputBytes,
			ShuffleOutputBytesSpilled: s.ShuffleOutputBytesSpilled,
			StartTime:                 time.Unix(0, s.StartMs*1e6), // ms -> ns
			Status:                    s.Status,
			Steps:                     steps,
			WaitAvg:                   time.Duration(s.WaitMsAvg) * time.Millisecond,
			WaitMax:                   time.Duration(s.WaitMsMax) * time.Millisecond,
			WaitRatioAvg:              s.WaitRatioAvg,
			WaitRatioMax:              s.WaitRatioMax,
			WriteAvg:                  time.Duration(s.WriteMsAvg) * time.Millisecond,
			WriteMax:                  time.Duration(s.WriteMsMax) * time.Millisecond,
			WriteRatioAvg:             s.WriteRatioAvg,
			WriteRatioMax:             s.WriteRatioMax,
		})
	}
	return res
}
func timelineFromProto(timeline []*bq.QueryTimelineSample) []*QueryTimelineSample { | |||||
var res []*QueryTimelineSample | |||||
for _, s := range timeline { | |||||
res = append(res, &QueryTimelineSample{ | |||||
ActiveUnits: s.ActiveUnits, | |||||
CompletedUnits: s.CompletedUnits, | |||||
Elapsed: time.Duration(s.ElapsedMs) * time.Millisecond, | |||||
PendingUnits: s.PendingUnits, | |||||
SlotMillis: s.TotalSlotMs, | |||||
}) | |||||
} | |||||
return res | |||||
} |
@@ -0,0 +1,95 @@ | |||||
// Copyright 2017 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"testing" | |||||
"cloud.google.com/go/internal/testutil" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// TestCreateJobRef verifies JobIDConfig.createJobRef: explicit vs. random job
// IDs, the AddJobIDSuffix behavior, and how the job location falls back to
// the client's default location.
func TestCreateJobRef(t *testing.T) {
	// Make the random-ID generator deterministic for the test's duration.
	defer fixRandomID("RANDOM")()
	cNoLoc := &Client{projectID: "projectID"}
	cLoc := &Client{projectID: "projectID", Location: "defaultLoc"}
	for _, test := range []struct {
		in     JobIDConfig
		client *Client
		want   *bq.JobReference
	}{
		{
			in:   JobIDConfig{JobID: "foo"},
			want: &bq.JobReference{JobId: "foo"},
		},
		{
			// No JobID: a random one is generated.
			in:   JobIDConfig{},
			want: &bq.JobReference{JobId: "RANDOM"},
		},
		{
			// AddJobIDSuffix without a JobID still yields just the random part.
			in:   JobIDConfig{AddJobIDSuffix: true},
			want: &bq.JobReference{JobId: "RANDOM"},
		},
		{
			in:   JobIDConfig{JobID: "foo", AddJobIDSuffix: true},
			want: &bq.JobReference{JobId: "foo-RANDOM"},
		},
		{
			in:   JobIDConfig{JobID: "foo", Location: "loc"},
			want: &bq.JobReference{JobId: "foo", Location: "loc"},
		},
		{
			// Client default location applies when the config has none.
			in:     JobIDConfig{JobID: "foo"},
			client: cLoc,
			want:   &bq.JobReference{JobId: "foo", Location: "defaultLoc"},
		},
		{
			// An explicit config location wins over the client default.
			in:     JobIDConfig{JobID: "foo", Location: "loc"},
			client: cLoc,
			want:   &bq.JobReference{JobId: "foo", Location: "loc"},
		},
	} {
		client := test.client
		if client == nil {
			client = cNoLoc
		}
		got := test.in.createJobRef(client)
		test.want.ProjectId = "projectID"
		if !testutil.Equal(got, test.want) {
			t.Errorf("%+v: got %+v, want %+v", test.in, got, test.want)
		}
	}
}
// fixRandomID replaces the package's random job-ID generator with one that
// always returns s, and returns a function that restores the original.
// Intended use: defer fixRandomID("RANDOM")().
func fixRandomID(s string) func() {
	prev := randomIDFn
	randomIDFn = func() string { return s }
	return func() { randomIDFn = prev }
}
func checkJob(t *testing.T, i int, got, want *bq.Job) { | |||||
if got.JobReference == nil { | |||||
t.Errorf("#%d: empty job reference", i) | |||||
return | |||||
} | |||||
if got.JobReference.JobId == "" { | |||||
t.Errorf("#%d: empty job ID", i) | |||||
return | |||||
} | |||||
d := testutil.Diff(got, want) | |||||
if d != "" { | |||||
t.Errorf("#%d: (got=-, want=+) %s", i, d) | |||||
} | |||||
} |
@@ -0,0 +1,141 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"io" | |||||
"cloud.google.com/go/internal/trace" | |||||
"golang.org/x/net/context" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// LoadConfig holds the configuration for a load job.
type LoadConfig struct {
	// Src is the source from which data will be loaded.
	Src LoadSource

	// Dst is the table into which the data will be loaded.
	Dst *Table

	// CreateDisposition specifies the circumstances under which the destination table will be created.
	// The default is CreateIfNeeded.
	CreateDisposition TableCreateDisposition

	// WriteDisposition specifies how existing data in the destination table is treated.
	// The default is WriteAppend.
	WriteDisposition TableWriteDisposition

	// The labels associated with this job.
	Labels map[string]string

	// If non-nil, the destination table is partitioned by time.
	TimePartitioning *TimePartitioning

	// Custom encryption configuration (e.g., Cloud KMS keys).
	DestinationEncryptionConfig *EncryptionConfig

	// Allows the schema of the destination table to be updated as a side effect of
	// the load job.
	SchemaUpdateOptions []string
}
func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) { | |||||
config := &bq.JobConfiguration{ | |||||
Labels: l.Labels, | |||||
Load: &bq.JobConfigurationLoad{ | |||||
CreateDisposition: string(l.CreateDisposition), | |||||
WriteDisposition: string(l.WriteDisposition), | |||||
DestinationTable: l.Dst.toBQ(), | |||||
TimePartitioning: l.TimePartitioning.toBQ(), | |||||
DestinationEncryptionConfiguration: l.DestinationEncryptionConfig.toBQ(), | |||||
SchemaUpdateOptions: l.SchemaUpdateOptions, | |||||
}, | |||||
} | |||||
media := l.Src.populateLoadConfig(config.Load) | |||||
return config, media | |||||
} | |||||
func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig { | |||||
lc := &LoadConfig{ | |||||
Labels: q.Labels, | |||||
CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition), | |||||
WriteDisposition: TableWriteDisposition(q.Load.WriteDisposition), | |||||
Dst: bqToTable(q.Load.DestinationTable, c), | |||||
TimePartitioning: bqToTimePartitioning(q.Load.TimePartitioning), | |||||
DestinationEncryptionConfig: bqToEncryptionConfig(q.Load.DestinationEncryptionConfiguration), | |||||
SchemaUpdateOptions: q.Load.SchemaUpdateOptions, | |||||
} | |||||
var fc *FileConfig | |||||
if len(q.Load.SourceUris) == 0 { | |||||
s := NewReaderSource(nil) | |||||
fc = &s.FileConfig | |||||
lc.Src = s | |||||
} else { | |||||
s := NewGCSReference(q.Load.SourceUris...) | |||||
fc = &s.FileConfig | |||||
lc.Src = s | |||||
} | |||||
bqPopulateFileConfig(q.Load, fc) | |||||
return lc | |||||
} | |||||
// A Loader loads data from Google Cloud Storage into a BigQuery table.
type Loader struct {
	JobIDConfig // embedded: job ID and location options
	LoadConfig  // embedded: what to load and where
	c           *Client
}

// A LoadSource represents a source of data that can be loaded into
// a BigQuery table.
//
// This package defines two LoadSources: GCSReference, for Google Cloud Storage
// objects, and ReaderSource, for data read from an io.Reader.
type LoadSource interface {
	// populates config, returns media
	populateLoadConfig(*bq.JobConfigurationLoad) io.Reader
}
// LoaderFrom returns a Loader which can be used to load data into a BigQuery table. | |||||
// The returned Loader may optionally be further configured before its Run method is called. | |||||
// See GCSReference and ReaderSource for additional configuration options that | |||||
// affect loading. | |||||
func (t *Table) LoaderFrom(src LoadSource) *Loader { | |||||
return &Loader{ | |||||
c: t.c, | |||||
LoadConfig: LoadConfig{ | |||||
Src: src, | |||||
Dst: t, | |||||
}, | |||||
} | |||||
} | |||||
// Run initiates a load job.
// The named return err is required so the deferred trace.EndSpan call can
// observe the final error value.
func (l *Loader) Run(ctx context.Context) (j *Job, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Load.Run")
	defer func() { trace.EndSpan(ctx, err) }()
	job, media := l.newJob()
	return l.c.insertJob(ctx, job, media)
}
func (l *Loader) newJob() (*bq.Job, io.Reader) { | |||||
config, media := l.LoadConfig.toBQ() | |||||
return &bq.Job{ | |||||
JobReference: l.JobIDConfig.createJobRef(l.c), | |||||
Configuration: config, | |||||
}, media | |||||
} |
@@ -0,0 +1,260 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"strings" | |||||
"testing" | |||||
"time" | |||||
"cloud.google.com/go/internal/testutil" | |||||
"github.com/google/go-cmp/cmp" | |||||
"github.com/google/go-cmp/cmp/cmpopts" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// defaultLoadJob returns the baseline load-job proto that TestLoad cases
// mutate to build their expected values.
func defaultLoadJob() *bq.Job {
	return &bq.Job{
		JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
		Configuration: &bq.JobConfiguration{
			Load: &bq.JobConfigurationLoad{
				DestinationTable: &bq.TableReference{
					ProjectId: "client-project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
				SourceUris: []string{"uri"},
			},
		},
	}
}

// stringFieldSchema returns a simple STRING field for schema tests.
func stringFieldSchema() *FieldSchema {
	return &FieldSchema{Name: "fieldname", Type: StringFieldType}
}

// nestedFieldSchema returns a RECORD field containing one STRING field.
func nestedFieldSchema() *FieldSchema {
	return &FieldSchema{
		Name:   "nested",
		Type:   RecordFieldType,
		Schema: Schema{stringFieldSchema()},
	}
}

// bqStringFieldSchema is the proto counterpart of stringFieldSchema.
func bqStringFieldSchema() *bq.TableFieldSchema {
	return &bq.TableFieldSchema{
		Name: "fieldname",
		Type: "STRING",
	}
}

// bqNestedFieldSchema is the proto counterpart of nestedFieldSchema.
func bqNestedFieldSchema() *bq.TableFieldSchema {
	return &bq.TableFieldSchema{
		Name:   "nested",
		Type:   "RECORD",
		Fields: []*bq.TableFieldSchema{bqStringFieldSchema()},
	}
}
func TestLoad(t *testing.T) { | |||||
defer fixRandomID("RANDOM")() | |||||
c := &Client{projectID: "client-project-id"} | |||||
testCases := []struct { | |||||
dst *Table | |||||
src LoadSource | |||||
jobID string | |||||
location string | |||||
config LoadConfig | |||||
want *bq.Job | |||||
}{ | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: NewGCSReference("uri"), | |||||
want: defaultLoadJob(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: NewGCSReference("uri"), | |||||
location: "loc", | |||||
want: func() *bq.Job { | |||||
j := defaultLoadJob() | |||||
j.JobReference.Location = "loc" | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
jobID: "ajob", | |||||
config: LoadConfig{ | |||||
CreateDisposition: CreateNever, | |||||
WriteDisposition: WriteTruncate, | |||||
Labels: map[string]string{"a": "b"}, | |||||
TimePartitioning: &TimePartitioning{Expiration: 1234 * time.Millisecond}, | |||||
DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"}, | |||||
SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION"}, | |||||
}, | |||||
src: NewGCSReference("uri"), | |||||
want: func() *bq.Job { | |||||
j := defaultLoadJob() | |||||
j.Configuration.Labels = map[string]string{"a": "b"} | |||||
j.Configuration.Load.CreateDisposition = "CREATE_NEVER" | |||||
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE" | |||||
j.Configuration.Load.TimePartitioning = &bq.TimePartitioning{ | |||||
Type: "DAY", | |||||
ExpirationMs: 1234, | |||||
} | |||||
j.Configuration.Load.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"} | |||||
j.JobReference = &bq.JobReference{ | |||||
JobId: "ajob", | |||||
ProjectId: "client-project-id", | |||||
} | |||||
j.Configuration.Load.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"} | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: func() *GCSReference { | |||||
g := NewGCSReference("uri") | |||||
g.MaxBadRecords = 1 | |||||
g.AllowJaggedRows = true | |||||
g.AllowQuotedNewlines = true | |||||
g.IgnoreUnknownValues = true | |||||
return g | |||||
}(), | |||||
want: func() *bq.Job { | |||||
j := defaultLoadJob() | |||||
j.Configuration.Load.MaxBadRecords = 1 | |||||
j.Configuration.Load.AllowJaggedRows = true | |||||
j.Configuration.Load.AllowQuotedNewlines = true | |||||
j.Configuration.Load.IgnoreUnknownValues = true | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: func() *GCSReference { | |||||
g := NewGCSReference("uri") | |||||
g.Schema = Schema{ | |||||
stringFieldSchema(), | |||||
nestedFieldSchema(), | |||||
} | |||||
return g | |||||
}(), | |||||
want: func() *bq.Job { | |||||
j := defaultLoadJob() | |||||
j.Configuration.Load.Schema = &bq.TableSchema{ | |||||
Fields: []*bq.TableFieldSchema{ | |||||
bqStringFieldSchema(), | |||||
bqNestedFieldSchema(), | |||||
}} | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: func() *GCSReference { | |||||
g := NewGCSReference("uri") | |||||
g.SkipLeadingRows = 1 | |||||
g.SourceFormat = JSON | |||||
g.Encoding = UTF_8 | |||||
g.FieldDelimiter = "\t" | |||||
g.Quote = "-" | |||||
return g | |||||
}(), | |||||
want: func() *bq.Job { | |||||
j := defaultLoadJob() | |||||
j.Configuration.Load.SkipLeadingRows = 1 | |||||
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" | |||||
j.Configuration.Load.Encoding = "UTF-8" | |||||
j.Configuration.Load.FieldDelimiter = "\t" | |||||
hyphen := "-" | |||||
j.Configuration.Load.Quote = &hyphen | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: NewGCSReference("uri"), | |||||
want: func() *bq.Job { | |||||
j := defaultLoadJob() | |||||
// Quote is left unset in GCSReference, so should be nil here. | |||||
j.Configuration.Load.Quote = nil | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: func() *GCSReference { | |||||
g := NewGCSReference("uri") | |||||
g.ForceZeroQuote = true | |||||
return g | |||||
}(), | |||||
want: func() *bq.Job { | |||||
j := defaultLoadJob() | |||||
empty := "" | |||||
j.Configuration.Load.Quote = &empty | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: func() *ReaderSource { | |||||
r := NewReaderSource(strings.NewReader("foo")) | |||||
r.SkipLeadingRows = 1 | |||||
r.SourceFormat = JSON | |||||
r.Encoding = UTF_8 | |||||
r.FieldDelimiter = "\t" | |||||
r.Quote = "-" | |||||
return r | |||||
}(), | |||||
want: func() *bq.Job { | |||||
j := defaultLoadJob() | |||||
j.Configuration.Load.SourceUris = nil | |||||
j.Configuration.Load.SkipLeadingRows = 1 | |||||
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" | |||||
j.Configuration.Load.Encoding = "UTF-8" | |||||
j.Configuration.Load.FieldDelimiter = "\t" | |||||
hyphen := "-" | |||||
j.Configuration.Load.Quote = &hyphen | |||||
return j | |||||
}(), | |||||
}, | |||||
} | |||||
for i, tc := range testCases { | |||||
loader := tc.dst.LoaderFrom(tc.src) | |||||
loader.JobID = tc.jobID | |||||
loader.Location = tc.location | |||||
tc.config.Src = tc.src | |||||
tc.config.Dst = tc.dst | |||||
loader.LoadConfig = tc.config | |||||
got, _ := loader.newJob() | |||||
checkJob(t, i, got, tc.want) | |||||
jc, err := bqToJobConfig(got.Configuration, c) | |||||
if err != nil { | |||||
t.Fatalf("#%d: %v", i, err) | |||||
} | |||||
diff := testutil.Diff(jc.(*LoadConfig), &loader.LoadConfig, | |||||
cmp.AllowUnexported(Table{}, Client{}), | |||||
cmpopts.IgnoreUnexported(ReaderSource{})) | |||||
if diff != "" { | |||||
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff) | |||||
} | |||||
} | |||||
} |
@@ -0,0 +1,299 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"bytes" | |||||
"encoding/json" | |||||
"fmt" | |||||
"reflect" | |||||
"strconv" | |||||
"time" | |||||
"cloud.google.com/go/civil" | |||||
) | |||||
// NullInt64 represents a BigQuery INT64 that may be NULL.
type NullInt64 struct {
	Int64 int64
	Valid bool // Valid is true if Int64 is not NULL.
}

// String returns the value as text, or "NULL" when invalid.
func (n NullInt64) String() string { return nullstr(n.Valid, n.Int64) }

// NullString represents a BigQuery STRING that may be NULL.
type NullString struct {
	StringVal string
	Valid     bool // Valid is true if StringVal is not NULL.
}

// String returns the value as text, or "NULL" when invalid.
func (n NullString) String() string { return nullstr(n.Valid, n.StringVal) }

// NullFloat64 represents a BigQuery FLOAT64 that may be NULL.
type NullFloat64 struct {
	Float64 float64
	Valid   bool // Valid is true if Float64 is not NULL.
}

// String returns the value as text, or "NULL" when invalid.
func (n NullFloat64) String() string { return nullstr(n.Valid, n.Float64) }

// NullBool represents a BigQuery BOOL that may be NULL.
type NullBool struct {
	Bool  bool
	Valid bool // Valid is true if Bool is not NULL.
}

// String returns the value as text, or "NULL" when invalid.
func (n NullBool) String() string { return nullstr(n.Valid, n.Bool) }

// NullTimestamp represents a BigQuery TIMESTAMP that may be null.
type NullTimestamp struct {
	Timestamp time.Time
	Valid     bool // Valid is true if Timestamp is not NULL.
}

// String returns the value as text, or "NULL" when invalid.
func (n NullTimestamp) String() string { return nullstr(n.Valid, n.Timestamp) }
// NullDate represents a BigQuery DATE that may be null.
type NullDate struct {
	Date  civil.Date
	Valid bool // Valid is true if Date is not NULL.
}

// String returns the value as text, or "NULL" when invalid.
func (n NullDate) String() string { return nullstr(n.Valid, n.Date) }

// NullTime represents a BigQuery TIME that may be null.
type NullTime struct {
	Time  civil.Time
	Valid bool // Valid is true if Time is not NULL.
}

// String returns the value in BigQuery's civil-time text form, or "<null>"
// when invalid.
func (n NullTime) String() string {
	if !n.Valid {
		return "<null>"
	}
	return CivilTimeString(n.Time)
}

// NullDateTime represents a BigQuery DATETIME that may be null.
type NullDateTime struct {
	DateTime civil.DateTime
	Valid    bool // Valid is true if DateTime is not NULL.
}

// String returns the value in BigQuery's civil-datetime text form, or
// "<null>" when invalid.
func (n NullDateTime) String() string {
	if !n.Valid {
		return "<null>"
	}
	return CivilDateTimeString(n.DateTime)
}
// MarshalJSON converts the NullInt64 to JSON ("null" when invalid).
func (n NullInt64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Int64) }

// MarshalJSON converts the NullFloat64 to JSON ("null" when invalid).
func (n NullFloat64) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Float64) }

// MarshalJSON converts the NullBool to JSON ("null" when invalid).
func (n NullBool) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Bool) }

// MarshalJSON converts the NullString to JSON ("null" when invalid).
func (n NullString) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.StringVal) }

// MarshalJSON converts the NullTimestamp to JSON ("null" when invalid).
func (n NullTimestamp) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Timestamp) }

// MarshalJSON converts the NullDate to JSON ("null" when invalid).
func (n NullDate) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Date) }

// MarshalJSON converts the NullTime to a quoted civil-time JSON string,
// or "null" when invalid.
func (n NullTime) MarshalJSON() ([]byte, error) {
	if !n.Valid {
		return jsonNull, nil
	}
	return []byte(`"` + CivilTimeString(n.Time) + `"`), nil
}

// MarshalJSON converts the NullDateTime to a quoted civil-datetime JSON
// string, or "null" when invalid.
func (n NullDateTime) MarshalJSON() ([]byte, error) {
	if !n.Valid {
		return jsonNull, nil
	}
	return []byte(`"` + CivilDateTimeString(n.DateTime) + `"`), nil
}
// nullstr renders v with fmt.Sprint, or returns "NULL" when valid is false.
func nullstr(valid bool, v interface{}) string {
	if valid {
		return fmt.Sprint(v)
	}
	return "NULL"
}
// jsonNull is the JSON encoding of NULL, shared by all nullable types.
var jsonNull = []byte("null")

// nulljson marshals v with encoding/json, or yields JSON null when valid is
// false.
func nulljson(valid bool, v interface{}) ([]byte, error) {
	if valid {
		return json.Marshal(v)
	}
	return jsonNull, nil
}
// UnmarshalJSON converts JSON into a NullInt64. JSON null yields the invalid
// zero value.
func (n *NullInt64) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.Int64 = 0
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	if err := json.Unmarshal(b, &n.Int64); err != nil {
		return err
	}
	n.Valid = true
	return nil
}

// UnmarshalJSON converts JSON into a NullFloat64. JSON null yields the
// invalid zero value.
func (n *NullFloat64) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.Float64 = 0
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	if err := json.Unmarshal(b, &n.Float64); err != nil {
		return err
	}
	n.Valid = true
	return nil
}

// UnmarshalJSON converts JSON into a NullBool. JSON null yields the invalid
// zero value.
func (n *NullBool) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.Bool = false
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	if err := json.Unmarshal(b, &n.Bool); err != nil {
		return err
	}
	n.Valid = true
	return nil
}

// UnmarshalJSON converts JSON into a NullString. JSON null yields the
// invalid zero value.
func (n *NullString) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.StringVal = ""
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	if err := json.Unmarshal(b, &n.StringVal); err != nil {
		return err
	}
	n.Valid = true
	return nil
}

// UnmarshalJSON converts JSON into a NullTimestamp. JSON null yields the
// invalid zero value.
func (n *NullTimestamp) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.Timestamp = time.Time{}
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	if err := json.Unmarshal(b, &n.Timestamp); err != nil {
		return err
	}
	n.Valid = true
	return nil
}

// UnmarshalJSON converts JSON into a NullDate. JSON null yields the invalid
// zero value.
func (n *NullDate) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.Date = civil.Date{}
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	if err := json.Unmarshal(b, &n.Date); err != nil {
		return err
	}
	n.Valid = true
	return nil
}
// UnmarshalJSON converts JSON into a NullTime. The value must be JSON null
// or a quoted civil-time string; the quotes are stripped with
// strconv.Unquote before parsing.
func (n *NullTime) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.Time = civil.Time{}
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	s, err := strconv.Unquote(string(b))
	if err != nil {
		return err
	}

	t, err := civil.ParseTime(s)
	if err != nil {
		return err
	}
	n.Time = t

	n.Valid = true
	return nil
}

// UnmarshalJSON converts JSON into a NullDateTime. The value must be JSON
// null or a quoted civil-datetime string; the quotes are stripped with
// strconv.Unquote before parsing.
func (n *NullDateTime) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.DateTime = civil.DateTime{}
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	s, err := strconv.Unquote(string(b))
	if err != nil {
		return err
	}

	dt, err := parseCivilDateTime(s)
	if err != nil {
		return err
	}
	n.DateTime = dt

	n.Valid = true
	return nil
}
// Cached reflect.Types for the Null* wrappers, used by nullableFieldType
// to recognize nullable struct fields during schema inference.
var (
	typeOfNullInt64     = reflect.TypeOf(NullInt64{})
	typeOfNullFloat64   = reflect.TypeOf(NullFloat64{})
	typeOfNullBool      = reflect.TypeOf(NullBool{})
	typeOfNullString    = reflect.TypeOf(NullString{})
	typeOfNullTimestamp = reflect.TypeOf(NullTimestamp{})
	typeOfNullDate      = reflect.TypeOf(NullDate{})
	typeOfNullTime      = reflect.TypeOf(NullTime{})
	typeOfNullDateTime  = reflect.TypeOf(NullDateTime{})
)
func nullableFieldType(t reflect.Type) FieldType { | |||||
switch t { | |||||
case typeOfNullInt64: | |||||
return IntegerFieldType | |||||
case typeOfNullFloat64: | |||||
return FloatFieldType | |||||
case typeOfNullBool: | |||||
return BooleanFieldType | |||||
case typeOfNullString: | |||||
return StringFieldType | |||||
case typeOfNullTimestamp: | |||||
return TimestampFieldType | |||||
case typeOfNullDate: | |||||
return DateFieldType | |||||
case typeOfNullTime: | |||||
return TimeFieldType | |||||
case typeOfNullDateTime: | |||||
return DateTimeFieldType | |||||
default: | |||||
return "" | |||||
} | |||||
} |
@@ -0,0 +1,73 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"encoding/json" | |||||
"reflect" | |||||
"testing" | |||||
"cloud.google.com/go/civil" | |||||
"cloud.google.com/go/internal/testutil" | |||||
) | |||||
// Fixed civil time/datetime fixtures shared by the JSON round-trip test below.
var (
	nullsTestTime     = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 1000}
	nullsTestDateTime = civil.DateTime{Date: civil.Date{Year: 2016, Month: 11, Day: 5}, Time: nullsTestTime}
)
// TestNullsJSON checks that every Null* wrapper marshals to the expected JSON
// text (`null` when invalid) and that unmarshaling that text round-trips back
// to the original value.
func TestNullsJSON(t *testing.T) {
	for _, test := range []struct {
		in   interface{}
		want string
	}{
		{&NullInt64{Valid: true, Int64: 3}, `3`},
		{&NullFloat64{Valid: true, Float64: 3.14}, `3.14`},
		{&NullBool{Valid: true, Bool: true}, `true`},
		{&NullString{Valid: true, StringVal: "foo"}, `"foo"`},
		{&NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`},
		{&NullDate{Valid: true, Date: testDate}, `"2016-11-05"`},
		{&NullTime{Valid: true, Time: nullsTestTime}, `"07:50:22.000001"`},
		{&NullDateTime{Valid: true, DateTime: nullsTestDateTime}, `"2016-11-05 07:50:22.000001"`},
		{&NullInt64{}, `null`},
		{&NullFloat64{}, `null`},
		{&NullBool{}, `null`},
		{&NullString{}, `null`},
		{&NullTimestamp{}, `null`},
		{&NullDate{}, `null`},
		{&NullTime{}, `null`},
		{&NullDateTime{}, `null`},
	} {
		bytes, err := json.Marshal(test.in)
		if err != nil {
			t.Fatal(err)
		}
		if got, want := string(bytes), test.want; got != want {
			t.Errorf("%#v: got %s, want %s", test.in, got, want)
		}
		// Round-trip: unmarshal into a fresh value of the same concrete type
		// (obtained via reflection, since test.in is an interface).
		typ := reflect.Indirect(reflect.ValueOf(test.in)).Type()
		value := reflect.New(typ).Interface()
		err = json.Unmarshal(bytes, value)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(value, test.in) {
			t.Errorf("%#v: got %#v, want %#v", test.in, value, test.in)
		}
	}
}
@@ -0,0 +1,40 @@ | |||||
// Copyright 2018 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// +build go1.8 | |||||
package bigquery | |||||
import ( | |||||
"testing" | |||||
"cloud.google.com/go/internal/testutil" | |||||
"golang.org/x/net/context" | |||||
) | |||||
// TestOCTracing checks that running a query produces at least one OpenCensus
// span via the test exporter.
// NOTE(review): uses getClient, so this presumably runs against a live
// service like the other integration tests — confirm it is guarded the
// same way.
func TestOCTracing(t *testing.T) {
	ctx := context.Background()
	client := getClient(t)
	defer client.Close()
	te := testutil.NewTestExporter()
	defer te.Unregister()
	q := client.Query("select *")
	q.Run(ctx) // Doesn't matter if we get an error; span should be created either way
	if len(te.Spans) == 0 {
		t.Fatalf("Expected some spans to be created, but got %d", 0)
	}
}
@@ -0,0 +1,357 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"encoding/base64" | |||||
"errors" | |||||
"fmt" | |||||
"math/big" | |||||
"reflect" | |||||
"regexp" | |||||
"time" | |||||
"cloud.google.com/go/civil" | |||||
"cloud.google.com/go/internal/fields" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
var (
	// timestampFormat is the layout BigQuery accepts for TIMESTAMP parameter
	// values (microsecond precision, explicit numeric zone offset).
	// See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type.
	timestampFormat = "2006-01-02 15:04:05.999999-07:00"
	// validFieldName matches legal BigQuery column names: a letter or
	// underscore followed by up to 127 letters, digits, or underscores.
	// See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name
	validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
)

// nullableTagOption is the only option accepted in a `bigquery:"..."`
// struct tag (e.g. `bigquery:"name,nullable"`).
const nullableTagOption = "nullable"
func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { | |||||
name, keep, opts, err := fields.ParseStandardTag("bigquery", t) | |||||
if err != nil { | |||||
return "", false, nil, err | |||||
} | |||||
if name != "" && !validFieldName.MatchString(name) { | |||||
return "", false, nil, errInvalidFieldName | |||||
} | |||||
for _, opt := range opts { | |||||
if opt != nullableTagOption { | |||||
return "", false, nil, fmt.Errorf( | |||||
"bigquery: invalid tag option %q. The only valid option is %q", | |||||
opt, nullableTagOption) | |||||
} | |||||
} | |||||
return name, keep, opts, nil | |||||
} | |||||
// fieldCache memoizes per-struct-type field metadata parsed by bqTagParser.
var fieldCache = fields.NewCache(bqTagParser, nil, nil)
// Singleton scalar parameter types, shared by paramType and the
// paramTypeToFieldType table below.
var (
	int64ParamType     = &bq.QueryParameterType{Type: "INT64"}
	float64ParamType   = &bq.QueryParameterType{Type: "FLOAT64"}
	boolParamType      = &bq.QueryParameterType{Type: "BOOL"}
	stringParamType    = &bq.QueryParameterType{Type: "STRING"}
	bytesParamType     = &bq.QueryParameterType{Type: "BYTES"}
	dateParamType      = &bq.QueryParameterType{Type: "DATE"}
	timeParamType      = &bq.QueryParameterType{Type: "TIME"}
	dateTimeParamType  = &bq.QueryParameterType{Type: "DATETIME"}
	timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"}
	numericParamType   = &bq.QueryParameterType{Type: "NUMERIC"}
)
// Cached reflect.Types for the Go types that map to BigQuery date/time and
// numeric parameter types (note typeOfRat is the pointer type *big.Rat).
var (
	typeOfDate     = reflect.TypeOf(civil.Date{})
	typeOfTime     = reflect.TypeOf(civil.Time{})
	typeOfDateTime = reflect.TypeOf(civil.DateTime{})
	typeOfGoTime   = reflect.TypeOf(time.Time{})
	typeOfRat      = reflect.TypeOf(&big.Rat{})
)
// A QueryParameter is a parameter to a query.
type QueryParameter struct {
	// Name is used for named parameter mode.
	// It must match the name in the query case-insensitively.
	Name string

	// Value is the value of the parameter.
	//
	// When you create a QueryParameter to send to BigQuery, the following Go types
	// are supported, with their corresponding Bigquery types:
	// int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
	//   Note that uint, uint64 and uintptr are not supported, because
	//   they may contain values that cannot fit into a 64-bit signed integer.
	// float32, float64: FLOAT64
	// bool: BOOL
	// string: STRING
	// []byte: BYTES
	// time.Time: TIMESTAMP
	// civil.Date: DATE
	// civil.Time: TIME
	// civil.DateTime: DATETIME
	// *big.Rat: NUMERIC
	// Arrays and slices of the above.
	// Structs of the above. Only the exported fields are used.
	//
	// When a QueryParameter is returned inside a QueryConfig from a call to
	// Job.Config:
	// Integers are of type int64.
	// Floating-point values are of type float64.
	// Arrays are of type []interface{}, regardless of the array element type.
	// Structs are of type map[string]interface{}.
	Value interface{}
}
func (p QueryParameter) toBQ() (*bq.QueryParameter, error) { | |||||
pv, err := paramValue(reflect.ValueOf(p.Value)) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
pt, err := paramType(reflect.TypeOf(p.Value)) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return &bq.QueryParameter{ | |||||
Name: p.Name, | |||||
ParameterValue: &pv, | |||||
ParameterType: pt, | |||||
}, nil | |||||
} | |||||
// paramType returns the BigQuery parameter type corresponding to the Go type
// t, recursing into element and field types for arrays/slices and structs.
// It returns an error for nil and for unrepresentable types.
func paramType(t reflect.Type) (*bq.QueryParameterType, error) {
	if t == nil {
		return nil, errors.New("bigquery: nil parameter")
	}
	// Exact-type matches (civil date/time types, time.Time, *big.Rat) take
	// precedence over the Kind-based dispatch below.
	switch t {
	case typeOfDate:
		return dateParamType, nil
	case typeOfTime:
		return timeParamType, nil
	case typeOfDateTime:
		return dateTimeParamType, nil
	case typeOfGoTime:
		return timestampParamType, nil
	case typeOfRat:
		return numericParamType, nil
	}
	switch t.Kind() {
	// uint, uint64 and uintptr are deliberately absent: they can hold values
	// that do not fit in a signed INT64 (see the QueryParameter doc).
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32:
		return int64ParamType, nil
	case reflect.Float32, reflect.Float64:
		return float64ParamType, nil
	case reflect.Bool:
		return boolParamType, nil
	case reflect.String:
		return stringParamType, nil
	case reflect.Slice:
		// []byte is BYTES; any other slice falls through to the ARRAY case.
		if t.Elem().Kind() == reflect.Uint8 {
			return bytesParamType, nil
		}
		fallthrough
	case reflect.Array:
		et, err := paramType(t.Elem())
		if err != nil {
			return nil, err
		}
		return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil
	case reflect.Ptr:
		// Only pointers to structs are supported; anything else falls out of
		// the switch to the error return below.
		if t.Elem().Kind() != reflect.Struct {
			break
		}
		t = t.Elem()
		fallthrough
	case reflect.Struct:
		var fts []*bq.QueryParameterTypeStructTypes
		fields, err := fieldCache.Fields(t)
		if err != nil {
			return nil, err
		}
		for _, f := range fields {
			pt, err := paramType(f.Type)
			if err != nil {
				return nil, err
			}
			fts = append(fts, &bq.QueryParameterTypeStructTypes{
				Name: f.Name,
				Type: pt,
			})
		}
		return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil
	}
	return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t)
}
// paramValue converts the Go value v into a BigQuery parameter value,
// recursing for arrays/slices and structs. It mirrors paramType's dispatch;
// scalars it does not recognize are formatted with fmt.Sprint and left for
// paramType to reject.
func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
	var res bq.QueryParameterValue
	if !v.IsValid() {
		return res, errors.New("bigquery: nil parameter")
	}
	t := v.Type()
	// Exact-type matches first, as in paramType.
	switch t {
	case typeOfDate:
		res.Value = v.Interface().(civil.Date).String()
		return res, nil
	case typeOfTime:
		// civil.Time has nanosecond resolution, but BigQuery TIME only microsecond.
		// (If we send nanoseconds, then when we try to read the result we get "query job
		// missing destination table").
		res.Value = CivilTimeString(v.Interface().(civil.Time))
		return res, nil
	case typeOfDateTime:
		res.Value = CivilDateTimeString(v.Interface().(civil.DateTime))
		return res, nil
	case typeOfGoTime:
		res.Value = v.Interface().(time.Time).Format(timestampFormat)
		return res, nil
	case typeOfRat:
		res.Value = NumericString(v.Interface().(*big.Rat))
		return res, nil
	}
	switch t.Kind() {
	case reflect.Slice:
		// []byte is sent base64-encoded; other slices fall through to ARRAY.
		if t.Elem().Kind() == reflect.Uint8 {
			res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte))
			return res, nil
		}
		fallthrough
	case reflect.Array:
		var vals []*bq.QueryParameterValue
		for i := 0; i < v.Len(); i++ {
			val, err := paramValue(v.Index(i))
			if err != nil {
				return bq.QueryParameterValue{}, err
			}
			vals = append(vals, &val)
		}
		return bq.QueryParameterValue{ArrayValues: vals}, nil
	case reflect.Ptr:
		if t.Elem().Kind() != reflect.Struct {
			return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t)
		}
		t = t.Elem()
		v = v.Elem()
		if !v.IsValid() {
			// nil pointer becomes empty value
			return res, nil
		}
		fallthrough
	case reflect.Struct:
		fields, err := fieldCache.Fields(t)
		if err != nil {
			return bq.QueryParameterValue{}, err
		}
		res.StructValues = map[string]bq.QueryParameterValue{}
		for _, f := range fields {
			fv := v.FieldByIndex(f.Index)
			fp, err := paramValue(fv)
			if err != nil {
				return bq.QueryParameterValue{}, err
			}
			res.StructValues[f.Name] = fp
		}
		return res, nil
	}
	// None of the above: assume a scalar type. (If it's not a valid type,
	// paramType will catch the error.)
	res.Value = fmt.Sprint(v.Interface())
	return res, nil
}
func bqToQueryParameter(q *bq.QueryParameter) (QueryParameter, error) { | |||||
p := QueryParameter{Name: q.Name} | |||||
val, err := convertParamValue(q.ParameterValue, q.ParameterType) | |||||
if err != nil { | |||||
return QueryParameter{}, err | |||||
} | |||||
p.Value = val | |||||
return p, nil | |||||
} | |||||
// paramTypeToFieldType maps BigQuery parameter type names to field types for
// convertBasicType. TIMESTAMP and DATETIME are intentionally absent: they are
// special-cased in convertParamValue before this table is consulted.
var paramTypeToFieldType = map[string]FieldType{
	int64ParamType.Type:   IntegerFieldType,
	float64ParamType.Type: FloatFieldType,
	boolParamType.Type:    BooleanFieldType,
	stringParamType.Type:  StringFieldType,
	bytesParamType.Type:   BytesFieldType,
	dateParamType.Type:    DateFieldType,
	timeParamType.Type:    TimeFieldType,
	numericParamType.Type: NumericFieldType,
}
// Convert a parameter value from the service to a Go value. This is similar to, but
// not quite the same as, converting data values.
// A nil qval yields a typed nil slice/map for ARRAY/STRUCT; scalar kinds are
// delegated to convertBasicType via the paramTypeToFieldType table.
func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType) (interface{}, error) {
	switch qtype.Type {
	case "ARRAY":
		if qval == nil {
			return []interface{}(nil), nil
		}
		return convertParamArray(qval.ArrayValues, qtype.ArrayType)
	case "STRUCT":
		if qval == nil {
			return map[string]interface{}(nil), nil
		}
		return convertParamStruct(qval.StructValues, qtype.StructTypes)
	case "TIMESTAMP":
		// Parsed with the same layout used when sending the parameter.
		return time.Parse(timestampFormat, qval.Value)
	case "DATETIME":
		return parseCivilDateTime(qval.Value)
	default:
		return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
	}
}
// convertParamArray converts a query parameter array value to a Go value. It | |||||
// always returns a []interface{}. | |||||
func convertParamArray(elVals []*bq.QueryParameterValue, elType *bq.QueryParameterType) ([]interface{}, error) { | |||||
var vals []interface{} | |||||
for _, el := range elVals { | |||||
val, err := convertParamValue(el, elType) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
vals = append(vals, val) | |||||
} | |||||
return vals, nil | |||||
} | |||||
// convertParamStruct converts a query parameter struct value into a Go value. It | |||||
// always returns a map[string]interface{}. | |||||
func convertParamStruct(sVals map[string]bq.QueryParameterValue, sTypes []*bq.QueryParameterTypeStructTypes) (map[string]interface{}, error) { | |||||
vals := map[string]interface{}{} | |||||
for _, st := range sTypes { | |||||
if sv, ok := sVals[st.Name]; ok { | |||||
val, err := convertParamValue(&sv, st.Type) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
vals[st.Name] = val | |||||
} else { | |||||
vals[st.Name] = nil | |||||
} | |||||
} | |||||
return vals, nil | |||||
} |
@@ -0,0 +1,363 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"errors" | |||||
"math" | |||||
"math/big" | |||||
"reflect" | |||||
"testing" | |||||
"time" | |||||
"github.com/google/go-cmp/cmp" | |||||
"cloud.google.com/go/civil" | |||||
"cloud.google.com/go/internal/testutil" | |||||
"golang.org/x/net/context" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// scalarTests pairs each supported scalar Go value with its expected wire
// value and parameter type; shared by the paramValue, paramType, and
// round-trip tests below.
var scalarTests = []struct {
	val      interface{}            // The Go value
	wantVal  string                 // paramValue's desired output
	wantType *bq.QueryParameterType // paramType's desired output
}{
	{int64(0), "0", int64ParamType},
	{3.14, "3.14", float64ParamType},
	{3.14159e-87, "3.14159e-87", float64ParamType},
	{true, "true", boolParamType},
	{"string", "string", stringParamType},
	{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n", stringParamType},
	{math.NaN(), "NaN", float64ParamType},
	{[]byte("foo"), "Zm9v", bytesParamType}, // base64 encoding of "foo"
	{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
		"2016-03-20 04:22:09.000005-01:02",
		timestampParamType},
	{civil.Date{Year: 2016, Month: 3, Day: 20}, "2016-03-20", dateParamType},
	{civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}, "04:05:06.789000", timeParamType},
	{civil.DateTime{Date: civil.Date{Year: 2016, Month: 3, Day: 20}, Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}},
		"2016-03-20 04:05:06.789000",
		dateTimeParamType},
	{big.NewRat(12345, 1000), "12.345000000", numericParamType},
}
// S1 and S2 are nested struct fixtures for the struct-parameter tests.
// S2.e is unexported on purpose: it must be ignored by parameter conversion.
type (
	S1 struct {
		A int
		B *S2
		C bool
	}
	S2 struct {
		D string
		e int
	}
)

// Expected representations of s1 at each stage of the conversion pipeline:
// Go value -> wire type -> wire value -> round-tripped Go value.
var (
	s1 = S1{
		A: 1,
		B: &S2{D: "s"},
		C: true,
	}
	s1ParamType = &bq.QueryParameterType{
		Type: "STRUCT",
		StructTypes: []*bq.QueryParameterTypeStructTypes{
			{Name: "A", Type: int64ParamType},
			{Name: "B", Type: &bq.QueryParameterType{
				Type: "STRUCT",
				StructTypes: []*bq.QueryParameterTypeStructTypes{
					{Name: "D", Type: stringParamType},
				},
			}},
			{Name: "C", Type: boolParamType},
		},
	}
	s1ParamValue = bq.QueryParameterValue{
		StructValues: map[string]bq.QueryParameterValue{
			"A": sval("1"),
			"B": {
				StructValues: map[string]bq.QueryParameterValue{
					"D": sval("s"),
				},
			},
			"C": sval("true"),
		},
	}
	s1ParamReturnValue = map[string]interface{}{
		"A": int64(1),
		"B": map[string]interface{}{"D": "s"},
		"C": true,
	}
)
func sval(s string) bq.QueryParameterValue { | |||||
return bq.QueryParameterValue{Value: s} | |||||
} | |||||
// TestParamValueScalar checks paramValue against every scalarTests entry.
func TestParamValueScalar(t *testing.T) {
	for _, test := range scalarTests {
		got, err := paramValue(reflect.ValueOf(test.val))
		if err != nil {
			t.Errorf("%v: got %v, want nil", test.val, err)
			continue
		}
		want := sval(test.wantVal)
		if !testutil.Equal(got, want) {
			t.Errorf("%v:\ngot  %+v\nwant %+v", test.val, got, want)
		}
	}
}

// TestParamValueArray checks slice and array conversion, including that
// nil and empty slices both produce an empty parameter value.
func TestParamValueArray(t *testing.T) {
	qpv := bq.QueryParameterValue{ArrayValues: []*bq.QueryParameterValue{
		{Value: "1"},
		{Value: "2"},
	},
	}
	for _, test := range []struct {
		val  interface{}
		want bq.QueryParameterValue
	}{
		{[]int(nil), bq.QueryParameterValue{}},
		{[]int{}, bq.QueryParameterValue{}},
		{[]int{1, 2}, qpv},
		{[2]int{1, 2}, qpv},
	} {
		got, err := paramValue(reflect.ValueOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%#v:\ngot  %+v\nwant %+v", test.val, got, test.want)
		}
	}
}

// TestParamValueStruct checks nested-struct conversion against the s1 fixture.
func TestParamValueStruct(t *testing.T) {
	got, err := paramValue(reflect.ValueOf(s1))
	if err != nil {
		t.Fatal(err)
	}
	if !testutil.Equal(got, s1ParamValue) {
		t.Errorf("got  %+v\nwant %+v", got, s1ParamValue)
	}
}

// TestParamValueErrors checks the inputs paramValue itself rejects.
func TestParamValueErrors(t *testing.T) {
	// paramValue lets a few invalid types through, but paramType catches them.
	// Since we never call one without the other that's fine.
	for _, val := range []interface{}{nil, new([]int)} {
		_, err := paramValue(reflect.ValueOf(val))
		if err == nil {
			t.Errorf("%v (%T): got nil, want error", val, val)
		}
	}
}
// TestParamType checks paramType against the scalar table plus a few
// composite (array, struct) and widening (uint32) cases.
func TestParamType(t *testing.T) {
	for _, test := range scalarTests {
		got, err := paramType(reflect.TypeOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.wantType) {
			t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.wantType)
		}
	}
	for _, test := range []struct {
		val  interface{}
		want *bq.QueryParameterType
	}{
		{uint32(32767), int64ParamType},
		{[]byte("foo"), bytesParamType},
		{[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}},
		{[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}},
		{S1{}, s1ParamType},
	} {
		got, err := paramType(reflect.TypeOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want)
		}
	}
}

// TestParamTypeErrors checks the types paramType rejects (nil, unsupported
// unsigned width, pointer-to-non-struct, channel).
func TestParamTypeErrors(t *testing.T) {
	for _, val := range []interface{}{
		nil, uint(0), new([]int), make(chan int),
	} {
		_, err := paramType(reflect.TypeOf(val))
		if err == nil {
			t.Errorf("%v (%T): got nil, want error", val, val)
		}
	}
}
// TestConvertParamValue checks the service-to-Go direction: converting a
// wire value/type pair produced by paramValue/paramType back into a Go value
// must recover the original (scalars), and arrays/structs must convert to
// []interface{} / map[string]interface{}.
func TestConvertParamValue(t *testing.T) {
	// Scalars.
	for _, test := range scalarTests {
		pval, err := paramValue(reflect.ValueOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		ptype, err := paramType(reflect.TypeOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		got, err := convertParamValue(&pval, ptype)
		if err != nil {
			t.Fatalf("convertParamValue(%+v, %+v): %v", pval, ptype, err)
		}
		if !testutil.Equal(got, test.val) {
			t.Errorf("%#v: got %#v", test.val, got)
		}
	}
	// Arrays.
	for _, test := range []struct {
		pval *bq.QueryParameterValue
		want []interface{}
	}{
		{
			&bq.QueryParameterValue{},
			nil,
		},
		{
			&bq.QueryParameterValue{
				ArrayValues: []*bq.QueryParameterValue{{Value: "1"}, {Value: "2"}},
			},
			[]interface{}{int64(1), int64(2)},
		},
	} {
		ptype := &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}
		got, err := convertParamValue(test.pval, ptype)
		if err != nil {
			t.Fatalf("%+v: %v", test.pval, err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%+v: got %+v, want %+v", test.pval, got, test.want)
		}
	}
	// Structs.
	got, err := convertParamValue(&s1ParamValue, s1ParamType)
	if err != nil {
		t.Fatal(err)
	}
	if !testutil.Equal(got, s1ParamReturnValue) {
		t.Errorf("got %+v, want %+v", got, s1ParamReturnValue)
	}
}
// TestIntegration_ScalarParam round-trips each scalar through the live
// service and compares both the query result and the echoed parameter.
// NOTE(review): getClient presumably skips when integration credentials are
// absent — confirm.
func TestIntegration_ScalarParam(t *testing.T) {
	// Timestamps are sent with microsecond precision (timestampFormat), so
	// compare after rounding to microseconds.
	roundToMicros := cmp.Transformer("RoundToMicros",
		func(t time.Time) time.Time { return t.Round(time.Microsecond) })
	c := getClient(t)
	for _, test := range scalarTests {
		gotData, gotParam, err := paramRoundTrip(c, test.val)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(gotData, test.val, roundToMicros) {
			t.Errorf("\ngot  %#v (%T)\nwant %#v (%T)", gotData, gotData, test.val, test.val)
		}
		if !testutil.Equal(gotParam, test.val, roundToMicros) {
			t.Errorf("\ngot  %#v (%T)\nwant %#v (%T)", gotParam, gotParam, test.val, test.val)
		}
	}
}
// TestIntegration_OtherParam round-trips composite parameters (slices,
// arrays, structs) through the live service. The data value and the echoed
// parameter deliberately have different shapes ([]Value vs. []interface{} /
// map[string]interface{}).
func TestIntegration_OtherParam(t *testing.T) {
	c := getClient(t)
	for _, test := range []struct {
		val       interface{}
		wantData  interface{}
		wantParam interface{}
	}{
		{[]int(nil), []Value(nil), []interface{}(nil)},
		{[]int{}, []Value(nil), []interface{}(nil)},
		{
			[]int{1, 2},
			[]Value{int64(1), int64(2)},
			[]interface{}{int64(1), int64(2)},
		},
		{
			[3]int{1, 2, 3},
			[]Value{int64(1), int64(2), int64(3)},
			[]interface{}{int64(1), int64(2), int64(3)},
		},
		{
			S1{},
			[]Value{int64(0), nil, false},
			map[string]interface{}{
				"A": int64(0),
				"B": nil,
				"C": false,
			},
		},
		{
			s1,
			[]Value{int64(1), []Value{"s"}, true},
			s1ParamReturnValue,
		},
	} {
		gotData, gotParam, err := paramRoundTrip(c, test.val)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(gotData, test.wantData) {
			t.Errorf("%#v:\ngot  %#v (%T)\nwant %#v (%T)",
				test.val, gotData, gotData, test.wantData, test.wantData)
		}
		if !testutil.Equal(gotParam, test.wantParam) {
			t.Errorf("%#v:\ngot  %#v (%T)\nwant %#v (%T)",
				test.val, gotParam, gotParam, test.wantParam, test.wantParam)
		}
	}
}
// paramRoundTrip passes x as a query parameter to BigQuery. It returns
// the resulting data value from running the query and the parameter value from
// the returned job configuration.
func paramRoundTrip(c *Client, x interface{}) (data Value, param interface{}, err error) {
	ctx := context.Background()
	// "select ?" echoes the positional parameter back as the single result column.
	q := c.Query("select ?")
	q.Parameters = []QueryParameter{{Value: x}}
	job, err := q.Run(ctx)
	if err != nil {
		return nil, nil, err
	}
	it, err := job.Read(ctx)
	if err != nil {
		return nil, nil, err
	}
	var val []Value
	err = it.Next(&val)
	if err != nil {
		return nil, nil, err
	}
	if len(val) != 1 {
		return nil, nil, errors.New("wrong number of values")
	}
	// Re-fetch the job configuration to observe how the service echoed the
	// parameter back.
	conf, err := job.Config()
	if err != nil {
		return nil, nil, err
	}
	return val[0], conf.(*QueryConfig).Parameters[0].Value, nil
}
@@ -0,0 +1,307 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"errors" | |||||
"cloud.google.com/go/internal/trace" | |||||
"golang.org/x/net/context" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// QueryConfig holds the configuration for a query job.
// A zero QueryConfig with only Q set runs an interactive query into a
// temporary table using the project defaults.
type QueryConfig struct {
	// Dst is the table into which the results of the query will be written.
	// If this field is nil, a temporary table will be created.
	Dst *Table

	// The query to execute. See https://cloud.google.com/bigquery/query-reference for details.
	Q string

	// DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query.
	// If DefaultProjectID is set, DefaultDatasetID must also be set.
	DefaultProjectID string
	DefaultDatasetID string

	// TableDefinitions describes data sources outside of BigQuery.
	// The map keys may be used as table names in the query string.
	//
	// When a QueryConfig is returned from Job.Config, the map values
	// are always of type *ExternalDataConfig.
	TableDefinitions map[string]ExternalData

	// CreateDisposition specifies the circumstances under which the destination table will be created.
	// The default is CreateIfNeeded.
	CreateDisposition TableCreateDisposition

	// WriteDisposition specifies how existing data in the destination table is treated.
	// The default is WriteEmpty.
	WriteDisposition TableWriteDisposition

	// DisableQueryCache prevents results being fetched from the query cache.
	// If this field is false, results are fetched from the cache if they are available.
	// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
	// Cached results are only available when TableID is unspecified in the query's destination Table.
	// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
	DisableQueryCache bool

	// DisableFlattenedResults prevents results being flattened.
	// If this field is false, results from nested and repeated fields are flattened.
	// DisableFlattenedResults implies AllowLargeResults
	// For more information, see https://cloud.google.com/bigquery/docs/data#nested
	DisableFlattenedResults bool

	// AllowLargeResults allows the query to produce arbitrarily large result tables.
	// The destination must be a table.
	// When using this option, queries will take longer to execute, even if the result set is small.
	// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults
	AllowLargeResults bool

	// Priority specifies the priority with which to schedule the query.
	// The default priority is InteractivePriority.
	// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
	Priority QueryPriority

	// MaxBillingTier sets the maximum billing tier for a Query.
	// Queries that have resource usage beyond this tier will fail (without
	// incurring a charge). If this field is zero, the project default will be used.
	MaxBillingTier int

	// MaxBytesBilled limits the number of bytes billed for
	// this job. Queries that would exceed this limit will fail (without incurring
	// a charge).
	// If this field is less than 1, the project default will be
	// used.
	MaxBytesBilled int64

	// UseStandardSQL causes the query to use standard SQL. The default.
	//
	// Deprecated: use UseLegacySQL.
	UseStandardSQL bool

	// UseLegacySQL causes the query to use legacy SQL.
	// Setting both UseStandardSQL and UseLegacySQL is an error (see toBQ).
	UseLegacySQL bool

	// Parameters is a list of query parameters. The presence of parameters
	// implies the use of standard SQL.
	// If the query uses positional syntax ("?"), then no parameter may have a name.
	// If the query uses named syntax ("@p"), then all parameters must have names.
	// It is illegal to mix positional and named syntax.
	Parameters []QueryParameter

	// TimePartitioning specifies time-based partitioning
	// for the destination table.
	TimePartitioning *TimePartitioning

	// The labels associated with this job.
	Labels map[string]string

	// If true, don't actually run this job. A valid query will return a mostly
	// empty response with some processing statistics, while an invalid query will
	// return the same error it would if it wasn't a dry run.
	//
	// Query.Read will fail with dry-run queries. Call Query.Run instead, and then
	// call LastStatus on the returned job to get statistics. Calling Status on a
	// dry-run job will fail.
	DryRun bool

	// Custom encryption configuration (e.g., Cloud KMS keys).
	DestinationEncryptionConfig *EncryptionConfig

	// Allows the schema of the destination table to be updated as a side effect of
	// the query job.
	SchemaUpdateOptions []string
}
func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) { | |||||
qconf := &bq.JobConfigurationQuery{ | |||||
Query: qc.Q, | |||||
CreateDisposition: string(qc.CreateDisposition), | |||||
WriteDisposition: string(qc.WriteDisposition), | |||||
AllowLargeResults: qc.AllowLargeResults, | |||||
Priority: string(qc.Priority), | |||||
MaximumBytesBilled: qc.MaxBytesBilled, | |||||
TimePartitioning: qc.TimePartitioning.toBQ(), | |||||
DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(), | |||||
SchemaUpdateOptions: qc.SchemaUpdateOptions, | |||||
} | |||||
if len(qc.TableDefinitions) > 0 { | |||||
qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration) | |||||
} | |||||
for name, data := range qc.TableDefinitions { | |||||
qconf.TableDefinitions[name] = data.toBQ() | |||||
} | |||||
if qc.DefaultProjectID != "" || qc.DefaultDatasetID != "" { | |||||
qconf.DefaultDataset = &bq.DatasetReference{ | |||||
DatasetId: qc.DefaultDatasetID, | |||||
ProjectId: qc.DefaultProjectID, | |||||
} | |||||
} | |||||
if tier := int64(qc.MaxBillingTier); tier > 0 { | |||||
qconf.MaximumBillingTier = &tier | |||||
} | |||||
f := false | |||||
if qc.DisableQueryCache { | |||||
qconf.UseQueryCache = &f | |||||
} | |||||
if qc.DisableFlattenedResults { | |||||
qconf.FlattenResults = &f | |||||
// DisableFlattenResults implies AllowLargeResults. | |||||
qconf.AllowLargeResults = true | |||||
} | |||||
if qc.UseStandardSQL && qc.UseLegacySQL { | |||||
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL") | |||||
} | |||||
if len(qc.Parameters) > 0 && qc.UseLegacySQL { | |||||
return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL") | |||||
} | |||||
ptrue := true | |||||
pfalse := false | |||||
if qc.UseLegacySQL { | |||||
qconf.UseLegacySql = &ptrue | |||||
} else { | |||||
qconf.UseLegacySql = &pfalse | |||||
} | |||||
if qc.Dst != nil && !qc.Dst.implicitTable() { | |||||
qconf.DestinationTable = qc.Dst.toBQ() | |||||
} | |||||
for _, p := range qc.Parameters { | |||||
qp, err := p.toBQ() | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
qconf.QueryParameters = append(qconf.QueryParameters, qp) | |||||
} | |||||
return &bq.JobConfiguration{ | |||||
Labels: qc.Labels, | |||||
DryRun: qc.DryRun, | |||||
Query: qconf, | |||||
}, nil | |||||
} | |||||
func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) { | |||||
qq := q.Query | |||||
qc := &QueryConfig{ | |||||
Labels: q.Labels, | |||||
DryRun: q.DryRun, | |||||
Q: qq.Query, | |||||
CreateDisposition: TableCreateDisposition(qq.CreateDisposition), | |||||
WriteDisposition: TableWriteDisposition(qq.WriteDisposition), | |||||
AllowLargeResults: qq.AllowLargeResults, | |||||
Priority: QueryPriority(qq.Priority), | |||||
MaxBytesBilled: qq.MaximumBytesBilled, | |||||
UseLegacySQL: qq.UseLegacySql == nil || *qq.UseLegacySql, | |||||
TimePartitioning: bqToTimePartitioning(qq.TimePartitioning), | |||||
DestinationEncryptionConfig: bqToEncryptionConfig(qq.DestinationEncryptionConfiguration), | |||||
SchemaUpdateOptions: qq.SchemaUpdateOptions, | |||||
} | |||||
qc.UseStandardSQL = !qc.UseLegacySQL | |||||
if len(qq.TableDefinitions) > 0 { | |||||
qc.TableDefinitions = make(map[string]ExternalData) | |||||
} | |||||
for name, qedc := range qq.TableDefinitions { | |||||
edc, err := bqToExternalDataConfig(&qedc) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
qc.TableDefinitions[name] = edc | |||||
} | |||||
if qq.DefaultDataset != nil { | |||||
qc.DefaultProjectID = qq.DefaultDataset.ProjectId | |||||
qc.DefaultDatasetID = qq.DefaultDataset.DatasetId | |||||
} | |||||
if qq.MaximumBillingTier != nil { | |||||
qc.MaxBillingTier = int(*qq.MaximumBillingTier) | |||||
} | |||||
if qq.UseQueryCache != nil && !*qq.UseQueryCache { | |||||
qc.DisableQueryCache = true | |||||
} | |||||
if qq.FlattenResults != nil && !*qq.FlattenResults { | |||||
qc.DisableFlattenedResults = true | |||||
} | |||||
if qq.DestinationTable != nil { | |||||
qc.Dst = bqToTable(qq.DestinationTable, c) | |||||
} | |||||
for _, qp := range qq.QueryParameters { | |||||
p, err := bqToQueryParameter(qp) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
qc.Parameters = append(qc.Parameters, p) | |||||
} | |||||
return qc, nil | |||||
} | |||||
// QueryPriority specifies a priority with which a query is to be executed.
type QueryPriority string

const (
	// BatchPriority schedules the query with the service's "BATCH" priority.
	BatchPriority QueryPriority = "BATCH"
	// InteractivePriority schedules the query with the service's
	// "INTERACTIVE" priority. This is the default (see QueryConfig.Priority).
	InteractivePriority QueryPriority = "INTERACTIVE"
)
// A Query queries data from a BigQuery table. Use Client.Query to create a Query.
type Query struct {
	// JobIDConfig controls the ID of the job created by Run.
	JobIDConfig
	// QueryConfig holds the settings for the query job.
	QueryConfig
	// client is the Client that created this Query; Run uses it to insert the job.
	client *Client
}
// Query creates a query with string q. | |||||
// The returned Query may optionally be further configured before its Run method is called. | |||||
func (c *Client) Query(q string) *Query { | |||||
return &Query{ | |||||
client: c, | |||||
QueryConfig: QueryConfig{Q: q}, | |||||
} | |||||
} | |||||
// Run initiates a query job. | |||||
func (q *Query) Run(ctx context.Context) (j *Job, err error) { | |||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Query.Run") | |||||
defer func() { trace.EndSpan(ctx, err) }() | |||||
job, err := q.newJob() | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
j, err = q.client.insertJob(ctx, job, nil) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return j, nil | |||||
} | |||||
func (q *Query) newJob() (*bq.Job, error) { | |||||
config, err := q.QueryConfig.toBQ() | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return &bq.Job{ | |||||
JobReference: q.JobIDConfig.createJobRef(q.client), | |||||
Configuration: config, | |||||
}, nil | |||||
} | |||||
// Read submits a query for execution and returns the results via a RowIterator. | |||||
// It is a shorthand for Query.Run followed by Job.Read. | |||||
func (q *Query) Read(ctx context.Context) (*RowIterator, error) { | |||||
job, err := q.Run(ctx) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return job.Read(ctx) | |||||
} |
@@ -0,0 +1,406 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"testing" | |||||
"time" | |||||
"github.com/google/go-cmp/cmp" | |||||
"cloud.google.com/go/internal/testutil" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
func defaultQueryJob() *bq.Job { | |||||
pfalse := false | |||||
return &bq.Job{ | |||||
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"}, | |||||
Configuration: &bq.JobConfiguration{ | |||||
Query: &bq.JobConfigurationQuery{ | |||||
DestinationTable: &bq.TableReference{ | |||||
ProjectId: "client-project-id", | |||||
DatasetId: "dataset-id", | |||||
TableId: "table-id", | |||||
}, | |||||
Query: "query string", | |||||
DefaultDataset: &bq.DatasetReference{ | |||||
ProjectId: "def-project-id", | |||||
DatasetId: "def-dataset-id", | |||||
}, | |||||
UseLegacySql: &pfalse, | |||||
}, | |||||
}, | |||||
} | |||||
} | |||||
// defaultQuery is the baseline QueryConfig used by TestQuery;
// defaultQueryJob returns the job expected for it.
var defaultQuery = &QueryConfig{
	Q:                "query string",
	DefaultProjectID: "def-project-id",
	DefaultDatasetID: "def-dataset-id",
}
func TestQuery(t *testing.T) { | |||||
defer fixRandomID("RANDOM")() | |||||
c := &Client{ | |||||
projectID: "client-project-id", | |||||
} | |||||
testCases := []struct { | |||||
dst *Table | |||||
src *QueryConfig | |||||
jobIDConfig JobIDConfig | |||||
want *bq.Job | |||||
}{ | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: defaultQuery, | |||||
want: defaultQueryJob(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: &QueryConfig{ | |||||
Q: "query string", | |||||
Labels: map[string]string{"a": "b"}, | |||||
DryRun: true, | |||||
}, | |||||
want: func() *bq.Job { | |||||
j := defaultQueryJob() | |||||
j.Configuration.Labels = map[string]string{"a": "b"} | |||||
j.Configuration.DryRun = true | |||||
j.Configuration.Query.DefaultDataset = nil | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
jobIDConfig: JobIDConfig{JobID: "jobID", AddJobIDSuffix: true}, | |||||
src: &QueryConfig{Q: "query string"}, | |||||
want: func() *bq.Job { | |||||
j := defaultQueryJob() | |||||
j.Configuration.Query.DefaultDataset = nil | |||||
j.JobReference.JobId = "jobID-RANDOM" | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: &Table{}, | |||||
src: defaultQuery, | |||||
want: func() *bq.Job { | |||||
j := defaultQueryJob() | |||||
j.Configuration.Query.DestinationTable = nil | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: &QueryConfig{ | |||||
Q: "query string", | |||||
TableDefinitions: map[string]ExternalData{ | |||||
"atable": func() *GCSReference { | |||||
g := NewGCSReference("uri") | |||||
g.AllowJaggedRows = true | |||||
g.AllowQuotedNewlines = true | |||||
g.Compression = Gzip | |||||
g.Encoding = UTF_8 | |||||
g.FieldDelimiter = ";" | |||||
g.IgnoreUnknownValues = true | |||||
g.MaxBadRecords = 1 | |||||
g.Quote = "'" | |||||
g.SkipLeadingRows = 2 | |||||
g.Schema = Schema{{Name: "name", Type: StringFieldType}} | |||||
return g | |||||
}(), | |||||
}, | |||||
}, | |||||
want: func() *bq.Job { | |||||
j := defaultQueryJob() | |||||
j.Configuration.Query.DefaultDataset = nil | |||||
td := make(map[string]bq.ExternalDataConfiguration) | |||||
quote := "'" | |||||
td["atable"] = bq.ExternalDataConfiguration{ | |||||
Compression: "GZIP", | |||||
IgnoreUnknownValues: true, | |||||
MaxBadRecords: 1, | |||||
SourceFormat: "CSV", // must be explicitly set. | |||||
SourceUris: []string{"uri"}, | |||||
CsvOptions: &bq.CsvOptions{ | |||||
AllowJaggedRows: true, | |||||
AllowQuotedNewlines: true, | |||||
Encoding: "UTF-8", | |||||
FieldDelimiter: ";", | |||||
SkipLeadingRows: 2, | |||||
Quote: "e, | |||||
}, | |||||
Schema: &bq.TableSchema{ | |||||
Fields: []*bq.TableFieldSchema{ | |||||
{Name: "name", Type: "STRING"}, | |||||
}, | |||||
}, | |||||
} | |||||
j.Configuration.Query.TableDefinitions = td | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: &Table{ | |||||
ProjectID: "project-id", | |||||
DatasetID: "dataset-id", | |||||
TableID: "table-id", | |||||
}, | |||||
src: &QueryConfig{ | |||||
Q: "query string", | |||||
DefaultProjectID: "def-project-id", | |||||
DefaultDatasetID: "def-dataset-id", | |||||
CreateDisposition: CreateNever, | |||||
WriteDisposition: WriteTruncate, | |||||
}, | |||||
want: func() *bq.Job { | |||||
j := defaultQueryJob() | |||||
j.Configuration.Query.DestinationTable.ProjectId = "project-id" | |||||
j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE" | |||||
j.Configuration.Query.CreateDisposition = "CREATE_NEVER" | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: &QueryConfig{ | |||||
Q: "query string", | |||||
DefaultProjectID: "def-project-id", | |||||
DefaultDatasetID: "def-dataset-id", | |||||
DisableQueryCache: true, | |||||
}, | |||||
want: func() *bq.Job { | |||||
j := defaultQueryJob() | |||||
f := false | |||||
j.Configuration.Query.UseQueryCache = &f | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: &QueryConfig{ | |||||
Q: "query string", | |||||
DefaultProjectID: "def-project-id", | |||||
DefaultDatasetID: "def-dataset-id", | |||||
AllowLargeResults: true, | |||||
}, | |||||
want: func() *bq.Job { | |||||
j := defaultQueryJob() | |||||
j.Configuration.Query.AllowLargeResults = true | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: &QueryConfig{ | |||||
Q: "query string", | |||||
DefaultProjectID: "def-project-id", | |||||
DefaultDatasetID: "def-dataset-id", | |||||
DisableFlattenedResults: true, | |||||
}, | |||||
want: func() *bq.Job { | |||||
j := defaultQueryJob() | |||||
f := false | |||||
j.Configuration.Query.FlattenResults = &f | |||||
j.Configuration.Query.AllowLargeResults = true | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: &QueryConfig{ | |||||
Q: "query string", | |||||
DefaultProjectID: "def-project-id", | |||||
DefaultDatasetID: "def-dataset-id", | |||||
Priority: QueryPriority("low"), | |||||
}, | |||||
want: func() *bq.Job { | |||||
j := defaultQueryJob() | |||||
j.Configuration.Query.Priority = "low" | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: &QueryConfig{ | |||||
Q: "query string", | |||||
DefaultProjectID: "def-project-id", | |||||
DefaultDatasetID: "def-dataset-id", | |||||
MaxBillingTier: 3, | |||||
MaxBytesBilled: 5, | |||||
}, | |||||
want: func() *bq.Job { | |||||
j := defaultQueryJob() | |||||
tier := int64(3) | |||||
j.Configuration.Query.MaximumBillingTier = &tier | |||||
j.Configuration.Query.MaximumBytesBilled = 5 | |||||
return j | |||||
}(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: &QueryConfig{ | |||||
Q: "query string", | |||||
DefaultProjectID: "def-project-id", | |||||
DefaultDatasetID: "def-dataset-id", | |||||
UseStandardSQL: true, | |||||
}, | |||||
want: defaultQueryJob(), | |||||
}, | |||||
{ | |||||
dst: c.Dataset("dataset-id").Table("table-id"), | |||||
src: &QueryConfig{ | |||||
Q: "query string", | |||||
DefaultProjectID: "def-project-id", | |||||
DefaultDatasetID: "def-dataset-id", | |||||
UseLegacySQL: true, | |||||
}, | |||||
want: func() *bq.Job { | |||||
j := defaultQueryJob() | |||||
ptrue := true | |||||
j.Configuration.Query.UseLegacySql = &ptrue | |||||
j.Configuration.Query.ForceSendFields = nil | |||||
return j | |||||
}(), | |||||
}, | |||||
} | |||||
for i, tc := range testCases { | |||||
query := c.Query("") | |||||
query.JobIDConfig = tc.jobIDConfig | |||||
query.QueryConfig = *tc.src | |||||
query.Dst = tc.dst | |||||
got, err := query.newJob() | |||||
if err != nil { | |||||
t.Errorf("#%d: err calling query: %v", i, err) | |||||
continue | |||||
} | |||||
checkJob(t, i, got, tc.want) | |||||
// Round-trip. | |||||
jc, err := bqToJobConfig(got.Configuration, c) | |||||
if err != nil { | |||||
t.Fatalf("#%d: %v", i, err) | |||||
} | |||||
wantConfig := query.QueryConfig | |||||
// We set AllowLargeResults to true when DisableFlattenedResults is true. | |||||
if wantConfig.DisableFlattenedResults { | |||||
wantConfig.AllowLargeResults = true | |||||
} | |||||
// A QueryConfig with neither UseXXXSQL field set is equivalent | |||||
// to one where UseStandardSQL = true. | |||||
if !wantConfig.UseLegacySQL && !wantConfig.UseStandardSQL { | |||||
wantConfig.UseStandardSQL = true | |||||
} | |||||
// Treat nil and empty tables the same, and ignore the client. | |||||
tableEqual := func(t1, t2 *Table) bool { | |||||
if t1 == nil { | |||||
t1 = &Table{} | |||||
} | |||||
if t2 == nil { | |||||
t2 = &Table{} | |||||
} | |||||
return t1.ProjectID == t2.ProjectID && t1.DatasetID == t2.DatasetID && t1.TableID == t2.TableID | |||||
} | |||||
// A table definition that is a GCSReference round-trips as an ExternalDataConfig. | |||||
// TODO(jba): see if there is a way to express this with a transformer. | |||||
gcsRefToEDC := func(g *GCSReference) *ExternalDataConfig { | |||||
q := g.toBQ() | |||||
e, _ := bqToExternalDataConfig(&q) | |||||
return e | |||||
} | |||||
externalDataEqual := func(e1, e2 ExternalData) bool { | |||||
if r, ok := e1.(*GCSReference); ok { | |||||
e1 = gcsRefToEDC(r) | |||||
} | |||||
if r, ok := e2.(*GCSReference); ok { | |||||
e2 = gcsRefToEDC(r) | |||||
} | |||||
return cmp.Equal(e1, e2) | |||||
} | |||||
diff := testutil.Diff(jc.(*QueryConfig), &wantConfig, | |||||
cmp.Comparer(tableEqual), | |||||
cmp.Comparer(externalDataEqual), | |||||
) | |||||
if diff != "" { | |||||
t.Errorf("#%d: (got=-, want=+:\n%s", i, diff) | |||||
} | |||||
} | |||||
} | |||||
func TestConfiguringQuery(t *testing.T) { | |||||
c := &Client{ | |||||
projectID: "project-id", | |||||
} | |||||
query := c.Query("q") | |||||
query.JobID = "ajob" | |||||
query.DefaultProjectID = "def-project-id" | |||||
query.DefaultDatasetID = "def-dataset-id" | |||||
query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"} | |||||
query.DestinationEncryptionConfig = &EncryptionConfig{KMSKeyName: "keyName"} | |||||
query.SchemaUpdateOptions = []string{"ALLOW_FIELD_ADDITION"} | |||||
// Note: Other configuration fields are tested in other tests above. | |||||
// A lot of that can be consolidated once Client.Copy is gone. | |||||
pfalse := false | |||||
want := &bq.Job{ | |||||
Configuration: &bq.JobConfiguration{ | |||||
Query: &bq.JobConfigurationQuery{ | |||||
Query: "q", | |||||
DefaultDataset: &bq.DatasetReference{ | |||||
ProjectId: "def-project-id", | |||||
DatasetId: "def-dataset-id", | |||||
}, | |||||
UseLegacySql: &pfalse, | |||||
TimePartitioning: &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"}, | |||||
DestinationEncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"}, | |||||
SchemaUpdateOptions: []string{"ALLOW_FIELD_ADDITION"}, | |||||
}, | |||||
}, | |||||
JobReference: &bq.JobReference{ | |||||
JobId: "ajob", | |||||
ProjectId: "project-id", | |||||
}, | |||||
} | |||||
got, err := query.newJob() | |||||
if err != nil { | |||||
t.Fatalf("err calling Query.newJob: %v", err) | |||||
} | |||||
if diff := testutil.Diff(got, want); diff != "" { | |||||
t.Errorf("querying: -got +want:\n%s", diff) | |||||
} | |||||
} | |||||
func TestQueryLegacySQL(t *testing.T) { | |||||
c := &Client{projectID: "project-id"} | |||||
q := c.Query("q") | |||||
q.UseStandardSQL = true | |||||
q.UseLegacySQL = true | |||||
_, err := q.newJob() | |||||
if err == nil { | |||||
t.Error("UseStandardSQL and UseLegacySQL: got nil, want error") | |||||
} | |||||
q = c.Query("q") | |||||
q.Parameters = []QueryParameter{{Name: "p", Value: 3}} | |||||
q.UseLegacySQL = true | |||||
_, err = q.newJob() | |||||
if err == nil { | |||||
t.Error("Parameters and UseLegacySQL: got nil, want error") | |||||
} | |||||
} |
@@ -0,0 +1,235 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"errors" | |||||
"testing" | |||||
"github.com/google/go-cmp/cmp" | |||||
"cloud.google.com/go/internal/testutil" | |||||
"golang.org/x/net/context" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
"google.golang.org/api/iterator" | |||||
) | |||||
// pageFetcherArgs records the arguments of one fetchPage call so tests can
// assert on how the iterator invoked the fetcher.
type pageFetcherArgs struct {
	table      *Table
	schema     Schema
	startIndex uint64
	pageSize   int64
	pageToken  string
}

// pageFetcherReadStub services read requests by returning data from an in-memory list of values.
type pageFetcherReadStub struct {
	// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
	values     [][][]Value       // contains pages / rows / columns.
	pageTokens map[string]string // maps incoming page token to returned page token.

	// arguments are recorded for later inspection.
	calls []pageFetcherArgs
}
func (s *pageFetcherReadStub) fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) { | |||||
s.calls = append(s.calls, | |||||
pageFetcherArgs{t, schema, startIndex, pageSize, pageToken}) | |||||
result := &fetchPageResult{ | |||||
pageToken: s.pageTokens[pageToken], | |||||
rows: s.values[0], | |||||
} | |||||
s.values = s.values[1:] | |||||
return result, nil | |||||
} | |||||
func waitForQueryStub(context.Context, string) (Schema, error) { | |||||
return nil, nil | |||||
} | |||||
// TestRead exercises RowIterator paging through both entry points that share
// the fetchPage mechanism: Table.read and Job.read.
func TestRead(t *testing.T) {
	// The data for the service stub to return is populated for each test case in the testCases for loop.
	ctx := context.Background()
	c := &Client{projectID: "project-id"}
	pf := &pageFetcherReadStub{}
	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		c:         c,
		config: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{
				DestinationTable: &bq.TableReference{
					ProjectId: "project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
			},
		},
	}
	for _, readFunc := range []func() *RowIterator{
		// Read directly from a table.
		func() *RowIterator {
			return c.Dataset("dataset-id").Table("table-id").read(ctx, pf.fetchPage)
		},
		// Read via a query job's destination table.
		func() *RowIterator {
			it, err := queryJob.read(ctx, waitForQueryStub, pf.fetchPage)
			if err != nil {
				t.Fatal(err)
			}
			return it
		},
	} {
		testCases := []struct {
			data       [][][]Value
			pageTokens map[string]string
			want       [][]Value
		}{
			{
				data:       [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
				pageTokens: map[string]string{"": "a", "a": ""},
				want:       [][]Value{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
			},
			{
				data:       [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
				pageTokens: map[string]string{"": ""}, // no more pages after first one.
				want:       [][]Value{{1, 2}, {11, 12}},
			},
		}
		for _, tc := range testCases {
			// Reset the stub for each case: fetchPage consumes pf.values as
			// pages are served, so cases must not share state.
			pf.values = tc.data
			pf.pageTokens = tc.pageTokens
			if got, ok := collectValues(t, readFunc()); ok {
				if !testutil.Equal(got, tc.want) {
					t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
				}
			}
		}
	}
}
func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) { | |||||
var got [][]Value | |||||
for { | |||||
var vals []Value | |||||
err := it.Next(&vals) | |||||
if err == iterator.Done { | |||||
break | |||||
} | |||||
if err != nil { | |||||
t.Errorf("err calling Next: %v", err) | |||||
return nil, false | |||||
} | |||||
got = append(got, vals) | |||||
} | |||||
return got, true | |||||
} | |||||
func TestNoMoreValues(t *testing.T) { | |||||
c := &Client{projectID: "project-id"} | |||||
pf := &pageFetcherReadStub{ | |||||
values: [][][]Value{{{1, 2}, {11, 12}}}, | |||||
} | |||||
it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), pf.fetchPage) | |||||
var vals []Value | |||||
// We expect to retrieve two values and then fail on the next attempt. | |||||
if err := it.Next(&vals); err != nil { | |||||
t.Fatalf("Next: got: %v: want: nil", err) | |||||
} | |||||
if err := it.Next(&vals); err != nil { | |||||
t.Fatalf("Next: got: %v: want: nil", err) | |||||
} | |||||
if err := it.Next(&vals); err != iterator.Done { | |||||
t.Fatalf("Next: got: %v: want: iterator.Done", err) | |||||
} | |||||
} | |||||
// errBang is the sentinel error returned by errorFetchPage.
var errBang = errors.New("bang!")

// errorFetchPage is a pageFetcher that always fails with errBang.
func errorFetchPage(context.Context, *Table, Schema, uint64, int64, string) (*fetchPageResult, error) {
	return nil, errBang
}
func TestReadError(t *testing.T) { | |||||
// test that service read errors are propagated back to the caller. | |||||
c := &Client{projectID: "project-id"} | |||||
it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), errorFetchPage) | |||||
var vals []Value | |||||
if err := it.Next(&vals); err != errBang { | |||||
t.Fatalf("Get: got: %v: want: %v", err, errBang) | |||||
} | |||||
} | |||||
func TestReadTabledataOptions(t *testing.T) { | |||||
// test that read options are propagated. | |||||
s := &pageFetcherReadStub{ | |||||
values: [][][]Value{{{1, 2}}}, | |||||
} | |||||
c := &Client{projectID: "project-id"} | |||||
tr := c.Dataset("dataset-id").Table("table-id") | |||||
it := tr.read(context.Background(), s.fetchPage) | |||||
it.PageInfo().MaxSize = 5 | |||||
var vals []Value | |||||
if err := it.Next(&vals); err != nil { | |||||
t.Fatal(err) | |||||
} | |||||
want := []pageFetcherArgs{{ | |||||
table: tr, | |||||
pageSize: 5, | |||||
pageToken: "", | |||||
}} | |||||
if diff := testutil.Diff(s.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, pageFetcherReadStub{}, Table{}, Client{})); diff != "" { | |||||
t.Errorf("reading (got=-, want=+):\n%s", diff) | |||||
} | |||||
} | |||||
func TestReadQueryOptions(t *testing.T) { | |||||
// test that read options are propagated. | |||||
c := &Client{projectID: "project-id"} | |||||
pf := &pageFetcherReadStub{ | |||||
values: [][][]Value{{{1, 2}}}, | |||||
} | |||||
tr := &bq.TableReference{ | |||||
ProjectId: "project-id", | |||||
DatasetId: "dataset-id", | |||||
TableId: "table-id", | |||||
} | |||||
queryJob := &Job{ | |||||
projectID: "project-id", | |||||
jobID: "job-id", | |||||
c: c, | |||||
config: &bq.JobConfiguration{ | |||||
Query: &bq.JobConfigurationQuery{DestinationTable: tr}, | |||||
}, | |||||
} | |||||
it, err := queryJob.read(context.Background(), waitForQueryStub, pf.fetchPage) | |||||
if err != nil { | |||||
t.Fatalf("err calling Read: %v", err) | |||||
} | |||||
it.PageInfo().MaxSize = 5 | |||||
var vals []Value | |||||
if err := it.Next(&vals); err != nil { | |||||
t.Fatalf("Next: got: %v: want: nil", err) | |||||
} | |||||
want := []pageFetcherArgs{{ | |||||
table: bqToTable(tr, c), | |||||
pageSize: 5, | |||||
pageToken: "", | |||||
}} | |||||
if !testutil.Equal(pf.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, Table{}, Client{})) { | |||||
t.Errorf("reading: got:\n%v\nwant:\n%v", pf.calls, want) | |||||
} | |||||
} |
@@ -0,0 +1,397 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"errors" | |||||
"fmt" | |||||
"reflect" | |||||
"cloud.google.com/go/internal/atomiccache" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// Schema describes the fields in a table or query result.
type Schema []*FieldSchema

// FieldSchema describes a single field (column) of a Schema.
type FieldSchema struct {
	// The field name.
	// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
	// and must start with a letter or underscore.
	// The maximum length is 128 characters.
	Name string

	// A description of the field. The maximum length is 16,384 characters.
	Description string

	// Whether the field may contain multiple values.
	Repeated bool

	// Whether the field is required. Ignored if Repeated is true.
	Required bool

	// The field data type. If Type is Record, then this field contains a nested schema,
	// which is described by Schema.
	Type FieldType

	// Describes the nested schema if Type is set to Record.
	Schema Schema
}
func (fs *FieldSchema) toBQ() *bq.TableFieldSchema { | |||||
tfs := &bq.TableFieldSchema{ | |||||
Description: fs.Description, | |||||
Name: fs.Name, | |||||
Type: string(fs.Type), | |||||
} | |||||
if fs.Repeated { | |||||
tfs.Mode = "REPEATED" | |||||
} else if fs.Required { | |||||
tfs.Mode = "REQUIRED" | |||||
} // else leave as default, which is interpreted as NULLABLE. | |||||
for _, f := range fs.Schema { | |||||
tfs.Fields = append(tfs.Fields, f.toBQ()) | |||||
} | |||||
return tfs | |||||
} | |||||
func (s Schema) toBQ() *bq.TableSchema { | |||||
var fields []*bq.TableFieldSchema | |||||
for _, f := range s { | |||||
fields = append(fields, f.toBQ()) | |||||
} | |||||
return &bq.TableSchema{Fields: fields} | |||||
} | |||||
func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema { | |||||
fs := &FieldSchema{ | |||||
Description: tfs.Description, | |||||
Name: tfs.Name, | |||||
Repeated: tfs.Mode == "REPEATED", | |||||
Required: tfs.Mode == "REQUIRED", | |||||
Type: FieldType(tfs.Type), | |||||
} | |||||
for _, f := range tfs.Fields { | |||||
fs.Schema = append(fs.Schema, bqToFieldSchema(f)) | |||||
} | |||||
return fs | |||||
} | |||||
func bqToSchema(ts *bq.TableSchema) Schema { | |||||
if ts == nil { | |||||
return nil | |||||
} | |||||
var s Schema | |||||
for _, f := range ts.Fields { | |||||
s = append(s, bqToFieldSchema(f)) | |||||
} | |||||
return s | |||||
} | |||||
// FieldType is the type of a BigQuery table field, such as "STRING" or
// "INTEGER".
type FieldType string

// The following constants enumerate the BigQuery field types accepted by
// the service; their string values match the API's type names.
const (
	StringFieldType    FieldType = "STRING"
	BytesFieldType     FieldType = "BYTES"
	IntegerFieldType   FieldType = "INTEGER"
	FloatFieldType     FieldType = "FLOAT"
	BooleanFieldType   FieldType = "BOOLEAN"
	TimestampFieldType FieldType = "TIMESTAMP"
	RecordFieldType    FieldType = "RECORD"
	DateFieldType      FieldType = "DATE"
	TimeFieldType      FieldType = "TIME"
	DateTimeFieldType  FieldType = "DATETIME"
	NumericFieldType   FieldType = "NUMERIC"
)
// Sentinel errors returned by schema inference; tests compare against them
// directly.
var (
	errNoStruct             = errors.New("bigquery: can only infer schema from struct or pointer to struct")
	errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct")
	errInvalidFieldName     = errors.New("bigquery: invalid name of field in struct")
	errBadNullable          = errors.New(`bigquery: use "nullable" only for []byte and struct pointers; for all other types, use a NullXXX type`)
)

// typeOfByteSlice lets inference distinguish []byte (a BYTES field) from
// other slice types (REPEATED fields) by exact type comparison.
var typeOfByteSlice = reflect.TypeOf([]byte{})
// InferSchema tries to derive a BigQuery schema from the supplied struct value. | |||||
// Each exported struct field is mapped to a field in the schema. | |||||
// | |||||
// The following BigQuery types are inferred from the corresponding Go types. | |||||
// (This is the same mapping as that used for RowIterator.Next.) Fields inferred | |||||
// from these types are marked required (non-nullable). | |||||
// | |||||
// STRING string | |||||
// BOOL bool | |||||
// INTEGER int, int8, int16, int32, int64, uint8, uint16, uint32 | |||||
// FLOAT float32, float64 | |||||
// BYTES []byte | |||||
// TIMESTAMP time.Time | |||||
// DATE civil.Date | |||||
// TIME civil.Time | |||||
// DATETIME civil.DateTime | |||||
// NUMERIC *big.Rat | |||||
// | |||||
// The big.Rat type supports numbers of arbitrary size and precision. Values | |||||
// will be rounded to 9 digits after the decimal point before being transmitted | |||||
// to BigQuery. See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type | |||||
// for more on NUMERIC. | |||||
// | |||||
// A Go slice or array type is inferred to be a BigQuery repeated field of the | |||||
// element type. The element type must be one of the above listed types. | |||||
// | |||||
// Nullable fields are inferred from the NullXXX types, declared in this package: | |||||
// | |||||
// STRING NullString | |||||
// BOOL NullBool | |||||
// INTEGER NullInt64 | |||||
// FLOAT NullFloat64 | |||||
// TIMESTAMP NullTimestamp | |||||
// DATE NullDate | |||||
// TIME NullTime | |||||
// DATETIME NullDateTime | |||||
// | |||||
// For a nullable BYTES field, use the type []byte and tag the field "nullable" (see below). | |||||
// For a nullable NUMERIC field, use the type *big.Rat and tag the field "nullable". | |||||
// | |||||
// A struct field that is of struct type is inferred to be a required field of type | |||||
// RECORD with a schema inferred recursively. For backwards compatibility, a field of | |||||
// type pointer to struct is also inferred to be required. To get a nullable RECORD | |||||
// field, use the "nullable" tag (see below). | |||||
// | |||||
// InferSchema returns an error if any of the examined fields is of type uint, | |||||
// uint64, uintptr, map, interface, complex64, complex128, func, or chan. Future | |||||
// versions may handle these cases without error. | |||||
// | |||||
// Recursively defined structs are also disallowed. | |||||
// | |||||
// Struct fields may be tagged in a way similar to the encoding/json package. | |||||
// A tag of the form | |||||
// bigquery:"name" | |||||
// uses "name" instead of the struct field name as the BigQuery field name. | |||||
// A tag of the form | |||||
// bigquery:"-" | |||||
// omits the field from the inferred schema. | |||||
// The "nullable" option marks the field as nullable (not required). It is only | |||||
// needed for []byte, *big.Rat and pointer-to-struct fields, and cannot appear on other | |||||
// fields. In this example, the Go name of the field is retained: | |||||
// bigquery:",nullable" | |||||
func InferSchema(st interface{}) (Schema, error) { | |||||
return inferSchemaReflectCached(reflect.TypeOf(st)) | |||||
} | |||||
// TODO(jba): replace with sync.Map for Go 1.9.
// schemaCache memoizes schema inference per reflect.Type, since inference
// walks the type with reflection and is comparatively expensive.
var schemaCache atomiccache.Cache

// cacheVal is the value stored in schemaCache: the inferred schema for a
// type, or the error produced while inferring it.
type cacheVal struct {
	schema Schema
	err    error
}
func inferSchemaReflectCached(t reflect.Type) (Schema, error) { | |||||
cv := schemaCache.Get(t, func() interface{} { | |||||
s, err := inferSchemaReflect(t) | |||||
return cacheVal{s, err} | |||||
}).(cacheVal) | |||||
return cv.schema, cv.err | |||||
} | |||||
func inferSchemaReflect(t reflect.Type) (Schema, error) { | |||||
rec, err := hasRecursiveType(t, nil) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
if rec { | |||||
return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t) | |||||
} | |||||
return inferStruct(t) | |||||
} | |||||
func inferStruct(t reflect.Type) (Schema, error) { | |||||
switch t.Kind() { | |||||
case reflect.Ptr: | |||||
if t.Elem().Kind() != reflect.Struct { | |||||
return nil, errNoStruct | |||||
} | |||||
t = t.Elem() | |||||
fallthrough | |||||
case reflect.Struct: | |||||
return inferFields(t) | |||||
default: | |||||
return nil, errNoStruct | |||||
} | |||||
} | |||||
// inferFieldSchema infers the FieldSchema for a Go type.
//
// The ordering of checks below is behavior-critical: exact-type matches
// ([]byte, time.Time, the civil types, *big.Rat) are tried first, then the
// NullXXX wrapper types, then integer kinds, and only then the generic
// kind-based switch. Reordering them would change which rule claims a type.
func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
	// Only []byte and struct pointers can be tagged nullable.
	if nullable && !(rt == typeOfByteSlice || rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Struct) {
		return nil, errBadNullable
	}
	switch rt {
	case typeOfByteSlice:
		return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil
	case typeOfGoTime:
		return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
	case typeOfDate:
		return &FieldSchema{Required: true, Type: DateFieldType}, nil
	case typeOfTime:
		return &FieldSchema{Required: true, Type: TimeFieldType}, nil
	case typeOfDateTime:
		return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil
	case typeOfRat:
		return &FieldSchema{Required: !nullable, Type: NumericFieldType}, nil
	}
	// NullXXX wrapper types are inherently optional, so Required is false.
	if ft := nullableFieldType(rt); ft != "" {
		return &FieldSchema{Required: false, Type: ft}, nil
	}
	if isSupportedIntType(rt) || isSupportedUintType(rt) {
		return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
	}
	switch rt.Kind() {
	case reflect.Slice, reflect.Array:
		et := rt.Elem()
		if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) {
			// Multi dimensional slices/arrays are not supported by BigQuery
			return nil, errUnsupportedFieldType
		}
		if nullableFieldType(et) != "" {
			// Repeated nullable types are not supported by BigQuery.
			return nil, errUnsupportedFieldType
		}
		// Infer the element type, then mark the field repeated (and not
		// required — a REPEATED field cannot also be REQUIRED).
		f, err := inferFieldSchema(et, false)
		if err != nil {
			return nil, err
		}
		f.Repeated = true
		f.Required = false
		return f, nil
	case reflect.Ptr:
		if rt.Elem().Kind() != reflect.Struct {
			return nil, errUnsupportedFieldType
		}
		fallthrough
	case reflect.Struct:
		// Struct (or pointer-to-struct) becomes a RECORD with a recursively
		// inferred nested schema.
		nested, err := inferStruct(rt)
		if err != nil {
			return nil, err
		}
		return &FieldSchema{Required: !nullable, Type: RecordFieldType, Schema: nested}, nil
	case reflect.String:
		return &FieldSchema{Required: !nullable, Type: StringFieldType}, nil
	case reflect.Bool:
		return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil
	case reflect.Float32, reflect.Float64:
		return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
	default:
		// uint, uint64, uintptr, map, interface, complex, func, chan, etc.
		return nil, errUnsupportedFieldType
	}
}
// inferFields extracts all exported field types from struct type. | |||||
func inferFields(rt reflect.Type) (Schema, error) { | |||||
var s Schema | |||||
fields, err := fieldCache.Fields(rt) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
for _, field := range fields { | |||||
var nullable bool | |||||
for _, opt := range field.ParsedTag.([]string) { | |||||
if opt == nullableTagOption { | |||||
nullable = true | |||||
break | |||||
} | |||||
} | |||||
f, err := inferFieldSchema(field.Type, nullable) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
f.Name = field.Name | |||||
s = append(s, f) | |||||
} | |||||
return s, nil | |||||
} | |||||
// isSupportedIntType reports whether t is an int type that can be properly | |||||
// represented by the BigQuery INTEGER/INT64 type. | |||||
func isSupportedIntType(t reflect.Type) bool { | |||||
switch t.Kind() { | |||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: | |||||
return true | |||||
default: | |||||
return false | |||||
} | |||||
} | |||||
// isSupportedIntType reports whether t is a uint type that can be properly | |||||
// represented by the BigQuery INTEGER/INT64 type. | |||||
func isSupportedUintType(t reflect.Type) bool { | |||||
switch t.Kind() { | |||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32: | |||||
return true | |||||
default: | |||||
return false | |||||
} | |||||
} | |||||
// typeList is a linked list of reflect.Types. It serves as an immutable
// "seen" set during recursion detection: prepending a node never mutates the
// tail, so sibling recursive calls cannot observe each other's additions.
type typeList struct {
	t    reflect.Type
	next *typeList
}
func (l *typeList) has(t reflect.Type) bool { | |||||
for l != nil { | |||||
if l.t == t { | |||||
return true | |||||
} | |||||
l = l.next | |||||
} | |||||
return false | |||||
} | |||||
// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly, | |||||
// via exported fields. (Schema inference ignores unexported fields.) | |||||
func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) { | |||||
for t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice || t.Kind() == reflect.Array { | |||||
t = t.Elem() | |||||
} | |||||
if t.Kind() != reflect.Struct { | |||||
return false, nil | |||||
} | |||||
if seen.has(t) { | |||||
return true, nil | |||||
} | |||||
fields, err := fieldCache.Fields(t) | |||||
if err != nil { | |||||
return false, err | |||||
} | |||||
seen = &typeList{t, seen} | |||||
// Because seen is a linked list, additions to it from one field's | |||||
// recursive call will not affect the value for subsequent fields' calls. | |||||
for _, field := range fields { | |||||
ok, err := hasRecursiveType(field.Type, seen) | |||||
if err != nil { | |||||
return false, err | |||||
} | |||||
if ok { | |||||
return true, nil | |||||
} | |||||
} | |||||
return false, nil | |||||
} |
@@ -0,0 +1,920 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"fmt" | |||||
"math/big" | |||||
"reflect" | |||||
"testing" | |||||
"time" | |||||
"cloud.google.com/go/civil" | |||||
"cloud.google.com/go/internal/pretty" | |||||
"cloud.google.com/go/internal/testutil" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
func (fs *FieldSchema) GoString() string { | |||||
if fs == nil { | |||||
return "<nil>" | |||||
} | |||||
return fmt.Sprintf("{Name:%s Description:%s Repeated:%t Required:%t Type:%s Schema:%s}", | |||||
fs.Name, | |||||
fs.Description, | |||||
fs.Repeated, | |||||
fs.Required, | |||||
fs.Type, | |||||
fmt.Sprintf("%#v", fs.Schema), | |||||
) | |||||
} | |||||
func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema { | |||||
return &bq.TableFieldSchema{ | |||||
Description: desc, | |||||
Name: name, | |||||
Mode: mode, | |||||
Type: typ, | |||||
} | |||||
} | |||||
func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema { | |||||
return &FieldSchema{ | |||||
Description: desc, | |||||
Name: name, | |||||
Repeated: repeated, | |||||
Required: required, | |||||
Type: FieldType(typ), | |||||
} | |||||
} | |||||
// TestSchemaConversion checks that Schema.toBQ and bqToSchema are inverses
// for every field type and each mode (required, repeated, nullable),
// including nested RECORD schemas.
func TestSchemaConversion(t *testing.T) {
	testCases := []struct {
		schema   Schema
		bqSchema *bq.TableSchema
	}{
		{
			// required
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "STRING", false, true),
			},
		},
		{
			// repeated
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "STRING", "REPEATED"),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "STRING", true, false),
			},
		},
		{
			// nullable, string
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "STRING", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "STRING", false, false),
			},
		},
		{
			// integer
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "INTEGER", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "INTEGER", false, false),
			},
		},
		{
			// float
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "FLOAT", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "FLOAT", false, false),
			},
		},
		{
			// boolean
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "BOOLEAN", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "BOOLEAN", false, false),
			},
		},
		{
			// timestamp
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "TIMESTAMP", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "TIMESTAMP", false, false),
			},
		},
		{
			// civil times
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "f1", "TIME", ""),
					bqTableFieldSchema("desc", "f2", "DATE", ""),
					bqTableFieldSchema("desc", "f3", "DATETIME", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "f1", "TIME", false, false),
				fieldSchema("desc", "f2", "DATE", false, false),
				fieldSchema("desc", "f3", "DATETIME", false, false),
			},
		},
		{
			// numeric
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "n", "NUMERIC", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "n", "NUMERIC", false, false),
			},
		},
		{
			// nested
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					{
						Description: "An outer schema wrapping a nested schema",
						Name:        "outer",
						Mode:        "REQUIRED",
						Type:        "RECORD",
						Fields: []*bq.TableFieldSchema{
							bqTableFieldSchema("inner field", "inner", "STRING", ""),
						},
					},
				},
			},
			schema: Schema{
				&FieldSchema{
					Description: "An outer schema wrapping a nested schema",
					Name:        "outer",
					Required:    true,
					Type:        "RECORD",
					Schema: Schema{
						{
							Description: "inner field",
							Name:        "inner",
							Type:        "STRING",
						},
					},
				},
			},
		},
	}
	// Each case is checked in both directions: Go Schema -> API and back.
	for _, tc := range testCases {
		bqSchema := tc.schema.toBQ()
		if !testutil.Equal(bqSchema, tc.bqSchema) {
			t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v",
				pretty.Value(bqSchema), pretty.Value(tc.bqSchema))
		}
		schema := bqToSchema(tc.bqSchema)
		if !testutil.Equal(schema, tc.schema) {
			t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
		}
	}
}
// The following types exercise InferSchema over each family of supported Go
// scalar types.
type allStrings struct {
	String    string
	ByteSlice []byte
}
type allSignedIntegers struct {
	Int64 int64
	Int32 int32
	Int16 int16
	Int8  int8
	Int   int
}
// allUnsignedIntegers omits uint, uint64 and uintptr, which InferSchema
// rejects because they may not fit in a signed 64-bit integer.
type allUnsignedIntegers struct {
	Uint32 uint32
	Uint16 uint16
	Uint8  uint8
}
type allFloat struct {
	Float64 float64
	Float32 float32
	// NOTE: Complex32 and Complex64 are unsupported by BigQuery
}
type allBoolean struct {
	Bool bool
}
type allTime struct {
	Timestamp time.Time
	Time      civil.Time
	Date      civil.Date
	DateTime  civil.DateTime
}
type allNumeric struct {
	Numeric *big.Rat
}
func reqField(name, typ string) *FieldSchema { | |||||
return &FieldSchema{ | |||||
Name: name, | |||||
Type: FieldType(typ), | |||||
Required: true, | |||||
} | |||||
} | |||||
func optField(name, typ string) *FieldSchema { | |||||
return &FieldSchema{ | |||||
Name: name, | |||||
Type: FieldType(typ), | |||||
Required: false, | |||||
} | |||||
} | |||||
// TestSimpleInference checks InferSchema over flat structs of each supported
// scalar type family, for both value and pointer inputs.
func TestSimpleInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in: allSignedIntegers{},
			want: Schema{
				reqField("Int64", "INTEGER"),
				reqField("Int32", "INTEGER"),
				reqField("Int16", "INTEGER"),
				reqField("Int8", "INTEGER"),
				reqField("Int", "INTEGER"),
			},
		},
		{
			in: allUnsignedIntegers{},
			want: Schema{
				reqField("Uint32", "INTEGER"),
				reqField("Uint16", "INTEGER"),
				reqField("Uint8", "INTEGER"),
			},
		},
		{
			in: allFloat{},
			want: Schema{
				reqField("Float64", "FLOAT"),
				reqField("Float32", "FLOAT"),
			},
		},
		{
			in: allBoolean{},
			want: Schema{
				reqField("Bool", "BOOLEAN"),
			},
		},
		{
			// A pointer to struct infers the same schema as the struct itself.
			in: &allBoolean{},
			want: Schema{
				reqField("Bool", "BOOLEAN"),
			},
		},
		{
			in: allTime{},
			want: Schema{
				reqField("Timestamp", "TIMESTAMP"),
				reqField("Time", "TIME"),
				reqField("Date", "DATE"),
				reqField("DateTime", "DATETIME"),
			},
		},
		{
			in: &allNumeric{},
			want: Schema{
				reqField("Numeric", "NUMERIC"),
			},
		},
		{
			in: allStrings{},
			want: Schema{
				reqField("String", "STRING"),
				reqField("ByteSlice", "BYTES"),
			},
		},
	}
	for _, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
		}
		if !testutil.Equal(got, tc.want) {
			t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}
// containsNested has an unexported field (ignored by inference) and an
// anonymous nested struct, inferred as a RECORD.
type containsNested struct {
	hidden    string
	NotNested int
	Nested    struct {
		Inside int
	}
}
// containsDoubleNested nests a RECORD inside another RECORD.
type containsDoubleNested struct {
	NotNested int
	Nested    struct {
		InsideNested struct {
			Inside int
		}
	}
}
// ptrNested has a pointer-to-struct field, inferred as a required RECORD
// for backwards compatibility.
type ptrNested struct {
	Ptr *struct{ Inside int }
}
type dup struct { // more than one field of the same struct type
	A, B allBoolean
}
// TestNestedInference checks RECORD inference for nested structs,
// doubly-nested structs, pointer-to-struct fields, and repeated use of the
// same struct type within one parent.
func TestNestedInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in: containsNested{},
			want: Schema{
				reqField("NotNested", "INTEGER"),
				&FieldSchema{
					Name:     "Nested",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
		{
			in: containsDoubleNested{},
			want: Schema{
				reqField("NotNested", "INTEGER"),
				&FieldSchema{
					Name:     "Nested",
					Required: true,
					Type:     "RECORD",
					Schema: Schema{
						{
							Name:     "InsideNested",
							Required: true,
							Type:     "RECORD",
							Schema:   Schema{reqField("Inside", "INTEGER")},
						},
					},
				},
			},
		},
		{
			in: ptrNested{},
			want: Schema{
				&FieldSchema{
					Name:     "Ptr",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
		{
			in: dup{},
			want: Schema{
				&FieldSchema{
					Name:     "A",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Bool", "BOOLEAN")},
				},
				&FieldSchema{
					Name:     "B",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Bool", "BOOLEAN")},
				},
			},
		},
	}
	for _, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
		}
		if !testutil.Equal(got, tc.want) {
			t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}
// repeated covers slice and array fields, inferred as REPEATED; a plain
// []byte is BYTES, not repeated.
type repeated struct {
	NotRepeated       []byte
	RepeatedByteSlice [][]byte
	Slice             []int
	Array             [5]bool
}
// nestedRepeated covers slices of structs and of struct pointers, inferred
// as repeated RECORDs.
type nestedRepeated struct {
	NotRepeated int
	Repeated    []struct {
		Inside int
	}
	RepeatedPtr []*struct{ Inside int }
}
func repField(name, typ string) *FieldSchema { | |||||
return &FieldSchema{ | |||||
Name: name, | |||||
Type: FieldType(typ), | |||||
Repeated: true, | |||||
} | |||||
} | |||||
// TestRepeatedInference checks REPEATED inference for slices, arrays, and
// slices of (pointers to) structs.
func TestRepeatedInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in: repeated{},
			want: Schema{
				reqField("NotRepeated", "BYTES"),
				repField("RepeatedByteSlice", "BYTES"),
				repField("Slice", "INTEGER"),
				repField("Array", "BOOLEAN"),
			},
		},
		{
			in: nestedRepeated{},
			want: Schema{
				reqField("NotRepeated", "INTEGER"),
				{
					Name:     "Repeated",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
				{
					Name:     "RepeatedPtr",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
	}
	for i, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%d: error inferring TableSchema: %v", i, err)
		}
		if !testutil.Equal(got, tc.want) {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}
// allNulls has one field for each NullXXX wrapper type declared in this
// package; each should be inferred as an optional (non-required) field.
type allNulls struct {
	A NullInt64
	B NullFloat64
	C NullBool
	D NullString
	E NullTimestamp
	F NullTime
	G NullDate
	H NullDateTime
}
func TestNullInference(t *testing.T) { | |||||
got, err := InferSchema(allNulls{}) | |||||
if err != nil { | |||||
t.Fatal(err) | |||||
} | |||||
want := Schema{ | |||||
optField("A", "INTEGER"), | |||||
optField("B", "FLOAT"), | |||||
optField("C", "BOOLEAN"), | |||||
optField("D", "STRING"), | |||||
optField("E", "TIMESTAMP"), | |||||
optField("F", "TIME"), | |||||
optField("G", "DATE"), | |||||
optField("H", "DATETIME"), | |||||
} | |||||
if diff := testutil.Diff(got, want); diff != "" { | |||||
t.Error(diff) | |||||
} | |||||
} | |||||
// Embedded (exported) and embedded (unexported) are embedded into
// nestedEmbedded to check that promoted fields are inferred at top level.
type Embedded struct {
	Embedded int
}
type embedded struct {
	Embedded2 int
}
type nestedEmbedded struct {
	Embedded
	embedded
}
func TestEmbeddedInference(t *testing.T) { | |||||
got, err := InferSchema(nestedEmbedded{}) | |||||
if err != nil { | |||||
t.Fatal(err) | |||||
} | |||||
want := Schema{ | |||||
reqField("Embedded", "INTEGER"), | |||||
reqField("Embedded2", "INTEGER"), | |||||
} | |||||
if !testutil.Equal(got, want) { | |||||
t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want)) | |||||
} | |||||
} | |||||
func TestRecursiveInference(t *testing.T) { | |||||
type List struct { | |||||
Val int | |||||
Next *List | |||||
} | |||||
_, err := InferSchema(List{}) | |||||
if err == nil { | |||||
t.Fatal("got nil, want error") | |||||
} | |||||
} | |||||
// withTags exercises bigquery struct tag handling: exclusion ("-"), renames,
// and the "nullable" option on []byte and *big.Rat fields.
type withTags struct {
	NoTag         int
	ExcludeTag    int      `bigquery:"-"`
	SimpleTag     int      `bigquery:"simple_tag"`
	UnderscoreTag int      `bigquery:"_id"`
	MixedCase     int      `bigquery:"MIXEDcase"`
	Nullable      []byte   `bigquery:",nullable"`
	NullNumeric   *big.Rat `bigquery:",nullable"`
}
// withTagsNested checks tags on nested structs, anonymous nested structs,
// and the "nullable" option on pointer-to-struct fields.
type withTagsNested struct {
	Nested          withTags `bigquery:"nested"`
	NestedAnonymous struct {
		ExcludeTag int `bigquery:"-"`
		Inside     int `bigquery:"inside"`
	} `bigquery:"anon"`
	PNested         *struct{ X int } // not nullable, for backwards compatibility
	PNestedNullable *struct{ X int } `bigquery:",nullable"`
}
// withTagsRepeated checks tags on repeated struct fields.
type withTagsRepeated struct {
	Repeated          []withTags `bigquery:"repeated"`
	RepeatedAnonymous []struct {
		ExcludeTag int `bigquery:"-"`
		Inside     int `bigquery:"inside"`
	} `bigquery:"anon"`
}
// withTagsEmbedded checks that tags on promoted fields are honored.
type withTagsEmbedded struct {
	withTags
}
// withTagsSchema is the schema expected for withTags wherever it appears.
var withTagsSchema = Schema{
	reqField("NoTag", "INTEGER"),
	reqField("simple_tag", "INTEGER"),
	reqField("_id", "INTEGER"),
	reqField("MIXEDcase", "INTEGER"),
	optField("Nullable", "BYTES"),
	optField("NullNumeric", "NUMERIC"),
}
// TestTagInference checks that bigquery struct tags (renames, exclusion and
// "nullable") are applied at top level, inside nested and repeated RECORDs,
// and through embedded structs.
func TestTagInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in:   withTags{},
			want: withTagsSchema,
		},
		{
			in: withTagsNested{},
			want: Schema{
				&FieldSchema{
					Name:     "nested",
					Required: true,
					Type:     "RECORD",
					Schema:   withTagsSchema,
				},
				&FieldSchema{
					Name:     "anon",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("inside", "INTEGER")},
				},
				&FieldSchema{
					Name:     "PNested",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("X", "INTEGER")},
				},
				&FieldSchema{
					Name:     "PNestedNullable",
					Required: false,
					Type:     "RECORD",
					Schema:   Schema{reqField("X", "INTEGER")},
				},
			},
		},
		{
			in: withTagsRepeated{},
			want: Schema{
				&FieldSchema{
					Name:     "repeated",
					Repeated: true,
					Type:     "RECORD",
					Schema:   withTagsSchema,
				},
				&FieldSchema{
					Name:     "anon",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("inside", "INTEGER")},
				},
			},
		},
		{
			in:   withTagsEmbedded{},
			want: withTagsSchema,
		},
	}
	for i, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%d: error inferring TableSchema: %v", i, err)
		}
		if !testutil.Equal(got, tc.want) {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}
// TestTagInferenceErrors checks that invalid bigquery tag names (too long,
// illegal characters, leading digit, hyphens) yield errInvalidFieldName, and
// that an unknown tag option is rejected.
func TestTagInferenceErrors(t *testing.T) {
	testCases := []struct {
		in  interface{}
		err error
	}{
		{
			// 129 characters: one over the 128-character limit.
			in: struct {
				LongTag int `bigquery:"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				UnsupporedStartChar int `bigquery:"øab"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				UnsupportedEndChar int `bigquery:"abø"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				UnsupportedMiddleChar int `bigquery:"aøb"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				StartInt int `bigquery:"1abc"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				Hyphens int `bigquery:"a-b"`
			}{},
			err: errInvalidFieldName,
		},
	}
	for i, tc := range testCases {
		want := tc.err
		_, got := InferSchema(tc.in)
		if got != want {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want)
		}
	}
	// "optional" is not a recognized tag option; any error is acceptable.
	_, err := InferSchema(struct {
		X int `bigquery:",optional"`
	}{})
	if err == nil {
		t.Error("got nil, want error")
	}
}
// TestSchemaErrors verifies that InferSchema reports the expected error for
// Go types that cannot be mapped to a BigQuery schema.
func TestSchemaErrors(t *testing.T) {
	testCases := []struct {
		in  interface{}
		err error
	}{
		// Only structs can be inferred.
		{
			in:  []byte{},
			err: errNoStruct,
		},
		{
			in:  new(int),
			err: errNoStruct,
		},
		// Field types with no BigQuery equivalent.
		{
			in:  struct{ Uint uint }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Uint64 uint64 }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Uintptr uintptr }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Complex complex64 }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Map map[string]int }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Chan chan bool }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Ptr *int }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Interface interface{} }{},
			err: errUnsupportedFieldType,
		},
		// Nested repeated fields are not supported.
		{
			in:  struct{ MultiDimensional [][]int }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ MultiDimensional [][][]byte }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ SliceOfPointer []*int }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ SliceOfNull []NullInt64 }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ ChanSlice []chan bool }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ NestedChan struct{ Chan []chan bool } }{},
			err: errUnsupportedFieldType,
		},
		// ",nullable" is only valid on types with a Null* counterpart.
		{
			in: struct {
				X int `bigquery:",nullable"`
			}{},
			err: errBadNullable,
		},
		{
			in: struct {
				X bool `bigquery:",nullable"`
			}{},
			err: errBadNullable,
		},
		{
			in: struct {
				X struct{ N int } `bigquery:",nullable"`
			}{},
			err: errBadNullable,
		},
		{
			in: struct {
				X []int `bigquery:",nullable"`
			}{},
			err: errBadNullable,
		},
		// Pointers to slices are not supported.
		{
			in:  struct{ X *[]byte }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ X *[]int }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ X *int }{},
			err: errUnsupportedFieldType,
		},
	}
	for _, tc := range testCases {
		want := tc.err
		_, got := InferSchema(tc.in)
		if got != want {
			t.Errorf("%#v: got:\n%#v\nwant:\n%#v", tc.in, got, want)
		}
	}
}
// TestHasRecursiveType verifies detection of (transitively) self-referential
// struct types, which cannot be represented as a BigQuery schema.
func TestHasRecursiveType(t *testing.T) {
	type (
		nonStruct int
		nonRec    struct{ A string }
		dup       struct{ A, B nonRec } // same type twice is not recursion
		rec       struct {
			A int
			B *rec
		}
		recUnexported struct {
			A int
			b *rec // recursion only via an unexported field
		}
		hasRec struct {
			A int
			R *rec
		}
		recSlicePointer struct {
			A []*recSlicePointer
		}
	)
	for _, test := range []struct {
		in   interface{}
		want bool
	}{
		{nonStruct(0), false},
		{nonRec{}, false},
		{dup{}, false},
		{rec{}, true},
		// Expected false — presumably unexported fields are skipped; confirm
		// against hasRecursiveType.
		{recUnexported{}, false},
		{hasRec{}, true},
		{&recSlicePointer{}, true},
	} {
		got, err := hasRecursiveType(reflect.TypeOf(test.in), nil)
		if err != nil {
			t.Fatal(err)
		}
		if got != test.want {
			t.Errorf("%T: got %t, want %t", test.in, got, test.want)
		}
	}
}
@@ -0,0 +1,531 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"errors" | |||||
"fmt" | |||||
"time" | |||||
"cloud.google.com/go/internal/trace" | |||||
"golang.org/x/net/context" | |||||
"cloud.google.com/go/internal/optional" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// A Table is a reference to a BigQuery table.
type Table struct {
	// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
	// In this case the result will be stored in an ephemeral table.
	ProjectID string
	DatasetID string
	// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
	// The maximum length is 1,024 characters.
	TableID string
	// c is the client through which all API calls for this table are made.
	c *Client
}
// TableMetadata contains information about a BigQuery table.
type TableMetadata struct {
	// The following fields can be set when creating a table.
	// The user-friendly name for the table.
	Name string
	// The user-friendly description of the table.
	Description string
	// The table schema. If provided on create, ViewQuery must be empty.
	Schema Schema
	// The query to use for a view. If provided on create, Schema must be nil.
	ViewQuery string
	// Use Legacy SQL for the view query.
	// At most one of UseLegacySQL and UseStandardSQL can be true.
	UseLegacySQL bool
	// Use Standard SQL for the view query. The default.
	// At most one of UseLegacySQL and UseStandardSQL can be true.
	// Deprecated: use UseLegacySQL.
	UseStandardSQL bool
	// If non-nil, the table is partitioned by time.
	TimePartitioning *TimePartitioning
	// The time when this table expires. If not set, the table will persist
	// indefinitely. Expired tables will be deleted and their storage reclaimed.
	ExpirationTime time.Time
	// User-provided labels.
	Labels map[string]string
	// Information about a table stored outside of BigQuery.
	ExternalDataConfig *ExternalDataConfig
	// Custom encryption configuration (e.g., Cloud KMS keys).
	EncryptionConfig *EncryptionConfig
	// All the fields below are read-only.
	FullID string // An opaque ID uniquely identifying the table.
	Type   TableType
	CreationTime     time.Time
	LastModifiedTime time.Time
	// The size of the table in bytes.
	// This does not include data that is being buffered during a streaming insert.
	NumBytes int64
	// The number of rows of data in this table.
	// This does not include data that is being buffered during a streaming insert.
	NumRows uint64
	// Contains information regarding this table's streaming buffer, if one is
	// present. This field will be nil if the table is not being streamed to or if
	// there is no data in the streaming buffer.
	StreamingBuffer *StreamingBuffer
	// ETag is the ETag obtained when reading metadata. Pass it to Table.Update to
	// ensure that the metadata hasn't changed since it was read.
	ETag string
}
// TableCreateDisposition specifies the circumstances under which destination table will be created.
// Default is CreateIfNeeded.
type TableCreateDisposition string

const (
	// CreateIfNeeded will create the table if it does not already exist.
	// Tables are created atomically on successful completion of a job.
	CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"
	// CreateNever ensures the table must already exist and will not be
	// automatically created.
	CreateNever TableCreateDisposition = "CREATE_NEVER"
)
// TableWriteDisposition specifies how existing data in a destination table is treated.
// Default is WriteAppend.
type TableWriteDisposition string

const (
	// WriteAppend will append to any existing data in the destination table.
	// Data is appended atomically on successful completion of a job.
	WriteAppend TableWriteDisposition = "WRITE_APPEND"
	// WriteTruncate overrides the existing data in the destination table.
	// Data is overwritten atomically on successful completion of a job.
	WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"
	// WriteEmpty fails writes if the destination table already contains data.
	WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
)
// TableType is the type of table.
type TableType string

const (
	// RegularTable is an ordinary table holding data.
	RegularTable TableType = "TABLE"
	// ViewTable is a table defined by a SQL query.
	ViewTable TableType = "VIEW"
	// ExternalTable is a table whose data lives outside of BigQuery.
	ExternalTable TableType = "EXTERNAL"
)
// TimePartitioning describes the time-based date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables.
type TimePartitioning struct {
	// The amount of time to keep the storage for a partition.
	// If the duration is empty (0), the data in the partitions do not expire.
	Expiration time.Duration
	// If empty, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the
	// table is partitioned by this field. The field must be a top-level TIMESTAMP or
	// DATE field. Its mode must be NULLABLE or REQUIRED.
	Field string
}
func (p *TimePartitioning) toBQ() *bq.TimePartitioning { | |||||
if p == nil { | |||||
return nil | |||||
} | |||||
return &bq.TimePartitioning{ | |||||
Type: "DAY", | |||||
ExpirationMs: int64(p.Expiration / time.Millisecond), | |||||
Field: p.Field, | |||||
} | |||||
} | |||||
func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning { | |||||
if q == nil { | |||||
return nil | |||||
} | |||||
return &TimePartitioning{ | |||||
Expiration: time.Duration(q.ExpirationMs) * time.Millisecond, | |||||
Field: q.Field, | |||||
} | |||||
} | |||||
// EncryptionConfig configures customer-managed encryption on tables.
type EncryptionConfig struct {
	// Describes the Cloud KMS encryption key that will be used to protect
	// destination BigQuery table. The BigQuery Service Account associated with your
	// project requires access to this encryption key.
	KMSKeyName string
}
func (e *EncryptionConfig) toBQ() *bq.EncryptionConfiguration { | |||||
if e == nil { | |||||
return nil | |||||
} | |||||
return &bq.EncryptionConfiguration{ | |||||
KmsKeyName: e.KMSKeyName, | |||||
} | |||||
} | |||||
func bqToEncryptionConfig(q *bq.EncryptionConfiguration) *EncryptionConfig { | |||||
if q == nil { | |||||
return nil | |||||
} | |||||
return &EncryptionConfig{ | |||||
KMSKeyName: q.KmsKeyName, | |||||
} | |||||
} | |||||
// StreamingBuffer holds information about the streaming buffer.
type StreamingBuffer struct {
	// A lower-bound estimate of the number of bytes currently in the streaming
	// buffer.
	EstimatedBytes uint64
	// A lower-bound estimate of the number of rows currently in the streaming
	// buffer.
	EstimatedRows uint64
	// The time of the oldest entry in the streaming buffer.
	OldestEntryTime time.Time
}
func (t *Table) toBQ() *bq.TableReference { | |||||
return &bq.TableReference{ | |||||
ProjectId: t.ProjectID, | |||||
DatasetId: t.DatasetID, | |||||
TableId: t.TableID, | |||||
} | |||||
} | |||||
// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format. | |||||
func (t *Table) FullyQualifiedName() string { | |||||
return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID) | |||||
} | |||||
// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID. | |||||
func (t *Table) implicitTable() bool { | |||||
return t.ProjectID == "" && t.DatasetID == "" && t.TableID == "" | |||||
} | |||||
// Create creates a table in the BigQuery service. | |||||
// Pass in a TableMetadata value to configure the table. | |||||
// If tm.View.Query is non-empty, the created table will be of type VIEW. | |||||
// Expiration can only be set during table creation. | |||||
// After table creation, a view can be modified only if its table was initially created | |||||
// with a view. | |||||
func (t *Table) Create(ctx context.Context, tm *TableMetadata) (err error) { | |||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Create") | |||||
defer func() { trace.EndSpan(ctx, err) }() | |||||
table, err := tm.toBQ() | |||||
if err != nil { | |||||
return err | |||||
} | |||||
table.TableReference = &bq.TableReference{ | |||||
ProjectId: t.ProjectID, | |||||
DatasetId: t.DatasetID, | |||||
TableId: t.TableID, | |||||
} | |||||
req := t.c.bqs.Tables.Insert(t.ProjectID, t.DatasetID, table).Context(ctx) | |||||
setClientHeader(req.Header()) | |||||
_, err = req.Do() | |||||
return err | |||||
} | |||||
func (tm *TableMetadata) toBQ() (*bq.Table, error) { | |||||
t := &bq.Table{} | |||||
if tm == nil { | |||||
return t, nil | |||||
} | |||||
if tm.Schema != nil && tm.ViewQuery != "" { | |||||
return nil, errors.New("bigquery: provide Schema or ViewQuery, not both") | |||||
} | |||||
t.FriendlyName = tm.Name | |||||
t.Description = tm.Description | |||||
t.Labels = tm.Labels | |||||
if tm.Schema != nil { | |||||
t.Schema = tm.Schema.toBQ() | |||||
} | |||||
if tm.ViewQuery != "" { | |||||
if tm.UseStandardSQL && tm.UseLegacySQL { | |||||
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL") | |||||
} | |||||
t.View = &bq.ViewDefinition{Query: tm.ViewQuery} | |||||
if tm.UseLegacySQL { | |||||
t.View.UseLegacySql = true | |||||
} else { | |||||
t.View.UseLegacySql = false | |||||
t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql") | |||||
} | |||||
} else if tm.UseLegacySQL || tm.UseStandardSQL { | |||||
return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery") | |||||
} | |||||
t.TimePartitioning = tm.TimePartitioning.toBQ() | |||||
if !tm.ExpirationTime.IsZero() { | |||||
t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6 | |||||
} | |||||
if tm.ExternalDataConfig != nil { | |||||
edc := tm.ExternalDataConfig.toBQ() | |||||
t.ExternalDataConfiguration = &edc | |||||
} | |||||
t.EncryptionConfiguration = tm.EncryptionConfig.toBQ() | |||||
if tm.FullID != "" { | |||||
return nil, errors.New("cannot set FullID on create") | |||||
} | |||||
if tm.Type != "" { | |||||
return nil, errors.New("cannot set Type on create") | |||||
} | |||||
if !tm.CreationTime.IsZero() { | |||||
return nil, errors.New("cannot set CreationTime on create") | |||||
} | |||||
if !tm.LastModifiedTime.IsZero() { | |||||
return nil, errors.New("cannot set LastModifiedTime on create") | |||||
} | |||||
if tm.NumBytes != 0 { | |||||
return nil, errors.New("cannot set NumBytes on create") | |||||
} | |||||
if tm.NumRows != 0 { | |||||
return nil, errors.New("cannot set NumRows on create") | |||||
} | |||||
if tm.StreamingBuffer != nil { | |||||
return nil, errors.New("cannot set StreamingBuffer on create") | |||||
} | |||||
if tm.ETag != "" { | |||||
return nil, errors.New("cannot set ETag on create") | |||||
} | |||||
return t, nil | |||||
} | |||||
// Metadata fetches the metadata for the table. | |||||
func (t *Table) Metadata(ctx context.Context) (md *TableMetadata, err error) { | |||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Metadata") | |||||
defer func() { trace.EndSpan(ctx, err) }() | |||||
req := t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx) | |||||
setClientHeader(req.Header()) | |||||
var table *bq.Table | |||||
err = runWithRetry(ctx, func() (err error) { | |||||
table, err = req.Do() | |||||
return err | |||||
}) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return bqToTableMetadata(table) | |||||
} | |||||
func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) { | |||||
md := &TableMetadata{ | |||||
Description: t.Description, | |||||
Name: t.FriendlyName, | |||||
Type: TableType(t.Type), | |||||
FullID: t.Id, | |||||
Labels: t.Labels, | |||||
NumBytes: t.NumBytes, | |||||
NumRows: t.NumRows, | |||||
ExpirationTime: unixMillisToTime(t.ExpirationTime), | |||||
CreationTime: unixMillisToTime(t.CreationTime), | |||||
LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)), | |||||
ETag: t.Etag, | |||||
EncryptionConfig: bqToEncryptionConfig(t.EncryptionConfiguration), | |||||
} | |||||
if t.Schema != nil { | |||||
md.Schema = bqToSchema(t.Schema) | |||||
} | |||||
if t.View != nil { | |||||
md.ViewQuery = t.View.Query | |||||
md.UseLegacySQL = t.View.UseLegacySql | |||||
} | |||||
md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning) | |||||
if t.StreamingBuffer != nil { | |||||
md.StreamingBuffer = &StreamingBuffer{ | |||||
EstimatedBytes: t.StreamingBuffer.EstimatedBytes, | |||||
EstimatedRows: t.StreamingBuffer.EstimatedRows, | |||||
OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)), | |||||
} | |||||
} | |||||
if t.ExternalDataConfiguration != nil { | |||||
edc, err := bqToExternalDataConfig(t.ExternalDataConfiguration) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
md.ExternalDataConfig = edc | |||||
} | |||||
return md, nil | |||||
} | |||||
// Delete deletes the table. | |||||
func (t *Table) Delete(ctx context.Context) (err error) { | |||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Delete") | |||||
defer func() { trace.EndSpan(ctx, err) }() | |||||
req := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx) | |||||
setClientHeader(req.Header()) | |||||
return req.Do() | |||||
} | |||||
// Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator {
	return t.read(ctx, fetchPage)
}

// read builds a RowIterator over this table with an injectable pageFetcher —
// presumably so tests can stub the RPC layer; confirm against callers.
func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
	return newRowIterator(ctx, t, pf)
}
// Update modifies specific Table metadata fields. | |||||
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (md *TableMetadata, err error) { | |||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Update") | |||||
defer func() { trace.EndSpan(ctx, err) }() | |||||
bqt := tm.toBQ() | |||||
call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx) | |||||
setClientHeader(call.Header()) | |||||
if etag != "" { | |||||
call.Header().Set("If-Match", etag) | |||||
} | |||||
var res *bq.Table | |||||
if err := runWithRetry(ctx, func() (err error) { | |||||
res, err = call.Do() | |||||
return err | |||||
}); err != nil { | |||||
return nil, err | |||||
} | |||||
return bqToTableMetadata(res) | |||||
} | |||||
func (tm *TableMetadataToUpdate) toBQ() *bq.Table { | |||||
t := &bq.Table{} | |||||
forceSend := func(field string) { | |||||
t.ForceSendFields = append(t.ForceSendFields, field) | |||||
} | |||||
if tm.Description != nil { | |||||
t.Description = optional.ToString(tm.Description) | |||||
forceSend("Description") | |||||
} | |||||
if tm.Name != nil { | |||||
t.FriendlyName = optional.ToString(tm.Name) | |||||
forceSend("FriendlyName") | |||||
} | |||||
if tm.Schema != nil { | |||||
t.Schema = tm.Schema.toBQ() | |||||
forceSend("Schema") | |||||
} | |||||
if !tm.ExpirationTime.IsZero() { | |||||
t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6 | |||||
forceSend("ExpirationTime") | |||||
} | |||||
if tm.ViewQuery != nil { | |||||
t.View = &bq.ViewDefinition{ | |||||
Query: optional.ToString(tm.ViewQuery), | |||||
ForceSendFields: []string{"Query"}, | |||||
} | |||||
} | |||||
if tm.UseLegacySQL != nil { | |||||
if t.View == nil { | |||||
t.View = &bq.ViewDefinition{} | |||||
} | |||||
t.View.UseLegacySql = optional.ToBool(tm.UseLegacySQL) | |||||
t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql") | |||||
} | |||||
labels, forces, nulls := tm.update() | |||||
t.Labels = labels | |||||
t.ForceSendFields = append(t.ForceSendFields, forces...) | |||||
t.NullFields = append(t.NullFields, nulls...) | |||||
return t | |||||
} | |||||
// TableMetadataToUpdate is used when updating a table's metadata.
// Only non-nil fields will be updated.
type TableMetadataToUpdate struct {
	// The user-friendly description of this table.
	Description optional.String
	// The user-friendly name for this table.
	Name optional.String
	// The table's schema.
	// When updating a schema, you can add columns but not remove them.
	Schema Schema
	// The time when this table expires.
	ExpirationTime time.Time
	// The query to use for a view.
	ViewQuery optional.String
	// Use Legacy SQL for the view query.
	UseLegacySQL optional.Bool
	// labelUpdater provides SetLabel and DeleteLabel for staging label changes.
	labelUpdater
}
// labelUpdater contains common code for updating labels.
type labelUpdater struct {
	// setLabels holds labels to add or overwrite; nil until first SetLabel.
	setLabels map[string]string
	// deleteLabels holds label names to remove; nil until first DeleteLabel.
	deleteLabels map[string]bool
}
// SetLabel causes a label to be added or modified on a call to Update. | |||||
func (u *labelUpdater) SetLabel(name, value string) { | |||||
if u.setLabels == nil { | |||||
u.setLabels = map[string]string{} | |||||
} | |||||
u.setLabels[name] = value | |||||
} | |||||
// DeleteLabel causes a label to be deleted on a call to Update. | |||||
func (u *labelUpdater) DeleteLabel(name string) { | |||||
if u.deleteLabels == nil { | |||||
u.deleteLabels = map[string]bool{} | |||||
} | |||||
u.deleteLabels[name] = true | |||||
} | |||||
func (u *labelUpdater) update() (labels map[string]string, forces, nulls []string) { | |||||
if u.setLabels == nil && u.deleteLabels == nil { | |||||
return nil, nil, nil | |||||
} | |||||
labels = map[string]string{} | |||||
for k, v := range u.setLabels { | |||||
labels[k] = v | |||||
} | |||||
if len(labels) == 0 && len(u.deleteLabels) > 0 { | |||||
forces = []string{"Labels"} | |||||
} | |||||
for l := range u.deleteLabels { | |||||
nulls = append(nulls, "Labels."+l) | |||||
} | |||||
return labels, forces, nulls | |||||
} |
@@ -0,0 +1,295 @@ | |||||
// Copyright 2017 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"testing" | |||||
"time" | |||||
"cloud.google.com/go/internal/testutil" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// TestBQToTableMetadata verifies the conversion from the raw API Table to
// TableMetadata, for both the zero value and a fully-populated table.
func TestBQToTableMetadata(t *testing.T) {
	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	aTimeMillis := aTime.UnixNano() / 1e6
	for _, test := range []struct {
		in   *bq.Table
		want *TableMetadata
	}{
		{&bq.Table{}, &TableMetadata{}}, // test minimal case
		{
			&bq.Table{
				CreationTime:     aTimeMillis,
				Description:      "desc",
				Etag:             "etag",
				ExpirationTime:   aTimeMillis,
				FriendlyName:     "fname",
				Id:               "id",
				LastModifiedTime: uint64(aTimeMillis),
				Location:         "loc",
				NumBytes:         123,
				NumLongTermBytes: 23,
				NumRows:          7,
				StreamingBuffer: &bq.Streamingbuffer{
					EstimatedBytes:  11,
					EstimatedRows:   3,
					OldestEntryTime: uint64(aTimeMillis),
				},
				TimePartitioning: &bq.TimePartitioning{
					ExpirationMs: 7890,
					Type:         "DAY",
					Field:        "pfield",
				},
				EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
				Type:                    "EXTERNAL",
				View:                    &bq.ViewDefinition{Query: "view-query"},
				Labels:                  map[string]string{"a": "b"},
				ExternalDataConfiguration: &bq.ExternalDataConfiguration{
					SourceFormat: "GOOGLE_SHEETS",
				},
			},
			&TableMetadata{
				Description:        "desc",
				Name:               "fname",
				ViewQuery:          "view-query",
				FullID:             "id",
				Type:               ExternalTable,
				Labels:             map[string]string{"a": "b"},
				ExternalDataConfig: &ExternalDataConfig{SourceFormat: GoogleSheets},
				// Millisecond precision: the API carries times as millis.
				ExpirationTime:   aTime.Truncate(time.Millisecond),
				CreationTime:     aTime.Truncate(time.Millisecond),
				LastModifiedTime: aTime.Truncate(time.Millisecond),
				NumBytes:         123,
				NumRows:          7,
				TimePartitioning: &TimePartitioning{
					Expiration: 7890 * time.Millisecond,
					Field:      "pfield",
				},
				StreamingBuffer: &StreamingBuffer{
					EstimatedBytes:  11,
					EstimatedRows:   3,
					OldestEntryTime: aTime,
				},
				EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
				ETag:             "etag",
			},
		},
	} {
		got, err := bqToTableMetadata(test.in)
		if err != nil {
			t.Fatal(err)
		}
		if diff := testutil.Diff(got, test.want); diff != "" {
			t.Errorf("%+v:\n, -got, +want:\n%s", test.in, diff)
		}
	}
}
// TestTableMetadataToBQ verifies the conversion from TableMetadata to the
// raw API Table, including view/legacy-SQL handling, and checks that setting
// read-only or mutually exclusive fields yields an error.
func TestTableMetadataToBQ(t *testing.T) {
	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	aTimeMillis := aTime.UnixNano() / 1e6
	sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
	for _, test := range []struct {
		in   *TableMetadata
		want *bq.Table
	}{
		{nil, &bq.Table{}},
		{&TableMetadata{}, &bq.Table{}},
		{
			&TableMetadata{
				Name:               "n",
				Description:        "d",
				Schema:             sc,
				ExpirationTime:     aTime,
				Labels:             map[string]string{"a": "b"},
				ExternalDataConfig: &ExternalDataConfig{SourceFormat: Bigtable},
				EncryptionConfig:   &EncryptionConfig{KMSKeyName: "keyName"},
			},
			&bq.Table{
				FriendlyName: "n",
				Description:  "d",
				Schema: &bq.TableSchema{
					Fields: []*bq.TableFieldSchema{
						bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
					},
				},
				ExpirationTime:            aTimeMillis,
				Labels:                    map[string]string{"a": "b"},
				ExternalDataConfiguration: &bq.ExternalDataConfiguration{SourceFormat: "BIGTABLE"},
				EncryptionConfiguration:   &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
			},
		},
		{
			// A view defaults to standard SQL: UseLegacySql=false is force-sent.
			&TableMetadata{ViewQuery: "q"},
			&bq.Table{
				View: &bq.ViewDefinition{
					Query:           "q",
					UseLegacySql:    false,
					ForceSendFields: []string{"UseLegacySql"},
				},
			},
		},
		{
			&TableMetadata{
				ViewQuery:        "q",
				UseLegacySQL:     true,
				TimePartitioning: &TimePartitioning{},
			},
			&bq.Table{
				View: &bq.ViewDefinition{
					Query:        "q",
					UseLegacySql: true,
				},
				TimePartitioning: &bq.TimePartitioning{
					Type:         "DAY",
					ExpirationMs: 0,
				},
			},
		},
		{
			&TableMetadata{
				ViewQuery:      "q",
				UseStandardSQL: true,
				TimePartitioning: &TimePartitioning{
					Expiration: time.Second,
					Field:      "ofDreams",
				},
			},
			&bq.Table{
				View: &bq.ViewDefinition{
					Query:           "q",
					UseLegacySql:    false,
					ForceSendFields: []string{"UseLegacySql"},
				},
				TimePartitioning: &bq.TimePartitioning{
					Type:         "DAY",
					ExpirationMs: 1000,
					Field:        "ofDreams",
				},
			},
		},
	} {
		got, err := test.in.toBQ()
		if err != nil {
			t.Fatalf("%+v: %v", test.in, err)
		}
		if diff := testutil.Diff(got, test.want); diff != "" {
			t.Errorf("%+v:\n-got, +want:\n%s", test.in, diff)
		}
	}

	// Errors
	for _, in := range []*TableMetadata{
		{Schema: sc, ViewQuery: "q"}, // can't have both schema and query
		{UseLegacySQL: true},         // UseLegacySQL without query
		{UseStandardSQL: true},       // UseStandardSQL without query
		// read-only fields
		{FullID: "x"},
		{Type: "x"},
		{CreationTime: aTime},
		{LastModifiedTime: aTime},
		{NumBytes: 1},
		{NumRows: 1},
		{StreamingBuffer: &StreamingBuffer{}},
		{ETag: "x"},
	} {
		_, err := in.toBQ()
		if err == nil {
			t.Errorf("%+v: got nil, want error", in)
		}
	}
}
// TestTableMetadataToUpdateToBQ verifies that TableMetadataToUpdate produces
// the correct patch payload, including ForceSendFields/NullFields bookkeeping.
func TestTableMetadataToUpdateToBQ(t *testing.T) {
	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	for _, test := range []struct {
		tm   TableMetadataToUpdate
		want *bq.Table
	}{
		{
			tm:   TableMetadataToUpdate{},
			want: &bq.Table{},
		},
		{
			tm: TableMetadataToUpdate{
				Description: "d",
				Name:        "n",
			},
			want: &bq.Table{
				Description:     "d",
				FriendlyName:    "n",
				ForceSendFields: []string{"Description", "FriendlyName"},
			},
		},
		{
			tm: TableMetadataToUpdate{
				Schema:         Schema{fieldSchema("desc", "name", "STRING", false, true)},
				ExpirationTime: aTime,
			},
			want: &bq.Table{
				Schema: &bq.TableSchema{
					Fields: []*bq.TableFieldSchema{
						bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
					},
				},
				ExpirationTime:  aTime.UnixNano() / 1e6,
				ForceSendFields: []string{"Schema", "ExpirationTime"},
			},
		},
		{
			tm: TableMetadataToUpdate{ViewQuery: "q"},
			want: &bq.Table{
				View: &bq.ViewDefinition{Query: "q", ForceSendFields: []string{"Query"}},
			},
		},
		{
			// UseLegacySQL alone still creates a View in the payload.
			tm: TableMetadataToUpdate{UseLegacySQL: false},
			want: &bq.Table{
				View: &bq.ViewDefinition{
					UseLegacySql:    false,
					ForceSendFields: []string{"UseLegacySql"},
				},
			},
		},
		{
			tm: TableMetadataToUpdate{ViewQuery: "q", UseLegacySQL: true},
			want: &bq.Table{
				View: &bq.ViewDefinition{
					Query:           "q",
					UseLegacySql:    true,
					ForceSendFields: []string{"Query", "UseLegacySql"},
				},
			},
		},
		{
			// Label changes surface as Labels plus NullFields entries.
			tm: func() (tm TableMetadataToUpdate) {
				tm.SetLabel("L", "V")
				tm.DeleteLabel("D")
				return tm
			}(),
			want: &bq.Table{
				Labels:     map[string]string{"L": "V"},
				NullFields: []string{"Labels.D"},
			},
		},
	} {
		got := test.tm.toBQ()
		if !testutil.Equal(got, test.want) {
			t.Errorf("%+v:\ngot %+v\nwant %+v", test.tm, got, test.want)
		}
	}
}
@@ -0,0 +1,231 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"errors" | |||||
"fmt" | |||||
"reflect" | |||||
"cloud.google.com/go/internal/trace" | |||||
"golang.org/x/net/context" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// An Uploader does streaming inserts into a BigQuery table.
// It is safe for concurrent use.
type Uploader struct {
	// t is the destination table; set by Table.Uploader.
	t *Table

	// SkipInvalidRows causes rows containing invalid data to be silently
	// ignored. The default value is false, which causes the entire request to
	// fail if there is an attempt to insert an invalid row.
	SkipInvalidRows bool

	// IgnoreUnknownValues causes values not matching the schema to be ignored.
	// The default value is false, which causes records containing such values
	// to be treated as invalid records.
	IgnoreUnknownValues bool

	// A TableTemplateSuffix allows Uploaders to create tables automatically.
	//
	// Experimental: this option is experimental and may be modified or removed in future versions,
	// regardless of any other documented package stability guarantees.
	//
	// When you specify a suffix, the table you upload data to
	// will be used as a template for creating a new table, with the same schema,
	// called <table> + <suffix>.
	//
	// More information is available at
	// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
	TableTemplateSuffix string
}
// Uploader returns an Uploader that can be used to append rows to t. | |||||
// The returned Uploader may optionally be further configured before its Put method is called. | |||||
// | |||||
// To stream rows into a date-partitioned table at a particular date, add the | |||||
// $yyyymmdd suffix to the table name when constructing the Table. | |||||
func (t *Table) Uploader() *Uploader { | |||||
return &Uploader{t: t} | |||||
} | |||||
// Put uploads one or more rows to the BigQuery service. | |||||
// | |||||
// If src is ValueSaver, then its Save method is called to produce a row for uploading. | |||||
// | |||||
// If src is a struct or pointer to a struct, then a schema is inferred from it | |||||
// and used to create a StructSaver. The InsertID of the StructSaver will be | |||||
// empty. | |||||
// | |||||
// If src is a slice of ValueSavers, structs, or struct pointers, then each | |||||
// element of the slice is treated as above, and multiple rows are uploaded. | |||||
// | |||||
// Put returns a PutMultiError if one or more rows failed to be uploaded. | |||||
// The PutMultiError contains a RowInsertionError for each failed row. | |||||
// | |||||
// Put will retry on temporary errors (see | |||||
// https://cloud.google.com/bigquery/troubleshooting-errors). This can result | |||||
// in duplicate rows if you do not use insert IDs. Also, if the error persists, | |||||
// the call will run indefinitely. Pass a context with a timeout to prevent | |||||
// hanging calls. | |||||
func (u *Uploader) Put(ctx context.Context, src interface{}) (err error) { | |||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Uploader.Put") | |||||
defer func() { trace.EndSpan(ctx, err) }() | |||||
savers, err := valueSavers(src) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
return u.putMulti(ctx, savers) | |||||
} | |||||
func valueSavers(src interface{}) ([]ValueSaver, error) { | |||||
saver, ok, err := toValueSaver(src) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
if ok { | |||||
return []ValueSaver{saver}, nil | |||||
} | |||||
srcVal := reflect.ValueOf(src) | |||||
if srcVal.Kind() != reflect.Slice { | |||||
return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src) | |||||
} | |||||
var savers []ValueSaver | |||||
for i := 0; i < srcVal.Len(); i++ { | |||||
s := srcVal.Index(i).Interface() | |||||
saver, ok, err := toValueSaver(s) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
if !ok { | |||||
return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s) | |||||
} | |||||
savers = append(savers, saver) | |||||
} | |||||
return savers, nil | |||||
} | |||||
// toValueSaver makes a ValueSaver from x, which must implement ValueSaver
// already or be a struct or pointer to struct. The bool result reports
// whether a saver could be made; an error is returned only for inputs that
// are definitely invalid.
func toValueSaver(x interface{}) (ValueSaver, bool, error) {
	// A by-value StructSaver is rejected explicitly: Save has a pointer
	// receiver, so a plain StructSaver would fall through to schema
	// inference on the StructSaver type itself rather than on ss.Struct.
	if _, ok := x.(StructSaver); ok {
		return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver")
	}
	var insertID string
	// Handle StructSavers specially so we can infer the schema if necessary.
	if ss, ok := x.(*StructSaver); ok && ss.Schema == nil {
		// Unwrap to the inner struct, keeping the caller's InsertID.
		x = ss.Struct
		insertID = ss.InsertID
		// Fall through so we can infer the schema.
	}
	// Anything that already implements ValueSaver (including a *StructSaver
	// that carries its own schema) is used as-is.
	if saver, ok := x.(ValueSaver); ok {
		return saver, ok, nil
	}
	v := reflect.ValueOf(x)
	// Support Put with []interface{}: unwrap the interface element.
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		// Not a saver and not a struct; the caller decides how to report it.
		return nil, false, nil
	}
	// Infer the schema for the struct type (result is cached per type).
	schema, err := inferSchemaReflectCached(v.Type())
	if err != nil {
		return nil, false, err
	}
	return &StructSaver{
		Struct:   x,
		InsertID: insertID,
		Schema:   schema,
	}, true, nil
}
// putMulti uploads the given rows to the table in a single streaming-insert
// request, retrying on transient errors, and converts any per-row insert
// errors in the response into a PutMultiError.
func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
	req, err := u.newInsertRequest(src)
	if err != nil {
		return err
	}
	// A nil request means there were no rows; nothing to do.
	if req == nil {
		return nil
	}
	call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
	call = call.Context(ctx)
	setClientHeader(call.Header())
	var res *bq.TableDataInsertAllResponse
	// Retry the whole call on temporary errors; insert IDs (set in
	// newInsertRequest) let the service de-duplicate retried rows.
	err = runWithRetry(ctx, func() (err error) {
		res, err = call.Do()
		return err
	})
	if err != nil {
		return err
	}
	return handleInsertErrors(res.InsertErrors, req.Rows)
}
func (u *Uploader) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) { | |||||
if savers == nil { // If there are no rows, do nothing. | |||||
return nil, nil | |||||
} | |||||
req := &bq.TableDataInsertAllRequest{ | |||||
TemplateSuffix: u.TableTemplateSuffix, | |||||
IgnoreUnknownValues: u.IgnoreUnknownValues, | |||||
SkipInvalidRows: u.SkipInvalidRows, | |||||
} | |||||
for _, saver := range savers { | |||||
row, insertID, err := saver.Save() | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
if insertID == "" { | |||||
insertID = randomIDFn() | |||||
} | |||||
m := make(map[string]bq.JsonValue) | |||||
for k, v := range row { | |||||
m[k] = bq.JsonValue(v) | |||||
} | |||||
req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{ | |||||
InsertId: insertID, | |||||
Json: m, | |||||
}) | |||||
} | |||||
return req, nil | |||||
} | |||||
func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error { | |||||
if len(ierrs) == 0 { | |||||
return nil | |||||
} | |||||
var errs PutMultiError | |||||
for _, e := range ierrs { | |||||
if int(e.Index) > len(rows) { | |||||
return fmt.Errorf("internal error: unexpected row index: %v", e.Index) | |||||
} | |||||
rie := RowInsertionError{ | |||||
InsertID: rows[e.Index].InsertId, | |||||
RowIndex: int(e.Index), | |||||
} | |||||
for _, errp := range e.Errors { | |||||
rie.Errors = append(rie.Errors, bqToError(errp)) | |||||
} | |||||
errs = append(errs, rie) | |||||
} | |||||
return errs | |||||
} |
@@ -0,0 +1,211 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"errors" | |||||
"strconv" | |||||
"testing" | |||||
"github.com/google/go-cmp/cmp" | |||||
"cloud.google.com/go/internal/pretty" | |||||
"cloud.google.com/go/internal/testutil" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// testSaver is a ValueSaver stub that returns fixed values, used to drive
// newInsertRequest and valueSavers in tests.
type testSaver struct {
	row      map[string]Value // row returned by Save
	insertID string           // insert ID returned by Save
	err      error            // error returned by Save
}

// Save implements ValueSaver by returning the stub's fixed values.
func (ts testSaver) Save() (map[string]Value, string, error) {
	return ts.row, ts.insertID, ts.err
}
// TestNewInsertRequest checks that Uploader options and saver output are
// translated into the streaming-insert request body, and that missing
// insert IDs are filled in from randomIDFn.
func TestNewInsertRequest(t *testing.T) {
	// Stub randomIDFn with a deterministic counter so generated insert IDs
	// are predictable ("1", "2", ...); restore the original afterwards.
	// NOTE: the counter persists across test cases, so expected IDs below
	// continue the sequence from earlier cases.
	prev := randomIDFn
	n := 0
	randomIDFn = func() string { n++; return strconv.Itoa(n) }
	defer func() { randomIDFn = prev }()
	tests := []struct {
		ul     *Uploader
		savers []ValueSaver
		req    *bq.TableDataInsertAllRequest
	}{
		{
			// No rows: no request at all.
			ul:     &Uploader{},
			savers: nil,
			req:    nil,
		},
		{
			// Rows without insert IDs get generated ones ("1", "2").
			ul: &Uploader{},
			savers: []ValueSaver{
				testSaver{row: map[string]Value{"one": 1}},
				testSaver{row: map[string]Value{"two": 2}},
			},
			req: &bq.TableDataInsertAllRequest{
				Rows: []*bq.TableDataInsertAllRequestRows{
					{InsertId: "1", Json: map[string]bq.JsonValue{"one": 1}},
					{InsertId: "2", Json: map[string]bq.JsonValue{"two": 2}},
				},
			},
		},
		{
			// Uploader options are copied into the request; an explicit
			// insert ID ("a") is kept, an empty one gets the next generated
			// ID ("3").
			ul: &Uploader{
				TableTemplateSuffix: "suffix",
				IgnoreUnknownValues: true,
				SkipInvalidRows:     true,
			},
			savers: []ValueSaver{
				testSaver{insertID: "a", row: map[string]Value{"one": 1}},
				testSaver{insertID: "", row: map[string]Value{"two": 2}},
			},
			req: &bq.TableDataInsertAllRequest{
				Rows: []*bq.TableDataInsertAllRequestRows{
					{InsertId: "a", Json: map[string]bq.JsonValue{"one": 1}},
					{InsertId: "3", Json: map[string]bq.JsonValue{"two": 2}},
				},
				TemplateSuffix:      "suffix",
				SkipInvalidRows:     true,
				IgnoreUnknownValues: true,
			},
		},
	}
	for i, tc := range tests {
		got, err := tc.ul.newInsertRequest(tc.savers)
		if err != nil {
			t.Fatal(err)
		}
		want := tc.req
		if !testutil.Equal(got, want) {
			t.Errorf("%d: %#v: got %#v, want %#v", i, tc.ul, got, want)
		}
	}
}
func TestNewInsertRequestErrors(t *testing.T) { | |||||
var u Uploader | |||||
_, err := u.newInsertRequest([]ValueSaver{testSaver{err: errors.New("!")}}) | |||||
if err == nil { | |||||
t.Error("got nil, want error") | |||||
} | |||||
} | |||||
func TestHandleInsertErrors(t *testing.T) { | |||||
rows := []*bq.TableDataInsertAllRequestRows{ | |||||
{InsertId: "a"}, | |||||
{InsertId: "b"}, | |||||
} | |||||
for _, test := range []struct { | |||||
in []*bq.TableDataInsertAllResponseInsertErrors | |||||
want error | |||||
}{ | |||||
{ | |||||
in: nil, | |||||
want: nil, | |||||
}, | |||||
{ | |||||
in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}}, | |||||
want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}}, | |||||
}, | |||||
{ | |||||
in: []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}}, | |||||
want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}}, | |||||
}, | |||||
{ | |||||
in: []*bq.TableDataInsertAllResponseInsertErrors{ | |||||
{Errors: []*bq.ErrorProto{{Message: "m0"}}, Index: 0}, | |||||
{Errors: []*bq.ErrorProto{{Message: "m1"}}, Index: 1}, | |||||
}, | |||||
want: PutMultiError{ | |||||
RowInsertionError{InsertID: "a", RowIndex: 0, Errors: []error{&Error{Message: "m0"}}}, | |||||
RowInsertionError{InsertID: "b", RowIndex: 1, Errors: []error{&Error{Message: "m1"}}}, | |||||
}, | |||||
}, | |||||
} { | |||||
got := handleInsertErrors(test.in, rows) | |||||
if !testutil.Equal(got, test.want) { | |||||
t.Errorf("%#v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want) | |||||
} | |||||
} | |||||
} | |||||
// TestValueSavers exercises valueSavers over the supported input shapes:
// a single saver, a bare struct, slices of savers/structs/interfaces, and a
// *StructSaver without a schema (whose schema must be inferred).
func TestValueSavers(t *testing.T) {
	ts := &testSaver{}
	type T struct{ I int }
	schema, err := InferSchema(T{})
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range []struct {
		in   interface{}
		want []ValueSaver
	}{
		{[]interface{}(nil), nil},
		{[]interface{}{}, nil},
		{ts, []ValueSaver{ts}},
		// A bare struct is wrapped in a StructSaver with an inferred schema.
		{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
		{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
		{[]interface{}{ts, ts}, []ValueSaver{ts, ts}},
		{[]T{{I: 1}, {I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: T{I: 2}},
		}},
		{[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: &T{I: 2}},
		}},
		// A schemaless *StructSaver keeps its InsertID and gains a schema.
		{&StructSaver{Struct: T{I: 3}, InsertID: "foo"},
			[]ValueSaver{
				&StructSaver{Schema: schema, Struct: T{I: 3}, InsertID: "foo"},
			}},
	} {
		got, err := valueSavers(test.in)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want, cmp.AllowUnexported(testSaver{})) {
			t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
		}
		// Make sure Save is successful.
		for i, vs := range got {
			_, _, err := vs.Save()
			if err != nil {
				t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err)
			}
		}
	}
}
func TestValueSaversErrors(t *testing.T) { | |||||
inputs := []interface{}{ | |||||
nil, | |||||
1, | |||||
[]int{1, 2}, | |||||
[]interface{}{ | |||||
testSaver{row: map[string]Value{"one": 1}, insertID: "a"}, | |||||
1, | |||||
}, | |||||
StructSaver{}, | |||||
} | |||||
for _, in := range inputs { | |||||
if _, err := valueSavers(in); err == nil { | |||||
t.Errorf("%#v: got nil, want error", in) | |||||
} | |||||
} | |||||
} |
@@ -0,0 +1,871 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigquery | |||||
import ( | |||||
"encoding/base64" | |||||
"errors" | |||||
"fmt" | |||||
"math" | |||||
"math/big" | |||||
"reflect" | |||||
"strconv" | |||||
"strings" | |||||
"time" | |||||
"cloud.google.com/go/civil" | |||||
bq "google.golang.org/api/bigquery/v2" | |||||
) | |||||
// Value stores the contents of a single cell from a BigQuery result.
type Value interface{}

// ValueLoader stores a slice of Values representing a result row from a Read operation.
// See RowIterator.Next for more information.
type ValueLoader interface {
	// Load is called with a row's values and the schema describing them.
	Load(v []Value, s Schema) error
}
// valueList converts a []Value to implement ValueLoader. | |||||
type valueList []Value | |||||
// Load stores a sequence of values in a valueList. | |||||
// It resets the slice length to zero, then appends each value to it. | |||||
func (vs *valueList) Load(v []Value, _ Schema) error { | |||||
*vs = append((*vs)[:0], v...) | |||||
return nil | |||||
} | |||||
// valueMap converts a map[string]Value to implement ValueLoader. | |||||
type valueMap map[string]Value | |||||
// Load stores a sequence of values in a valueMap. | |||||
func (vm *valueMap) Load(v []Value, s Schema) error { | |||||
if *vm == nil { | |||||
*vm = map[string]Value{} | |||||
} | |||||
loadMap(*vm, v, s) | |||||
return nil | |||||
} | |||||
// loadMap recursively stores vals into m keyed by the schema field names.
// Scalar values and NULLs are stored as-is; a non-repeated nested record
// becomes a map[string]Value, and a repeated nested record becomes a
// []Value of such maps.
func loadMap(m map[string]Value, vals []Value, s Schema) {
	for i, f := range s {
		val := vals[i]
		var v interface{}
		switch {
		case val == nil:
			v = val
		case f.Schema == nil:
			// Scalar field: store the value unchanged.
			v = val
		case !f.Repeated:
			// Singly-nested record: convert its []Value into a map.
			m2 := map[string]Value{}
			loadMap(m2, val.([]Value), f.Schema)
			v = m2
		default: // repeated and nested
			sval := val.([]Value)
			vs := make([]Value, len(sval))
			for j, e := range sval {
				m2 := map[string]Value{}
				loadMap(m2, e.([]Value), f.Schema)
				vs[j] = m2
			}
			v = vs
		}
		m[f.Name] = v
	}
}
// structLoader implements ValueLoader for a pointer to a struct. The first
// call to set compiles the schema into a list of ops for the struct type;
// later calls must pass the same type.
type structLoader struct {
	typ reflect.Type // type of struct
	err error        // sticky error from compiling the schema
	ops []structLoaderOp

	vstructp reflect.Value // pointer to current struct value; changed by set
}

// A setFunc is a function that sets a struct field or slice/array
// element to a value.
type setFunc func(v reflect.Value, val interface{}) error

// A structLoaderOp instructs the loader to set a struct field to a row value.
type structLoaderOp struct {
	fieldIndex []int   // index path of the destination struct field
	valueIndex int     // position of the source value within the row
	setFunc    setFunc // how to assign the value to the field
	repeated   bool    // whether the schema field is repeated
}
var errNoNulls = errors.New("bigquery: NULL values cannot be read into structs") | |||||
func setAny(v reflect.Value, x interface{}) error { | |||||
if x == nil { | |||||
return errNoNulls | |||||
} | |||||
v.Set(reflect.ValueOf(x)) | |||||
return nil | |||||
} | |||||
func setInt(v reflect.Value, x interface{}) error { | |||||
if x == nil { | |||||
return errNoNulls | |||||
} | |||||
xx := x.(int64) | |||||
if v.OverflowInt(xx) { | |||||
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type()) | |||||
} | |||||
v.SetInt(xx) | |||||
return nil | |||||
} | |||||
func setUint(v reflect.Value, x interface{}) error { | |||||
if x == nil { | |||||
return errNoNulls | |||||
} | |||||
xx := x.(int64) | |||||
if xx < 0 || v.OverflowUint(uint64(xx)) { | |||||
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type()) | |||||
} | |||||
v.SetUint(uint64(xx)) | |||||
return nil | |||||
} | |||||
func setFloat(v reflect.Value, x interface{}) error { | |||||
if x == nil { | |||||
return errNoNulls | |||||
} | |||||
xx := x.(float64) | |||||
if v.OverflowFloat(xx) { | |||||
return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type()) | |||||
} | |||||
v.SetFloat(xx) | |||||
return nil | |||||
} | |||||
func setBool(v reflect.Value, x interface{}) error { | |||||
if x == nil { | |||||
return errNoNulls | |||||
} | |||||
v.SetBool(x.(bool)) | |||||
return nil | |||||
} | |||||
func setString(v reflect.Value, x interface{}) error { | |||||
if x == nil { | |||||
return errNoNulls | |||||
} | |||||
v.SetString(x.(string)) | |||||
return nil | |||||
} | |||||
func setBytes(v reflect.Value, x interface{}) error { | |||||
if x == nil { | |||||
v.SetBytes(nil) | |||||
} else { | |||||
v.SetBytes(x.([]byte)) | |||||
} | |||||
return nil | |||||
} | |||||
func setNull(v reflect.Value, x interface{}, build func() interface{}) error { | |||||
if x == nil { | |||||
v.Set(reflect.Zero(v.Type())) | |||||
} else { | |||||
n := build() | |||||
v.Set(reflect.ValueOf(n)) | |||||
} | |||||
return nil | |||||
} | |||||
// set remembers a value for the next call to Load. The value must be
// a pointer to a struct. (This is checked in RowIterator.Next.)
func (sl *structLoader) set(structp interface{}, schema Schema) error {
	if sl.err != nil {
		return sl.err
	}
	sl.vstructp = reflect.ValueOf(structp)
	typ := sl.vstructp.Type().Elem()
	if sl.typ == nil {
		// First call: remember the type and compile the schema.
		sl.typ = typ
		ops, err := compileToOps(typ, schema)
		if err != nil {
			// Remember the error so all later calls fail fast.
			sl.err = err
			return err
		}
		sl.ops = ops
	} else if sl.typ != typ {
		// The compiled ops are only valid for the first struct type seen.
		return fmt.Errorf("bigquery: struct type changed from %s to %s", sl.typ, typ)
	}
	return nil
}
// compileToOps produces a sequence of operations that will set the fields of a
// value of structType to the contents of a row with schema.
func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) {
	var ops []structLoaderOp
	fields, err := fieldCache.Fields(structType)
	if err != nil {
		return nil, err
	}
	for i, schemaField := range schema {
		// Look for an exported struct field with the same name as the schema
		// field, ignoring case (BigQuery column names are case-insensitive,
		// and we want to act like encoding/json anyway).
		structField := fields.Match(schemaField.Name)
		if structField == nil {
			// Ignore schema fields with no corresponding struct field.
			continue
		}
		op := structLoaderOp{
			fieldIndex: structField.Index,
			valueIndex: i,
		}
		t := structField.Type
		if schemaField.Repeated {
			// A repeated field must land in a slice or array; from here on t
			// is the element type, which is what the setFunc operates on.
			if t.Kind() != reflect.Slice && t.Kind() != reflect.Array {
				return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but struct field %s has type %s",
					schemaField.Name, structField.Name, t)
			}
			t = t.Elem()
			op.repeated = true
		}
		if schemaField.Type == RecordFieldType {
			// Field can be a struct or a pointer to a struct.
			if t.Kind() == reflect.Ptr {
				t = t.Elem()
			}
			if t.Kind() != reflect.Struct {
				return nil, fmt.Errorf("bigquery: field %s has type %s, expected struct or *struct",
					structField.Name, structField.Type)
			}
			// Compile the nested schema once and close over the result.
			nested, err := compileToOps(t, schemaField.Schema)
			if err != nil {
				return nil, err
			}
			op.setFunc = func(v reflect.Value, val interface{}) error {
				return setNested(nested, v, val)
			}
		} else {
			op.setFunc = determineSetFunc(t, schemaField.Type)
			if op.setFunc == nil {
				return nil, fmt.Errorf("bigquery: schema field %s of type %s is not assignable to struct field %s of type %s",
					schemaField.Name, schemaField.Type, structField.Name, t)
			}
		}
		ops = append(ops, op)
	}
	return ops, nil
}
// determineSetFunc chooses the best function for setting a field of type ftype
// to a value whose schema field type is stype. It returns nil if stype
// is not assignable to ftype.
// determineSetFunc considers only basic types. See compileToOps for
// handling of repetition and nesting.
// Each case also accepts the corresponding Null* wrapper type, which is
// handled via setNull so that NULLs become the zero wrapper value.
func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
	switch stype {
	case StringFieldType:
		if ftype.Kind() == reflect.String {
			return setString
		}
		if ftype == typeOfNullString {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullString{StringVal: x.(string), Valid: true}
				})
			}
		}
	case BytesFieldType:
		if ftype == typeOfByteSlice {
			return setBytes
		}
	case IntegerFieldType:
		// Check unsigned kinds before signed ones so each gets the
		// appropriate overflow check.
		if isSupportedUintType(ftype) {
			return setUint
		} else if isSupportedIntType(ftype) {
			return setInt
		}
		if ftype == typeOfNullInt64 {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullInt64{Int64: x.(int64), Valid: true}
				})
			}
		}
	case FloatFieldType:
		switch ftype.Kind() {
		case reflect.Float32, reflect.Float64:
			return setFloat
		}
		if ftype == typeOfNullFloat64 {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullFloat64{Float64: x.(float64), Valid: true}
				})
			}
		}
	case BooleanFieldType:
		if ftype.Kind() == reflect.Bool {
			return setBool
		}
		if ftype == typeOfNullBool {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullBool{Bool: x.(bool), Valid: true}
				})
			}
		}
	case TimestampFieldType:
		if ftype == typeOfGoTime {
			return setAny
		}
		if ftype == typeOfNullTimestamp {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullTimestamp{Timestamp: x.(time.Time), Valid: true}
				})
			}
		}
	case DateFieldType:
		if ftype == typeOfDate {
			return setAny
		}
		if ftype == typeOfNullDate {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullDate{Date: x.(civil.Date), Valid: true}
				})
			}
		}
	case TimeFieldType:
		if ftype == typeOfTime {
			return setAny
		}
		if ftype == typeOfNullTime {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullTime{Time: x.(civil.Time), Valid: true}
				})
			}
		}
	case DateTimeFieldType:
		if ftype == typeOfDateTime {
			return setAny
		}
		if ftype == typeOfNullDateTime {
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} {
					return NullDateTime{DateTime: x.(civil.DateTime), Valid: true}
				})
			}
		}
	case NumericFieldType:
		if ftype == typeOfRat {
			// *big.Rat is already a pointer, so NULL maps to a nil pointer.
			return func(v reflect.Value, x interface{}) error {
				return setNull(v, x, func() interface{} { return x.(*big.Rat) })
			}
		}
	}
	return nil
}
func (sl *structLoader) Load(values []Value, _ Schema) error { | |||||
if sl.err != nil { | |||||
return sl.err | |||||
} | |||||
return runOps(sl.ops, sl.vstructp.Elem(), values) | |||||
} | |||||
// runOps executes a sequence of ops, setting the fields of vstruct to the | |||||
// supplied values. | |||||
func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error { | |||||
for _, op := range ops { | |||||
field := vstruct.FieldByIndex(op.fieldIndex) | |||||
var err error | |||||
if op.repeated { | |||||
err = setRepeated(field, values[op.valueIndex].([]Value), op.setFunc) | |||||
} else { | |||||
err = op.setFunc(field, values[op.valueIndex]) | |||||
} | |||||
if err != nil { | |||||
return err | |||||
} | |||||
} | |||||
return nil | |||||
} | |||||
// setNested assigns the nested record value val to v by running the
// precompiled ops against it.
func setNested(ops []structLoaderOp, v reflect.Value, val interface{}) error {
	// v is either a struct or a pointer to a struct.
	if v.Kind() == reflect.Ptr {
		// If the value is nil, set the pointer to nil.
		if val == nil {
			v.Set(reflect.Zero(v.Type()))
			return nil
		}
		// If the pointer is nil, set it to a zero struct value.
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		v = v.Elem()
	}
	return runOps(ops, v, val.([]Value))
}
// setRepeated stores the values in vslice into field, which must be a slice
// or an array, applying setElem to each element. A slice is resized to fit
// exactly; for an array, elements beyond len(vslice) are zeroed and values
// beyond the array length are dropped.
func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error {
	vlen := len(vslice)
	var flen int
	switch field.Type().Kind() {
	case reflect.Slice:
		// Make a slice of the right size, avoiding allocation if possible.
		switch {
		case field.Len() < vlen:
			field.Set(reflect.MakeSlice(field.Type(), vlen, vlen))
		case field.Len() > vlen:
			field.SetLen(vlen)
		}
		flen = vlen
	case reflect.Array:
		flen = field.Len()
		if flen > vlen {
			// Set extra elements to their zero value.
			z := reflect.Zero(field.Type().Elem())
			for i := vlen; i < flen; i++ {
				field.Index(i).Set(z)
			}
		}
	default:
		// compileToOps only emits repeated ops for slices and arrays.
		return fmt.Errorf("bigquery: impossible field type %s", field.Type())
	}
	for i, val := range vslice {
		if i < flen { // avoid writing past the end of a short array
			if err := setElem(field.Index(i), val); err != nil {
				return err
			}
		}
	}
	return nil
}
// A ValueSaver returns a row of data to be inserted into a table.
type ValueSaver interface {
	// Save returns a row to be inserted into a BigQuery table, represented
	// as a map from field name to Value.
	// If insertID is non-empty, BigQuery will use it to de-duplicate
	// insertions of this row on a best-effort basis.
	Save() (row map[string]Value, insertID string, err error)
}
// ValuesSaver implements ValueSaver for a slice of Values. | |||||
type ValuesSaver struct { | |||||
Schema Schema | |||||
// If non-empty, BigQuery will use InsertID to de-duplicate insertions | |||||
// of this row on a best-effort basis. | |||||
InsertID string | |||||
Row []Value | |||||
} | |||||
// Save implements ValueSaver. | |||||
func (vls *ValuesSaver) Save() (map[string]Value, string, error) { | |||||
m, err := valuesToMap(vls.Row, vls.Schema) | |||||
return m, vls.InsertID, err | |||||
} | |||||
func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) { | |||||
if len(vs) != len(schema) { | |||||
return nil, errors.New("Schema does not match length of row to be inserted") | |||||
} | |||||
m := make(map[string]Value) | |||||
for i, fieldSchema := range schema { | |||||
if vs[i] == nil { | |||||
m[fieldSchema.Name] = nil | |||||
continue | |||||
} | |||||
if fieldSchema.Type != RecordFieldType { | |||||
m[fieldSchema.Name] = toUploadValue(vs[i], fieldSchema) | |||||
continue | |||||
} | |||||
// Nested record, possibly repeated. | |||||
vals, ok := vs[i].([]Value) | |||||
if !ok { | |||||
return nil, errors.New("nested record is not a []Value") | |||||
} | |||||
if !fieldSchema.Repeated { | |||||
value, err := valuesToMap(vals, fieldSchema.Schema) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
m[fieldSchema.Name] = value | |||||
continue | |||||
} | |||||
// A repeated nested field is converted into a slice of maps. | |||||
var maps []Value | |||||
for _, v := range vals { | |||||
sv, ok := v.([]Value) | |||||
if !ok { | |||||
return nil, errors.New("nested record in slice is not a []Value") | |||||
} | |||||
value, err := valuesToMap(sv, fieldSchema.Schema) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
maps = append(maps, value) | |||||
} | |||||
m[fieldSchema.Name] = maps | |||||
} | |||||
return m, nil | |||||
} | |||||
// StructSaver implements ValueSaver for a struct.
// The struct is converted to a map of values by using the values of struct
// fields corresponding to schema fields. Additional and missing
// fields are ignored, as are nested struct pointers that are nil.
type StructSaver struct {
	// Schema determines what fields of the struct are uploaded. It should
	// match the table's schema.
	// Schema is optional for StructSavers that are passed to Uploader.Put.
	Schema Schema

	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
	// of this row on a best-effort basis.
	InsertID string

	// Struct should be a struct or a pointer to a struct.
	Struct interface{}
}
// Save implements ValueSaver. | |||||
func (ss *StructSaver) Save() (row map[string]Value, insertID string, err error) { | |||||
vstruct := reflect.ValueOf(ss.Struct) | |||||
row, err = structToMap(vstruct, ss.Schema) | |||||
if err != nil { | |||||
return nil, "", err | |||||
} | |||||
return row, ss.InsertID, nil | |||||
} | |||||
func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) { | |||||
if vstruct.Kind() == reflect.Ptr { | |||||
vstruct = vstruct.Elem() | |||||
} | |||||
if !vstruct.IsValid() { | |||||
return nil, nil | |||||
} | |||||
m := map[string]Value{} | |||||
if vstruct.Kind() != reflect.Struct { | |||||
return nil, fmt.Errorf("bigquery: type is %s, need struct or struct pointer", vstruct.Type()) | |||||
} | |||||
fields, err := fieldCache.Fields(vstruct.Type()) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
for _, schemaField := range schema { | |||||
// Look for an exported struct field with the same name as the schema | |||||
// field, ignoring case. | |||||
structField := fields.Match(schemaField.Name) | |||||
if structField == nil { | |||||
continue | |||||
} | |||||
val, err := structFieldToUploadValue(vstruct.FieldByIndex(structField.Index), schemaField) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
// Add the value to the map, unless it is nil. | |||||
if val != nil { | |||||
m[schemaField.Name] = val | |||||
} | |||||
} | |||||
return m, nil | |||||
} | |||||
// structFieldToUploadValue converts a struct field to a value suitable for ValueSaver.Save, using | |||||
// the schemaField as a guide. | |||||
// structFieldToUploadValue is careful to return a true nil interface{} when needed, so its | |||||
// caller can easily identify a nil value. | |||||
func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) { | |||||
if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) { | |||||
return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s", | |||||
schemaField.Name, vfield.Type()) | |||||
} | |||||
// A non-nested field can be represented by its Go value, except for some types. | |||||
if schemaField.Type != RecordFieldType { | |||||
return toUploadValueReflect(vfield, schemaField), nil | |||||
} | |||||
// A non-repeated nested field is converted into a map[string]Value. | |||||
if !schemaField.Repeated { | |||||
m, err := structToMap(vfield, schemaField.Schema) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
if m == nil { | |||||
return nil, nil | |||||
} | |||||
return m, nil | |||||
} | |||||
// A repeated nested field is converted into a slice of maps. | |||||
if vfield.Len() == 0 { | |||||
return nil, nil | |||||
} | |||||
var vals []Value | |||||
for i := 0; i < vfield.Len(); i++ { | |||||
m, err := structToMap(vfield.Index(i), schemaField.Schema) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
vals = append(vals, m) | |||||
} | |||||
return vals, nil | |||||
} | |||||
func toUploadValue(val interface{}, fs *FieldSchema) interface{} { | |||||
if fs.Type == TimeFieldType || fs.Type == DateTimeFieldType || fs.Type == NumericFieldType { | |||||
return toUploadValueReflect(reflect.ValueOf(val), fs) | |||||
} | |||||
return val | |||||
} | |||||
// toUploadValueReflect converts a reflect.Value for upload, guided by
// the field schema. TIME, DATETIME and NUMERIC values are rendered as
// BigQuery SQL strings; other types pass through, except that an empty
// repeated field becomes nil so it is omitted.
func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} {
	switch fs.Type {
	case TimeFieldType:
		// NullTime values are passed through unconverted.
		if v.Type() == typeOfNullTime {
			return v.Interface()
		}
		return formatUploadValue(v, fs, func(v reflect.Value) string {
			return CivilTimeString(v.Interface().(civil.Time))
		})
	case DateTimeFieldType:
		// NullDateTime values are passed through unconverted.
		if v.Type() == typeOfNullDateTime {
			return v.Interface()
		}
		return formatUploadValue(v, fs, func(v reflect.Value) string {
			return CivilDateTimeString(v.Interface().(civil.DateTime))
		})
	case NumericFieldType:
		// A nil *big.Rat means "no value": return a true nil interface
		// so the caller omits the field.
		if r, ok := v.Interface().(*big.Rat); ok && r == nil {
			return nil
		}
		return formatUploadValue(v, fs, func(v reflect.Value) string {
			return NumericString(v.Interface().(*big.Rat))
		})
	default:
		if !fs.Repeated || v.Len() > 0 {
			return v.Interface()
		}
		// The service treats a null repeated field as an error. Return
		// nil to omit the field entirely.
		return nil
	}
}
func formatUploadValue(v reflect.Value, fs *FieldSchema, cvt func(reflect.Value) string) interface{} { | |||||
if !fs.Repeated { | |||||
return cvt(v) | |||||
} | |||||
if v.Len() == 0 { | |||||
return nil | |||||
} | |||||
s := make([]string, v.Len()) | |||||
for i := 0; i < v.Len(); i++ { | |||||
s[i] = cvt(v.Index(i)) | |||||
} | |||||
return s | |||||
} | |||||
// CivilTimeString returns a string representing a civil.Time in a format compatible | |||||
// with BigQuery SQL. It rounds the time to the nearest microsecond and returns a | |||||
// string with six digits of sub-second precision. | |||||
// | |||||
// Use CivilTimeString when using civil.Time in DML, for example in INSERT | |||||
// statements. | |||||
func CivilTimeString(t civil.Time) string { | |||||
if t.Nanosecond == 0 { | |||||
return t.String() | |||||
} else { | |||||
micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond | |||||
t.Nanosecond = 0 | |||||
return t.String() + fmt.Sprintf(".%06d", micro) | |||||
} | |||||
} | |||||
// CivilDateTimeString returns a string representing a civil.DateTime in a format compatible | |||||
// with BigQuery SQL. It separate the date and time with a space, and formats the time | |||||
// with CivilTimeString. | |||||
// | |||||
// Use CivilDateTimeString when using civil.DateTime in DML, for example in INSERT | |||||
// statements. | |||||
func CivilDateTimeString(dt civil.DateTime) string { | |||||
return dt.Date.String() + " " + CivilTimeString(dt.Time) | |||||
} | |||||
// parseCivilDateTime parses a date-time represented in a BigQuery SQL | |||||
// compatible format and returns a civil.DateTime. | |||||
func parseCivilDateTime(s string) (civil.DateTime, error) { | |||||
parts := strings.Fields(s) | |||||
if len(parts) != 2 { | |||||
return civil.DateTime{}, fmt.Errorf("bigquery: bad DATETIME value %q", s) | |||||
} | |||||
return civil.ParseDateTime(parts[0] + "T" + parts[1]) | |||||
} | |||||
const (
	// NumericPrecisionDigits is the maximum number of digits in a NUMERIC value.
	NumericPrecisionDigits = 38
	// NumericScaleDigits is the maximum number of digits after the decimal point in a NUMERIC value.
	NumericScaleDigits = 9
)
// NumericString returns a string representing a *big.Rat in a format compatible | |||||
// with BigQuery SQL. It returns a floating-point literal with 9 digits | |||||
// after the decimal point. | |||||
func NumericString(r *big.Rat) string { | |||||
return r.FloatString(NumericScaleDigits) | |||||
} | |||||
// convertRows converts a series of TableRows into a series of Value slices. | |||||
// schema is used to interpret the data from rows; its length must match the | |||||
// length of each row. | |||||
func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) { | |||||
var rs [][]Value | |||||
for _, r := range rows { | |||||
row, err := convertRow(r, schema) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
rs = append(rs, row) | |||||
} | |||||
return rs, nil | |||||
} | |||||
func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) { | |||||
if len(schema) != len(r.F) { | |||||
return nil, errors.New("schema length does not match row length") | |||||
} | |||||
var values []Value | |||||
for i, cell := range r.F { | |||||
fs := schema[i] | |||||
v, err := convertValue(cell.V, fs.Type, fs.Schema) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
values = append(values, v) | |||||
} | |||||
return values, nil | |||||
} | |||||
// convertValue converts one cell value returned by the service into a
// Value with a concrete Go type chosen by typ. Scalars arrive as
// strings, repeated fields as []interface{}, and nested records as
// map[string]interface{}; anything else is an error.
func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) {
	switch val := val.(type) {
	case nil:
		return nil, nil
	case []interface{}:
		return convertRepeatedRecord(val, typ, schema)
	case map[string]interface{}:
		return convertNestedRecord(val, schema)
	case string:
		return convertBasicType(val, typ)
	default:
		return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ)
	}
}
func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) { | |||||
var values []Value | |||||
for _, cell := range vals { | |||||
// each cell contains a single entry, keyed by "v" | |||||
val := cell.(map[string]interface{})["v"] | |||||
v, err := convertValue(val, typ, schema) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
values = append(values, v) | |||||
} | |||||
return values, nil | |||||
} | |||||
func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) { | |||||
// convertNestedRecord is similar to convertRow, as a record has the same structure as a row. | |||||
// Nested records are wrapped in a map with a single key, "f". | |||||
record := val["f"].([]interface{}) | |||||
if len(record) != len(schema) { | |||||
return nil, errors.New("schema length does not match record length") | |||||
} | |||||
var values []Value | |||||
for i, cell := range record { | |||||
// each cell contains a single entry, keyed by "v" | |||||
val := cell.(map[string]interface{})["v"] | |||||
fs := schema[i] | |||||
v, err := convertValue(val, fs.Type, fs.Schema) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
values = append(values, v) | |||||
} | |||||
return values, nil | |||||
} | |||||
// convertBasicType returns val as an interface with a concrete type specified by typ. | |||||
func convertBasicType(val string, typ FieldType) (Value, error) { | |||||
switch typ { | |||||
case StringFieldType: | |||||
return val, nil | |||||
case BytesFieldType: | |||||
return base64.StdEncoding.DecodeString(val) | |||||
case IntegerFieldType: | |||||
return strconv.ParseInt(val, 10, 64) | |||||
case FloatFieldType: | |||||
return strconv.ParseFloat(val, 64) | |||||
case BooleanFieldType: | |||||
return strconv.ParseBool(val) | |||||
case TimestampFieldType: | |||||
f, err := strconv.ParseFloat(val, 64) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
secs := math.Trunc(f) | |||||
nanos := (f - secs) * 1e9 | |||||
return Value(time.Unix(int64(secs), int64(nanos)).UTC()), nil | |||||
case DateFieldType: | |||||
return civil.ParseDate(val) | |||||
case TimeFieldType: | |||||
return civil.ParseTime(val) | |||||
case DateTimeFieldType: | |||||
return civil.ParseDateTime(val) | |||||
case NumericFieldType: | |||||
r, ok := (&big.Rat{}).SetString(val) | |||||
if !ok { | |||||
return nil, fmt.Errorf("bigquery: invalid NUMERIC value %q", val) | |||||
} | |||||
return Value(r), nil | |||||
default: | |||||
return nil, fmt.Errorf("unrecognized type: %s", typ) | |||||
} | |||||
} |
@@ -0,0 +1,578 @@ | |||||
// Copyright 2015 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bigtable | |||||
import ( | |||||
"fmt" | |||||
"math" | |||||
"sort" | |||||
"strings" | |||||
"testing" | |||||
"time" | |||||
"cloud.google.com/go/internal/testutil" | |||||
"github.com/golang/protobuf/proto" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/iterator" | |||||
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" | |||||
) | |||||
func TestAdminIntegration(t *testing.T) { | |||||
testEnv, err := NewIntegrationEnv() | |||||
if err != nil { | |||||
t.Fatalf("IntegrationEnv: %v", err) | |||||
} | |||||
defer testEnv.Close() | |||||
timeout := 2 * time.Second | |||||
if testEnv.Config().UseProd { | |||||
timeout = 5 * time.Minute | |||||
} | |||||
ctx, _ := context.WithTimeout(context.Background(), timeout) | |||||
adminClient, err := testEnv.NewAdminClient() | |||||
if err != nil { | |||||
t.Fatalf("NewAdminClient: %v", err) | |||||
} | |||||
defer adminClient.Close() | |||||
iAdminClient, err := testEnv.NewInstanceAdminClient() | |||||
if err != nil { | |||||
t.Fatalf("NewInstanceAdminClient: %v", err) | |||||
} | |||||
if iAdminClient != nil { | |||||
defer iAdminClient.Close() | |||||
iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance) | |||||
if err != nil { | |||||
t.Errorf("InstanceInfo: %v", err) | |||||
} | |||||
if iInfo.Name != adminClient.instance { | |||||
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance) | |||||
} | |||||
} | |||||
list := func() []string { | |||||
tbls, err := adminClient.Tables(ctx) | |||||
if err != nil { | |||||
t.Fatalf("Fetching list of tables: %v", err) | |||||
} | |||||
sort.Strings(tbls) | |||||
return tbls | |||||
} | |||||
containsAll := func(got, want []string) bool { | |||||
gotSet := make(map[string]bool) | |||||
for _, s := range got { | |||||
gotSet[s] = true | |||||
} | |||||
for _, s := range want { | |||||
if !gotSet[s] { | |||||
return false | |||||
} | |||||
} | |||||
return true | |||||
} | |||||
defer adminClient.DeleteTable(ctx, "mytable") | |||||
if err := adminClient.CreateTable(ctx, "mytable"); err != nil { | |||||
t.Fatalf("Creating table: %v", err) | |||||
} | |||||
defer adminClient.DeleteTable(ctx, "myothertable") | |||||
if err := adminClient.CreateTable(ctx, "myothertable"); err != nil { | |||||
t.Fatalf("Creating table: %v", err) | |||||
} | |||||
if got, want := list(), []string{"myothertable", "mytable"}; !containsAll(got, want) { | |||||
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) | |||||
} | |||||
must(adminClient.WaitForReplication(ctx, "mytable")) | |||||
if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil { | |||||
t.Fatalf("Deleting table: %v", err) | |||||
} | |||||
tables := list() | |||||
if got, want := tables, []string{"mytable"}; !containsAll(got, want) { | |||||
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) | |||||
} | |||||
if got, unwanted := tables, []string{"myothertable"}; containsAll(got, unwanted) { | |||||
t.Errorf("adminClient.Tables return %#v. unwanted %#v", got, unwanted) | |||||
} | |||||
tblConf := TableConf{ | |||||
TableID: "conftable", | |||||
Families: map[string]GCPolicy{ | |||||
"fam1": MaxVersionsPolicy(1), | |||||
"fam2": MaxVersionsPolicy(2), | |||||
}, | |||||
} | |||||
if err := adminClient.CreateTableFromConf(ctx, &tblConf); err != nil { | |||||
t.Fatalf("Creating table from TableConf: %v", err) | |||||
} | |||||
defer adminClient.DeleteTable(ctx, tblConf.TableID) | |||||
tblInfo, err := adminClient.TableInfo(ctx, tblConf.TableID) | |||||
if err != nil { | |||||
t.Fatalf("Getting table info: %v", err) | |||||
} | |||||
sort.Strings(tblInfo.Families) | |||||
wantFams := []string{"fam1", "fam2"} | |||||
if !testutil.Equal(tblInfo.Families, wantFams) { | |||||
t.Errorf("Column family mismatch, got %v, want %v", tblInfo.Families, wantFams) | |||||
} | |||||
// Populate mytable and drop row ranges | |||||
if err = adminClient.CreateColumnFamily(ctx, "mytable", "cf"); err != nil { | |||||
t.Fatalf("Creating column family: %v", err) | |||||
} | |||||
client, err := testEnv.NewClient() | |||||
if err != nil { | |||||
t.Fatalf("NewClient: %v", err) | |||||
} | |||||
defer client.Close() | |||||
tbl := client.Open("mytable") | |||||
prefixes := []string{"a", "b", "c"} | |||||
for _, prefix := range prefixes { | |||||
for i := 0; i < 5; i++ { | |||||
mut := NewMutation() | |||||
mut.Set("cf", "col", 1000, []byte("1")) | |||||
if err := tbl.Apply(ctx, fmt.Sprintf("%v-%v", prefix, i), mut); err != nil { | |||||
t.Fatalf("Mutating row: %v", err) | |||||
} | |||||
} | |||||
} | |||||
if err = adminClient.DropRowRange(ctx, "mytable", "a"); err != nil { | |||||
t.Errorf("DropRowRange a: %v", err) | |||||
} | |||||
if err = adminClient.DropRowRange(ctx, "mytable", "c"); err != nil { | |||||
t.Errorf("DropRowRange c: %v", err) | |||||
} | |||||
if err = adminClient.DropRowRange(ctx, "mytable", "x"); err != nil { | |||||
t.Errorf("DropRowRange x: %v", err) | |||||
} | |||||
var gotRowCount int | |||||
must(tbl.ReadRows(ctx, RowRange{}, func(row Row) bool { | |||||
gotRowCount += 1 | |||||
if !strings.HasPrefix(row.Key(), "b") { | |||||
t.Errorf("Invalid row after dropping range: %v", row) | |||||
} | |||||
return true | |||||
})) | |||||
if gotRowCount != 5 { | |||||
t.Errorf("Invalid row count after dropping range: got %v, want %v", gotRowCount, 5) | |||||
} | |||||
} | |||||
func TestInstanceUpdate(t *testing.T) { | |||||
testEnv, err := NewIntegrationEnv() | |||||
if err != nil { | |||||
t.Fatalf("IntegrationEnv: %v", err) | |||||
} | |||||
defer testEnv.Close() | |||||
timeout := 2 * time.Second | |||||
if testEnv.Config().UseProd { | |||||
timeout = 5 * time.Minute | |||||
} | |||||
ctx, cancel := context.WithTimeout(context.Background(), timeout) | |||||
defer cancel() | |||||
adminClient, err := testEnv.NewAdminClient() | |||||
if err != nil { | |||||
t.Fatalf("NewAdminClient: %v", err) | |||||
} | |||||
defer adminClient.Close() | |||||
iAdminClient, err := testEnv.NewInstanceAdminClient() | |||||
if err != nil { | |||||
t.Fatalf("NewInstanceAdminClient: %v", err) | |||||
} | |||||
if iAdminClient == nil { | |||||
return | |||||
} | |||||
defer iAdminClient.Close() | |||||
iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance) | |||||
if err != nil { | |||||
t.Errorf("InstanceInfo: %v", err) | |||||
} | |||||
if iInfo.Name != adminClient.instance { | |||||
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance) | |||||
} | |||||
if iInfo.DisplayName != adminClient.instance { | |||||
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance) | |||||
} | |||||
const numNodes = 4 | |||||
// update cluster nodes | |||||
if err := iAdminClient.UpdateCluster(ctx, adminClient.instance, testEnv.Config().Cluster, int32(numNodes)); err != nil { | |||||
t.Errorf("UpdateCluster: %v", err) | |||||
} | |||||
// get cluster after updating | |||||
cis, err := iAdminClient.GetCluster(ctx, adminClient.instance, testEnv.Config().Cluster) | |||||
if err != nil { | |||||
t.Errorf("GetCluster %v", err) | |||||
} | |||||
if cis.ServeNodes != int(numNodes) { | |||||
t.Errorf("ServeNodes returned %d, want %d", cis.ServeNodes, int(numNodes)) | |||||
} | |||||
} | |||||
func TestAdminSnapshotIntegration(t *testing.T) { | |||||
testEnv, err := NewIntegrationEnv() | |||||
if err != nil { | |||||
t.Fatalf("IntegrationEnv: %v", err) | |||||
} | |||||
defer testEnv.Close() | |||||
if !testEnv.Config().UseProd { | |||||
t.Skip("emulator doesn't support snapshots") | |||||
} | |||||
timeout := 2 * time.Second | |||||
if testEnv.Config().UseProd { | |||||
timeout = 5 * time.Minute | |||||
} | |||||
ctx, _ := context.WithTimeout(context.Background(), timeout) | |||||
adminClient, err := testEnv.NewAdminClient() | |||||
if err != nil { | |||||
t.Fatalf("NewAdminClient: %v", err) | |||||
} | |||||
defer adminClient.Close() | |||||
table := testEnv.Config().Table | |||||
cluster := testEnv.Config().Cluster | |||||
list := func(cluster string) ([]*SnapshotInfo, error) { | |||||
infos := []*SnapshotInfo(nil) | |||||
it := adminClient.Snapshots(ctx, cluster) | |||||
for { | |||||
s, err := it.Next() | |||||
if err == iterator.Done { | |||||
break | |||||
} | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
infos = append(infos, s) | |||||
} | |||||
return infos, err | |||||
} | |||||
// Delete the table at the end of the test. Schedule ahead of time | |||||
// in case the client fails | |||||
defer adminClient.DeleteTable(ctx, table) | |||||
if err := adminClient.CreateTable(ctx, table); err != nil { | |||||
t.Fatalf("Creating table: %v", err) | |||||
} | |||||
// Precondition: no snapshots | |||||
snapshots, err := list(cluster) | |||||
if err != nil { | |||||
t.Fatalf("Initial snapshot list: %v", err) | |||||
} | |||||
if got, want := len(snapshots), 0; got != want { | |||||
t.Fatalf("Initial snapshot list len: %d, want: %d", got, want) | |||||
} | |||||
// Create snapshot | |||||
defer adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot") | |||||
if err = adminClient.SnapshotTable(ctx, table, cluster, "mysnapshot", 5*time.Hour); err != nil { | |||||
t.Fatalf("Creating snaphot: %v", err) | |||||
} | |||||
// List snapshot | |||||
snapshots, err = list(cluster) | |||||
if err != nil { | |||||
t.Fatalf("Listing snapshots: %v", err) | |||||
} | |||||
if got, want := len(snapshots), 1; got != want { | |||||
t.Fatalf("Listing snapshot count: %d, want: %d", got, want) | |||||
} | |||||
if got, want := snapshots[0].Name, "mysnapshot"; got != want { | |||||
t.Fatalf("Snapshot name: %s, want: %s", got, want) | |||||
} | |||||
if got, want := snapshots[0].SourceTable, table; got != want { | |||||
t.Fatalf("Snapshot SourceTable: %s, want: %s", got, want) | |||||
} | |||||
if got, want := snapshots[0].DeleteTime, snapshots[0].CreateTime.Add(5*time.Hour); math.Abs(got.Sub(want).Minutes()) > 1 { | |||||
t.Fatalf("Snapshot DeleteTime: %s, want: %s", got, want) | |||||
} | |||||
// Get snapshot | |||||
snapshot, err := adminClient.SnapshotInfo(ctx, cluster, "mysnapshot") | |||||
if err != nil { | |||||
t.Fatalf("SnapshotInfo: %v", snapshot) | |||||
} | |||||
if got, want := *snapshot, *snapshots[0]; got != want { | |||||
t.Fatalf("SnapshotInfo: %v, want: %v", got, want) | |||||
} | |||||
// Restore | |||||
restoredTable := table + "-restored" | |||||
defer adminClient.DeleteTable(ctx, restoredTable) | |||||
if err = adminClient.CreateTableFromSnapshot(ctx, restoredTable, cluster, "mysnapshot"); err != nil { | |||||
t.Fatalf("CreateTableFromSnapshot: %v", err) | |||||
} | |||||
if _, err := adminClient.TableInfo(ctx, restoredTable); err != nil { | |||||
t.Fatalf("Restored TableInfo: %v", err) | |||||
} | |||||
// Delete snapshot | |||||
if err = adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot"); err != nil { | |||||
t.Fatalf("DeleteSnapshot: %v", err) | |||||
} | |||||
snapshots, err = list(cluster) | |||||
if err != nil { | |||||
t.Fatalf("List after Delete: %v", err) | |||||
} | |||||
if got, want := len(snapshots), 0; got != want { | |||||
t.Fatalf("List after delete len: %d, want: %d", got, want) | |||||
} | |||||
} | |||||
func TestGranularity(t *testing.T) { | |||||
testEnv, err := NewIntegrationEnv() | |||||
if err != nil { | |||||
t.Fatalf("IntegrationEnv: %v", err) | |||||
} | |||||
defer testEnv.Close() | |||||
timeout := 2 * time.Second | |||||
if testEnv.Config().UseProd { | |||||
timeout = 5 * time.Minute | |||||
} | |||||
ctx, _ := context.WithTimeout(context.Background(), timeout) | |||||
adminClient, err := testEnv.NewAdminClient() | |||||
if err != nil { | |||||
t.Fatalf("NewAdminClient: %v", err) | |||||
} | |||||
defer adminClient.Close() | |||||
list := func() []string { | |||||
tbls, err := adminClient.Tables(ctx) | |||||
if err != nil { | |||||
t.Fatalf("Fetching list of tables: %v", err) | |||||
} | |||||
sort.Strings(tbls) | |||||
return tbls | |||||
} | |||||
containsAll := func(got, want []string) bool { | |||||
gotSet := make(map[string]bool) | |||||
for _, s := range got { | |||||
gotSet[s] = true | |||||
} | |||||
for _, s := range want { | |||||
if !gotSet[s] { | |||||
return false | |||||
} | |||||
} | |||||
return true | |||||
} | |||||
defer adminClient.DeleteTable(ctx, "mytable") | |||||
if err := adminClient.CreateTable(ctx, "mytable"); err != nil { | |||||
t.Fatalf("Creating table: %v", err) | |||||
} | |||||
tables := list() | |||||
if got, want := tables, []string{"mytable"}; !containsAll(got, want) { | |||||
t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) | |||||
} | |||||
// calling ModifyColumnFamilies to check the granularity of table | |||||
prefix := adminClient.instancePrefix() | |||||
req := &btapb.ModifyColumnFamiliesRequest{ | |||||
Name: prefix + "/tables/" + "mytable", | |||||
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ | |||||
Id: "cf", | |||||
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, | |||||
}}, | |||||
} | |||||
table, err := adminClient.tClient.ModifyColumnFamilies(ctx, req) | |||||
if err != nil { | |||||
t.Fatalf("Creating column family: %v", err) | |||||
} | |||||
if table.Granularity != btapb.Table_TimestampGranularity(btapb.Table_MILLIS) { | |||||
t.Errorf("ModifyColumnFamilies returned granularity %#v, want %#v", table.Granularity, btapb.Table_TimestampGranularity(btapb.Table_MILLIS)) | |||||
} | |||||
} | |||||
func TestInstanceAdminClient_AppProfile(t *testing.T) { | |||||
testEnv, err := NewIntegrationEnv() | |||||
if err != nil { | |||||
t.Fatalf("IntegrationEnv: %v", err) | |||||
} | |||||
defer testEnv.Close() | |||||
timeout := 2 * time.Second | |||||
if testEnv.Config().UseProd { | |||||
timeout = 5 * time.Minute | |||||
} | |||||
ctx, cancel := context.WithTimeout(context.Background(), timeout) | |||||
defer cancel() | |||||
adminClient, err := testEnv.NewAdminClient() | |||||
if err != nil { | |||||
t.Fatalf("NewAdminClient: %v", err) | |||||
} | |||||
defer adminClient.Close() | |||||
iAdminClient, err := testEnv.NewInstanceAdminClient() | |||||
if err != nil { | |||||
t.Fatalf("NewInstanceAdminClient: %v", err) | |||||
} | |||||
if iAdminClient == nil { | |||||
return | |||||
} | |||||
defer iAdminClient.Close() | |||||
profile := ProfileConf{ | |||||
ProfileID: "app_profile1", | |||||
InstanceID: adminClient.instance, | |||||
ClusterID: testEnv.Config().Cluster, | |||||
Description: "creating new app profile 1", | |||||
RoutingPolicy: SingleClusterRouting, | |||||
} | |||||
createdProfile, err := iAdminClient.CreateAppProfile(ctx, profile) | |||||
if err != nil { | |||||
t.Fatalf("Creating app profile: %v", err) | |||||
} | |||||
gotProfile, err := iAdminClient.GetAppProfile(ctx, adminClient.instance, "app_profile1") | |||||
if err != nil { | |||||
t.Fatalf("Get app profile: %v", err) | |||||
} | |||||
if !proto.Equal(createdProfile, gotProfile) { | |||||
t.Fatalf("created profile: %s, got profile: %s", createdProfile.Name, gotProfile.Name) | |||||
} | |||||
list := func(instanceID string) ([]*btapb.AppProfile, error) { | |||||
profiles := []*btapb.AppProfile(nil) | |||||
it := iAdminClient.ListAppProfiles(ctx, instanceID) | |||||
for { | |||||
s, err := it.Next() | |||||
if err == iterator.Done { | |||||
break | |||||
} | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
profiles = append(profiles, s) | |||||
} | |||||
return profiles, err | |||||
} | |||||
profiles, err := list(adminClient.instance) | |||||
if err != nil { | |||||
t.Fatalf("List app profile: %v", err) | |||||
} | |||||
if got, want := len(profiles), 1; got != want { | |||||
t.Fatalf("Initial app profile list len: %d, want: %d", got, want) | |||||
} | |||||
for _, test := range []struct { | |||||
desc string | |||||
uattrs ProfileAttrsToUpdate | |||||
want *btapb.AppProfile // nil means error | |||||
}{ | |||||
{ | |||||
desc: "empty update", | |||||
uattrs: ProfileAttrsToUpdate{}, | |||||
want: nil, | |||||
}, | |||||
{ | |||||
desc: "empty description update", | |||||
uattrs: ProfileAttrsToUpdate{Description: ""}, | |||||
want: &btapb.AppProfile{ | |||||
Name: gotProfile.Name, | |||||
Description: "", | |||||
RoutingPolicy: gotProfile.RoutingPolicy, | |||||
Etag: gotProfile.Etag}, | |||||
}, | |||||
{ | |||||
desc: "routing update", | |||||
uattrs: ProfileAttrsToUpdate{ | |||||
RoutingPolicy: SingleClusterRouting, | |||||
ClusterID: testEnv.Config().Cluster, | |||||
}, | |||||
want: &btapb.AppProfile{ | |||||
Name: gotProfile.Name, | |||||
Description: "", | |||||
Etag: gotProfile.Etag, | |||||
RoutingPolicy: &btapb.AppProfile_SingleClusterRouting_{ | |||||
SingleClusterRouting: &btapb.AppProfile_SingleClusterRouting{ | |||||
ClusterId: testEnv.Config().Cluster, | |||||
}}, | |||||
}, | |||||
}, | |||||
} { | |||||
err = iAdminClient.UpdateAppProfile(ctx, adminClient.instance, "app_profile1", test.uattrs) | |||||
if err != nil { | |||||
if test.want != nil { | |||||
t.Errorf("%s: %v", test.desc, err) | |||||
} | |||||
continue | |||||
} | |||||
if err == nil && test.want == nil { | |||||
t.Errorf("%s: got nil, want error", test.desc) | |||||
continue | |||||
} | |||||
got, _ := iAdminClient.GetAppProfile(ctx, adminClient.instance, "app_profile1") | |||||
if !proto.Equal(got, test.want) { | |||||
t.Fatalf("%s : got profile : %v, want profile: %v", test.desc, gotProfile, test.want) | |||||
} | |||||
} | |||||
err = iAdminClient.DeleteAppProfile(ctx, adminClient.instance, "app_profile1") | |||||
if err != nil { | |||||
t.Fatalf("Delete app profile: %v", err) | |||||
} | |||||
} |
@@ -0,0 +1,887 @@ | |||||
/* | |||||
Copyright 2015 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
package bigtable // import "cloud.google.com/go/bigtable" | |||||
import ( | |||||
"errors" | |||||
"fmt" | |||||
"io" | |||||
"strconv" | |||||
"time" | |||||
"cloud.google.com/go/bigtable/internal/gax" | |||||
btopt "cloud.google.com/go/bigtable/internal/option" | |||||
"github.com/golang/protobuf/proto" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/option" | |||||
gtransport "google.golang.org/api/transport/grpc" | |||||
btpb "google.golang.org/genproto/googleapis/bigtable/v2" | |||||
"google.golang.org/grpc" | |||||
"google.golang.org/grpc/codes" | |||||
"google.golang.org/grpc/metadata" | |||||
"google.golang.org/grpc/status" | |||||
) | |||||
// prodAddr is the production Cloud Bigtable data API endpoint.
const prodAddr = "bigtable.googleapis.com:443"
// Client is a client for reading and writing data to tables in an instance.
//
// A Client is safe to use concurrently, except for its Close method.
type Client struct {
	// conn is the underlying gRPC connection; client is the generated stub.
	conn   *grpc.ClientConn
	client btpb.BigtableClient
	// project and instance identify the Bigtable instance this client uses.
	project, instance string
	// appProfile is the app profile id sent with data operations, if set.
	// App Profiles are part of the private alpha release of Cloud Bigtable
	// replication. This feature is not currently available to most Cloud
	// Bigtable customers, might be changed in backward-incompatible ways,
	// and is not recommended for production use. It is not subject to any
	// SLA or deprecation policy.
	appProfile string
}
// ClientConfig has configurations for the client. | |||||
type ClientConfig struct { | |||||
// The id of the app profile to associate with all data operations sent from this client. | |||||
// If unspecified, the default app profile for the instance will be used. | |||||
AppProfile string | |||||
} | |||||
// NewClient creates a new Client for a given project and instance. | |||||
// The default ClientConfig will be used. | |||||
func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) { | |||||
return NewClientWithConfig(ctx, project, instance, ClientConfig{}, opts...) | |||||
} | |||||
func NewClientWithConfig(ctx context.Context, project, instance string, config ClientConfig, opts ...option.ClientOption) (*Client, error) { | |||||
o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
// Default to a small connection pool that can be overridden. | |||||
o = append(o, | |||||
option.WithGRPCConnectionPool(4), | |||||
// Set the max size to correspond to server-side limits. | |||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20))), | |||||
// TODO(grpc/grpc-go#1388) using connection pool without WithBlock | |||||
// can cause RPCs to fail randomly. We can delete this after the issue is fixed. | |||||
option.WithGRPCDialOption(grpc.WithBlock())) | |||||
o = append(o, opts...) | |||||
conn, err := gtransport.Dial(ctx, o...) | |||||
if err != nil { | |||||
return nil, fmt.Errorf("dialing: %v", err) | |||||
} | |||||
return &Client{ | |||||
conn: conn, | |||||
client: btpb.NewBigtableClient(conn), | |||||
project: project, | |||||
instance: instance, | |||||
appProfile: config.AppProfile, | |||||
}, nil | |||||
} | |||||
// Close closes the Client.
func (c *Client) Close() error {
	return c.conn.Close()
}

var (
	// idempotentRetryCodes are the status codes on which idempotent data
	// operations are retried.
	idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted}
	// isIdempotentRetryCode is the set form of idempotentRetryCodes,
	// populated by init for O(1) membership checks.
	isIdempotentRetryCode = make(map[codes.Code]bool)
	// retryOptions is the default retry policy for data operations
	// (delay/timeout settings of 100ms/2000ms with multiplier 1.2 — see
	// gax.WithDelayTimeoutSettings for the exact semantics).
	retryOptions = []gax.CallOption{
		gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2),
		gax.WithRetryCodes(idempotentRetryCodes),
	}
)

// init builds isIdempotentRetryCode from idempotentRetryCodes.
func init() {
	for _, code := range idempotentRetryCodes {
		isIdempotentRetryCode[code] = true
	}
}

// fullTableName returns the fully-qualified resource name of a table:
// projects/<project>/instances/<instance>/tables/<table>.
func (c *Client) fullTableName(table string) string {
	return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table)
}
// A Table refers to a table.
//
// A Table is safe to use concurrently.
type Table struct {
	c     *Client
	table string

	// Metadata to be sent with each request.
	md metadata.MD
}

// Open opens a table. It performs no RPC; the returned Table is a
// lightweight handle that attaches the resource-prefix metadata
// (the full table name) to every request it sends.
func (c *Client) Open(table string) *Table {
	return &Table{
		c:     c,
		table: table,
		md:    metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)),
	}
}
// TODO(dsymonds): Read method that returns a sequence of ReadItems.

// ReadRows reads rows from a table. f is called for each row.
// If f returns false, the stream is shut down and ReadRows returns.
// f owns its argument, and f is called serially in order by row key.
//
// By default, the yielded rows will contain all values in all cells.
// Use RowFilter to limit the cells returned.
//
// The whole scan is wrapped in gax.Invoke: on a retryable stream error the
// RowSet is shrunk (retainRowsAfter) so already-delivered rows are not
// re-read on the next attempt.
func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	// prevRowKey is the last row key delivered to f; the retry resumes after it.
	var prevRowKey string
	var err error
	ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable.ReadRows")
	defer func() { traceEndSpan(ctx, err) }()
	attrMap := make(map[string]interface{})
	err = gax.Invoke(ctx, func(ctx context.Context) error {
		if !arg.valid() {
			// Empty row set, no need to make an API call.
			// NOTE: we must return early if arg == RowList{} because reading
			// an empty RowList from bigtable returns all rows from that table.
			return nil
		}
		req := &btpb.ReadRowsRequest{
			TableName:    t.c.fullTableName(t.table),
			AppProfileId: t.c.appProfile,
			Rows:         arg.proto(),
		}
		for _, opt := range opts {
			opt.set(req)
		}
		ctx, cancel := context.WithCancel(ctx) // for aborting the stream
		defer cancel()
		startTime := time.Now()
		stream, err := t.c.client.ReadRows(ctx, req)
		if err != nil {
			return err
		}
		// cr reassembles cell chunks from the stream into whole rows.
		cr := newChunkReader()
		for {
			res, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				// Reset arg for next Invoke call.
				arg = arg.retainRowsAfter(prevRowKey)
				attrMap["rowKey"] = prevRowKey
				attrMap["error"] = err.Error()
				attrMap["time_secs"] = time.Since(startTime).Seconds()
				tracePrintf(ctx, attrMap, "Retry details in ReadRows")
				return err
			}
			attrMap["time_secs"] = time.Since(startTime).Seconds()
			attrMap["rowCount"] = len(res.Chunks)
			tracePrintf(ctx, attrMap, "Details in ReadRows")
			for _, cc := range res.Chunks {
				row, err := cr.Process(cc)
				if err != nil {
					// No need to prepare for a retry, this is an unretryable error.
					return err
				}
				if row == nil {
					// Row not yet complete; keep consuming chunks.
					continue
				}
				prevRowKey = row.Key()
				if !f(row) {
					// Cancel and drain stream.
					cancel()
					for {
						if _, err := stream.Recv(); err != nil {
							// The stream has ended. We don't return an error
							// because the caller has intentionally interrupted the scan.
							return nil
						}
					}
				}
			}
			if err := cr.Close(); err != nil {
				// No need to prepare for a retry, this is an unretryable error.
				return err
			}
		}
		// NOTE(review): this err is the (nil) stream-creation err shadowed
		// above — the loop only exits via break on io.EOF — so this is
		// effectively `return nil`.
		return err
	}, retryOptions...)
	return err
}
// ReadRow is a convenience implementation of a single-row reader. | |||||
// A missing row will return a zero-length map and a nil error. | |||||
func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) { | |||||
var r Row | |||||
err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool { | |||||
r = rr | |||||
return true | |||||
}, opts...) | |||||
return r, err | |||||
} | |||||
// decodeFamilyProto adds the cell data from f to the given row. | |||||
func decodeFamilyProto(r Row, row string, f *btpb.Family) { | |||||
fam := f.Name // does not have colon | |||||
for _, col := range f.Columns { | |||||
for _, cell := range col.Cells { | |||||
ri := ReadItem{ | |||||
Row: row, | |||||
Column: fam + ":" + string(col.Qualifier), | |||||
Timestamp: Timestamp(cell.TimestampMicros), | |||||
Value: cell.Value, | |||||
} | |||||
r[fam] = append(r[fam], ri) | |||||
} | |||||
} | |||||
} | |||||
// RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList.
// The serialized size of the RowSet must be no larger than 1MiB.
type RowSet interface {
	// proto converts the set to its wire representation.
	proto() *btpb.RowSet

	// retainRowsAfter returns a new RowSet that does not include the
	// given row key or any row key lexicographically less than it.
	retainRowsAfter(lastRowKey string) RowSet

	// Valid reports whether this set can cover at least one row.
	valid() bool
}
// RowList is a sequence of row keys. | |||||
type RowList []string | |||||
func (r RowList) proto() *btpb.RowSet { | |||||
keys := make([][]byte, len(r)) | |||||
for i, row := range r { | |||||
keys[i] = []byte(row) | |||||
} | |||||
return &btpb.RowSet{RowKeys: keys} | |||||
} | |||||
func (r RowList) retainRowsAfter(lastRowKey string) RowSet { | |||||
var retryKeys RowList | |||||
for _, key := range r { | |||||
if key > lastRowKey { | |||||
retryKeys = append(retryKeys, key) | |||||
} | |||||
} | |||||
return retryKeys | |||||
} | |||||
func (r RowList) valid() bool { | |||||
return len(r) > 0 | |||||
} | |||||
// A RowRange is a half-open interval [Start, Limit) encompassing | |||||
// all the rows with keys at least as large as Start, and less than Limit. | |||||
// (Bigtable string comparison is the same as Go's.) | |||||
// A RowRange can be unbounded, encompassing all keys at least as large as Start. | |||||
type RowRange struct { | |||||
start string | |||||
limit string | |||||
} | |||||
// NewRange returns the new RowRange [begin, end). | |||||
func NewRange(begin, end string) RowRange { | |||||
return RowRange{ | |||||
start: begin, | |||||
limit: end, | |||||
} | |||||
} | |||||
// Unbounded tests whether a RowRange is unbounded. | |||||
func (r RowRange) Unbounded() bool { | |||||
return r.limit == "" | |||||
} | |||||
// Contains says whether the RowRange contains the key. | |||||
func (r RowRange) Contains(row string) bool { | |||||
return r.start <= row && (r.limit == "" || r.limit > row) | |||||
} | |||||
// String provides a printable description of a RowRange. | |||||
func (r RowRange) String() string { | |||||
a := strconv.Quote(r.start) | |||||
if r.Unbounded() { | |||||
return fmt.Sprintf("[%s,∞)", a) | |||||
} | |||||
return fmt.Sprintf("[%s,%q)", a, r.limit) | |||||
} | |||||
func (r RowRange) proto() *btpb.RowSet { | |||||
rr := &btpb.RowRange{ | |||||
StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte(r.start)}, | |||||
} | |||||
if !r.Unbounded() { | |||||
rr.EndKey = &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte(r.limit)} | |||||
} | |||||
return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}} | |||||
} | |||||
func (r RowRange) retainRowsAfter(lastRowKey string) RowSet { | |||||
if lastRowKey == "" || lastRowKey < r.start { | |||||
return r | |||||
} | |||||
// Set the beginning of the range to the row after the last scanned. | |||||
start := lastRowKey + "\x00" | |||||
if r.Unbounded() { | |||||
return InfiniteRange(start) | |||||
} | |||||
return NewRange(start, r.limit) | |||||
} | |||||
func (r RowRange) valid() bool { | |||||
return r.Unbounded() || r.start < r.limit | |||||
} | |||||
// RowRangeList is a sequence of RowRanges representing the union of the ranges. | |||||
type RowRangeList []RowRange | |||||
func (r RowRangeList) proto() *btpb.RowSet { | |||||
ranges := make([]*btpb.RowRange, len(r)) | |||||
for i, rr := range r { | |||||
// RowRange.proto() returns a RowSet with a single element RowRange array | |||||
ranges[i] = rr.proto().RowRanges[0] | |||||
} | |||||
return &btpb.RowSet{RowRanges: ranges} | |||||
} | |||||
func (r RowRangeList) retainRowsAfter(lastRowKey string) RowSet { | |||||
if lastRowKey == "" { | |||||
return r | |||||
} | |||||
// Return a list of any range that has not yet been completely processed | |||||
var ranges RowRangeList | |||||
for _, rr := range r { | |||||
retained := rr.retainRowsAfter(lastRowKey) | |||||
if retained.valid() { | |||||
ranges = append(ranges, retained.(RowRange)) | |||||
} | |||||
} | |||||
return ranges | |||||
} | |||||
func (r RowRangeList) valid() bool { | |||||
for _, rr := range r { | |||||
if rr.valid() { | |||||
return true | |||||
} | |||||
} | |||||
return false | |||||
} | |||||
// SingleRow returns a RowSet for reading a single row.
func SingleRow(row string) RowSet {
	return RowList{row}
}

// PrefixRange returns a RowRange consisting of all keys starting with the prefix.
// An empty prefix yields an unbounded range (prefixSuccessor("") is "").
func PrefixRange(prefix string) RowRange {
	return RowRange{
		start: prefix,
		limit: prefixSuccessor(prefix),
	}
}

// InfiniteRange returns the RowRange consisting of all keys at least as
// large as start.
func InfiniteRange(start string) RowRange {
	return RowRange{
		start: start,
		limit: "",
	}
}
// prefixSuccessor returns the lexically smallest string greater than the
// prefix, if it exists, or "" otherwise. In either case, it is the string
// needed for the Limit of a RowRange.
func prefixSuccessor(prefix string) string {
	if prefix == "" {
		return "" // infinite range
	}
	// Find the last byte that can be incremented, i.e. the last non-0xff byte.
	i := len(prefix) - 1
	for i >= 0 && prefix[i] == '\xff' {
		i--
	}
	if i < 0 {
		// Every byte is 0xff; no finite successor exists.
		return ""
	}
	// Drop the trailing 0xff run and increment the byte before it.
	out := make([]byte, i+1)
	copy(out, prefix[:i+1])
	out[i]++
	return string(out)
}
// A ReadOption is an optional argument to ReadRows.
type ReadOption interface {
	// set applies the option to the outgoing request.
	set(req *btpb.ReadRowsRequest)
}

// RowFilter returns a ReadOption that applies f to the contents of read rows.
//
// If multiple RowFilters are provided, only the last is used. To combine filters,
// use ChainFilters or InterleaveFilters instead.
func RowFilter(f Filter) ReadOption { return rowFilter{f} }

// rowFilter implements ReadOption by storing the filter on the request.
type rowFilter struct{ f Filter }

func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() }

// LimitRows returns a ReadOption that will limit the number of rows to be read.
func LimitRows(limit int64) ReadOption { return limitRows{limit} }

// limitRows implements ReadOption by storing the row limit on the request.
type limitRows struct{ limit int64 }

func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit }
// mutationsAreRetryable returns true if all mutations are idempotent | |||||
// and therefore retryable. A mutation is idempotent iff all cell timestamps | |||||
// have an explicit timestamp set and do not rely on the timestamp being set on the server. | |||||
func mutationsAreRetryable(muts []*btpb.Mutation) bool { | |||||
serverTime := int64(ServerTime) | |||||
for _, mut := range muts { | |||||
setCell := mut.GetSetCell() | |||||
if setCell != nil && setCell.TimestampMicros == serverTime { | |||||
return false | |||||
} | |||||
} | |||||
return true | |||||
} | |||||
// Apply applies a Mutation to a specific row.
//
// Unconditional mutations use MutateRow; conditional mutations (m.cond set)
// use CheckAndMutateRow. In both cases the RPC is retried only when every
// involved mutation is idempotent (see mutationsAreRetryable).
func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	// after runs every ApplyOption callback against the RPC response.
	after := func(res proto.Message) {
		for _, o := range opts {
			o.after(res)
		}
	}
	var err error
	ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/Apply")
	defer func() { traceEndSpan(ctx, err) }()
	var callOptions []gax.CallOption
	if m.cond == nil {
		// Unconditional mutation.
		req := &btpb.MutateRowRequest{
			TableName:    t.c.fullTableName(t.table),
			AppProfileId: t.c.appProfile,
			RowKey:       []byte(row),
			Mutations:    m.ops,
		}
		if mutationsAreRetryable(m.ops) {
			callOptions = retryOptions
		}
		var res *btpb.MutateRowResponse
		// NOTE(review): `err :=` shadows the outer err, so the deferred
		// traceEndSpan records nil for this branch even on failure —
		// confirm whether intended.
		err := gax.Invoke(ctx, func(ctx context.Context) error {
			var err error
			res, err = t.c.client.MutateRow(ctx, req)
			return err
		}, callOptions...)
		if err == nil {
			after(res)
		}
		return err
	}

	// Conditional mutation: build the CheckAndMutateRow request with the
	// true/false branches.
	req := &btpb.CheckAndMutateRowRequest{
		TableName:       t.c.fullTableName(t.table),
		AppProfileId:    t.c.appProfile,
		RowKey:          []byte(row),
		PredicateFilter: m.cond.proto(),
	}
	if m.mtrue != nil {
		if m.mtrue.cond != nil {
			return errors.New("bigtable: conditional mutations cannot be nested")
		}
		req.TrueMutations = m.mtrue.ops
	}
	if m.mfalse != nil {
		if m.mfalse.cond != nil {
			return errors.New("bigtable: conditional mutations cannot be nested")
		}
		req.FalseMutations = m.mfalse.ops
	}
	if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) {
		callOptions = retryOptions
	}
	var cmRes *btpb.CheckAndMutateRowResponse
	err = gax.Invoke(ctx, func(ctx context.Context) error {
		var err error
		cmRes, err = t.c.client.CheckAndMutateRow(ctx, req)
		return err
	}, callOptions...)
	if err == nil {
		after(cmRes)
	}
	return err
}
// An ApplyOption is an optional argument to Apply.
type ApplyOption interface {
	// after is invoked with the raw RPC response after a successful mutation.
	after(res proto.Message)
}

// applyAfterFunc adapts a plain function to the ApplyOption interface.
type applyAfterFunc func(res proto.Message)

func (a applyAfterFunc) after(res proto.Message) { a(res) }

// GetCondMutationResult returns an ApplyOption that reports whether the conditional
// mutation's condition matched.
func GetCondMutationResult(matched *bool) ApplyOption {
	return applyAfterFunc(func(res proto.Message) {
		// Only CheckAndMutateRow responses carry a predicate result;
		// other response types leave *matched untouched.
		if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok {
			*matched = res.PredicateMatched
		}
	})
}
// Mutation represents a set of changes for a single row of a table.
type Mutation struct {
	// ops are the accumulated wire-format mutations, in the order added.
	ops []*btpb.Mutation

	// for conditional mutations
	cond          Filter
	mtrue, mfalse *Mutation
}

// NewMutation returns a new mutation.
func NewMutation() *Mutation {
	return new(Mutation)
}

// NewCondMutation returns a conditional mutation.
// The given row filter determines which mutation is applied:
// If the filter matches any cell in the row, mtrue is applied;
// otherwise, mfalse is applied.
// Either given mutation may be nil.
func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {
	return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse}
}
// Set sets a value in a specified column, with the given timestamp. | |||||
// The timestamp will be truncated to millisecond granularity. | |||||
// A timestamp of ServerTime means to use the server timestamp. | |||||
func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) { | |||||
m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ | |||||
FamilyName: family, | |||||
ColumnQualifier: []byte(column), | |||||
TimestampMicros: int64(ts.TruncateToMilliseconds()), | |||||
Value: value, | |||||
}}}) | |||||
} | |||||
// DeleteCellsInColumn will delete all the cells whose columns are family:column.
func (m *Mutation) DeleteCellsInColumn(family, column string) {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
	}}})
}

// DeleteTimestampRange deletes all cells whose columns are family:column
// and whose timestamps are in the half-open interval [start, end).
// If end is zero, it will be interpreted as infinity.
// The timestamps will be truncated to millisecond granularity.
func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		TimeRange: &btpb.TimestampRange{
			StartTimestampMicros: int64(start.TruncateToMilliseconds()),
			EndTimestampMicros:   int64(end.TruncateToMilliseconds()),
		},
	}}})
}

// DeleteCellsInFamily will delete all the cells whose columns are family:*.
func (m *Mutation) DeleteCellsInFamily(family string) {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{DeleteFromFamily: &btpb.Mutation_DeleteFromFamily{
		FamilyName: family,
	}}})
}

// DeleteRow deletes the entire row.
func (m *Mutation) DeleteRow() {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{DeleteFromRow: &btpb.Mutation_DeleteFromRow{}}})
}

// entryErr is a container that combines an entry with the error that was returned for it.
// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed.
type entryErr struct {
	Entry *btpb.MutateRowsRequest_Entry
	Err   error
}
// ApplyBulk applies multiple Mutations, up to a maximum of 100,000.
// Each mutation is individually applied atomically,
// but the set of mutations may be applied in any order.
//
// Two types of failures may occur. If the entire process
// fails, (nil, err) will be returned. If specific mutations
// fail to apply, ([]err, nil) will be returned, and the errors
// will correspond to the relevant rowKeys/muts arguments.
//
// Conditional mutations cannot be applied in bulk and providing one will result in an error.
func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	if len(rowKeys) != len(muts) {
		return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts))
	}
	// origEntries keeps one entryErr per input, in input order; per-entry
	// errors are recorded on these by doApplyBulk (entries below aliases the
	// same pointers).
	origEntries := make([]*entryErr, len(rowKeys))
	for i, key := range rowKeys {
		mut := muts[i]
		if mut.cond != nil {
			return nil, errors.New("conditional mutations cannot be applied in bulk")
		}
		origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}}
	}
	// entries will be reduced after each invocation to just what needs to be retried.
	entries := make([]*entryErr, len(rowKeys))
	copy(entries, origEntries)
	var err error
	ctx = traceStartSpan(ctx, "cloud.google.com/go/bigtable/ApplyBulk")
	defer func() { traceEndSpan(ctx, err) }()
	attrMap := make(map[string]interface{})
	err = gax.Invoke(ctx, func(ctx context.Context) error {
		attrMap["rowCount"] = len(entries)
		tracePrintf(ctx, attrMap, "Row count in ApplyBulk")
		err := t.doApplyBulk(ctx, entries, opts...)
		if err != nil {
			// We want to retry the entire request with the current entries
			return err
		}
		entries = t.getApplyBulkRetries(entries)
		if len(entries) > 0 && len(idempotentRetryCodes) > 0 {
			// We have at least one mutation that needs to be retried.
			// Return an arbitrary error that is retryable according to callOptions.
			return status.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
		}
		return nil
	}, retryOptions...)
	if err != nil {
		return nil, err
	}
	// Accumulate all of the errors into an array to return, interspersed with nils for successful
	// entries. The absence of any errors means we should return nil.
	var errs []error
	var foundErr bool
	for _, entry := range origEntries {
		if entry.Err != nil {
			foundErr = true
		}
		errs = append(errs, entry.Err)
	}
	if foundErr {
		return errs, nil
	}
	return nil, nil
}
// getApplyBulkRetries returns the entries that need to be retried | |||||
func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr { | |||||
var retryEntries []*entryErr | |||||
for _, entry := range entries { | |||||
err := entry.Err | |||||
if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) { | |||||
// There was an error and the entry is retryable. | |||||
retryEntries = append(retryEntries, entry) | |||||
} | |||||
} | |||||
return retryEntries | |||||
} | |||||
// doApplyBulk does the work of a single ApplyBulk invocation | |||||
func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error { | |||||
after := func(res proto.Message) { | |||||
for _, o := range opts { | |||||
o.after(res) | |||||
} | |||||
} | |||||
entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs)) | |||||
for i, entryErr := range entryErrs { | |||||
entries[i] = entryErr.Entry | |||||
} | |||||
req := &btpb.MutateRowsRequest{ | |||||
TableName: t.c.fullTableName(t.table), | |||||
AppProfileId: t.c.appProfile, | |||||
Entries: entries, | |||||
} | |||||
stream, err := t.c.client.MutateRows(ctx, req) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
for { | |||||
res, err := stream.Recv() | |||||
if err == io.EOF { | |||||
break | |||||
} | |||||
if err != nil { | |||||
return err | |||||
} | |||||
for i, entry := range res.Entries { | |||||
s := entry.Status | |||||
if s.Code == int32(codes.OK) { | |||||
entryErrs[i].Err = nil | |||||
} else { | |||||
entryErrs[i].Err = status.Errorf(codes.Code(s.Code), s.Message) | |||||
} | |||||
} | |||||
after(res) | |||||
} | |||||
return nil | |||||
} | |||||
// Timestamp is in units of microseconds since 1 January 1970.
type Timestamp int64

// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set.
// It indicates that the server's timestamp should be used.
const ServerTime Timestamp = -1

// Time converts a time.Time into a Timestamp.
func Time(t time.Time) Timestamp {
	// One microsecond is 1000 nanoseconds.
	return Timestamp(t.UnixNano() / 1000)
}

// Now returns the Timestamp representation of the current time on the client.
func Now() Timestamp {
	return Time(time.Now())
}

// Time converts a Timestamp into a time.Time.
func (ts Timestamp) Time() time.Time {
	return time.Unix(0, 1000*int64(ts))
}

// TruncateToMilliseconds truncates a Timestamp to millisecond granularity,
// which is currently the only granularity supported.
func (ts Timestamp) TruncateToMilliseconds() Timestamp {
	if ts == ServerTime {
		return ts
	}
	// Integer division truncates toward zero, matching ts - ts%1000.
	return Timestamp(int64(ts) / 1000 * 1000)
}
// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row. | |||||
// It returns the newly written cells. | |||||
func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) { | |||||
ctx = mergeOutgoingMetadata(ctx, t.md) | |||||
req := &btpb.ReadModifyWriteRowRequest{ | |||||
TableName: t.c.fullTableName(t.table), | |||||
AppProfileId: t.c.appProfile, | |||||
RowKey: []byte(row), | |||||
Rules: m.ops, | |||||
} | |||||
res, err := t.c.client.ReadModifyWriteRow(ctx, req) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
if res.Row == nil { | |||||
return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil") | |||||
} | |||||
r := make(Row) | |||||
for _, fam := range res.Row.Families { // res is *btpb.Row, fam is *btpb.Family | |||||
decodeFamilyProto(r, row, fam) | |||||
} | |||||
return r, nil | |||||
} | |||||
// ReadModifyWrite represents a set of operations on a single row of a table.
// It is like Mutation but for non-idempotent changes.
// When applied, these operations operate on the latest values of the row's cells,
// and result in a new value being written to the relevant cell with a timestamp
// that is max(existing timestamp, current server time).
//
// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will
// be executed serially by the server.
type ReadModifyWrite struct {
	// ops are the accumulated wire-format rules, in the order added.
	ops []*btpb.ReadModifyWriteRule
}

// NewReadModifyWrite returns a new ReadModifyWrite.
func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) }

// AppendValue appends a value to a specific cell's value.
// If the cell is unset, it will be treated as an empty value.
func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) {
	m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		Rule:            &btpb.ReadModifyWriteRule_AppendValue{AppendValue: v},
	})
}

// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer,
// and adds a value to it. If the cell is unset, it will be treated as zero.
// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite
// operation will fail.
func (m *ReadModifyWrite) Increment(family, column string, delta int64) {
	m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: delta},
	})
}
// mergeOutgoingMetadata returns a context populated by the existing outgoing metadata, | |||||
// if any, joined with internal metadata. | |||||
func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context { | |||||
mdCopy, _ := metadata.FromOutgoingContext(ctx) | |||||
return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md)) | |||||
} | |||||
// SampleRowKeys returns a sample of row keys in the table, as reported by
// the Bigtable SampleRowKeys RPC. Empty row keys in the response stream are
// skipped. The RPC is retried on idempotent error codes; the collected keys
// are reset at the start of each attempt.
func (t *Table) SampleRowKeys(ctx context.Context) ([]string, error) {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	var sampledRowKeys []string
	err := gax.Invoke(ctx, func(ctx context.Context) error {
		// Discard keys collected by a previous, failed attempt.
		sampledRowKeys = nil
		req := &btpb.SampleRowKeysRequest{
			TableName:    t.c.fullTableName(t.table),
			AppProfileId: t.c.appProfile,
		}
		ctx, cancel := context.WithCancel(ctx) // for aborting the stream
		defer cancel()
		stream, err := t.c.client.SampleRowKeys(ctx, req)
		if err != nil {
			return err
		}
		for {
			res, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				return err
			}
			key := string(res.RowKey)
			if key == "" {
				continue
			}
			sampledRowKeys = append(sampledRowKeys, key)
		}
		return nil
	}, retryOptions...)
	return sampledRowKeys, err
}
@@ -0,0 +1,83 @@ | |||||
/* | |||||
Copyright 2016 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
package bttest_test | |||||
import ( | |||||
"fmt" | |||||
"log" | |||||
"cloud.google.com/go/bigtable" | |||||
"cloud.google.com/go/bigtable/bttest" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/option" | |||||
"google.golang.org/grpc" | |||||
) | |||||
func ExampleNewServer() { | |||||
srv, err := bttest.NewServer("localhost:0") | |||||
if err != nil { | |||||
log.Fatalln(err) | |||||
} | |||||
ctx := context.Background() | |||||
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) | |||||
if err != nil { | |||||
log.Fatalln(err) | |||||
} | |||||
proj, instance := "proj", "instance" | |||||
adminClient, err := bigtable.NewAdminClient(ctx, proj, instance, option.WithGRPCConn(conn)) | |||||
if err != nil { | |||||
log.Fatalln(err) | |||||
} | |||||
if err = adminClient.CreateTable(ctx, "example"); err != nil { | |||||
log.Fatalln(err) | |||||
} | |||||
if err = adminClient.CreateColumnFamily(ctx, "example", "links"); err != nil { | |||||
log.Fatalln(err) | |||||
} | |||||
client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn)) | |||||
if err != nil { | |||||
log.Fatalln(err) | |||||
} | |||||
tbl := client.Open("example") | |||||
mut := bigtable.NewMutation() | |||||
mut.Set("links", "golang.org", bigtable.Now(), []byte("Gophers!")) | |||||
if err = tbl.Apply(ctx, "com.google.cloud", mut); err != nil { | |||||
log.Fatalln(err) | |||||
} | |||||
if row, err := tbl.ReadRow(ctx, "com.google.cloud"); err != nil { | |||||
log.Fatalln(err) | |||||
} else { | |||||
for _, column := range row["links"] { | |||||
fmt.Println(column.Column) | |||||
fmt.Println(string(column.Value)) | |||||
} | |||||
} | |||||
// Output: | |||||
// links:golang.org | |||||
// Gophers! | |||||
} |
@@ -0,0 +1,805 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package bttest | |||||
import ( | |||||
"fmt" | |||||
"math/rand" | |||||
"strconv" | |||||
"sync" | |||||
"sync/atomic" | |||||
"testing" | |||||
"time" | |||||
"github.com/google/go-cmp/cmp" | |||||
"github.com/google/go-cmp/cmp/cmpopts" | |||||
"golang.org/x/net/context" | |||||
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" | |||||
btpb "google.golang.org/genproto/googleapis/bigtable/v2" | |||||
"google.golang.org/grpc" | |||||
) | |||||
// TestConcurrentMutationsReadModifyAndGC hammers one in-memory table with
// parallel MutateRow and ReadModifyWriteRow calls while GC passes run, so
// the race detector can surface unsynchronized access in the fake server.
func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
	s := &server{
		tables: make(map[string]*table),
	}
	// The deadline bounds how long the worker goroutines below keep looping.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	if _, err := s.CreateTable(
		ctx,
		&btapb.CreateTableRequest{Parent: "cluster", TableId: "t"}); err != nil {
		t.Fatal(err)
	}
	const name = `cluster/tables/t`
	tbl := s.tables[name]
	// Create column family "cf"...
	req := &btapb.ModifyColumnFamiliesRequest{
		Name: name,
		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
			Id:  "cf",
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
		}},
	}
	_, err := s.ModifyColumnFamilies(ctx, req)
	if err != nil {
		t.Fatal(err)
	}
	// ...then give it a keep-one-version GC rule so tbl.gc() has work to do.
	req = &btapb.ModifyColumnFamiliesRequest{
		Name: name,
		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
			Id: "cf",
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{
				GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}},
			}},
		}},
	}
	if _, err := s.ModifyColumnFamilies(ctx, req); err != nil {
		t.Fatal(err)
	}
	var wg sync.WaitGroup
	// ts hands out strictly increasing timestamps across all goroutines.
	var ts int64
	// ms builds a SetCell mutation on cf:col with a fresh timestamp.
	ms := func() []*btpb.Mutation {
		return []*btpb.Mutation{{
			Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
				FamilyName:      "cf",
				ColumnQualifier: []byte(`col`),
				TimestampMicros: atomic.AddInt64(&ts, 1000),
			}},
		}}
	}
	// rmw builds an increment of cf:col on a random row in [0,100).
	rmw := func() *btpb.ReadModifyWriteRowRequest {
		return &btpb.ReadModifyWriteRowRequest{
			TableName: name,
			RowKey:    []byte(fmt.Sprint(rand.Intn(100))),
			Rules: []*btpb.ReadModifyWriteRule{{
				FamilyName:      "cf",
				ColumnQualifier: []byte("col"),
				Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: 1},
			}},
		}
	}
	// Launch 100 looping writers, 100 looping read-modify-writers, and 100
	// one-shot GC goroutines, all racing on the same table until ctx expires.
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for ctx.Err() == nil {
				req := &btpb.MutateRowRequest{
					TableName: name,
					RowKey:    []byte(fmt.Sprint(rand.Intn(100))),
					Mutations: ms(),
				}
				if _, err := s.MutateRow(ctx, req); err != nil {
					panic(err) // can't use t.Fatal in goroutine
				}
			}
		}()
		wg.Add(1)
		go func() {
			defer wg.Done()
			for ctx.Err() == nil {
				// Errors are expected once ctx expires; ignore them.
				_, _ = s.ReadModifyWriteRow(ctx, rmw())
			}
		}()
		wg.Add(1)
		go func() {
			defer wg.Done()
			tbl.gc()
		}()
	}
	// Fail loudly instead of hanging the test run if something deadlocks.
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(1 * time.Second):
		t.Error("Concurrent mutations and GCs haven't completed after 1s")
	}
}
func TestCreateTableWithFamily(t *testing.T) { | |||||
// The Go client currently doesn't support creating a table with column families | |||||
// in one operation but it is allowed by the API. This must still be supported by the | |||||
// fake server so this test lives here instead of in the main bigtable | |||||
// integration test. | |||||
s := &server{ | |||||
tables: make(map[string]*table), | |||||
} | |||||
ctx := context.Background() | |||||
newTbl := btapb.Table{ | |||||
ColumnFamilies: map[string]*btapb.ColumnFamily{ | |||||
"cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 123}}}, | |||||
"cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 456}}}, | |||||
}, | |||||
} | |||||
cTbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) | |||||
if err != nil { | |||||
t.Fatalf("Creating table: %v", err) | |||||
} | |||||
tbl, err := s.GetTable(ctx, &btapb.GetTableRequest{Name: cTbl.Name}) | |||||
if err != nil { | |||||
t.Fatalf("Getting table: %v", err) | |||||
} | |||||
cf := tbl.ColumnFamilies["cf1"] | |||||
if cf == nil { | |||||
t.Fatalf("Missing col family cf1") | |||||
} | |||||
if got, want := cf.GcRule.GetMaxNumVersions(), int32(123); got != want { | |||||
t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got) | |||||
} | |||||
cf = tbl.ColumnFamilies["cf2"] | |||||
if cf == nil { | |||||
t.Fatalf("Missing col family cf2") | |||||
} | |||||
if got, want := cf.GcRule.GetMaxNumVersions(), int32(456); got != want { | |||||
t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got) | |||||
} | |||||
} | |||||
// MockSampleRowKeysServer is a fake server-side stream for SampleRowKeys. It
// records every response sent so tests can inspect them afterwards. The
// embedded grpc.ServerStream satisfies the rest of the stream interface.
type MockSampleRowKeysServer struct {
	responses []*btpb.SampleRowKeysResponse
	grpc.ServerStream
}
// Send appends resp to s.responses for later inspection; it never fails.
func (s *MockSampleRowKeysServer) Send(resp *btpb.SampleRowKeysResponse) error {
	s.responses = append(s.responses, resp)
	return nil
}
func TestSampleRowKeys(t *testing.T) { | |||||
s := &server{ | |||||
tables: make(map[string]*table), | |||||
} | |||||
ctx := context.Background() | |||||
newTbl := btapb.Table{ | |||||
ColumnFamilies: map[string]*btapb.ColumnFamily{ | |||||
"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, | |||||
}, | |||||
} | |||||
tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) | |||||
if err != nil { | |||||
t.Fatalf("Creating table: %v", err) | |||||
} | |||||
// Populate the table | |||||
val := []byte("value") | |||||
rowCount := 1000 | |||||
for i := 0; i < rowCount; i++ { | |||||
req := &btpb.MutateRowRequest{ | |||||
TableName: tbl.Name, | |||||
RowKey: []byte("row-" + strconv.Itoa(i)), | |||||
Mutations: []*btpb.Mutation{{ | |||||
Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ | |||||
FamilyName: "cf", | |||||
ColumnQualifier: []byte("col"), | |||||
TimestampMicros: 1000, | |||||
Value: val, | |||||
}}, | |||||
}}, | |||||
} | |||||
if _, err := s.MutateRow(ctx, req); err != nil { | |||||
t.Fatalf("Populating table: %v", err) | |||||
} | |||||
} | |||||
mock := &MockSampleRowKeysServer{} | |||||
if err := s.SampleRowKeys(&btpb.SampleRowKeysRequest{TableName: tbl.Name}, mock); err != nil { | |||||
t.Errorf("SampleRowKeys error: %v", err) | |||||
} | |||||
if len(mock.responses) == 0 { | |||||
t.Fatal("Response count: got 0, want > 0") | |||||
} | |||||
// Make sure the offset of the final response is the offset of the final row | |||||
got := mock.responses[len(mock.responses)-1].OffsetBytes | |||||
want := int64((rowCount - 1) * len(val)) | |||||
if got != want { | |||||
t.Errorf("Invalid offset: got %d, want %d", got, want) | |||||
} | |||||
} | |||||
func TestDropRowRange(t *testing.T) { | |||||
s := &server{ | |||||
tables: make(map[string]*table), | |||||
} | |||||
ctx := context.Background() | |||||
newTbl := btapb.Table{ | |||||
ColumnFamilies: map[string]*btapb.ColumnFamily{ | |||||
"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, | |||||
}, | |||||
} | |||||
tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) | |||||
if err != nil { | |||||
t.Fatalf("Creating table: %v", err) | |||||
} | |||||
tbl := s.tables[tblInfo.Name] | |||||
// Populate the table | |||||
prefixes := []string{"AAA", "BBB", "CCC", "DDD"} | |||||
count := 3 | |||||
doWrite := func() { | |||||
for _, prefix := range prefixes { | |||||
for i := 0; i < count; i++ { | |||||
req := &btpb.MutateRowRequest{ | |||||
TableName: tblInfo.Name, | |||||
RowKey: []byte(prefix + strconv.Itoa(i)), | |||||
Mutations: []*btpb.Mutation{{ | |||||
Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ | |||||
FamilyName: "cf", | |||||
ColumnQualifier: []byte("col"), | |||||
TimestampMicros: 1000, | |||||
Value: []byte{}, | |||||
}}, | |||||
}}, | |||||
} | |||||
if _, err := s.MutateRow(ctx, req); err != nil { | |||||
t.Fatalf("Populating table: %v", err) | |||||
} | |||||
} | |||||
} | |||||
} | |||||
doWrite() | |||||
tblSize := tbl.rows.Len() | |||||
req := &btapb.DropRowRangeRequest{ | |||||
Name: tblInfo.Name, | |||||
Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("AAA")}, | |||||
} | |||||
if _, err = s.DropRowRange(ctx, req); err != nil { | |||||
t.Fatalf("Dropping first range: %v", err) | |||||
} | |||||
got, want := tbl.rows.Len(), tblSize-count | |||||
if got != want { | |||||
t.Errorf("Row count after first drop: got %d (%v), want %d", got, tbl.rows, want) | |||||
} | |||||
req = &btapb.DropRowRangeRequest{ | |||||
Name: tblInfo.Name, | |||||
Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("DDD")}, | |||||
} | |||||
if _, err = s.DropRowRange(ctx, req); err != nil { | |||||
t.Fatalf("Dropping second range: %v", err) | |||||
} | |||||
got, want = tbl.rows.Len(), tblSize-(2*count) | |||||
if got != want { | |||||
t.Errorf("Row count after second drop: got %d (%v), want %d", got, tbl.rows, want) | |||||
} | |||||
req = &btapb.DropRowRangeRequest{ | |||||
Name: tblInfo.Name, | |||||
Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("XXX")}, | |||||
} | |||||
if _, err = s.DropRowRange(ctx, req); err != nil { | |||||
t.Fatalf("Dropping invalid range: %v", err) | |||||
} | |||||
got, want = tbl.rows.Len(), tblSize-(2*count) | |||||
if got != want { | |||||
t.Errorf("Row count after invalid drop: got %d (%v), want %d", got, tbl.rows, want) | |||||
} | |||||
req = &btapb.DropRowRangeRequest{ | |||||
Name: tblInfo.Name, | |||||
Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{DeleteAllDataFromTable: true}, | |||||
} | |||||
if _, err = s.DropRowRange(ctx, req); err != nil { | |||||
t.Fatalf("Dropping all data: %v", err) | |||||
} | |||||
got, want = tbl.rows.Len(), 0 | |||||
if got != want { | |||||
t.Errorf("Row count after drop all: got %d, want %d", got, want) | |||||
} | |||||
// Test that we can write rows, delete some and then write them again. | |||||
count = 1 | |||||
doWrite() | |||||
req = &btapb.DropRowRangeRequest{ | |||||
Name: tblInfo.Name, | |||||
Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{DeleteAllDataFromTable: true}, | |||||
} | |||||
if _, err = s.DropRowRange(ctx, req); err != nil { | |||||
t.Fatalf("Dropping all data: %v", err) | |||||
} | |||||
got, want = tbl.rows.Len(), 0 | |||||
if got != want { | |||||
t.Errorf("Row count after drop all: got %d, want %d", got, want) | |||||
} | |||||
doWrite() | |||||
got, want = tbl.rows.Len(), len(prefixes) | |||||
if got != want { | |||||
t.Errorf("Row count after rewrite: got %d, want %d", got, want) | |||||
} | |||||
req = &btapb.DropRowRangeRequest{ | |||||
Name: tblInfo.Name, | |||||
Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("BBB")}, | |||||
} | |||||
if _, err = s.DropRowRange(ctx, req); err != nil { | |||||
t.Fatalf("Dropping range: %v", err) | |||||
} | |||||
doWrite() | |||||
got, want = tbl.rows.Len(), len(prefixes) | |||||
if got != want { | |||||
t.Errorf("Row count after drop range: got %d, want %d", got, want) | |||||
} | |||||
} | |||||
// MockReadRowsServer is a fake server-side stream for ReadRows. It records
// every response sent so tests can inspect them afterwards. The embedded
// grpc.ServerStream satisfies the rest of the stream interface.
type MockReadRowsServer struct {
	responses []*btpb.ReadRowsResponse
	grpc.ServerStream
}
// Send appends resp to s.responses for later inspection; it never fails.
func (s *MockReadRowsServer) Send(resp *btpb.ReadRowsResponse) error {
	s.responses = append(s.responses, resp)
	return nil
}
func TestReadRows(t *testing.T) { | |||||
ctx := context.Background() | |||||
s := &server{ | |||||
tables: make(map[string]*table), | |||||
} | |||||
newTbl := btapb.Table{ | |||||
ColumnFamilies: map[string]*btapb.ColumnFamily{ | |||||
"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, | |||||
}, | |||||
} | |||||
tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) | |||||
if err != nil { | |||||
t.Fatalf("Creating table: %v", err) | |||||
} | |||||
mreq := &btpb.MutateRowRequest{ | |||||
TableName: tblInfo.Name, | |||||
RowKey: []byte("row"), | |||||
Mutations: []*btpb.Mutation{{ | |||||
Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ | |||||
FamilyName: "cf0", | |||||
ColumnQualifier: []byte("col"), | |||||
TimestampMicros: 1000, | |||||
Value: []byte{}, | |||||
}}, | |||||
}}, | |||||
} | |||||
if _, err := s.MutateRow(ctx, mreq); err != nil { | |||||
t.Fatalf("Populating table: %v", err) | |||||
} | |||||
for _, rowset := range []*btpb.RowSet{ | |||||
{RowKeys: [][]byte{[]byte("row")}}, | |||||
{RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("")}}}}, | |||||
{RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("r")}}}}, | |||||
{RowRanges: []*btpb.RowRange{{ | |||||
StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("")}, | |||||
EndKey: &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte("s")}, | |||||
}}}, | |||||
} { | |||||
mock := &MockReadRowsServer{} | |||||
req := &btpb.ReadRowsRequest{TableName: tblInfo.Name, Rows: rowset} | |||||
if err = s.ReadRows(req, mock); err != nil { | |||||
t.Fatalf("ReadRows error: %v", err) | |||||
} | |||||
if got, want := len(mock.responses), 1; got != want { | |||||
t.Errorf("%+v: response count: got %d, want %d", rowset, got, want) | |||||
} | |||||
} | |||||
} | |||||
// TestReadRowsOrder verifies that ReadRows streams a row's chunks in
// canonical order — families ascending, columns ascending within a family,
// and cells within a column by descending timestamp — including under an
// interleave filter and after ReadModifyWriteRow adds cells.
func TestReadRowsOrder(t *testing.T) {
	s := &server{
		tables: make(map[string]*table),
	}
	ctx := context.Background()
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
		},
	}
	tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
	if err != nil {
		t.Fatalf("Creating table: %v", err)
	}
	count := 3
	// mcf builds a request that creates column family "cf<i>" (no GC rule).
	mcf := func(i int) *btapb.ModifyColumnFamiliesRequest {
		return &btapb.ModifyColumnFamiliesRequest{
			Name: tblInfo.Name,
			Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
				Id:  "cf" + strconv.Itoa(i),
				Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
			}},
		}
	}
	for i := 1; i <= count; i++ {
		_, err = s.ModifyColumnFamilies(ctx, mcf(i))
		if err != nil {
			t.Fatal(err)
		}
	}
	// Populate the table: 3 timestamps in each of columns col3..col1 (written
	// in descending column order on purpose) in families cf0..cf2, all on the
	// single row "row".
	for fc := 0; fc < count; fc++ {
		for cc := count; cc > 0; cc-- {
			for tc := 0; tc < count; tc++ {
				req := &btpb.MutateRowRequest{
					TableName: tblInfo.Name,
					RowKey:    []byte("row"),
					Mutations: []*btpb.Mutation{{
						Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
							FamilyName:      "cf" + strconv.Itoa(fc),
							ColumnQualifier: []byte("col" + strconv.Itoa(cc)),
							TimestampMicros: int64((tc + 1) * 1000),
							Value:           []byte{},
						}},
					}},
				}
				if _, err := s.MutateRow(ctx, req); err != nil {
					t.Fatalf("Populating table: %v", err)
				}
			}
		}
	}
	req := &btpb.ReadRowsRequest{
		TableName: tblInfo.Name,
		Rows:      &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}},
	}
	mock := &MockReadRowsServer{}
	if err = s.ReadRows(req, mock); err != nil {
		t.Errorf("ReadRows error: %v", err)
	}
	if len(mock.responses) == 0 {
		t.Fatal("Response count: got 0, want > 0")
	}
	// 3 families x 3 columns x 3 cells = 27 chunks.
	if len(mock.responses[0].Chunks) != 27 {
		t.Fatalf("Chunk count: got %d, want 27", len(mock.responses[0].Chunks))
	}
	// testOrder asserts the canonical chunk ordering described above for the
	// first streamed response.
	testOrder := func(ms *MockReadRowsServer) {
		var prevFam, prevCol string
		var prevTime int64
		for _, cc := range ms.responses[0].Chunks {
			if prevFam == "" {
				// First chunk: just seed the previous-values trackers.
				prevFam = cc.FamilyName.Value
				prevCol = string(cc.Qualifier.Value)
				prevTime = cc.TimestampMicros
				continue
			}
			if cc.FamilyName.Value < prevFam {
				t.Errorf("Family order is not correct: got %s < %s", cc.FamilyName.Value, prevFam)
			} else if cc.FamilyName.Value == prevFam {
				if string(cc.Qualifier.Value) < prevCol {
					t.Errorf("Column order is not correct: got %s < %s", string(cc.Qualifier.Value), prevCol)
				} else if string(cc.Qualifier.Value) == prevCol {
					// Within one column, newer cells must come first.
					if cc.TimestampMicros > prevTime {
						t.Errorf("cell order is not correct: got %d > %d", cc.TimestampMicros, prevTime)
					}
				}
			}
			prevFam = cc.FamilyName.Value
			prevCol = string(cc.Qualifier.Value)
			prevTime = cc.TimestampMicros
		}
	}
	testOrder(mock)
	// Read with interleave filter: family regex "1" (cf1: 3 columns x 3 cells)
	// interleaved with qualifier regex "2" (col2 in 3 families x 3 cells),
	// for an expected 9 + 9 = 18 chunks.
	inter := &btpb.RowFilter_Interleave{}
	fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: "1"}}
	cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{ColumnQualifierRegexFilter: []byte("2")}}
	inter.Filters = append(inter.Filters, fnr, cqr)
	req = &btpb.ReadRowsRequest{
		TableName: tblInfo.Name,
		Rows:      &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}},
		Filter: &btpb.RowFilter{
			Filter: &btpb.RowFilter_Interleave_{Interleave: inter},
		},
	}
	mock = &MockReadRowsServer{}
	if err = s.ReadRows(req, mock); err != nil {
		t.Errorf("ReadRows error: %v", err)
	}
	if len(mock.responses) == 0 {
		t.Fatal("Response count: got 0, want > 0")
	}
	if len(mock.responses[0].Chunks) != 18 {
		t.Fatalf("Chunk count: got %d, want 18", len(mock.responses[0].Chunks))
	}
	testOrder(mock)
	// Check order after ReadModifyWriteRow
	// rmw builds an increment on cf3:col<i>; cf3 gains cells written in
	// descending qualifier order below.
	rmw := func(i int) *btpb.ReadModifyWriteRowRequest {
		return &btpb.ReadModifyWriteRowRequest{
			TableName: tblInfo.Name,
			RowKey:    []byte("row"),
			Rules: []*btpb.ReadModifyWriteRule{{
				FamilyName:      "cf3",
				ColumnQualifier: []byte("col" + strconv.Itoa(i)),
				Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: 1},
			}},
		}
	}
	for i := count; i > 0; i-- {
		if _, err := s.ReadModifyWriteRow(ctx, rmw(i)); err != nil {
			t.Fatal(err)
		}
	}
	req = &btpb.ReadRowsRequest{
		TableName: tblInfo.Name,
		Rows:      &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}},
	}
	mock = &MockReadRowsServer{}
	if err = s.ReadRows(req, mock); err != nil {
		t.Errorf("ReadRows error: %v", err)
	}
	if len(mock.responses) == 0 {
		t.Fatal("Response count: got 0, want > 0")
	}
	// The 27 original cells plus 3 new cf3 cells = 30 chunks.
	if len(mock.responses[0].Chunks) != 30 {
		t.Fatalf("Chunk count: got %d, want 30", len(mock.responses[0].Chunks))
	}
	testOrder(mock)
}
func TestCheckAndMutateRowWithoutPredicate(t *testing.T) { | |||||
s := &server{ | |||||
tables: make(map[string]*table), | |||||
} | |||||
ctx := context.Background() | |||||
newTbl := btapb.Table{ | |||||
ColumnFamilies: map[string]*btapb.ColumnFamily{ | |||||
"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, | |||||
}, | |||||
} | |||||
tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) | |||||
if err != nil { | |||||
t.Fatalf("Creating table: %v", err) | |||||
} | |||||
// Populate the table | |||||
val := []byte("value") | |||||
mrreq := &btpb.MutateRowRequest{ | |||||
TableName: tbl.Name, | |||||
RowKey: []byte("row-present"), | |||||
Mutations: []*btpb.Mutation{{ | |||||
Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{ | |||||
FamilyName: "cf", | |||||
ColumnQualifier: []byte("col"), | |||||
TimestampMicros: 1000, | |||||
Value: val, | |||||
}}, | |||||
}}, | |||||
} | |||||
if _, err := s.MutateRow(ctx, mrreq); err != nil { | |||||
t.Fatalf("Populating table: %v", err) | |||||
} | |||||
req := &btpb.CheckAndMutateRowRequest{ | |||||
TableName: tbl.Name, | |||||
RowKey: []byte("row-not-present"), | |||||
} | |||||
if res, err := s.CheckAndMutateRow(ctx, req); err != nil { | |||||
t.Errorf("CheckAndMutateRow error: %v", err) | |||||
} else if got, want := res.PredicateMatched, false; got != want { | |||||
t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want) | |||||
} | |||||
req = &btpb.CheckAndMutateRowRequest{ | |||||
TableName: tbl.Name, | |||||
RowKey: []byte("row-present"), | |||||
} | |||||
if res, err := s.CheckAndMutateRow(ctx, req); err != nil { | |||||
t.Errorf("CheckAndMutateRow error: %v", err) | |||||
} else if got, want := res.PredicateMatched, true; got != want { | |||||
t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want) | |||||
} | |||||
} | |||||
func TestServer_ReadModifyWriteRow(t *testing.T) { | |||||
s := &server{ | |||||
tables: make(map[string]*table), | |||||
} | |||||
ctx := context.Background() | |||||
newTbl := btapb.Table{ | |||||
ColumnFamilies: map[string]*btapb.ColumnFamily{ | |||||
"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}}, | |||||
}, | |||||
} | |||||
tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) | |||||
if err != nil { | |||||
t.Fatalf("Creating table: %v", err) | |||||
} | |||||
req := &btpb.ReadModifyWriteRowRequest{ | |||||
TableName: tbl.Name, | |||||
RowKey: []byte("row-key"), | |||||
Rules: []*btpb.ReadModifyWriteRule{ | |||||
{ | |||||
FamilyName: "cf", | |||||
ColumnQualifier: []byte("q1"), | |||||
Rule: &btpb.ReadModifyWriteRule_AppendValue{ | |||||
AppendValue: []byte("a"), | |||||
}, | |||||
}, | |||||
// multiple ops for same cell | |||||
{ | |||||
FamilyName: "cf", | |||||
ColumnQualifier: []byte("q1"), | |||||
Rule: &btpb.ReadModifyWriteRule_AppendValue{ | |||||
AppendValue: []byte("b"), | |||||
}, | |||||
}, | |||||
// different cell whose qualifier should sort before the prior rules | |||||
{ | |||||
FamilyName: "cf", | |||||
ColumnQualifier: []byte("q0"), | |||||
Rule: &btpb.ReadModifyWriteRule_IncrementAmount{ | |||||
IncrementAmount: 1, | |||||
}, | |||||
}, | |||||
}, | |||||
} | |||||
got, err := s.ReadModifyWriteRow(ctx, req) | |||||
if err != nil { | |||||
t.Fatalf("ReadModifyWriteRow error: %v", err) | |||||
} | |||||
want := &btpb.ReadModifyWriteRowResponse{ | |||||
Row: &btpb.Row{ | |||||
Key: []byte("row-key"), | |||||
Families: []*btpb.Family{{ | |||||
Name: "cf", | |||||
Columns: []*btpb.Column{ | |||||
{ | |||||
Qualifier: []byte("q0"), | |||||
Cells: []*btpb.Cell{{ | |||||
Value: []byte{0, 0, 0, 0, 0, 0, 0, 1}, | |||||
}}, | |||||
}, | |||||
{ | |||||
Qualifier: []byte("q1"), | |||||
Cells: []*btpb.Cell{{ | |||||
Value: []byte("ab"), | |||||
}}, | |||||
}, | |||||
}, | |||||
}}, | |||||
}, | |||||
} | |||||
diff := cmp.Diff(got, want, cmpopts.IgnoreFields(btpb.Cell{}, "TimestampMicros")) | |||||
if diff != "" { | |||||
t.Errorf("unexpected response: %s", diff) | |||||
} | |||||
} | |||||
// helper function to populate table data | |||||
func populateTable(ctx context.Context, s *server) (*btapb.Table, error) { | |||||
newTbl := btapb.Table{ | |||||
ColumnFamilies: map[string]*btapb.ColumnFamily{ | |||||
"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, | |||||
}, | |||||
} | |||||
tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
count := 3 | |||||
mcf := func(i int) *btapb.ModifyColumnFamiliesRequest { | |||||
return &btapb.ModifyColumnFamiliesRequest{ | |||||
Name: tblInfo.Name, | |||||
Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ | |||||
Id: "cf" + strconv.Itoa(i), | |||||
Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, | |||||
}}, | |||||
} | |||||
} | |||||
for i := 1; i <= count; i++ { | |||||
_, err = s.ModifyColumnFamilies(ctx, mcf(i)) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
} | |||||
// Populate the table | |||||
for fc := 0; fc < count; fc++ { | |||||
for cc := count; cc > 0; cc-- { | |||||
for tc := 0; tc < count; tc++ { | |||||
req := &btpb.MutateRowRequest{ | |||||
TableName: tblInfo.Name, | |||||
RowKey: []byte("row"), | |||||
Mutations: []*btpb.Mutation{{ | |||||
Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ | |||||
FamilyName: "cf" + strconv.Itoa(fc), | |||||
ColumnQualifier: []byte("col" + strconv.Itoa(cc)), | |||||
TimestampMicros: int64((tc + 1) * 1000), | |||||
Value: []byte{}, | |||||
}}, | |||||
}}, | |||||
} | |||||
if _, err := s.MutateRow(ctx, req); err != nil { | |||||
return nil, err | |||||
} | |||||
} | |||||
} | |||||
} | |||||
return tblInfo, nil | |||||
} | |||||
func TestFilters(t *testing.T) { | |||||
tests := []struct { | |||||
in *btpb.RowFilter | |||||
out int | |||||
}{ | |||||
{in: &btpb.RowFilter{Filter: &btpb.RowFilter_BlockAllFilter{true}}, out: 0}, | |||||
{in: &btpb.RowFilter{Filter: &btpb.RowFilter_BlockAllFilter{false}}, out: 1}, | |||||
{in: &btpb.RowFilter{Filter: &btpb.RowFilter_PassAllFilter{true}}, out: 1}, | |||||
{in: &btpb.RowFilter{Filter: &btpb.RowFilter_PassAllFilter{false}}, out: 0}, | |||||
} | |||||
ctx := context.Background() | |||||
s := &server{ | |||||
tables: make(map[string]*table), | |||||
} | |||||
tblInfo, err := populateTable(ctx, s) | |||||
if err != nil { | |||||
t.Fatal(err) | |||||
} | |||||
req := &btpb.ReadRowsRequest{ | |||||
TableName: tblInfo.Name, | |||||
Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, | |||||
} | |||||
for _, tc := range tests { | |||||
req.Filter = tc.in | |||||
mock := &MockReadRowsServer{} | |||||
if err = s.ReadRows(req, mock); err != nil { | |||||
t.Errorf("ReadRows error: %v", err) | |||||
continue | |||||
} | |||||
if len(mock.responses) != tc.out { | |||||
t.Errorf("Response count: got %d, want %d", len(mock.responses), tc.out) | |||||
continue | |||||
} | |||||
} | |||||
} |
@@ -0,0 +1,224 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package main | |||||
import ( | |||||
"testing" | |||||
"time" | |||||
"cloud.google.com/go/bigtable" | |||||
"cloud.google.com/go/internal/testutil" | |||||
"github.com/google/go-cmp/cmp" | |||||
) | |||||
func TestParseDuration(t *testing.T) { | |||||
tests := []struct { | |||||
in string | |||||
// out or fail are mutually exclusive | |||||
out time.Duration | |||||
fail bool | |||||
}{ | |||||
{in: "10ms", out: 10 * time.Millisecond}, | |||||
{in: "3s", out: 3 * time.Second}, | |||||
{in: "60m", out: 60 * time.Minute}, | |||||
{in: "12h", out: 12 * time.Hour}, | |||||
{in: "7d", out: 168 * time.Hour}, | |||||
{in: "", fail: true}, | |||||
{in: "0", fail: true}, | |||||
{in: "7ns", fail: true}, | |||||
{in: "14mo", fail: true}, | |||||
{in: "3.5h", fail: true}, | |||||
{in: "106752d", fail: true}, // overflow | |||||
} | |||||
for _, tc := range tests { | |||||
got, err := parseDuration(tc.in) | |||||
if !tc.fail && err != nil { | |||||
t.Errorf("parseDuration(%q) unexpectedly failed: %v", tc.in, err) | |||||
continue | |||||
} | |||||
if tc.fail && err == nil { | |||||
t.Errorf("parseDuration(%q) did not fail", tc.in) | |||||
continue | |||||
} | |||||
if tc.fail { | |||||
continue | |||||
} | |||||
if got != tc.out { | |||||
t.Errorf("parseDuration(%q) = %v, want %v", tc.in, got, tc.out) | |||||
} | |||||
} | |||||
} | |||||
func TestParseGCPolicy(t *testing.T) { | |||||
tests := []struct { | |||||
in string | |||||
out bigtable.GCPolicy | |||||
fail bool | |||||
}{ | |||||
{in: "maxage=1h", out: bigtable.MaxAgePolicy(time.Hour * 1)}, | |||||
{in: "maxversions=2", out: bigtable.MaxVersionsPolicy(int(2))}, | |||||
{in: "maxversions=2 and maxage=1h", out: bigtable.IntersectionPolicy([]bigtable.GCPolicy{bigtable.MaxVersionsPolicy(int(2)), bigtable.MaxAgePolicy(time.Hour * 1)}...)}, | |||||
{in: "maxversions=2 or maxage=1h", out: bigtable.UnionPolicy([]bigtable.GCPolicy{bigtable.MaxVersionsPolicy(int(2)), bigtable.MaxAgePolicy(time.Hour * 1)}...)}, | |||||
{in: "maxage=1", fail: true}, | |||||
{in: "maxage = 1h", fail: true}, | |||||
{in: "maxage =1h", fail: true}, | |||||
{in: "maxage= 1h", fail: true}, | |||||
{in: "foomaxage=1h", fail: true}, | |||||
{in: "maxversions=1h", fail: true}, | |||||
{in: "maxversions= 1", fail: true}, | |||||
{in: "maxversions = 1", fail: true}, | |||||
{in: "maxversions =1", fail: true}, | |||||
{in: "barmaxversions=1", fail: true}, | |||||
{in: "maxage = 1h or maxversions=1h", fail: true}, | |||||
{in: "foomaxversions=2 or maxage=1h", fail: true}, | |||||
{in: "maxversions=2 or barmaxage=1h", fail: true}, | |||||
{in: "foomaxversions=2 or barmaxage=1h", fail: true}, | |||||
{in: "maxage = 1h and maxversions=1h", fail: true}, | |||||
{in: "foomaxage=1h and maxversions=1", fail: true}, | |||||
{in: "maxage=1h and barmaxversions=1", fail: true}, | |||||
{in: "foomaxage=1h and barmaxversions=1", fail: true}, | |||||
} | |||||
for _, tc := range tests { | |||||
got, err := parseGCPolicy(tc.in) | |||||
if !tc.fail && err != nil { | |||||
t.Errorf("parseGCPolicy(%q) unexpectedly failed: %v", tc.in, err) | |||||
continue | |||||
} | |||||
if tc.fail && err == nil { | |||||
t.Errorf("parseGCPolicy(%q) did not fail", tc.in) | |||||
continue | |||||
} | |||||
if tc.fail { | |||||
continue | |||||
} | |||||
var cmpOpts cmp.Options | |||||
cmpOpts = append(cmpOpts, cmp.AllowUnexported(bigtable.IntersectionPolicy([]bigtable.GCPolicy{}...)), cmp.AllowUnexported(bigtable.UnionPolicy([]bigtable.GCPolicy{}...))) | |||||
if !cmp.Equal(got, tc.out, cmpOpts) { | |||||
t.Errorf("parseGCPolicy(%q) =%v, want %v", tc.in, got, tc.out) | |||||
} | |||||
} | |||||
} | |||||
func TestParseArgs(t *testing.T) { | |||||
got, err := parseArgs([]string{"a=1", "b=2"}, []string{"a", "b"}) | |||||
if err != nil { | |||||
t.Fatal(err) | |||||
} | |||||
want := map[string]string{"a": "1", "b": "2"} | |||||
if !testutil.Equal(got, want) { | |||||
t.Fatalf("got %v, want %v", got, want) | |||||
} | |||||
if _, err := parseArgs([]string{"a1"}, []string{"a1"}); err == nil { | |||||
t.Error("malformed: got nil, want error") | |||||
} | |||||
if _, err := parseArgs([]string{"a=1"}, []string{"b"}); err == nil { | |||||
t.Error("invalid: got nil, want error") | |||||
} | |||||
} | |||||
func TestParseColumnsFilter(t *testing.T) { | |||||
tests := []struct { | |||||
in string | |||||
out bigtable.Filter | |||||
fail bool | |||||
}{ | |||||
{ | |||||
in: "columnA", | |||||
out: bigtable.ColumnFilter("columnA"), | |||||
}, | |||||
{ | |||||
in: "familyA:columnA", | |||||
out: bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")), | |||||
}, | |||||
{ | |||||
in: "columnA,columnB", | |||||
out: bigtable.InterleaveFilters(bigtable.ColumnFilter("columnA"), bigtable.ColumnFilter("columnB")), | |||||
}, | |||||
{ | |||||
in: "familyA:columnA,columnB", | |||||
out: bigtable.InterleaveFilters( | |||||
bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")), | |||||
bigtable.ColumnFilter("columnB"), | |||||
), | |||||
}, | |||||
{ | |||||
in: "columnA,familyB:columnB", | |||||
out: bigtable.InterleaveFilters( | |||||
bigtable.ColumnFilter("columnA"), | |||||
bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")), | |||||
), | |||||
}, | |||||
{ | |||||
in: "familyA:columnA,familyB:columnB", | |||||
out: bigtable.InterleaveFilters( | |||||
bigtable.ChainFilters(bigtable.FamilyFilter("familyA"), bigtable.ColumnFilter("columnA")), | |||||
bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")), | |||||
), | |||||
}, | |||||
{ | |||||
in: "familyA:", | |||||
out: bigtable.FamilyFilter("familyA"), | |||||
}, | |||||
{ | |||||
in: ":columnA", | |||||
out: bigtable.ColumnFilter("columnA"), | |||||
}, | |||||
{ | |||||
in: ",:columnA,,familyB:columnB,", | |||||
out: bigtable.InterleaveFilters( | |||||
bigtable.ColumnFilter("columnA"), | |||||
bigtable.ChainFilters(bigtable.FamilyFilter("familyB"), bigtable.ColumnFilter("columnB")), | |||||
), | |||||
}, | |||||
{ | |||||
in: "familyA:columnA:cellA", | |||||
fail: true, | |||||
}, | |||||
{ | |||||
in: "familyA::columnA", | |||||
fail: true, | |||||
}, | |||||
} | |||||
for _, tc := range tests { | |||||
got, err := parseColumnsFilter(tc.in) | |||||
if !tc.fail && err != nil { | |||||
t.Errorf("parseColumnsFilter(%q) unexpectedly failed: %v", tc.in, err) | |||||
continue | |||||
} | |||||
if tc.fail && err == nil { | |||||
t.Errorf("parseColumnsFilter(%q) did not fail", tc.in) | |||||
continue | |||||
} | |||||
if tc.fail { | |||||
continue | |||||
} | |||||
var cmpOpts cmp.Options | |||||
cmpOpts = | |||||
append( | |||||
cmpOpts, | |||||
cmp.AllowUnexported(bigtable.ChainFilters([]bigtable.Filter{}...)), | |||||
cmp.AllowUnexported(bigtable.InterleaveFilters([]bigtable.Filter{}...))) | |||||
if !cmp.Equal(got, tc.out, cmpOpts) { | |||||
t.Errorf("parseColumnsFilter(%q) = %v, want %v", tc.in, got, tc.out) | |||||
} | |||||
} | |||||
} |
@@ -0,0 +1,369 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. | |||||
// Run "go generate" to regenerate. | |||||
//go:generate go run cbt.go -o cbtdoc.go doc | |||||
/* | |||||
Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to | |||||
install the cbt tool, see the | |||||
[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview). | |||||
Usage: | |||||
cbt [options] command [arguments] | |||||
The commands are: | |||||
count Count rows in a table | |||||
createinstance Create an instance with an initial cluster | |||||
createcluster Create a cluster in the configured instance (replication alpha) | |||||
createfamily Create a column family | |||||
createtable Create a table | |||||
updatecluster Update a cluster in the configured instance | |||||
deleteinstance Deletes an instance | |||||
deletecluster Deletes a cluster from the configured instance (replication alpha) | |||||
deletecolumn Delete all cells in a column | |||||
deletefamily Delete a column family | |||||
deleterow Delete a row | |||||
deletetable Delete a table | |||||
doc Print godoc-suitable documentation for cbt | |||||
help Print help text | |||||
listinstances List instances in a project | |||||
    listclusters              List clusters in an instance
lookup Read from a single row | |||||
ls List tables and column families | |||||
mddoc Print documentation for cbt in Markdown format | |||||
read Read rows | |||||
set Set value of a cell | |||||
setgcpolicy Set the GC policy for a column family | |||||
waitforreplication Blocks until all the completed writes have been replicated to all the clusters (replication alpha) | |||||
createtablefromsnapshot Create a table from a snapshot (snapshots alpha) | |||||
createsnapshot Create a snapshot from a source table (snapshots alpha) | |||||
listsnapshots List snapshots in a cluster (snapshots alpha) | |||||
getsnapshot Get snapshot info (snapshots alpha) | |||||
deletesnapshot Delete snapshot in a cluster (snapshots alpha) | |||||
version Print the current cbt version | |||||
Use "cbt help <command>" for more information about a command. | |||||
The options are: | |||||
-project string | |||||
project ID, if unset uses gcloud configured project | |||||
-instance string | |||||
Cloud Bigtable instance | |||||
-creds string | |||||
if set, use application credentials in this file | |||||
Alpha features are not currently available to most Cloud Bigtable customers. The | |||||
features might be changed in backward-incompatible ways and are not recommended | |||||
for production use. They are not subject to any SLA or deprecation policy. | |||||
For convenience, values of the -project, -instance, -creds, | |||||
-admin-endpoint and -data-endpoint flags may be specified in | |||||
~/.cbtrc in this format: | |||||
project = my-project-123 | |||||
instance = my-instance | |||||
creds = path-to-account-key.json | |||||
admin-endpoint = hostname:port | |||||
data-endpoint = hostname:port | |||||
All values are optional, and all will be overridden by flags. | |||||
Count rows in a table | |||||
Usage: | |||||
cbt count <table> | |||||
Create an instance with an initial cluster | |||||
Usage: | |||||
cbt createinstance <instance-id> <display-name> <cluster-id> <zone> <num-nodes> <storage type> | |||||
instance-id Permanent, unique id for the instance | |||||
display-name Description of the instance | |||||
cluster-id Permanent, unique id for the cluster in the instance | |||||
zone The zone in which to create the cluster | |||||
num-nodes The number of nodes to create | |||||
storage-type SSD or HDD | |||||
Create a cluster in the configured instance (replication alpha) | |||||
Usage: | |||||
cbt createcluster <cluster-id> <zone> <num-nodes> <storage type> | |||||
cluster-id Permanent, unique id for the cluster in the instance | |||||
zone The zone in which to create the cluster | |||||
num-nodes The number of nodes to create | |||||
storage-type SSD or HDD | |||||
Create a column family | |||||
Usage: | |||||
cbt createfamily <table> <family> | |||||
Create a table | |||||
Usage: | |||||
cbt createtable <table> [families=family[:(maxage=<d> | maxversions=<n>)],...] [splits=split,...] | |||||
families: Column families and their associated GC policies. See "setgcpolicy". | |||||
Example: families=family1:maxage=1w,family2:maxversions=1 | |||||
splits: Row key to be used to initially split the table | |||||
Update a cluster in the configured instance | |||||
Usage: | |||||
cbt updatecluster <cluster-id> [num-nodes=num-nodes] | |||||
cluster-id Permanent, unique id for the cluster in the instance | |||||
num-nodes The number of nodes to update to | |||||
Deletes an instance | |||||
Usage: | |||||
cbt deleteinstance <instance> | |||||
Deletes a cluster from the configured instance (replication alpha) | |||||
Usage: | |||||
cbt deletecluster <cluster> | |||||
Delete all cells in a column | |||||
Usage: | |||||
cbt deletecolumn <table> <row> <family> <column> [app-profile=<app profile id>] | |||||
app-profile=<app profile id> The app profile id to use for the request (replication alpha) | |||||
Delete a column family | |||||
Usage: | |||||
cbt deletefamily <table> <family> | |||||
Delete a row | |||||
Usage: | |||||
cbt deleterow <table> <row> [app-profile=<app profile id>] | |||||
app-profile=<app profile id> The app profile id to use for the request (replication alpha) | |||||
Delete a table | |||||
Usage: | |||||
cbt deletetable <table> | |||||
Print godoc-suitable documentation for cbt | |||||
Usage: | |||||
cbt doc | |||||
Print help text | |||||
Usage: | |||||
cbt help [command] | |||||
List instances in a project | |||||
Usage: | |||||
cbt listinstances | |||||
List clusters in an instance
Usage: | |||||
cbt listclusters | |||||
Read from a single row | |||||
Usage: | |||||
cbt lookup <table> <row> [cells-per-column=<n>] [app-profile=<app profile id>] | |||||
cells-per-column=<n> Read only this many cells per column | |||||
app-profile=<app profile id> The app profile id to use for the request (replication alpha) | |||||
List tables and column families | |||||
Usage: | |||||
cbt ls List tables | |||||
cbt ls <table> List column families in <table> | |||||
Print documentation for cbt in Markdown format | |||||
Usage: | |||||
cbt mddoc | |||||
Read rows | |||||
Usage: | |||||
cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [count=<n>] [cells-per-column=<n>] [app-profile=<app profile id>] | |||||
start=<row> Start reading at this row | |||||
end=<row> Stop reading before this row | |||||
prefix=<prefix> Read rows with this prefix | |||||
regex=<regex> Read rows with keys matching this regex | |||||
count=<n> Read only this many rows | |||||
cells-per-column=<n> Read only this many cells per column | |||||
app-profile=<app profile id> The app profile id to use for the request (replication alpha) | |||||
Set value of a cell | |||||
Usage: | |||||
cbt set <table> <row> [app-profile=<app profile id>] family:column=val[@ts] ... | |||||
app-profile=<app profile id> The app profile id to use for the request (replication alpha) | |||||
family:column=val[@ts] may be repeated to set multiple cells. | |||||
ts is an optional integer timestamp. | |||||
If it cannot be parsed, the `@ts` part will be | |||||
interpreted as part of the value. | |||||
Set the GC policy for a column family | |||||
Usage: | |||||
cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> ) | |||||
maxage=<d> Maximum timestamp age to preserve (e.g. "1h", "4d") | |||||
maxversions=<n> Maximum number of versions to preserve | |||||
Blocks until all the completed writes have been replicated to all the clusters (replication alpha) | |||||
Usage: | |||||
cbt waitforreplication <table> | |||||
Create a table from a snapshot (snapshots alpha) | |||||
Usage: | |||||
cbt createtablefromsnapshot <table> <cluster> <snapshot> | |||||
table The name of the table to create | |||||
cluster The cluster where the snapshot is located | |||||
snapshot The snapshot to restore | |||||
Create a snapshot from a source table (snapshots alpha) | |||||
Usage: | |||||
cbt createsnapshot <cluster> <snapshot> <table> [ttl=<d>] | |||||
[ttl=<d>] Lifespan of the snapshot (e.g. "1h", "4d") | |||||
List snapshots in a cluster (snapshots alpha) | |||||
Usage: | |||||
cbt listsnapshots [<cluster>] | |||||
Get snapshot info (snapshots alpha) | |||||
Usage: | |||||
cbt getsnapshot <cluster> <snapshot> | |||||
Delete snapshot in a cluster (snapshots alpha) | |||||
Usage: | |||||
cbt deletesnapshot <cluster> <snapshot> | |||||
Print the current cbt version | |||||
Usage: | |||||
cbt version | |||||
*/ | |||||
package main |
@@ -0,0 +1,44 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
/* | |||||
cbtemulator launches the in-memory Cloud Bigtable server on the given address. | |||||
*/ | |||||
package main | |||||
import ( | |||||
"flag" | |||||
"fmt" | |||||
"log" | |||||
"cloud.google.com/go/bigtable/bttest" | |||||
"google.golang.org/grpc" | |||||
) | |||||
// Command-line flags selecting the local address the emulator binds to.
var (
	host = flag.String("host", "localhost", "the address to bind to on the local machine")
	port = flag.Int("port", 9000, "the port number to bind to on the local machine")
)
func main() { | |||||
grpc.EnableTracing = false | |||||
flag.Parse() | |||||
srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port)) | |||||
if err != nil { | |||||
log.Fatalf("failed to start emulator: %v", err) | |||||
} | |||||
fmt.Printf("Cloud Bigtable emulator running on %s\n", srv.Addr) | |||||
select {} | |||||
} |
@@ -0,0 +1,205 @@ | |||||
/* | |||||
Copyright 2015 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
/* | |||||
Loadtest does some load testing through the Go client library for Cloud Bigtable. | |||||
*/ | |||||
package main | |||||
import ( | |||||
"bytes" | |||||
"flag" | |||||
"fmt" | |||||
"log" | |||||
"math/rand" | |||||
"os" | |||||
"os/signal" | |||||
"sync" | |||||
"sync/atomic" | |||||
"time" | |||||
"cloud.google.com/go/bigtable" | |||||
"cloud.google.com/go/bigtable/internal/cbtconfig" | |||||
"cloud.google.com/go/bigtable/internal/stat" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/option" | |||||
"google.golang.org/grpc" | |||||
) | |||||
// Command-line flags for the load test, plus the shared Bigtable
// configuration and clients (populated in main).
var (
	runFor = flag.Duration("run_for", 5*time.Second,
		"how long to run the load test for; 0 to run forever until SIGTERM")
	scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist")
	csvOutput    = flag.String("csv_output", "",
		"output path for statistics in .csv format. If this file already exists it will be overwritten.")
	poolSize = flag.Int("pool_size", 1, "size of the gRPC connection pool to use for the data client")
	reqCount = flag.Int("req_count", 100, "number of concurrent requests")
	// config holds project/instance/creds read from flags or ~/.cbtrc.
	config *cbtconfig.Config
	// client and adminClient are created in main and used by the workers.
	client      *bigtable.Client
	adminClient *bigtable.AdminClient
)
// main drives the load test: it creates a scratch table, fires
// concurrent read/write operations at it until -run_for elapses
// (forever when -run_for=0), then prints latency aggregates and
// optionally writes them as CSV.
func main() {
	var err error
	config, err = cbtconfig.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()
	flag.Parse()
	if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
		log.Fatal(err)
	}
	if config.Creds != "" {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
	}
	// The load test takes no positional arguments.
	if flag.NArg() != 0 {
		flag.Usage()
		os.Exit(1)
	}
	var options []option.ClientOption
	if *poolSize > 1 {
		options = append(options,
			option.WithGRPCConnectionPool(*poolSize),
			// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
			// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
			option.WithGRPCDialOption(grpc.WithBlock()))
	}
	var csvFile *os.File
	if *csvOutput != "" {
		csvFile, err = os.Create(*csvOutput)
		if err != nil {
			log.Fatalf("creating csv output file: %v", err)
		}
		defer csvFile.Close()
		log.Printf("Writing statistics to %q ...", *csvOutput)
	}
	log.Printf("Dialing connections...")
	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...)
	if err != nil {
		log.Fatalf("Making bigtable.Client: %v", err)
	}
	defer client.Close()
	adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance)
	if err != nil {
		log.Fatalf("Making bigtable.AdminClient: %v", err)
	}
	defer adminClient.Close()
	// Create a scratch table.
	log.Printf("Setting up scratch table...")
	tblConf := bigtable.TableConf{
		TableID: *scratchTable,
		Families: map[string]bigtable.GCPolicy{"f": bigtable.MaxVersionsPolicy(1)},
	}
	if err := adminClient.CreateTableFromConf(context.Background(), &tblConf); err != nil {
		log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
	}
	// Upon a successful run, delete the table. Don't bother checking for errors.
	defer adminClient.DeleteTable(context.Background(), *scratchTable)
	// Also delete the table on SIGTERM.
	// NOTE(review): only os.Interrupt (SIGINT) is registered here, not
	// SIGTERM as the comment and the -run_for help text suggest —
	// confirm whether syscall.SIGTERM should be added to the Notify call.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		s := <-c
		log.Printf("Caught %v, cleaning scratch table.", s)
		_ = adminClient.DeleteTable(context.Background(), *scratchTable)
		os.Exit(1)
	}()
	log.Printf("Starting load test... (run for %v)", *runFor)
	tbl := client.Open(*scratchTable)
	sem := make(chan int, *reqCount) // limit the number of requests happening at once
	var reads, writes stats
	stopTime := time.Now().Add(*runFor)
	var wg sync.WaitGroup
	for time.Now().Before(stopTime) || *runFor == 0 {
		sem <- 1
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()
			ok := true
			opStart := time.Now()
			// NOTE: this local shadows the stats type; it is pointed at
			// either reads or writes by the switch below, before the
			// deferred Record runs.
			var stats *stats
			defer func() {
				stats.Record(ok, time.Since(opStart))
			}()
			row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows
			// rand.Intn(10): cases 0-4 write, the rest read — a 50/50 mix.
			switch rand.Intn(10) {
			default:
				// read
				stats = &reads
				_, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1)))
				if err != nil {
					log.Printf("Error doing read: %v", err)
					ok = false
				}
			case 0, 1, 2, 3, 4:
				// write
				stats = &writes
				mut := bigtable.NewMutation()
				mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write
				if err := tbl.Apply(context.Background(), row, mut); err != nil {
					log.Printf("Error doing mutation: %v", err)
					ok = false
				}
			}
		}()
	}
	wg.Wait()
	// Failure count for each aggregate is tries minus successes.
	readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok)
	writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok)
	log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg)
	log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg)
	if csvFile != nil {
		stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile)
	}
}
// allStats counts every completed operation across all stats values in
// this process; it is only accessed atomically.
var allStats int64

// stats accumulates attempt/success counts and per-operation latencies
// for one class of operation. It is safe for concurrent use.
type stats struct {
	mu        sync.Mutex
	tries, ok int
	ds        []time.Duration
}

// Record notes one finished operation: whether it succeeded and how
// long it took. Every 1000th operation overall, a progress line is
// logged.
func (s *stats) Record(succeeded bool, elapsed time.Duration) {
	s.mu.Lock()
	s.tries++
	if succeeded {
		s.ok++
	}
	s.ds = append(s.ds, elapsed)
	s.mu.Unlock()

	total := atomic.AddInt64(&allStats, 1)
	if total%1000 == 0 {
		log.Printf("Progress: done %d ops", total)
	}
}
@@ -0,0 +1,155 @@ | |||||
/* | |||||
Copyright 2016 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
/* | |||||
Scantest does scan-related load testing against Cloud Bigtable. The logic here | |||||
mimics a similar test written using the Java client. | |||||
*/ | |||||
package main | |||||
import ( | |||||
"bytes" | |||||
"flag" | |||||
"fmt" | |||||
"log" | |||||
"math/rand" | |||||
"os" | |||||
"sync" | |||||
"sync/atomic" | |||||
"text/tabwriter" | |||||
"time" | |||||
"cloud.google.com/go/bigtable" | |||||
"cloud.google.com/go/bigtable/internal/cbtconfig" | |||||
"cloud.google.com/go/bigtable/internal/stat" | |||||
"golang.org/x/net/context" | |||||
) | |||||
// Command-line flags for the scan test, plus the shared Bigtable
// configuration and client (populated in main).
var (
	runFor   = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
	numScans = flag.Int("concurrent_scans", 1, "number of concurrent scans")
	rowLimit = flag.Int("row_limit", 10000, "max number of records per scan")
	// config holds project/instance/creds read from flags or ~/.cbtrc.
	config *cbtconfig.Config
	// client is created in main and shared by all scan goroutines.
	client *bigtable.Client
)
// main runs the scan load test: it opens the table named by the single
// positional argument and issues up to -concurrent_scans parallel
// ReadRows scans (each capped at -row_limit rows, starting at a random
// key) until -run_for elapses, then prints latency and throughput stats.
func main() {
	flag.Usage = func() {
		fmt.Printf("Usage: scantest [options] <table_name>\n\n")
		flag.PrintDefaults()
	}
	var err error
	config, err = cbtconfig.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()
	flag.Parse()
	if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
		log.Fatal(err)
	}
	if config.Creds != "" {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
	}
	// Exactly one positional argument: the table to scan.
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(1)
	}
	table := flag.Arg(0)
	log.Printf("Dialing connections...")
	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance)
	if err != nil {
		log.Fatalf("Making bigtable.Client: %v", err)
	}
	defer client.Close()
	log.Printf("Starting scan test... (run for %v)", *runFor)
	tbl := client.Open(table)
	sem := make(chan int, *numScans) // limit the number of requests happening at once
	var scans stats
	stopTime := time.Now().Add(*runFor)
	var wg sync.WaitGroup
	for time.Now().Before(stopTime) {
		sem <- 1
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()
			ok := true
			opStart := time.Now()
			// Record the scan's outcome and duration when it finishes.
			defer func() {
				scans.Record(ok, time.Since(opStart))
			}()
			// Start at a random row key
			key := fmt.Sprintf("user%d", rand.Int63())
			limit := bigtable.LimitRows(int64(*rowLimit))
			// Rows are discarded; only the scan timing matters here.
			noop := func(bigtable.Row) bool { return true }
			if err := tbl.ReadRows(context.Background(), bigtable.NewRange(key, ""), noop, limit); err != nil {
				log.Printf("Error during scan: %v", err)
				ok = false
			}
		}()
	}
	wg.Wait()
	agg := stat.NewAggregate("scans", scans.ds, scans.tries-scans.ok)
	log.Printf("Scans (%d ok / %d tries):\nscan times:\n%v\nthroughput (rows/second):\n%v",
		scans.ok, scans.tries, agg, throughputString(agg))
}
func throughputString(agg *stat.Aggregate) string { | |||||
var buf bytes.Buffer | |||||
tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding | |||||
rowLimitF := float64(*rowLimit) | |||||
fmt.Fprintf( | |||||
tw, | |||||
"min:\t%.2f\nmedian:\t%.2f\nmax:\t%.2f\n", | |||||
rowLimitF/agg.Max.Seconds(), | |||||
rowLimitF/agg.Median.Seconds(), | |||||
rowLimitF/agg.Min.Seconds()) | |||||
tw.Flush() | |||||
return buf.String() | |||||
} | |||||
// allStats counts every completed operation across all stats values in
// this process; it is only accessed atomically.
var allStats int64

// stats accumulates attempt/success counts and per-operation latencies
// for one class of operation. It is safe for concurrent use.
type stats struct {
	mu        sync.Mutex
	tries, ok int
	ds        []time.Duration
}

// Record notes one finished operation: whether it succeeded and how
// long it took. Every 1000th operation overall, a progress line is
// logged.
func (s *stats) Record(succeeded bool, elapsed time.Duration) {
	s.mu.Lock()
	s.tries++
	if succeeded {
		s.ok++
	}
	s.ds = append(s.ds, elapsed)
	s.mu.Unlock()

	total := atomic.AddInt64(&allStats, 1)
	if total%1000 == 0 {
		log.Printf("Progress: done %d ops", total)
	}
}
@@ -0,0 +1,123 @@ | |||||
/* | |||||
Copyright 2015 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
/* | |||||
Package bigtable is an API to Google Cloud Bigtable. | |||||
See https://cloud.google.com/bigtable/docs/ for general product documentation. | |||||
See https://godoc.org/cloud.google.com/go for authentication, timeouts, | |||||
connection pooling and similar aspects of this package. | |||||
Setup and Credentials | |||||
Use NewClient or NewAdminClient to create a client that can be used to access | |||||
the data or admin APIs respectively. Both require credentials that have permission | |||||
to access the Cloud Bigtable API. | |||||
If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials | |||||
(https://developers.google.com/accounts/docs/application-default-credentials) | |||||
is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called. | |||||
To use alternate credentials, pass them to NewClient or NewAdminClient using option.WithTokenSource. | |||||
For instance, you can use service account credentials by visiting | |||||
https://cloud.google.com/console/project/MYPROJECT/apiui/credential, | |||||
creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing | |||||
jsonKey, err := ioutil.ReadFile(pathToKeyFile) | |||||
... | |||||
config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc. | |||||
... | |||||
client, err := bigtable.NewClient(ctx, project, instance, option.WithTokenSource(config.TokenSource(ctx))) | |||||
... | |||||
Here, `google` means the golang.org/x/oauth2/google package | |||||
and `option` means the google.golang.org/api/option package. | |||||
Reading | |||||
The principal way to read from a Bigtable is to use the ReadRows method on *Table. | |||||
A RowRange specifies a contiguous portion of a table. A Filter may be provided through | |||||
RowFilter to limit or transform the data that is returned. | |||||
tbl := client.Open("mytable") | |||||
... | |||||
// Read all the rows starting with "com.google.", | |||||
// but only fetch the columns in the "links" family. | |||||
rr := bigtable.PrefixRange("com.google.") | |||||
err := tbl.ReadRows(ctx, rr, func(r Row) bool { | |||||
// do something with r | |||||
return true // keep going | |||||
}, bigtable.RowFilter(bigtable.FamilyFilter("links"))) | |||||
... | |||||
To read a single row, use the ReadRow helper method. | |||||
r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key | |||||
... | |||||
Writing | |||||
This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite. | |||||
The former expresses idempotent operations. | |||||
The latter expresses non-idempotent operations and returns the new values of updated cells. | |||||
These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite), | |||||
building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite | |||||
methods on a Table. | |||||
For instance, to set a couple of cells in a table, | |||||
tbl := client.Open("mytable") | |||||
mut := bigtable.NewMutation() | |||||
mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1")) | |||||
mut.Set("links", "golang.org", bigtable.Now(), []byte("1")) | |||||
err := tbl.Apply(ctx, "com.google.cloud", mut) | |||||
... | |||||
To increment an encoded value in one cell, | |||||
tbl := client.Open("mytable") | |||||
rmw := bigtable.NewReadModifyWrite() | |||||
rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org" | |||||
r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw) | |||||
... | |||||
Retries | |||||
If a read or write operation encounters a transient error it will be retried until a successful | |||||
response, an unretryable error or the context deadline is reached. Non-idempotent writes (where | |||||
the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls | |||||
will not re-scan rows that have already been processed. | |||||
*/ | |||||
package bigtable // import "cloud.google.com/go/bigtable" | |||||
// Scope constants for authentication credentials.
// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile.
const (
	// Scope is the OAuth scope for Cloud Bigtable data operations.
	Scope = "https://www.googleapis.com/auth/bigtable.data"
	// ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations.
	ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly"
	// AdminScope is the OAuth scope for Cloud Bigtable table admin operations.
	AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table"
	// InstanceAdminScope is the OAuth scope for Cloud Bigtable instance (and cluster) admin operations.
	// Note the legacy ".cluster" suffix in the URL; it covers instance-level administration.
	InstanceAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster"
)
// clientUserAgent identifies the version of this package.
// It should be bumped upon significant changes only.
const clientUserAgent = "cbt-go/20180601"
// resourcePrefixHeader is the name of the metadata header used to indicate
// the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix"
@@ -0,0 +1,222 @@ | |||||
/* | |||||
Copyright 2016 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
package bigtable | |||||
import ( | |||||
"errors" | |||||
"flag" | |||||
"fmt" | |||||
"strings" | |||||
"time" | |||||
"cloud.google.com/go/bigtable/bttest" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/option" | |||||
"google.golang.org/grpc" | |||||
) | |||||
// legacyUseProd holds the deprecated -use_prod flag value ("proj,instance,table").
var legacyUseProd string
// integrationConfig is populated by the -it.* flags registered in init.
var integrationConfig IntegrationTestConfig
func init() { | |||||
c := &integrationConfig | |||||
flag.BoolVar(&c.UseProd, "it.use-prod", false, "Use remote bigtable instead of local emulator") | |||||
flag.StringVar(&c.AdminEndpoint, "it.admin-endpoint", "", "Admin api host and port") | |||||
flag.StringVar(&c.DataEndpoint, "it.data-endpoint", "", "Data api host and port") | |||||
flag.StringVar(&c.Project, "it.project", "", "Project to use for integration test") | |||||
flag.StringVar(&c.Instance, "it.instance", "", "Bigtable instance to use") | |||||
flag.StringVar(&c.Cluster, "it.cluster", "", "Bigtable cluster to use") | |||||
flag.StringVar(&c.Table, "it.table", "", "Bigtable table to create") | |||||
// Backwards compat | |||||
flag.StringVar(&legacyUseProd, "use_prod", "", `DEPRECATED: if set to "proj,instance,table", run integration test against production`) | |||||
} | |||||
// IntegrationTestConfig contains parameters to pick and setup a IntegrationEnv for testing
type IntegrationTestConfig struct {
	UseProd       bool   // run against the real service instead of the local emulator
	AdminEndpoint string // override for the admin API host:port
	DataEndpoint  string // override for the data API host:port
	Project       string // GCP project ID
	Instance      string // Bigtable instance name
	Cluster       string // Bigtable cluster name
	Table         string // table to create for the test
}
// IntegrationEnv represents a testing environment.
// The environment can be implemented using production or an emulator
type IntegrationEnv interface {
	// Config returns the configuration this environment was built from.
	Config() IntegrationTestConfig
	NewAdminClient() (*AdminClient, error)
	// NewInstanceAdminClient will return nil if instance administration is unsupported in this environment
	NewInstanceAdminClient() (*InstanceAdminClient, error)
	NewClient() (*Client, error)
	// Close releases any resources held by the environment (e.g. the emulator process).
	Close()
}
// NewIntegrationEnv creates a new environment based on the command line args | |||||
func NewIntegrationEnv() (IntegrationEnv, error) { | |||||
c := integrationConfig | |||||
if legacyUseProd != "" { | |||||
fmt.Println("WARNING: using legacy commandline arg -use_prod, please switch to -it.*") | |||||
parts := strings.SplitN(legacyUseProd, ",", 3) | |||||
c.UseProd = true | |||||
c.Project = parts[0] | |||||
c.Instance = parts[1] | |||||
c.Table = parts[2] | |||||
} | |||||
if integrationConfig.UseProd { | |||||
return NewProdEnv(c) | |||||
} else { | |||||
return NewEmulatedEnv(c) | |||||
} | |||||
} | |||||
// EmulatedEnv encapsulates the state of an emulator
type EmulatedEnv struct {
	config IntegrationTestConfig // resolved config, with defaults filled in
	server *bttest.Server        // in-process Bigtable emulator
}
// NewEmulatedEnv builds and starts the emulator based environment | |||||
func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) { | |||||
srv, err := bttest.NewServer("localhost:0", grpc.MaxRecvMsgSize(200<<20), grpc.MaxSendMsgSize(100<<20)) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
if config.Project == "" { | |||||
config.Project = "project" | |||||
} | |||||
if config.Instance == "" { | |||||
config.Instance = "instance" | |||||
} | |||||
if config.Table == "" { | |||||
config.Table = "mytable" | |||||
} | |||||
config.AdminEndpoint = srv.Addr | |||||
config.DataEndpoint = srv.Addr | |||||
env := &EmulatedEnv{ | |||||
config: config, | |||||
server: srv, | |||||
} | |||||
return env, nil | |||||
} | |||||
// Close stops & cleans up the emulator
func (e *EmulatedEnv) Close() {
	e.server.Close()
}
// Config gets the config used to build this environment
func (e *EmulatedEnv) Config() IntegrationTestConfig {
	return e.config
}
// NewAdminClient builds a new connected admin client for this environment | |||||
func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) { | |||||
timeout := 20 * time.Second | |||||
ctx, _ := context.WithTimeout(context.Background(), timeout) | |||||
conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock()) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return NewAdminClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn)) | |||||
} | |||||
// NewInstanceAdminClient returns nil for the emulated environment since the API is not implemented.
// Callers must check for a nil client before use (both return values are nil).
func (e *EmulatedEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
	return nil, nil
}
// NewClient builds a new connected data client for this environment | |||||
func (e *EmulatedEnv) NewClient() (*Client, error) { | |||||
timeout := 20 * time.Second | |||||
ctx, _ := context.WithTimeout(context.Background(), timeout) | |||||
conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock(), | |||||
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20))) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return NewClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn)) | |||||
} | |||||
// ProdEnv encapsulates the state necessary to connect to the external Bigtable service
type ProdEnv struct {
	config IntegrationTestConfig // validated config (Project/Instance/Table non-empty)
}
// NewProdEnv builds the environment representation | |||||
func NewProdEnv(config IntegrationTestConfig) (*ProdEnv, error) { | |||||
if config.Project == "" { | |||||
return nil, errors.New("Project not set") | |||||
} | |||||
if config.Instance == "" { | |||||
return nil, errors.New("Instance not set") | |||||
} | |||||
if config.Table == "" { | |||||
return nil, errors.New("Table not set") | |||||
} | |||||
return &ProdEnv{config}, nil | |||||
} | |||||
// Close is a no-op for production environments
// (there is no local process to shut down).
func (e *ProdEnv) Close() {}
// Config gets the config used to build this environment
func (e *ProdEnv) Config() IntegrationTestConfig {
	return e.config
}
// NewAdminClient builds a new connected admin client for this environment | |||||
func (e *ProdEnv) NewAdminClient() (*AdminClient, error) { | |||||
timeout := 20 * time.Second | |||||
ctx, _ := context.WithTimeout(context.Background(), timeout) | |||||
var clientOpts []option.ClientOption | |||||
if endpoint := e.config.AdminEndpoint; endpoint != "" { | |||||
clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) | |||||
} | |||||
return NewAdminClient(ctx, e.config.Project, e.config.Instance, clientOpts...) | |||||
} | |||||
// NewInstanceAdminClient returns a new connected instance admin client for this environment | |||||
func (e *ProdEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) { | |||||
timeout := 20 * time.Second | |||||
ctx, _ := context.WithTimeout(context.Background(), timeout) | |||||
var clientOpts []option.ClientOption | |||||
if endpoint := e.config.AdminEndpoint; endpoint != "" { | |||||
clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) | |||||
} | |||||
return NewInstanceAdminClient(ctx, e.config.Project, clientOpts...) | |||||
} | |||||
// NewClient builds a connected data client for this environment | |||||
func (e *ProdEnv) NewClient() (*Client, error) { | |||||
timeout := 20 * time.Second | |||||
ctx, _ := context.WithTimeout(context.Background(), timeout) | |||||
var clientOpts []option.ClientOption | |||||
if endpoint := e.config.DataEndpoint; endpoint != "" { | |||||
clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) | |||||
} | |||||
return NewClient(ctx, e.config.Project, e.config.Instance, clientOpts...) | |||||
} |
@@ -0,0 +1,317 @@ | |||||
/* | |||||
Copyright 2015 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
package bigtable | |||||
import ( | |||||
"fmt" | |||||
"strings" | |||||
"time" | |||||
btpb "google.golang.org/genproto/googleapis/bigtable/v2" | |||||
) | |||||
// A Filter represents a row filter.
// The unexported proto method converts a filter to its wire form and
// keeps the set of implementations closed to this package.
type Filter interface {
	String() string
	proto() *btpb.RowFilter
}
// ChainFilters returns a filter that applies a sequence of filters. | |||||
func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} } | |||||
type chainFilter struct { | |||||
sub []Filter | |||||
} | |||||
func (cf chainFilter) String() string { | |||||
var ss []string | |||||
for _, sf := range cf.sub { | |||||
ss = append(ss, sf.String()) | |||||
} | |||||
return "(" + strings.Join(ss, " | ") + ")" | |||||
} | |||||
func (cf chainFilter) proto() *btpb.RowFilter { | |||||
chain := &btpb.RowFilter_Chain{} | |||||
for _, sf := range cf.sub { | |||||
chain.Filters = append(chain.Filters, sf.proto()) | |||||
} | |||||
return &btpb.RowFilter{ | |||||
Filter: &btpb.RowFilter_Chain_{Chain: chain}, | |||||
} | |||||
} | |||||
// InterleaveFilters returns a filter that applies a set of filters in parallel | |||||
// and interleaves the results. | |||||
func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} } | |||||
type interleaveFilter struct { | |||||
sub []Filter | |||||
} | |||||
func (ilf interleaveFilter) String() string { | |||||
var ss []string | |||||
for _, sf := range ilf.sub { | |||||
ss = append(ss, sf.String()) | |||||
} | |||||
return "(" + strings.Join(ss, " + ") + ")" | |||||
} | |||||
func (ilf interleaveFilter) proto() *btpb.RowFilter { | |||||
inter := &btpb.RowFilter_Interleave{} | |||||
for _, sf := range ilf.sub { | |||||
inter.Filters = append(inter.Filters, sf.proto()) | |||||
} | |||||
return &btpb.RowFilter{ | |||||
Filter: &btpb.RowFilter_Interleave_{Interleave: inter}, | |||||
} | |||||
} | |||||
// RowKeyFilter returns a filter that matches cells from rows whose | |||||
// key matches the provided RE2 pattern. | |||||
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. | |||||
func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) } | |||||
type rowKeyFilter string | |||||
func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) } | |||||
func (rkf rowKeyFilter) proto() *btpb.RowFilter { | |||||
return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{RowKeyRegexFilter: []byte(rkf)}} | |||||
} | |||||
// FamilyFilter returns a filter that matches cells whose family name | |||||
// matches the provided RE2 pattern. | |||||
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. | |||||
func FamilyFilter(pattern string) Filter { return familyFilter(pattern) } | |||||
type familyFilter string | |||||
func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) } | |||||
func (ff familyFilter) proto() *btpb.RowFilter { | |||||
return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: string(ff)}} | |||||
} | |||||
// ColumnFilter returns a filter that matches cells whose column name | |||||
// matches the provided RE2 pattern. | |||||
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. | |||||
func ColumnFilter(pattern string) Filter { return columnFilter(pattern) } | |||||
type columnFilter string | |||||
func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) } | |||||
func (cf columnFilter) proto() *btpb.RowFilter { | |||||
return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{ColumnQualifierRegexFilter: []byte(cf)}} | |||||
} | |||||
// ValueFilter returns a filter that matches cells whose value | |||||
// matches the provided RE2 pattern. | |||||
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. | |||||
func ValueFilter(pattern string) Filter { return valueFilter(pattern) } | |||||
type valueFilter string | |||||
func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) } | |||||
func (vf valueFilter) proto() *btpb.RowFilter { | |||||
return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{ValueRegexFilter: []byte(vf)}} | |||||
} | |||||
// LatestNFilter returns a filter that matches the most recent N cells in each column.
// NOTE(review): n is narrowed from int to int32 by the conversion below;
// out-of-range values would silently truncate.
func LatestNFilter(n int) Filter { return latestNFilter(n) }
type latestNFilter int32
func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) }
func (lnf latestNFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{CellsPerColumnLimitFilter: int32(lnf)}}
}
// StripValueFilter returns a filter that replaces each value with the empty string.
func StripValueFilter() Filter { return stripValueFilter{} }
type stripValueFilter struct{}
func (stripValueFilter) String() string { return "strip_value()" }
func (stripValueFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{StripValueTransformer: true}}
}
// TimestampRangeFilter returns a filter that matches any cells whose timestamp is within the given time bounds. A zero | |||||
// time means no bound. | |||||
// The timestamp will be truncated to millisecond granularity. | |||||
func TimestampRangeFilter(startTime time.Time, endTime time.Time) Filter { | |||||
trf := timestampRangeFilter{} | |||||
if !startTime.IsZero() { | |||||
trf.startTime = Time(startTime) | |||||
} | |||||
if !endTime.IsZero() { | |||||
trf.endTime = Time(endTime) | |||||
} | |||||
return trf | |||||
} | |||||
// TimestampRangeFilterMicros returns a filter that matches any cells whose timestamp is within the given time bounds,
// specified in units of microseconds since 1 January 1970. A zero value for the end time is interpreted as no bound.
// The timestamp will be truncated to millisecond granularity.
func TimestampRangeFilterMicros(startTime Timestamp, endTime Timestamp) Filter {
	return timestampRangeFilter{startTime, endTime}
}
// timestampRangeFilter holds inclusive-start / exclusive-end bounds in
// microseconds; zero means unbounded on that side.
type timestampRangeFilter struct {
	startTime Timestamp
	endTime   Timestamp
}
func (trf timestampRangeFilter) String() string {
	return fmt.Sprintf("timestamp_range(%v,%v)", trf.startTime, trf.endTime)
}
func (trf timestampRangeFilter) proto() *btpb.RowFilter {
	// Truncation to milliseconds happens here, at proto-conversion time.
	return &btpb.RowFilter{
		Filter: &btpb.RowFilter_TimestampRangeFilter{TimestampRangeFilter: &btpb.TimestampRange{
			StartTimestampMicros: int64(trf.startTime.TruncateToMilliseconds()),
			EndTimestampMicros:   int64(trf.endTime.TruncateToMilliseconds()),
		},
	}}
}
// ColumnRangeFilter returns a filter that matches a contiguous range of columns within a single | |||||
// family, as specified by an inclusive start qualifier and exclusive end qualifier. | |||||
func ColumnRangeFilter(family, start, end string) Filter { | |||||
return columnRangeFilter{family, start, end} | |||||
} | |||||
type columnRangeFilter struct { | |||||
family string | |||||
start string | |||||
end string | |||||
} | |||||
func (crf columnRangeFilter) String() string { | |||||
return fmt.Sprintf("columnRangeFilter(%s,%s,%s)", crf.family, crf.start, crf.end) | |||||
} | |||||
func (crf columnRangeFilter) proto() *btpb.RowFilter { | |||||
r := &btpb.ColumnRange{FamilyName: crf.family} | |||||
if crf.start != "" { | |||||
r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{StartQualifierClosed: []byte(crf.start)} | |||||
} | |||||
if crf.end != "" { | |||||
r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{EndQualifierOpen: []byte(crf.end)} | |||||
} | |||||
return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnRangeFilter{ColumnRangeFilter: r}} | |||||
} | |||||
// ValueRangeFilter returns a filter that matches cells with values that fall within | |||||
// the given range, as specified by an inclusive start value and exclusive end value. | |||||
func ValueRangeFilter(start, end []byte) Filter { | |||||
return valueRangeFilter{start, end} | |||||
} | |||||
type valueRangeFilter struct { | |||||
start []byte | |||||
end []byte | |||||
} | |||||
func (vrf valueRangeFilter) String() string { | |||||
return fmt.Sprintf("valueRangeFilter(%s,%s)", vrf.start, vrf.end) | |||||
} | |||||
func (vrf valueRangeFilter) proto() *btpb.RowFilter { | |||||
r := &btpb.ValueRange{} | |||||
if vrf.start != nil { | |||||
r.StartValue = &btpb.ValueRange_StartValueClosed{StartValueClosed: vrf.start} | |||||
} | |||||
if vrf.end != nil { | |||||
r.EndValue = &btpb.ValueRange_EndValueOpen{EndValueOpen: vrf.end} | |||||
} | |||||
return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRangeFilter{ValueRangeFilter: r}} | |||||
} | |||||
// ConditionFilter returns a filter that evaluates to one of two possible filters depending
// on whether or not the given predicate filter matches at least one cell.
// If the matched filter is nil then no results will be returned.
// IMPORTANT NOTE: The predicate filter does not execute atomically with the
// true and false filters, which may lead to inconsistent or unexpected
// results. Additionally, condition filters have poor performance, especially
// when filters are set for the false condition.
// NOTE(review): predicateFilter must be non-nil; proto() below calls
// predicateFilter.proto() unconditionally and would panic on nil.
func ConditionFilter(predicateFilter, trueFilter, falseFilter Filter) Filter {
	return conditionFilter{predicateFilter, trueFilter, falseFilter}
}
type conditionFilter struct {
	predicateFilter Filter
	trueFilter      Filter
	falseFilter     Filter
}
func (cf conditionFilter) String() string {
	return fmt.Sprintf("conditionFilter(%s,%s,%s)", cf.predicateFilter, cf.trueFilter, cf.falseFilter)
}
func (cf conditionFilter) proto() *btpb.RowFilter {
	// A nil true/false branch is encoded as a nil sub-filter, which the
	// service treats as "return nothing" for that branch.
	var tf *btpb.RowFilter
	var ff *btpb.RowFilter
	if cf.trueFilter != nil {
		tf = cf.trueFilter.proto()
	}
	if cf.falseFilter != nil {
		ff = cf.falseFilter.proto()
	}
	return &btpb.RowFilter{
		Filter: &btpb.RowFilter_Condition_{Condition: &btpb.RowFilter_Condition{
			PredicateFilter: cf.predicateFilter.proto(),
			TrueFilter:      tf,
			FalseFilter:     ff,
		}}}
}
// CellsPerRowOffsetFilter returns a filter that skips the first N cells of each row, matching all subsequent cells.
// NOTE(review): n is narrowed from int to int32; out-of-range values would silently truncate.
func CellsPerRowOffsetFilter(n int) Filter {
	return cellsPerRowOffsetFilter(n)
}
type cellsPerRowOffsetFilter int32
func (cof cellsPerRowOffsetFilter) String() string {
	return fmt.Sprintf("cells_per_row_offset(%d)", cof)
}
func (cof cellsPerRowOffsetFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowOffsetFilter{CellsPerRowOffsetFilter: int32(cof)}}
}
// CellsPerRowLimitFilter returns a filter that matches only the first N cells of each row.
// NOTE(review): n is narrowed from int to int32; out-of-range values would silently truncate.
func CellsPerRowLimitFilter(n int) Filter {
	return cellsPerRowLimitFilter(n)
}
type cellsPerRowLimitFilter int32
func (clf cellsPerRowLimitFilter) String() string {
	return fmt.Sprintf("cells_per_row_limit(%d)", clf)
}
func (clf cellsPerRowLimitFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{CellsPerRowLimitFilter: int32(clf)}}
}
// TODO(dsymonds): More filters: sampling |
@@ -0,0 +1,158 @@ | |||||
/* | |||||
Copyright 2015 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
package bigtable | |||||
import ( | |||||
"fmt" | |||||
"strings" | |||||
"time" | |||||
durpb "github.com/golang/protobuf/ptypes/duration" | |||||
bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2" | |||||
) | |||||
// A GCPolicy represents a rule that determines which cells are eligible for garbage collection.
// The unexported proto method keeps the set of implementations closed to this package.
type GCPolicy interface {
	String() string
	proto() *bttdpb.GcRule
}
// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply. | |||||
func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} } | |||||
type intersectionPolicy struct { | |||||
sub []GCPolicy | |||||
} | |||||
func (ip intersectionPolicy) String() string { | |||||
var ss []string | |||||
for _, sp := range ip.sub { | |||||
ss = append(ss, sp.String()) | |||||
} | |||||
return "(" + strings.Join(ss, " && ") + ")" | |||||
} | |||||
func (ip intersectionPolicy) proto() *bttdpb.GcRule { | |||||
inter := &bttdpb.GcRule_Intersection{} | |||||
for _, sp := range ip.sub { | |||||
inter.Rules = append(inter.Rules, sp.proto()) | |||||
} | |||||
return &bttdpb.GcRule{ | |||||
Rule: &bttdpb.GcRule_Intersection_{Intersection: inter}, | |||||
} | |||||
} | |||||
// UnionPolicy returns a GC policy that applies when any of its sub-policies apply. | |||||
func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} } | |||||
type unionPolicy struct { | |||||
sub []GCPolicy | |||||
} | |||||
func (up unionPolicy) String() string { | |||||
var ss []string | |||||
for _, sp := range up.sub { | |||||
ss = append(ss, sp.String()) | |||||
} | |||||
return "(" + strings.Join(ss, " || ") + ")" | |||||
} | |||||
func (up unionPolicy) proto() *bttdpb.GcRule { | |||||
union := &bttdpb.GcRule_Union{} | |||||
for _, sp := range up.sub { | |||||
union.Rules = append(union.Rules, sp.proto()) | |||||
} | |||||
return &bttdpb.GcRule{ | |||||
Rule: &bttdpb.GcRule_Union_{Union: union}, | |||||
} | |||||
} | |||||
// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell
// except for the most recent n.
// NOTE(review): n is narrowed to int32 in proto(); out-of-range values would truncate.
func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) }
type maxVersionsPolicy int
func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) }
func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule {
	return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{MaxNumVersions: int32(mvp)}}
}
// MaxAgePolicy returns a GC policy that applies to all cells
// older than the given age.
func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) }
type maxAgePolicy time.Duration
// units lists display units in decreasing size; String picks the largest
// unit that divides the duration evenly.
var units = []struct {
	d      time.Duration
	suffix string
}{
	{24 * time.Hour, "d"},
	{time.Hour, "h"},
	{time.Minute, "m"},
}
func (ma maxAgePolicy) String() string {
	d := time.Duration(ma)
	for _, u := range units {
		if d%u.d == 0 {
			return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix)
		}
	}
	// Fallback: value is printed in microseconds.
	// NOTE(review): no unit suffix is emitted here — confirm intended.
	return fmt.Sprintf("age() > %d", d/time.Microsecond)
}
func (ma maxAgePolicy) proto() *bttdpb.GcRule {
	// This doesn't handle overflows, etc.
	// Fix this if people care about GC policies over 290 years.
	ns := time.Duration(ma).Nanoseconds()
	return &bttdpb.GcRule{
		Rule: &bttdpb.GcRule_MaxAge{MaxAge: &durpb.Duration{
			Seconds: ns / 1e9,
			Nanos:   int32(ns % 1e9),
		}},
	}
}
// GCRuleToString converts the given GcRule proto to a user-visible string. | |||||
func GCRuleToString(rule *bttdpb.GcRule) string { | |||||
if rule == nil { | |||||
return "<default>" | |||||
} | |||||
switch r := rule.Rule.(type) { | |||||
case *bttdpb.GcRule_MaxNumVersions: | |||||
return MaxVersionsPolicy(int(r.MaxNumVersions)).String() | |||||
case *bttdpb.GcRule_MaxAge: | |||||
return MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String() | |||||
case *bttdpb.GcRule_Intersection_: | |||||
return joinRules(r.Intersection.Rules, " && ") | |||||
case *bttdpb.GcRule_Union_: | |||||
return joinRules(r.Union.Rules, " || ") | |||||
default: | |||||
return "" | |||||
} | |||||
} | |||||
func joinRules(rules []*bttdpb.GcRule, sep string) string { | |||||
var chunks []string | |||||
for _, r := range rules { | |||||
chunks = append(chunks, GCRuleToString(r)) | |||||
} | |||||
return "(" + strings.Join(chunks, sep) + ")" | |||||
} |
@@ -0,0 +1,46 @@ | |||||
/* | |||||
Copyright 2017 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
package bigtable | |||||
import ( | |||||
"testing" | |||||
"time" | |||||
bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2" | |||||
) | |||||
// TestGcRuleToString verifies the user-visible rendering of simple,
// intersection, and nested union GC rules.
func TestGcRuleToString(t *testing.T) {
	intersection := IntersectionPolicy(MaxVersionsPolicy(5), MaxVersionsPolicy(10), MaxAgePolicy(16*time.Hour))
	var tests = []struct {
		proto *bttdpb.GcRule
		want  string
	}{
		{MaxAgePolicy(72 * time.Hour).proto(), "age() > 3d"},
		{MaxVersionsPolicy(5).proto(), "versions() > 5"},
		{intersection.proto(), "(versions() > 5 && versions() > 10 && age() > 16h)"},
		{UnionPolicy(intersection, MaxAgePolicy(72*time.Hour)).proto(),
			"((versions() > 5 && versions() > 10 && age() > 16h) || age() > 3d)"},
	}
	for _, test := range tests {
		got := GCRuleToString(test.proto)
		if got != test.want {
			t.Errorf("got gc rule string: %v, wanted: %v", got, test.want)
		}
	}
}
@@ -0,0 +1,68 @@ | |||||
// Copyright 2018 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// +build go1.8 | |||||
package bigtable | |||||
import ( | |||||
"fmt" | |||||
"go.opencensus.io/plugin/ocgrpc" | |||||
"go.opencensus.io/trace" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/option" | |||||
"google.golang.org/grpc" | |||||
) | |||||
// openCensusOptions returns client options that install the OpenCensus
// gRPC stats handler on outgoing connections.
func openCensusOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})),
	}
}
// traceStartSpan starts a trace span named name and returns the derived
// context; the span itself is discarded and later recovered via
// trace.FromContext.
func traceStartSpan(ctx context.Context, name string) context.Context {
	ctx, _ = trace.StartSpan(ctx, name)
	return ctx
}
// traceEndSpan ends the span carried by ctx, recording err's message as
// the span status when err is non-nil.
// NOTE(review): trace.FromContext may return nil if ctx carries no span;
// this relies on OpenCensus *Span methods tolerating a nil receiver — confirm.
func traceEndSpan(ctx context.Context, err error) {
	span := trace.FromContext(ctx)
	if err != nil {
		span.SetStatus(trace.Status{Message: err.Error()})
	}
	span.End()
}
func tracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { | |||||
var attrs []trace.Attribute | |||||
for k, v := range attrMap { | |||||
var a trace.Attribute | |||||
switch v := v.(type) { | |||||
case string: | |||||
a = trace.StringAttribute(k, v) | |||||
case bool: | |||||
a = trace.BoolAttribute(k, v) | |||||
case int: | |||||
a = trace.Int64Attribute(k, int64(v)) | |||||
case int64: | |||||
a = trace.Int64Attribute(k, v) | |||||
default: | |||||
a = trace.StringAttribute(k, fmt.Sprintf("%#v", v)) | |||||
} | |||||
attrs = append(attrs, a) | |||||
} | |||||
trace.FromContext(ctx).Annotatef(attrs, format, args...) | |||||
} |
@@ -0,0 +1,252 @@ | |||||
/* | |||||
Copyright 2015 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
// Package cbtconfig encapsulates common code for reading configuration from .cbtrc and gcloud. | |||||
package cbtconfig | |||||
import ( | |||||
"bufio" | |||||
"bytes" | |||||
"crypto/tls" | |||||
"crypto/x509" | |||||
"encoding/json" | |||||
"flag" | |||||
"fmt" | |||||
"io/ioutil" | |||||
"log" | |||||
"os" | |||||
"os/exec" | |||||
"path/filepath" | |||||
"runtime" | |||||
"strings" | |||||
"time" | |||||
"golang.org/x/oauth2" | |||||
"google.golang.org/grpc/credentials" | |||||
) | |||||
// Config represents a .cbtrc configuration: the target project and
// instance plus optional credential and endpoint overrides.
type Config struct {
	Project, Instance string                           // required
	Creds             string                           // optional: path to an application credentials file
	AdminEndpoint     string                           // optional: override for the admin API endpoint
	DataEndpoint      string                           // optional: override for the data API endpoint
	CertFile          string                           // optional: override for the TLS root certificates file
	UserAgent         string                           // optional: override for the user agent string
	TokenSource       oauth2.TokenSource               // derived: set by SetFromGcloud when no creds file is given
	TLSCreds          credentials.TransportCredentials // derived: built from CertFile by CheckFlags
}
// RequiredFlags is a bitmask describing which Config fields CheckFlags
// must find populated.
type RequiredFlags uint

// NoneRequired means CheckFlags demands no particular field.
const NoneRequired RequiredFlags = 0

const (
	// ProjectRequired demands a non-empty Project.
	ProjectRequired RequiredFlags = 1 << iota
	// InstanceRequired demands a non-empty Instance.
	InstanceRequired
)

// ProjectAndInstanceRequired demands both Project and Instance.
const ProjectAndInstanceRequired RequiredFlags = ProjectRequired | InstanceRequired
// RegisterFlags registers a set of standard flags for this config. | |||||
// It should be called before flag.Parse. | |||||
func (c *Config) RegisterFlags() { | |||||
flag.StringVar(&c.Project, "project", c.Project, "project ID, if unset uses gcloud configured project") | |||||
flag.StringVar(&c.Instance, "instance", c.Instance, "Cloud Bigtable instance") | |||||
flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file") | |||||
flag.StringVar(&c.AdminEndpoint, "admin-endpoint", c.AdminEndpoint, "Override the admin api endpoint") | |||||
flag.StringVar(&c.DataEndpoint, "data-endpoint", c.DataEndpoint, "Override the data api endpoint") | |||||
flag.StringVar(&c.CertFile, "cert-file", c.CertFile, "Override the TLS certificates file") | |||||
flag.StringVar(&c.UserAgent, "user-agent", c.UserAgent, "Override the user agent string") | |||||
} | |||||
// CheckFlags checks that the required config values are set. | |||||
func (c *Config) CheckFlags(required RequiredFlags) error { | |||||
var missing []string | |||||
if c.CertFile != "" { | |||||
b, err := ioutil.ReadFile(c.CertFile) | |||||
if err != nil { | |||||
return fmt.Errorf("Failed to load certificates from %s: %v", c.CertFile, err) | |||||
} | |||||
cp := x509.NewCertPool() | |||||
if !cp.AppendCertsFromPEM(b) { | |||||
return fmt.Errorf("Failed to append certificates from %s", c.CertFile) | |||||
} | |||||
c.TLSCreds = credentials.NewTLS(&tls.Config{RootCAs: cp}) | |||||
} | |||||
if required != NoneRequired { | |||||
c.SetFromGcloud() | |||||
} | |||||
if required&ProjectRequired != 0 && c.Project == "" { | |||||
missing = append(missing, "-project") | |||||
} | |||||
if required&InstanceRequired != 0 && c.Instance == "" { | |||||
missing = append(missing, "-instance") | |||||
} | |||||
if len(missing) > 0 { | |||||
return fmt.Errorf("Missing %s", strings.Join(missing, " and ")) | |||||
} | |||||
return nil | |||||
} | |||||
// Filename returns the filename consulted for standard configuration.
// TODO(dsymonds): Might need tweaking for Windows.
func Filename() string {
	home := os.Getenv("HOME")
	return filepath.Join(home, ".cbtrc")
}
// Load loads a .cbtrc file. | |||||
// If the file is not present, an empty config is returned. | |||||
func Load() (*Config, error) { | |||||
filename := Filename() | |||||
data, err := ioutil.ReadFile(filename) | |||||
if err != nil { | |||||
// silent fail if the file isn't there | |||||
if os.IsNotExist(err) { | |||||
return &Config{}, nil | |||||
} | |||||
return nil, fmt.Errorf("Reading %s: %v", filename, err) | |||||
} | |||||
c := new(Config) | |||||
s := bufio.NewScanner(bytes.NewReader(data)) | |||||
for s.Scan() { | |||||
line := s.Text() | |||||
i := strings.Index(line, "=") | |||||
if i < 0 { | |||||
return nil, fmt.Errorf("Bad line in %s: %q", filename, line) | |||||
} | |||||
key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:]) | |||||
switch key { | |||||
default: | |||||
return nil, fmt.Errorf("Unknown key in %s: %q", filename, key) | |||||
case "project": | |||||
c.Project = val | |||||
case "instance": | |||||
c.Instance = val | |||||
case "creds": | |||||
c.Creds = val | |||||
case "admin-endpoint": | |||||
c.AdminEndpoint = val | |||||
case "data-endpoint": | |||||
c.DataEndpoint = val | |||||
case "cert-file": | |||||
c.CertFile = val | |||||
case "user-agent": | |||||
c.UserAgent = val | |||||
} | |||||
} | |||||
return c, s.Err() | |||||
} | |||||
// GcloudCredential holds the subset of the gcloud config-helper
// credential output that this package consumes.
type GcloudCredential struct {
	AccessToken string    `json:"access_token"`
	Expiry      time.Time `json:"token_expiry"`
}

// Token converts the gcloud credential into an oauth2 bearer token.
func (cred *GcloudCredential) Token() *oauth2.Token {
	return &oauth2.Token{AccessToken: cred.AccessToken, TokenType: "Bearer", Expiry: cred.Expiry}
}
// GcloudConfig mirrors the JSON emitted by
// "gcloud config config-helper": the active core project plus the
// current credential.
type GcloudConfig struct {
	Configuration struct {
		Properties struct {
			Core struct {
				Project string `json:"project"`
			} `json:"core"`
		} `json:"properties"`
	} `json:"configuration"`
	Credential GcloudCredential `json:"credential"`
}
type GcloudCmdTokenSource struct { | |||||
Command string | |||||
Args []string | |||||
} | |||||
// Token implements the oauth2.TokenSource interface | |||||
func (g *GcloudCmdTokenSource) Token() (*oauth2.Token, error) { | |||||
gcloudConfig, err := LoadGcloudConfig(g.Command, g.Args) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return gcloudConfig.Credential.Token(), nil | |||||
} | |||||
// LoadGcloudConfig retrieves the gcloud configuration values we need use via the | |||||
// 'config-helper' command | |||||
func LoadGcloudConfig(gcloudCmd string, gcloudCmdArgs []string) (*GcloudConfig, error) { | |||||
out, err := exec.Command(gcloudCmd, gcloudCmdArgs...).Output() | |||||
if err != nil { | |||||
return nil, fmt.Errorf("Could not retrieve gcloud configuration") | |||||
} | |||||
var gcloudConfig GcloudConfig | |||||
if err := json.Unmarshal(out, &gcloudConfig); err != nil { | |||||
return nil, fmt.Errorf("Could not parse gcloud configuration") | |||||
} | |||||
return &gcloudConfig, nil | |||||
} | |||||
// SetFromGcloud retrieves and sets any missing config values from the gcloud
// configuration if possible.
//
// An explicit -creds flag wins and is exported as
// GOOGLE_APPLICATION_CREDENTIALS; otherwise that environment variable is
// consulted. Only when creds or project remain unresolved is the gcloud
// config-helper invoked, and its credential is wrapped in a refreshing
// token source.
func (c *Config) SetFromGcloud() error {
	if c.Creds == "" {
		c.Creds = os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
		if c.Creds == "" {
			log.Printf("-creds flag unset, will use gcloud credential")
		}
	} else {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", c.Creds)
	}
	if c.Project == "" {
		log.Printf("-project flag unset, will use gcloud active project")
	}
	if c.Creds != "" && c.Project != "" {
		// Everything resolved without consulting gcloud.
		return nil
	}
	gcloudCmd := "gcloud"
	if runtime.GOOS == "windows" {
		gcloudCmd = gcloudCmd + ".cmd"
	}
	gcloudCmdArgs := []string{"config", "config-helper",
		"--format=json(configuration.properties.core.project,credential)"}
	gcloudConfig, err := LoadGcloudConfig(gcloudCmd, gcloudCmdArgs)
	if err != nil {
		return err
	}
	if c.Project == "" && gcloudConfig.Configuration.Properties.Core.Project != "" {
		log.Printf("gcloud active project is \"%s\"",
			gcloudConfig.Configuration.Properties.Core.Project)
		c.Project = gcloudConfig.Configuration.Properties.Core.Project
	}
	if c.Creds == "" {
		// Reuse the cached token until it expires, then shell out to
		// gcloud again for a fresh one.
		c.TokenSource = oauth2.ReuseTokenSource(
			gcloudConfig.Credential.Token(),
			&GcloudCmdTokenSource{Command: gcloudCmd, Args: gcloudCmdArgs})
	}
	return nil
}
@@ -0,0 +1,106 @@ | |||||
/* | |||||
Copyright 2016 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
package gax | |||||
import ( | |||||
"time" | |||||
"google.golang.org/grpc/codes" | |||||
) | |||||
// CallOption is a setting that mutates a CallSettings value.
type CallOption interface {
	Resolve(*CallSettings)
}

// callOptions is an ordered list of options applied in sequence.
type callOptions []CallOption

// Resolve applies every option to s, in order, and returns s.
func (opts callOptions) Resolve(s *CallSettings) *CallSettings {
	for _, opt := range opts {
		opt.Resolve(s)
	}
	return s
}
// CallSettings encapsulates the call settings for a particular API call.
type CallSettings struct {
	Timeout       time.Duration
	RetrySettings RetrySettings
}

// RetrySettings holds per-call configurable settings for retrying upon
// transient failure.
type RetrySettings struct {
	RetryCodes      map[codes.Code]bool // status codes considered retryable
	BackoffSettings BackoffSettings
}

// BackoffSettings holds the parameters to the exponential backoff
// algorithm for retrying.
type BackoffSettings struct {
	DelayTimeoutSettings MultipliableDuration
	RPCTimeoutSettings   MultipliableDuration
}

// MultipliableDuration describes a duration that starts at Initial and
// is scaled by Multiplier up to a cap of Max.
type MultipliableDuration struct {
	Initial    time.Duration
	Max        time.Duration
	Multiplier float64
}
func (w CallSettings) Resolve(s *CallSettings) { | |||||
s.Timeout = w.Timeout | |||||
s.RetrySettings = w.RetrySettings | |||||
s.RetrySettings.RetryCodes = make(map[codes.Code]bool, len(w.RetrySettings.RetryCodes)) | |||||
for key, value := range w.RetrySettings.RetryCodes { | |||||
s.RetrySettings.RetryCodes[key] = value | |||||
} | |||||
} | |||||
type withRetryCodes []codes.Code | |||||
func (w withRetryCodes) Resolve(s *CallSettings) { | |||||
s.RetrySettings.RetryCodes = make(map[codes.Code]bool) | |||||
for _, code := range w { | |||||
s.RetrySettings.RetryCodes[code] = true | |||||
} | |||||
} | |||||
// WithRetryCodes sets a list of Google API canonical error codes upon which a | |||||
// retry should be attempted. | |||||
func WithRetryCodes(retryCodes []codes.Code) CallOption { | |||||
return withRetryCodes(retryCodes) | |||||
} | |||||
// withDelayTimeoutSettings carries backoff delay parameters as a CallOption.
type withDelayTimeoutSettings MultipliableDuration

// Resolve installs the delay backoff parameters into s.
func (w withDelayTimeoutSettings) Resolve(s *CallSettings) {
	s.RetrySettings.BackoffSettings.DelayTimeoutSettings = MultipliableDuration(w)
}

// WithDelayTimeoutSettings specifies:
// - The initial delay time, in milliseconds, between the completion of
//   the first failed request and the initiation of the first retrying
//   request.
// - The multiplier by which to increase the delay time between the
//   completion of failed requests, and the initiation of the subsequent
//   retrying request.
// - The maximum delay time, in milliseconds, between requests. When this
//   value is reached, `RetryDelayMultiplier` will no longer be used to
//   increase delay time.
func WithDelayTimeoutSettings(initial time.Duration, max time.Duration, multiplier float64) CallOption {
	return withDelayTimeoutSettings(MultipliableDuration{initial, max, multiplier})
}
@@ -0,0 +1,87 @@ | |||||
/* | |||||
Copyright 2015 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
package gax | |||||
import ( | |||||
"math/rand" | |||||
"time" | |||||
"log" | |||||
"os" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/grpc" | |||||
"google.golang.org/grpc/codes" | |||||
) | |||||
// Logger receives retry notices from invokeWithRetry; set it to nil to
// silence them.
var Logger *log.Logger = log.New(os.Stderr, "", log.LstdFlags)

// A user defined call stub.
type APICall func(context.Context) error
// scaleDuration returns the product of a and mult.
func scaleDuration(a time.Duration, mult float64) time.Duration {
	return time.Duration(float64(a) * mult)
}
// invokeWithRetry calls stub using an exponential backoff retry mechanism | |||||
// based on the values provided in callSettings. | |||||
func invokeWithRetry(ctx context.Context, stub APICall, callSettings CallSettings) error { | |||||
retrySettings := callSettings.RetrySettings | |||||
backoffSettings := callSettings.RetrySettings.BackoffSettings | |||||
delay := backoffSettings.DelayTimeoutSettings.Initial | |||||
for { | |||||
// If the deadline is exceeded... | |||||
if ctx.Err() != nil { | |||||
return ctx.Err() | |||||
} | |||||
err := stub(ctx) | |||||
code := grpc.Code(err) | |||||
if code == codes.OK { | |||||
return nil | |||||
} | |||||
if !retrySettings.RetryCodes[code] { | |||||
return err | |||||
} | |||||
// Sleep a random amount up to the current delay | |||||
d := time.Duration(rand.Int63n(int64(delay))) | |||||
delayCtx, _ := context.WithTimeout(ctx, delay) | |||||
if Logger != nil { | |||||
Logger.Printf("Retryable error: %v, retrying in %v", err, d) | |||||
} | |||||
<-delayCtx.Done() | |||||
delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier) | |||||
if delay > backoffSettings.DelayTimeoutSettings.Max { | |||||
delay = backoffSettings.DelayTimeoutSettings.Max | |||||
} | |||||
} | |||||
} | |||||
// Invoke calls stub with a child of context modified by the specified options. | |||||
func Invoke(ctx context.Context, stub APICall, opts ...CallOption) error { | |||||
settings := &CallSettings{} | |||||
callOptions(opts).Resolve(settings) | |||||
if len(settings.RetrySettings.RetryCodes) > 0 { | |||||
return invokeWithRetry(ctx, stub, *settings) | |||||
} | |||||
return stub(ctx) | |||||
} |
@@ -0,0 +1,49 @@ | |||||
/* | |||||
Copyright 2015 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
package gax | |||||
import ( | |||||
"testing" | |||||
"time" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/grpc/codes" | |||||
"google.golang.org/grpc/status" | |||||
) | |||||
func TestRandomizedDelays(t *testing.T) { | |||||
max := 200 * time.Millisecond | |||||
settings := []CallOption{ | |||||
WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}), | |||||
WithDelayTimeoutSettings(10*time.Millisecond, max, 1.5), | |||||
} | |||||
deadline := time.Now().Add(1 * time.Second) | |||||
ctx, _ := context.WithDeadline(context.Background(), deadline) | |||||
var invokeTime time.Time | |||||
_ = Invoke(ctx, func(childCtx context.Context) error { | |||||
// Keep failing, make sure we never slept more than max (plus a fudge factor) | |||||
if !invokeTime.IsZero() { | |||||
if got, want := time.Since(invokeTime), max; got > (want + 20*time.Millisecond) { | |||||
t.Logf("Slept too long. Got: %v, want: %v", got, max) | |||||
} | |||||
} | |||||
invokeTime = time.Now() | |||||
// Workaround for `go vet`: https://github.com/grpc/grpc-go/issues/90 | |||||
errf := status.Errorf | |||||
return errf(codes.Unavailable, "") | |||||
}, settings...) | |||||
} |
@@ -0,0 +1,48 @@ | |||||
/* | |||||
Copyright 2015 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
// Package option contains common code for dealing with client options. | |||||
package option | |||||
import ( | |||||
"fmt" | |||||
"os" | |||||
"google.golang.org/api/option" | |||||
"google.golang.org/grpc" | |||||
) | |||||
// DefaultClientOptions returns the default client options to use for the | |||||
// client's gRPC connection. | |||||
func DefaultClientOptions(endpoint, scope, userAgent string) ([]option.ClientOption, error) { | |||||
var o []option.ClientOption | |||||
// Check the environment variables for the bigtable emulator. | |||||
// Dial it directly and don't pass any credentials. | |||||
if addr := os.Getenv("BIGTABLE_EMULATOR_HOST"); addr != "" { | |||||
conn, err := grpc.Dial(addr, grpc.WithInsecure()) | |||||
if err != nil { | |||||
return nil, fmt.Errorf("emulator grpc.Dial: %v", err) | |||||
} | |||||
o = []option.ClientOption{option.WithGRPCConn(conn)} | |||||
} else { | |||||
o = []option.ClientOption{ | |||||
option.WithEndpoint(endpoint), | |||||
option.WithScopes(scope), | |||||
option.WithUserAgent(userAgent), | |||||
} | |||||
} | |||||
return o, nil | |||||
} |
@@ -0,0 +1,149 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package stat | |||||
import ( | |||||
"bytes" | |||||
"encoding/csv" | |||||
"fmt" | |||||
"io" | |||||
"math" | |||||
"sort" | |||||
"strconv" | |||||
"text/tabwriter" | |||||
"time" | |||||
) | |||||
// byDuration sorts a slice of durations ascending via sort.Sort.
type byDuration []time.Duration

func (data byDuration) Len() int           { return len(data) }
func (data byDuration) Swap(i, j int)      { data[i], data[j] = data[j], data[i] }
func (data byDuration) Less(i, j int) bool { return data[i] < data[j] }

// quantile returns a value representing the kth of q quantiles.
// May alter the order of data.
//
// The result is linearly interpolated between the two sorted elements
// straddling the exact quantile position; ok is false for empty data or
// out-of-range k/q.
func quantile(data []time.Duration, k, q int) (time.Duration, bool) {
	if len(data) == 0 || k < 0 || q < 1 || k > q {
		return 0, false
	}
	sort.Sort(byDuration(data))
	switch k {
	case 0:
		return data[0], true
	case q:
		return data[len(data)-1], true
	}
	bucketSize := float64(len(data)-1) / float64(q)
	pos := float64(k) * bucketSize
	lower := int(math.Trunc(pos))
	upper := lower
	if pos > float64(lower) && lower+1 < len(data) {
		// The quantile lies between two elements; interpolate.
		upper = lower + 1
	}
	weightUpper := pos - float64(lower)
	weightLower := 1 - weightUpper
	return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true
}
type Aggregate struct { | |||||
Name string | |||||
Count, Errors int | |||||
Min, Median, Max time.Duration | |||||
P75, P90, P95, P99 time.Duration // percentiles | |||||
} | |||||
// NewAggregate constructs an aggregate from latencies. Returns nil if latencies does not contain aggregateable data. | |||||
func NewAggregate(name string, latencies []time.Duration, errorCount int) *Aggregate { | |||||
agg := Aggregate{Name: name, Count: len(latencies), Errors: errorCount} | |||||
if len(latencies) == 0 { | |||||
return nil | |||||
} | |||||
var ok bool | |||||
if agg.Min, ok = quantile(latencies, 0, 2); !ok { | |||||
return nil | |||||
} | |||||
if agg.Median, ok = quantile(latencies, 1, 2); !ok { | |||||
return nil | |||||
} | |||||
if agg.Max, ok = quantile(latencies, 2, 2); !ok { | |||||
return nil | |||||
} | |||||
if agg.P75, ok = quantile(latencies, 75, 100); !ok { | |||||
return nil | |||||
} | |||||
if agg.P90, ok = quantile(latencies, 90, 100); !ok { | |||||
return nil | |||||
} | |||||
if agg.P95, ok = quantile(latencies, 95, 100); !ok { | |||||
return nil | |||||
} | |||||
if agg.P99, ok = quantile(latencies, 99, 100); !ok { | |||||
return nil | |||||
} | |||||
return &agg | |||||
} | |||||
func (agg *Aggregate) String() string { | |||||
if agg == nil { | |||||
return "no data" | |||||
} | |||||
var buf bytes.Buffer | |||||
tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding | |||||
fmt.Fprintf(tw, "min:\t%v\nmedian:\t%v\nmax:\t%v\n95th percentile:\t%v\n99th percentile:\t%v\n", | |||||
agg.Min, agg.Median, agg.Max, agg.P95, agg.P99) | |||||
tw.Flush() | |||||
return buf.String() | |||||
} | |||||
// WriteCSV writes a csv file to the given Writer, | |||||
// with a header row and one row per aggregate. | |||||
func WriteCSV(aggs []*Aggregate, iow io.Writer) (err error) { | |||||
w := csv.NewWriter(iow) | |||||
defer func() { | |||||
w.Flush() | |||||
if err == nil { | |||||
err = w.Error() | |||||
} | |||||
}() | |||||
err = w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"}) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
for _, agg := range aggs { | |||||
err = w.Write([]string{ | |||||
agg.Name, strconv.Itoa(agg.Count), strconv.Itoa(agg.Errors), | |||||
agg.Min.String(), agg.Median.String(), agg.Max.String(), | |||||
agg.P75.String(), agg.P90.String(), agg.P95.String(), agg.P99.String(), | |||||
}) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
} | |||||
return nil | |||||
} |
@@ -0,0 +1,36 @@ | |||||
// Copyright 2017 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// +build !go1.8 | |||||
package bigtable | |||||
import ( | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/option" | |||||
) | |||||
// OpenCensus only supports go 1.8 and higher.
// These stubs satisfy the same call sites as the instrumented versions
// built for go1.8+, but do nothing.

// openCensusOptions reports no extra client options on pre-1.8 Go.
func openCensusOptions() []option.ClientOption { return nil }

// traceStartSpan returns ctx unchanged (no span is started).
func traceStartSpan(ctx context.Context, _ string) context.Context {
	return ctx
}

// traceEndSpan is a no-op on pre-1.8 Go.
func traceEndSpan(context.Context, error) {
}

// tracePrintf is a no-op on pre-1.8 Go.
func tracePrintf(context.Context, map[string]interface{}, string, ...interface{}) {
}
@@ -0,0 +1,250 @@ | |||||
/* | |||||
Copyright 2016 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
package bigtable | |||||
import ( | |||||
"bytes" | |||||
"fmt" | |||||
btpb "google.golang.org/genproto/googleapis/bigtable/v2" | |||||
) | |||||
// A Row is returned by ReadRows. The map is keyed by column family (the prefix | |||||
// of the column name before the colon). The values are the returned ReadItems | |||||
// for that column family in the order returned by Read. | |||||
type Row map[string][]ReadItem | |||||
// Key returns the row's key, or "" if the row is empty. | |||||
func (r Row) Key() string { | |||||
for _, items := range r { | |||||
if len(items) > 0 { | |||||
return items[0].Row | |||||
} | |||||
} | |||||
return "" | |||||
} | |||||
// A ReadItem is returned by Read. A ReadItem contains data from a specific row and column. | |||||
type ReadItem struct { | |||||
Row, Column string | |||||
Timestamp Timestamp | |||||
Value []byte | |||||
} | |||||
// The current state of the read rows state machine.
type rrState int64

const (
	newRow         rrState = iota // no partial data; expecting a chunk with a full cell key
	rowInProgress                 // inside a row; expecting more cells, a reset, or a commit
	cellInProgress                // inside a split cell value; expecting continuation chunks
)
// chunkReader handles cell chunks from the read rows response and combines
// them into full Rows.
type chunkReader struct {
	state   rrState
	curKey  []byte // row key of the row being assembled
	curFam  string // column family of the current cell
	curQual []byte // column qualifier of the current cell
	curTS   int64  // timestamp (microseconds) of the current cell
	curVal  []byte // accumulated value bytes of the current cell
	curRow  Row    // row under construction
	lastKey string // key of the last committed row, used for ordering checks
}

// newChunkReader returns a new chunkReader for handling read rows responses.
func newChunkReader() *chunkReader {
	return &chunkReader{state: newRow}
}
// Process takes a cell chunk and returns a new Row if the given chunk
// completes a Row, or nil otherwise.
//
// The reader is a three-state machine (newRow, rowInProgress,
// cellInProgress); each chunk is validated against the current state
// before its data is folded into the row under construction. A
// reset_row chunk discards the partially built row.
func (cr *chunkReader) Process(cc *btpb.ReadRowsResponse_CellChunk) (Row, error) {
	var row Row
	switch cr.state {
	case newRow:
		// First chunk of a row: it must carry the complete cell key.
		if err := cr.validateNewRow(cc); err != nil {
			return nil, err
		}
		cr.curRow = make(Row)
		cr.curKey = cc.RowKey
		cr.curFam = cc.FamilyName.Value
		cr.curQual = cc.Qualifier.Value
		cr.curTS = cc.TimestampMicros
		row = cr.handleCellValue(cc)
	case rowInProgress:
		if err := cr.validateRowInProgress(cc); err != nil {
			return nil, err
		}
		if cc.GetResetRow() {
			cr.resetToNewRow()
			return nil, nil
		}
		// Family and qualifier are optional here: an absent field means
		// the new cell reuses the previous cell's value.
		if cc.FamilyName != nil {
			cr.curFam = cc.FamilyName.Value
		}
		if cc.Qualifier != nil {
			cr.curQual = cc.Qualifier.Value
		}
		cr.curTS = cc.TimestampMicros
		row = cr.handleCellValue(cc)
	case cellInProgress:
		// Continuation chunk of a split cell value.
		if err := cr.validateCellInProgress(cc); err != nil {
			return nil, err
		}
		if cc.GetResetRow() {
			cr.resetToNewRow()
			return nil, nil
		}
		row = cr.handleCellValue(cc)
	}
	return row, nil
}
// Close must be called after all cell chunks from the response | |||||
// have been processed. An error will be returned if the reader is | |||||
// in an invalid state, in which case the error should be propagated to the caller. | |||||
func (cr *chunkReader) Close() error { | |||||
if cr.state != newRow { | |||||
return fmt.Errorf("invalid state for end of stream %q", cr.state) | |||||
} | |||||
return nil | |||||
} | |||||
// handleCellValue returns a Row if the cell value includes a commit, otherwise nil.
func (cr *chunkReader) handleCellValue(cc *btpb.ReadRowsResponse_CellChunk) Row {
	if cc.ValueSize > 0 {
		// ValueSize is specified so expect a split value of ValueSize bytes
		if cr.curVal == nil {
			// Pre-size the buffer for the announced total value size.
			cr.curVal = make([]byte, 0, cc.ValueSize)
		}
		cr.curVal = append(cr.curVal, cc.Value...)
		cr.state = cellInProgress
	} else {
		// This cell is either the complete value or the last chunk of a split
		if cr.curVal == nil {
			// NOTE(review): this aliases the chunk's Value slice rather
			// than copying it — fine as long as the proto buffer is not
			// reused by the caller; verify against the stream producer.
			cr.curVal = cc.Value
		} else {
			cr.curVal = append(cr.curVal, cc.Value...)
		}
		cr.finishCell()
		if cc.GetCommitRow() {
			return cr.commitRow()
		} else {
			cr.state = rowInProgress
		}
	}
	return nil
}
func (cr *chunkReader) finishCell() { | |||||
ri := ReadItem{ | |||||
Row: string(cr.curKey), | |||||
Column: string(cr.curFam) + ":" + string(cr.curQual), | |||||
Timestamp: Timestamp(cr.curTS), | |||||
Value: cr.curVal, | |||||
} | |||||
cr.curRow[cr.curFam] = append(cr.curRow[cr.curFam], ri) | |||||
cr.curVal = nil | |||||
} | |||||
func (cr *chunkReader) commitRow() Row { | |||||
row := cr.curRow | |||||
cr.lastKey = cr.curRow.Key() | |||||
cr.resetToNewRow() | |||||
return row | |||||
} | |||||
func (cr *chunkReader) resetToNewRow() { | |||||
cr.curKey = nil | |||||
cr.curFam = "" | |||||
cr.curQual = nil | |||||
cr.curVal = nil | |||||
cr.curRow = nil | |||||
cr.curTS = 0 | |||||
cr.state = newRow | |||||
} | |||||
func (cr *chunkReader) validateNewRow(cc *btpb.ReadRowsResponse_CellChunk) error { | |||||
if cc.GetResetRow() { | |||||
return fmt.Errorf("reset_row not allowed between rows") | |||||
} | |||||
if cc.RowKey == nil || cc.FamilyName == nil || cc.Qualifier == nil { | |||||
return fmt.Errorf("missing key field for new row %v", cc) | |||||
} | |||||
if cr.lastKey != "" && cr.lastKey >= string(cc.RowKey) { | |||||
return fmt.Errorf("out of order row key: %q, %q", cr.lastKey, string(cc.RowKey)) | |||||
} | |||||
return nil | |||||
} | |||||
func (cr *chunkReader) validateRowInProgress(cc *btpb.ReadRowsResponse_CellChunk) error { | |||||
if err := cr.validateRowStatus(cc); err != nil { | |||||
return err | |||||
} | |||||
if cc.RowKey != nil && !bytes.Equal(cc.RowKey, cr.curKey) { | |||||
return fmt.Errorf("received new row key %q during existing row %q", cc.RowKey, cr.curKey) | |||||
} | |||||
if cc.FamilyName != nil && cc.Qualifier == nil { | |||||
return fmt.Errorf("family name %q specified without a qualifier", cc.FamilyName) | |||||
} | |||||
return nil | |||||
} | |||||
func (cr *chunkReader) validateCellInProgress(cc *btpb.ReadRowsResponse_CellChunk) error { | |||||
if err := cr.validateRowStatus(cc); err != nil { | |||||
return err | |||||
} | |||||
if cr.curVal == nil { | |||||
return fmt.Errorf("no cached cell while CELL_IN_PROGRESS %v", cc) | |||||
} | |||||
if cc.GetResetRow() == false && cr.isAnyKeyPresent(cc) { | |||||
return fmt.Errorf("cell key components found while CELL_IN_PROGRESS %v", cc) | |||||
} | |||||
return nil | |||||
} | |||||
func (cr *chunkReader) isAnyKeyPresent(cc *btpb.ReadRowsResponse_CellChunk) bool { | |||||
return cc.RowKey != nil || | |||||
cc.FamilyName != nil || | |||||
cc.Qualifier != nil || | |||||
cc.TimestampMicros != 0 | |||||
} | |||||
// Validate a RowStatus, commit or reset, if present. | |||||
func (cr *chunkReader) validateRowStatus(cc *btpb.ReadRowsResponse_CellChunk) error { | |||||
// Resets can't be specified with any other part of a cell | |||||
if cc.GetResetRow() && (cr.isAnyKeyPresent(cc) || | |||||
cc.Value != nil || | |||||
cc.ValueSize != 0 || | |||||
cc.Labels != nil) { | |||||
return fmt.Errorf("reset must not be specified with other fields %v", cc) | |||||
} | |||||
if cc.GetCommitRow() && cc.ValueSize > 0 { | |||||
return fmt.Errorf("commit row found in between chunks in a cell") | |||||
} | |||||
return nil | |||||
} |
@@ -0,0 +1,351 @@ | |||||
/* | |||||
Copyright 2016 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
package bigtable | |||||
import ( | |||||
"encoding/json" | |||||
"fmt" | |||||
"io/ioutil" | |||||
"strings" | |||||
"testing" | |||||
"cloud.google.com/go/internal/testutil" | |||||
"github.com/golang/protobuf/proto" | |||||
"github.com/golang/protobuf/ptypes/wrappers" | |||||
btspb "google.golang.org/genproto/googleapis/bigtable/v2" | |||||
) | |||||
// nilStr is a sentinel used by the cc helper's string arguments.
// Indicates that a field in the proto should be omitted, rather than included
// as a wrapped empty string.
const nilStr = "<>"
func TestSingleCell(t *testing.T) { | |||||
cr := newChunkReader() | |||||
// All in one cell | |||||
row, err := cr.Process(cc("rk", "fm", "col", 1, "value", 0, true)) | |||||
if err != nil { | |||||
t.Fatalf("Processing chunk: %v", err) | |||||
} | |||||
if row == nil { | |||||
t.Fatalf("Missing row") | |||||
} | |||||
if len(row["fm"]) != 1 { | |||||
t.Fatalf("Family name length mismatch %d, %d", 1, len(row["fm"])) | |||||
} | |||||
want := []ReadItem{ri("rk", "fm", "col", 1, "value")} | |||||
if !testutil.Equal(row["fm"], want) { | |||||
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm"], want) | |||||
} | |||||
if err := cr.Close(); err != nil { | |||||
t.Fatalf("Close: %v", err) | |||||
} | |||||
} | |||||
func TestMultipleCells(t *testing.T) { | |||||
cr := newChunkReader() | |||||
mustProcess(t, cr, cc("rs", "fm1", "col1", 0, "val1", 0, false)) | |||||
mustProcess(t, cr, cc("rs", "fm1", "col1", 1, "val2", 0, false)) | |||||
mustProcess(t, cr, cc("rs", "fm1", "col2", 0, "val3", 0, false)) | |||||
mustProcess(t, cr, cc("rs", "fm2", "col1", 0, "val4", 0, false)) | |||||
row, err := cr.Process(cc("rs", "fm2", "col2", 1, "extralongval5", 0, true)) | |||||
if err != nil { | |||||
t.Fatalf("Processing chunk: %v", err) | |||||
} | |||||
if row == nil { | |||||
t.Fatalf("Missing row") | |||||
} | |||||
want := []ReadItem{ | |||||
ri("rs", "fm1", "col1", 0, "val1"), | |||||
ri("rs", "fm1", "col1", 1, "val2"), | |||||
ri("rs", "fm1", "col2", 0, "val3"), | |||||
} | |||||
if !testutil.Equal(row["fm1"], want) { | |||||
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) | |||||
} | |||||
want = []ReadItem{ | |||||
ri("rs", "fm2", "col1", 0, "val4"), | |||||
ri("rs", "fm2", "col2", 1, "extralongval5"), | |||||
} | |||||
if !testutil.Equal(row["fm2"], want) { | |||||
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) | |||||
} | |||||
if err := cr.Close(); err != nil { | |||||
t.Fatalf("Close: %v", err) | |||||
} | |||||
} | |||||
func TestSplitCells(t *testing.T) { | |||||
cr := newChunkReader() | |||||
mustProcess(t, cr, cc("rs", "fm1", "col1", 0, "hello ", 11, false)) | |||||
mustProcess(t, cr, ccData("world", 0, false)) | |||||
row, err := cr.Process(cc("rs", "fm1", "col2", 0, "val2", 0, true)) | |||||
if err != nil { | |||||
t.Fatalf("Processing chunk: %v", err) | |||||
} | |||||
if row == nil { | |||||
t.Fatalf("Missing row") | |||||
} | |||||
want := []ReadItem{ | |||||
ri("rs", "fm1", "col1", 0, "hello world"), | |||||
ri("rs", "fm1", "col2", 0, "val2"), | |||||
} | |||||
if !testutil.Equal(row["fm1"], want) { | |||||
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) | |||||
} | |||||
if err := cr.Close(); err != nil { | |||||
t.Fatalf("Close: %v", err) | |||||
} | |||||
} | |||||
func TestMultipleRows(t *testing.T) { | |||||
cr := newChunkReader() | |||||
row, err := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true)) | |||||
if err != nil { | |||||
t.Fatalf("Processing chunk: %v", err) | |||||
} | |||||
want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} | |||||
if !testutil.Equal(row["fm1"], want) { | |||||
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) | |||||
} | |||||
row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true)) | |||||
if err != nil { | |||||
t.Fatalf("Processing chunk: %v", err) | |||||
} | |||||
want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} | |||||
if !testutil.Equal(row["fm2"], want) { | |||||
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) | |||||
} | |||||
if err := cr.Close(); err != nil { | |||||
t.Fatalf("Close: %v", err) | |||||
} | |||||
} | |||||
func TestBlankQualifier(t *testing.T) { | |||||
cr := newChunkReader() | |||||
row, err := cr.Process(cc("rs1", "fm1", "", 1, "val1", 0, true)) | |||||
if err != nil { | |||||
t.Fatalf("Processing chunk: %v", err) | |||||
} | |||||
want := []ReadItem{ri("rs1", "fm1", "", 1, "val1")} | |||||
if !testutil.Equal(row["fm1"], want) { | |||||
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) | |||||
} | |||||
row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true)) | |||||
if err != nil { | |||||
t.Fatalf("Processing chunk: %v", err) | |||||
} | |||||
want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} | |||||
if !testutil.Equal(row["fm2"], want) { | |||||
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) | |||||
} | |||||
if err := cr.Close(); err != nil { | |||||
t.Fatalf("Close: %v", err) | |||||
} | |||||
} | |||||
func TestReset(t *testing.T) { | |||||
cr := newChunkReader() | |||||
mustProcess(t, cr, cc("rs", "fm1", "col1", 0, "val1", 0, false)) | |||||
mustProcess(t, cr, cc("rs", "fm1", "col1", 1, "val2", 0, false)) | |||||
mustProcess(t, cr, cc("rs", "fm1", "col2", 0, "val3", 0, false)) | |||||
mustProcess(t, cr, ccReset()) | |||||
row := mustProcess(t, cr, cc("rs1", "fm1", "col1", 1, "val1", 0, true)) | |||||
want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} | |||||
if !testutil.Equal(row["fm1"], want) { | |||||
t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want) | |||||
} | |||||
if err := cr.Close(); err != nil { | |||||
t.Fatalf("Close: %v", err) | |||||
} | |||||
} | |||||
func TestNewFamEmptyQualifier(t *testing.T) { | |||||
cr := newChunkReader() | |||||
mustProcess(t, cr, cc("rs", "fm1", "col1", 0, "val1", 0, false)) | |||||
_, err := cr.Process(cc(nilStr, "fm2", nilStr, 0, "val2", 0, true)) | |||||
if err == nil { | |||||
t.Fatalf("Expected error on second chunk with no qualifier set") | |||||
} | |||||
} | |||||
func mustProcess(t *testing.T, cr *chunkReader, cc *btspb.ReadRowsResponse_CellChunk) Row { | |||||
row, err := cr.Process(cc) | |||||
if err != nil { | |||||
t.Fatal(err) | |||||
} | |||||
return row | |||||
} | |||||
// The read rows acceptance test reads a json file specifying a number of tests,
// each consisting of one or more cell chunk text protos and one or more resulting
// cells or errors.

// AcceptanceTest is the top-level shape of the acceptance test JSON file:
// a flat list of test cases.
type AcceptanceTest struct {
	Tests []TestCase `json:"tests"`
}
// TestCase is one acceptance test: a named sequence of CellChunk text
// protos and the set of results (cells or errors) they must produce.
type TestCase struct {
	Name string `json:"name"`
	Chunks []string `json:"chunks"`
	Results []TestResult `json:"results"`
}
// TestResult is a single expected outcome of processing a test case's
// chunks: either one cell (row key, family, qualifier, timestamp, value)
// or an expected processing error.
type TestResult struct {
	RK string `json:"rk"`
	FM string `json:"fm"`
	Qual string `json:"qual"`
	TS int64 `json:"ts"`
	Value string `json:"value"`
	Error bool `json:"error"` // If true, expect an error. Ignore any other field.
}
func TestAcceptance(t *testing.T) { | |||||
testJson, err := ioutil.ReadFile("./testdata/read-rows-acceptance-test.json") | |||||
if err != nil { | |||||
t.Fatalf("could not open acceptance test file %v", err) | |||||
} | |||||
var accTest AcceptanceTest | |||||
err = json.Unmarshal(testJson, &accTest) | |||||
if err != nil { | |||||
t.Fatalf("could not parse acceptance test file: %v", err) | |||||
} | |||||
for _, test := range accTest.Tests { | |||||
runTestCase(t, test) | |||||
} | |||||
} | |||||
func runTestCase(t *testing.T, test TestCase) { | |||||
// Increment an index into the result array as we get results | |||||
cr := newChunkReader() | |||||
var results []TestResult | |||||
var seenErr bool | |||||
for _, chunkText := range test.Chunks { | |||||
// Parse and pass each cell chunk to the ChunkReader | |||||
cc := &btspb.ReadRowsResponse_CellChunk{} | |||||
err := proto.UnmarshalText(chunkText, cc) | |||||
if err != nil { | |||||
t.Errorf("[%s] failed to unmarshal text proto: %s\n%s", test.Name, chunkText, err) | |||||
return | |||||
} | |||||
row, err := cr.Process(cc) | |||||
if err != nil { | |||||
results = append(results, TestResult{Error: true}) | |||||
seenErr = true | |||||
break | |||||
} else { | |||||
// Turn the Row into TestResults | |||||
for fm, ris := range row { | |||||
for _, ri := range ris { | |||||
tr := TestResult{ | |||||
RK: ri.Row, | |||||
FM: fm, | |||||
Qual: strings.Split(ri.Column, ":")[1], | |||||
TS: int64(ri.Timestamp), | |||||
Value: string(ri.Value), | |||||
} | |||||
results = append(results, tr) | |||||
} | |||||
} | |||||
} | |||||
} | |||||
// Only Close if we don't have an error yet, otherwise Close: is expected. | |||||
if !seenErr { | |||||
err := cr.Close() | |||||
if err != nil { | |||||
results = append(results, TestResult{Error: true}) | |||||
} | |||||
} | |||||
got := toSet(results) | |||||
want := toSet(test.Results) | |||||
if !testutil.Equal(got, want) { | |||||
t.Fatalf("[%s]: got: %v\nwant: %v\n", test.Name, got, want) | |||||
} | |||||
} | |||||
func toSet(res []TestResult) map[TestResult]bool { | |||||
set := make(map[TestResult]bool) | |||||
for _, tr := range res { | |||||
set[tr] = true | |||||
} | |||||
return set | |||||
} | |||||
// ri returns a ReadItem for the given components | |||||
func ri(rk string, fm string, qual string, ts int64, val string) ReadItem { | |||||
return ReadItem{Row: rk, Column: fmt.Sprintf("%s:%s", fm, qual), Value: []byte(val), Timestamp: Timestamp(ts)} | |||||
} | |||||
// cc returns a CellChunk proto | |||||
func cc(rk string, fm string, qual string, ts int64, val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk { | |||||
// The components of the cell key are wrapped and can be null or empty | |||||
var rkWrapper []byte | |||||
if rk == nilStr { | |||||
rkWrapper = nil | |||||
} else { | |||||
rkWrapper = []byte(rk) | |||||
} | |||||
var fmWrapper *wrappers.StringValue | |||||
if fm != nilStr { | |||||
fmWrapper = &wrappers.StringValue{Value: fm} | |||||
} else { | |||||
fmWrapper = nil | |||||
} | |||||
var qualWrapper *wrappers.BytesValue | |||||
if qual != nilStr { | |||||
qualWrapper = &wrappers.BytesValue{Value: []byte(qual)} | |||||
} else { | |||||
qualWrapper = nil | |||||
} | |||||
return &btspb.ReadRowsResponse_CellChunk{ | |||||
RowKey: rkWrapper, | |||||
FamilyName: fmWrapper, | |||||
Qualifier: qualWrapper, | |||||
TimestampMicros: ts, | |||||
Value: []byte(val), | |||||
ValueSize: size, | |||||
RowStatus: &btspb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: commit}} | |||||
} | |||||
// ccData returns a CellChunk with only a value and size; every key
// component is omitted (see nilStr), as in a continuation of a split cell.
func ccData(val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk {
	return cc(nilStr, nilStr, nilStr, 0, val, size, commit)
}
// ccReset returns a CellChunk with ResetRow set to true
func ccReset() *btspb.ReadRowsResponse_CellChunk {
	return &btspb.ReadRowsResponse_CellChunk{
		RowStatus: &btspb.ReadRowsResponse_CellChunk_ResetRow{ResetRow: true}}
}
@@ -0,0 +1,381 @@ | |||||
/* | |||||
Copyright 2016 Google LLC | |||||
Licensed under the Apache License, Version 2.0 (the "License"); | |||||
you may not use this file except in compliance with the License. | |||||
You may obtain a copy of the License at | |||||
http://www.apache.org/licenses/LICENSE-2.0 | |||||
Unless required by applicable law or agreed to in writing, software | |||||
distributed under the License is distributed on an "AS IS" BASIS, | |||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
See the License for the specific language governing permissions and | |||||
limitations under the License. | |||||
*/ | |||||
package bigtable | |||||
import ( | |||||
"strings" | |||||
"testing" | |||||
"time" | |||||
"cloud.google.com/go/bigtable/bttest" | |||||
"cloud.google.com/go/bigtable/internal/gax" | |||||
"cloud.google.com/go/internal/testutil" | |||||
"github.com/golang/protobuf/ptypes/wrappers" | |||||
"github.com/google/go-cmp/cmp" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/option" | |||||
btpb "google.golang.org/genproto/googleapis/bigtable/v2" | |||||
rpcpb "google.golang.org/genproto/googleapis/rpc/status" | |||||
"google.golang.org/grpc" | |||||
"google.golang.org/grpc/codes" | |||||
"google.golang.org/grpc/status" | |||||
) | |||||
func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) { | |||||
srv, err := bttest.NewServer("localhost:0", opt...) | |||||
if err != nil { | |||||
return nil, nil, err | |||||
} | |||||
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), grpc.WithBlock()) | |||||
if err != nil { | |||||
return nil, nil, err | |||||
} | |||||
client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn), option.WithGRPCDialOption(grpc.WithBlock())) | |||||
if err != nil { | |||||
return nil, nil, err | |||||
} | |||||
adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn), option.WithGRPCDialOption(grpc.WithBlock())) | |||||
if err != nil { | |||||
return nil, nil, err | |||||
} | |||||
if err := adminClient.CreateTable(context.Background(), "table"); err != nil { | |||||
return nil, nil, err | |||||
} | |||||
if err := adminClient.CreateColumnFamily(context.Background(), "table", "cf"); err != nil { | |||||
return nil, nil, err | |||||
} | |||||
t := client.Open("table") | |||||
cleanupFunc := func() { | |||||
adminClient.Close() | |||||
client.Close() | |||||
srv.Close() | |||||
} | |||||
return t, cleanupFunc, nil | |||||
} | |||||
// TestRetryApply verifies that Table.Apply retries unary MutateRow calls
// on retryable codes (Unavailable) and gives up immediately on
// unretryable ones (FailedPrecondition), for both plain and conditional
// mutations. The interceptor fails the first three MutateRow calls with
// whatever `code` currently holds, so the sequencing of errCount/code
// resets below is load-bearing.
func TestRetryApply(t *testing.T) {
	gax.Logger = nil
	ctx := context.Background()
	errCount := 0
	code := codes.Unavailable // Will be retried
	// Intercept requests and return an error or defer to the underlying handler
	errInjector := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		if strings.HasSuffix(info.FullMethod, "MutateRow") && errCount < 3 {
			errCount++
			return nil, status.Errorf(code, "")
		}
		return handler(ctx, req)
	}
	tbl, cleanup, err := setupFakeServer(grpc.UnaryInterceptor(errInjector))
	if err != nil {
		t.Fatalf("fake server setup: %v", err)
	}
	defer cleanup()
	// Three injected Unavailable errors, then success: Apply must retry through them.
	mut := NewMutation()
	mut.Set("cf", "col", 1000, []byte("val"))
	if err := tbl.Apply(ctx, "row1", mut); err != nil {
		t.Errorf("applying single mutation with retries: %v", err)
	}
	row, err := tbl.ReadRow(ctx, "row1")
	if err != nil {
		t.Errorf("reading single value with retries: %v", err)
	}
	if row == nil {
		t.Errorf("applying single mutation with retries: could not read back row")
	}
	// Re-arm the injector with an unretryable code: Apply must surface the error.
	code = codes.FailedPrecondition // Won't be retried
	errCount = 0
	if err := tbl.Apply(ctx, "row", mut); err == nil {
		t.Errorf("applying single mutation with no retries: no error")
	}
	// Check and mutate
	mutTrue := NewMutation()
	mutTrue.DeleteRow()
	mutFalse := NewMutation()
	mutFalse.Set("cf", "col", 1000, []byte("val"))
	condMut := NewCondMutation(ValueFilter("."), mutTrue, mutFalse)
	errCount = 0
	code = codes.Unavailable // Will be retried
	if err := tbl.Apply(ctx, "row1", condMut); err != nil {
		t.Errorf("conditionally mutating row with retries: %v", err)
	}
	// row1's value matches ValueFilter("."), so mutTrue ran and deleted the row.
	row, err = tbl.ReadRow(ctx, "row1") // row1 already in the table
	if err != nil {
		t.Errorf("reading single value after conditional mutation: %v", err)
	}
	if row != nil {
		t.Errorf("reading single value after conditional mutation: row not deleted")
	}
	// Unretryable code again, this time for the conditional mutation path.
	errCount = 0
	code = codes.FailedPrecondition // Won't be retried
	if err := tbl.Apply(ctx, "row", condMut); err == nil {
		t.Errorf("conditionally mutating row with no retries: no error")
	}
}
func TestRetryApplyBulk(t *testing.T) { | |||||
ctx := context.Background() | |||||
gax.Logger = nil | |||||
// Intercept requests and delegate to an interceptor defined by the test case | |||||
errCount := 0 | |||||
var f func(grpc.ServerStream) error | |||||
errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { | |||||
if strings.HasSuffix(info.FullMethod, "MutateRows") { | |||||
return f(ss) | |||||
} | |||||
return handler(ctx, ss) | |||||
} | |||||
tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector)) | |||||
defer cleanup() | |||||
if err != nil { | |||||
t.Fatalf("fake server setup: %v", err) | |||||
} | |||||
errCount = 0 | |||||
// Test overall request failure and retries | |||||
f = func(ss grpc.ServerStream) error { | |||||
if errCount < 3 { | |||||
errCount++ | |||||
return status.Errorf(codes.Aborted, "") | |||||
} | |||||
return nil | |||||
} | |||||
mut := NewMutation() | |||||
mut.Set("cf", "col", 1, []byte{}) | |||||
errors, err := tbl.ApplyBulk(ctx, []string{"row2"}, []*Mutation{mut}) | |||||
if errors != nil || err != nil { | |||||
t.Errorf("bulk with request failure: got: %v, %v, want: nil", errors, err) | |||||
} | |||||
// Test failures and retries in one request | |||||
errCount = 0 | |||||
m1 := NewMutation() | |||||
m1.Set("cf", "col", 1, []byte{}) | |||||
m2 := NewMutation() | |||||
m2.Set("cf", "col2", 1, []byte{}) | |||||
m3 := NewMutation() | |||||
m3.Set("cf", "col3", 1, []byte{}) | |||||
f = func(ss grpc.ServerStream) error { | |||||
var err error | |||||
req := new(btpb.MutateRowsRequest) | |||||
must(ss.RecvMsg(req)) | |||||
switch errCount { | |||||
case 0: | |||||
// Retryable request failure | |||||
err = status.Errorf(codes.Unavailable, "") | |||||
case 1: | |||||
// Two mutations fail | |||||
must(writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted)) | |||||
err = nil | |||||
case 2: | |||||
// Two failures were retried. One will succeed. | |||||
if want, got := 2, len(req.Entries); want != got { | |||||
t.Errorf("2 bulk retries, got: %d, want %d", got, want) | |||||
} | |||||
must(writeMutateRowsResponse(ss, codes.OK, codes.Aborted)) | |||||
err = nil | |||||
case 3: | |||||
// One failure was retried and will succeed. | |||||
if want, got := 1, len(req.Entries); want != got { | |||||
t.Errorf("1 bulk retry, got: %d, want %d", got, want) | |||||
} | |||||
must(writeMutateRowsResponse(ss, codes.OK)) | |||||
err = nil | |||||
} | |||||
errCount++ | |||||
return err | |||||
} | |||||
errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3}) | |||||
if errors != nil || err != nil { | |||||
t.Errorf("bulk with retries: got: %v, %v, want: nil", errors, err) | |||||
} | |||||
// Test unretryable errors | |||||
niMut := NewMutation() | |||||
niMut.Set("cf", "col", ServerTime, []byte{}) // Non-idempotent | |||||
errCount = 0 | |||||
f = func(ss grpc.ServerStream) error { | |||||
var err error | |||||
req := new(btpb.MutateRowsRequest) | |||||
must(ss.RecvMsg(req)) | |||||
switch errCount { | |||||
case 0: | |||||
// Give non-idempotent mutation a retryable error code. | |||||
// Nothing should be retried. | |||||
must(writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted)) | |||||
err = nil | |||||
case 1: | |||||
t.Errorf("unretryable errors: got one retry, want no retries") | |||||
} | |||||
errCount++ | |||||
return err | |||||
} | |||||
errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut}) | |||||
if err != nil { | |||||
t.Errorf("unretryable errors: request failed %v", err) | |||||
} | |||||
want := []error{ | |||||
status.Errorf(codes.FailedPrecondition, ""), | |||||
status.Errorf(codes.Aborted, ""), | |||||
} | |||||
if !testutil.Equal(want, errors) { | |||||
t.Errorf("unretryable errors: got: %v, want: %v", errors, want) | |||||
} | |||||
// Test individual errors and a deadline exceeded | |||||
f = func(ss grpc.ServerStream) error { | |||||
return writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted) | |||||
} | |||||
ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond) | |||||
errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3}) | |||||
wantErr := context.DeadlineExceeded | |||||
if wantErr != err { | |||||
t.Errorf("deadline exceeded error: got: %v, want: %v", err, wantErr) | |||||
} | |||||
if errors != nil { | |||||
t.Errorf("deadline exceeded errors: got: %v, want: nil", err) | |||||
} | |||||
} | |||||
func writeMutateRowsResponse(ss grpc.ServerStream, codes ...codes.Code) error { | |||||
res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(codes))} | |||||
for i, code := range codes { | |||||
res.Entries[i] = &btpb.MutateRowsResponse_Entry{ | |||||
Index: int64(i), | |||||
Status: &rpcpb.Status{Code: int32(code), Message: ""}, | |||||
} | |||||
} | |||||
return ss.SendMsg(res) | |||||
} | |||||
func TestRetainRowsAfter(t *testing.T) { | |||||
prevRowRange := NewRange("a", "z") | |||||
prevRowKey := "m" | |||||
want := NewRange("m\x00", "z") | |||||
got := prevRowRange.retainRowsAfter(prevRowKey) | |||||
if !testutil.Equal(want, got, cmp.AllowUnexported(RowRange{})) { | |||||
t.Errorf("range retry: got %v, want %v", got, want) | |||||
} | |||||
prevRowRangeList := RowRangeList{NewRange("a", "d"), NewRange("e", "g"), NewRange("h", "l")} | |||||
prevRowKey = "f" | |||||
wantRowRangeList := RowRangeList{NewRange("f\x00", "g"), NewRange("h", "l")} | |||||
got = prevRowRangeList.retainRowsAfter(prevRowKey) | |||||
if !testutil.Equal(wantRowRangeList, got, cmp.AllowUnexported(RowRange{})) { | |||||
t.Errorf("range list retry: got %v, want %v", got, wantRowRangeList) | |||||
} | |||||
prevRowList := RowList{"a", "b", "c", "d", "e", "f"} | |||||
prevRowKey = "b" | |||||
wantList := RowList{"c", "d", "e", "f"} | |||||
got = prevRowList.retainRowsAfter(prevRowKey) | |||||
if !testutil.Equal(wantList, got) { | |||||
t.Errorf("list retry: got %v, want %v", got, wantList) | |||||
} | |||||
} | |||||
func TestRetryReadRows(t *testing.T) { | |||||
ctx := context.Background() | |||||
gax.Logger = nil | |||||
// Intercept requests and delegate to an interceptor defined by the test case | |||||
errCount := 0 | |||||
var f func(grpc.ServerStream) error | |||||
errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { | |||||
if strings.HasSuffix(info.FullMethod, "ReadRows") { | |||||
return f(ss) | |||||
} | |||||
return handler(ctx, ss) | |||||
} | |||||
tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector)) | |||||
defer cleanup() | |||||
if err != nil { | |||||
t.Fatalf("fake server setup: %v", err) | |||||
} | |||||
errCount = 0 | |||||
// Test overall request failure and retries | |||||
f = func(ss grpc.ServerStream) error { | |||||
var err error | |||||
req := new(btpb.ReadRowsRequest) | |||||
must(ss.RecvMsg(req)) | |||||
switch errCount { | |||||
case 0: | |||||
// Retryable request failure | |||||
err = status.Errorf(codes.Unavailable, "") | |||||
case 1: | |||||
// Write two rows then error | |||||
if want, got := "a", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got { | |||||
t.Errorf("first retry, no data received yet: got %q, want %q", got, want) | |||||
} | |||||
must(writeReadRowsResponse(ss, "a", "b")) | |||||
err = status.Errorf(codes.Unavailable, "") | |||||
case 2: | |||||
// Retryable request failure | |||||
if want, got := "b\x00", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got { | |||||
t.Errorf("2 range retries: got %q, want %q", got, want) | |||||
} | |||||
err = status.Errorf(codes.Unavailable, "") | |||||
case 3: | |||||
// Write two more rows | |||||
must(writeReadRowsResponse(ss, "c", "d")) | |||||
err = nil | |||||
} | |||||
errCount++ | |||||
return err | |||||
} | |||||
var got []string | |||||
must(tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool { | |||||
got = append(got, r.Key()) | |||||
return true | |||||
})) | |||||
want := []string{"a", "b", "c", "d"} | |||||
if !testutil.Equal(got, want) { | |||||
t.Errorf("retry range integration: got %v, want %v", got, want) | |||||
} | |||||
} | |||||
func writeReadRowsResponse(ss grpc.ServerStream, rowKeys ...string) error { | |||||
var chunks []*btpb.ReadRowsResponse_CellChunk | |||||
for _, key := range rowKeys { | |||||
chunks = append(chunks, &btpb.ReadRowsResponse_CellChunk{ | |||||
RowKey: []byte(key), | |||||
FamilyName: &wrappers.StringValue{Value: "fm"}, | |||||
Qualifier: &wrappers.BytesValue{Value: []byte("col")}, | |||||
RowStatus: &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true}, | |||||
}) | |||||
} | |||||
return ss.SendMsg(&btpb.ReadRowsResponse{Chunks: chunks}) | |||||
} | |||||
func must(err error) { | |||||
if err != nil { | |||||
panic(err) | |||||
} | |||||
} |
@@ -0,0 +1,277 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// Package civil implements types for civil time, a time-zone-independent | |||||
// representation of time that follows the rules of the proleptic | |||||
// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second | |||||
// minutes. | |||||
// | |||||
// Because they lack location information, these types do not represent unique | |||||
// moments or intervals of time. Use time.Time for that purpose. | |||||
package civil | |||||
import ( | |||||
"fmt" | |||||
"time" | |||||
) | |||||
// A Date represents a date (year, month, day).
//
// This type does not include location information, and therefore does not
// describe a unique 24-hour timespan.
//
// Fields are not validated on assignment; use IsValid to check a Date.
type Date struct {
	Year int // Year (e.g., 2014).
	Month time.Month // Month of the year (January = 1, ...).
	Day int // Day of the month, starting at 1.
}
// DateOf returns the Date in which a time occurs in that time's location. | |||||
func DateOf(t time.Time) Date { | |||||
var d Date | |||||
d.Year, d.Month, d.Day = t.Date() | |||||
return d | |||||
} | |||||
// ParseDate parses a string in RFC3339 full-date format and returns the date value it represents. | |||||
func ParseDate(s string) (Date, error) { | |||||
t, err := time.Parse("2006-01-02", s) | |||||
if err != nil { | |||||
return Date{}, err | |||||
} | |||||
return DateOf(t), nil | |||||
} | |||||
// String returns the date in RFC3339 full-date format. | |||||
func (d Date) String() string { | |||||
return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) | |||||
} | |||||
// IsValid reports whether the date is valid. | |||||
func (d Date) IsValid() bool { | |||||
return DateOf(d.In(time.UTC)) == d | |||||
} | |||||
// In returns the time corresponding to time 00:00:00 of the date in the location.
//
// In is always consistent with time.Date, even when time.Date returns a time
// on a different day. For example, if loc is America/Indiana/Vincennes, then both
// time.Date(1955, time.May, 1, 0, 0, 0, 0, loc)
// and
// civil.Date{Year: 1955, Month: time.May, Day: 1}.In(loc)
// return 23:00:00 on April 30, 1955.
//
// In panics if loc is nil.
func (d Date) In(loc *time.Location) time.Time {
	// Delegating directly to time.Date is what guarantees the consistency
	// described above, including its normalization of skipped local times.
	return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
}
// AddDays returns the date that is n days in the future. | |||||
// n can also be negative to go into the past. | |||||
func (d Date) AddDays(n int) Date { | |||||
return DateOf(d.In(time.UTC).AddDate(0, 0, n)) | |||||
} | |||||
// DaysSince returns the signed number of days between the date and s, not including the end day. | |||||
// This is the inverse operation to AddDays. | |||||
func (d Date) DaysSince(s Date) (days int) { | |||||
// We convert to Unix time so we do not have to worry about leap seconds: | |||||
// Unix time increases by exactly 86400 seconds per day. | |||||
deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() | |||||
return int(deltaUnix / 86400) | |||||
} | |||||
// Before reports whether d1 occurs before d2. | |||||
func (d1 Date) Before(d2 Date) bool { | |||||
if d1.Year != d2.Year { | |||||
return d1.Year < d2.Year | |||||
} | |||||
if d1.Month != d2.Month { | |||||
return d1.Month < d2.Month | |||||
} | |||||
return d1.Day < d2.Day | |||||
} | |||||
// After reports whether d1 occurs after d2.
// It is the mirror of Before: d1.After(d2) == d2.Before(d1).
func (d1 Date) After(d2 Date) bool {
	return d2.Before(d1)
}
// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of d.String().
// The returned error is always nil.
func (d Date) MarshalText() ([]byte, error) {
	return []byte(d.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface. | |||||
// The date is expected to be a string in a format accepted by ParseDate. | |||||
func (d *Date) UnmarshalText(data []byte) error { | |||||
var err error | |||||
*d, err = ParseDate(string(data)) | |||||
return err | |||||
} | |||||
// A Time represents a time with nanosecond precision.
//
// This type does not include location information, and therefore does not
// describe a unique moment in time.
//
// This type exists to represent the TIME type in storage-based APIs like BigQuery.
// Most operations on Times are unlikely to be meaningful. Prefer the DateTime type.
//
// The zero value is midnight, 00:00:00.000000000.
type Time struct {
	Hour int // The hour of the day in 24-hour format; range [0-23]
	Minute int // The minute of the hour; range [0-59]
	Second int // The second of the minute; range [0-59]
	Nanosecond int // The nanosecond of the second; range [0-999999999]
}
// TimeOf returns the Time representing the time of day in which a time occurs | |||||
// in that time's location. It ignores the date. | |||||
func TimeOf(t time.Time) Time { | |||||
var tm Time | |||||
tm.Hour, tm.Minute, tm.Second = t.Clock() | |||||
tm.Nanosecond = t.Nanosecond() | |||||
return tm | |||||
} | |||||
// ParseTime parses a string and returns the time value it represents. | |||||
// ParseTime accepts an extended form of the RFC3339 partial-time format. After | |||||
// the HH:MM:SS part of the string, an optional fractional part may appear, | |||||
// consisting of a decimal point followed by one to nine decimal digits. | |||||
// (RFC3339 admits only one digit after the decimal point). | |||||
func ParseTime(s string) (Time, error) { | |||||
t, err := time.Parse("15:04:05.999999999", s) | |||||
if err != nil { | |||||
return Time{}, err | |||||
} | |||||
return TimeOf(t), nil | |||||
} | |||||
// String returns the date in the format described in ParseTime. If Nanoseconds | |||||
// is zero, no fractional part will be generated. Otherwise, the result will | |||||
// end with a fractional part consisting of a decimal point and nine digits. | |||||
func (t Time) String() string { | |||||
s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) | |||||
if t.Nanosecond == 0 { | |||||
return s | |||||
} | |||||
return s + fmt.Sprintf(".%09d", t.Nanosecond) | |||||
} | |||||
// IsValid reports whether the time is valid. | |||||
func (t Time) IsValid() bool { | |||||
// Construct a non-zero time. | |||||
tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) | |||||
return TimeOf(tm) == t | |||||
} | |||||
// MarshalText implements the encoding.TextMarshaler interface. | |||||
// The output is the result of t.String(). | |||||
func (t Time) MarshalText() ([]byte, error) { | |||||
return []byte(t.String()), nil | |||||
} | |||||
// UnmarshalText implements the encoding.TextUnmarshaler interface. | |||||
// The time is expected to be a string in a format accepted by ParseTime. | |||||
func (t *Time) UnmarshalText(data []byte) error { | |||||
var err error | |||||
*t, err = ParseTime(string(data)) | |||||
return err | |||||
} | |||||
// A DateTime represents a date and time.
//
// This type does not include location information, and therefore does not
// describe a unique moment in time.
//
// Construct values with DateTimeOf or ParseDateTime; convert to a concrete
// instant with In.
type DateTime struct {
	Date Date // the calendar-date component
	Time Time // the wall-clock-time component
}
// Note: We deliberately do not embed Date into DateTime, to avoid promoting AddDays and Sub. | |||||
// DateTimeOf returns the DateTime in which a time occurs in that time's location. | |||||
func DateTimeOf(t time.Time) DateTime { | |||||
return DateTime{ | |||||
Date: DateOf(t), | |||||
Time: TimeOf(t), | |||||
} | |||||
} | |||||
// ParseDateTime parses a string and returns the DateTime it represents. | |||||
// ParseDateTime accepts a variant of the RFC3339 date-time format that omits | |||||
// the time offset but includes an optional fractional time, as described in | |||||
// ParseTime. Informally, the accepted format is | |||||
// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] | |||||
// where the 'T' may be a lower-case 't'. | |||||
func ParseDateTime(s string) (DateTime, error) { | |||||
t, err := time.Parse("2006-01-02T15:04:05.999999999", s) | |||||
if err != nil { | |||||
t, err = time.Parse("2006-01-02t15:04:05.999999999", s) | |||||
if err != nil { | |||||
return DateTime{}, err | |||||
} | |||||
} | |||||
return DateTimeOf(t), nil | |||||
} | |||||
// String returns the date and time in the format described in ParseDateTime:
// the date, a 'T' separator, then the time.
func (dt DateTime) String() string {
	return dt.Date.String() + "T" + dt.Time.String()
}
// IsValid reports whether the datetime is valid. | |||||
func (dt DateTime) IsValid() bool { | |||||
return dt.Date.IsValid() && dt.Time.IsValid() | |||||
} | |||||
// In returns the time corresponding to the DateTime in the given location.
//
// If the time is missing or ambiguous at the location, In returns the same
// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
// both
//    time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
// and
//    civil.DateTime{
//        Date: civil.Date{Year: 1955, Month: time.May, Day: 1},
//        Time: civil.Time{Minute: 30}}.In(loc)
// return 23:30:00 on April 30, 1955.
//
// In panics if loc is nil.
func (dt DateTime) In(loc *time.Location) time.Time {
	return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
}
// Before reports whether dt1 occurs before dt2. | |||||
func (dt1 DateTime) Before(dt2 DateTime) bool { | |||||
return dt1.In(time.UTC).Before(dt2.In(time.UTC)) | |||||
} | |||||
// After reports whether dt1 occurs after dt2. | |||||
func (dt1 DateTime) After(dt2 DateTime) bool { | |||||
return dt2.Before(dt1) | |||||
} | |||||
// MarshalText implements the encoding.TextMarshaler interface. | |||||
// The output is the result of dt.String(). | |||||
func (dt DateTime) MarshalText() ([]byte, error) { | |||||
return []byte(dt.String()), nil | |||||
} | |||||
// UnmarshalText implements the encoding.TextUnmarshaler interface. | |||||
// The datetime is expected to be a string in a format accepted by ParseDateTime | |||||
func (dt *DateTime) UnmarshalText(data []byte) error { | |||||
var err error | |||||
*dt, err = ParseDateTime(string(data)) | |||||
return err | |||||
} |
@@ -0,0 +1,442 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
package civil | |||||
import ( | |||||
"encoding/json" | |||||
"testing" | |||||
"time" | |||||
"github.com/google/go-cmp/cmp" | |||||
) | |||||
// TestDates verifies Date.String formatting (including zero-padding of
// years below 1000) and Date.In conversion to midnight in a given location.
func TestDates(t *testing.T) {
	for _, test := range []struct {
		date     Date
		loc      *time.Location
		wantStr  string
		wantTime time.Time
	}{
		{
			date:     Date{2014, 7, 29},
			loc:      time.Local,
			wantStr:  "2014-07-29",
			wantTime: time.Date(2014, time.July, 29, 0, 0, 0, 0, time.Local),
		},
		{
			date:     DateOf(time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local)),
			loc:      time.UTC,
			wantStr:  "2014-08-20",
			wantTime: time.Date(2014, 8, 20, 0, 0, 0, 0, time.UTC),
		},
		{
			date:     DateOf(time.Date(999, time.January, 26, 0, 0, 0, 0, time.Local)),
			loc:      time.UTC,
			wantStr:  "0999-01-26",
			wantTime: time.Date(999, 1, 26, 0, 0, 0, 0, time.UTC),
		},
	} {
		if got := test.date.String(); got != test.wantStr {
			t.Errorf("%#v.String() = %q, want %q", test.date, got, test.wantStr)
		}
		if got := test.date.In(test.loc); !got.Equal(test.wantTime) {
			t.Errorf("%#v.In(%v) = %v, want %v", test.date, test.loc, got, test.wantTime)
		}
	}
}
// TestDateIsValid checks Date.IsValid over in-range, boundary, zero/negative
// year (allowed), and out-of-range month/day (rejected) values.
func TestDateIsValid(t *testing.T) {
	for _, test := range []struct {
		date Date
		want bool
	}{
		{Date{2014, 7, 29}, true},
		{Date{2000, 2, 29}, true},
		{Date{10000, 12, 31}, true},
		{Date{1, 1, 1}, true},
		{Date{0, 1, 1}, true},  // year zero is OK
		{Date{-1, 1, 1}, true}, // negative year is OK
		{Date{1, 0, 1}, false},
		{Date{1, 1, 0}, false},
		{Date{2016, 1, 32}, false},
		{Date{2016, 13, 1}, false},
		{Date{1, -1, 1}, false},
		{Date{1, 1, -1}, false},
	} {
		got := test.date.IsValid()
		if got != test.want {
			t.Errorf("%#v: got %t, want %t", test.date, got, test.want)
		}
	}
}
func TestParseDate(t *testing.T) { | |||||
for _, test := range []struct { | |||||
str string | |||||
want Date // if empty, expect an error | |||||
}{ | |||||
{"2016-01-02", Date{2016, 1, 2}}, | |||||
{"2016-12-31", Date{2016, 12, 31}}, | |||||
{"0003-02-04", Date{3, 2, 4}}, | |||||
{"999-01-26", Date{}}, | |||||
{"", Date{}}, | |||||
{"2016-01-02x", Date{}}, | |||||
} { | |||||
got, err := ParseDate(test.str) | |||||
if got != test.want { | |||||
t.Errorf("ParseDate(%q) = %+v, want %+v", test.str, got, test.want) | |||||
} | |||||
if err != nil && test.want != (Date{}) { | |||||
t.Errorf("Unexpected error %v from ParseDate(%q)", err, test.str) | |||||
} | |||||
} | |||||
} | |||||
// TestDateArithmetic checks that AddDays and DaysSince are inverses across
// year boundaries, leap years, leap seconds, and pre-epoch dates.
func TestDateArithmetic(t *testing.T) {
	for _, test := range []struct {
		desc  string
		start Date
		end   Date
		days  int
	}{
		{
			desc:  "zero days noop",
			start: Date{2014, 5, 9},
			end:   Date{2014, 5, 9},
			days:  0,
		},
		{
			desc:  "crossing a year boundary",
			start: Date{2014, 12, 31},
			end:   Date{2015, 1, 1},
			days:  1,
		},
		{
			desc:  "negative number of days",
			start: Date{2015, 1, 1},
			end:   Date{2014, 12, 31},
			days:  -1,
		},
		{
			desc:  "full leap year",
			start: Date{2004, 1, 1},
			end:   Date{2005, 1, 1},
			days:  366,
		},
		{
			desc:  "full non-leap year",
			start: Date{2001, 1, 1},
			end:   Date{2002, 1, 1},
			days:  365,
		},
		{
			desc:  "crossing a leap second",
			start: Date{1972, 6, 30},
			end:   Date{1972, 7, 1},
			days:  1,
		},
		{
			desc:  "dates before the unix epoch",
			start: Date{101, 1, 1},
			end:   Date{102, 1, 1},
			days:  365,
		},
	} {
		if got := test.start.AddDays(test.days); got != test.end {
			t.Errorf("[%s] %#v.AddDays(%v) = %#v, want %#v", test.desc, test.start, test.days, got, test.end)
		}
		if got := test.end.DaysSince(test.start); got != test.days {
			t.Errorf("[%s] %#v.Sub(%#v) = %v, want %v", test.desc, test.end, test.start, got, test.days)
		}
	}
}
func TestDateBefore(t *testing.T) { | |||||
for _, test := range []struct { | |||||
d1, d2 Date | |||||
want bool | |||||
}{ | |||||
{Date{2016, 12, 31}, Date{2017, 1, 1}, true}, | |||||
{Date{2016, 1, 1}, Date{2016, 1, 1}, false}, | |||||
{Date{2016, 12, 30}, Date{2016, 12, 31}, true}, | |||||
} { | |||||
if got := test.d1.Before(test.d2); got != test.want { | |||||
t.Errorf("%v.Before(%v): got %t, want %t", test.d1, test.d2, got, test.want) | |||||
} | |||||
} | |||||
} | |||||
func TestDateAfter(t *testing.T) { | |||||
for _, test := range []struct { | |||||
d1, d2 Date | |||||
want bool | |||||
}{ | |||||
{Date{2016, 12, 31}, Date{2017, 1, 1}, false}, | |||||
{Date{2016, 1, 1}, Date{2016, 1, 1}, false}, | |||||
{Date{2016, 12, 30}, Date{2016, 12, 31}, false}, | |||||
} { | |||||
if got := test.d1.After(test.d2); got != test.want { | |||||
t.Errorf("%v.After(%v): got %t, want %t", test.d1, test.d2, got, test.want) | |||||
} | |||||
} | |||||
} | |||||
// TestTimeToString verifies ParseTime on fractional-second inputs and,
// for canonical strings (roundTrip), that Time.String reproduces the input.
func TestTimeToString(t *testing.T) {
	for _, test := range []struct {
		str       string
		time      Time
		roundTrip bool // ParseTime(str).String() == str?
	}{
		{"13:26:33", Time{13, 26, 33, 0}, true},
		{"01:02:03.000023456", Time{1, 2, 3, 23456}, true},
		{"00:00:00.000000001", Time{0, 0, 0, 1}, true},
		{"13:26:03.1", Time{13, 26, 3, 100000000}, false},
		{"13:26:33.0000003", Time{13, 26, 33, 300}, false},
	} {
		gotTime, err := ParseTime(test.str)
		if err != nil {
			t.Errorf("ParseTime(%q): got error: %v", test.str, err)
			continue
		}
		if gotTime != test.time {
			t.Errorf("ParseTime(%q) = %+v, want %+v", test.str, gotTime, test.time)
		}
		if test.roundTrip {
			gotStr := test.time.String()
			if gotStr != test.str {
				t.Errorf("%#v.String() = %q, want %q", test.time, gotStr, test.str)
			}
		}
	}
}
// TestTimeOf verifies that TimeOf extracts the clock fields (and drops the
// date) from a time.Time.
func TestTimeOf(t *testing.T) {
	for _, test := range []struct {
		time time.Time
		want Time
	}{
		{time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local), Time{15, 8, 43, 1}},
		{time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), Time{0, 0, 0, 0}},
	} {
		if got := TimeOf(test.time); got != test.want {
			t.Errorf("TimeOf(%v) = %+v, want %+v", test.time, got, test.want)
		}
	}
}
// TestTimeIsValid checks Time.IsValid at both ends of each field's range,
// plus one-past-the-end and negative values for every field.
func TestTimeIsValid(t *testing.T) {
	for _, test := range []struct {
		time Time
		want bool
	}{
		{Time{0, 0, 0, 0}, true},
		{Time{23, 0, 0, 0}, true},
		{Time{23, 59, 59, 999999999}, true},
		{Time{24, 59, 59, 999999999}, false},
		{Time{23, 60, 59, 999999999}, false},
		{Time{23, 59, 60, 999999999}, false},
		{Time{23, 59, 59, 1000000000}, false},
		{Time{-1, 0, 0, 0}, false},
		{Time{0, -1, 0, 0}, false},
		{Time{0, 0, -1, 0}, false},
		{Time{0, 0, 0, -1}, false},
	} {
		got := test.time.IsValid()
		if got != test.want {
			t.Errorf("%#v: got %t, want %t", test.time, got, test.want)
		}
	}
}
// TestDateTimeToString verifies ParseDateTime (both 'T' and 't' separators)
// and, for canonical strings, that DateTime.String reproduces the input.
// The lower-case 't' case is not expected to round-trip: String always emits 'T'.
func TestDateTimeToString(t *testing.T) {
	for _, test := range []struct {
		str       string
		dateTime  DateTime
		roundTrip bool // ParseDateTime(str).String() == str?
	}{
		{"2016-03-22T13:26:33", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 0}}, true},
		{"2016-03-22T13:26:33.000000600", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 600}}, true},
		{"2016-03-22t13:26:33", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 0}}, false},
	} {
		gotDateTime, err := ParseDateTime(test.str)
		if err != nil {
			t.Errorf("ParseDateTime(%q): got error: %v", test.str, err)
			continue
		}
		if gotDateTime != test.dateTime {
			t.Errorf("ParseDateTime(%q) = %+v, want %+v", test.str, gotDateTime, test.dateTime)
		}
		if test.roundTrip {
			gotStr := test.dateTime.String()
			if gotStr != test.str {
				t.Errorf("%#v.String() = %q, want %q", test.dateTime, gotStr, test.str)
			}
		}
	}
}
// TestParseDateTimeErrors checks that ParseDateTime rejects inputs that are
// empty, date-only, time-only, wrongly separated, or have trailing garbage.
func TestParseDateTimeErrors(t *testing.T) {
	for _, str := range []string{
		"",
		"2016-03-22",           // just a date
		"13:26:33",             // just a time
		"2016-03-22 13:26:33",  // wrong separating character
		"2016-03-22T13:26:33x", // extra at end
	} {
		if _, err := ParseDateTime(str); err == nil {
			t.Errorf("ParseDateTime(%q) succeeded, want error", str)
		}
	}
}
// TestDateTimeOf verifies that DateTimeOf splits a time.Time into its
// date and clock components.
func TestDateTimeOf(t *testing.T) {
	for _, test := range []struct {
		time time.Time
		want DateTime
	}{
		{time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local),
			DateTime{Date{2014, 8, 20}, Time{15, 8, 43, 1}}},
		{time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC),
			DateTime{Date{1, 1, 1}, Time{0, 0, 0, 0}}},
	} {
		if got := DateTimeOf(test.time); got != test.want {
			t.Errorf("DateTimeOf(%v) = %+v, want %+v", test.time, got, test.want)
		}
	}
}
// TestDateTimeIsValid spot-checks DateTime.IsValid with one valid value and
// one invalid value on each side (bad date, bad time).
func TestDateTimeIsValid(t *testing.T) {
	// No need to be exhaustive here; it's just Date.IsValid && Time.IsValid.
	for _, test := range []struct {
		dt   DateTime
		want bool
	}{
		{DateTime{Date{2016, 3, 20}, Time{0, 0, 0, 0}}, true},
		{DateTime{Date{2016, -3, 20}, Time{0, 0, 0, 0}}, false},
		{DateTime{Date{2016, 3, 20}, Time{24, 0, 0, 0}}, false},
	} {
		got := test.dt.IsValid()
		if got != test.want {
			t.Errorf("%#v: got %t, want %t", test.dt, got, test.want)
		}
	}
}
func TestDateTimeIn(t *testing.T) { | |||||
dt := DateTime{Date{2016, 1, 2}, Time{3, 4, 5, 6}} | |||||
got := dt.In(time.UTC) | |||||
want := time.Date(2016, 1, 2, 3, 4, 5, 6, time.UTC) | |||||
if !got.Equal(want) { | |||||
t.Errorf("got %v, want %v", got, want) | |||||
} | |||||
} | |||||
// TestDateTimeBefore checks DateTime.Before when the ordering is decided by
// the date, by the time (down to nanoseconds), and for equal values.
func TestDateTimeBefore(t *testing.T) {
	d1 := Date{2016, 12, 31}
	d2 := Date{2017, 1, 1}
	t1 := Time{5, 6, 7, 8}
	t2 := Time{5, 6, 7, 9}
	for _, test := range []struct {
		dt1, dt2 DateTime
		want     bool
	}{
		{DateTime{d1, t1}, DateTime{d2, t1}, true},
		{DateTime{d1, t1}, DateTime{d1, t2}, true},
		{DateTime{d2, t1}, DateTime{d1, t1}, false},
		{DateTime{d2, t1}, DateTime{d2, t1}, false},
	} {
		if got := test.dt1.Before(test.dt2); got != test.want {
			t.Errorf("%v.Before(%v): got %t, want %t", test.dt1, test.dt2, got, test.want)
		}
	}
}
// TestDateTimeAfter mirrors TestDateTimeBefore for DateTime.After.
func TestDateTimeAfter(t *testing.T) {
	d1 := Date{2016, 12, 31}
	d2 := Date{2017, 1, 1}
	t1 := Time{5, 6, 7, 8}
	t2 := Time{5, 6, 7, 9}
	for _, test := range []struct {
		dt1, dt2 DateTime
		want     bool
	}{
		{DateTime{d1, t1}, DateTime{d2, t1}, false},
		{DateTime{d1, t1}, DateTime{d1, t2}, false},
		{DateTime{d2, t1}, DateTime{d1, t1}, true},
		{DateTime{d2, t1}, DateTime{d2, t1}, false},
	} {
		if got := test.dt1.After(test.dt2); got != test.want {
			t.Errorf("%v.After(%v): got %t, want %t", test.dt1, test.dt2, got, test.want)
		}
	}
}
// TestMarshalJSON verifies that Date, Time, and DateTime marshal to their
// String() forms as JSON strings (via their MarshalText methods).
func TestMarshalJSON(t *testing.T) {
	for _, test := range []struct {
		value interface{}
		want  string
	}{
		{Date{1987, 4, 15}, `"1987-04-15"`},
		{Time{18, 54, 2, 0}, `"18:54:02"`},
		{DateTime{Date{1987, 4, 15}, Time{18, 54, 2, 0}}, `"1987-04-15T18:54:02"`},
	} {
		bgot, err := json.Marshal(test.value)
		if err != nil {
			t.Fatal(err)
		}
		if got := string(bgot); got != test.want {
			t.Errorf("%#v: got %s, want %s", test.value, got, test.want)
		}
	}
}
// TestUnmarshalJSON verifies that Date, Time, and DateTime unmarshal from
// JSON strings (including \u-escaped ones, which json decodes before
// UnmarshalText runs), and that malformed inputs produce errors for all
// three types.
func TestUnmarshalJSON(t *testing.T) {
	var d Date
	var tm Time
	var dt DateTime
	for _, test := range []struct {
		data string
		ptr  interface{}
		want interface{}
	}{
		{`"1987-04-15"`, &d, &Date{1987, 4, 15}},
		{`"1987-04-\u0031\u0035"`, &d, &Date{1987, 4, 15}},
		{`"18:54:02"`, &tm, &Time{18, 54, 2, 0}},
		{`"1987-04-15T18:54:02"`, &dt, &DateTime{Date{1987, 4, 15}, Time{18, 54, 2, 0}}},
	} {
		if err := json.Unmarshal([]byte(test.data), test.ptr); err != nil {
			t.Fatalf("%s: %v", test.data, err)
		}
		if !cmp.Equal(test.ptr, test.want) {
			t.Errorf("%s: got %#v, want %#v", test.data, test.ptr, test.want)
		}
	}
	for _, bad := range []string{"", `""`, `"bad"`, `"1987-04-15x"`,
		`19870415`,     // a JSON number
		`11987-04-15x`, // not a JSON string
	} {
		if json.Unmarshal([]byte(bad), &d) == nil {
			t.Errorf("%q, Date: got nil, want error", bad)
		}
		if json.Unmarshal([]byte(bad), &tm) == nil {
			t.Errorf("%q, Time: got nil, want error", bad)
		}
		if json.Unmarshal([]byte(bad), &dt) == nil {
			t.Errorf("%q, DateTime: got nil, want error", bad)
		}
	}
}
@@ -0,0 +1,76 @@ | |||||
// Copyright 2014 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
/* | |||||
Package cloud is the root of the packages used to access Google Cloud | |||||
Services. See https://godoc.org/cloud.google.com/go for a full list | |||||
of sub-packages. | |||||
Client Options | |||||
All clients in sub-packages are configurable via client options. These options are | |||||
described here: https://godoc.org/google.golang.org/api/option. | |||||
Authentication and Authorization | |||||
All the clients in sub-packages support authentication via Google Application Default | |||||
Credentials (see https://cloud.google.com/docs/authentication/production), or | |||||
by providing a JSON key file for a Service Account. See the authentication examples | |||||
in this package for details. | |||||
Timeouts and Cancellation | |||||
By default, all requests in sub-packages will run indefinitely, retrying on transient | |||||
errors when correctness allows. To set timeouts or arrange for cancellation, use | |||||
contexts. See the examples for details. | |||||
Do not attempt to control the initial connection (dialing) of a service by setting a | |||||
timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts | |||||
would be ineffective and would only interfere with credential refreshing, which uses | |||||
the same context. | |||||
Connection Pooling | |||||
Connection pooling differs in clients based on their transport. Cloud | |||||
clients either rely on HTTP or gRPC transports to communicate | |||||
with Google Cloud. | |||||
Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the | |||||
underlying HTTP transport to cache connections for later re-use. These are cached to | |||||
the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in | |||||
http.DefaultTransport. | |||||
For gRPC clients (all others in this repo), connection pooling is configurable. Users
of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client | |||||
option to NewClient calls. This configures the underlying gRPC connections to be | |||||
pooled and addressed in a round robin fashion. | |||||
Using the Libraries with Docker | |||||
Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to | |||||
hang, because gRPC retries indefinitely. See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/928 | |||||
for more information. | |||||
Debugging | |||||
To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See | |||||
https://godoc.org/google.golang.org/grpc/grpclog for more information. | |||||
For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2". | |||||
*/ | |||||
package cloud // import "cloud.google.com/go" |
@@ -0,0 +1,786 @@ | |||||
// Copyright 2018 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// https://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// AUTO-GENERATED CODE. DO NOT EDIT. | |||||
package cloudtasks | |||||
import ( | |||||
"fmt" | |||||
"math" | |||||
"time" | |||||
"cloud.google.com/go/internal/version" | |||||
gax "github.com/googleapis/gax-go" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/iterator" | |||||
"google.golang.org/api/option" | |||||
"google.golang.org/api/transport" | |||||
taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2" | |||||
iampb "google.golang.org/genproto/googleapis/iam/v1" | |||||
"google.golang.org/grpc" | |||||
"google.golang.org/grpc/codes" | |||||
"google.golang.org/grpc/metadata" | |||||
) | |||||
// CallOptions contains the retry settings for each method of Client.
//
// Each field holds the gax call options applied to the correspondingly
// named Client method. Defaults are populated by defaultCallOptions; a nil
// slice leaves the method with no default retry configuration.
type CallOptions struct {
	ListQueues         []gax.CallOption
	GetQueue           []gax.CallOption
	CreateQueue        []gax.CallOption
	UpdateQueue        []gax.CallOption
	DeleteQueue        []gax.CallOption
	PurgeQueue         []gax.CallOption
	PauseQueue         []gax.CallOption
	ResumeQueue        []gax.CallOption
	GetIamPolicy       []gax.CallOption
	SetIamPolicy       []gax.CallOption
	TestIamPermissions []gax.CallOption
	ListTasks          []gax.CallOption
	GetTask            []gax.CallOption
	CreateTask         []gax.CallOption
	DeleteTask         []gax.CallOption
	LeaseTasks         []gax.CallOption
	AcknowledgeTask    []gax.CallOption
	RenewLease         []gax.CallOption
	CancelLease        []gax.CallOption
	RunTask            []gax.CallOption
}
func defaultClientOptions() []option.ClientOption { | |||||
return []option.ClientOption{ | |||||
option.WithEndpoint("cloudtasks.googleapis.com:443"), | |||||
option.WithScopes(DefaultAuthScopes()...), | |||||
} | |||||
} | |||||
func defaultCallOptions() *CallOptions { | |||||
retry := map[[2]string][]gax.CallOption{ | |||||
{"default", "idempotent"}: { | |||||
gax.WithRetry(func() gax.Retryer { | |||||
return gax.OnCodes([]codes.Code{ | |||||
codes.DeadlineExceeded, | |||||
codes.Unavailable, | |||||
}, gax.Backoff{ | |||||
Initial: 100 * time.Millisecond, | |||||
Max: 60000 * time.Millisecond, | |||||
Multiplier: 1.3, | |||||
}) | |||||
}), | |||||
}, | |||||
} | |||||
return &CallOptions{ | |||||
ListQueues: retry[[2]string{"default", "idempotent"}], | |||||
GetQueue: retry[[2]string{"default", "idempotent"}], | |||||
CreateQueue: retry[[2]string{"default", "non_idempotent"}], | |||||
UpdateQueue: retry[[2]string{"default", "non_idempotent"}], | |||||
DeleteQueue: retry[[2]string{"default", "non_idempotent"}], | |||||
PurgeQueue: retry[[2]string{"default", "non_idempotent"}], | |||||
PauseQueue: retry[[2]string{"default", "non_idempotent"}], | |||||
ResumeQueue: retry[[2]string{"default", "non_idempotent"}], | |||||
GetIamPolicy: retry[[2]string{"default", "idempotent"}], | |||||
SetIamPolicy: retry[[2]string{"default", "non_idempotent"}], | |||||
TestIamPermissions: retry[[2]string{"default", "idempotent"}], | |||||
ListTasks: retry[[2]string{"default", "idempotent"}], | |||||
GetTask: retry[[2]string{"default", "idempotent"}], | |||||
CreateTask: retry[[2]string{"default", "non_idempotent"}], | |||||
DeleteTask: retry[[2]string{"default", "idempotent"}], | |||||
LeaseTasks: retry[[2]string{"default", "non_idempotent"}], | |||||
AcknowledgeTask: retry[[2]string{"default", "non_idempotent"}], | |||||
RenewLease: retry[[2]string{"default", "non_idempotent"}], | |||||
CancelLease: retry[[2]string{"default", "non_idempotent"}], | |||||
RunTask: retry[[2]string{"default", "non_idempotent"}], | |||||
} | |||||
} | |||||
// Client is a client for interacting with Cloud Tasks API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type Client struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	client taskspb.CloudTasksClient

	// The call options for this service.
	CallOptions *CallOptions

	// The x-goog-* metadata to be sent with each request.
	// Set by setGoogleClientInfo and attached to every outgoing context.
	xGoogMetadata metadata.MD
}
// NewClient creates a new cloud tasks client. | |||||
// | |||||
// Cloud Tasks allows developers to manage the execution of background | |||||
// work in their applications. | |||||
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { | |||||
conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
c := &Client{ | |||||
conn: conn, | |||||
CallOptions: defaultCallOptions(), | |||||
client: taskspb.NewCloudTasksClient(conn), | |||||
} | |||||
c.setGoogleClientInfo() | |||||
return c, nil | |||||
} | |||||
// Connection returns the client's connection to the API service.
// This is the same *grpc.ClientConn that backs the generated stub and
// that Close will close.
func (c *Client) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
// After Close returns, all methods that use the connection will fail.
func (c *Client) Close() error {
	return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in | |||||
// the `x-goog-api-client` header passed on each request. Intended for | |||||
// use by Google-written clients. | |||||
func (c *Client) setGoogleClientInfo(keyval ...string) { | |||||
kv := append([]string{"gl-go", version.Go()}, keyval...) | |||||
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) | |||||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) | |||||
} | |||||
// ListQueues lists queues.
//
// Queues are returned in lexicographical order.
//
// The returned iterator fetches pages lazily; RPC errors surface from the
// iterator's Next calls, not from ListQueues itself.
func (c *Client) ListQueues(ctx context.Context, req *taskspb.ListQueuesRequest, opts ...gax.CallOption) *QueueIterator {
	// Attach the routing header and the client's standard x-goog metadata.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression caps capacity so the append below cannot
	// clobber the shared default options slice.
	opts = append(c.CallOptions.ListQueues[0:len(c.CallOptions.ListQueues):len(c.CallOptions.ListQueues)], opts...)
	it := &QueueIterator{}
	// InternalFetch retrieves one page: it mutates req's PageToken/PageSize
	// (clamping the size to int32 range) and returns the page plus the
	// next-page token.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*taskspb.Queue, string, error) {
		var resp *taskspb.ListQueuesResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListQueues(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.Queues, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator package: it buffers items
	// on the iterator and reports only the next-page token.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// GetQueue gets a queue. | |||||
func (c *Client) GetQueue(ctx context.Context, req *taskspb.GetQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) { | |||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) | |||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||||
opts = append(c.CallOptions.GetQueue[0:len(c.CallOptions.GetQueue):len(c.CallOptions.GetQueue)], opts...) | |||||
var resp *taskspb.Queue | |||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||||
var err error | |||||
resp, err = c.client.GetQueue(ctx, req, settings.GRPC...) | |||||
return err | |||||
}, opts...) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return resp, nil | |||||
} | |||||
// CreateQueue creates a queue.
//
// Queues created with this method allow tasks to live for a maximum of 31
// days. After a task is 31 days old, the task will be deleted regardless of whether
// it was dispatched or not.
//
// WARNING: Using this method may have unintended side effects if you are
// using an App Engine queue.yaml or queue.xml file to manage your queues.
// Read
// Overview of Queue Management and queue.yaml (at /cloud-tasks/docs/queue-yaml)
// before using this method.
func (c *Client) CreateQueue(ctx context.Context, req *taskspb.CreateQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
	// Attach the routing header and the client's standard x-goog metadata.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full-slice expression caps capacity so the append cannot clobber the
	// shared default options slice.
	opts = append(c.CallOptions.CreateQueue[0:len(c.CallOptions.CreateQueue):len(c.CallOptions.CreateQueue)], opts...)
	var resp *taskspb.Queue
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateQueue(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// UpdateQueue updates a queue.
//
// This method creates the queue if it does not exist and updates
// the queue if it does exist.
//
// Queues created with this method allow tasks to live for a maximum of 31
// days. After a task is 31 days old, the task will be deleted regardless of whether
// it was dispatched or not.
//
// WARNING: Using this method may have unintended side effects if you are
// using an App Engine queue.yaml or queue.xml file to manage your queues.
// Read
// Overview of Queue Management and queue.yaml (at /cloud-tasks/docs/queue-yaml)
// before using this method.
func (c *Client) UpdateQueue(ctx context.Context, req *taskspb.UpdateQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
	// Routing header uses the nested queue name rather than a top-level field.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "queue.name", req.GetQueue().GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Capacity-capped append keeps the shared default options untouched.
	opts = append(c.CallOptions.UpdateQueue[0:len(c.CallOptions.UpdateQueue):len(c.CallOptions.UpdateQueue)], opts...)
	var resp *taskspb.Queue
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.UpdateQueue(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteQueue deletes a queue. | |||||
// | |||||
// This command will delete the queue even if it has tasks in it. | |||||
// | |||||
// Note: If you delete a queue, a queue with the same name can't be created | |||||
// for 7 days. | |||||
// | |||||
// WARNING: Using this method may have unintended side effects if you are | |||||
// using an App Engine queue.yaml or queue.xml file to manage your queues. | |||||
// Read | |||||
// Overview of Queue Management and queue.yaml (at /cloud-tasks/docs/queue-yaml) | |||||
// before using this method. | |||||
func (c *Client) DeleteQueue(ctx context.Context, req *taskspb.DeleteQueueRequest, opts ...gax.CallOption) error { | |||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) | |||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||||
opts = append(c.CallOptions.DeleteQueue[0:len(c.CallOptions.DeleteQueue):len(c.CallOptions.DeleteQueue)], opts...) | |||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||||
var err error | |||||
_, err = c.client.DeleteQueue(ctx, req, settings.GRPC...) | |||||
return err | |||||
}, opts...) | |||||
return err | |||||
} | |||||
// PurgeQueue purges a queue by deleting all of its tasks. | |||||
// | |||||
// All tasks created before this method is called are permanently deleted. | |||||
// | |||||
// Purge operations can take up to one minute to take effect. Tasks | |||||
// might be dispatched before the purge takes effect. A purge is irreversible. | |||||
func (c *Client) PurgeQueue(ctx context.Context, req *taskspb.PurgeQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) { | |||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) | |||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||||
opts = append(c.CallOptions.PurgeQueue[0:len(c.CallOptions.PurgeQueue):len(c.CallOptions.PurgeQueue)], opts...) | |||||
var resp *taskspb.Queue | |||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||||
var err error | |||||
resp, err = c.client.PurgeQueue(ctx, req, settings.GRPC...) | |||||
return err | |||||
}, opts...) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return resp, nil | |||||
} | |||||
// PauseQueue pauses the queue.
//
// If a queue is paused then the system will stop dispatching tasks
// until the queue is resumed via
// [ResumeQueue][google.cloud.tasks.v2beta2.CloudTasks.ResumeQueue]. Tasks can still be added
// when the queue is paused. A queue is paused if its
// [state][google.cloud.tasks.v2beta2.Queue.state] is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
func (c *Client) PauseQueue(ctx context.Context, req *taskspb.PauseQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
	// Attach routing metadata so the backend routes by queue name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Capacity-capped append keeps the shared default options untouched.
	opts = append(c.CallOptions.PauseQueue[0:len(c.CallOptions.PauseQueue):len(c.CallOptions.PauseQueue)], opts...)
	var resp *taskspb.Queue
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.PauseQueue(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ResumeQueue resume a queue.
//
// This method resumes a queue after it has been
// [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED] or
// [DISABLED][google.cloud.tasks.v2beta2.Queue.State.DISABLED]. The state of a queue is stored
// in the queue's [state][google.cloud.tasks.v2beta2.Queue.state]; after calling this method it
// will be set to [RUNNING][google.cloud.tasks.v2beta2.Queue.State.RUNNING].
//
// WARNING: Resuming many high-QPS queues at the same time can
// lead to target overloading. If you are resuming high-QPS
// queues, follow the 500/50/5 pattern described in
// Managing Cloud Tasks Scaling Risks (at /cloud-tasks/pdfs/managing-cloud-tasks-scaling-risks-2017-06-05.pdf).
func (c *Client) ResumeQueue(ctx context.Context, req *taskspb.ResumeQueueRequest, opts ...gax.CallOption) (*taskspb.Queue, error) {
	// Attach routing metadata so the backend routes by queue name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Capacity-capped append keeps the shared default options untouched.
	opts = append(c.CallOptions.ResumeQueue[0:len(c.CallOptions.ResumeQueue):len(c.CallOptions.ResumeQueue)], opts...)
	var resp *taskspb.Queue
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.ResumeQueue(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// GetIamPolicy gets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue]. | |||||
// Returns an empty policy if the resource exists and does not have a policy | |||||
// set. | |||||
// | |||||
// Authorization requires the following Google IAM (at /iam) permission on the | |||||
// specified resource parent: | |||||
// | |||||
// cloudtasks.queues.getIamPolicy | |||||
func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { | |||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", req.GetResource())) | |||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||||
opts = append(c.CallOptions.GetIamPolicy[0:len(c.CallOptions.GetIamPolicy):len(c.CallOptions.GetIamPolicy)], opts...) | |||||
var resp *iampb.Policy | |||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||||
var err error | |||||
resp, err = c.client.GetIamPolicy(ctx, req, settings.GRPC...) | |||||
return err | |||||
}, opts...) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return resp, nil | |||||
} | |||||
// SetIamPolicy sets the access control policy for a [Queue][google.cloud.tasks.v2beta2.Queue]. Replaces any existing
// policy.
//
// Note: The Cloud Console does not check queue-level IAM permissions yet.
// Project-level permissions are required to use the Cloud Console.
//
// Authorization requires the following Google IAM (at /iam) permission on the
// specified resource parent:
//
//	cloudtasks.queues.setIamPolicy
func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
	// IAM requests route by the target resource rather than a queue name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", req.GetResource()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Capacity-capped append keeps the shared default options untouched.
	opts = append(c.CallOptions.SetIamPolicy[0:len(c.CallOptions.SetIamPolicy):len(c.CallOptions.SetIamPolicy)], opts...)
	var resp *iampb.Policy
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.SetIamPolicy(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// TestIamPermissions returns permissions that a caller has on a [Queue][google.cloud.tasks.v2beta2.Queue].
// If the resource does not exist, this will return an empty set of
// permissions, not a [NOT_FOUND][google.rpc.Code.NOT_FOUND] error.
//
// Note: This operation is designed to be used for building permission-aware
// UIs and command-line tools, not for authorization checking. This operation
// may "fail open" without warning.
func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
	// IAM requests route by the target resource rather than a queue name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", req.GetResource()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Capacity-capped append keeps the shared default options untouched.
	opts = append(c.CallOptions.TestIamPermissions[0:len(c.CallOptions.TestIamPermissions):len(c.CallOptions.TestIamPermissions)], opts...)
	var resp *iampb.TestIamPermissionsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.TestIamPermissions(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ListTasks lists the tasks in a queue.
//
// By default, only the [BASIC][google.cloud.tasks.v2beta2.Task.View.BASIC] view is retrieved
// due to performance considerations;
// [response_view][google.cloud.tasks.v2beta2.ListTasksRequest.response_view] controls the
// subset of information which is returned.
func (c *Client) ListTasks(ctx context.Context, req *taskspb.ListTasksRequest, opts ...gax.CallOption) *TaskIterator {
	// Attach routing metadata so the backend can route by the parent queue.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Capacity-capped append keeps the shared default options untouched.
	opts = append(c.CallOptions.ListTasks[0:len(c.CallOptions.ListTasks):len(c.CallOptions.ListTasks)], opts...)
	it := &TaskIterator{}
	// InternalFetch performs one page's RPC. Note it closes over ctx, req, and
	// opts captured above, so req is mutated across pages.
	it.InternalFetch = func(pageSize int, pageToken string) ([]*taskspb.Task, string, error) {
		var resp *taskspb.ListTasksResponse
		req.PageToken = pageToken
		// The proto field is int32; clamp to avoid overflow on 64-bit ints.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTasks(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.Tasks, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator package's paging contract,
	// accumulating results into the iterator's buffer.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}
// GetTask gets a task. | |||||
func (c *Client) GetTask(ctx context.Context, req *taskspb.GetTaskRequest, opts ...gax.CallOption) (*taskspb.Task, error) { | |||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) | |||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||||
opts = append(c.CallOptions.GetTask[0:len(c.CallOptions.GetTask):len(c.CallOptions.GetTask)], opts...) | |||||
var resp *taskspb.Task | |||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||||
var err error | |||||
resp, err = c.client.GetTask(ctx, req, settings.GRPC...) | |||||
return err | |||||
}, opts...) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
return resp, nil | |||||
} | |||||
// CreateTask creates a task and adds it to a queue.
//
// To add multiple tasks at the same time, use
// HTTP batching (at /storage/docs/json_api/v1/how-tos/batch)
// or the batching documentation for your client library, for example
// https://developers.google.com/api-client-library/python/guide/batch.
//
// Tasks cannot be updated after creation; there is no UpdateTask command.
//
// For App Engine queues (at google.cloud.tasks.v2beta2.AppEngineHttpTarget),
// the maximum task size is 100KB.
//
// For pull queues (at google.cloud.tasks.v2beta2.PullTarget), this
// the maximum task size is 1MB.
func (c *Client) CreateTask(ctx context.Context, req *taskspb.CreateTaskRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
	// Attach routing metadata so the backend can route by the parent queue.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Capacity-capped append keeps the shared default options untouched.
	opts = append(c.CallOptions.CreateTask[0:len(c.CallOptions.CreateTask):len(c.CallOptions.CreateTask)], opts...)
	var resp *taskspb.Task
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateTask(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// DeleteTask deletes a task. | |||||
// | |||||
// A task can be deleted if it is scheduled or dispatched. A task | |||||
// cannot be deleted if it has completed successfully or permanently | |||||
// failed. | |||||
func (c *Client) DeleteTask(ctx context.Context, req *taskspb.DeleteTaskRequest, opts ...gax.CallOption) error { | |||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName())) | |||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||||
opts = append(c.CallOptions.DeleteTask[0:len(c.CallOptions.DeleteTask):len(c.CallOptions.DeleteTask)], opts...) | |||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||||
var err error | |||||
_, err = c.client.DeleteTask(ctx, req, settings.GRPC...) | |||||
return err | |||||
}, opts...) | |||||
return err | |||||
} | |||||
// LeaseTasks leases tasks from a pull queue for
// [lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration].
//
// This method is invoked by the worker to obtain a lease. The
// worker must acknowledge the task via
// [AcknowledgeTask][google.cloud.tasks.v2beta2.CloudTasks.AcknowledgeTask] after they have
// performed the work associated with the task.
//
// The [payload][google.cloud.tasks.v2beta2.PullMessage.payload] is intended to store data that
// the worker needs to perform the work associated with the task. To
// return the payloads in the [response][google.cloud.tasks.v2beta2.LeaseTasksResponse], set
// [response_view][google.cloud.tasks.v2beta2.LeaseTasksRequest.response_view] to
// [FULL][google.cloud.tasks.v2beta2.Task.View.FULL].
//
// A maximum of 10 qps of [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks]
// requests are allowed per
// queue. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
// is returned when this limit is
// exceeded. [RESOURCE_EXHAUSTED][google.rpc.Code.RESOURCE_EXHAUSTED]
// is also returned when
// [max_tasks_dispatched_per_second][google.cloud.tasks.v2beta2.RateLimits.max_tasks_dispatched_per_second]
// is exceeded.
func (c *Client) LeaseTasks(ctx context.Context, req *taskspb.LeaseTasksRequest, opts ...gax.CallOption) (*taskspb.LeaseTasksResponse, error) {
	// Attach routing metadata so the backend can route by the parent queue.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", req.GetParent()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Capacity-capped append keeps the shared default options untouched.
	opts = append(c.CallOptions.LeaseTasks[0:len(c.CallOptions.LeaseTasks):len(c.CallOptions.LeaseTasks)], opts...)
	var resp *taskspb.LeaseTasksResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.LeaseTasks(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// AcknowledgeTask acknowledges a pull task.
//
// The worker, that is, the entity that
// [leased][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks] this task must call this method
// to indicate that the work associated with the task has finished.
//
// The worker must acknowledge a task within the
// [lease_duration][google.cloud.tasks.v2beta2.LeaseTasksRequest.lease_duration] or the lease
// will expire and the task will become available to be leased
// again. After the task is acknowledged, it will not be returned
// by a later [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks],
// [GetTask][google.cloud.tasks.v2beta2.CloudTasks.GetTask], or
// [ListTasks][google.cloud.tasks.v2beta2.CloudTasks.ListTasks].
//
// To acknowledge multiple tasks at the same time, use
// HTTP batching (at /storage/docs/json_api/v1/how-tos/batch)
// or the batching documentation for your client library, for example
// https://developers.google.com/api-client-library/python/guide/batch.
func (c *Client) AcknowledgeTask(ctx context.Context, req *taskspb.AcknowledgeTaskRequest, opts ...gax.CallOption) error {
	// Attach routing metadata so the backend routes by task name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Capacity-capped append keeps the shared default options untouched.
	opts = append(c.CallOptions.AcknowledgeTask[0:len(c.CallOptions.AcknowledgeTask):len(c.CallOptions.AcknowledgeTask)], opts...)
	// The RPC has an empty response; only the error is surfaced.
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.client.AcknowledgeTask(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
// RenewLease renew the current lease of a pull task.
//
// The worker can use this method to extend the lease by a new
// duration, starting from now. The new task lease will be
// returned in the task's [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time].
func (c *Client) RenewLease(ctx context.Context, req *taskspb.RenewLeaseRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
	// Attach routing metadata so the backend routes by task name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Capacity-capped append keeps the shared default options untouched.
	opts = append(c.CallOptions.RenewLease[0:len(c.CallOptions.RenewLease):len(c.CallOptions.RenewLease)], opts...)
	var resp *taskspb.Task
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.RenewLease(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// CancelLease cancel a pull task's lease.
//
// The worker can use this method to cancel a task's lease by
// setting its [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] to now. This will
// make the task available to be leased to the next caller of
// [LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks].
func (c *Client) CancelLease(ctx context.Context, req *taskspb.CancelLeaseRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
	// Attach routing metadata so the backend routes by task name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Capacity-capped append keeps the shared default options untouched.
	opts = append(c.CallOptions.CancelLease[0:len(c.CallOptions.CancelLease):len(c.CallOptions.CancelLease)], opts...)
	var resp *taskspb.Task
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CancelLease(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// RunTask forces a task to run now.
//
// When this method is called, Cloud Tasks will dispatch the task, even if
// the task is already running, the queue has reached its [RateLimits][google.cloud.tasks.v2beta2.RateLimits] or
// is [PAUSED][google.cloud.tasks.v2beta2.Queue.State.PAUSED].
//
// This command is meant to be used for manual debugging. For
// example, [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] can be used to retry a failed
// task after a fix has been made or to manually force a task to be
// dispatched now.
//
// The dispatched task is returned. That is, the task that is returned
// contains the [status][google.cloud.tasks.v2beta2.Task.status] after the task is dispatched but
// before the task is received by its target.
//
// If Cloud Tasks receives a successful response from the task's
// target, then the task will be deleted; otherwise the task's
// [schedule_time][google.cloud.tasks.v2beta2.Task.schedule_time] will be reset to the time that
// [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] was called plus the retry delay specified
// in the queue's [RetryConfig][google.cloud.tasks.v2beta2.RetryConfig].
//
// [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] returns
// [NOT_FOUND][google.rpc.Code.NOT_FOUND] when it is called on a
// task that has already succeeded or permanently failed.
//
// [RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask] cannot be called on a
// [pull task][google.cloud.tasks.v2beta2.PullMessage].
func (c *Client) RunTask(ctx context.Context, req *taskspb.RunTaskRequest, opts ...gax.CallOption) (*taskspb.Task, error) {
	// Attach routing metadata so the backend routes by task name.
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", req.GetName()))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Capacity-capped append keeps the shared default options untouched.
	opts = append(c.CallOptions.RunTask[0:len(c.CallOptions.RunTask):len(c.CallOptions.RunTask)], opts...)
	var resp *taskspb.Task
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.RunTask(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// QueueIterator manages a stream of *taskspb.Queue.
type QueueIterator struct {
	items    []*taskspb.Queue   // buffered results not yet handed out by Next
	pageInfo *iterator.PageInfo // paging state shared with the iterator package
	nextFunc func() error       // advances paging; set by iterator.NewPageInfo

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*taskspb.Queue, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *QueueIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *QueueIterator) Next() (*taskspb.Queue, error) {
	var item *taskspb.Queue
	// nextFunc refills it.items when empty; it returns iterator.Done at the end.
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	// Pop the first buffered item.
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// bufLen reports how many fetched results are buffered (iterator callback).
func (it *QueueIterator) bufLen() int {
	return len(it.items)
}

// takeBuf hands the buffered results to the pager and clears the buffer
// (iterator callback).
func (it *QueueIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
// TaskIterator manages a stream of *taskspb.Task. | |||||
type TaskIterator struct { | |||||
items []*taskspb.Task | |||||
pageInfo *iterator.PageInfo | |||||
nextFunc func() error | |||||
// InternalFetch is for use by the Google Cloud Libraries only. | |||||
// It is not part of the stable interface of this package. | |||||
// | |||||
// InternalFetch returns results from a single call to the underlying RPC. | |||||
// The number of results is no greater than pageSize. | |||||
// If there are no more results, nextPageToken is empty and err is nil. | |||||
InternalFetch func(pageSize int, pageToken string) (results []*taskspb.Task, nextPageToken string, err error) | |||||
} | |||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. | |||||
func (it *TaskIterator) PageInfo() *iterator.PageInfo { | |||||
return it.pageInfo | |||||
} | |||||
// Next returns the next result. Its second return value is iterator.Done if there are no more | |||||
// results. Once Next returns Done, all subsequent calls will return Done. | |||||
func (it *TaskIterator) Next() (*taskspb.Task, error) { | |||||
var item *taskspb.Task | |||||
if err := it.nextFunc(); err != nil { | |||||
return item, err | |||||
} | |||||
item = it.items[0] | |||||
it.items = it.items[1:] | |||||
return item, nil | |||||
} | |||||
func (it *TaskIterator) bufLen() int { | |||||
return len(it.items) | |||||
} | |||||
func (it *TaskIterator) takeBuf() interface{} { | |||||
b := it.items | |||||
it.items = nil | |||||
return b | |||||
} |
@@ -0,0 +1,401 @@ | |||||
// Copyright 2018 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// https://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// AUTO-GENERATED CODE. DO NOT EDIT. | |||||
package cloudtasks_test | |||||
import ( | |||||
"cloud.google.com/go/cloudtasks/apiv2beta2" | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/api/iterator" | |||||
taskspb "google.golang.org/genproto/googleapis/cloud/tasks/v2beta2" | |||||
iampb "google.golang.org/genproto/googleapis/iam/v1" | |||||
) | |||||
// ExampleNewClient shows the minimal client construction for the package docs.
func ExampleNewClient() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
// ExampleClient_ListQueues shows the standard iterator loop, terminating on
// iterator.Done.
func ExampleClient_ListQueues() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.ListQueuesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListQueues(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleClient_GetQueue shows a unary request/response call on the client.
func ExampleClient_GetQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.GetQueueRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_CreateQueue shows creating a queue from a request struct.
func ExampleClient_CreateQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.CreateQueueRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_UpdateQueue shows updating (or upserting) a queue.
func ExampleClient_UpdateQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.UpdateQueueRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_DeleteQueue shows a call that returns only an error.
func ExampleClient_DeleteQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &taskspb.DeleteQueueRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
// ExampleClient_PurgeQueue demonstrates removing all tasks from a queue with
// Client.PurgeQueue.
func ExampleClient_PurgeQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &taskspb.PurgeQueueRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.PurgeQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_PauseQueue demonstrates pausing task dispatch for a queue with
// Client.PauseQueue.
func ExampleClient_PauseQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &taskspb.PauseQueueRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.PauseQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_ResumeQueue demonstrates resuming a paused queue with
// Client.ResumeQueue.
func ExampleClient_ResumeQueue() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &taskspb.ResumeQueueRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ResumeQueue(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_GetIamPolicy demonstrates reading a queue's IAM policy with
// Client.GetIamPolicy. Note the request type comes from the iam proto package.
func ExampleClient_GetIamPolicy() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &iampb.GetIamPolicyRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetIamPolicy(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_SetIamPolicy demonstrates replacing a queue's IAM policy with
// Client.SetIamPolicy.
func ExampleClient_SetIamPolicy() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &iampb.SetIamPolicyRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetIamPolicy(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_TestIamPermissions demonstrates checking caller permissions on
// a queue with Client.TestIamPermissions.
func ExampleClient_TestIamPermissions() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &iampb.TestIamPermissionsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.TestIamPermissions(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_ListTasks demonstrates iterating a paginated task listing with
// Client.ListTasks; iterator.Done signals the end of the result stream.
func ExampleClient_ListTasks() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &taskspb.ListTasksRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTasks(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}
// ExampleClient_GetTask demonstrates fetching a single task with
// Client.GetTask.
func ExampleClient_GetTask() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &taskspb.GetTaskRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTask(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_CreateTask demonstrates adding a task to a queue with
// Client.CreateTask.
func ExampleClient_CreateTask() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &taskspb.CreateTaskRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateTask(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_DeleteTask demonstrates deleting a task with
// Client.DeleteTask. The RPC returns only an error (no response body).
func ExampleClient_DeleteTask() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &taskspb.DeleteTaskRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteTask(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
// ExampleClient_LeaseTasks demonstrates leasing pull tasks for processing with
// Client.LeaseTasks.
func ExampleClient_LeaseTasks() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &taskspb.LeaseTasksRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.LeaseTasks(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_AcknowledgeTask demonstrates acknowledging a leased task with
// Client.AcknowledgeTask. The RPC returns only an error (no response body).
func ExampleClient_AcknowledgeTask() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &taskspb.AcknowledgeTaskRequest{
		// TODO: Fill request struct fields.
	}
	err = c.AcknowledgeTask(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}
// ExampleClient_RenewLease demonstrates extending a task lease with
// Client.RenewLease.
func ExampleClient_RenewLease() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &taskspb.RenewLeaseRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.RenewLease(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_CancelLease demonstrates canceling a task lease with
// Client.CancelLease.
func ExampleClient_CancelLease() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &taskspb.CancelLeaseRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CancelLease(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_RunTask demonstrates forcing immediate execution of a task
// with Client.RunTask.
func ExampleClient_RunTask() {
	ctx := context.Background()
	c, err := cloudtasks.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	req := &taskspb.RunTaskRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.RunTask(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
@@ -0,0 +1,48 @@ | |||||
// Copyright 2018 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// https://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// AUTO-GENERATED CODE. DO NOT EDIT. | |||||
// Package cloudtasks is an auto-generated package for the | |||||
// Cloud Tasks API. | |||||
// | |||||
// NOTE: This package is in alpha. It is not stable, and is likely to change. | |||||
// | |||||
// Manages the execution of large numbers of distributed requests. Cloud | |||||
// Tasks | |||||
// is in Alpha. | |||||
package cloudtasks // import "cloud.google.com/go/cloudtasks/apiv2beta2" | |||||
import ( | |||||
"golang.org/x/net/context" | |||||
"google.golang.org/grpc/metadata" | |||||
) | |||||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { | |||||
out, _ := metadata.FromOutgoingContext(ctx) | |||||
out = out.Copy() | |||||
for _, md := range mds { | |||||
for k, v := range md { | |||||
out[k] = append(out[k], v...) | |||||
} | |||||
} | |||||
return metadata.NewOutgoingContext(ctx, out) | |||||
} | |||||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package. | |||||
func DefaultAuthScopes() []string { | |||||
return []string{ | |||||
"https://www.googleapis.com/auth/cloud-platform", | |||||
} | |||||
} |
@@ -0,0 +1,450 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// +build linux,go1.7 | |||||
package main | |||||
import ( | |||||
"encoding/json" | |||||
"flag" | |||||
"fmt" | |||||
"io/ioutil" | |||||
"log" | |||||
"math/rand" | |||||
"os" | |||||
"sync" | |||||
"time" | |||||
"cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints" | |||||
debuglet "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller" | |||||
"cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug" | |||||
"cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug/local" | |||||
"cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector" | |||||
"cloud.google.com/go/compute/metadata" | |||||
"golang.org/x/net/context" | |||||
"golang.org/x/oauth2" | |||||
"golang.org/x/oauth2/google" | |||||
cd "google.golang.org/api/clouddebugger/v2" | |||||
) | |||||
var ( | |||||
appModule = flag.String("appmodule", "", "Optional application module name.") | |||||
appVersion = flag.String("appversion", "", "Optional application module version name.") | |||||
sourceContextFile = flag.String("sourcecontext", "", "File containing JSON-encoded source context.") | |||||
verbose = flag.Bool("v", false, "Output verbose log messages.") | |||||
projectNumber = flag.String("projectnumber", "", "Project number."+ | |||||
" If this is not set, it is read from the GCP metadata server.") | |||||
projectID = flag.String("projectid", "", "Project ID."+ | |||||
" If this is not set, it is read from the GCP metadata server.") | |||||
serviceAccountFile = flag.String("serviceaccountfile", "", "File containing JSON service account credentials.") | |||||
) | |||||
const ( | |||||
maxCapturedStackFrames = 50 | |||||
maxCapturedVariables = 1000 | |||||
) | |||||
// main starts the debug agent: it resolves project identity and credentials,
// connects to the Debuglet Controller, loads (but does not yet run) the target
// program named on the command line, fetches an initial set of breakpoints,
// then runs the program under the debugger.
func main() {
	flag.Usage = usage
	flag.Parse()
	args := flag.Args()
	if len(args) == 0 {
		// The user needs to supply the name of the executable to run.
		flag.Usage()
		return
	}
	// Project number/ID default to the GCP metadata server when not given as
	// flags; failures are logged but not fatal.
	if *projectNumber == "" {
		var err error
		*projectNumber, err = metadata.NumericProjectID()
		if err != nil {
			log.Print("Debuglet initialization: ", err)
		}
	}
	if *projectID == "" {
		var err error
		*projectID, err = metadata.ProjectID()
		if err != nil {
			log.Print("Debuglet initialization: ", err)
		}
	}
	sourceContexts, err := readSourceContextFile(*sourceContextFile)
	if err != nil {
		log.Print("Reading source context file: ", err)
	}
	// Prefer an explicit service-account key file; otherwise fall back to
	// application default credentials.
	var ts oauth2.TokenSource
	ctx := context.Background()
	if *serviceAccountFile != "" {
		if ts, err = serviceAcctTokenSource(ctx, *serviceAccountFile, cd.CloudDebuggerScope); err != nil {
			log.Fatalf("Error getting credentials from file %s: %v", *serviceAccountFile, err)
		}
	} else if ts, err = google.DefaultTokenSource(ctx, cd.CloudDebuggerScope); err != nil {
		log.Print("Error getting application default credentials for Cloud Debugger:", err)
		// Distinct exit code so callers can tell a credentials failure apart
		// from other fatal errors.
		os.Exit(103)
	}
	c, err := debuglet.NewController(ctx, debuglet.Options{
		ProjectNumber:  *projectNumber,
		ProjectID:      *projectID,
		AppModule:      *appModule,
		AppVersion:     *appVersion,
		SourceContexts: sourceContexts,
		Verbose:        *verbose,
		TokenSource:    ts,
	})
	if err != nil {
		log.Fatal("Error connecting to Cloud Debugger: ", err)
	}
	prog, err := local.New(args[0])
	if err != nil {
		log.Fatal("Error loading program: ", err)
	}
	// Load the program, but don't actually start it running yet.
	if _, err = prog.Run(args[1:]...); err != nil {
		log.Fatal("Error loading program: ", err)
	}
	bs := breakpoints.NewBreakpointStore(prog)
	// Seed the random number generator.
	rand.Seed(time.Now().UnixNano())
	// Now we want to do two things: run the user's program, and start sending
	// List requests periodically to the Debuglet Controller to get breakpoints
	// to set.
	//
	// We want to give the Debuglet Controller a chance to give us breakpoints
	// before we start the program, otherwise we would miss any breakpoint
	// triggers that occur during program startup -- for example, a breakpoint on
	// the first line of main. But if the Debuglet Controller is not responding or
	// is returning errors, we don't want to delay starting the program
	// indefinitely.
	//
	// We pass a channel to breakpointListLoop, which will close it when the first
	// List call finishes. Then we wait until either the channel is closed or a
	// 5-second timer has finished before starting the program.
	ch := make(chan bool)
	// Start a goroutine that sends List requests to the Debuglet Controller, and
	// sets any breakpoints it gets back.
	go breakpointListLoop(ctx, c, bs, ch)
	// Wait until 5 seconds have passed or breakpointListLoop has closed ch.
	select {
	case <-time.After(5 * time.Second):
	case <-ch:
	}
	// Run the debuggee.
	programLoop(ctx, c, bs, prog)
}
// usage prints a usage message to stderr and exits with status 2.
func usage() {
	binary := "a.out"
	if len(os.Args) >= 1 {
		binary = os.Args[0]
	}
	fmt.Fprintf(os.Stderr, "Usage of %s:\n", binary)
	fmt.Fprintf(os.Stderr, "\t%s [flags...] -- <program name> args...\n", binary)
	fmt.Fprintf(os.Stderr, "Flags:\n")
	flag.PrintDefaults()
	fmt.Fprintf(os.Stderr,
		"See https://cloud.google.com/tools/cloud-debugger/setting-up-on-compute-engine for more information.\n")
	os.Exit(2)
}
// readSourceContextFile reads a JSON-encoded source context from the given file. | |||||
// It returns a non-empty slice on success. | |||||
func readSourceContextFile(filename string) ([]*cd.SourceContext, error) { | |||||
if filename == "" { | |||||
return nil, nil | |||||
} | |||||
scJSON, err := ioutil.ReadFile(filename) | |||||
if err != nil { | |||||
return nil, fmt.Errorf("reading file %q: %v", filename, err) | |||||
} | |||||
var sc cd.SourceContext | |||||
if err = json.Unmarshal(scJSON, &sc); err != nil { | |||||
return nil, fmt.Errorf("parsing file %q: %v", filename, err) | |||||
} | |||||
return []*cd.SourceContext{&sc}, nil | |||||
} | |||||
// breakpointListLoop repeatedly calls the Debuglet Controller's List RPC, and
// passes the results to the BreakpointStore so it can set and unset breakpoints
// in the program.
//
// After the first List call finishes, ch is closed.
// The loop never returns; it paces itself with randomized delays and backs off
// exponentially (capped at 16x errorDelay) on consecutive server errors.
func breakpointListLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, first chan bool) {
	const (
		avgTimeBetweenCalls = time.Second
		errorDelay          = 5 * time.Second
	)
	// randomDuration returns a random duration with expected value avg.
	randomDuration := func(avg time.Duration) time.Duration {
		return time.Duration(rand.Int63n(int64(2*avg + 1)))
	}
	var consecutiveFailures uint
	for {
		callStart := time.Now()
		resp, err := c.List(ctx)
		// ErrListUnchanged is a normal "nothing new" signal, not a failure.
		if err != nil && err != debuglet.ErrListUnchanged {
			log.Printf("Debuglet controller server error: %v", err)
		}
		if err == nil {
			bs.ProcessBreakpointList(resp.Breakpoints)
		}
		if first != nil {
			// We've finished one call to List and set any breakpoints we received.
			close(first)
			first = nil
		}
		// Asynchronously send updates for any breakpoints that caused an error when
		// the BreakpointStore tried to process them. We don't wait for the update
		// to finish before the program can exit, as we do for normal updates.
		errorBps := bs.ErrorBreakpoints()
		for _, bp := range errorBps {
			go func(bp *cd.Breakpoint) {
				if err := c.Update(ctx, bp.Id, bp); err != nil {
					log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err)
				}
			}(bp)
		}
		// Make the next call not too soon after the one we just did.
		delay := randomDuration(avgTimeBetweenCalls)
		// If the call returned an error other than ErrListUnchanged, wait longer.
		if err != nil && err != debuglet.ErrListUnchanged {
			// Wait twice as long after each consecutive failure, to a maximum of 16x.
			delay += randomDuration(errorDelay * (1 << consecutiveFailures))
			if consecutiveFailures < 4 {
				consecutiveFailures++
			}
		} else {
			consecutiveFailures = 0
		}
		// Sleep until we reach time callStart+delay. If we've already passed that
		// time, time.Sleep will return immediately -- this should be the common
		// case, since the server will delay responding to List for a while when
		// there are no changes to report.
		time.Sleep(callStart.Add(delay).Sub(time.Now()))
	}
}
// programLoop runs the program being debugged to completion. When a breakpoint's
// conditions are satisfied, it sends an Update RPC to the Debuglet Controller.
// The function returns when the program exits and all Update RPCs have finished.
func programLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, prog debug.Program) {
	var wg sync.WaitGroup
	for {
		// Run the program until it hits a breakpoint or exits.
		status, err := prog.Resume()
		if err != nil {
			break
		}
		// Get the breakpoints at this address whose conditions were satisfied,
		// and remove the ones that aren't logpoints.
		bps := bs.BreakpointsAtPC(status.PC)
		bps = bpsWithConditionSatisfied(bps, prog)
		for _, bp := range bps {
			if bp.Action != "LOG" {
				bs.RemoveBreakpoint(bp)
			}
		}
		if len(bps) == 0 {
			continue
		}
		// Evaluate expressions and get the stack.
		vc := valuecollector.NewCollector(prog, maxCapturedVariables)
		needStackFrames := false
		for _, bp := range bps {
			// If evaluating bp's condition didn't return an error, evaluate bp's
			// expressions, and later get the stack frames.
			if bp.Status == nil {
				bp.EvaluatedExpressions = expressionValues(bp.Expressions, prog, vc)
				needStackFrames = true
			}
		}
		var (
			stack                    []*cd.StackFrame
			stackFramesStatusMessage *cd.StatusMessage
		)
		if needStackFrames {
			stack, stackFramesStatusMessage = stackFrames(prog, vc)
		}
		// Read variable values from the program.
		variableTable := vc.ReadValues()
		// Start a goroutine to send updates to the Debuglet Controller or write
		// to logs, concurrently with resuming the program.
		// TODO: retry Update on failure.
		for _, bp := range bps {
			wg.Add(1)
			switch bp.Action {
			case "LOG":
				// Logpoints stay active: log the formatted message, then clear the
				// evaluation results so the breakpoint can fire again cleanly.
				go func(format string, evaluatedExpressions []*cd.Variable) {
					s := valuecollector.LogString(format, evaluatedExpressions, variableTable)
					log.Print(s)
					wg.Done()
				}(bp.LogMessageFormat, bp.EvaluatedExpressions)
				bp.Status = nil
				bp.EvaluatedExpressions = nil
			default:
				go func(bp *cd.Breakpoint) {
					defer wg.Done()
					bp.IsFinalState = true
					if bp.Status == nil {
						// If evaluating bp's condition didn't return an error, include the
						// stack frames, variable table, and any status message produced when
						// getting the stack frames.
						bp.StackFrames = stack
						bp.VariableTable = variableTable
						bp.Status = stackFramesStatusMessage
					}
					if err := c.Update(ctx, bp.Id, bp); err != nil {
						log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err)
					}
				}(bp)
			}
		}
	}
	// Wait for all updates to finish before returning.
	wg.Wait()
}
// bpsWithConditionSatisfied returns the breakpoints whose conditions are true | |||||
// (or that do not have a condition.) | |||||
func bpsWithConditionSatisfied(bpsIn []*cd.Breakpoint, prog debug.Program) []*cd.Breakpoint { | |||||
var bpsOut []*cd.Breakpoint | |||||
for _, bp := range bpsIn { | |||||
cond, err := condTruth(bp.Condition, prog) | |||||
if err != nil { | |||||
bp.Status = errorStatusMessage(err.Error(), refersToBreakpointCondition) | |||||
// Include bp in the list to be updated when there's an error, so that | |||||
// the user gets a response. | |||||
bpsOut = append(bpsOut, bp) | |||||
} else if cond { | |||||
bpsOut = append(bpsOut, bp) | |||||
} | |||||
} | |||||
return bpsOut | |||||
} | |||||
// condTruth evaluates a condition. | |||||
func condTruth(condition string, prog debug.Program) (bool, error) { | |||||
if condition == "" { | |||||
// A condition wasn't set. | |||||
return true, nil | |||||
} | |||||
val, err := prog.Evaluate(condition) | |||||
if err != nil { | |||||
return false, err | |||||
} | |||||
if v, ok := val.(bool); !ok { | |||||
return false, fmt.Errorf("condition expression has type %T, should be bool", val) | |||||
} else { | |||||
return v, nil | |||||
} | |||||
} | |||||
// expressionValues evaluates a slice of expressions and returns a []*cd.Variable | |||||
// containing the results. | |||||
// If the result of an expression evaluation refers to values from the program's | |||||
// memory (e.g., the expression evaluates to a slice) a corresponding variable is | |||||
// added to the value collector, to be read later. | |||||
func expressionValues(expressions []string, prog debug.Program, vc *valuecollector.Collector) []*cd.Variable { | |||||
evaluatedExpressions := make([]*cd.Variable, len(expressions)) | |||||
for i, exp := range expressions { | |||||
ee := &cd.Variable{Name: exp} | |||||
evaluatedExpressions[i] = ee | |||||
if val, err := prog.Evaluate(exp); err != nil { | |||||
ee.Status = errorStatusMessage(err.Error(), refersToBreakpointExpression) | |||||
} else { | |||||
vc.FillValue(val, ee) | |||||
} | |||||
} | |||||
return evaluatedExpressions | |||||
} | |||||
// stackFrames returns a stack trace for the program. It passes references to | |||||
// function parameters and local variables to the value collector, so it can read | |||||
// their values later. | |||||
func stackFrames(prog debug.Program, vc *valuecollector.Collector) ([]*cd.StackFrame, *cd.StatusMessage) { | |||||
frames, err := prog.Frames(maxCapturedStackFrames) | |||||
if err != nil { | |||||
return nil, errorStatusMessage("Error getting stack: "+err.Error(), refersToUnspecified) | |||||
} | |||||
stackFrames := make([]*cd.StackFrame, len(frames)) | |||||
for i, f := range frames { | |||||
frame := &cd.StackFrame{} | |||||
frame.Function = f.Function | |||||
for _, v := range f.Params { | |||||
frame.Arguments = append(frame.Arguments, vc.AddVariable(debug.LocalVar(v))) | |||||
} | |||||
for _, v := range f.Vars { | |||||
frame.Locals = append(frame.Locals, vc.AddVariable(v)) | |||||
} | |||||
frame.Location = &cd.SourceLocation{ | |||||
Path: f.File, | |||||
Line: int64(f.Line), | |||||
} | |||||
stackFrames[i] = frame | |||||
} | |||||
return stackFrames, nil | |||||
} | |||||
// errorStatusMessage returns a *cd.StatusMessage indicating an error, | |||||
// with the given message and refersTo field. | |||||
func errorStatusMessage(msg string, refersTo int) *cd.StatusMessage { | |||||
return &cd.StatusMessage{ | |||||
Description: &cd.FormatMessage{Format: "$0", Parameters: []string{msg}}, | |||||
IsError: true, | |||||
RefersTo: refersToString[refersTo], | |||||
} | |||||
} | |||||
const (
	// RefersTo values for cd.StatusMessage. These index refersToString below;
	// refersToUnspecified is the zero value.
	refersToUnspecified = iota
	refersToBreakpointCondition
	refersToBreakpointExpression
)

// refersToString contains the strings for each refersTo value.
// See the definition of StatusMessage in the v2/clouddebugger package.
var refersToString = map[int]string{
	refersToUnspecified:          "UNSPECIFIED",
	refersToBreakpointCondition:  "BREAKPOINT_CONDITION",
	refersToBreakpointExpression: "BREAKPOINT_EXPRESSION",
}
func serviceAcctTokenSource(ctx context.Context, filename string, scope ...string) (oauth2.TokenSource, error) { | |||||
data, err := ioutil.ReadFile(filename) | |||||
if err != nil { | |||||
return nil, fmt.Errorf("cannot read service account file: %v", err) | |||||
} | |||||
cfg, err := google.JWTConfigFromJSON(data, scope...) | |||||
if err != nil { | |||||
return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err) | |||||
} | |||||
return cfg.TokenSource(ctx), nil | |||||
} |
@@ -0,0 +1,174 @@ | |||||
// Copyright 2016 Google LLC | |||||
// | |||||
// Licensed under the Apache License, Version 2.0 (the "License"); | |||||
// you may not use this file except in compliance with the License. | |||||
// You may obtain a copy of the License at | |||||
// | |||||
// http://www.apache.org/licenses/LICENSE-2.0 | |||||
// | |||||
// Unless required by applicable law or agreed to in writing, software | |||||
// distributed under the License is distributed on an "AS IS" BASIS, | |||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
// See the License for the specific language governing permissions and | |||||
// limitations under the License. | |||||
// Package breakpoints handles breakpoint requests we get from the user through | |||||
// the Debuglet Controller, and manages corresponding breakpoints set in the code. | |||||
package breakpoints | |||||
import ( | |||||
"log" | |||||
"sync" | |||||
"cloud.google.com/go/cmd/go-cloud-debug-agent/internal/debug" | |||||
cd "google.golang.org/api/clouddebugger/v2" | |||||
) | |||||
// BreakpointStore stores the set of breakpoints for a program.
// All fields are guarded by mu; exported methods take the lock themselves.
type BreakpointStore struct {
	mu sync.Mutex
	// prog is the program being debugged.
	prog debug.Program
	// idToBreakpoint is a map from breakpoint identifier to *cd.Breakpoint. The
	// map value is nil if the breakpoint is inactive. A breakpoint is active if:
	// - We received it from the Debuglet Controller, and it was active at the time;
	// - We were able to set code breakpoints for it;
	// - We have not reached any of those code breakpoints while satisfying the
	//   breakpoint's conditions, or the breakpoint has action LOG; and
	// - The Debuglet Controller hasn't informed us the breakpoint has become inactive.
	idToBreakpoint map[string]*cd.Breakpoint
	// pcToBps and bpToPCs store the many-to-many relationship between breakpoints we
	// received from the Debuglet Controller and the code breakpoints we set for them.
	pcToBps map[uint64][]*cd.Breakpoint
	bpToPCs map[*cd.Breakpoint][]uint64
	// errors contains any breakpoints which couldn't be set because they caused an
	// error. These are retrieved with ErrorBreakpoints, and the caller is
	// expected to handle sending updates for them.
	errors []*cd.Breakpoint
}
// NewBreakpointStore returns a BreakpointStore for the given program. | |||||
func NewBreakpointStore(prog debug.Program) *BreakpointStore { | |||||
return &BreakpointStore{ | |||||
idToBreakpoint: make(map[string]*cd.Breakpoint), | |||||
pcToBps: make(map[uint64][]*cd.Breakpoint), | |||||
bpToPCs: make(map[*cd.Breakpoint][]uint64), | |||||
prog: prog, | |||||
} | |||||
} | |||||
// ProcessBreakpointList applies updates received from the Debuglet Controller through a List call.
// For each incoming breakpoint it either deactivates a known breakpoint
// (IsFinalState), rejects an unsupported action, or sets code breakpoints at
// the PCs corresponding to the breakpoint's source location.
func (bs *BreakpointStore) ProcessBreakpointList(bps []*cd.Breakpoint) {
	bs.mu.Lock()
	defer bs.mu.Unlock()
	for _, bp := range bps {
		if storedBp, ok := bs.idToBreakpoint[bp.Id]; ok {
			// Already-seen ID: the only update we act on is deactivation.
			if storedBp != nil && bp.IsFinalState {
				// IsFinalState indicates that the breakpoint has been made inactive.
				bs.removeBreakpointLocked(storedBp)
			}
		} else {
			if bp.IsFinalState {
				// The controller is notifying us that the breakpoint is no longer active,
				// but we didn't know about it anyway.
				continue
			}
			if bp.Action != "" && bp.Action != "CAPTURE" && bp.Action != "LOG" {
				bp.IsFinalState = true
				bp.Status = &cd.StatusMessage{
					Description: &cd.FormatMessage{Format: "Action is not supported"},
					IsError:     true,
				}
				bs.errors = append(bs.errors, bp)
				// Note in idToBreakpoint that we've already seen this breakpoint, so that we
				// don't try to report it as an error multiple times.
				bs.idToBreakpoint[bp.Id] = nil
				continue
			}
			pcs, err := bs.prog.BreakpointAtLine(bp.Location.Path, uint64(bp.Location.Line))
			if err != nil {
				log.Printf("error setting breakpoint at %s:%d: %v", bp.Location.Path, bp.Location.Line, err)
			}
			if len(pcs) == 0 {
				// We can't find a PC for this breakpoint's source line, so don't make it active.
				// TODO: we could snap the line to a location where we can break, or report an error to the user.
				bs.idToBreakpoint[bp.Id] = nil
			} else {
				bs.idToBreakpoint[bp.Id] = bp
				for _, pc := range pcs {
					bs.pcToBps[pc] = append(bs.pcToBps[pc], bp)
				}
				bs.bpToPCs[bp] = pcs
			}
		}
	}
}
// ErrorBreakpoints returns a slice of Breakpoints that caused errors when the | |||||
// BreakpointStore tried to process them, and resets the list of such | |||||
// breakpoints. | |||||
// The caller is expected to send updates to the server to indicate the errors. | |||||
func (bs *BreakpointStore) ErrorBreakpoints() []*cd.Breakpoint { | |||||
bs.mu.Lock() | |||||
defer bs.mu.Unlock() | |||||
bps := bs.errors | |||||
bs.errors = nil | |||||
return bps | |||||
} | |||||
// BreakpointsAtPC returns all the breakpoints for which we set a code | |||||
// breakpoint at the given address. | |||||
func (bs *BreakpointStore) BreakpointsAtPC(pc uint64) []*cd.Breakpoint { | |||||
bs.mu.Lock() | |||||
defer bs.mu.Unlock() | |||||
return bs.pcToBps[pc] | |||||
} | |||||
// RemoveBreakpoint makes the given breakpoint inactive. | |||||
// This is called when either the debugged program hits the breakpoint, or the Debuglet | |||||
// Controller informs us that the breakpoint is now inactive. | |||||
func (bs *BreakpointStore) RemoveBreakpoint(bp *cd.Breakpoint) { | |||||
bs.mu.Lock() | |||||
bs.removeBreakpointLocked(bp) | |||||
bs.mu.Unlock() | |||||
} | |||||
// removeBreakpointLocked deactivates bp and removes any code breakpoints that
// no longer back an active cd breakpoint. bs.mu must be held by the caller.
func (bs *BreakpointStore) removeBreakpointLocked(bp *cd.Breakpoint) {
	// Set the ID's corresponding breakpoint to nil, so that we won't activate it
	// if we see it again.
	// TODO: we could delete it after a few seconds.
	bs.idToBreakpoint[bp.Id] = nil
	// Delete bp from the list of cd breakpoints at each of its corresponding
	// code breakpoint locations, and delete any code breakpoints which no longer
	// have a corresponding cd breakpoint.
	var codeBreakpointsToDelete []uint64
	for _, pc := range bs.bpToPCs[bp] {
		bps := remove(bs.pcToBps[pc], bp)
		if len(bps) == 0 {
			// bp was the last breakpoint set at this PC, so delete the code breakpoint.
			codeBreakpointsToDelete = append(codeBreakpointsToDelete, pc)
			delete(bs.pcToBps, pc)
		} else {
			bs.pcToBps[pc] = bps
		}
	}
	if len(codeBreakpointsToDelete) > 0 {
		bs.prog.DeleteBreakpoints(codeBreakpointsToDelete)
	}
	delete(bs.bpToPCs, bp)
}
// remove updates rs by removing r, then returns rs. | |||||
// The mutex in the BreakpointStore which contains rs should be held. | |||||
func remove(rs []*cd.Breakpoint, r *cd.Breakpoint) []*cd.Breakpoint { | |||||
for i := range rs { | |||||
if rs[i] == r { | |||||
rs[i] = rs[len(rs)-1] | |||||
rs = rs[0 : len(rs)-1] | |||||
return rs | |||||
} | |||||
} | |||||
// We shouldn't reach here. | |||||
return rs | |||||
} |