|
- // Code generated by protoc-gen-go. DO NOT EDIT.
- // source: google/cloud/speech/v1p1beta1/cloud_speech.proto
-
- package speech // import "google.golang.org/genproto/googleapis/cloud/speech/v1p1beta1"
-
- import proto "github.com/golang/protobuf/proto"
- import fmt "fmt"
- import math "math"
- import _ "github.com/golang/protobuf/ptypes/any"
- import duration "github.com/golang/protobuf/ptypes/duration"
- import _ "github.com/golang/protobuf/ptypes/empty"
- import timestamp "github.com/golang/protobuf/ptypes/timestamp"
- import _ "google.golang.org/genproto/googleapis/api/annotations"
- import longrunning "google.golang.org/genproto/googleapis/longrunning"
- import status "google.golang.org/genproto/googleapis/rpc/status"
-
- import (
- context "golang.org/x/net/context"
- grpc "google.golang.org/grpc"
- )
-
- // Reference imports to suppress errors if they are not otherwise used.
- var _ = proto.Marshal
- var _ = fmt.Errorf
- var _ = math.Inf
-
- // This is a compile-time assertion to ensure that this generated file
- // is compatible with the proto package it is being compiled against.
- // A compilation error at this line likely means your copy of the
- // proto package needs to be updated.
- const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
- // The encoding of the audio data sent in the request.
- //
- // All encodings support only 1 channel (mono) audio.
- //
- // For best results, the audio source should be captured and transmitted using
- // a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
- // recognition can be reduced if lossy codecs are used to capture or transmit
- // audio, particularly if background noise is present. Lossy codecs include
- // `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, and `SPEEX_WITH_HEADER_BYTE`.
- //
- // The `FLAC` and `WAV` audio file formats include a header that describes the
- // included audio content. You can request recognition for `WAV` files that
- // contain either `LINEAR16` or `MULAW` encoded audio.
- // If you send `FLAC` or `WAV` audio file format in
- // your request, you do not need to specify an `AudioEncoding`; the audio
- // encoding format is determined from the file header. If you specify
 - // an `AudioEncoding` when you send `FLAC` or `WAV` audio, the
- // encoding configuration must match the encoding described in the audio
- // header; otherwise the request returns an
- // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error
- // code.
- type RecognitionConfig_AudioEncoding int32
-
- const (
- // Not specified.
- RecognitionConfig_ENCODING_UNSPECIFIED RecognitionConfig_AudioEncoding = 0
- // Uncompressed 16-bit signed little-endian samples (Linear PCM).
- RecognitionConfig_LINEAR16 RecognitionConfig_AudioEncoding = 1
- // `FLAC` (Free Lossless Audio
- // Codec) is the recommended encoding because it is
- // lossless--therefore recognition is not compromised--and
- // requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
- // encoding supports 16-bit and 24-bit samples, however, not all fields in
- // `STREAMINFO` are supported.
- RecognitionConfig_FLAC RecognitionConfig_AudioEncoding = 2
- // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
- RecognitionConfig_MULAW RecognitionConfig_AudioEncoding = 3
- // Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
- RecognitionConfig_AMR RecognitionConfig_AudioEncoding = 4
- // Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
- RecognitionConfig_AMR_WB RecognitionConfig_AudioEncoding = 5
- // Opus encoded audio frames in Ogg container
- // ([OggOpus](https://wiki.xiph.org/OggOpus)).
- // `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
- RecognitionConfig_OGG_OPUS RecognitionConfig_AudioEncoding = 6
- // Although the use of lossy encodings is not recommended, if a very low
- // bitrate encoding is required, `OGG_OPUS` is highly preferred over
- // Speex encoding. The [Speex](https://speex.org/) encoding supported by
- // Cloud Speech API has a header byte in each block, as in MIME type
- // `audio/x-speex-with-header-byte`.
- // It is a variant of the RTP Speex encoding defined in
- // [RFC 5574](https://tools.ietf.org/html/rfc5574).
- // The stream is a sequence of blocks, one block per RTP packet. Each block
- // starts with a byte containing the length of the block, in bytes, followed
- // by one or more frames of Speex data, padded to an integral number of
- // bytes (octets) as specified in RFC 5574. In other words, each RTP header
- // is replaced with a single byte containing the block length. Only Speex
- // wideband is supported. `sample_rate_hertz` must be 16000.
- RecognitionConfig_SPEEX_WITH_HEADER_BYTE RecognitionConfig_AudioEncoding = 7
- )
-
- var RecognitionConfig_AudioEncoding_name = map[int32]string{
- 0: "ENCODING_UNSPECIFIED",
- 1: "LINEAR16",
- 2: "FLAC",
- 3: "MULAW",
- 4: "AMR",
- 5: "AMR_WB",
- 6: "OGG_OPUS",
- 7: "SPEEX_WITH_HEADER_BYTE",
- }
- var RecognitionConfig_AudioEncoding_value = map[string]int32{
- "ENCODING_UNSPECIFIED": 0,
- "LINEAR16": 1,
- "FLAC": 2,
- "MULAW": 3,
- "AMR": 4,
- "AMR_WB": 5,
- "OGG_OPUS": 6,
- "SPEEX_WITH_HEADER_BYTE": 7,
- }
-
- func (x RecognitionConfig_AudioEncoding) String() string {
- return proto.EnumName(RecognitionConfig_AudioEncoding_name, int32(x))
- }
- func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{4, 0}
- }
-
- // Use case categories that the audio recognition request can be described
- // by.
- type RecognitionMetadata_InteractionType int32
-
- const (
- // Use case is either unknown or is something other than one of the other
- // values below.
- RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED RecognitionMetadata_InteractionType = 0
- // Multiple people in a conversation or discussion. For example in a
- // meeting with two or more people actively participating. Typically
- // all the primary people speaking would be in the same room (if not,
- // see PHONE_CALL)
- RecognitionMetadata_DISCUSSION RecognitionMetadata_InteractionType = 1
- // One or more persons lecturing or presenting to others, mostly
- // uninterrupted.
- RecognitionMetadata_PRESENTATION RecognitionMetadata_InteractionType = 2
- // A phone-call or video-conference in which two or more people, who are
- // not in the same room, are actively participating.
- RecognitionMetadata_PHONE_CALL RecognitionMetadata_InteractionType = 3
- // A recorded message intended for another person to listen to.
- RecognitionMetadata_VOICEMAIL RecognitionMetadata_InteractionType = 4
- // Professionally produced audio (eg. TV Show, Podcast).
- RecognitionMetadata_PROFESSIONALLY_PRODUCED RecognitionMetadata_InteractionType = 5
- // Transcribe spoken questions and queries into text.
- RecognitionMetadata_VOICE_SEARCH RecognitionMetadata_InteractionType = 6
- // Transcribe voice commands, such as for controlling a device.
- RecognitionMetadata_VOICE_COMMAND RecognitionMetadata_InteractionType = 7
- // Transcribe speech to text to create a written document, such as a
- // text-message, email or report.
- RecognitionMetadata_DICTATION RecognitionMetadata_InteractionType = 8
- )
-
- var RecognitionMetadata_InteractionType_name = map[int32]string{
- 0: "INTERACTION_TYPE_UNSPECIFIED",
- 1: "DISCUSSION",
- 2: "PRESENTATION",
- 3: "PHONE_CALL",
- 4: "VOICEMAIL",
- 5: "PROFESSIONALLY_PRODUCED",
- 6: "VOICE_SEARCH",
- 7: "VOICE_COMMAND",
- 8: "DICTATION",
- }
- var RecognitionMetadata_InteractionType_value = map[string]int32{
- "INTERACTION_TYPE_UNSPECIFIED": 0,
- "DISCUSSION": 1,
- "PRESENTATION": 2,
- "PHONE_CALL": 3,
- "VOICEMAIL": 4,
- "PROFESSIONALLY_PRODUCED": 5,
- "VOICE_SEARCH": 6,
- "VOICE_COMMAND": 7,
- "DICTATION": 8,
- }
-
- func (x RecognitionMetadata_InteractionType) String() string {
- return proto.EnumName(RecognitionMetadata_InteractionType_name, int32(x))
- }
- func (RecognitionMetadata_InteractionType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{5, 0}
- }
-
- // Enumerates the types of capture settings describing an audio file.
- type RecognitionMetadata_MicrophoneDistance int32
-
- const (
- // Audio type is not known.
- RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED RecognitionMetadata_MicrophoneDistance = 0
- // The audio was captured from a closely placed microphone. Eg. phone,
 - // dictaphone, or handheld microphone. Generally if the speaker is within
- // 1 meter of the microphone.
- RecognitionMetadata_NEARFIELD RecognitionMetadata_MicrophoneDistance = 1
 - // The speaker is within 3 meters of the microphone.
- RecognitionMetadata_MIDFIELD RecognitionMetadata_MicrophoneDistance = 2
- // The speaker is more than 3 meters away from the microphone.
- RecognitionMetadata_FARFIELD RecognitionMetadata_MicrophoneDistance = 3
- )
-
- var RecognitionMetadata_MicrophoneDistance_name = map[int32]string{
- 0: "MICROPHONE_DISTANCE_UNSPECIFIED",
- 1: "NEARFIELD",
- 2: "MIDFIELD",
- 3: "FARFIELD",
- }
- var RecognitionMetadata_MicrophoneDistance_value = map[string]int32{
- "MICROPHONE_DISTANCE_UNSPECIFIED": 0,
- "NEARFIELD": 1,
- "MIDFIELD": 2,
- "FARFIELD": 3,
- }
-
- func (x RecognitionMetadata_MicrophoneDistance) String() string {
- return proto.EnumName(RecognitionMetadata_MicrophoneDistance_name, int32(x))
- }
- func (RecognitionMetadata_MicrophoneDistance) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{5, 1}
- }
-
- // The original media the speech was recorded on.
- type RecognitionMetadata_OriginalMediaType int32
-
- const (
- // Unknown original media type.
- RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED RecognitionMetadata_OriginalMediaType = 0
- // The speech data is an audio recording.
- RecognitionMetadata_AUDIO RecognitionMetadata_OriginalMediaType = 1
- // The speech data originally recorded on a video.
- RecognitionMetadata_VIDEO RecognitionMetadata_OriginalMediaType = 2
- )
-
- var RecognitionMetadata_OriginalMediaType_name = map[int32]string{
- 0: "ORIGINAL_MEDIA_TYPE_UNSPECIFIED",
- 1: "AUDIO",
- 2: "VIDEO",
- }
- var RecognitionMetadata_OriginalMediaType_value = map[string]int32{
- "ORIGINAL_MEDIA_TYPE_UNSPECIFIED": 0,
- "AUDIO": 1,
- "VIDEO": 2,
- }
-
- func (x RecognitionMetadata_OriginalMediaType) String() string {
- return proto.EnumName(RecognitionMetadata_OriginalMediaType_name, int32(x))
- }
- func (RecognitionMetadata_OriginalMediaType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{5, 2}
- }
-
- // The type of device the speech was recorded with.
- type RecognitionMetadata_RecordingDeviceType int32
-
- const (
- // The recording device is unknown.
- RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED RecognitionMetadata_RecordingDeviceType = 0
- // Speech was recorded on a smartphone.
- RecognitionMetadata_SMARTPHONE RecognitionMetadata_RecordingDeviceType = 1
- // Speech was recorded using a personal computer or tablet.
- RecognitionMetadata_PC RecognitionMetadata_RecordingDeviceType = 2
- // Speech was recorded over a phone line.
- RecognitionMetadata_PHONE_LINE RecognitionMetadata_RecordingDeviceType = 3
- // Speech was recorded in a vehicle.
- RecognitionMetadata_VEHICLE RecognitionMetadata_RecordingDeviceType = 4
- // Speech was recorded outdoors.
- RecognitionMetadata_OTHER_OUTDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 5
- // Speech was recorded indoors.
- RecognitionMetadata_OTHER_INDOOR_DEVICE RecognitionMetadata_RecordingDeviceType = 6
- )
-
- var RecognitionMetadata_RecordingDeviceType_name = map[int32]string{
- 0: "RECORDING_DEVICE_TYPE_UNSPECIFIED",
- 1: "SMARTPHONE",
- 2: "PC",
- 3: "PHONE_LINE",
- 4: "VEHICLE",
- 5: "OTHER_OUTDOOR_DEVICE",
- 6: "OTHER_INDOOR_DEVICE",
- }
- var RecognitionMetadata_RecordingDeviceType_value = map[string]int32{
- "RECORDING_DEVICE_TYPE_UNSPECIFIED": 0,
- "SMARTPHONE": 1,
- "PC": 2,
- "PHONE_LINE": 3,
- "VEHICLE": 4,
- "OTHER_OUTDOOR_DEVICE": 5,
- "OTHER_INDOOR_DEVICE": 6,
- }
-
- func (x RecognitionMetadata_RecordingDeviceType) String() string {
- return proto.EnumName(RecognitionMetadata_RecordingDeviceType_name, int32(x))
- }
- func (RecognitionMetadata_RecordingDeviceType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{5, 3}
- }
-
- // Indicates the type of speech event.
- type StreamingRecognizeResponse_SpeechEventType int32
-
- const (
- // No speech event specified.
- StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED StreamingRecognizeResponse_SpeechEventType = 0
- // This event indicates that the server has detected the end of the user's
- // speech utterance and expects no additional speech. Therefore, the server
- // will not process additional audio (although it may subsequently return
- // additional results). The client should stop sending additional audio
- // data, half-close the gRPC connection, and wait for any additional results
- // until the server closes the gRPC connection. This event is only sent if
- // `single_utterance` was set to `true`, and is not used otherwise.
- StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE StreamingRecognizeResponse_SpeechEventType = 1
- )
-
- var StreamingRecognizeResponse_SpeechEventType_name = map[int32]string{
- 0: "SPEECH_EVENT_UNSPECIFIED",
- 1: "END_OF_SINGLE_UTTERANCE",
- }
- var StreamingRecognizeResponse_SpeechEventType_value = map[string]int32{
- "SPEECH_EVENT_UNSPECIFIED": 0,
- "END_OF_SINGLE_UTTERANCE": 1,
- }
-
- func (x StreamingRecognizeResponse_SpeechEventType) String() string {
- return proto.EnumName(StreamingRecognizeResponse_SpeechEventType_name, int32(x))
- }
- func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{11, 0}
- }
-
- // The top-level message sent by the client for the `Recognize` method.
- type RecognizeRequest struct {
- // *Required* Provides information to the recognizer that specifies how to
- // process the request.
- Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
- // *Required* The audio data to be recognized.
- Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *RecognizeRequest) Reset() { *m = RecognizeRequest{} }
- func (m *RecognizeRequest) String() string { return proto.CompactTextString(m) }
- func (*RecognizeRequest) ProtoMessage() {}
- func (*RecognizeRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{0}
- }
- func (m *RecognizeRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RecognizeRequest.Unmarshal(m, b)
- }
- func (m *RecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RecognizeRequest.Marshal(b, m, deterministic)
- }
- func (dst *RecognizeRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RecognizeRequest.Merge(dst, src)
- }
- func (m *RecognizeRequest) XXX_Size() int {
- return xxx_messageInfo_RecognizeRequest.Size(m)
- }
- func (m *RecognizeRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_RecognizeRequest.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_RecognizeRequest proto.InternalMessageInfo
-
- func (m *RecognizeRequest) GetConfig() *RecognitionConfig {
- if m != nil {
- return m.Config
- }
- return nil
- }
-
- func (m *RecognizeRequest) GetAudio() *RecognitionAudio {
- if m != nil {
- return m.Audio
- }
- return nil
- }
-
- // The top-level message sent by the client for the `LongRunningRecognize`
- // method.
- type LongRunningRecognizeRequest struct {
- // *Required* Provides information to the recognizer that specifies how to
- // process the request.
- Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
- // *Required* The audio data to be recognized.
- Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *LongRunningRecognizeRequest) Reset() { *m = LongRunningRecognizeRequest{} }
- func (m *LongRunningRecognizeRequest) String() string { return proto.CompactTextString(m) }
- func (*LongRunningRecognizeRequest) ProtoMessage() {}
- func (*LongRunningRecognizeRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{1}
- }
- func (m *LongRunningRecognizeRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LongRunningRecognizeRequest.Unmarshal(m, b)
- }
- func (m *LongRunningRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LongRunningRecognizeRequest.Marshal(b, m, deterministic)
- }
- func (dst *LongRunningRecognizeRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LongRunningRecognizeRequest.Merge(dst, src)
- }
- func (m *LongRunningRecognizeRequest) XXX_Size() int {
- return xxx_messageInfo_LongRunningRecognizeRequest.Size(m)
- }
- func (m *LongRunningRecognizeRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LongRunningRecognizeRequest.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_LongRunningRecognizeRequest proto.InternalMessageInfo
-
- func (m *LongRunningRecognizeRequest) GetConfig() *RecognitionConfig {
- if m != nil {
- return m.Config
- }
- return nil
- }
-
- func (m *LongRunningRecognizeRequest) GetAudio() *RecognitionAudio {
- if m != nil {
- return m.Audio
- }
- return nil
- }
-
- // The top-level message sent by the client for the `StreamingRecognize` method.
- // Multiple `StreamingRecognizeRequest` messages are sent. The first message
- // must contain a `streaming_config` message and must not contain `audio` data.
- // All subsequent messages must contain `audio` data and must not contain a
- // `streaming_config` message.
- type StreamingRecognizeRequest struct {
- // The streaming request, which is either a streaming config or audio content.
- //
- // Types that are valid to be assigned to StreamingRequest:
- // *StreamingRecognizeRequest_StreamingConfig
- // *StreamingRecognizeRequest_AudioContent
- StreamingRequest isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *StreamingRecognizeRequest) Reset() { *m = StreamingRecognizeRequest{} }
- func (m *StreamingRecognizeRequest) String() string { return proto.CompactTextString(m) }
- func (*StreamingRecognizeRequest) ProtoMessage() {}
- func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{2}
- }
- func (m *StreamingRecognizeRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StreamingRecognizeRequest.Unmarshal(m, b)
- }
- func (m *StreamingRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StreamingRecognizeRequest.Marshal(b, m, deterministic)
- }
- func (dst *StreamingRecognizeRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StreamingRecognizeRequest.Merge(dst, src)
- }
- func (m *StreamingRecognizeRequest) XXX_Size() int {
- return xxx_messageInfo_StreamingRecognizeRequest.Size(m)
- }
- func (m *StreamingRecognizeRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_StreamingRecognizeRequest.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_StreamingRecognizeRequest proto.InternalMessageInfo
-
- type isStreamingRecognizeRequest_StreamingRequest interface {
- isStreamingRecognizeRequest_StreamingRequest()
- }
-
- type StreamingRecognizeRequest_StreamingConfig struct {
- StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,proto3,oneof"`
- }
-
- type StreamingRecognizeRequest_AudioContent struct {
- AudioContent []byte `protobuf:"bytes,2,opt,name=audio_content,json=audioContent,proto3,oneof"`
- }
-
- func (*StreamingRecognizeRequest_StreamingConfig) isStreamingRecognizeRequest_StreamingRequest() {}
-
- func (*StreamingRecognizeRequest_AudioContent) isStreamingRecognizeRequest_StreamingRequest() {}
-
- func (m *StreamingRecognizeRequest) GetStreamingRequest() isStreamingRecognizeRequest_StreamingRequest {
- if m != nil {
- return m.StreamingRequest
- }
- return nil
- }
-
- func (m *StreamingRecognizeRequest) GetStreamingConfig() *StreamingRecognitionConfig {
- if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_StreamingConfig); ok {
- return x.StreamingConfig
- }
- return nil
- }
-
- func (m *StreamingRecognizeRequest) GetAudioContent() []byte {
- if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_AudioContent); ok {
- return x.AudioContent
- }
- return nil
- }
-
- // XXX_OneofFuncs is for the internal use of the proto package.
- func (*StreamingRecognizeRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _StreamingRecognizeRequest_OneofMarshaler, _StreamingRecognizeRequest_OneofUnmarshaler, _StreamingRecognizeRequest_OneofSizer, []interface{}{
- (*StreamingRecognizeRequest_StreamingConfig)(nil),
- (*StreamingRecognizeRequest_AudioContent)(nil),
- }
- }
-
- func _StreamingRecognizeRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*StreamingRecognizeRequest)
- // streaming_request
- switch x := m.StreamingRequest.(type) {
- case *StreamingRecognizeRequest_StreamingConfig:
- b.EncodeVarint(1<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.StreamingConfig); err != nil {
- return err
- }
- case *StreamingRecognizeRequest_AudioContent:
- b.EncodeVarint(2<<3 | proto.WireBytes)
- b.EncodeRawBytes(x.AudioContent)
- case nil:
- default:
- return fmt.Errorf("StreamingRecognizeRequest.StreamingRequest has unexpected type %T", x)
- }
- return nil
- }
-
- func _StreamingRecognizeRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*StreamingRecognizeRequest)
- switch tag {
- case 1: // streaming_request.streaming_config
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(StreamingRecognitionConfig)
- err := b.DecodeMessage(msg)
- m.StreamingRequest = &StreamingRecognizeRequest_StreamingConfig{msg}
- return true, err
- case 2: // streaming_request.audio_content
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeRawBytes(true)
- m.StreamingRequest = &StreamingRecognizeRequest_AudioContent{x}
- return true, err
- default:
- return false, nil
- }
- }
-
- func _StreamingRecognizeRequest_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*StreamingRecognizeRequest)
- // streaming_request
- switch x := m.StreamingRequest.(type) {
- case *StreamingRecognizeRequest_StreamingConfig:
- s := proto.Size(x.StreamingConfig)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *StreamingRecognizeRequest_AudioContent:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(len(x.AudioContent)))
- n += len(x.AudioContent)
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
- }
-
- // Provides information to the recognizer that specifies how to process the
- // request.
- type StreamingRecognitionConfig struct {
- // *Required* Provides information to the recognizer that specifies how to
- // process the request.
- Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
- // *Optional* If `false` or omitted, the recognizer will perform continuous
- // recognition (continuing to wait for and process audio even if the user
- // pauses speaking) until the client closes the input stream (gRPC API) or
- // until the maximum time limit has been reached. May return multiple
- // `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
- //
- // If `true`, the recognizer will detect a single spoken utterance. When it
- // detects that the user has paused or stopped speaking, it will return an
- // `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
- // more than one `StreamingRecognitionResult` with the `is_final` flag set to
- // `true`.
- SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance,proto3" json:"single_utterance,omitempty"`
- // *Optional* If `true`, interim results (tentative hypotheses) may be
- // returned as they become available (these interim results are indicated with
- // the `is_final=false` flag).
- // If `false` or omitted, only `is_final=true` result(s) are returned.
- InterimResults bool `protobuf:"varint,3,opt,name=interim_results,json=interimResults,proto3" json:"interim_results,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *StreamingRecognitionConfig) Reset() { *m = StreamingRecognitionConfig{} }
- func (m *StreamingRecognitionConfig) String() string { return proto.CompactTextString(m) }
- func (*StreamingRecognitionConfig) ProtoMessage() {}
- func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{3}
- }
- func (m *StreamingRecognitionConfig) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StreamingRecognitionConfig.Unmarshal(m, b)
- }
- func (m *StreamingRecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StreamingRecognitionConfig.Marshal(b, m, deterministic)
- }
- func (dst *StreamingRecognitionConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StreamingRecognitionConfig.Merge(dst, src)
- }
- func (m *StreamingRecognitionConfig) XXX_Size() int {
- return xxx_messageInfo_StreamingRecognitionConfig.Size(m)
- }
- func (m *StreamingRecognitionConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_StreamingRecognitionConfig.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_StreamingRecognitionConfig proto.InternalMessageInfo
-
- func (m *StreamingRecognitionConfig) GetConfig() *RecognitionConfig {
- if m != nil {
- return m.Config
- }
- return nil
- }
-
- func (m *StreamingRecognitionConfig) GetSingleUtterance() bool {
- if m != nil {
- return m.SingleUtterance
- }
- return false
- }
-
- func (m *StreamingRecognitionConfig) GetInterimResults() bool {
- if m != nil {
- return m.InterimResults
- }
- return false
- }
-
- // Provides information to the recognizer that specifies how to process the
- // request.
- type RecognitionConfig struct {
- // Encoding of audio data sent in all `RecognitionAudio` messages.
- // This field is optional for `FLAC` and `WAV` audio files and required
- // for all other audio formats. For details, see
- // [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
- Encoding RecognitionConfig_AudioEncoding `protobuf:"varint,1,opt,name=encoding,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding" json:"encoding,omitempty"`
- // Sample rate in Hertz of the audio data sent in all
- // `RecognitionAudio` messages. Valid values are: 8000-48000.
- // 16000 is optimal. For best results, set the sampling rate of the audio
- // source to 16000 Hz. If that's not possible, use the native sample rate of
- // the audio source (instead of re-sampling).
- // This field is optional for `FLAC` and `WAV` audio files and required
- // for all other audio formats. For details, see
- // [AudioEncoding][google.cloud.speech.v1p1beta1.RecognitionConfig.AudioEncoding].
- SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
- // *Optional* The number of channels in the input audio data.
- // ONLY set this for MULTI-CHANNEL recognition.
- // Valid values for LINEAR16 and FLAC are `1`-`8`.
- // Valid values for OGG_OPUS are '1'-'254'.
- // Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
- // If `0` or omitted, defaults to one channel (mono).
- // Note: We only recognize the first channel by default.
- // To perform independent recognition on each channel set
- // `enable_separate_recognition_per_channel` to 'true'.
- AudioChannelCount int32 `protobuf:"varint,7,opt,name=audio_channel_count,json=audioChannelCount,proto3" json:"audio_channel_count,omitempty"`
- // This needs to be set to ‘true’ explicitly and `audio_channel_count` > 1
- // to get each channel recognized separately. The recognition result will
- // contain a `channel_tag` field to state which channel that result belongs
- // to. If this is not true, we will only recognize the first channel. The
- // request is billed cumulatively for all channels recognized:
- // `audio_channel_count` multiplied by the length of the audio.
- EnableSeparateRecognitionPerChannel bool `protobuf:"varint,12,opt,name=enable_separate_recognition_per_channel,json=enableSeparateRecognitionPerChannel,proto3" json:"enable_separate_recognition_per_channel,omitempty"`
- // *Required* The language of the supplied audio as a
- // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
- // Example: "en-US".
- // See [Language Support](/speech-to-text/docs/languages)
- // for a list of the currently supported language codes.
- LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
- // *Optional* A list of up to 3 additional
- // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tags,
- // listing possible alternative languages of the supplied audio.
- // See [Language Support](/speech-to-text/docs/languages)
- // for a list of the currently supported language codes.
- // If alternative languages are listed, recognition result will contain
- // recognition in the most likely language detected including the main
- // language_code. The recognition result will include the language tag
- // of the language detected in the audio.
- // Note: This feature is only supported for Voice Command and Voice Search
- // use cases and performance may vary for other use cases (e.g., phone call
- // transcription).
- AlternativeLanguageCodes []string `protobuf:"bytes,18,rep,name=alternative_language_codes,json=alternativeLanguageCodes,proto3" json:"alternative_language_codes,omitempty"`
- // *Optional* Maximum number of recognition hypotheses to be returned.
- // Specifically, the maximum number of `SpeechRecognitionAlternative` messages
- // within each `SpeechRecognitionResult`.
- // The server may return fewer than `max_alternatives`.
- // Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
- // one. If omitted, will return a maximum of one.
- MaxAlternatives int32 `protobuf:"varint,4,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
- // *Optional* If set to `true`, the server will attempt to filter out
- // profanities, replacing all but the initial character in each filtered word
- // with asterisks, e.g. "f***". If set to `false` or omitted, profanities
- // won't be filtered out.
- ProfanityFilter bool `protobuf:"varint,5,opt,name=profanity_filter,json=profanityFilter,proto3" json:"profanity_filter,omitempty"`
- // *Optional* array of
- // [SpeechContext][google.cloud.speech.v1p1beta1.SpeechContext]. A means to
- // provide context to assist the speech recognition. For more information, see
- // [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
- SpeechContexts []*SpeechContext `protobuf:"bytes,6,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
- // *Optional* If `true`, the top result includes a list of words and
- // the start and end time offsets (timestamps) for those words. If
- // `false`, no word-level time offset information is returned. The default is
- // `false`.
- EnableWordTimeOffsets bool `protobuf:"varint,8,opt,name=enable_word_time_offsets,json=enableWordTimeOffsets,proto3" json:"enable_word_time_offsets,omitempty"`
- // *Optional* If `true`, the top result includes a list of words and the
- // confidence for those words. If `false`, no word-level confidence
- // information is returned. The default is `false`.
- EnableWordConfidence bool `protobuf:"varint,15,opt,name=enable_word_confidence,json=enableWordConfidence,proto3" json:"enable_word_confidence,omitempty"`
- // *Optional* If 'true', adds punctuation to recognition result hypotheses.
- // This feature is only available in select languages. Setting this for
- // requests in other languages has no effect at all.
- // The default 'false' value does not add punctuation to result hypotheses.
- // Note: This is currently offered as an experimental service, complimentary
- // to all users. In the future this may be exclusively available as a
- // premium feature.
- EnableAutomaticPunctuation bool `protobuf:"varint,11,opt,name=enable_automatic_punctuation,json=enableAutomaticPunctuation,proto3" json:"enable_automatic_punctuation,omitempty"`
- // *Optional* If 'true', enables speaker detection for each recognized word in
- // the top alternative of the recognition result using a speaker_tag provided
- // in the WordInfo.
- // Note: When this is true, we send all the words from the beginning of the
- // audio for the top alternative in every consecutive STREAMING response.
- // This is done in order to improve our speaker tags as our models learn to
- // identify the speakers in the conversation over time.
- // For non-streaming requests, the diarization results will be provided only
- // in the top alternative of the FINAL SpeechRecognitionResult.
- EnableSpeakerDiarization bool `protobuf:"varint,16,opt,name=enable_speaker_diarization,json=enableSpeakerDiarization,proto3" json:"enable_speaker_diarization,omitempty"`
- // *Optional*
- // If set, specifies the estimated number of speakers in the conversation.
- // If not set, defaults to '2'.
- // Ignored unless enable_speaker_diarization is set to true.
- DiarizationSpeakerCount int32 `protobuf:"varint,17,opt,name=diarization_speaker_count,json=diarizationSpeakerCount,proto3" json:"diarization_speaker_count,omitempty"`
- // *Optional* Metadata regarding this request.
- Metadata *RecognitionMetadata `protobuf:"bytes,9,opt,name=metadata,proto3" json:"metadata,omitempty"`
- // *Optional* Which model to select for the given request. Select the model
- // best suited to your domain to get best results. If a model is not
- // explicitly specified, then we auto-select a model based on the parameters
- // in the RecognitionConfig.
- // <table>
- // <tr>
- // <td><b>Model</b></td>
- // <td><b>Description</b></td>
- // </tr>
- // <tr>
- // <td><code>command_and_search</code></td>
- // <td>Best for short queries such as voice commands or voice search.</td>
- // </tr>
- // <tr>
- // <td><code>phone_call</code></td>
- // <td>Best for audio that originated from a phone call (typically
- // recorded at an 8khz sampling rate).</td>
- // </tr>
- // <tr>
- // <td><code>video</code></td>
- // <td>Best for audio that originated from video or includes multiple
- // speakers. Ideally the audio is recorded at a 16khz or greater
- // sampling rate. This is a premium model that costs more than the
- // standard rate.</td>
- // </tr>
- // <tr>
- // <td><code>default</code></td>
- // <td>Best for audio that is not one of the specific audio models.
- // For example, long-form audio. Ideally the audio is high-fidelity,
- // recorded at a 16khz or greater sampling rate.</td>
- // </tr>
- // </table>
- Model string `protobuf:"bytes,13,opt,name=model,proto3" json:"model,omitempty"`
- // *Optional* Set to true to use an enhanced model for speech recognition.
- // If `use_enhanced` is set to true and the `model` field is not set, then
- // an appropriate enhanced model is chosen if:
- // 1. project is eligible for requesting enhanced models
- // 2. an enhanced model exists for the audio
- //
- // If `use_enhanced` is true and an enhanced version of the specified model
- // does not exist, then the speech is recognized using the standard version
- // of the specified model.
- //
- // Enhanced speech models require that you opt-in to data logging using
- // instructions in the
- // [documentation](/speech-to-text/docs/enable-data-logging). If you set
- // `use_enhanced` to true and you have not enabled audio logging, then you
- // will receive an error.
- UseEnhanced bool `protobuf:"varint,14,opt,name=use_enhanced,json=useEnhanced,proto3" json:"use_enhanced,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *RecognitionConfig) Reset() { *m = RecognitionConfig{} }
- func (m *RecognitionConfig) String() string { return proto.CompactTextString(m) }
- func (*RecognitionConfig) ProtoMessage() {}
- func (*RecognitionConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{4}
- }
- func (m *RecognitionConfig) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RecognitionConfig.Unmarshal(m, b)
- }
- func (m *RecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RecognitionConfig.Marshal(b, m, deterministic)
- }
- func (dst *RecognitionConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RecognitionConfig.Merge(dst, src)
- }
- func (m *RecognitionConfig) XXX_Size() int {
- return xxx_messageInfo_RecognitionConfig.Size(m)
- }
- func (m *RecognitionConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_RecognitionConfig.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_RecognitionConfig proto.InternalMessageInfo
-
- func (m *RecognitionConfig) GetEncoding() RecognitionConfig_AudioEncoding {
- if m != nil {
- return m.Encoding
- }
- return RecognitionConfig_ENCODING_UNSPECIFIED
- }
-
- func (m *RecognitionConfig) GetSampleRateHertz() int32 {
- if m != nil {
- return m.SampleRateHertz
- }
- return 0
- }
-
- func (m *RecognitionConfig) GetAudioChannelCount() int32 {
- if m != nil {
- return m.AudioChannelCount
- }
- return 0
- }
-
- func (m *RecognitionConfig) GetEnableSeparateRecognitionPerChannel() bool {
- if m != nil {
- return m.EnableSeparateRecognitionPerChannel
- }
- return false
- }
-
- func (m *RecognitionConfig) GetLanguageCode() string {
- if m != nil {
- return m.LanguageCode
- }
- return ""
- }
-
- func (m *RecognitionConfig) GetAlternativeLanguageCodes() []string {
- if m != nil {
- return m.AlternativeLanguageCodes
- }
- return nil
- }
-
- func (m *RecognitionConfig) GetMaxAlternatives() int32 {
- if m != nil {
- return m.MaxAlternatives
- }
- return 0
- }
-
- func (m *RecognitionConfig) GetProfanityFilter() bool {
- if m != nil {
- return m.ProfanityFilter
- }
- return false
- }
-
- func (m *RecognitionConfig) GetSpeechContexts() []*SpeechContext {
- if m != nil {
- return m.SpeechContexts
- }
- return nil
- }
-
- func (m *RecognitionConfig) GetEnableWordTimeOffsets() bool {
- if m != nil {
- return m.EnableWordTimeOffsets
- }
- return false
- }
-
- func (m *RecognitionConfig) GetEnableWordConfidence() bool {
- if m != nil {
- return m.EnableWordConfidence
- }
- return false
- }
-
- func (m *RecognitionConfig) GetEnableAutomaticPunctuation() bool {
- if m != nil {
- return m.EnableAutomaticPunctuation
- }
- return false
- }
-
- func (m *RecognitionConfig) GetEnableSpeakerDiarization() bool {
- if m != nil {
- return m.EnableSpeakerDiarization
- }
- return false
- }
-
- func (m *RecognitionConfig) GetDiarizationSpeakerCount() int32 {
- if m != nil {
- return m.DiarizationSpeakerCount
- }
- return 0
- }
-
- func (m *RecognitionConfig) GetMetadata() *RecognitionMetadata {
- if m != nil {
- return m.Metadata
- }
- return nil
- }
-
- func (m *RecognitionConfig) GetModel() string {
- if m != nil {
- return m.Model
- }
- return ""
- }
-
- func (m *RecognitionConfig) GetUseEnhanced() bool {
- if m != nil {
- return m.UseEnhanced
- }
- return false
- }
-
- // Description of audio data to be recognized.
- type RecognitionMetadata struct {
- // The use case most closely describing the audio content to be recognized.
- InteractionType RecognitionMetadata_InteractionType `protobuf:"varint,1,opt,name=interaction_type,json=interactionType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType" json:"interaction_type,omitempty"`
- // The industry vertical to which this speech recognition request most
- // closely applies. This is most indicative of the topics contained
- // in the audio. Use the 6-digit NAICS code to identify the industry
- // vertical - see https://www.naics.com/search/.
- IndustryNaicsCodeOfAudio uint32 `protobuf:"varint,3,opt,name=industry_naics_code_of_audio,json=industryNaicsCodeOfAudio,proto3" json:"industry_naics_code_of_audio,omitempty"`
- // The audio type that most closely describes the audio being recognized.
- MicrophoneDistance RecognitionMetadata_MicrophoneDistance `protobuf:"varint,4,opt,name=microphone_distance,json=microphoneDistance,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance" json:"microphone_distance,omitempty"`
- // The original media the speech was recorded on.
- OriginalMediaType RecognitionMetadata_OriginalMediaType `protobuf:"varint,5,opt,name=original_media_type,json=originalMediaType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType" json:"original_media_type,omitempty"`
- // The type of device the speech was recorded with.
- RecordingDeviceType RecognitionMetadata_RecordingDeviceType `protobuf:"varint,6,opt,name=recording_device_type,json=recordingDeviceType,proto3,enum=google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType" json:"recording_device_type,omitempty"`
- // The device used to make the recording. Examples: 'Nexus 5X' or
- // 'Polycom SoundStation IP 6000' or 'POTS' or 'VoIP' or
- // 'Cardioid Microphone'.
- RecordingDeviceName string `protobuf:"bytes,7,opt,name=recording_device_name,json=recordingDeviceName,proto3" json:"recording_device_name,omitempty"`
- // Mime type of the original audio file. For example `audio/m4a`,
- // `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`.
- // A list of possible audio mime types is maintained at
- // http://www.iana.org/assignments/media-types/media-types.xhtml#audio
- OriginalMimeType string `protobuf:"bytes,8,opt,name=original_mime_type,json=originalMimeType,proto3" json:"original_mime_type,omitempty"`
- // Obfuscated (privacy-protected) ID of the user, to identify number of
- // unique users using the service.
- ObfuscatedId int64 `protobuf:"varint,9,opt,name=obfuscated_id,json=obfuscatedId,proto3" json:"obfuscated_id,omitempty"`
- // Description of the content. Eg. "Recordings of federal supreme court
- // hearings from 2012".
- AudioTopic string `protobuf:"bytes,10,opt,name=audio_topic,json=audioTopic,proto3" json:"audio_topic,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *RecognitionMetadata) Reset() { *m = RecognitionMetadata{} }
- func (m *RecognitionMetadata) String() string { return proto.CompactTextString(m) }
- func (*RecognitionMetadata) ProtoMessage() {}
- func (*RecognitionMetadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{5}
- }
- func (m *RecognitionMetadata) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RecognitionMetadata.Unmarshal(m, b)
- }
- func (m *RecognitionMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RecognitionMetadata.Marshal(b, m, deterministic)
- }
- func (dst *RecognitionMetadata) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RecognitionMetadata.Merge(dst, src)
- }
- func (m *RecognitionMetadata) XXX_Size() int {
- return xxx_messageInfo_RecognitionMetadata.Size(m)
- }
- func (m *RecognitionMetadata) XXX_DiscardUnknown() {
- xxx_messageInfo_RecognitionMetadata.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_RecognitionMetadata proto.InternalMessageInfo
-
- func (m *RecognitionMetadata) GetInteractionType() RecognitionMetadata_InteractionType {
- if m != nil {
- return m.InteractionType
- }
- return RecognitionMetadata_INTERACTION_TYPE_UNSPECIFIED
- }
-
- func (m *RecognitionMetadata) GetIndustryNaicsCodeOfAudio() uint32 {
- if m != nil {
- return m.IndustryNaicsCodeOfAudio
- }
- return 0
- }
-
- func (m *RecognitionMetadata) GetMicrophoneDistance() RecognitionMetadata_MicrophoneDistance {
- if m != nil {
- return m.MicrophoneDistance
- }
- return RecognitionMetadata_MICROPHONE_DISTANCE_UNSPECIFIED
- }
-
- func (m *RecognitionMetadata) GetOriginalMediaType() RecognitionMetadata_OriginalMediaType {
- if m != nil {
- return m.OriginalMediaType
- }
- return RecognitionMetadata_ORIGINAL_MEDIA_TYPE_UNSPECIFIED
- }
-
- func (m *RecognitionMetadata) GetRecordingDeviceType() RecognitionMetadata_RecordingDeviceType {
- if m != nil {
- return m.RecordingDeviceType
- }
- return RecognitionMetadata_RECORDING_DEVICE_TYPE_UNSPECIFIED
- }
-
- func (m *RecognitionMetadata) GetRecordingDeviceName() string {
- if m != nil {
- return m.RecordingDeviceName
- }
- return ""
- }
-
- func (m *RecognitionMetadata) GetOriginalMimeType() string {
- if m != nil {
- return m.OriginalMimeType
- }
- return ""
- }
-
- func (m *RecognitionMetadata) GetObfuscatedId() int64 {
- if m != nil {
- return m.ObfuscatedId
- }
- return 0
- }
-
- func (m *RecognitionMetadata) GetAudioTopic() string {
- if m != nil {
- return m.AudioTopic
- }
- return ""
- }
-
- // Provides "hints" to the speech recognizer to favor specific words and phrases
- // in the results.
- type SpeechContext struct {
- // *Optional* A list of strings containing words and phrases "hints" so that
- // the speech recognition is more likely to recognize them. This can be used
- // to improve the accuracy for specific words and phrases, for example, if
- // specific commands are typically spoken by the user. This can also be used
- // to add additional words to the vocabulary of the recognizer. See
- // [usage limits](/speech-to-text/quotas#content).
- Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *SpeechContext) Reset() { *m = SpeechContext{} }
- func (m *SpeechContext) String() string { return proto.CompactTextString(m) }
- func (*SpeechContext) ProtoMessage() {}
- func (*SpeechContext) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{6}
- }
- func (m *SpeechContext) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SpeechContext.Unmarshal(m, b)
- }
- func (m *SpeechContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SpeechContext.Marshal(b, m, deterministic)
- }
- func (dst *SpeechContext) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SpeechContext.Merge(dst, src)
- }
- func (m *SpeechContext) XXX_Size() int {
- return xxx_messageInfo_SpeechContext.Size(m)
- }
- func (m *SpeechContext) XXX_DiscardUnknown() {
- xxx_messageInfo_SpeechContext.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_SpeechContext proto.InternalMessageInfo
-
- func (m *SpeechContext) GetPhrases() []string {
- if m != nil {
- return m.Phrases
- }
- return nil
- }
-
- // Contains audio data in the encoding specified in the `RecognitionConfig`.
- // Either `content` or `uri` must be supplied. Supplying both or neither
- // returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
- // See [content limits](/speech-to-text/quotas#content).
- type RecognitionAudio struct {
- // The audio source, which is either inline content or a Google Cloud
- // Storage uri.
- //
- // Types that are valid to be assigned to AudioSource:
- // *RecognitionAudio_Content
- // *RecognitionAudio_Uri
- AudioSource isRecognitionAudio_AudioSource `protobuf_oneof:"audio_source"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *RecognitionAudio) Reset() { *m = RecognitionAudio{} }
- func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) }
- func (*RecognitionAudio) ProtoMessage() {}
- func (*RecognitionAudio) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{7}
- }
- func (m *RecognitionAudio) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RecognitionAudio.Unmarshal(m, b)
- }
- func (m *RecognitionAudio) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RecognitionAudio.Marshal(b, m, deterministic)
- }
- func (dst *RecognitionAudio) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RecognitionAudio.Merge(dst, src)
- }
- func (m *RecognitionAudio) XXX_Size() int {
- return xxx_messageInfo_RecognitionAudio.Size(m)
- }
- func (m *RecognitionAudio) XXX_DiscardUnknown() {
- xxx_messageInfo_RecognitionAudio.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_RecognitionAudio proto.InternalMessageInfo
-
- type isRecognitionAudio_AudioSource interface {
- isRecognitionAudio_AudioSource()
- }
-
- type RecognitionAudio_Content struct {
- Content []byte `protobuf:"bytes,1,opt,name=content,proto3,oneof"`
- }
-
- type RecognitionAudio_Uri struct {
- Uri string `protobuf:"bytes,2,opt,name=uri,proto3,oneof"`
- }
-
- func (*RecognitionAudio_Content) isRecognitionAudio_AudioSource() {}
-
- func (*RecognitionAudio_Uri) isRecognitionAudio_AudioSource() {}
-
- func (m *RecognitionAudio) GetAudioSource() isRecognitionAudio_AudioSource {
- if m != nil {
- return m.AudioSource
- }
- return nil
- }
-
- func (m *RecognitionAudio) GetContent() []byte {
- if x, ok := m.GetAudioSource().(*RecognitionAudio_Content); ok {
- return x.Content
- }
- return nil
- }
-
- func (m *RecognitionAudio) GetUri() string {
- if x, ok := m.GetAudioSource().(*RecognitionAudio_Uri); ok {
- return x.Uri
- }
- return ""
- }
-
- // XXX_OneofFuncs is for the internal use of the proto package.
- func (*RecognitionAudio) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _RecognitionAudio_OneofMarshaler, _RecognitionAudio_OneofUnmarshaler, _RecognitionAudio_OneofSizer, []interface{}{
- (*RecognitionAudio_Content)(nil),
- (*RecognitionAudio_Uri)(nil),
- }
- }
-
- func _RecognitionAudio_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*RecognitionAudio)
- // audio_source
- switch x := m.AudioSource.(type) {
- case *RecognitionAudio_Content:
- b.EncodeVarint(1<<3 | proto.WireBytes)
- b.EncodeRawBytes(x.Content)
- case *RecognitionAudio_Uri:
- b.EncodeVarint(2<<3 | proto.WireBytes)
- b.EncodeStringBytes(x.Uri)
- case nil:
- default:
- return fmt.Errorf("RecognitionAudio.AudioSource has unexpected type %T", x)
- }
- return nil
- }
-
- func _RecognitionAudio_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*RecognitionAudio)
- switch tag {
- case 1: // audio_source.content
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeRawBytes(true)
- m.AudioSource = &RecognitionAudio_Content{x}
- return true, err
- case 2: // audio_source.uri
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeStringBytes()
- m.AudioSource = &RecognitionAudio_Uri{x}
- return true, err
- default:
- return false, nil
- }
- }
-
- func _RecognitionAudio_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*RecognitionAudio)
- // audio_source
- switch x := m.AudioSource.(type) {
- case *RecognitionAudio_Content:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(len(x.Content)))
- n += len(x.Content)
- case *RecognitionAudio_Uri:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(len(x.Uri)))
- n += len(x.Uri)
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
- }
-
- // The only message returned to the client by the `Recognize` method. It
- // contains the result as zero or more sequential `SpeechRecognitionResult`
- // messages.
- type RecognizeResponse struct {
- // Output only. Sequential list of transcription results corresponding to
- // sequential portions of audio.
- Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *RecognizeResponse) Reset() { *m = RecognizeResponse{} }
- func (m *RecognizeResponse) String() string { return proto.CompactTextString(m) }
- func (*RecognizeResponse) ProtoMessage() {}
- func (*RecognizeResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{8}
- }
- func (m *RecognizeResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RecognizeResponse.Unmarshal(m, b)
- }
- func (m *RecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RecognizeResponse.Marshal(b, m, deterministic)
- }
- func (dst *RecognizeResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RecognizeResponse.Merge(dst, src)
- }
- func (m *RecognizeResponse) XXX_Size() int {
- return xxx_messageInfo_RecognizeResponse.Size(m)
- }
- func (m *RecognizeResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_RecognizeResponse.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_RecognizeResponse proto.InternalMessageInfo
-
- func (m *RecognizeResponse) GetResults() []*SpeechRecognitionResult {
- if m != nil {
- return m.Results
- }
- return nil
- }
-
- // The only message returned to the client by the `LongRunningRecognize` method.
- // It contains the result as zero or more sequential `SpeechRecognitionResult`
- // messages. It is included in the `result.response` field of the `Operation`
- // returned by the `GetOperation` call of the `google::longrunning::Operations`
- // service.
- type LongRunningRecognizeResponse struct {
- // Output only. Sequential list of transcription results corresponding to
- // sequential portions of audio.
- Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *LongRunningRecognizeResponse) Reset() { *m = LongRunningRecognizeResponse{} }
- func (m *LongRunningRecognizeResponse) String() string { return proto.CompactTextString(m) }
- func (*LongRunningRecognizeResponse) ProtoMessage() {}
- func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{9}
- }
- func (m *LongRunningRecognizeResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LongRunningRecognizeResponse.Unmarshal(m, b)
- }
- func (m *LongRunningRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LongRunningRecognizeResponse.Marshal(b, m, deterministic)
- }
- func (dst *LongRunningRecognizeResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LongRunningRecognizeResponse.Merge(dst, src)
- }
- func (m *LongRunningRecognizeResponse) XXX_Size() int {
- return xxx_messageInfo_LongRunningRecognizeResponse.Size(m)
- }
- func (m *LongRunningRecognizeResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LongRunningRecognizeResponse.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_LongRunningRecognizeResponse proto.InternalMessageInfo
-
- func (m *LongRunningRecognizeResponse) GetResults() []*SpeechRecognitionResult {
- if m != nil {
- return m.Results
- }
- return nil
- }
-
- // Describes the progress of a long-running `LongRunningRecognize` call. It is
- // included in the `metadata` field of the `Operation` returned by the
- // `GetOperation` call of the `google::longrunning::Operations` service.
- type LongRunningRecognizeMetadata struct {
- // Approximate percentage of audio processed thus far. Guaranteed to be 100
- // when the audio is fully processed and the results are available.
- ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
- // Time when the request was received.
- StartTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
- // Time of the most recent processing update.
- LastUpdateTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *LongRunningRecognizeMetadata) Reset() { *m = LongRunningRecognizeMetadata{} }
- func (m *LongRunningRecognizeMetadata) String() string { return proto.CompactTextString(m) }
- func (*LongRunningRecognizeMetadata) ProtoMessage() {}
- func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{10}
- }
- func (m *LongRunningRecognizeMetadata) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LongRunningRecognizeMetadata.Unmarshal(m, b)
- }
- func (m *LongRunningRecognizeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LongRunningRecognizeMetadata.Marshal(b, m, deterministic)
- }
- func (dst *LongRunningRecognizeMetadata) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LongRunningRecognizeMetadata.Merge(dst, src)
- }
- func (m *LongRunningRecognizeMetadata) XXX_Size() int {
- return xxx_messageInfo_LongRunningRecognizeMetadata.Size(m)
- }
- func (m *LongRunningRecognizeMetadata) XXX_DiscardUnknown() {
- xxx_messageInfo_LongRunningRecognizeMetadata.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_LongRunningRecognizeMetadata proto.InternalMessageInfo
-
- func (m *LongRunningRecognizeMetadata) GetProgressPercent() int32 {
- if m != nil {
- return m.ProgressPercent
- }
- return 0
- }
-
- func (m *LongRunningRecognizeMetadata) GetStartTime() *timestamp.Timestamp {
- if m != nil {
- return m.StartTime
- }
- return nil
- }
-
- func (m *LongRunningRecognizeMetadata) GetLastUpdateTime() *timestamp.Timestamp {
- if m != nil {
- return m.LastUpdateTime
- }
- return nil
- }
-
- // `StreamingRecognizeResponse` is the only message returned to the client by
- // `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
- // messages are streamed back to the client. If there is no recognizable
- // audio, and `single_utterance` is set to false, then no messages are streamed
- // back to the client.
- //
- // Here's an example of a series of seven `StreamingRecognizeResponse`s that
- // might be returned while processing audio:
- //
- // 1. results { alternatives { transcript: "tube" } stability: 0.01 }
- //
- // 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
- //
- // 3. results { alternatives { transcript: "to be" } stability: 0.9 }
- // results { alternatives { transcript: " or not to be" } stability: 0.01 }
- //
- // 4. results { alternatives { transcript: "to be or not to be"
- // confidence: 0.92 }
- // alternatives { transcript: "to bee or not to bee" }
- // is_final: true }
- //
- // 5. results { alternatives { transcript: " that's" } stability: 0.01 }
- //
- // 6. results { alternatives { transcript: " that is" } stability: 0.9 }
- // results { alternatives { transcript: " the question" } stability: 0.01 }
- //
- // 7. results { alternatives { transcript: " that is the question"
- // confidence: 0.98 }
- // alternatives { transcript: " that was the question" }
- // is_final: true }
- //
- // Notes:
- //
- // - Only two of the above responses #4 and #7 contain final results; they are
- // indicated by `is_final: true`. Concatenating these together generates the
- // full transcript: "to be or not to be that is the question".
- //
- // - The others contain interim `results`. #3 and #6 contain two interim
- // `results`: the first portion has a high stability and is less likely to
- // change; the second portion has a low stability and is very likely to
- // change. A UI designer might choose to show only high stability `results`.
- //
- // - The specific `stability` and `confidence` values shown above are only for
- // illustrative purposes. Actual values may vary.
- //
- // - In each response, only one of these fields will be set:
- // `error`,
- // `speech_event_type`, or
- // one or more (repeated) `results`.
- type StreamingRecognizeResponse struct {
- // Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
- // message that specifies the error for the operation.
- Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
- // Output only. This repeated list contains zero or more results that
- // correspond to consecutive portions of the audio currently being processed.
- // It contains zero or one `is_final=true` result (the newly settled portion),
- // followed by zero or more `is_final=false` results (the interim results).
- Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
- // Output only. Indicates the type of speech event.
- SpeechEventType StreamingRecognizeResponse_SpeechEventType `protobuf:"varint,4,opt,name=speech_event_type,json=speechEventType,proto3,enum=google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType" json:"speech_event_type,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *StreamingRecognizeResponse) Reset() { *m = StreamingRecognizeResponse{} }
- func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) }
- func (*StreamingRecognizeResponse) ProtoMessage() {}
- func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{11}
- }
- func (m *StreamingRecognizeResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StreamingRecognizeResponse.Unmarshal(m, b)
- }
- func (m *StreamingRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StreamingRecognizeResponse.Marshal(b, m, deterministic)
- }
- func (dst *StreamingRecognizeResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StreamingRecognizeResponse.Merge(dst, src)
- }
- func (m *StreamingRecognizeResponse) XXX_Size() int {
- return xxx_messageInfo_StreamingRecognizeResponse.Size(m)
- }
- func (m *StreamingRecognizeResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_StreamingRecognizeResponse.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_StreamingRecognizeResponse proto.InternalMessageInfo
-
- func (m *StreamingRecognizeResponse) GetError() *status.Status {
- if m != nil {
- return m.Error
- }
- return nil
- }
-
- func (m *StreamingRecognizeResponse) GetResults() []*StreamingRecognitionResult {
- if m != nil {
- return m.Results
- }
- return nil
- }
-
- func (m *StreamingRecognizeResponse) GetSpeechEventType() StreamingRecognizeResponse_SpeechEventType {
- if m != nil {
- return m.SpeechEventType
- }
- return StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED
- }
-
- // A streaming speech recognition result corresponding to a portion of the audio
- // that is currently being processed.
- type StreamingRecognitionResult struct {
- // Output only. May contain one or more recognition hypotheses (up to the
- // maximum specified in `max_alternatives`).
- // These alternatives are ordered in terms of accuracy, with the top (first)
- // alternative being the most probable, as ranked by the recognizer.
- Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
- // Output only. If `false`, this `StreamingRecognitionResult` represents an
- // interim result that may change. If `true`, this is the final time the
- // speech service will return this particular `StreamingRecognitionResult`,
- // the recognizer will not return any further hypotheses for this portion of
- // the transcript and corresponding audio.
- IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal,proto3" json:"is_final,omitempty"`
- // Output only. An estimate of the likelihood that the recognizer will not
- // change its guess about this interim result. Values range from 0.0
- // (completely unstable) to 1.0 (completely stable).
- // This field is only provided for interim results (`is_final=false`).
- // The default of 0.0 is a sentinel value indicating `stability` was not set.
- Stability float32 `protobuf:"fixed32,3,opt,name=stability,proto3" json:"stability,omitempty"`
- // Output only. Time offset of the end of this result relative to the
- // beginning of the audio.
- ResultEndTime *duration.Duration `protobuf:"bytes,4,opt,name=result_end_time,json=resultEndTime,proto3" json:"result_end_time,omitempty"`
- // For multi-channel audio, this is the channel number corresponding to the
- // recognized result for the audio from that channel.
- // For audio_channel_count = N, its output values can range from '1' to 'N'.
- ChannelTag int32 `protobuf:"varint,5,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
- // Output only. The
- // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
- // language in this result. This language code was detected to have the most
- // likelihood of being spoken in the audio.
- LanguageCode string `protobuf:"bytes,6,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *StreamingRecognitionResult) Reset() { *m = StreamingRecognitionResult{} }
- func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) }
- func (*StreamingRecognitionResult) ProtoMessage() {}
- func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{12}
- }
- func (m *StreamingRecognitionResult) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StreamingRecognitionResult.Unmarshal(m, b)
- }
- func (m *StreamingRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StreamingRecognitionResult.Marshal(b, m, deterministic)
- }
- func (dst *StreamingRecognitionResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StreamingRecognitionResult.Merge(dst, src)
- }
- func (m *StreamingRecognitionResult) XXX_Size() int {
- return xxx_messageInfo_StreamingRecognitionResult.Size(m)
- }
- func (m *StreamingRecognitionResult) XXX_DiscardUnknown() {
- xxx_messageInfo_StreamingRecognitionResult.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_StreamingRecognitionResult proto.InternalMessageInfo
-
- func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
- if m != nil {
- return m.Alternatives
- }
- return nil
- }
-
- func (m *StreamingRecognitionResult) GetIsFinal() bool {
- if m != nil {
- return m.IsFinal
- }
- return false
- }
-
- func (m *StreamingRecognitionResult) GetStability() float32 {
- if m != nil {
- return m.Stability
- }
- return 0
- }
-
- func (m *StreamingRecognitionResult) GetResultEndTime() *duration.Duration {
- if m != nil {
- return m.ResultEndTime
- }
- return nil
- }
-
- func (m *StreamingRecognitionResult) GetChannelTag() int32 {
- if m != nil {
- return m.ChannelTag
- }
- return 0
- }
-
- func (m *StreamingRecognitionResult) GetLanguageCode() string {
- if m != nil {
- return m.LanguageCode
- }
- return ""
- }
-
- // A speech recognition result corresponding to a portion of the audio.
- type SpeechRecognitionResult struct {
- // Output only. May contain one or more recognition hypotheses (up to the
- // maximum specified in `max_alternatives`).
- // These alternatives are ordered in terms of accuracy, with the top (first)
- // alternative being the most probable, as ranked by the recognizer.
- Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
- // For multi-channel audio, this is the channel number corresponding to the
- // recognized result for the audio from that channel.
- // For audio_channel_count = N, its output values can range from '1' to 'N'.
- ChannelTag int32 `protobuf:"varint,2,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
- // Output only. The
- // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of the
- // language in this result. This language code was detected to have the most
- // likelihood of being spoken in the audio.
- LanguageCode string `protobuf:"bytes,5,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *SpeechRecognitionResult) Reset() { *m = SpeechRecognitionResult{} }
- func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) }
- func (*SpeechRecognitionResult) ProtoMessage() {}
- func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{13}
- }
- func (m *SpeechRecognitionResult) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SpeechRecognitionResult.Unmarshal(m, b)
- }
- func (m *SpeechRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SpeechRecognitionResult.Marshal(b, m, deterministic)
- }
- func (dst *SpeechRecognitionResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SpeechRecognitionResult.Merge(dst, src)
- }
- func (m *SpeechRecognitionResult) XXX_Size() int {
- return xxx_messageInfo_SpeechRecognitionResult.Size(m)
- }
- func (m *SpeechRecognitionResult) XXX_DiscardUnknown() {
- xxx_messageInfo_SpeechRecognitionResult.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_SpeechRecognitionResult proto.InternalMessageInfo
-
- func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
- if m != nil {
- return m.Alternatives
- }
- return nil
- }
-
- func (m *SpeechRecognitionResult) GetChannelTag() int32 {
- if m != nil {
- return m.ChannelTag
- }
- return 0
- }
-
- func (m *SpeechRecognitionResult) GetLanguageCode() string {
- if m != nil {
- return m.LanguageCode
- }
- return ""
- }
-
- // Alternative hypotheses (a.k.a. n-best list).
- type SpeechRecognitionAlternative struct {
- // Output only. Transcript text representing the words that the user spoke.
- Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
- // Output only. The confidence estimate between 0.0 and 1.0. A higher number
- // indicates an estimated greater likelihood that the recognized words are
- // correct. This field is set only for the top alternative of a non-streaming
- // result or, of a streaming result where `is_final=true`.
- // This field is not guaranteed to be accurate and users should not rely on it
- // to be always provided.
- // The default of 0.0 is a sentinel value indicating `confidence` was not set.
- Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
- // Output only. A list of word-specific information for each recognized word.
- // Note: When `enable_speaker_diarization` is true, you will see all the words
- // from the beginning of the audio.
- Words []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *SpeechRecognitionAlternative) Reset() { *m = SpeechRecognitionAlternative{} }
- func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) }
- func (*SpeechRecognitionAlternative) ProtoMessage() {}
- func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{14}
- }
- func (m *SpeechRecognitionAlternative) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SpeechRecognitionAlternative.Unmarshal(m, b)
- }
- func (m *SpeechRecognitionAlternative) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SpeechRecognitionAlternative.Marshal(b, m, deterministic)
- }
- func (dst *SpeechRecognitionAlternative) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SpeechRecognitionAlternative.Merge(dst, src)
- }
- func (m *SpeechRecognitionAlternative) XXX_Size() int {
- return xxx_messageInfo_SpeechRecognitionAlternative.Size(m)
- }
- func (m *SpeechRecognitionAlternative) XXX_DiscardUnknown() {
- xxx_messageInfo_SpeechRecognitionAlternative.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_SpeechRecognitionAlternative proto.InternalMessageInfo
-
- func (m *SpeechRecognitionAlternative) GetTranscript() string {
- if m != nil {
- return m.Transcript
- }
- return ""
- }
-
- func (m *SpeechRecognitionAlternative) GetConfidence() float32 {
- if m != nil {
- return m.Confidence
- }
- return 0
- }
-
- func (m *SpeechRecognitionAlternative) GetWords() []*WordInfo {
- if m != nil {
- return m.Words
- }
- return nil
- }
-
- // Word-specific information for recognized words.
- type WordInfo struct {
- // Output only. Time offset relative to the beginning of the audio,
- // and corresponding to the start of the spoken word.
- // This field is only set if `enable_word_time_offsets=true` and only
- // in the top hypothesis.
- // This is an experimental feature and the accuracy of the time offset can
- // vary.
- StartTime *duration.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
- // Output only. Time offset relative to the beginning of the audio,
- // and corresponding to the end of the spoken word.
- // This field is only set if `enable_word_time_offsets=true` and only
- // in the top hypothesis.
- // This is an experimental feature and the accuracy of the time offset can
- // vary.
- EndTime *duration.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
- // Output only. The word corresponding to this set of information.
- Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
- // Output only. The confidence estimate between 0.0 and 1.0. A higher number
- // indicates an estimated greater likelihood that the recognized words are
- // correct. This field is set only for the top alternative of a non-streaming
- // result or, of a streaming result where `is_final=true`.
- // This field is not guaranteed to be accurate and users should not rely on it
- // to be always provided.
- // The default of 0.0 is a sentinel value indicating `confidence` was not set.
- Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
- // Output only. A distinct integer value is assigned for every speaker within
- // the audio. This field specifies which one of those speakers was detected to
- // have spoken this word. Value ranges from '1' to diarization_speaker_count.
- // speaker_tag is set if enable_speaker_diarization = 'true' and only in the
- // top alternative.
- SpeakerTag int32 `protobuf:"varint,5,opt,name=speaker_tag,json=speakerTag,proto3" json:"speaker_tag,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
- }
-
- func (m *WordInfo) Reset() { *m = WordInfo{} }
- func (m *WordInfo) String() string { return proto.CompactTextString(m) }
- func (*WordInfo) ProtoMessage() {}
- func (*WordInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_cloud_speech_9e62932d6cbd582d, []int{15}
- }
- func (m *WordInfo) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_WordInfo.Unmarshal(m, b)
- }
- func (m *WordInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_WordInfo.Marshal(b, m, deterministic)
- }
- func (dst *WordInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WordInfo.Merge(dst, src)
- }
- func (m *WordInfo) XXX_Size() int {
- return xxx_messageInfo_WordInfo.Size(m)
- }
- func (m *WordInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_WordInfo.DiscardUnknown(m)
- }
-
- var xxx_messageInfo_WordInfo proto.InternalMessageInfo
-
- func (m *WordInfo) GetStartTime() *duration.Duration {
- if m != nil {
- return m.StartTime
- }
- return nil
- }
-
- func (m *WordInfo) GetEndTime() *duration.Duration {
- if m != nil {
- return m.EndTime
- }
- return nil
- }
-
- func (m *WordInfo) GetWord() string {
- if m != nil {
- return m.Word
- }
- return ""
- }
-
- func (m *WordInfo) GetConfidence() float32 {
- if m != nil {
- return m.Confidence
- }
- return 0
- }
-
- func (m *WordInfo) GetSpeakerTag() int32 {
- if m != nil {
- return m.SpeakerTag
- }
- return 0
- }
-
- func init() {
- proto.RegisterType((*RecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.RecognizeRequest")
- proto.RegisterType((*LongRunningRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeRequest")
- proto.RegisterType((*StreamingRecognizeRequest)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeRequest")
- proto.RegisterType((*StreamingRecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionConfig")
- proto.RegisterType((*RecognitionConfig)(nil), "google.cloud.speech.v1p1beta1.RecognitionConfig")
- proto.RegisterType((*RecognitionMetadata)(nil), "google.cloud.speech.v1p1beta1.RecognitionMetadata")
- proto.RegisterType((*SpeechContext)(nil), "google.cloud.speech.v1p1beta1.SpeechContext")
- proto.RegisterType((*RecognitionAudio)(nil), "google.cloud.speech.v1p1beta1.RecognitionAudio")
- proto.RegisterType((*RecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.RecognizeResponse")
- proto.RegisterType((*LongRunningRecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeResponse")
- proto.RegisterType((*LongRunningRecognizeMetadata)(nil), "google.cloud.speech.v1p1beta1.LongRunningRecognizeMetadata")
- proto.RegisterType((*StreamingRecognizeResponse)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognizeResponse")
- proto.RegisterType((*StreamingRecognitionResult)(nil), "google.cloud.speech.v1p1beta1.StreamingRecognitionResult")
- proto.RegisterType((*SpeechRecognitionResult)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionResult")
- proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.speech.v1p1beta1.SpeechRecognitionAlternative")
- proto.RegisterType((*WordInfo)(nil), "google.cloud.speech.v1p1beta1.WordInfo")
- proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionConfig_AudioEncoding", RecognitionConfig_AudioEncoding_name, RecognitionConfig_AudioEncoding_value)
- proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_InteractionType", RecognitionMetadata_InteractionType_name, RecognitionMetadata_InteractionType_value)
- proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_MicrophoneDistance", RecognitionMetadata_MicrophoneDistance_name, RecognitionMetadata_MicrophoneDistance_value)
- proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_OriginalMediaType", RecognitionMetadata_OriginalMediaType_name, RecognitionMetadata_OriginalMediaType_value)
- proto.RegisterEnum("google.cloud.speech.v1p1beta1.RecognitionMetadata_RecordingDeviceType", RecognitionMetadata_RecordingDeviceType_name, RecognitionMetadata_RecordingDeviceType_value)
- proto.RegisterEnum("google.cloud.speech.v1p1beta1.StreamingRecognizeResponse_SpeechEventType", StreamingRecognizeResponse_SpeechEventType_name, StreamingRecognizeResponse_SpeechEventType_value)
- }
-
- // Reference imports to suppress errors if they are not otherwise used.
- var _ context.Context
- var _ grpc.ClientConn
-
- // This is a compile-time assertion to ensure that this generated file
- // is compatible with the grpc package it is being compiled against.
- const _ = grpc.SupportPackageIsVersion4
-
- // SpeechClient is the client API for Speech service.
- //
- // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
- type SpeechClient interface {
- // Performs synchronous speech recognition: receive results after all audio
- // has been sent and processed.
- Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error)
- // Performs asynchronous speech recognition: receive results via the
- // google.longrunning.Operations interface. Returns either an
- // `Operation.error` or an `Operation.response` which contains
- // a `LongRunningRecognizeResponse` message.
- LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
- // Performs bidirectional streaming speech recognition: receive results while
- // sending audio. This method is only available via the gRPC API (not REST).
- StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error)
- }
-
- type speechClient struct {
- cc *grpc.ClientConn
- }
-
- func NewSpeechClient(cc *grpc.ClientConn) SpeechClient {
- return &speechClient{cc}
- }
-
- func (c *speechClient) Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error) {
- out := new(RecognizeResponse)
- err := c.cc.Invoke(ctx, "/google.cloud.speech.v1p1beta1.Speech/Recognize", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
- }
-
- func (c *speechClient) LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
- out := new(longrunning.Operation)
- err := c.cc.Invoke(ctx, "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
- }
-
- func (c *speechClient) StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Speech_serviceDesc.Streams[0], "/google.cloud.speech.v1p1beta1.Speech/StreamingRecognize", opts...)
- if err != nil {
- return nil, err
- }
- x := &speechStreamingRecognizeClient{stream}
- return x, nil
- }
-
- type Speech_StreamingRecognizeClient interface {
- Send(*StreamingRecognizeRequest) error
- Recv() (*StreamingRecognizeResponse, error)
- grpc.ClientStream
- }
-
- type speechStreamingRecognizeClient struct {
- grpc.ClientStream
- }
-
- func (x *speechStreamingRecognizeClient) Send(m *StreamingRecognizeRequest) error {
- return x.ClientStream.SendMsg(m)
- }
-
- func (x *speechStreamingRecognizeClient) Recv() (*StreamingRecognizeResponse, error) {
- m := new(StreamingRecognizeResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
- }
-
- // SpeechServer is the server API for Speech service.
- type SpeechServer interface {
- // Performs synchronous speech recognition: receive results after all audio
- // has been sent and processed.
- Recognize(context.Context, *RecognizeRequest) (*RecognizeResponse, error)
- // Performs asynchronous speech recognition: receive results via the
- // google.longrunning.Operations interface. Returns either an
- // `Operation.error` or an `Operation.response` which contains
- // a `LongRunningRecognizeResponse` message.
- LongRunningRecognize(context.Context, *LongRunningRecognizeRequest) (*longrunning.Operation, error)
- // Performs bidirectional streaming speech recognition: receive results while
- // sending audio. This method is only available via the gRPC API (not REST).
- StreamingRecognize(Speech_StreamingRecognizeServer) error
- }
-
- func RegisterSpeechServer(s *grpc.Server, srv SpeechServer) {
- s.RegisterService(&_Speech_serviceDesc, srv)
- }
-
- func _Speech_Recognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RecognizeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpeechServer).Recognize(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.cloud.speech.v1p1beta1.Speech/Recognize",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpeechServer).Recognize(ctx, req.(*RecognizeRequest))
- }
- return interceptor(ctx, in, info, handler)
- }
-
- func _Speech_LongRunningRecognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(LongRunningRecognizeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpeechServer).LongRunningRecognize(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpeechServer).LongRunningRecognize(ctx, req.(*LongRunningRecognizeRequest))
- }
- return interceptor(ctx, in, info, handler)
- }
-
- func _Speech_StreamingRecognize_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(SpeechServer).StreamingRecognize(&speechStreamingRecognizeServer{stream})
- }
-
- type Speech_StreamingRecognizeServer interface {
- Send(*StreamingRecognizeResponse) error
- Recv() (*StreamingRecognizeRequest, error)
- grpc.ServerStream
- }
-
- type speechStreamingRecognizeServer struct {
- grpc.ServerStream
- }
-
- func (x *speechStreamingRecognizeServer) Send(m *StreamingRecognizeResponse) error {
- return x.ServerStream.SendMsg(m)
- }
-
- func (x *speechStreamingRecognizeServer) Recv() (*StreamingRecognizeRequest, error) {
- m := new(StreamingRecognizeRequest)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
- }
-
- var _Speech_serviceDesc = grpc.ServiceDesc{
- ServiceName: "google.cloud.speech.v1p1beta1.Speech",
- HandlerType: (*SpeechServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Recognize",
- Handler: _Speech_Recognize_Handler,
- },
- {
- MethodName: "LongRunningRecognize",
- Handler: _Speech_LongRunningRecognize_Handler,
- },
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "StreamingRecognize",
- Handler: _Speech_StreamingRecognize_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "google/cloud/speech/v1p1beta1/cloud_speech.proto",
- }
-
- func init() {
- proto.RegisterFile("google/cloud/speech/v1p1beta1/cloud_speech.proto", fileDescriptor_cloud_speech_9e62932d6cbd582d)
- }
-
- var fileDescriptor_cloud_speech_9e62932d6cbd582d = []byte{
- // 2178 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xbf, 0x73, 0xdb, 0xc8,
- 0xf5, 0x37, 0x48, 0x51, 0x12, 0x9f, 0x7e, 0x41, 0x2b, 0xdf, 0x89, 0x96, 0x75, 0x67, 0x1b, 0x9e,
- 0x3b, 0xfb, 0xee, 0x7b, 0x43, 0xd9, 0xfa, 0xde, 0x5c, 0xce, 0xbe, 0xe4, 0x26, 0x14, 0x00, 0x99,
- 0x98, 0x21, 0x09, 0xce, 0x92, 0xb2, 0xe3, 0x6b, 0x76, 0x56, 0xc4, 0x92, 0xc2, 0x84, 0x04, 0x10,
- 0x60, 0xe1, 0x58, 0x2e, 0xd3, 0xa6, 0x48, 0x91, 0x99, 0x74, 0xa9, 0x72, 0x75, 0xfe, 0x80, 0x34,
- 0x97, 0x26, 0x4d, 0x9a, 0x14, 0xe9, 0x52, 0xa5, 0xc8, 0x7f, 0x90, 0x26, 0x33, 0x69, 0x32, 0xbb,
- 0x0b, 0x50, 0x10, 0x29, 0x5b, 0xb6, 0x26, 0x37, 0x93, 0x0e, 0xfb, 0x79, 0x3f, 0xf6, 0xbd, 0xb7,
- 0x6f, 0xdf, 0xbe, 0x07, 0x78, 0x30, 0x0a, 0xc3, 0xd1, 0x98, 0xed, 0x0d, 0xc6, 0x61, 0xea, 0xed,
- 0x25, 0x11, 0x63, 0x83, 0x93, 0xbd, 0x17, 0x0f, 0xa3, 0x87, 0xc7, 0x8c, 0xd3, 0x87, 0x0a, 0x26,
- 0x0a, 0xae, 0x47, 0x71, 0xc8, 0x43, 0xf4, 0x81, 0x92, 0xa8, 0x4b, 0x52, 0x3d, 0x23, 0x4d, 0x25,
- 0x76, 0x76, 0x33, 0x85, 0x34, 0xf2, 0xf7, 0x68, 0x10, 0x84, 0x9c, 0x72, 0x3f, 0x0c, 0x12, 0x25,
- 0xbc, 0x73, 0x37, 0xa3, 0x8e, 0xc3, 0x60, 0x14, 0xa7, 0x41, 0xe0, 0x07, 0xa3, 0xbd, 0x30, 0x62,
- 0xf1, 0x39, 0xa6, 0x1b, 0x19, 0x93, 0x5c, 0x1d, 0xa7, 0xc3, 0x3d, 0x1a, 0x9c, 0x66, 0xa4, 0x0f,
- 0x67, 0x49, 0x5e, 0xaa, 0x64, 0x33, 0xfa, 0xcd, 0x59, 0x3a, 0x9b, 0x44, 0x3c, 0x17, 0xbe, 0x35,
- 0x4b, 0xe4, 0xfe, 0x84, 0x25, 0x9c, 0x4e, 0xa2, 0x8c, 0x61, 0x3b, 0x63, 0x88, 0xa3, 0xc1, 0x5e,
- 0xc2, 0x29, 0x4f, 0x33, 0x8b, 0x8c, 0xdf, 0x69, 0xa0, 0x63, 0x36, 0x08, 0x47, 0x81, 0xff, 0x8a,
- 0x61, 0xf6, 0xb3, 0x94, 0x25, 0x1c, 0x35, 0x61, 0x71, 0x10, 0x06, 0x43, 0x7f, 0x54, 0xd3, 0x6e,
- 0x6b, 0xf7, 0x57, 0xf6, 0x1f, 0xd4, 0xdf, 0x18, 0x99, 0x7a, 0xa6, 0x40, 0x58, 0x6b, 0x4a, 0x39,
- 0x9c, 0xc9, 0x23, 0x1b, 0x2a, 0x34, 0xf5, 0xfc, 0xb0, 0x56, 0x92, 0x8a, 0xf6, 0xde, 0x5e, 0x51,
- 0x43, 0x88, 0x61, 0x25, 0x6d, 0xfc, 0x5e, 0x83, 0x9b, 0xad, 0x30, 0x18, 0x61, 0x15, 0xd8, 0xff,
- 0x7d, 0x83, 0xbf, 0xd3, 0xe0, 0x46, 0x8f, 0xc7, 0x8c, 0x4e, 0x2e, 0x32, 0x77, 0x08, 0x7a, 0x92,
- 0x13, 0xc9, 0x39, 0xc3, 0x1f, 0x5d, 0xb2, 0xdf, 0xac, 0xce, 0x33, 0x0f, 0x9a, 0xd7, 0xf0, 0xc6,
- 0x54, 0xa9, 0x82, 0xd0, 0x47, 0xb0, 0x26, 0xcd, 0x11, 0x7b, 0x70, 0x16, 0x70, 0xe9, 0xd4, 0x6a,
- 0xf3, 0x1a, 0x5e, 0x95, 0xb0, 0xa9, 0xd0, 0x83, 0x2d, 0xd8, 0x3c, 0x33, 0x27, 0x56, 0x36, 0x1a,
- 0x7f, 0xd0, 0x60, 0xe7, 0xf5, 0xbb, 0xfd, 0x17, 0x23, 0xfe, 0x09, 0xe8, 0x89, 0x1f, 0x8c, 0xc6,
- 0x8c, 0xa4, 0x9c, 0xb3, 0x98, 0x06, 0x03, 0x26, 0xed, 0x5c, 0xc6, 0x1b, 0x0a, 0x3f, 0xca, 0x61,
- 0x74, 0x0f, 0x36, 0xfc, 0x80, 0xb3, 0xd8, 0x9f, 0x90, 0x98, 0x25, 0xe9, 0x98, 0x27, 0xb5, 0xb2,
- 0xe4, 0x5c, 0xcf, 0x60, 0xac, 0x50, 0xe3, 0x9f, 0xcb, 0xb0, 0x39, 0x6f, 0xf3, 0x37, 0xb0, 0xcc,
- 0x82, 0x41, 0xe8, 0xf9, 0x81, 0xb2, 0x7a, 0x7d, 0xff, 0xeb, 0x77, 0xb5, 0xba, 0x2e, 0x4f, 0xd9,
- 0xce, 0xb4, 0xe0, 0xa9, 0x3e, 0xf4, 0x29, 0x6c, 0x26, 0x74, 0x12, 0x8d, 0x19, 0x89, 0x29, 0x67,
- 0xe4, 0x84, 0xc5, 0xfc, 0x95, 0x74, 0xa3, 0x82, 0x37, 0x14, 0x01, 0x53, 0xce, 0x9a, 0x02, 0x46,
- 0x75, 0xd8, 0xca, 0x8e, 0xe5, 0x84, 0x06, 0x01, 0x1b, 0x93, 0x41, 0x98, 0x06, 0xbc, 0xb6, 0x24,
- 0xb9, 0x37, 0xd5, 0xd1, 0x28, 0x8a, 0x29, 0x08, 0xa8, 0x0f, 0xf7, 0x58, 0x40, 0x8f, 0xc7, 0x8c,
- 0x24, 0x2c, 0xa2, 0x52, 0x7f, 0x7c, 0x66, 0x18, 0x89, 0x58, 0x9c, 0x6b, 0xaa, 0xad, 0xca, 0x70,
- 0xdc, 0x55, 0xec, 0xbd, 0x8c, 0xbb, 0xe0, 0x45, 0x97, 0xc5, 0x99, 0x6a, 0x74, 0x17, 0xd6, 0xc6,
- 0x34, 0x18, 0xa5, 0x74, 0xc4, 0xc8, 0x20, 0xf4, 0x98, 0x0c, 0x65, 0x15, 0xaf, 0xe6, 0xa0, 0x19,
- 0x7a, 0x0c, 0xfd, 0x10, 0x76, 0xe8, 0x98, 0xb3, 0x38, 0xa0, 0xdc, 0x7f, 0xc1, 0xc8, 0x39, 0x81,
- 0xa4, 0x86, 0x6e, 0x97, 0xef, 0x57, 0x71, 0xad, 0xc0, 0xd1, 0x2a, 0x08, 0x27, 0xe2, 0x68, 0x27,
- 0xf4, 0x25, 0x29, 0xd0, 0x93, 0xda, 0x82, 0x8a, 0xc9, 0x84, 0xbe, 0x6c, 0x14, 0x60, 0xc1, 0x1a,
- 0xc5, 0xe1, 0x90, 0x06, 0x3e, 0x3f, 0x25, 0x43, 0x5f, 0x90, 0x6a, 0x15, 0x95, 0x05, 0x53, 0xfc,
- 0x50, 0xc2, 0xe8, 0x08, 0x36, 0xd4, 0x41, 0xa9, 0xb4, 0x7e, 0xc9, 0x93, 0xda, 0xe2, 0xed, 0xf2,
- 0xfd, 0x95, 0xfd, 0xcf, 0x2e, 0xbb, 0x3c, 0x12, 0x30, 0x95, 0x10, 0x5e, 0x4f, 0x8a, 0xcb, 0x04,
- 0xfd, 0x00, 0x6a, 0x59, 0x94, 0x7f, 0x1e, 0xc6, 0x1e, 0x11, 0x15, 0x94, 0x84, 0xc3, 0x61, 0xc2,
- 0x78, 0x52, 0x5b, 0x96, 0x96, 0xbc, 0xa7, 0xe8, 0xcf, 0xc2, 0xd8, 0xeb, 0xfb, 0x13, 0xe6, 0x2a,
- 0x22, 0xfa, 0x1c, 0xde, 0x2f, 0x0a, 0xca, 0xb4, 0xf6, 0x98, 0x48, 0xe3, 0x0d, 0x29, 0x76, 0xfd,
- 0x4c, 0xcc, 0x9c, 0xd2, 0xd0, 0x8f, 0x61, 0x37, 0x93, 0xa2, 0x29, 0x0f, 0x27, 0x94, 0xfb, 0x03,
- 0x12, 0xa5, 0xc1, 0x80, 0xa7, 0xb2, 0xea, 0xd7, 0x56, 0xa4, 0xec, 0x8e, 0xe2, 0x69, 0xe4, 0x2c,
- 0xdd, 0x33, 0x0e, 0x71, 0x36, 0x79, 0x5a, 0x44, 0x8c, 0xfe, 0x94, 0xc5, 0xc4, 0xf3, 0x69, 0xec,
- 0xbf, 0x52, 0xf2, 0xba, 0x94, 0xcf, 0x5c, 0xea, 0x29, 0x06, 0xeb, 0x8c, 0x8e, 0x1e, 0xc3, 0x8d,
- 0x02, 0xfb, 0x54, 0x85, 0x4a, 0xc5, 0x4d, 0x79, 0x48, 0xdb, 0x05, 0x86, 0x4c, 0x83, 0x4a, 0xc8,
- 0x0e, 0x2c, 0x4f, 0x18, 0xa7, 0x1e, 0xe5, 0xb4, 0x56, 0x95, 0xd7, 0x7f, 0xff, 0xed, 0x2f, 0x52,
- 0x3b, 0x93, 0xc4, 0x53, 0x1d, 0xe8, 0x3a, 0x54, 0x26, 0xa1, 0xc7, 0xc6, 0xb5, 0x35, 0x99, 0x82,
- 0x6a, 0x81, 0xee, 0xc0, 0x6a, 0x9a, 0x30, 0xc2, 0x82, 0x13, 0x71, 0xf9, 0xbd, 0xda, 0xba, 0xf4,
- 0x68, 0x25, 0x4d, 0x98, 0x9d, 0x41, 0xc6, 0x2f, 0x35, 0x58, 0x3b, 0x77, 0x23, 0x51, 0x0d, 0xae,
- 0xdb, 0x1d, 0xd3, 0xb5, 0x9c, 0xce, 0x13, 0x72, 0xd4, 0xe9, 0x75, 0x6d, 0xd3, 0x39, 0x74, 0x6c,
- 0x4b, 0xbf, 0x86, 0x56, 0x61, 0xb9, 0xe5, 0x74, 0xec, 0x06, 0x7e, 0xf8, 0x85, 0xae, 0xa1, 0x65,
- 0x58, 0x38, 0x6c, 0x35, 0x4c, 0xbd, 0x84, 0xaa, 0x50, 0x69, 0x1f, 0xb5, 0x1a, 0xcf, 0xf4, 0x32,
- 0x5a, 0x82, 0x72, 0xa3, 0x8d, 0xf5, 0x05, 0x04, 0xb0, 0xd8, 0x68, 0x63, 0xf2, 0xec, 0x40, 0xaf,
- 0x08, 0x39, 0xf7, 0xc9, 0x13, 0xe2, 0x76, 0x8f, 0x7a, 0xfa, 0x22, 0xda, 0x81, 0xf7, 0x7b, 0x5d,
- 0xdb, 0xfe, 0x09, 0x79, 0xe6, 0xf4, 0x9b, 0xa4, 0x69, 0x37, 0x2c, 0x1b, 0x93, 0x83, 0xe7, 0x7d,
- 0x5b, 0x5f, 0x32, 0xfe, 0x5d, 0x85, 0xad, 0x0b, 0x1c, 0x45, 0x13, 0xd0, 0x65, 0x7d, 0xa2, 0x03,
- 0x19, 0x6a, 0x7e, 0x1a, 0xb1, 0xac, 0xfe, 0x1c, 0xbc, 0x7b, 0xd8, 0xea, 0xce, 0x99, 0xaa, 0xfe,
- 0x69, 0xc4, 0xf0, 0x86, 0x7f, 0x1e, 0x40, 0x5f, 0xc3, 0xae, 0x1f, 0x78, 0x69, 0xc2, 0xe3, 0x53,
- 0x12, 0x50, 0x7f, 0x90, 0xc8, 0xdb, 0x4a, 0xc2, 0x21, 0x51, 0x2f, 0x9b, 0xb8, 0xe7, 0x6b, 0xb8,
- 0x96, 0xf3, 0x74, 0x04, 0x8b, 0xb8, 0xaf, 0xee, 0x50, 0x86, 0x12, 0xbd, 0x80, 0xad, 0x89, 0x3f,
- 0x88, 0xc3, 0xe8, 0x24, 0x0c, 0x18, 0xf1, 0xfc, 0x84, 0xcb, 0x9a, 0xbc, 0x20, 0x2d, 0xb6, 0xaf,
- 0x60, 0x71, 0x7b, 0xaa, 0xcd, 0xca, 0x94, 0x61, 0x34, 0x99, 0xc3, 0x10, 0x87, 0xad, 0x30, 0xf6,
- 0x47, 0x7e, 0x40, 0xc7, 0x64, 0xc2, 0x3c, 0x9f, 0xaa, 0x48, 0x55, 0xe4, 0xbe, 0xd6, 0x15, 0xf6,
- 0x75, 0x33, 0x6d, 0x6d, 0xa1, 0x4c, 0xc6, 0x6a, 0x33, 0x9c, 0x85, 0xd0, 0x2b, 0x78, 0x4f, 0x14,
- 0xd3, 0x58, 0x64, 0x0f, 0xf1, 0xd8, 0x0b, 0x7f, 0xc0, 0xd4, 0xbe, 0x8b, 0x72, 0xdf, 0xc3, 0x2b,
- 0xec, 0x8b, 0x73, 0x7d, 0x96, 0x54, 0x27, 0x77, 0xde, 0x8a, 0xe7, 0x41, 0xb4, 0x7f, 0xc1, 0xde,
- 0x01, 0x9d, 0x30, 0xf9, 0x14, 0x54, 0xe7, 0x64, 0x3a, 0x74, 0xc2, 0xd0, 0x67, 0x80, 0xce, 0xa2,
- 0x24, 0x6a, 0x94, 0x34, 0x76, 0x59, 0x0a, 0xe8, 0x53, 0xf7, 0xfc, 0x89, 0xda, 0xe1, 0x2e, 0xac,
- 0x85, 0xc7, 0xc3, 0x34, 0x19, 0x50, 0xce, 0x3c, 0xe2, 0x7b, 0xf2, 0xba, 0x96, 0xf1, 0xea, 0x19,
- 0xe8, 0x78, 0xe8, 0x16, 0xac, 0xa8, 0xf7, 0x88, 0x87, 0x91, 0x3f, 0xa8, 0x81, 0xd4, 0x05, 0x12,
- 0xea, 0x0b, 0xc4, 0xf8, 0x93, 0x06, 0x1b, 0x33, 0x69, 0x87, 0x6e, 0xc3, 0xae, 0xd3, 0xe9, 0xdb,
- 0xb8, 0x61, 0xf6, 0x1d, 0xb7, 0x43, 0xfa, 0xcf, 0xbb, 0xf6, 0xcc, 0x85, 0x5b, 0x07, 0xb0, 0x9c,
- 0x9e, 0x79, 0xd4, 0xeb, 0x39, 0x6e, 0x47, 0xd7, 0x90, 0x0e, 0xab, 0x5d, 0x6c, 0xf7, 0xec, 0x4e,
- 0xbf, 0x21, 0x44, 0xf4, 0x92, 0xe0, 0xe8, 0x36, 0xdd, 0x8e, 0x4d, 0xcc, 0x46, 0xab, 0xa5, 0x97,
- 0xd1, 0x1a, 0x54, 0x9f, 0xba, 0x8e, 0x69, 0xb7, 0x1b, 0x4e, 0x4b, 0x5f, 0x40, 0x37, 0x61, 0xbb,
- 0x8b, 0xdd, 0x43, 0x5b, 0x2a, 0x68, 0xb4, 0x5a, 0xcf, 0x49, 0x17, 0xbb, 0xd6, 0x91, 0x69, 0x5b,
- 0x7a, 0x45, 0x68, 0x93, 0xbc, 0xa4, 0x67, 0x37, 0xb0, 0xd9, 0xd4, 0x17, 0xd1, 0x26, 0xac, 0x29,
- 0xc4, 0x74, 0xdb, 0xed, 0x46, 0xc7, 0xd2, 0x97, 0x84, 0x42, 0xcb, 0x31, 0xb3, 0xfd, 0x96, 0x0d,
- 0x0f, 0xd0, 0x7c, 0x2e, 0xa2, 0xbb, 0x70, 0xab, 0xed, 0x98, 0xd8, 0x55, 0xa6, 0x58, 0x4e, 0xaf,
- 0xdf, 0xe8, 0x98, 0xb3, 0xce, 0xac, 0x41, 0x55, 0xd4, 0x8e, 0x43, 0xc7, 0x6e, 0x59, 0xba, 0x26,
- 0x8a, 0x42, 0xdb, 0xb1, 0xd4, 0xaa, 0x24, 0x56, 0x87, 0x39, 0xad, 0x6c, 0x74, 0x60, 0x73, 0x2e,
- 0xf3, 0xc4, 0x26, 0x2e, 0x76, 0x9e, 0x38, 0x9d, 0x46, 0x8b, 0xb4, 0x6d, 0xcb, 0x69, 0x5c, 0x14,
- 0xb1, 0x2a, 0x54, 0x1a, 0x47, 0x96, 0xe3, 0xea, 0x9a, 0xf8, 0x7c, 0xea, 0x58, 0xb6, 0xab, 0x97,
- 0x8c, 0x6f, 0x35, 0x55, 0x56, 0x66, 0xb3, 0xe7, 0x23, 0xb8, 0x83, 0x6d, 0xd3, 0xc5, 0xb2, 0xd6,
- 0x59, 0xf6, 0x53, 0xe1, 0xfa, 0xc5, 0xc7, 0xd0, 0x6b, 0x37, 0x70, 0x5f, 0xba, 0xa7, 0x6b, 0x68,
- 0x11, 0x4a, 0x5d, 0xb3, 0x18, 0x7c, 0x51, 0x15, 0xf5, 0x32, 0x5a, 0x81, 0xa5, 0xa7, 0x76, 0xd3,
- 0x31, 0x5b, 0xb6, 0xbe, 0x20, 0xca, 0xa8, 0xdb, 0x6f, 0xda, 0x98, 0xb8, 0x47, 0x7d, 0xcb, 0x75,
- 0x71, 0xa6, 0x5f, 0xaf, 0xa0, 0x6d, 0xd8, 0x52, 0x14, 0xa7, 0x53, 0x24, 0x2c, 0x1a, 0x9f, 0xc0,
- 0xda, 0xb9, 0x07, 0x16, 0xd5, 0x60, 0x29, 0x3a, 0x89, 0x69, 0xc2, 0x92, 0x9a, 0x26, 0x1b, 0x85,
- 0x7c, 0x69, 0xe0, 0xe9, 0xcc, 0x31, 0x6d, 0x9c, 0xd1, 0x0e, 0x2c, 0xe5, 0x5d, 0xaa, 0x96, 0x75,
- 0xa9, 0x39, 0x80, 0x10, 0x94, 0xd3, 0xd8, 0x97, 0xed, 0x54, 0xb5, 0x79, 0x0d, 0x8b, 0xc5, 0xc1,
- 0x3a, 0xa8, 0x26, 0x96, 0x24, 0x61, 0x1a, 0x0f, 0x98, 0xc1, 0xa6, 0x1d, 0x9f, 0xe8, 0xb3, 0x93,
- 0x28, 0x0c, 0x12, 0x86, 0xba, 0xb0, 0x94, 0x37, 0x8a, 0x25, 0xd9, 0x22, 0x7c, 0xf1, 0x56, 0x2d,
- 0x42, 0xc1, 0x38, 0xd5, 0x51, 0xe2, 0x5c, 0x8d, 0x11, 0xc1, 0xee, 0xc5, 0x83, 0xc8, 0xf7, 0xb6,
- 0xe3, 0x9f, 0xb5, 0x8b, 0xb7, 0x9c, 0x3e, 0x2f, 0xaa, 0x75, 0x1a, 0xc5, 0x2c, 0x49, 0x44, 0x2f,
- 0x38, 0xc8, 0x43, 0x58, 0x91, 0xad, 0x93, 0xc4, 0xbb, 0x0a, 0x46, 0x8f, 0x00, 0x12, 0x4e, 0x63,
- 0x2e, 0xbb, 0x9b, 0x6c, 0xc4, 0xd9, 0xc9, 0x0d, 0xcc, 0x87, 0xc7, 0x7a, 0x3f, 0x1f, 0x1e, 0x71,
- 0x55, 0x72, 0x8b, 0x35, 0xb2, 0x40, 0x1f, 0xd3, 0x84, 0x93, 0x34, 0xf2, 0x44, 0x03, 0x2a, 0x15,
- 0x94, 0x2f, 0x55, 0xb0, 0x2e, 0x64, 0x8e, 0xa4, 0x88, 0x00, 0x8d, 0xbf, 0x97, 0xe6, 0xa7, 0x8a,
- 0x42, 0xf4, 0xee, 0x43, 0x85, 0xc5, 0x71, 0x18, 0x67, 0x43, 0x05, 0xca, 0x35, 0xc7, 0xd1, 0xa0,
- 0xde, 0x93, 0x63, 0x2b, 0x56, 0x0c, 0xa8, 0x37, 0x1b, 0xe7, 0xab, 0x4c, 0x4e, 0x33, 0xa1, 0x46,
- 0x29, 0x6c, 0x66, 0x9d, 0x25, 0x7b, 0xc1, 0x02, 0xae, 0x4a, 0xab, 0x7a, 0xf7, 0x9c, 0x77, 0x54,
- 0x7f, 0xe6, 0x54, 0x76, 0xc2, 0xb6, 0xd0, 0xa8, 0x1e, 0xec, 0xe4, 0x3c, 0x60, 0xb4, 0x60, 0x63,
- 0x86, 0x07, 0xed, 0x42, 0x4d, 0xb4, 0x19, 0x66, 0x93, 0xd8, 0x4f, 0xed, 0x4e, 0x7f, 0xe6, 0x4a,
- 0xdf, 0x84, 0x6d, 0xbb, 0x63, 0x11, 0xf7, 0x90, 0xf4, 0x9c, 0xce, 0x93, 0x96, 0x4d, 0x8e, 0xfa,
- 0xa2, 0x12, 0x77, 0x4c, 0x5b, 0xd7, 0x8c, 0xef, 0x4a, 0x17, 0x0f, 0x6e, 0xca, 0x59, 0x44, 0x60,
- 0xf5, 0x5c, 0x3f, 0xae, 0xc9, 0xe8, 0x7d, 0xf5, 0xae, 0x59, 0x5a, 0x68, 0xde, 0xf1, 0x39, 0x85,
- 0xe8, 0x06, 0x2c, 0xfb, 0x09, 0x19, 0x8a, 0xf2, 0x97, 0xcd, 0x71, 0x4b, 0x7e, 0x72, 0x28, 0x96,
- 0x68, 0x17, 0x44, 0x42, 0x1d, 0xfb, 0x63, 0x9f, 0x9f, 0xca, 0xe4, 0x29, 0xe1, 0x33, 0x00, 0x35,
- 0x60, 0x43, 0x1d, 0x04, 0x61, 0x81, 0xea, 0xbf, 0x65, 0xec, 0x57, 0xf6, 0x6f, 0xcc, 0x25, 0x98,
- 0x95, 0xfd, 0x1b, 0xc1, 0x6b, 0x4a, 0xc2, 0x0e, 0x64, 0x47, 0x2e, 0x5e, 0xb2, 0x7c, 0xa6, 0xe2,
- 0x74, 0x24, 0x5b, 0x87, 0x0a, 0x86, 0x0c, 0xea, 0xd3, 0xd1, 0xfc, 0xd0, 0xb3, 0x38, 0x3f, 0xf4,
- 0x18, 0x7f, 0xd4, 0x60, 0xfb, 0x35, 0xd7, 0xf2, 0xfb, 0x0f, 0xdf, 0x8c, 0x0b, 0xa5, 0xcb, 0x5d,
- 0xa8, 0x5c, 0xe0, 0xc2, 0x6f, 0x35, 0xd8, 0x7d, 0xd3, 0xa6, 0xe8, 0x43, 0x00, 0x1e, 0xd3, 0x20,
- 0x19, 0xc4, 0x7e, 0xa4, 0xca, 0x45, 0x15, 0x17, 0x10, 0x41, 0x2f, 0x0c, 0x32, 0x25, 0x79, 0x56,
- 0x05, 0x04, 0xfd, 0x08, 0x2a, 0x62, 0xda, 0x11, 0x03, 0xb8, 0x08, 0xc0, 0xbd, 0x4b, 0x02, 0x20,
- 0x86, 0x1f, 0x27, 0x18, 0x86, 0x58, 0x49, 0x19, 0x7f, 0xd1, 0x60, 0x39, 0xc7, 0xd0, 0x97, 0xe7,
- 0xaa, 0x92, 0x76, 0xd9, 0x99, 0x17, 0x8a, 0xd2, 0xe7, 0x62, 0xa2, 0xf7, 0x8a, 0xd5, 0xec, 0x0d,
- 0x72, 0x4b, 0x2c, 0xcb, 0x12, 0x04, 0x0b, 0xc2, 0x8a, 0x6c, 0xe0, 0x95, 0xdf, 0x33, 0xfe, 0x2e,
- 0xcc, 0xf9, 0x7b, 0x0b, 0x56, 0xf2, 0x11, 0xa9, 0x90, 0x59, 0x19, 0xd4, 0xa7, 0xa3, 0xfd, 0xbf,
- 0x95, 0x61, 0x51, 0x45, 0x1c, 0xfd, 0x46, 0x83, 0xea, 0xb4, 0x0c, 0xa0, 0xb7, 0xfc, 0x85, 0x34,
- 0xfd, 0x3b, 0xb4, 0xf3, 0xe0, 0xed, 0x05, 0x54, 0x85, 0x31, 0x3e, 0xfe, 0xc5, 0x5f, 0xff, 0xf1,
- 0xeb, 0xd2, 0x6d, 0xe3, 0x66, 0xe1, 0xff, 0xa6, 0x12, 0x7b, 0x1c, 0xe7, 0xcc, 0x8f, 0xb5, 0x4f,
- 0xd1, 0xb7, 0x1a, 0x5c, 0xbf, 0xe8, 0x29, 0x41, 0x8f, 0x2f, 0xd9, 0xf2, 0x0d, 0xff, 0xde, 0x76,
- 0x3e, 0xc8, 0x65, 0x0b, 0x7f, 0x3e, 0xeb, 0x6e, 0xfe, 0xe7, 0xd3, 0x78, 0x28, 0x6d, 0xfb, 0x3f,
- 0xe3, 0xe3, 0x79, 0xdb, 0x0a, 0x02, 0xe7, 0xcc, 0xfc, 0x95, 0x06, 0x68, 0xbe, 0x9e, 0xa2, 0x2f,
- 0xaf, 0x50, 0x82, 0x95, 0x89, 0x8f, 0xae, 0x5c, 0xbc, 0x8d, 0x6b, 0xf7, 0xb5, 0x07, 0xda, 0xc1,
- 0x2b, 0xb8, 0x33, 0x08, 0x27, 0x6f, 0xd6, 0x72, 0xb0, 0xa2, 0x8e, 0xbf, 0x2b, 0x12, 0xaf, 0xab,
- 0x7d, 0x63, 0x66, 0xdc, 0xa3, 0x50, 0xdc, 0xcc, 0x7a, 0x18, 0x8f, 0xf6, 0x46, 0x2c, 0x90, 0x69,
- 0xb9, 0xa7, 0x48, 0x34, 0xf2, 0x93, 0xd7, 0xfc, 0x9e, 0xfe, 0x4a, 0x01, 0xff, 0xd2, 0xb4, 0xe3,
- 0x45, 0x29, 0xf2, 0xff, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x29, 0xdf, 0xd7, 0xd0, 0x16,
- 0x00, 0x00,
- }
|