You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 

1732 lines
75 KiB

  1. // Code generated by protoc-gen-go. DO NOT EDIT.
  2. // source: google/cloud/speech/v1/cloud_speech.proto
  3. package speech // import "google.golang.org/genproto/googleapis/cloud/speech/v1"
  4. import proto "github.com/golang/protobuf/proto"
  5. import fmt "fmt"
  6. import math "math"
  7. import _ "github.com/golang/protobuf/ptypes/any"
  8. import duration "github.com/golang/protobuf/ptypes/duration"
  9. import _ "github.com/golang/protobuf/ptypes/empty"
  10. import timestamp "github.com/golang/protobuf/ptypes/timestamp"
  11. import _ "google.golang.org/genproto/googleapis/api/annotations"
  12. import longrunning "google.golang.org/genproto/googleapis/longrunning"
  13. import status "google.golang.org/genproto/googleapis/rpc/status"
  14. import (
  15. context "golang.org/x/net/context"
  16. grpc "google.golang.org/grpc"
  17. )
  18. // Reference imports to suppress errors if they are not otherwise used.
  19. var _ = proto.Marshal
  20. var _ = fmt.Errorf
  21. var _ = math.Inf
  22. // This is a compile-time assertion to ensure that this generated file
  23. // is compatible with the proto package it is being compiled against.
  24. // A compilation error at this line likely means your copy of the
  25. // proto package needs to be updated.
  26. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
  27. // The encoding of the audio data sent in the request.
  28. //
  29. // All encodings support only 1 channel (mono) audio.
  30. //
  31. // For best results, the audio source should be captured and transmitted using
  32. // a lossless encoding (`FLAC` or `LINEAR16`). The accuracy of the speech
  33. // recognition can be reduced if lossy codecs are used to capture or transmit
  34. // audio, particularly if background noise is present. Lossy codecs include
  35. // `MULAW`, `AMR`, `AMR_WB`, `OGG_OPUS`, and `SPEEX_WITH_HEADER_BYTE`.
  36. //
  37. // The `FLAC` and `WAV` audio file formats include a header that describes the
  38. // included audio content. You can request recognition for `WAV` files that
  39. // contain either `LINEAR16` or `MULAW` encoded audio.
  40. // If you send `FLAC` or `WAV` audio file format in
  41. // your request, you do not need to specify an `AudioEncoding`; the audio
  42. // encoding format is determined from the file header. If you specify
  43. // an `AudioEncoding` when you send send `FLAC` or `WAV` audio, the
  44. // encoding configuration must match the encoding described in the audio
  45. // header; otherwise the request returns an
  46. // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT] error
  47. // code.
  48. type RecognitionConfig_AudioEncoding int32
  49. const (
  50. // Not specified.
  51. RecognitionConfig_ENCODING_UNSPECIFIED RecognitionConfig_AudioEncoding = 0
  52. // Uncompressed 16-bit signed little-endian samples (Linear PCM).
  53. RecognitionConfig_LINEAR16 RecognitionConfig_AudioEncoding = 1
  54. // `FLAC` (Free Lossless Audio
  55. // Codec) is the recommended encoding because it is
  56. // lossless--therefore recognition is not compromised--and
  57. // requires only about half the bandwidth of `LINEAR16`. `FLAC` stream
  58. // encoding supports 16-bit and 24-bit samples, however, not all fields in
  59. // `STREAMINFO` are supported.
  60. RecognitionConfig_FLAC RecognitionConfig_AudioEncoding = 2
  61. // 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
  62. RecognitionConfig_MULAW RecognitionConfig_AudioEncoding = 3
  63. // Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
  64. RecognitionConfig_AMR RecognitionConfig_AudioEncoding = 4
  65. // Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
  66. RecognitionConfig_AMR_WB RecognitionConfig_AudioEncoding = 5
  67. // Opus encoded audio frames in Ogg container
  68. // ([OggOpus](https://wiki.xiph.org/OggOpus)).
  69. // `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
  70. RecognitionConfig_OGG_OPUS RecognitionConfig_AudioEncoding = 6
  71. // Although the use of lossy encodings is not recommended, if a very low
  72. // bitrate encoding is required, `OGG_OPUS` is highly preferred over
  73. // Speex encoding. The [Speex](https://speex.org/) encoding supported by
  74. // Cloud Speech API has a header byte in each block, as in MIME type
  75. // `audio/x-speex-with-header-byte`.
  76. // It is a variant of the RTP Speex encoding defined in
  77. // [RFC 5574](https://tools.ietf.org/html/rfc5574).
  78. // The stream is a sequence of blocks, one block per RTP packet. Each block
  79. // starts with a byte containing the length of the block, in bytes, followed
  80. // by one or more frames of Speex data, padded to an integral number of
  81. // bytes (octets) as specified in RFC 5574. In other words, each RTP header
  82. // is replaced with a single byte containing the block length. Only Speex
  83. // wideband is supported. `sample_rate_hertz` must be 16000.
  84. RecognitionConfig_SPEEX_WITH_HEADER_BYTE RecognitionConfig_AudioEncoding = 7
  85. )
  86. var RecognitionConfig_AudioEncoding_name = map[int32]string{
  87. 0: "ENCODING_UNSPECIFIED",
  88. 1: "LINEAR16",
  89. 2: "FLAC",
  90. 3: "MULAW",
  91. 4: "AMR",
  92. 5: "AMR_WB",
  93. 6: "OGG_OPUS",
  94. 7: "SPEEX_WITH_HEADER_BYTE",
  95. }
  96. var RecognitionConfig_AudioEncoding_value = map[string]int32{
  97. "ENCODING_UNSPECIFIED": 0,
  98. "LINEAR16": 1,
  99. "FLAC": 2,
  100. "MULAW": 3,
  101. "AMR": 4,
  102. "AMR_WB": 5,
  103. "OGG_OPUS": 6,
  104. "SPEEX_WITH_HEADER_BYTE": 7,
  105. }
  106. func (x RecognitionConfig_AudioEncoding) String() string {
  107. return proto.EnumName(RecognitionConfig_AudioEncoding_name, int32(x))
  108. }
  109. func (RecognitionConfig_AudioEncoding) EnumDescriptor() ([]byte, []int) {
  110. return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{4, 0}
  111. }
  112. // Indicates the type of speech event.
  113. type StreamingRecognizeResponse_SpeechEventType int32
  114. const (
  115. // No speech event specified.
  116. StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED StreamingRecognizeResponse_SpeechEventType = 0
  117. // This event indicates that the server has detected the end of the user's
  118. // speech utterance and expects no additional speech. Therefore, the server
  119. // will not process additional audio (although it may subsequently return
  120. // additional results). The client should stop sending additional audio
  121. // data, half-close the gRPC connection, and wait for any additional results
  122. // until the server closes the gRPC connection. This event is only sent if
  123. // `single_utterance` was set to `true`, and is not used otherwise.
  124. StreamingRecognizeResponse_END_OF_SINGLE_UTTERANCE StreamingRecognizeResponse_SpeechEventType = 1
  125. )
  126. var StreamingRecognizeResponse_SpeechEventType_name = map[int32]string{
  127. 0: "SPEECH_EVENT_UNSPECIFIED",
  128. 1: "END_OF_SINGLE_UTTERANCE",
  129. }
  130. var StreamingRecognizeResponse_SpeechEventType_value = map[string]int32{
  131. "SPEECH_EVENT_UNSPECIFIED": 0,
  132. "END_OF_SINGLE_UTTERANCE": 1,
  133. }
  134. func (x StreamingRecognizeResponse_SpeechEventType) String() string {
  135. return proto.EnumName(StreamingRecognizeResponse_SpeechEventType_name, int32(x))
  136. }
  137. func (StreamingRecognizeResponse_SpeechEventType) EnumDescriptor() ([]byte, []int) {
  138. return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{10, 0}
  139. }
  140. // The top-level message sent by the client for the `Recognize` method.
  141. type RecognizeRequest struct {
  142. // *Required* Provides information to the recognizer that specifies how to
  143. // process the request.
  144. Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
  145. // *Required* The audio data to be recognized.
  146. Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
  147. XXX_NoUnkeyedLiteral struct{} `json:"-"`
  148. XXX_unrecognized []byte `json:"-"`
  149. XXX_sizecache int32 `json:"-"`
  150. }
  151. func (m *RecognizeRequest) Reset() { *m = RecognizeRequest{} }
  152. func (m *RecognizeRequest) String() string { return proto.CompactTextString(m) }
  153. func (*RecognizeRequest) ProtoMessage() {}
  154. func (*RecognizeRequest) Descriptor() ([]byte, []int) {
  155. return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{0}
  156. }
  157. func (m *RecognizeRequest) XXX_Unmarshal(b []byte) error {
  158. return xxx_messageInfo_RecognizeRequest.Unmarshal(m, b)
  159. }
  160. func (m *RecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
  161. return xxx_messageInfo_RecognizeRequest.Marshal(b, m, deterministic)
  162. }
  163. func (dst *RecognizeRequest) XXX_Merge(src proto.Message) {
  164. xxx_messageInfo_RecognizeRequest.Merge(dst, src)
  165. }
  166. func (m *RecognizeRequest) XXX_Size() int {
  167. return xxx_messageInfo_RecognizeRequest.Size(m)
  168. }
  169. func (m *RecognizeRequest) XXX_DiscardUnknown() {
  170. xxx_messageInfo_RecognizeRequest.DiscardUnknown(m)
  171. }
  172. var xxx_messageInfo_RecognizeRequest proto.InternalMessageInfo
  173. func (m *RecognizeRequest) GetConfig() *RecognitionConfig {
  174. if m != nil {
  175. return m.Config
  176. }
  177. return nil
  178. }
  179. func (m *RecognizeRequest) GetAudio() *RecognitionAudio {
  180. if m != nil {
  181. return m.Audio
  182. }
  183. return nil
  184. }
  185. // The top-level message sent by the client for the `LongRunningRecognize`
  186. // method.
  187. type LongRunningRecognizeRequest struct {
  188. // *Required* Provides information to the recognizer that specifies how to
  189. // process the request.
  190. Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
  191. // *Required* The audio data to be recognized.
  192. Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
  193. XXX_NoUnkeyedLiteral struct{} `json:"-"`
  194. XXX_unrecognized []byte `json:"-"`
  195. XXX_sizecache int32 `json:"-"`
  196. }
  197. func (m *LongRunningRecognizeRequest) Reset() { *m = LongRunningRecognizeRequest{} }
  198. func (m *LongRunningRecognizeRequest) String() string { return proto.CompactTextString(m) }
  199. func (*LongRunningRecognizeRequest) ProtoMessage() {}
  200. func (*LongRunningRecognizeRequest) Descriptor() ([]byte, []int) {
  201. return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{1}
  202. }
  203. func (m *LongRunningRecognizeRequest) XXX_Unmarshal(b []byte) error {
  204. return xxx_messageInfo_LongRunningRecognizeRequest.Unmarshal(m, b)
  205. }
  206. func (m *LongRunningRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
  207. return xxx_messageInfo_LongRunningRecognizeRequest.Marshal(b, m, deterministic)
  208. }
  209. func (dst *LongRunningRecognizeRequest) XXX_Merge(src proto.Message) {
  210. xxx_messageInfo_LongRunningRecognizeRequest.Merge(dst, src)
  211. }
  212. func (m *LongRunningRecognizeRequest) XXX_Size() int {
  213. return xxx_messageInfo_LongRunningRecognizeRequest.Size(m)
  214. }
  215. func (m *LongRunningRecognizeRequest) XXX_DiscardUnknown() {
  216. xxx_messageInfo_LongRunningRecognizeRequest.DiscardUnknown(m)
  217. }
  218. var xxx_messageInfo_LongRunningRecognizeRequest proto.InternalMessageInfo
  219. func (m *LongRunningRecognizeRequest) GetConfig() *RecognitionConfig {
  220. if m != nil {
  221. return m.Config
  222. }
  223. return nil
  224. }
  225. func (m *LongRunningRecognizeRequest) GetAudio() *RecognitionAudio {
  226. if m != nil {
  227. return m.Audio
  228. }
  229. return nil
  230. }
  231. // The top-level message sent by the client for the `StreamingRecognize` method.
  232. // Multiple `StreamingRecognizeRequest` messages are sent. The first message
  233. // must contain a `streaming_config` message and must not contain `audio` data.
  234. // All subsequent messages must contain `audio` data and must not contain a
  235. // `streaming_config` message.
  236. type StreamingRecognizeRequest struct {
  237. // The streaming request, which is either a streaming config or audio content.
  238. //
  239. // Types that are valid to be assigned to StreamingRequest:
  240. // *StreamingRecognizeRequest_StreamingConfig
  241. // *StreamingRecognizeRequest_AudioContent
  242. StreamingRequest isStreamingRecognizeRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
  243. XXX_NoUnkeyedLiteral struct{} `json:"-"`
  244. XXX_unrecognized []byte `json:"-"`
  245. XXX_sizecache int32 `json:"-"`
  246. }
  247. func (m *StreamingRecognizeRequest) Reset() { *m = StreamingRecognizeRequest{} }
  248. func (m *StreamingRecognizeRequest) String() string { return proto.CompactTextString(m) }
  249. func (*StreamingRecognizeRequest) ProtoMessage() {}
  250. func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int) {
  251. return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{2}
  252. }
  253. func (m *StreamingRecognizeRequest) XXX_Unmarshal(b []byte) error {
  254. return xxx_messageInfo_StreamingRecognizeRequest.Unmarshal(m, b)
  255. }
  256. func (m *StreamingRecognizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
  257. return xxx_messageInfo_StreamingRecognizeRequest.Marshal(b, m, deterministic)
  258. }
  259. func (dst *StreamingRecognizeRequest) XXX_Merge(src proto.Message) {
  260. xxx_messageInfo_StreamingRecognizeRequest.Merge(dst, src)
  261. }
  262. func (m *StreamingRecognizeRequest) XXX_Size() int {
  263. return xxx_messageInfo_StreamingRecognizeRequest.Size(m)
  264. }
  265. func (m *StreamingRecognizeRequest) XXX_DiscardUnknown() {
  266. xxx_messageInfo_StreamingRecognizeRequest.DiscardUnknown(m)
  267. }
  268. var xxx_messageInfo_StreamingRecognizeRequest proto.InternalMessageInfo
  269. type isStreamingRecognizeRequest_StreamingRequest interface {
  270. isStreamingRecognizeRequest_StreamingRequest()
  271. }
  272. type StreamingRecognizeRequest_StreamingConfig struct {
  273. StreamingConfig *StreamingRecognitionConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,proto3,oneof"`
  274. }
  275. type StreamingRecognizeRequest_AudioContent struct {
  276. AudioContent []byte `protobuf:"bytes,2,opt,name=audio_content,json=audioContent,proto3,oneof"`
  277. }
  278. func (*StreamingRecognizeRequest_StreamingConfig) isStreamingRecognizeRequest_StreamingRequest() {}
  279. func (*StreamingRecognizeRequest_AudioContent) isStreamingRecognizeRequest_StreamingRequest() {}
  280. func (m *StreamingRecognizeRequest) GetStreamingRequest() isStreamingRecognizeRequest_StreamingRequest {
  281. if m != nil {
  282. return m.StreamingRequest
  283. }
  284. return nil
  285. }
  286. func (m *StreamingRecognizeRequest) GetStreamingConfig() *StreamingRecognitionConfig {
  287. if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_StreamingConfig); ok {
  288. return x.StreamingConfig
  289. }
  290. return nil
  291. }
  292. func (m *StreamingRecognizeRequest) GetAudioContent() []byte {
  293. if x, ok := m.GetStreamingRequest().(*StreamingRecognizeRequest_AudioContent); ok {
  294. return x.AudioContent
  295. }
  296. return nil
  297. }
  298. // XXX_OneofFuncs is for the internal use of the proto package.
  299. func (*StreamingRecognizeRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
  300. return _StreamingRecognizeRequest_OneofMarshaler, _StreamingRecognizeRequest_OneofUnmarshaler, _StreamingRecognizeRequest_OneofSizer, []interface{}{
  301. (*StreamingRecognizeRequest_StreamingConfig)(nil),
  302. (*StreamingRecognizeRequest_AudioContent)(nil),
  303. }
  304. }
  305. func _StreamingRecognizeRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
  306. m := msg.(*StreamingRecognizeRequest)
  307. // streaming_request
  308. switch x := m.StreamingRequest.(type) {
  309. case *StreamingRecognizeRequest_StreamingConfig:
  310. b.EncodeVarint(1<<3 | proto.WireBytes)
  311. if err := b.EncodeMessage(x.StreamingConfig); err != nil {
  312. return err
  313. }
  314. case *StreamingRecognizeRequest_AudioContent:
  315. b.EncodeVarint(2<<3 | proto.WireBytes)
  316. b.EncodeRawBytes(x.AudioContent)
  317. case nil:
  318. default:
  319. return fmt.Errorf("StreamingRecognizeRequest.StreamingRequest has unexpected type %T", x)
  320. }
  321. return nil
  322. }
  323. func _StreamingRecognizeRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
  324. m := msg.(*StreamingRecognizeRequest)
  325. switch tag {
  326. case 1: // streaming_request.streaming_config
  327. if wire != proto.WireBytes {
  328. return true, proto.ErrInternalBadWireType
  329. }
  330. msg := new(StreamingRecognitionConfig)
  331. err := b.DecodeMessage(msg)
  332. m.StreamingRequest = &StreamingRecognizeRequest_StreamingConfig{msg}
  333. return true, err
  334. case 2: // streaming_request.audio_content
  335. if wire != proto.WireBytes {
  336. return true, proto.ErrInternalBadWireType
  337. }
  338. x, err := b.DecodeRawBytes(true)
  339. m.StreamingRequest = &StreamingRecognizeRequest_AudioContent{x}
  340. return true, err
  341. default:
  342. return false, nil
  343. }
  344. }
  345. func _StreamingRecognizeRequest_OneofSizer(msg proto.Message) (n int) {
  346. m := msg.(*StreamingRecognizeRequest)
  347. // streaming_request
  348. switch x := m.StreamingRequest.(type) {
  349. case *StreamingRecognizeRequest_StreamingConfig:
  350. s := proto.Size(x.StreamingConfig)
  351. n += 1 // tag and wire
  352. n += proto.SizeVarint(uint64(s))
  353. n += s
  354. case *StreamingRecognizeRequest_AudioContent:
  355. n += 1 // tag and wire
  356. n += proto.SizeVarint(uint64(len(x.AudioContent)))
  357. n += len(x.AudioContent)
  358. case nil:
  359. default:
  360. panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
  361. }
  362. return n
  363. }
  364. // Provides information to the recognizer that specifies how to process the
  365. // request.
  366. type StreamingRecognitionConfig struct {
  367. // *Required* Provides information to the recognizer that specifies how to
  368. // process the request.
  369. Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
  370. // *Optional* If `false` or omitted, the recognizer will perform continuous
  371. // recognition (continuing to wait for and process audio even if the user
  372. // pauses speaking) until the client closes the input stream (gRPC API) or
  373. // until the maximum time limit has been reached. May return multiple
  374. // `StreamingRecognitionResult`s with the `is_final` flag set to `true`.
  375. //
  376. // If `true`, the recognizer will detect a single spoken utterance. When it
  377. // detects that the user has paused or stopped speaking, it will return an
  378. // `END_OF_SINGLE_UTTERANCE` event and cease recognition. It will return no
  379. // more than one `StreamingRecognitionResult` with the `is_final` flag set to
  380. // `true`.
  381. SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance,proto3" json:"single_utterance,omitempty"`
  382. // *Optional* If `true`, interim results (tentative hypotheses) may be
  383. // returned as they become available (these interim results are indicated with
  384. // the `is_final=false` flag).
  385. // If `false` or omitted, only `is_final=true` result(s) are returned.
  386. InterimResults bool `protobuf:"varint,3,opt,name=interim_results,json=interimResults,proto3" json:"interim_results,omitempty"`
  387. XXX_NoUnkeyedLiteral struct{} `json:"-"`
  388. XXX_unrecognized []byte `json:"-"`
  389. XXX_sizecache int32 `json:"-"`
  390. }
  391. func (m *StreamingRecognitionConfig) Reset() { *m = StreamingRecognitionConfig{} }
  392. func (m *StreamingRecognitionConfig) String() string { return proto.CompactTextString(m) }
  393. func (*StreamingRecognitionConfig) ProtoMessage() {}
  394. func (*StreamingRecognitionConfig) Descriptor() ([]byte, []int) {
  395. return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{3}
  396. }
  397. func (m *StreamingRecognitionConfig) XXX_Unmarshal(b []byte) error {
  398. return xxx_messageInfo_StreamingRecognitionConfig.Unmarshal(m, b)
  399. }
  400. func (m *StreamingRecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
  401. return xxx_messageInfo_StreamingRecognitionConfig.Marshal(b, m, deterministic)
  402. }
  403. func (dst *StreamingRecognitionConfig) XXX_Merge(src proto.Message) {
  404. xxx_messageInfo_StreamingRecognitionConfig.Merge(dst, src)
  405. }
  406. func (m *StreamingRecognitionConfig) XXX_Size() int {
  407. return xxx_messageInfo_StreamingRecognitionConfig.Size(m)
  408. }
  409. func (m *StreamingRecognitionConfig) XXX_DiscardUnknown() {
  410. xxx_messageInfo_StreamingRecognitionConfig.DiscardUnknown(m)
  411. }
  412. var xxx_messageInfo_StreamingRecognitionConfig proto.InternalMessageInfo
  413. func (m *StreamingRecognitionConfig) GetConfig() *RecognitionConfig {
  414. if m != nil {
  415. return m.Config
  416. }
  417. return nil
  418. }
  419. func (m *StreamingRecognitionConfig) GetSingleUtterance() bool {
  420. if m != nil {
  421. return m.SingleUtterance
  422. }
  423. return false
  424. }
  425. func (m *StreamingRecognitionConfig) GetInterimResults() bool {
  426. if m != nil {
  427. return m.InterimResults
  428. }
  429. return false
  430. }
  431. // Provides information to the recognizer that specifies how to process the
  432. // request.
  433. type RecognitionConfig struct {
  434. // Encoding of audio data sent in all `RecognitionAudio` messages.
  435. // This field is optional for `FLAC` and `WAV` audio files and required
  436. // for all other audio formats. For details, see
  437. // [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
  438. Encoding RecognitionConfig_AudioEncoding `protobuf:"varint,1,opt,name=encoding,proto3,enum=google.cloud.speech.v1.RecognitionConfig_AudioEncoding" json:"encoding,omitempty"`
  439. // Sample rate in Hertz of the audio data sent in all
  440. // `RecognitionAudio` messages. Valid values are: 8000-48000.
  441. // 16000 is optimal. For best results, set the sampling rate of the audio
  442. // source to 16000 Hz. If that's not possible, use the native sample rate of
  443. // the audio source (instead of re-sampling).
  444. // This field is optional for `FLAC` and `WAV` audio files and required
  445. // for all other audio formats. For details, see
  446. // [AudioEncoding][google.cloud.speech.v1.RecognitionConfig.AudioEncoding].
  447. SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
  448. // *Optional* The number of channels in the input audio data.
  449. // ONLY set this for MULTI-CHANNEL recognition.
  450. // Valid values for LINEAR16 and FLAC are `1`-`8`.
  451. // Valid values for OGG_OPUS are '1'-'254'.
  452. // Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`.
  453. // If `0` or omitted, defaults to one channel (mono).
  454. // Note: We only recognize the first channel by default.
  455. // To perform independent recognition on each channel set
  456. // `enable_separate_recognition_per_channel` to 'true'.
  457. AudioChannelCount int32 `protobuf:"varint,7,opt,name=audio_channel_count,json=audioChannelCount,proto3" json:"audio_channel_count,omitempty"`
  458. // This needs to be set to `true` explicitly and `audio_channel_count` > 1
  459. // to get each channel recognized separately. The recognition result will
  460. // contain a `channel_tag` field to state which channel that result belongs
  461. // to. If this is not true, we will only recognize the first channel. The
  462. // request is billed cumulatively for all channels recognized:
  463. // `audio_channel_count` multiplied by the length of the audio.
  464. EnableSeparateRecognitionPerChannel bool `protobuf:"varint,12,opt,name=enable_separate_recognition_per_channel,json=enableSeparateRecognitionPerChannel,proto3" json:"enable_separate_recognition_per_channel,omitempty"`
  465. // *Required* The language of the supplied audio as a
  466. // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
  467. // Example: "en-US".
  468. // See [Language Support](/speech-to-text/docs/languages)
  469. // for a list of the currently supported language codes.
  470. LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
  471. // *Optional* Maximum number of recognition hypotheses to be returned.
  472. // Specifically, the maximum number of `SpeechRecognitionAlternative` messages
  473. // within each `SpeechRecognitionResult`.
  474. // The server may return fewer than `max_alternatives`.
  475. // Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
  476. // one. If omitted, will return a maximum of one.
  477. MaxAlternatives int32 `protobuf:"varint,4,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
  478. // *Optional* If set to `true`, the server will attempt to filter out
  479. // profanities, replacing all but the initial character in each filtered word
  480. // with asterisks, e.g. "f***". If set to `false` or omitted, profanities
  481. // won't be filtered out.
  482. ProfanityFilter bool `protobuf:"varint,5,opt,name=profanity_filter,json=profanityFilter,proto3" json:"profanity_filter,omitempty"`
  483. // *Optional* array of [SpeechContext][google.cloud.speech.v1.SpeechContext].
  484. // A means to provide context to assist the speech recognition. For more
  485. // information, see [Phrase Hints](/speech-to-text/docs/basics#phrase-hints).
  486. SpeechContexts []*SpeechContext `protobuf:"bytes,6,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
  487. // *Optional* If `true`, the top result includes a list of words and
  488. // the start and end time offsets (timestamps) for those words. If
  489. // `false`, no word-level time offset information is returned. The default is
  490. // `false`.
  491. EnableWordTimeOffsets bool `protobuf:"varint,8,opt,name=enable_word_time_offsets,json=enableWordTimeOffsets,proto3" json:"enable_word_time_offsets,omitempty"`
  492. // *Optional* If 'true', adds punctuation to recognition result hypotheses.
  493. // This feature is only available in select languages. Setting this for
  494. // requests in other languages has no effect at all.
  495. // The default 'false' value does not add punctuation to result hypotheses.
  496. // Note: This is currently offered as an experimental service, complimentary
  497. // to all users. In the future this may be exclusively available as a
  498. // premium feature.
  499. EnableAutomaticPunctuation bool `protobuf:"varint,11,opt,name=enable_automatic_punctuation,json=enableAutomaticPunctuation,proto3" json:"enable_automatic_punctuation,omitempty"`
  500. // *Optional* Which model to select for the given request. Select the model
  501. // best suited to your domain to get best results. If a model is not
  502. // explicitly specified, then we auto-select a model based on the parameters
  503. // in the RecognitionConfig.
  504. // <table>
  505. // <tr>
  506. // <td><b>Model</b></td>
  507. // <td><b>Description</b></td>
  508. // </tr>
  509. // <tr>
  510. // <td><code>command_and_search</code></td>
  511. // <td>Best for short queries such as voice commands or voice search.</td>
  512. // </tr>
  513. // <tr>
  514. // <td><code>phone_call</code></td>
  515. // <td>Best for audio that originated from a phone call (typically
  516. // recorded at an 8khz sampling rate).</td>
  517. // </tr>
  518. // <tr>
  519. // <td><code>video</code></td>
  520. // <td>Best for audio that originated from from video or includes multiple
  521. // speakers. Ideally the audio is recorded at a 16khz or greater
  522. // sampling rate. This is a premium model that costs more than the
  523. // standard rate.</td>
  524. // </tr>
  525. // <tr>
  526. // <td><code>default</code></td>
  527. // <td>Best for audio that is not one of the specific audio models.
  528. // For example, long-form audio. Ideally the audio is high-fidelity,
  529. // recorded at a 16khz or greater sampling rate.</td>
  530. // </tr>
  531. // </table>
  532. Model string `protobuf:"bytes,13,opt,name=model,proto3" json:"model,omitempty"`
  533. // *Optional* Set to true to use an enhanced model for speech recognition.
  534. // If `use_enhanced` is set to true and the `model` field is not set, then
  535. // an appropriate enhanced model is chosen if:
  536. // 1. project is eligible for requesting enhanced models
  537. // 2. an enhanced model exists for the audio
  538. //
  539. // If `use_enhanced` is true and an enhanced version of the specified model
  540. // does not exist, then the speech is recognized using the standard version
  541. // of the specified model.
  542. //
  543. // Enhanced speech models require that you opt-in to data logging using
  544. // instructions in the
  545. // [documentation](/speech-to-text/docs/enable-data-logging). If you set
  546. // `use_enhanced` to true and you have not enabled audio logging, then you
  547. // will receive an error.
  548. UseEnhanced bool `protobuf:"varint,14,opt,name=use_enhanced,json=useEnhanced,proto3" json:"use_enhanced,omitempty"`
  549. XXX_NoUnkeyedLiteral struct{} `json:"-"`
  550. XXX_unrecognized []byte `json:"-"`
  551. XXX_sizecache int32 `json:"-"`
  552. }
// Reset reverts the message to its zero state.
func (m *RecognitionConfig) Reset()         { *m = RecognitionConfig{} }

// String returns the compact proto text representation of the message.
func (m *RecognitionConfig) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks RecognitionConfig as a protobuf message.
func (*RecognitionConfig) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within that descriptor.
func (*RecognitionConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{4}
}

// XXX_Unmarshal is an internal hook used by the proto runtime.
func (m *RecognitionConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionConfig.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook used by the proto runtime.
func (m *RecognitionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionConfig.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook used by the proto runtime.
func (dst *RecognitionConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionConfig.Merge(dst, src)
}

// XXX_Size is an internal hook used by the proto runtime.
func (m *RecognitionConfig) XXX_Size() int {
	return xxx_messageInfo_RecognitionConfig.Size(m)
}

// XXX_DiscardUnknown is an internal hook used by the proto runtime.
func (m *RecognitionConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionConfig.DiscardUnknown(m)
}

// xxx_messageInfo_RecognitionConfig caches reflection info for this message type.
var xxx_messageInfo_RecognitionConfig proto.InternalMessageInfo

// GetEncoding returns the audio encoding; nil-safe, defaulting to
// ENCODING_UNSPECIFIED.
func (m *RecognitionConfig) GetEncoding() RecognitionConfig_AudioEncoding {
	if m != nil {
		return m.Encoding
	}
	return RecognitionConfig_ENCODING_UNSPECIFIED
}

// GetSampleRateHertz returns the sample rate in hertz; nil-safe.
func (m *RecognitionConfig) GetSampleRateHertz() int32 {
	if m != nil {
		return m.SampleRateHertz
	}
	return 0
}

// GetAudioChannelCount returns the number of audio channels; nil-safe.
func (m *RecognitionConfig) GetAudioChannelCount() int32 {
	if m != nil {
		return m.AudioChannelCount
	}
	return 0
}

// GetEnableSeparateRecognitionPerChannel reports whether each channel is
// recognized separately; nil-safe.
func (m *RecognitionConfig) GetEnableSeparateRecognitionPerChannel() bool {
	if m != nil {
		return m.EnableSeparateRecognitionPerChannel
	}
	return false
}

// GetLanguageCode returns the BCP-47 language code field; nil-safe.
func (m *RecognitionConfig) GetLanguageCode() string {
	if m != nil {
		return m.LanguageCode
	}
	return ""
}

// GetMaxAlternatives returns the maximum number of recognition hypotheses
// requested; nil-safe.
func (m *RecognitionConfig) GetMaxAlternatives() int32 {
	if m != nil {
		return m.MaxAlternatives
	}
	return 0
}

// GetProfanityFilter reports whether profanity filtering is enabled; nil-safe.
func (m *RecognitionConfig) GetProfanityFilter() bool {
	if m != nil {
		return m.ProfanityFilter
	}
	return false
}

// GetSpeechContexts returns the configured speech context hints; nil-safe.
func (m *RecognitionConfig) GetSpeechContexts() []*SpeechContext {
	if m != nil {
		return m.SpeechContexts
	}
	return nil
}

// GetEnableWordTimeOffsets reports whether per-word time offsets are
// requested; nil-safe.
func (m *RecognitionConfig) GetEnableWordTimeOffsets() bool {
	if m != nil {
		return m.EnableWordTimeOffsets
	}
	return false
}

// GetEnableAutomaticPunctuation reports whether automatic punctuation is
// requested; nil-safe.
func (m *RecognitionConfig) GetEnableAutomaticPunctuation() bool {
	if m != nil {
		return m.EnableAutomaticPunctuation
	}
	return false
}

// GetModel returns the requested recognition model name; nil-safe.
func (m *RecognitionConfig) GetModel() string {
	if m != nil {
		return m.Model
	}
	return ""
}

// GetUseEnhanced reports whether an enhanced model is requested; nil-safe.
func (m *RecognitionConfig) GetUseEnhanced() bool {
	if m != nil {
		return m.UseEnhanced
	}
	return false
}
// Provides "hints" to the speech recognizer to favor specific words and phrases
// in the results.
type SpeechContext struct {
	// *Optional* A list of strings containing words and phrases "hints" so that
	// the speech recognition is more likely to recognize them. This can be used
	// to improve the accuracy for specific words and phrases, for example, if
	// specific commands are typically spoken by the user. This can also be used
	// to add additional words to the vocabulary of the recognizer. See
	// [usage limits](/speech-to-text/quotas#content).
	Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
	// Internal proto runtime bookkeeping fields.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset reverts the message to its zero state.
func (m *SpeechContext) Reset()         { *m = SpeechContext{} }

// String returns the compact proto text representation of the message.
func (m *SpeechContext) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks SpeechContext as a protobuf message.
func (*SpeechContext) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within that descriptor.
func (*SpeechContext) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{5}
}

// XXX_Unmarshal is an internal hook used by the proto runtime.
func (m *SpeechContext) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechContext.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook used by the proto runtime.
func (m *SpeechContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechContext.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook used by the proto runtime.
func (dst *SpeechContext) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechContext.Merge(dst, src)
}

// XXX_Size is an internal hook used by the proto runtime.
func (m *SpeechContext) XXX_Size() int {
	return xxx_messageInfo_SpeechContext.Size(m)
}

// XXX_DiscardUnknown is an internal hook used by the proto runtime.
func (m *SpeechContext) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechContext.DiscardUnknown(m)
}

// xxx_messageInfo_SpeechContext caches reflection info for this message type.
var xxx_messageInfo_SpeechContext proto.InternalMessageInfo

// GetPhrases returns the list of hint phrases; nil-safe.
func (m *SpeechContext) GetPhrases() []string {
	if m != nil {
		return m.Phrases
	}
	return nil
}
// Contains audio data in the encoding specified in the `RecognitionConfig`.
// Either `content` or `uri` must be supplied. Supplying both or neither
// returns [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT].
// See [content limits](/speech-to-text/quotas#content).
type RecognitionAudio struct {
	// The audio source, which is either inline content or a Google Cloud
	// Storage uri.
	//
	// Types that are valid to be assigned to AudioSource:
	//	*RecognitionAudio_Content
	//	*RecognitionAudio_Uri
	AudioSource isRecognitionAudio_AudioSource `protobuf_oneof:"audio_source"`
	// Internal proto runtime bookkeeping fields.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset reverts the message to its zero state.
func (m *RecognitionAudio) Reset()         { *m = RecognitionAudio{} }

// String returns the compact proto text representation of the message.
func (m *RecognitionAudio) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks RecognitionAudio as a protobuf message.
func (*RecognitionAudio) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within that descriptor.
func (*RecognitionAudio) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{6}
}

// XXX_Unmarshal is an internal hook used by the proto runtime.
func (m *RecognitionAudio) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognitionAudio.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook used by the proto runtime.
func (m *RecognitionAudio) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognitionAudio.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook used by the proto runtime.
func (dst *RecognitionAudio) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognitionAudio.Merge(dst, src)
}

// XXX_Size is an internal hook used by the proto runtime.
func (m *RecognitionAudio) XXX_Size() int {
	return xxx_messageInfo_RecognitionAudio.Size(m)
}

// XXX_DiscardUnknown is an internal hook used by the proto runtime.
func (m *RecognitionAudio) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognitionAudio.DiscardUnknown(m)
}

// xxx_messageInfo_RecognitionAudio caches reflection info for this message type.
var xxx_messageInfo_RecognitionAudio proto.InternalMessageInfo

// isRecognitionAudio_AudioSource is the interface implemented by the
// wrapper types of the audio_source oneof.
type isRecognitionAudio_AudioSource interface {
	isRecognitionAudio_AudioSource()
}

// RecognitionAudio_Content wraps inline audio bytes for the audio_source oneof.
type RecognitionAudio_Content struct {
	Content []byte `protobuf:"bytes,1,opt,name=content,proto3,oneof"`
}

// RecognitionAudio_Uri wraps a storage URI for the audio_source oneof.
type RecognitionAudio_Uri struct {
	Uri string `protobuf:"bytes,2,opt,name=uri,proto3,oneof"`
}

func (*RecognitionAudio_Content) isRecognitionAudio_AudioSource() {}

func (*RecognitionAudio_Uri) isRecognitionAudio_AudioSource() {}

// GetAudioSource returns the populated oneof wrapper, or nil; nil-safe.
func (m *RecognitionAudio) GetAudioSource() isRecognitionAudio_AudioSource {
	if m != nil {
		return m.AudioSource
	}
	return nil
}

// GetContent returns the inline audio bytes if that oneof case is set,
// otherwise nil.
func (m *RecognitionAudio) GetContent() []byte {
	if x, ok := m.GetAudioSource().(*RecognitionAudio_Content); ok {
		return x.Content
	}
	return nil
}

// GetUri returns the audio URI if that oneof case is set, otherwise "".
func (m *RecognitionAudio) GetUri() string {
	if x, ok := m.GetAudioSource().(*RecognitionAudio_Uri); ok {
		return x.Uri
	}
	return ""
}
// XXX_OneofFuncs is for the internal use of the proto package.
// It returns the marshal, unmarshal and size functions for the
// audio_source oneof, plus the wrapper types the oneof may hold.
func (*RecognitionAudio) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _RecognitionAudio_OneofMarshaler, _RecognitionAudio_OneofUnmarshaler, _RecognitionAudio_OneofSizer, []interface{}{
		(*RecognitionAudio_Content)(nil),
		(*RecognitionAudio_Uri)(nil),
	}
}

// _RecognitionAudio_OneofMarshaler writes whichever audio_source case is
// set into b, emitting the field's tag/wire-type varint followed by its
// length-delimited payload. An unset oneof writes nothing.
func _RecognitionAudio_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*RecognitionAudio)
	// audio_source
	switch x := m.AudioSource.(type) {
	case *RecognitionAudio_Content:
		// Field 1, wire type "bytes" (length-delimited).
		b.EncodeVarint(1<<3 | proto.WireBytes)
		b.EncodeRawBytes(x.Content)
	case *RecognitionAudio_Uri:
		// Field 2, wire type "bytes" (length-delimited).
		b.EncodeVarint(2<<3 | proto.WireBytes)
		b.EncodeStringBytes(x.Uri)
	case nil:
	default:
		return fmt.Errorf("RecognitionAudio.AudioSource has unexpected type %T", x)
	}
	return nil
}

// _RecognitionAudio_OneofUnmarshaler decodes one audio_source field from b.
// It reports (true, err) when the tag belongs to this oneof (checking the
// wire type first), and (false, nil) for tags it does not own.
func _RecognitionAudio_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*RecognitionAudio)
	switch tag {
	case 1: // audio_source.content
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeRawBytes(true)
		m.AudioSource = &RecognitionAudio_Content{x}
		return true, err
	case 2: // audio_source.uri
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		x, err := b.DecodeStringBytes()
		m.AudioSource = &RecognitionAudio_Uri{x}
		return true, err
	default:
		return false, nil
	}
}

// _RecognitionAudio_OneofSizer returns the encoded size in bytes of the
// audio_source oneof: one byte of tag/wire, a varint length prefix, then
// the payload itself. An unset oneof contributes zero.
func _RecognitionAudio_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*RecognitionAudio)
	// audio_source
	switch x := m.AudioSource.(type) {
	case *RecognitionAudio_Content:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.Content)))
		n += len(x.Content)
	case *RecognitionAudio_Uri:
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.Uri)))
		n += len(x.Uri)
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
// The only message returned to the client by the `Recognize` method. It
// contains the result as zero or more sequential `SpeechRecognitionResult`
// messages.
type RecognizeResponse struct {
	// Output only. Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// Internal proto runtime bookkeeping fields.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset reverts the message to its zero state.
func (m *RecognizeResponse) Reset()         { *m = RecognizeResponse{} }

// String returns the compact proto text representation of the message.
func (m *RecognizeResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks RecognizeResponse as a protobuf message.
func (*RecognizeResponse) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within that descriptor.
func (*RecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{7}
}

// XXX_Unmarshal is an internal hook used by the proto runtime.
func (m *RecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecognizeResponse.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook used by the proto runtime.
func (m *RecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RecognizeResponse.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook used by the proto runtime.
func (dst *RecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RecognizeResponse.Merge(dst, src)
}

// XXX_Size is an internal hook used by the proto runtime.
func (m *RecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_RecognizeResponse.Size(m)
}

// XXX_DiscardUnknown is an internal hook used by the proto runtime.
func (m *RecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_RecognizeResponse.DiscardUnknown(m)
}

// xxx_messageInfo_RecognizeResponse caches reflection info for this message type.
var xxx_messageInfo_RecognizeResponse proto.InternalMessageInfo

// GetResults returns the transcription results; nil-safe.
func (m *RecognizeResponse) GetResults() []*SpeechRecognitionResult {
	if m != nil {
		return m.Results
	}
	return nil
}
// The only message returned to the client by the `LongRunningRecognize` method.
// It contains the result as zero or more sequential `SpeechRecognitionResult`
// messages. It is included in the `result.response` field of the `Operation`
// returned by the `GetOperation` call of the `google::longrunning::Operations`
// service.
type LongRunningRecognizeResponse struct {
	// Output only. Sequential list of transcription results corresponding to
	// sequential portions of audio.
	Results []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// Internal proto runtime bookkeeping fields.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset reverts the message to its zero state.
func (m *LongRunningRecognizeResponse) Reset()         { *m = LongRunningRecognizeResponse{} }

// String returns the compact proto text representation of the message.
func (m *LongRunningRecognizeResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks LongRunningRecognizeResponse as a protobuf message.
func (*LongRunningRecognizeResponse) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within that descriptor.
func (*LongRunningRecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{8}
}

// XXX_Unmarshal is an internal hook used by the proto runtime.
func (m *LongRunningRecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeResponse.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook used by the proto runtime.
func (m *LongRunningRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeResponse.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook used by the proto runtime.
func (dst *LongRunningRecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeResponse.Merge(dst, src)
}

// XXX_Size is an internal hook used by the proto runtime.
func (m *LongRunningRecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeResponse.Size(m)
}

// XXX_DiscardUnknown is an internal hook used by the proto runtime.
func (m *LongRunningRecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeResponse.DiscardUnknown(m)
}

// xxx_messageInfo_LongRunningRecognizeResponse caches reflection info for this message type.
var xxx_messageInfo_LongRunningRecognizeResponse proto.InternalMessageInfo

// GetResults returns the transcription results; nil-safe.
func (m *LongRunningRecognizeResponse) GetResults() []*SpeechRecognitionResult {
	if m != nil {
		return m.Results
	}
	return nil
}
// Describes the progress of a long-running `LongRunningRecognize` call. It is
// included in the `metadata` field of the `Operation` returned by the
// `GetOperation` call of the `google::longrunning::Operations` service.
type LongRunningRecognizeMetadata struct {
	// Approximate percentage of audio processed thus far. Guaranteed to be 100
	// when the audio is fully processed and the results are available.
	ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
	// Time when the request was received.
	StartTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Time of the most recent processing update.
	LastUpdateTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"`
	// Internal proto runtime bookkeeping fields.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset reverts the message to its zero state.
func (m *LongRunningRecognizeMetadata) Reset()         { *m = LongRunningRecognizeMetadata{} }

// String returns the compact proto text representation of the message.
func (m *LongRunningRecognizeMetadata) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks LongRunningRecognizeMetadata as a protobuf message.
func (*LongRunningRecognizeMetadata) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within that descriptor.
func (*LongRunningRecognizeMetadata) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{9}
}

// XXX_Unmarshal is an internal hook used by the proto runtime.
func (m *LongRunningRecognizeMetadata) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook used by the proto runtime.
func (m *LongRunningRecognizeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook used by the proto runtime.
func (dst *LongRunningRecognizeMetadata) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LongRunningRecognizeMetadata.Merge(dst, src)
}

// XXX_Size is an internal hook used by the proto runtime.
func (m *LongRunningRecognizeMetadata) XXX_Size() int {
	return xxx_messageInfo_LongRunningRecognizeMetadata.Size(m)
}

// XXX_DiscardUnknown is an internal hook used by the proto runtime.
func (m *LongRunningRecognizeMetadata) XXX_DiscardUnknown() {
	xxx_messageInfo_LongRunningRecognizeMetadata.DiscardUnknown(m)
}

// xxx_messageInfo_LongRunningRecognizeMetadata caches reflection info for this message type.
var xxx_messageInfo_LongRunningRecognizeMetadata proto.InternalMessageInfo

// GetProgressPercent returns the approximate progress percentage; nil-safe.
func (m *LongRunningRecognizeMetadata) GetProgressPercent() int32 {
	if m != nil {
		return m.ProgressPercent
	}
	return 0
}

// GetStartTime returns the request receipt time; nil-safe.
func (m *LongRunningRecognizeMetadata) GetStartTime() *timestamp.Timestamp {
	if m != nil {
		return m.StartTime
	}
	return nil
}

// GetLastUpdateTime returns the most recent processing-update time; nil-safe.
func (m *LongRunningRecognizeMetadata) GetLastUpdateTime() *timestamp.Timestamp {
	if m != nil {
		return m.LastUpdateTime
	}
	return nil
}
// `StreamingRecognizeResponse` is the only message returned to the client by
// `StreamingRecognize`. A series of zero or more `StreamingRecognizeResponse`
// messages are streamed back to the client. If there is no recognizable
// audio, and `single_utterance` is set to false, then no messages are streamed
// back to the client.
//
// Here's an example of a series of ten `StreamingRecognizeResponse`s that might
// be returned while processing audio:
//
// 1. results { alternatives { transcript: "tube" } stability: 0.01 }
//
// 2. results { alternatives { transcript: "to be a" } stability: 0.01 }
//
// 3. results { alternatives { transcript: "to be" } stability: 0.9 }
//    results { alternatives { transcript: " or not to be" } stability: 0.01 }
//
// 4. results { alternatives { transcript: "to be or not to be"
//    confidence: 0.92 }
//    alternatives { transcript: "to bee or not to bee" }
//    is_final: true }
//
// 5. results { alternatives { transcript: " that's" } stability: 0.01 }
//
// 6. results { alternatives { transcript: " that is" } stability: 0.9 }
//    results { alternatives { transcript: " the question" } stability: 0.01 }
//
// 7. results { alternatives { transcript: " that is the question"
//    confidence: 0.98 }
//    alternatives { transcript: " that was the question" }
//    is_final: true }
//
// Notes:
//
// - Only two of the above responses #4 and #7 contain final results; they are
//   indicated by `is_final: true`. Concatenating these together generates the
//   full transcript: "to be or not to be that is the question".
//
// - The others contain interim `results`. #3 and #6 contain two interim
//   `results`: the first portion has a high stability and is less likely to
//   change; the second portion has a low stability and is very likely to
//   change. A UI designer might choose to show only high stability `results`.
//
// - The specific `stability` and `confidence` values shown above are only for
//   illustrative purposes. Actual values may vary.
//
// - In each response, only one of these fields will be set:
//   `error`,
//   `speech_event_type`, or
//   one or more (repeated) `results`.
type StreamingRecognizeResponse struct {
	// Output only. If set, returns a [google.rpc.Status][google.rpc.Status]
	// message that specifies the error for the operation.
	Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	// Output only. This repeated list contains zero or more results that
	// correspond to consecutive portions of the audio currently being processed.
	// It contains zero or one `is_final=true` result (the newly settled portion),
	// followed by zero or more `is_final=false` results (the interim results).
	Results []*StreamingRecognitionResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"`
	// Output only. Indicates the type of speech event.
	SpeechEventType StreamingRecognizeResponse_SpeechEventType `protobuf:"varint,4,opt,name=speech_event_type,json=speechEventType,proto3,enum=google.cloud.speech.v1.StreamingRecognizeResponse_SpeechEventType" json:"speech_event_type,omitempty"`
	// Internal proto runtime bookkeeping fields.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset reverts the message to its zero state.
func (m *StreamingRecognizeResponse) Reset()         { *m = StreamingRecognizeResponse{} }

// String returns the compact proto text representation of the message.
func (m *StreamingRecognizeResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks StreamingRecognizeResponse as a protobuf message.
func (*StreamingRecognizeResponse) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within that descriptor.
func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{10}
}

// XXX_Unmarshal is an internal hook used by the proto runtime.
func (m *StreamingRecognizeResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognizeResponse.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook used by the proto runtime.
func (m *StreamingRecognizeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognizeResponse.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook used by the proto runtime.
func (dst *StreamingRecognizeResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognizeResponse.Merge(dst, src)
}

// XXX_Size is an internal hook used by the proto runtime.
func (m *StreamingRecognizeResponse) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognizeResponse.Size(m)
}

// XXX_DiscardUnknown is an internal hook used by the proto runtime.
func (m *StreamingRecognizeResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognizeResponse.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognizeResponse caches reflection info for this message type.
var xxx_messageInfo_StreamingRecognizeResponse proto.InternalMessageInfo

// GetError returns the operation error status, if any; nil-safe.
func (m *StreamingRecognizeResponse) GetError() *status.Status {
	if m != nil {
		return m.Error
	}
	return nil
}

// GetResults returns the streaming recognition results; nil-safe.
func (m *StreamingRecognizeResponse) GetResults() []*StreamingRecognitionResult {
	if m != nil {
		return m.Results
	}
	return nil
}

// GetSpeechEventType returns the speech event type; nil-safe, defaulting to
// SPEECH_EVENT_UNSPECIFIED.
func (m *StreamingRecognizeResponse) GetSpeechEventType() StreamingRecognizeResponse_SpeechEventType {
	if m != nil {
		return m.SpeechEventType
	}
	return StreamingRecognizeResponse_SPEECH_EVENT_UNSPECIFIED
}
// A streaming speech recognition result corresponding to a portion of the audio
// that is currently being processed.
type StreamingRecognitionResult struct {
	// Output only. May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	// These alternatives are ordered in terms of accuracy, with the top (first)
	// alternative being the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// Output only. If `false`, this `StreamingRecognitionResult` represents an
	// interim result that may change. If `true`, this is the final time the
	// speech service will return this particular `StreamingRecognitionResult`,
	// the recognizer will not return any further hypotheses for this portion of
	// the transcript and corresponding audio.
	IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal,proto3" json:"is_final,omitempty"`
	// Output only. An estimate of the likelihood that the recognizer will not
	// change its guess about this interim result. Values range from 0.0
	// (completely unstable) to 1.0 (completely stable).
	// This field is only provided for interim results (`is_final=false`).
	// The default of 0.0 is a sentinel value indicating `stability` was not set.
	Stability float32 `protobuf:"fixed32,3,opt,name=stability,proto3" json:"stability,omitempty"`
	// For multi-channel audio, this is the channel number corresponding to the
	// recognized result for the audio from that channel.
	// For audio_channel_count = N, its output values can range from '1' to 'N'.
	ChannelTag int32 `protobuf:"varint,5,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
	// Internal proto runtime bookkeeping fields.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset reverts the message to its zero state.
func (m *StreamingRecognitionResult) Reset()         { *m = StreamingRecognitionResult{} }

// String returns the compact proto text representation of the message.
func (m *StreamingRecognitionResult) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks StreamingRecognitionResult as a protobuf message.
func (*StreamingRecognitionResult) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within that descriptor.
func (*StreamingRecognitionResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{11}
}

// XXX_Unmarshal is an internal hook used by the proto runtime.
func (m *StreamingRecognitionResult) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamingRecognitionResult.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook used by the proto runtime.
func (m *StreamingRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamingRecognitionResult.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook used by the proto runtime.
func (dst *StreamingRecognitionResult) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamingRecognitionResult.Merge(dst, src)
}

// XXX_Size is an internal hook used by the proto runtime.
func (m *StreamingRecognitionResult) XXX_Size() int {
	return xxx_messageInfo_StreamingRecognitionResult.Size(m)
}

// XXX_DiscardUnknown is an internal hook used by the proto runtime.
func (m *StreamingRecognitionResult) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamingRecognitionResult.DiscardUnknown(m)
}

// xxx_messageInfo_StreamingRecognitionResult caches reflection info for this message type.
var xxx_messageInfo_StreamingRecognitionResult proto.InternalMessageInfo

// GetAlternatives returns the recognition hypotheses; nil-safe.
func (m *StreamingRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
	if m != nil {
		return m.Alternatives
	}
	return nil
}

// GetIsFinal reports whether this result is final; nil-safe.
func (m *StreamingRecognitionResult) GetIsFinal() bool {
	if m != nil {
		return m.IsFinal
	}
	return false
}

// GetStability returns the interim-result stability estimate; nil-safe.
func (m *StreamingRecognitionResult) GetStability() float32 {
	if m != nil {
		return m.Stability
	}
	return 0
}

// GetChannelTag returns the source channel number for this result; nil-safe.
func (m *StreamingRecognitionResult) GetChannelTag() int32 {
	if m != nil {
		return m.ChannelTag
	}
	return 0
}
// A speech recognition result corresponding to a portion of the audio.
type SpeechRecognitionResult struct {
	// Output only. May contain one or more recognition hypotheses (up to the
	// maximum specified in `max_alternatives`).
	// These alternatives are ordered in terms of accuracy, with the top (first)
	// alternative being the most probable, as ranked by the recognizer.
	Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// For multi-channel audio, this is the channel number corresponding to the
	// recognized result for the audio from that channel.
	// For audio_channel_count = N, its output values can range from '1' to 'N'.
	ChannelTag int32 `protobuf:"varint,2,opt,name=channel_tag,json=channelTag,proto3" json:"channel_tag,omitempty"`
	// Internal proto runtime bookkeeping fields.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset reverts the message to its zero state.
func (m *SpeechRecognitionResult) Reset()         { *m = SpeechRecognitionResult{} }

// String returns the compact proto text representation of the message.
func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks SpeechRecognitionResult as a protobuf message.
func (*SpeechRecognitionResult) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within that descriptor.
func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{12}
}

// XXX_Unmarshal is an internal hook used by the proto runtime.
func (m *SpeechRecognitionResult) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechRecognitionResult.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook used by the proto runtime.
func (m *SpeechRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechRecognitionResult.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook used by the proto runtime.
func (dst *SpeechRecognitionResult) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechRecognitionResult.Merge(dst, src)
}

// XXX_Size is an internal hook used by the proto runtime.
func (m *SpeechRecognitionResult) XXX_Size() int {
	return xxx_messageInfo_SpeechRecognitionResult.Size(m)
}

// XXX_DiscardUnknown is an internal hook used by the proto runtime.
func (m *SpeechRecognitionResult) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechRecognitionResult.DiscardUnknown(m)
}

// xxx_messageInfo_SpeechRecognitionResult caches reflection info for this message type.
var xxx_messageInfo_SpeechRecognitionResult proto.InternalMessageInfo

// GetAlternatives returns the recognition hypotheses; nil-safe.
func (m *SpeechRecognitionResult) GetAlternatives() []*SpeechRecognitionAlternative {
	if m != nil {
		return m.Alternatives
	}
	return nil
}

// GetChannelTag returns the source channel number for this result; nil-safe.
func (m *SpeechRecognitionResult) GetChannelTag() int32 {
	if m != nil {
		return m.ChannelTag
	}
	return 0
}
// Alternative hypotheses (a.k.a. n-best list).
type SpeechRecognitionAlternative struct {
	// Output only. Transcript text representing the words that the user spoke.
	Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
	// Output only. The confidence estimate between 0.0 and 1.0. A higher number
	// indicates an estimated greater likelihood that the recognized words are
	// correct. This field is set only for the top alternative of a non-streaming
	// result or, of a streaming result where `is_final=true`.
	// This field is not guaranteed to be accurate and users should not rely on it
	// to be always provided.
	// The default of 0.0 is a sentinel value indicating `confidence` was not set.
	Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Output only. A list of word-specific information for each recognized word.
	// Note: When `enable_speaker_diarization` is true, you will see all the words
	// from the beginning of the audio.
	Words []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
	// Internal proto runtime bookkeeping fields.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset reverts the message to its zero state.
func (m *SpeechRecognitionAlternative) Reset()         { *m = SpeechRecognitionAlternative{} }

// String returns the compact proto text representation of the message.
func (m *SpeechRecognitionAlternative) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks SpeechRecognitionAlternative as a protobuf message.
func (*SpeechRecognitionAlternative) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor bytes and the index
// path of this message within that descriptor.
func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{13}
}

// XXX_Unmarshal is an internal hook used by the proto runtime.
func (m *SpeechRecognitionAlternative) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SpeechRecognitionAlternative.Unmarshal(m, b)
}

// XXX_Marshal is an internal hook used by the proto runtime.
func (m *SpeechRecognitionAlternative) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SpeechRecognitionAlternative.Marshal(b, m, deterministic)
}

// XXX_Merge is an internal hook used by the proto runtime.
func (dst *SpeechRecognitionAlternative) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SpeechRecognitionAlternative.Merge(dst, src)
}

// XXX_Size is an internal hook used by the proto runtime.
func (m *SpeechRecognitionAlternative) XXX_Size() int {
	return xxx_messageInfo_SpeechRecognitionAlternative.Size(m)
}

// XXX_DiscardUnknown is an internal hook used by the proto runtime.
func (m *SpeechRecognitionAlternative) XXX_DiscardUnknown() {
	xxx_messageInfo_SpeechRecognitionAlternative.DiscardUnknown(m)
}

// xxx_messageInfo_SpeechRecognitionAlternative caches reflection info for this message type.
var xxx_messageInfo_SpeechRecognitionAlternative proto.InternalMessageInfo

// GetTranscript returns the transcript text; nil-safe.
func (m *SpeechRecognitionAlternative) GetTranscript() string {
	if m != nil {
		return m.Transcript
	}
	return ""
}

// GetConfidence returns the confidence estimate; nil-safe.
func (m *SpeechRecognitionAlternative) GetConfidence() float32 {
	if m != nil {
		return m.Confidence
	}
	return 0
}

// GetWords returns the per-word information list; nil-safe.
func (m *SpeechRecognitionAlternative) GetWords() []*WordInfo {
	if m != nil {
		return m.Words
	}
	return nil
}
// WordInfo carries word-specific information for recognized words.
type WordInfo struct {
	// Output only. Time offset relative to the beginning of the audio,
	// and corresponding to the start of the spoken word.
	// This field is only set if `enable_word_time_offsets=true` and only
	// in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset can
	// vary.
	StartTime *duration.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Output only. Time offset relative to the beginning of the audio,
	// and corresponding to the end of the spoken word.
	// This field is only set if `enable_word_time_offsets=true` and only
	// in the top hypothesis.
	// This is an experimental feature and the accuracy of the time offset can
	// vary.
	EndTime *duration.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
	// Output only. The word corresponding to this set of information.
	Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
	// Internal protobuf bookkeeping fields; not part of the API surface.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
// Reset clears the message to its zero value.
func (m *WordInfo) Reset() { *m = WordInfo{} }

// String renders the message in the compact protobuf text format.
func (m *WordInfo) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags WordInfo as a protobuf message.
func (*WordInfo) ProtoMessage() {}

// Descriptor returns the gzipped file descriptor for this .proto file and
// the index path of WordInfo within it.
func (*WordInfo) Descriptor() ([]byte, []int) {
	return fileDescriptor_cloud_speech_1e3d83e50c51747b, []int{14}
}

// XXX_Unmarshal decodes wire-format bytes b into m.
func (m *WordInfo) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_WordInfo.Unmarshal(m, b)
}

// XXX_Marshal appends the wire-format encoding of m to b.
func (m *WordInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_WordInfo.Marshal(b, m, deterministic)
}

// XXX_Merge merges src into dst via the generated message-info table.
func (dst *WordInfo) XXX_Merge(src proto.Message) {
	xxx_messageInfo_WordInfo.Merge(dst, src)
}

// XXX_Size reports the wire-encoded size of m in bytes.
func (m *WordInfo) XXX_Size() int {
	return xxx_messageInfo_WordInfo.Size(m)
}

// XXX_DiscardUnknown drops any unrecognized fields retained from decoding.
func (m *WordInfo) XXX_DiscardUnknown() {
	xxx_messageInfo_WordInfo.DiscardUnknown(m)
}

// xxx_messageInfo_WordInfo caches reflection data used by the XXX_* methods.
var xxx_messageInfo_WordInfo proto.InternalMessageInfo
  1284. func (m *WordInfo) GetStartTime() *duration.Duration {
  1285. if m != nil {
  1286. return m.StartTime
  1287. }
  1288. return nil
  1289. }
  1290. func (m *WordInfo) GetEndTime() *duration.Duration {
  1291. if m != nil {
  1292. return m.EndTime
  1293. }
  1294. return nil
  1295. }
  1296. func (m *WordInfo) GetWord() string {
  1297. if m != nil {
  1298. return m.Word
  1299. }
  1300. return ""
  1301. }
// init registers every generated message type and enum with the proto
// runtime under its fully-qualified name so the encoding machinery and
// reflection can resolve them.
func init() {
	proto.RegisterType((*RecognizeRequest)(nil), "google.cloud.speech.v1.RecognizeRequest")
	proto.RegisterType((*LongRunningRecognizeRequest)(nil), "google.cloud.speech.v1.LongRunningRecognizeRequest")
	proto.RegisterType((*StreamingRecognizeRequest)(nil), "google.cloud.speech.v1.StreamingRecognizeRequest")
	proto.RegisterType((*StreamingRecognitionConfig)(nil), "google.cloud.speech.v1.StreamingRecognitionConfig")
	proto.RegisterType((*RecognitionConfig)(nil), "google.cloud.speech.v1.RecognitionConfig")
	proto.RegisterType((*SpeechContext)(nil), "google.cloud.speech.v1.SpeechContext")
	proto.RegisterType((*RecognitionAudio)(nil), "google.cloud.speech.v1.RecognitionAudio")
	proto.RegisterType((*RecognizeResponse)(nil), "google.cloud.speech.v1.RecognizeResponse")
	proto.RegisterType((*LongRunningRecognizeResponse)(nil), "google.cloud.speech.v1.LongRunningRecognizeResponse")
	proto.RegisterType((*LongRunningRecognizeMetadata)(nil), "google.cloud.speech.v1.LongRunningRecognizeMetadata")
	proto.RegisterType((*StreamingRecognizeResponse)(nil), "google.cloud.speech.v1.StreamingRecognizeResponse")
	proto.RegisterType((*StreamingRecognitionResult)(nil), "google.cloud.speech.v1.StreamingRecognitionResult")
	proto.RegisterType((*SpeechRecognitionResult)(nil), "google.cloud.speech.v1.SpeechRecognitionResult")
	proto.RegisterType((*SpeechRecognitionAlternative)(nil), "google.cloud.speech.v1.SpeechRecognitionAlternative")
	proto.RegisterType((*WordInfo)(nil), "google.cloud.speech.v1.WordInfo")
	proto.RegisterEnum("google.cloud.speech.v1.RecognitionConfig_AudioEncoding", RecognitionConfig_AudioEncoding_name, RecognitionConfig_AudioEncoding_value)
	proto.RegisterEnum("google.cloud.speech.v1.StreamingRecognizeResponse_SpeechEventType", StreamingRecognizeResponse_SpeechEventType_name, StreamingRecognizeResponse_SpeechEventType_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// SpeechClient is the client API for Speech service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SpeechClient interface {
	// Recognize performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error)
	// LongRunningRecognize performs asynchronous speech recognition: receive results via the
	// google.longrunning.Operations interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// a `LongRunningRecognizeResponse` message.
	LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// StreamingRecognize performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error)
}
// speechClient is the concrete SpeechClient implementation backed by a
// single gRPC client connection.
type speechClient struct {
	cc *grpc.ClientConn
}

// NewSpeechClient returns a SpeechClient that issues its RPCs over cc.
func NewSpeechClient(cc *grpc.ClientConn) SpeechClient {
	return &speechClient{cc}
}
  1349. func (c *speechClient) Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognizeResponse, error) {
  1350. out := new(RecognizeResponse)
  1351. err := c.cc.Invoke(ctx, "/google.cloud.speech.v1.Speech/Recognize", in, out, opts...)
  1352. if err != nil {
  1353. return nil, err
  1354. }
  1355. return out, nil
  1356. }
  1357. func (c *speechClient) LongRunningRecognize(ctx context.Context, in *LongRunningRecognizeRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
  1358. out := new(longrunning.Operation)
  1359. err := c.cc.Invoke(ctx, "/google.cloud.speech.v1.Speech/LongRunningRecognize", in, out, opts...)
  1360. if err != nil {
  1361. return nil, err
  1362. }
  1363. return out, nil
  1364. }
  1365. func (c *speechClient) StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Speech_StreamingRecognizeClient, error) {
  1366. stream, err := c.cc.NewStream(ctx, &_Speech_serviceDesc.Streams[0], "/google.cloud.speech.v1.Speech/StreamingRecognize", opts...)
  1367. if err != nil {
  1368. return nil, err
  1369. }
  1370. x := &speechStreamingRecognizeClient{stream}
  1371. return x, nil
  1372. }
// Speech_StreamingRecognizeClient is the client-side handle for the
// bidirectional StreamingRecognize stream.
type Speech_StreamingRecognizeClient interface {
	// Send pushes one request (config or audio chunk) onto the stream.
	Send(*StreamingRecognizeRequest) error
	// Recv blocks until the next server response (or stream error) arrives.
	Recv() (*StreamingRecognizeResponse, error)
	grpc.ClientStream
}

// speechStreamingRecognizeClient adapts a raw grpc.ClientStream to the
// typed Speech_StreamingRecognizeClient interface.
type speechStreamingRecognizeClient struct {
	grpc.ClientStream
}

// Send marshals m onto the underlying client stream.
func (x *speechStreamingRecognizeClient) Send(m *StreamingRecognizeRequest) error {
	return x.ClientStream.SendMsg(m)
}

// Recv reads and unmarshals the next response from the stream.
func (x *speechStreamingRecognizeClient) Recv() (*StreamingRecognizeResponse, error) {
	m := new(StreamingRecognizeResponse)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
// SpeechServer is the server API for Speech service.
type SpeechServer interface {
	// Recognize performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.
	Recognize(context.Context, *RecognizeRequest) (*RecognizeResponse, error)
	// LongRunningRecognize performs asynchronous speech recognition: receive results via the
	// google.longrunning.Operations interface. Returns either an
	// `Operation.error` or an `Operation.response` which contains
	// a `LongRunningRecognizeResponse` message.
	LongRunningRecognize(context.Context, *LongRunningRecognizeRequest) (*longrunning.Operation, error)
	// StreamingRecognize performs bidirectional streaming speech recognition: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingRecognize(Speech_StreamingRecognizeServer) error
}
// RegisterSpeechServer registers srv's method handlers with the gRPC server
// s under the Speech service descriptor.
func RegisterSpeechServer(s *grpc.Server, srv SpeechServer) {
	s.RegisterService(&_Speech_serviceDesc, srv)
}
// _Speech_Recognize_Handler is the server-side dispatch shim for the
// Recognize RPC: it decodes the request, then invokes the service method
// either directly or through the configured unary interceptor.
func _Speech_Recognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(RecognizeRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SpeechServer).Recognize(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.cloud.speech.v1.Speech/Recognize",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SpeechServer).Recognize(ctx, req.(*RecognizeRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// _Speech_LongRunningRecognize_Handler is the server-side dispatch shim for
// the LongRunningRecognize RPC: it decodes the request, then invokes the
// service method either directly or through the configured unary interceptor.
func _Speech_LongRunningRecognize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(LongRunningRecognizeRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SpeechServer).LongRunningRecognize(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/google.cloud.speech.v1.Speech/LongRunningRecognize",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SpeechServer).LongRunningRecognize(ctx, req.(*LongRunningRecognizeRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// _Speech_StreamingRecognize_Handler wires an incoming gRPC stream to the
// service implementation's StreamingRecognize method.
func _Speech_StreamingRecognize_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(SpeechServer).StreamingRecognize(&speechStreamingRecognizeServer{stream})
}

// Speech_StreamingRecognizeServer is the server-side handle for the
// bidirectional StreamingRecognize stream.
type Speech_StreamingRecognizeServer interface {
	// Send pushes one response onto the stream.
	Send(*StreamingRecognizeResponse) error
	// Recv blocks until the next client request (or stream error) arrives.
	Recv() (*StreamingRecognizeRequest, error)
	grpc.ServerStream
}

// speechStreamingRecognizeServer adapts a raw grpc.ServerStream to the
// typed Speech_StreamingRecognizeServer interface.
type speechStreamingRecognizeServer struct {
	grpc.ServerStream
}

// Send marshals m onto the underlying server stream.
func (x *speechStreamingRecognizeServer) Send(m *StreamingRecognizeResponse) error {
	return x.ServerStream.SendMsg(m)
}

// Recv reads and unmarshals the next request from the stream.
func (x *speechStreamingRecognizeServer) Recv() (*StreamingRecognizeRequest, error) {
	m := new(StreamingRecognizeRequest)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
// _Speech_serviceDesc describes the Speech service to the gRPC runtime:
// its fully-qualified name, the two unary method handlers, and the single
// bidirectional streaming method.
var _Speech_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.cloud.speech.v1.Speech",
	HandlerType: (*SpeechServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Recognize",
			Handler:    _Speech_Recognize_Handler,
		},
		{
			MethodName: "LongRunningRecognize",
			Handler:    _Speech_LongRunningRecognize_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "StreamingRecognize",
			Handler:       _Speech_StreamingRecognize_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "google/cloud/speech/v1/cloud_speech.proto",
}
// init registers the raw (gzipped) file descriptor with the proto runtime
// under its canonical .proto path.
func init() {
	proto.RegisterFile("google/cloud/speech/v1/cloud_speech.proto", fileDescriptor_cloud_speech_1e3d83e50c51747b)
}
// fileDescriptor_cloud_speech_1e3d83e50c51747b holds the gzip-compressed
// serialized FileDescriptorProto for
// google/cloud/speech/v1/cloud_speech.proto. It is registered in init()
// and returned by the generated Descriptor() methods. The bytes are
// machine-emitted and must never be edited by hand.
var fileDescriptor_cloud_speech_1e3d83e50c51747b = []byte{
	// 1479 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x4f, 0x53, 0x1b, 0xc9,
	0x15, 0x67, 0x24, 0x84, 0xe0, 0xf1, 0x4f, 0xb4, 0x89, 0x3d, 0xc8, 0xc4, 0x90, 0x71, 0x5c, 0x06,
	0x1f, 0xa4, 0x80, 0x5d, 0x76, 0xe2, 0xa4, 0x52, 0x11, 0x62, 0x00, 0x55, 0x81, 0x50, 0xb5, 0x44,
	0x70, 0x72, 0xc8, 0x54, 0x33, 0x6a, 0x0d, 0x53, 0x25, 0xf5, 0x4c, 0xa6, 0x7b, 0x88, 0xf1, 0xcd,
	0xb9, 0xa6, 0x2a, 0x97, 0x24, 0x3e, 0xe7, 0x96, 0xca, 0x79, 0x2f, 0xfb, 0x19, 0xf6, 0xb4, 0xb5,
	0xfb, 0x15, 0xf6, 0x43, 0xec, 0x71, 0xab, 0xbb, 0x67, 0x84, 0x24, 0x10, 0xc6, 0x55, 0xde, 0xaa,
	0xbd, 0xa9, 0xdf, 0xfb, 0xbd, 0x37, 0xbf, 0x7e, 0xdd, 0xef, 0xf7, 0x5a, 0xb0, 0xe9, 0x05, 0x81,
	0xd7, 0xa5, 0x65, 0xb7, 0x1b, 0xc4, 0xed, 0x32, 0x0f, 0x29, 0x75, 0xcf, 0xcb, 0x17, 0x5b, 0x7a,
	0xed, 0xe8, 0x75, 0x29, 0x8c, 0x02, 0x11, 0xa0, 0xfb, 0x1a, 0x5a, 0x52, 0xae, 0x52, 0xe2, 0xba,
	0xd8, 0x2a, 0xae, 0x26, 0x29, 0x48, 0xe8, 0x97, 0x09, 0x63, 0x81, 0x20, 0xc2, 0x0f, 0x18, 0xd7,
	0x51, 0xc5, 0xc7, 0x89, 0xb7, 0x1b, 0x30, 0x2f, 0x8a, 0x19, 0xf3, 0x99, 0x57, 0x0e, 0x42, 0x1a,
	0x0d, 0x81, 0x56, 0x12, 0x90, 0x5a, 0x9d, 0xc5, 0x9d, 0x32, 0x61, 0x97, 0x89, 0xeb, 0xd1, 0xa8,
	0xab, 0x1d, 0xeb, 0xd8, 0xc4, 0xff, 0x70, 0xd4, 0x4f, 0x7b, 0xa1, 0x48, 0x83, 0xd7, 0x46, 0x9d,
	0xc2, 0xef, 0x51, 0x2e, 0x48, 0x2f, 0x4c, 0x00, 0x0f, 0x12, 0x40, 0x14, 0xba, 0x65, 0x2e, 0x88,
	0x88, 0x13, 0x46, 0xd6, 0x7f, 0x0c, 0x28, 0x60, 0xea, 0x06, 0x1e, 0xf3, 0xdf, 0x51, 0x4c, 0xff,
	0x1a, 0x53, 0x2e, 0x50, 0x05, 0xa6, 0xdc, 0x80, 0x75, 0x7c, 0xcf, 0x34, 0xd6, 0x8d, 0x8d, 0xd9,
	0xed, 0xcd, 0xd2, 0xcd, 0x25, 0x29, 0x25, 0x91, 0x92, 0x66, 0x55, 0x05, 0xe0, 0x24, 0x10, 0xfd,
	0x1e, 0x72, 0x24, 0x6e, 0xfb, 0x81, 0x99, 0x51, 0x19, 0x36, 0xee, 0x90, 0xa1, 0x22, 0xf1, 0x58,
	0x87, 0x59, 0xff, 0x35, 0xe0, 0xe1, 0x61, 0xc0, 0x3c, 0xac, 0x4b, 0xf9, 0x53, 0xa4, 0xf8, 0xa5,
	0x01, 0x2b, 0x4d, 0x11, 0x51, 0xd2, 0xbb, 0x89, 0xa0, 0x03, 0x05, 0x9e, 0x3a, 0x9d, 0x21, 0xaa,
	0xdb, 0xe3, 0x3e, 0x34, 0x9a, 0xec, 0x8a, 0xf3, 0xc1, 0x04, 0x5e, 0xec, 0x67, 0xd3, 0x26, 0xf4,
	0x04, 0xe6, 0x15, 0x0f, 0x99, 0x5c, 0x50, 0x26, 0xd4, 0x36, 0xe6, 0x0e, 0x26, 0xf0, 0x9c, 0x32,
	0x57, 0xb5, 0x75, 0xe7, 0x1e, 0x2c, 0x5d, 0xf1, 0x88, 0x34, 0x39, 0xeb, 0x0b, 0x03, 0x8a, 0xe3,
	0xbf, 0xf6, 0x39, 0x8a, 0xbb, 0x09, 0x05, 0xee, 0x33, 0xaf, 0x4b, 0x9d, 0x58, 0x08, 0x1a, 0x11,
	0xe6, 0x52, 0x45, 0x70, 0x1a, 0x2f, 0x6a, 0xfb, 0x49, 0x6a, 0x46, 0x4f, 0x61, 0xd1, 0x67, 0x82,
	0x46, 0x7e, 0xcf, 0x89, 0x28, 0x8f, 0xbb, 0x82, 0x9b, 0x59, 0x85, 0x5c, 0x48, 0xcc, 0x58, 0x5b,
	0xad, 0xff, 0x4d, 0xc1, 0xd2, 0x75, 0xb2, 0x4d, 0x98, 0xa6, 0xcc, 0x0d, 0xda, 0x3e, 0xd3, 0x74,
	0x17, 0xb6, 0x5f, 0xdd, 0x99, 0x6e, 0x49, 0x1d, 0xa8, 0x9d, 0x84, 0xe3, 0x7e, 0x22, 0xf4, 0x0c,
	0x96, 0x38, 0xe9, 0x85, 0x5d, 0xea, 0x44, 0x44, 0x50, 0xe7, 0x9c, 0x46, 0xe2, 0x9d, 0xe2, 0x9f,
	0xc3, 0x8b, 0xda, 0x81, 0x89, 0xa0, 0x07, 0xd2, 0x8c, 0x4a, 0x70, 0x2f, 0x39, 0x88, 0x73, 0xc2,
	0x18, 0xed, 0x3a, 0x6e, 0x10, 0x33, 0x61, 0xe6, 0x15, 0x7a, 0x49, 0x1f, 0x86, 0xf6, 0x54, 0xa5,
	0x03, 0xb5, 0xe0, 0x29, 0x65, 0xe4, 0xac, 0x4b, 0x1d, 0x4e, 0x43, 0xa2, 0xf2, 0x47, 0x57, 0xc4,
	0x9c, 0x90, 0x46, 0x69, 0x26, 0x73, 0x4e, 0xd5, 0xe1, 0xb1, 0x86, 0x37, 0x13, 0xf4, 0xc0, 0x2e,
	0x1a, 0x34, 0x4a, 0x52, 0xa3, 0xc7, 0x30, 0xdf, 0x25, 0xcc, 0x8b, 0x89, 0x47, 0x1d, 0x37, 0x68,
	0x53, 0x55, 0xc3, 0x19, 0x3c, 0x97, 0x1a, 0xab, 0x41, 0x9b, 0xca, 0x53, 0xe9, 0x91, 0xb7, 0x0e,
	0xe9, 0x0a, 0x1a, 0x31, 0x22, 0xfc, 0x0b, 0xca, 0xcd, 0x49, 0xbd, 0xab, 0x1e, 0x79, 0x5b, 0x19,
	0x30, 0x4b, 0x68, 0x18, 0x05, 0x1d, 0xc2, 0x7c, 0x71, 0xe9, 0x74, 0x7c, 0xe9, 0x32, 0x73, 0xfa,
	0x00, 0xfb, 0xf6, 0x3d, 0x65, 0x46, 0x75, 0x58, 0xd4, 0x35, 0xd6, 0x57, 0xf1, 0xad, 0xe0, 0xe6,
	0xd4, 0x7a, 0x76, 0x63, 0x76, 0xfb, 0xc9, 0xd8, 0x9b, 0xae, 0x7e, 0x55, 0x35, 0x1a, 0x2f, 0xf0,
	0xc1, 0x25, 0x47, 0xaf, 0xc0, 0x4c, 0x0a, 0xf4, 0xb7, 0x20, 0x6a, 0x3b, 0x52, 0xcb, 0x9c, 0xa0,
	0xd3, 0xe1, 0x54, 0x70, 0x73, 0x5a, 0x51, 0xf8, 0x99, 0xf6, 0x9f, 0x06, 0x51, 0xbb, 0xe5, 0xf7,
	0xe8, 0xb1, 0x76, 0xa2, 0x3f, 0xc0, 0x6a, 0x12, 0x48, 0x62, 0x11, 0xf4, 0x88, 0xf0, 0x5d, 0x27,
	0x8c, 0x99, 0x2b, 0x62, 0xa5, 0xa4, 0xe6, 0xac, 0x0a, 0x2e, 0x6a, 0x4c, 0x25, 0x85, 0x34, 0xae,
	0x10, 0x68, 0x19, 0x72, 0xbd, 0xa0, 0x4d, 0xbb, 0xe6, 0xbc, 0xaa, 0x9e, 0x5e, 0xa0, 0x5f, 0xc0,
	0x5c, 0xcc, 0xa9, 0x43, 0xd9, 0xb9, 0xbc, 0xb0, 0x6d, 0x73, 0x41, 0xe5, 0x99, 0x8d, 0x39, 0xb5,
	0x13, 0x93, 0xf5, 0x0f, 0x03, 0xe6, 0x87, 0x2e, 0x13, 0x32, 0x61, 0xd9, 0xae, 0x57, 0x8f, 0x77,
	0x6b, 0xf5, 0x7d, 0xe7, 0xa4, 0xde, 0x6c, 0xd8, 0xd5, 0xda, 0x5e, 0xcd, 0xde, 0x2d, 0x4c, 0xa0,
	0x39, 0x98, 0x3e, 0xac, 0xd5, 0xed, 0x0a, 0xde, 0x7a, 0x59, 0x30, 0xd0, 0x34, 0x4c, 0xee, 0x1d,
	0x56, 0xaa, 0x85, 0x0c, 0x9a, 0x81, 0xdc, 0xd1, 0xc9, 0x61, 0xe5, 0xb4, 0x90, 0x45, 0x79, 0xc8,
	0x56, 0x8e, 0x70, 0x61, 0x12, 0x01, 0x4c, 0x55, 0x8e, 0xb0, 0x73, 0xba, 0x53, 0xc8, 0xc9, 0xb8,
	0xe3, 0xfd, 0x7d, 0xe7, 0xb8, 0x71, 0xd2, 0x2c, 0x4c, 0xa1, 0x22, 0xdc, 0x6f, 0x36, 0x6c, 0xfb,
	0x8d, 0x73, 0x5a, 0x6b, 0x1d, 0x38, 0x07, 0x76, 0x65, 0xd7, 0xc6, 0xce, 0xce, 0x9f, 0x5a, 0x76,
	0x21, 0x6f, 0x6d, 0xc2, 0xfc, 0x50, 0x89, 0x91, 0x09, 0xf9, 0xf0, 0x3c, 0x22, 0x9c, 0x72, 0xd3,
	0x58, 0xcf, 0x6e, 0xcc, 0xe0, 0x74, 0x69, 0xe1, 0xbe, 0xfe, 0xf7, 0x05, 0x0e, 0x15, 0x21, 0x9f,
	0x8a, 0x8a, 0x91, 0x88, 0x4a, 0x6a, 0x40, 0x08, 0xb2, 0x71, 0xe4, 0xab, 0x5e, 0x98, 0x39, 0x98,
	0xc0, 0x72, 0xb1, 0xb3, 0x00, 0x5a, 0x73, 0x1c, 0x1e, 0xc4, 0x91, 0x4b, 0xad, 0xbf, 0xf4, 0xfb,
	0x54, 0xea, 0x21, 0x0f, 0x03, 0xc6, 0x29, 0xaa, 0x41, 0x3e, 0x6d, 0xef, 0x8c, 0xba, 0x1d, 0xe5,
	0xdb, 0x6f, 0xc7, 0x00, 0x2b, 0x2d, 0x00, 0x38, 0x8d, 0xb7, 0x7c, 0x58, 0xbd, 0x79, 0x36, 0x7c,
	0xfe, 0x4f, 0x7d, 0x65, 0xdc, 0xfc, 0xad, 0x23, 0x2a, 0x48, 0x9b, 0x08, 0x92, 0xf4, 0x89, 0x17,
	0x51, 0xce, 0x65, 0xeb, 0xba, 0x69, 0xd1, 0x72, 0xaa, 0x4f, 0x94, 0xbd, 0xa1, 0xcd, 0xe8, 0x37,
	0x00, 0x5c, 0x90, 0x48, 0xa8, 0x1b, 0x9d, 0x4c, 0x9d, 0x62, 0xca, 0x2c, 0x1d, 0xdd, 0xa5, 0x56,
	0x3a, 0xba, 0xf1, 0x8c, 0x42, 0xcb, 0x35, 0xda, 0x85, 0x42, 0x97, 0x70, 0xe1, 0xc4, 0x61, 0x5b,
	0xea, 0x85, 0x4a, 0x90, 0xfd, 0x68, 0x82, 0x05, 0x19, 0x73, 0xa2, 0x42, 0xa4, 0xd1, 0xfa, 0x26,
	0x73, 0x5d, 0xf6, 0x07, 0xca, 0xb6, 0x01, 0x39, 0x1a, 0x45, 0x41, 0x94, 0xa8, 0x3e, 0x4a, 0x33,
	0x47, 0xa1, 0x5b, 0x6a, 0xaa, 0x47, 0x03, 0xd6, 0x00, 0x74, 0x38, 0x5a, 0xe0, 0x4f, 0x9a, 0x69,
	0x23, 0x35, 0x46, 0x0c, 0x96, 0x12, 0xfd, 0xa0, 0x17, 0x94, 0x09, 0x47, 0x5c, 0x86, 0x54, 0xc9,
	0xd2, 0xc2, 0xf6, 0xce, 0x5d, 0xf3, 0x5e, 0x6d, 0x23, 0x39, 0x53, 0x5b, 0xa6, 0x6a, 0x5d, 0x86,
	0x14, 0x27, 0xe2, 0xd4, 0x37, 0x58, 0x87, 0xb0, 0x38, 0x82, 0x41, 0xab, 0x60, 0xca, 0x66, 0xaa,
	0x1e, 0x38, 0xf6, 0x1f, 0xed, 0x7a, 0x6b, 0xa4, 0x61, 0x1f, 0xc2, 0x03, 0xbb, 0xbe, 0xeb, 0x1c,
	0xef, 0x39, 0xcd, 0x5a, 0x7d, 0xff, 0xd0, 0x76, 0x4e, 0x5a, 0x2d, 0x1b, 0x57, 0xea, 0x55, 0xbb,
	0x60, 0x58, 0x5f, 0x8f, 0x99, 0xa5, 0x7a, 0x97, 0xe8, 0x0d, 0xcc, 0x0d, 0xc9, 0xad, 0xa1, 0xea,
	0xf5, 0xe2, 0xce, 0x17, 0x72, 0x40, 0x94, 0xf1, 0x50, 0x26, 0xb4, 0x02, 0xd3, 0x3e, 0x77, 0x3a,
	0x3e, 0x23, 0xdd, 0x64, 0xb4, 0xe6, 0x7d, 0xbe, 0x27, 0x97, 0x68, 0x15, 0xe4, 0xdd, 0x39, 0xf3,
	0xbb, 0xbe, 0xb8, 0x54, 0xf7, 0x24, 0x83, 0xaf, 0x0c, 0x68, 0x0d, 0x66, 0xd3, 0x51, 0x25, 0x88,
	0xa7, 0x54, 0x3d, 0x87, 0x21, 0x31, 0xb5, 0x88, 0x67, 0xfd, 0xdb, 0x80, 0x07, 0x63, 0x3a, 0xe3,
	0x47, 0xdc, 0xcf, 0x08, 0xad, 0xcc, 0x35, 0x5a, 0x1f, 0x0c, 0x58, 0xbd, 0x2d, 0x1f, 0x7a, 0x04,
	0x20, 0x22, 0xc2, 0xb8, 0x1b, 0xf9, 0xa1, 0xee, 0xc2, 0x19, 0x3c, 0x60, 0x91, 0x7e, 0xf5, 0x3c,
	0x69, 0xd3, 0xf4, 0x39, 0x92, 0xc1, 0x03, 0x16, 0xf4, 0x12, 0x72, 0x72, 0xe2, 0xc8, 0xf7, 0x87,
	0xdc, 0xd4, 0xfa, 0xb8, 0x4d, 0xc9, 0xb9, 0x53, 0x63, 0x9d, 0x00, 0x6b, 0xb8, 0xf5, 0x4f, 0x03,
	0xa6, 0x53, 0x1b, 0xfa, 0xf5, 0x50, 0x97, 0xeb, 0x56, 0x5a, 0xb9, 0xd6, 0xa4, 0xbb, 0xc9, 0xeb,
	0x7e, 0xb0, 0xc9, 0x5f, 0xc8, 0x97, 0x4c, 0x7b, 0x50, 0x1d, 0x6e, 0x89, 0xcb, 0x53, 0xa6, 0x66,
	0x1f, 0x42, 0x30, 0x29, 0x59, 0x24, 0xf3, 0x5e, 0xfd, 0xde, 0xfe, 0x7f, 0x16, 0xa6, 0x74, 0xa5,
	0xd0, 0x7b, 0x03, 0x66, 0xfa, 0x3d, 0x82, 0x3e, 0xf6, 0xc8, 0xed, 0xbf, 0x5f, 0x8b, 0x9b, 0x77,
	0x40, 0xea, 0x86, 0xb3, 0xd6, 0xfe, 0xfe, 0xed, 0x77, 0xff, 0xca, 0xac, 0x58, 0xcb, 0xf2, 0x0f,
	0x95, 0x06, 0xbe, 0x8e, 0x52, 0xd4, 0x6b, 0xe3, 0x19, 0xfa, 0x60, 0xc0, 0xf2, 0x4d, 0x22, 0x8a,
	0x9e, 0x8f, 0xfb, 0xc8, 0x2d, 0x4f, 0xff, 0xe2, 0xcf, 0xd3, 0xa0, 0x81, 0xbf, 0x5a, 0xa5, 0xe3,
	0xf4, 0xaf, 0x96, 0xf5, 0x4c, 0xb1, 0xf9, 0xa5, 0xb5, 0x36, 0xc0, 0x66, 0x00, 0x39, 0x44, 0xec,
	0xbd, 0x01, 0xe8, 0xba, 0x92, 0xa0, 0xad, 0x4f, 0x51, 0x1d, 0x4d, 0x6a, 0xfb, 0xd3, 0x85, 0xca,
	0x9a, 0xd8, 0x30, 0x7e, 0x65, 0xec, 0x74, 0xa1, 0xe8, 0x06, 0xbd, 0x31, 0xe1, 0x3b, 0xb3, 0xfa,
	0x18, 0x1b, 0xf2, 0xfc, 0x1b, 0xc6, 0x9f, 0x7f, 0x97, 0xc0, 0xbc, 0x40, 0xbe, 0xeb, 0x4a, 0x41,
	0xe4, 0x95, 0x3d, 0xca, 0xd4, 0xed, 0x28, 0x6b, 0x17, 0x09, 0x7d, 0x3e, 0xfa, 0x2f, 0xf7, 0xb7,
	0xfa, 0xd7, 0xf7, 0x86, 0x71, 0x36, 0xa5, 0xb0, 0xcf, 0x7f, 0x08, 0x00, 0x00, 0xff, 0xff, 0x37,
	0x96, 0x14, 0x87, 0x10, 0x0f, 0x00, 0x00,
}