Documentation ¶
Overview ¶
Package videointelligence is a generated protocol buffer package.
It is generated from these files:
google/cloud/videointelligence/v1/video_intelligence.proto
It has these top-level messages:
AnnotateVideoRequest VideoContext LabelDetectionConfig ShotChangeDetectionConfig ExplicitContentDetectionConfig FaceDetectionConfig VideoSegment LabelSegment LabelFrame Entity LabelAnnotation ExplicitContentFrame ExplicitContentAnnotation NormalizedBoundingBox FaceSegment FaceFrame FaceAnnotation VideoAnnotationResults AnnotateVideoResponse VideoAnnotationProgress AnnotateVideoProgress
Index ¶
- Variables
- func RegisterVideoIntelligenceServiceServer(s *grpc.Server, srv VideoIntelligenceServiceServer)
- type AnnotateVideoProgress
- type AnnotateVideoRequest
- func (*AnnotateVideoRequest) Descriptor() ([]byte, []int)
- func (m *AnnotateVideoRequest) GetFeatures() []Feature
- func (m *AnnotateVideoRequest) GetInputContent() []byte
- func (m *AnnotateVideoRequest) GetInputUri() string
- func (m *AnnotateVideoRequest) GetLocationId() string
- func (m *AnnotateVideoRequest) GetOutputUri() string
- func (m *AnnotateVideoRequest) GetVideoContext() *VideoContext
- func (*AnnotateVideoRequest) ProtoMessage()
- func (m *AnnotateVideoRequest) Reset()
- func (m *AnnotateVideoRequest) String() string
- type AnnotateVideoResponse
- type Entity
- type ExplicitContentAnnotation
- type ExplicitContentDetectionConfig
- type ExplicitContentFrame
- func (*ExplicitContentFrame) Descriptor() ([]byte, []int)
- func (m *ExplicitContentFrame) GetPornographyLikelihood() Likelihood
- func (m *ExplicitContentFrame) GetTimeOffset() *google_protobuf3.Duration
- func (*ExplicitContentFrame) ProtoMessage()
- func (m *ExplicitContentFrame) Reset()
- func (m *ExplicitContentFrame) String() string
- type FaceAnnotation
- func (*FaceAnnotation) Descriptor() ([]byte, []int)
- func (m *FaceAnnotation) GetFrames() []*FaceFrame
- func (m *FaceAnnotation) GetSegments() []*FaceSegment
- func (m *FaceAnnotation) GetThumbnail() []byte
- func (*FaceAnnotation) ProtoMessage()
- func (m *FaceAnnotation) Reset()
- func (m *FaceAnnotation) String() string
- type FaceDetectionConfig
- type FaceFrame
- type FaceSegment
- type Feature
- type LabelAnnotation
- func (*LabelAnnotation) Descriptor() ([]byte, []int)
- func (m *LabelAnnotation) GetCategoryEntities() []*Entity
- func (m *LabelAnnotation) GetEntity() *Entity
- func (m *LabelAnnotation) GetFrames() []*LabelFrame
- func (m *LabelAnnotation) GetSegments() []*LabelSegment
- func (*LabelAnnotation) ProtoMessage()
- func (m *LabelAnnotation) Reset()
- func (m *LabelAnnotation) String() string
- type LabelDetectionConfig
- func (*LabelDetectionConfig) Descriptor() ([]byte, []int)
- func (m *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionMode
- func (m *LabelDetectionConfig) GetModel() string
- func (m *LabelDetectionConfig) GetStationaryCamera() bool
- func (*LabelDetectionConfig) ProtoMessage()
- func (m *LabelDetectionConfig) Reset()
- func (m *LabelDetectionConfig) String() string
- type LabelDetectionMode
- type LabelFrame
- type LabelSegment
- type Likelihood
- type NormalizedBoundingBox
- func (*NormalizedBoundingBox) Descriptor() ([]byte, []int)
- func (m *NormalizedBoundingBox) GetBottom() float32
- func (m *NormalizedBoundingBox) GetLeft() float32
- func (m *NormalizedBoundingBox) GetRight() float32
- func (m *NormalizedBoundingBox) GetTop() float32
- func (*NormalizedBoundingBox) ProtoMessage()
- func (m *NormalizedBoundingBox) Reset()
- func (m *NormalizedBoundingBox) String() string
- type ShotChangeDetectionConfig
- type VideoAnnotationProgress
- func (*VideoAnnotationProgress) Descriptor() ([]byte, []int)
- func (m *VideoAnnotationProgress) GetInputUri() string
- func (m *VideoAnnotationProgress) GetProgressPercent() int32
- func (m *VideoAnnotationProgress) GetStartTime() *google_protobuf4.Timestamp
- func (m *VideoAnnotationProgress) GetUpdateTime() *google_protobuf4.Timestamp
- func (*VideoAnnotationProgress) ProtoMessage()
- func (m *VideoAnnotationProgress) Reset()
- func (m *VideoAnnotationProgress) String() string
- type VideoAnnotationResults
- func (*VideoAnnotationResults) Descriptor() ([]byte, []int)
- func (m *VideoAnnotationResults) GetError() *google_rpc.Status
- func (m *VideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation
- func (m *VideoAnnotationResults) GetFaceAnnotations() []*FaceAnnotation
- func (m *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation
- func (m *VideoAnnotationResults) GetInputUri() string
- func (m *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation
- func (m *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment
- func (m *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation
- func (*VideoAnnotationResults) ProtoMessage()
- func (m *VideoAnnotationResults) Reset()
- func (m *VideoAnnotationResults) String() string
- type VideoContext
- func (*VideoContext) Descriptor() ([]byte, []int)
- func (m *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig
- func (m *VideoContext) GetFaceDetectionConfig() *FaceDetectionConfig
- func (m *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig
- func (m *VideoContext) GetSegments() []*VideoSegment
- func (m *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig
- func (*VideoContext) ProtoMessage()
- func (m *VideoContext) Reset()
- func (m *VideoContext) String() string
- type VideoIntelligenceServiceClient
- type VideoIntelligenceServiceServer
- type VideoSegment
Constants ¶
This section is empty.
Variables ¶
var Feature_name = map[int32]string{
0: "FEATURE_UNSPECIFIED",
1: "LABEL_DETECTION",
2: "SHOT_CHANGE_DETECTION",
3: "EXPLICIT_CONTENT_DETECTION",
4: "FACE_DETECTION",
}
var Feature_value = map[string]int32{
"FEATURE_UNSPECIFIED": 0,
"LABEL_DETECTION": 1,
"SHOT_CHANGE_DETECTION": 2,
"EXPLICIT_CONTENT_DETECTION": 3,
"FACE_DETECTION": 4,
}
var LabelDetectionMode_name = map[int32]string{
0: "LABEL_DETECTION_MODE_UNSPECIFIED",
1: "SHOT_MODE",
2: "FRAME_MODE",
3: "SHOT_AND_FRAME_MODE",
}
var LabelDetectionMode_value = map[string]int32{
"LABEL_DETECTION_MODE_UNSPECIFIED": 0,
"SHOT_MODE": 1,
"FRAME_MODE": 2,
"SHOT_AND_FRAME_MODE": 3,
}
var Likelihood_name = map[int32]string{
0: "LIKELIHOOD_UNSPECIFIED",
1: "VERY_UNLIKELY",
2: "UNLIKELY",
3: "POSSIBLE",
4: "LIKELY",
5: "VERY_LIKELY",
}
var Likelihood_value = map[string]int32{
"LIKELIHOOD_UNSPECIFIED": 0,
"VERY_UNLIKELY": 1,
"UNLIKELY": 2,
"POSSIBLE": 3,
"LIKELY": 4,
"VERY_LIKELY": 5,
}
Functions ¶
func RegisterVideoIntelligenceServiceServer ¶
func RegisterVideoIntelligenceServiceServer(s *grpc.Server, srv VideoIntelligenceServiceServer)
Types ¶
type AnnotateVideoProgress ¶
type AnnotateVideoProgress struct { // Progress metadata for all videos specified in `AnnotateVideoRequest`. AnnotationProgress []*VideoAnnotationProgress `protobuf:"bytes,1,rep,name=annotation_progress,json=annotationProgress" json:"annotation_progress,omitempty"` }
Video annotation progress. Included in the `metadata` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.
func (*AnnotateVideoProgress) Descriptor ¶
func (*AnnotateVideoProgress) Descriptor() ([]byte, []int)
func (*AnnotateVideoProgress) GetAnnotationProgress ¶
func (m *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress
func (*AnnotateVideoProgress) ProtoMessage ¶
func (*AnnotateVideoProgress) ProtoMessage()
func (*AnnotateVideoProgress) Reset ¶
func (m *AnnotateVideoProgress) Reset()
func (*AnnotateVideoProgress) String ¶
func (m *AnnotateVideoProgress) String() string
type AnnotateVideoRequest ¶
type AnnotateVideoRequest struct { // Input video location. Currently, only // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are // supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see // [Request URIs](/storage/docs/reference-uris). // A video URI may include wildcards in `object-id`, and thus identify // multiple videos. Supported wildcards: '*' to match 0 or more characters; // '?' to match 1 character. If unset, the input video should be embedded // in the request as `input_content`. If set, `input_content` should be unset. InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"` // The video data bytes. // If unset, the input video(s) should be specified via `input_uri`. // If set, `input_uri` should be unset. InputContent []byte `protobuf:"bytes,6,opt,name=input_content,json=inputContent,proto3" json:"input_content,omitempty"` // Requested video annotation features. Features []Feature `protobuf:"varint,2,rep,packed,name=features,enum=google.cloud.videointelligence.v1.Feature" json:"features,omitempty"` // Additional video context and/or feature-specific parameters. VideoContext *VideoContext `protobuf:"bytes,3,opt,name=video_context,json=videoContext" json:"video_context,omitempty"` // Optional location where the output (in JSON format) should be stored. // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) // URIs are supported, which must be specified in the following format: // `gs://bucket-id/object-id` (other URI formats return // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see // [Request URIs](/storage/docs/reference-uris). 
OutputUri string `protobuf:"bytes,4,opt,name=output_uri,json=outputUri" json:"output_uri,omitempty"` // Optional cloud region where annotation should take place. Supported cloud // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region // is specified, a region will be determined based on video file location. LocationId string `protobuf:"bytes,5,opt,name=location_id,json=locationId" json:"location_id,omitempty"` }
Video annotation request.
func (*AnnotateVideoRequest) Descriptor ¶
func (*AnnotateVideoRequest) Descriptor() ([]byte, []int)
func (*AnnotateVideoRequest) GetFeatures ¶
func (m *AnnotateVideoRequest) GetFeatures() []Feature
func (*AnnotateVideoRequest) GetInputContent ¶
func (m *AnnotateVideoRequest) GetInputContent() []byte
func (*AnnotateVideoRequest) GetInputUri ¶
func (m *AnnotateVideoRequest) GetInputUri() string
func (*AnnotateVideoRequest) GetLocationId ¶
func (m *AnnotateVideoRequest) GetLocationId() string
func (*AnnotateVideoRequest) GetOutputUri ¶
func (m *AnnotateVideoRequest) GetOutputUri() string
func (*AnnotateVideoRequest) GetVideoContext ¶
func (m *AnnotateVideoRequest) GetVideoContext() *VideoContext
func (*AnnotateVideoRequest) ProtoMessage ¶
func (*AnnotateVideoRequest) ProtoMessage()
func (*AnnotateVideoRequest) Reset ¶
func (m *AnnotateVideoRequest) Reset()
func (*AnnotateVideoRequest) String ¶
func (m *AnnotateVideoRequest) String() string
type AnnotateVideoResponse ¶
type AnnotateVideoResponse struct { // Annotation results for all videos specified in `AnnotateVideoRequest`. AnnotationResults []*VideoAnnotationResults `protobuf:"bytes,1,rep,name=annotation_results,json=annotationResults" json:"annotation_results,omitempty"` }
Video annotation response. Included in the `response` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.
func (*AnnotateVideoResponse) Descriptor ¶
func (*AnnotateVideoResponse) Descriptor() ([]byte, []int)
func (*AnnotateVideoResponse) GetAnnotationResults ¶
func (m *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults
func (*AnnotateVideoResponse) ProtoMessage ¶
func (*AnnotateVideoResponse) ProtoMessage()
func (*AnnotateVideoResponse) Reset ¶
func (m *AnnotateVideoResponse) Reset()
func (*AnnotateVideoResponse) String ¶
func (m *AnnotateVideoResponse) String() string
type Entity ¶
type Entity struct { // Opaque entity ID. Some IDs may be available in // [Google Knowledge Graph Search // API](https://developers.google.com/knowledge-graph/). EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId" json:"entity_id,omitempty"` // Textual description, e.g. `Fixed-gear bicycle`. Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` // Language code for `description` in BCP-47 format. LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` }
Detected entity from video analysis.
func (*Entity) Descriptor ¶
func (*Entity) GetDescription ¶
func (*Entity) GetEntityId ¶
func (*Entity) GetLanguageCode ¶
func (*Entity) ProtoMessage ¶
func (*Entity) ProtoMessage()
type ExplicitContentAnnotation ¶
type ExplicitContentAnnotation struct { // All video frames where explicit content was detected. Frames []*ExplicitContentFrame `protobuf:"bytes,1,rep,name=frames" json:"frames,omitempty"` }
Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame.
func (*ExplicitContentAnnotation) Descriptor ¶
func (*ExplicitContentAnnotation) Descriptor() ([]byte, []int)
func (*ExplicitContentAnnotation) GetFrames ¶
func (m *ExplicitContentAnnotation) GetFrames() []*ExplicitContentFrame
func (*ExplicitContentAnnotation) ProtoMessage ¶
func (*ExplicitContentAnnotation) ProtoMessage()
func (*ExplicitContentAnnotation) Reset ¶
func (m *ExplicitContentAnnotation) Reset()
func (*ExplicitContentAnnotation) String ¶
func (m *ExplicitContentAnnotation) String() string
type ExplicitContentDetectionConfig ¶
type ExplicitContentDetectionConfig struct { // Model to use for explicit content detection. // Supported values: "builtin/stable" (the default if unset) and // "builtin/latest". Model string `protobuf:"bytes,1,opt,name=model" json:"model,omitempty"` }
Config for EXPLICIT_CONTENT_DETECTION.
func (*ExplicitContentDetectionConfig) Descriptor ¶
func (*ExplicitContentDetectionConfig) Descriptor() ([]byte, []int)
func (*ExplicitContentDetectionConfig) GetModel ¶
func (m *ExplicitContentDetectionConfig) GetModel() string
func (*ExplicitContentDetectionConfig) ProtoMessage ¶
func (*ExplicitContentDetectionConfig) ProtoMessage()
func (*ExplicitContentDetectionConfig) Reset ¶
func (m *ExplicitContentDetectionConfig) Reset()
func (*ExplicitContentDetectionConfig) String ¶
func (m *ExplicitContentDetectionConfig) String() string
type ExplicitContentFrame ¶
type ExplicitContentFrame struct { // Time-offset, relative to the beginning of the video, corresponding to the // video frame for this location. TimeOffset *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset" json:"time_offset,omitempty"` // Likelihood of the pornography content. PornographyLikelihood Likelihood `` /* 168-byte string literal not displayed */ }
Video frame level annotation results for explicit content.
func (*ExplicitContentFrame) Descriptor ¶
func (*ExplicitContentFrame) Descriptor() ([]byte, []int)
func (*ExplicitContentFrame) GetPornographyLikelihood ¶
func (m *ExplicitContentFrame) GetPornographyLikelihood() Likelihood
func (*ExplicitContentFrame) GetTimeOffset ¶
func (m *ExplicitContentFrame) GetTimeOffset() *google_protobuf3.Duration
func (*ExplicitContentFrame) ProtoMessage ¶
func (*ExplicitContentFrame) ProtoMessage()
func (*ExplicitContentFrame) Reset ¶
func (m *ExplicitContentFrame) Reset()
func (*ExplicitContentFrame) String ¶
func (m *ExplicitContentFrame) String() string
type FaceAnnotation ¶
type FaceAnnotation struct { // Thumbnail of a representative face view (in JPEG format). Thumbnail []byte `protobuf:"bytes,1,opt,name=thumbnail,proto3" json:"thumbnail,omitempty"` // All video segments where a face was detected. Segments []*FaceSegment `protobuf:"bytes,2,rep,name=segments" json:"segments,omitempty"` // All video frames where a face was detected. Frames []*FaceFrame `protobuf:"bytes,3,rep,name=frames" json:"frames,omitempty"` }
Face annotation.
func (*FaceAnnotation) Descriptor ¶
func (*FaceAnnotation) Descriptor() ([]byte, []int)
func (*FaceAnnotation) GetFrames ¶
func (m *FaceAnnotation) GetFrames() []*FaceFrame
func (*FaceAnnotation) GetSegments ¶
func (m *FaceAnnotation) GetSegments() []*FaceSegment
func (*FaceAnnotation) GetThumbnail ¶
func (m *FaceAnnotation) GetThumbnail() []byte
func (*FaceAnnotation) ProtoMessage ¶
func (*FaceAnnotation) ProtoMessage()
func (*FaceAnnotation) Reset ¶
func (m *FaceAnnotation) Reset()
func (*FaceAnnotation) String ¶
func (m *FaceAnnotation) String() string
type FaceDetectionConfig ¶
type FaceDetectionConfig struct { // Model to use for face detection. // Supported values: "builtin/stable" (the default if unset) and // "builtin/latest". Model string `protobuf:"bytes,1,opt,name=model" json:"model,omitempty"` // Whether bounding boxes should be included in the face annotation output. IncludeBoundingBoxes bool `protobuf:"varint,2,opt,name=include_bounding_boxes,json=includeBoundingBoxes" json:"include_bounding_boxes,omitempty"` }
Config for FACE_DETECTION.
func (*FaceDetectionConfig) Descriptor ¶
func (*FaceDetectionConfig) Descriptor() ([]byte, []int)
func (*FaceDetectionConfig) GetIncludeBoundingBoxes ¶
func (m *FaceDetectionConfig) GetIncludeBoundingBoxes() bool
func (*FaceDetectionConfig) GetModel ¶
func (m *FaceDetectionConfig) GetModel() string
func (*FaceDetectionConfig) ProtoMessage ¶
func (*FaceDetectionConfig) ProtoMessage()
func (*FaceDetectionConfig) Reset ¶
func (m *FaceDetectionConfig) Reset()
func (*FaceDetectionConfig) String ¶
func (m *FaceDetectionConfig) String() string
type FaceFrame ¶
type FaceFrame struct { // Normalized Bounding boxes in a frame. // There can be more than one box if the same face is detected in multiple // locations within the current frame. NormalizedBoundingBoxes []*NormalizedBoundingBox `protobuf:"bytes,1,rep,name=normalized_bounding_boxes,json=normalizedBoundingBoxes" json:"normalized_bounding_boxes,omitempty"` // Time-offset, relative to the beginning of the video, // corresponding to the video frame for this location. TimeOffset *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset" json:"time_offset,omitempty"` }
Video frame level annotation results for face detection.
func (*FaceFrame) Descriptor ¶
func (*FaceFrame) GetNormalizedBoundingBoxes ¶
func (m *FaceFrame) GetNormalizedBoundingBoxes() []*NormalizedBoundingBox
func (*FaceFrame) GetTimeOffset ¶
func (m *FaceFrame) GetTimeOffset() *google_protobuf3.Duration
func (*FaceFrame) ProtoMessage ¶
func (*FaceFrame) ProtoMessage()
type FaceSegment ¶
type FaceSegment struct { // Video segment where a face was detected. Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment" json:"segment,omitempty"` }
Video segment level annotation results for face detection.
func (*FaceSegment) Descriptor ¶
func (*FaceSegment) Descriptor() ([]byte, []int)
func (*FaceSegment) GetSegment ¶
func (m *FaceSegment) GetSegment() *VideoSegment
func (*FaceSegment) ProtoMessage ¶
func (*FaceSegment) ProtoMessage()
func (*FaceSegment) Reset ¶
func (m *FaceSegment) Reset()
func (*FaceSegment) String ¶
func (m *FaceSegment) String() string
type Feature ¶
type Feature int32
Video annotation feature.
const ( // Unspecified. Feature_FEATURE_UNSPECIFIED Feature = 0 // Label detection. Detect objects, such as dog or flower. Feature_LABEL_DETECTION Feature = 1 // Shot change detection. Feature_SHOT_CHANGE_DETECTION Feature = 2 // Explicit content detection. Feature_EXPLICIT_CONTENT_DETECTION Feature = 3 // Human face detection and tracking. Feature_FACE_DETECTION Feature = 4 )
func (Feature) EnumDescriptor ¶
type LabelAnnotation ¶
type LabelAnnotation struct { // Detected entity. Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"` // Common categories for the detected entity. // E.g. when the label is `Terrier` the category is likely `dog`. And in some // cases there might be more than one category, e.g. `Terrier` could also be // a `pet`. CategoryEntities []*Entity `protobuf:"bytes,2,rep,name=category_entities,json=categoryEntities" json:"category_entities,omitempty"` // All video segments where a label was detected. Segments []*LabelSegment `protobuf:"bytes,3,rep,name=segments" json:"segments,omitempty"` // All video frames where a label was detected. Frames []*LabelFrame `protobuf:"bytes,4,rep,name=frames" json:"frames,omitempty"` }
Label annotation.
func (*LabelAnnotation) Descriptor ¶
func (*LabelAnnotation) Descriptor() ([]byte, []int)
func (*LabelAnnotation) GetCategoryEntities ¶
func (m *LabelAnnotation) GetCategoryEntities() []*Entity
func (*LabelAnnotation) GetEntity ¶
func (m *LabelAnnotation) GetEntity() *Entity
func (*LabelAnnotation) GetFrames ¶
func (m *LabelAnnotation) GetFrames() []*LabelFrame
func (*LabelAnnotation) GetSegments ¶
func (m *LabelAnnotation) GetSegments() []*LabelSegment
func (*LabelAnnotation) ProtoMessage ¶
func (*LabelAnnotation) ProtoMessage()
func (*LabelAnnotation) Reset ¶
func (m *LabelAnnotation) Reset()
func (*LabelAnnotation) String ¶
func (m *LabelAnnotation) String() string
type LabelDetectionConfig ¶
type LabelDetectionConfig struct { // What labels should be detected with LABEL_DETECTION, in addition to // video-level labels or segment-level labels. // If unspecified, defaults to `SHOT_MODE`. LabelDetectionMode LabelDetectionMode `` /* 169-byte string literal not displayed */ // Whether the video has been shot from a stationary (i.e. non-moving) camera. // When set to true, might improve detection accuracy for moving objects. // Should be used with `SHOT_AND_FRAME_MODE` enabled. StationaryCamera bool `protobuf:"varint,2,opt,name=stationary_camera,json=stationaryCamera" json:"stationary_camera,omitempty"` // Model to use for label detection. // Supported values: "builtin/stable" (the default if unset) and // "builtin/latest". Model string `protobuf:"bytes,3,opt,name=model" json:"model,omitempty"` }
Config for LABEL_DETECTION.
func (*LabelDetectionConfig) Descriptor ¶
func (*LabelDetectionConfig) Descriptor() ([]byte, []int)
func (*LabelDetectionConfig) GetLabelDetectionMode ¶
func (m *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionMode
func (*LabelDetectionConfig) GetModel ¶
func (m *LabelDetectionConfig) GetModel() string
func (*LabelDetectionConfig) GetStationaryCamera ¶
func (m *LabelDetectionConfig) GetStationaryCamera() bool
func (*LabelDetectionConfig) ProtoMessage ¶
func (*LabelDetectionConfig) ProtoMessage()
func (*LabelDetectionConfig) Reset ¶
func (m *LabelDetectionConfig) Reset()
func (*LabelDetectionConfig) String ¶
func (m *LabelDetectionConfig) String() string
type LabelDetectionMode ¶
type LabelDetectionMode int32
Label detection mode.
const ( // Unspecified. LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED LabelDetectionMode = 0 // Detect shot-level labels. LabelDetectionMode_SHOT_MODE LabelDetectionMode = 1 // Detect frame-level labels. LabelDetectionMode_FRAME_MODE LabelDetectionMode = 2 // Detect both shot-level and frame-level labels. LabelDetectionMode_SHOT_AND_FRAME_MODE LabelDetectionMode = 3 )
func (LabelDetectionMode) EnumDescriptor ¶
func (LabelDetectionMode) EnumDescriptor() ([]byte, []int)
func (LabelDetectionMode) String ¶
func (x LabelDetectionMode) String() string
type LabelFrame ¶
type LabelFrame struct { // Time-offset, relative to the beginning of the video, corresponding to the // video frame for this location. TimeOffset *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset" json:"time_offset,omitempty"` // Confidence that the label is accurate. Range: [0, 1]. Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` }
Video frame level annotation results for label detection.
func (*LabelFrame) Descriptor ¶
func (*LabelFrame) Descriptor() ([]byte, []int)
func (*LabelFrame) GetConfidence ¶
func (m *LabelFrame) GetConfidence() float32
func (*LabelFrame) GetTimeOffset ¶
func (m *LabelFrame) GetTimeOffset() *google_protobuf3.Duration
func (*LabelFrame) ProtoMessage ¶
func (*LabelFrame) ProtoMessage()
func (*LabelFrame) Reset ¶
func (m *LabelFrame) Reset()
func (*LabelFrame) String ¶
func (m *LabelFrame) String() string
type LabelSegment ¶
type LabelSegment struct { // Video segment where a label was detected. Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment" json:"segment,omitempty"` // Confidence that the label is accurate. Range: [0, 1]. Confidence float32 `protobuf:"fixed32,2,opt,name=confidence" json:"confidence,omitempty"` }
Video segment level annotation results for label detection.
func (*LabelSegment) Descriptor ¶
func (*LabelSegment) Descriptor() ([]byte, []int)
func (*LabelSegment) GetConfidence ¶
func (m *LabelSegment) GetConfidence() float32
func (*LabelSegment) GetSegment ¶
func (m *LabelSegment) GetSegment() *VideoSegment
func (*LabelSegment) ProtoMessage ¶
func (*LabelSegment) ProtoMessage()
func (*LabelSegment) Reset ¶
func (m *LabelSegment) Reset()
func (*LabelSegment) String ¶
func (m *LabelSegment) String() string
type Likelihood ¶
type Likelihood int32
Bucketized representation of likelihood.
const ( // Unspecified likelihood. Likelihood_LIKELIHOOD_UNSPECIFIED Likelihood = 0 // Very unlikely. Likelihood_VERY_UNLIKELY Likelihood = 1 // Unlikely. Likelihood_UNLIKELY Likelihood = 2 // Possible. Likelihood_POSSIBLE Likelihood = 3 // Likely. Likelihood_LIKELY Likelihood = 4 // Very likely. Likelihood_VERY_LIKELY Likelihood = 5 )
func (Likelihood) EnumDescriptor ¶
func (Likelihood) EnumDescriptor() ([]byte, []int)
func (Likelihood) String ¶
func (x Likelihood) String() string
type NormalizedBoundingBox ¶
type NormalizedBoundingBox struct { // Left X coordinate. Left float32 `protobuf:"fixed32,1,opt,name=left" json:"left,omitempty"` // Top Y coordinate. Top float32 `protobuf:"fixed32,2,opt,name=top" json:"top,omitempty"` // Right X coordinate. Right float32 `protobuf:"fixed32,3,opt,name=right" json:"right,omitempty"` // Bottom Y coordinate. Bottom float32 `protobuf:"fixed32,4,opt,name=bottom" json:"bottom,omitempty"` }
Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1].
func (*NormalizedBoundingBox) Descriptor ¶
func (*NormalizedBoundingBox) Descriptor() ([]byte, []int)
func (*NormalizedBoundingBox) GetBottom ¶
func (m *NormalizedBoundingBox) GetBottom() float32
func (*NormalizedBoundingBox) GetLeft ¶
func (m *NormalizedBoundingBox) GetLeft() float32
func (*NormalizedBoundingBox) GetRight ¶
func (m *NormalizedBoundingBox) GetRight() float32
func (*NormalizedBoundingBox) GetTop ¶
func (m *NormalizedBoundingBox) GetTop() float32
func (*NormalizedBoundingBox) ProtoMessage ¶
func (*NormalizedBoundingBox) ProtoMessage()
func (*NormalizedBoundingBox) Reset ¶
func (m *NormalizedBoundingBox) Reset()
func (*NormalizedBoundingBox) String ¶
func (m *NormalizedBoundingBox) String() string
type ShotChangeDetectionConfig ¶
type ShotChangeDetectionConfig struct { // Model to use for shot change detection. // Supported values: "builtin/stable" (the default if unset) and // "builtin/latest". Model string `protobuf:"bytes,1,opt,name=model" json:"model,omitempty"` }
Config for SHOT_CHANGE_DETECTION.
func (*ShotChangeDetectionConfig) Descriptor ¶
func (*ShotChangeDetectionConfig) Descriptor() ([]byte, []int)
func (*ShotChangeDetectionConfig) GetModel ¶
func (m *ShotChangeDetectionConfig) GetModel() string
func (*ShotChangeDetectionConfig) ProtoMessage ¶
func (*ShotChangeDetectionConfig) ProtoMessage()
func (*ShotChangeDetectionConfig) Reset ¶
func (m *ShotChangeDetectionConfig) Reset()
func (*ShotChangeDetectionConfig) String ¶
func (m *ShotChangeDetectionConfig) String() string
type VideoAnnotationProgress ¶
type VideoAnnotationProgress struct { // Video file location in // [Google Cloud Storage](https://cloud.google.com/storage/). InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"` // Approximate percentage processed thus far. // Guaranteed to be 100 when fully processed. ProgressPercent int32 `protobuf:"varint,2,opt,name=progress_percent,json=progressPercent" json:"progress_percent,omitempty"` // Time when the request was received. StartTime *google_protobuf4.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` // Time of the most recent update. UpdateTime *google_protobuf4.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` }
Annotation progress for a single video.
func (*VideoAnnotationProgress) Descriptor ¶
func (*VideoAnnotationProgress) Descriptor() ([]byte, []int)
func (*VideoAnnotationProgress) GetInputUri ¶
func (m *VideoAnnotationProgress) GetInputUri() string
func (*VideoAnnotationProgress) GetProgressPercent ¶
func (m *VideoAnnotationProgress) GetProgressPercent() int32
func (*VideoAnnotationProgress) GetStartTime ¶
func (m *VideoAnnotationProgress) GetStartTime() *google_protobuf4.Timestamp
func (*VideoAnnotationProgress) GetUpdateTime ¶
func (m *VideoAnnotationProgress) GetUpdateTime() *google_protobuf4.Timestamp
func (*VideoAnnotationProgress) ProtoMessage ¶
func (*VideoAnnotationProgress) ProtoMessage()
func (*VideoAnnotationProgress) Reset ¶
func (m *VideoAnnotationProgress) Reset()
func (*VideoAnnotationProgress) String ¶
func (m *VideoAnnotationProgress) String() string
type VideoAnnotationResults ¶
type VideoAnnotationResults struct { // Video file location in // [Google Cloud Storage](https://cloud.google.com/storage/). InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri" json:"input_uri,omitempty"` // Label annotations on video level or user specified segment level. // There is exactly one element for each unique label. SegmentLabelAnnotations []*LabelAnnotation `protobuf:"bytes,2,rep,name=segment_label_annotations,json=segmentLabelAnnotations" json:"segment_label_annotations,omitempty"` // Label annotations on shot level. // There is exactly one element for each unique label. ShotLabelAnnotations []*LabelAnnotation `protobuf:"bytes,3,rep,name=shot_label_annotations,json=shotLabelAnnotations" json:"shot_label_annotations,omitempty"` // Label annotations on frame level. // There is exactly one element for each unique label. FrameLabelAnnotations []*LabelAnnotation `protobuf:"bytes,4,rep,name=frame_label_annotations,json=frameLabelAnnotations" json:"frame_label_annotations,omitempty"` // Face annotations. There is exactly one element for each unique face. FaceAnnotations []*FaceAnnotation `protobuf:"bytes,5,rep,name=face_annotations,json=faceAnnotations" json:"face_annotations,omitempty"` // Shot annotations. Each shot is represented as a video segment. ShotAnnotations []*VideoSegment `protobuf:"bytes,6,rep,name=shot_annotations,json=shotAnnotations" json:"shot_annotations,omitempty"` // Explicit content annotation. ExplicitAnnotation *ExplicitContentAnnotation `protobuf:"bytes,7,opt,name=explicit_annotation,json=explicitAnnotation" json:"explicit_annotation,omitempty"` // If set, indicates an error. Note that for a single `AnnotateVideoRequest` // some videos may succeed and some may fail. Error *google_rpc.Status `protobuf:"bytes,9,opt,name=error" json:"error,omitempty"` }
Annotation results for a single video.
func (*VideoAnnotationResults) Descriptor ¶
func (*VideoAnnotationResults) Descriptor() ([]byte, []int)
func (*VideoAnnotationResults) GetError ¶
func (m *VideoAnnotationResults) GetError() *google_rpc.Status
func (*VideoAnnotationResults) GetExplicitAnnotation ¶
func (m *VideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation
func (*VideoAnnotationResults) GetFaceAnnotations ¶
func (m *VideoAnnotationResults) GetFaceAnnotations() []*FaceAnnotation
func (*VideoAnnotationResults) GetFrameLabelAnnotations ¶
func (m *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetInputUri ¶
func (m *VideoAnnotationResults) GetInputUri() string
func (*VideoAnnotationResults) GetSegmentLabelAnnotations ¶
func (m *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) GetShotAnnotations ¶
func (m *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment
func (*VideoAnnotationResults) GetShotLabelAnnotations ¶
func (m *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation
func (*VideoAnnotationResults) ProtoMessage ¶
func (*VideoAnnotationResults) ProtoMessage()
func (*VideoAnnotationResults) Reset ¶
func (m *VideoAnnotationResults) Reset()
func (*VideoAnnotationResults) String ¶
func (m *VideoAnnotationResults) String() string
type VideoContext ¶
type VideoContext struct {
	// Video segments to annotate. The segments may overlap and are not required
	// to be contiguous or span the whole video. If unspecified, each video
	// is treated as a single segment.
	Segments []*VideoSegment `protobuf:"bytes,1,rep,name=segments" json:"segments,omitempty"`
	// Config for LABEL_DETECTION.
	LabelDetectionConfig *LabelDetectionConfig `protobuf:"bytes,2,opt,name=label_detection_config,json=labelDetectionConfig" json:"label_detection_config,omitempty"`
	// Config for SHOT_CHANGE_DETECTION.
	ShotChangeDetectionConfig *ShotChangeDetectionConfig `` /* 133-byte string literal not displayed */
	// Config for EXPLICIT_CONTENT_DETECTION.
	ExplicitContentDetectionConfig *ExplicitContentDetectionConfig `` /* 148-byte string literal not displayed */
	// Config for FACE_DETECTION.
	FaceDetectionConfig *FaceDetectionConfig `protobuf:"bytes,5,opt,name=face_detection_config,json=faceDetectionConfig" json:"face_detection_config,omitempty"`
}
Video context and/or feature-specific parameters.
func (*VideoContext) Descriptor ¶
func (*VideoContext) Descriptor() ([]byte, []int)
func (*VideoContext) GetExplicitContentDetectionConfig ¶
func (m *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig
func (*VideoContext) GetFaceDetectionConfig ¶
func (m *VideoContext) GetFaceDetectionConfig() *FaceDetectionConfig
func (*VideoContext) GetLabelDetectionConfig ¶
func (m *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig
func (*VideoContext) GetSegments ¶
func (m *VideoContext) GetSegments() []*VideoSegment
func (*VideoContext) GetShotChangeDetectionConfig ¶
func (m *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig
func (*VideoContext) ProtoMessage ¶
func (*VideoContext) ProtoMessage()
func (*VideoContext) Reset ¶
func (m *VideoContext) Reset()
func (*VideoContext) String ¶
func (m *VideoContext) String() string
type VideoIntelligenceServiceClient ¶
type VideoIntelligenceServiceClient interface {
	// Performs asynchronous video annotation. Progress and results can be
	// retrieved through the `google.longrunning.Operations` interface.
	// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
	// `Operation.response` contains `AnnotateVideoResponse` (results).
	AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
}
func NewVideoIntelligenceServiceClient ¶
func NewVideoIntelligenceServiceClient(cc *grpc.ClientConn) VideoIntelligenceServiceClient
type VideoIntelligenceServiceServer ¶
type VideoIntelligenceServiceServer interface {
	// Performs asynchronous video annotation. Progress and results can be
	// retrieved through the `google.longrunning.Operations` interface.
	// `Operation.metadata` contains `AnnotateVideoProgress` (progress).
	// `Operation.response` contains `AnnotateVideoResponse` (results).
	AnnotateVideo(context.Context, *AnnotateVideoRequest) (*google_longrunning.Operation, error)
}
type VideoSegment ¶
type VideoSegment struct {
	// Time-offset, relative to the beginning of the video,
	// corresponding to the start of the segment (inclusive).
	StartTimeOffset *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=start_time_offset,json=startTimeOffset" json:"start_time_offset,omitempty"`
	// Time-offset, relative to the beginning of the video,
	// corresponding to the end of the segment (inclusive).
	EndTimeOffset *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=end_time_offset,json=endTimeOffset" json:"end_time_offset,omitempty"`
}
Video segment.
func (*VideoSegment) Descriptor ¶
func (*VideoSegment) Descriptor() ([]byte, []int)
func (*VideoSegment) GetEndTimeOffset ¶
func (m *VideoSegment) GetEndTimeOffset() *google_protobuf3.Duration
func (*VideoSegment) GetStartTimeOffset ¶
func (m *VideoSegment) GetStartTimeOffset() *google_protobuf3.Duration
func (*VideoSegment) ProtoMessage ¶
func (*VideoSegment) ProtoMessage()
func (*VideoSegment) Reset ¶
func (m *VideoSegment) Reset()
func (*VideoSegment) String ¶
func (m *VideoSegment) String() string