Label analysis
Label analysis identifies objects, locations, activities, animal species, products, and more.
Use the standard model
The following code shows how to use Video Intelligence API streaming label detection to annotate a video.
Java
To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import com.google.api.gax.rpc.BidiStream;
import com.google.cloud.videointelligence.v1p3beta1.LabelAnnotation;
import com.google.cloud.videointelligence.v1p3beta1.LabelFrame;
import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;
import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;
import com.google.cloud.videointelligence.v1p3beta1.StreamingFeature;
import com.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;
import com.google.protobuf.ByteString;
import io.grpc.StatusRuntimeException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.concurrent.TimeoutException;
class StreamingLabelDetection{
// Perform streaming video label detection
staticvoidstreamingLabelDetection(StringfilePath)
throwsIOException,TimeoutException,StatusRuntimeException{
// String filePath = "path_to_your_video_file";
try(StreamingVideoIntelligenceServiceClientclient=
StreamingVideoIntelligenceServiceClient.create()){
Pathpath=Paths.get(filePath);
byte[]data=Files.readAllBytes(path);
// Set the chunk size to 5MB (recommended less than 10MB).
intchunkSize=5*1024*1024;
intnumChunks=(int)Math.ceil((double)data.length/chunkSize);
StreamingLabelDetectionConfiglabelConfig=
StreamingLabelDetectionConfig.newBuilder().setStationaryCamera(false).build();
StreamingVideoConfigstreamingVideoConfig=
StreamingVideoConfig.newBuilder()
.setFeature(StreamingFeature.STREAMING_LABEL_DETECTION)
.setLabelDetectionConfig(labelConfig)
.build();
BidiStream<StreamingAnnotateVideoRequest,StreamingAnnotateVideoResponse>call=
client.streamingAnnotateVideoCallable().call();
// The first request must **only** contain the audio configuration:
call.send(
StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());
// Subsequent requests must **only** contain the audio data.
// Send the requests in chunks
for(inti=0;i < numChunks;i++){
call.send(
StreamingAnnotateVideoRequest.newBuilder()
.setInputContent(
ByteString .copyFrom (
Arrays.copyOfRange(data,i*chunkSize,i*chunkSize+chunkSize)))
.build());
}
// Tell the service you are done sending data
call.closeSend();
for(StreamingAnnotateVideoResponseresponse:call){
StreamingVideoAnnotationResultsannotationResults=response.getAnnotationResults();
for(LabelAnnotationannotation:annotationResults.getLabelAnnotationsList()){
Stringentity=annotation.getEntity().getDescription ();
// There is only one frame per annotation
LabelFramelabelFrame=annotation.getFrames(0);
doubleoffset=
labelFrame.getTimeOffset().getSeconds()+labelFrame.getTimeOffset().getNanos()/1e9;
floatconfidence=labelFrame.getConfidence();
System.out.format("%fs: %s (%f)\n",offset,entity,confidence);
}
}
}
}
}Node.js
Node.js

To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
/**
 * TODO(developer): Uncomment these variables before running the sample.
 */
// const path = 'Local file to analyze, e.g. ./my-file.mp4';

const {StreamingVideoIntelligenceServiceClient} =
  require('@google-cloud/video-intelligence').v1p3beta1;
const fs = require('fs');

// Instantiates a client
const client = new StreamingVideoIntelligenceServiceClient();

// Streaming configuration
const configRequest = {
  videoConfig: {
    feature: 'STREAMING_LABEL_DETECTION',
  },
};

const readStream = fs.createReadStream(path, {
  highWaterMark: 5 * 1024 * 1024, // chunk size set to 5MB (recommended less than 10MB)
  encoding: 'base64',
});

// Load file content. The 'close' handler runs after `stream` below has been
// initialized, because the read events fire asynchronously.
const chunks = [];
readStream
  .on('data', chunk => {
    const request = {
      inputContent: chunk.toString(),
    };
    chunks.push(request);
  })
  .on('close', () => {
    // configRequest should be the first in the stream of requests
    stream.write(configRequest);
    for (let i = 0; i < chunks.length; i++) {
      stream.write(chunks[i]);
    }
    stream.end();
  });

const stream = client.streamingAnnotateVideo().on('data', response => {
  // Gets annotations for video
  const annotations = response.annotationResults;
  const labels = annotations.labelAnnotations;
  labels.forEach(label => {
    console.log(
      `Label ${label.entity.description} occurs at: ${
        label.frames[0].timeOffset.seconds || 0
      }` + `.${(label.frames[0].timeOffset.nanos / 1e6).toFixed(0)}s`
    );
    console.log(` Confidence: ${label.frames[0].confidence}`);
  });
});
Python

To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
# `io` was referenced below but never imported in the original sample.
import io

from google.cloud import videointelligence_v1p3beta1 as videointelligence

# path = 'path_to_file'

client = videointelligence.StreamingVideoIntelligenceServiceClient()

# Set streaming config.
config = videointelligence.StreamingVideoConfig(
    feature=(videointelligence.StreamingFeature.STREAMING_LABEL_DETECTION)
)

# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(video_config=config)

# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024

# Load file content.
stream = []
with io.open(path, "rb") as video_file:
    while True:
        data = video_file.read(chunk_size)
        if not data:
            break
        stream.append(data)


def stream_generator():
    """Yields the config request first, then one request per video chunk."""
    yield config_request
    for chunk in stream:
        yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)


requests = stream_generator()

# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos it should be set to
# larger than the length (in seconds) of the stream.
responses = client.streaming_annotate_video(requests, timeout=600)

# Each response corresponds to about 1 second of video.
for response in responses:
    # Check for errors.
    if response.error.message:
        print(response.error.message)
        break

    label_annotations = response.annotation_results.label_annotations

    # label_annotations could be empty
    if not label_annotations:
        continue

    for annotation in label_annotations:
        # Each annotation has one frame, which has a timeoffset.
        frame = annotation.frames[0]
        time_offset = frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
        description = annotation.entity.description
        confidence = annotation.frames[0].confidence
        # description is in Unicode
        print("{}s: {} (confidence: {})".format(time_offset, description, confidence))