Shot change
Shot change analysis detects the transitions between shots in a video.
The following sample code shows how to use the streaming Video Intelligence API to identify shot changes in a video.
Java
To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
import com.google.api.gax.rpc.BidiStream;
import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoRequest;
import com.google.cloud.videointelligence.v1p3beta1.StreamingAnnotateVideoResponse;
import com.google.cloud.videointelligence.v1p3beta1.StreamingFeature;
import com.google.cloud.videointelligence.v1p3beta1.StreamingLabelDetectionConfig;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoAnnotationResults;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoConfig;
import com.google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceServiceClient;
import com.google.cloud.videointelligence.v1p3beta1.VideoSegment;
import com.google.protobuf.ByteString;
import io.grpc.StatusRuntimeException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.concurrent.TimeoutException;
class StreamingShotChangeDetection {

  // Perform streaming video detection for shot changes
  static void streamingShotChangeDetection(String filePath)
      throws IOException, TimeoutException, StatusRuntimeException {
    // String filePath = "path_to_your_video_file";

    try (StreamingVideoIntelligenceServiceClient client =
        StreamingVideoIntelligenceServiceClient.create()) {

      Path path = Paths.get(filePath);
      byte[] data = Files.readAllBytes(path);
      // Set the chunk size to 5MB (recommended less than 10MB).
      int chunkSize = 5 * 1024 * 1024;
      int numChunks = (int) Math.ceil((double) data.length / chunkSize);

      StreamingLabelDetectionConfig labelConfig =
          StreamingLabelDetectionConfig.newBuilder().setStationaryCamera(false).build();

      StreamingVideoConfig streamingVideoConfig =
          StreamingVideoConfig.newBuilder()
              .setFeature(StreamingFeature.STREAMING_SHOT_CHANGE_DETECTION)
              .setLabelDetectionConfig(labelConfig)
              .build();

      BidiStream<StreamingAnnotateVideoRequest, StreamingAnnotateVideoResponse> call =
          client.streamingAnnotateVideoCallable().call();
      // The first request must **only** contain the streaming video configuration:
      call.send(
          StreamingAnnotateVideoRequest.newBuilder().setVideoConfig(streamingVideoConfig).build());

      // Subsequent requests must **only** contain the video data.
      // Send the requests in chunks
      for (int i = 0; i < numChunks; i++) {
        call.send(
            StreamingAnnotateVideoRequest.newBuilder()
                .setInputContent(
                    ByteString.copyFrom(
                        Arrays.copyOfRange(
                            data, i * chunkSize, Math.min(data.length, (i + 1) * chunkSize))))
                .build());
      }

      // Tell the service you are done sending data
      call.closeSend();
      for (StreamingAnnotateVideoResponse response : call) {
        StreamingVideoAnnotationResults annotationResults = response.getAnnotationResults();

        if (response.hasError()) {
          System.out.println(response.getError().getMessage());
          System.out.format(
              "Error occurred with the following status: %s\n", response.getError());
        }

        for (VideoSegment segment : annotationResults.getShotAnnotationsList()) {
          double startTimeOffset =
              segment.getStartTimeOffset().getSeconds()
                  + segment.getStartTimeOffset().getNanos() / 1e9;
          double endTimeOffset =
              segment.getEndTimeOffset().getSeconds()
                  + segment.getEndTimeOffset().getNanos() / 1e9;

          System.out.format("Shot: %fs to %fs\n", startTimeOffset, endTimeOffset);
        }
      }
    }
  }
}

Node.js
To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
// const path = 'Local file to analyze, e.g. ./my-file.mp4';
const {StreamingVideoIntelligenceServiceClient} =
  require('@google-cloud/video-intelligence').v1p3beta1;
const fs = require('fs');

// Instantiates a client
const client = new StreamingVideoIntelligenceServiceClient();

// Streaming configuration
const configRequest = {
  videoConfig: {
    feature: 'STREAMING_SHOT_CHANGE_DETECTION',
  },
};

const readStream = fs.createReadStream(path, {
  highWaterMark: 5 * 1024 * 1024, // chunk size set to 5MB (recommended less than 10MB)
  encoding: 'base64',
});

// Load file content
const chunks = [];
readStream
  .on('data', chunk => {
    const request = {
      inputContent: chunk.toString(),
    };
    chunks.push(request);
  })
  .on('close', () => {
    // configRequest should be the first in the stream of requests
    stream.write(configRequest);
    for (let i = 0; i < chunks.length; i++) {
      stream.write(chunks[i]);
    }
    stream.end();
  });

const stream = client.streamingAnnotateVideo().on('data', response => {
  // Gets annotations for video
  const annotations = response.annotationResults;
  const shotChanges = annotations.shotAnnotations;
  console.log(JSON.stringify(shotChanges));
  if (shotChanges.length === 1) {
    console.log('The entire video is one shot.');
  }
  shotChanges.forEach(shot => {
    console.log(
      `Shot: ${shot.startTimeOffset.seconds || 0}` +
        `.${(shot.startTimeOffset.nanos / 1e6).toFixed(0)}s to ${
          shot.endTimeOffset.seconds || 0
        }` +
        `.${(shot.endTimeOffset.nanos / 1e6).toFixed(0)}s`
    );
  });
});

Python
To authenticate to Video Intelligence, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
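Application Default Credentials can be supplied in several ways; the sketch below shows one possible setup and is not part of the official sample (the key path is a placeholder, and credentials configured with the gcloud CLI work without this step):

import os

# Assumption: a downloaded service account key file provides the credentials.
# Setting GOOGLE_APPLICATION_CREDENTIALS before creating the client is one way
# to supply Application Default Credentials; gcloud-configured ADC needs no
# extra code.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/path/to/service-account-key.json"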
import io

from google.cloud import videointelligence_v1p3beta1 as videointelligence
# path = 'path_to_file'
client = videointelligence.StreamingVideoIntelligenceServiceClient()
# Set streaming config.
config = videointelligence.StreamingVideoConfig(
    feature=(videointelligence.StreamingFeature.STREAMING_SHOT_CHANGE_DETECTION)
)

# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(
    video_config=config
)
# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024
# Load file content.
stream = []
with io.open(path, "rb") as video_file:
    while True:
        data = video_file.read(chunk_size)
        if not data:
            break
        stream.append(data)


def stream_generator():
    yield config_request
    for chunk in stream:
        yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)
requests = stream_generator()
# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos it should be set to
# larger than the length (in seconds) of the stream.
responses = client.streaming_annotate_video(requests, timeout=600)
# Each response corresponds to about 1 second of video.
for response in responses:
    # Check for errors.
    if response.error.message:
        print(response.error.message)
        break

    for annotation in response.annotation_results.shot_annotations:
        start = (
            annotation.start_time_offset.seconds
            + annotation.start_time_offset.microseconds / 1e6
        )
        end = (
            annotation.end_time_offset.seconds
            + annotation.end_time_offset.microseconds / 1e6
        )
        print("Shot: {}s to {}s".format(start, end))