Saving face images to disk
joseph
joseph.celi at jci.com
Fri Nov 15 16:25:50 UTC 2019
I'm running the following gstreamer pipeline
rtspsrc location=rtsp://root:pass@192.168.1.14/ufirststream ! decodebin !
videoconvert n-threads=4 ! videoscale n-threads=4 ! capsfilter
caps="video/x-raw" ! gvadetect
model=/home/dldevmax/projects/gva/data/models/intel/face/FP32/face-detection-retail-0004.xml
model-proc=/home/dldevmax/projects/gva/data/models/intel/face/FP32/face-detection-retail-0004.json
! queue ! gvawatermark name=gvawatermark ! tee name=tp tp. ! queue !
videoconvert ! fpsdisplaysink video-sink=xvimagesink sync=false tp. ! queue
! videoconvert ! jpegenc ! multifilesink location=out/img1_%03d.jpeg tp. !
queue ! videoconvert ! appsink name=application
I'm able to obtain the GST_VIDEO_REGION_OF_INTEREST_META_API_TYPE meta data
on every sample that contains a recognized face. I would like to store
those faces as jpg files on the local filesystem. I have the bounding boxes
but am running into a ton of problems trying to first store the complete
frame. If I can store the complete frame it would be easy for me to obtain
the regions that contain the faces because I have the bounding boxes.
A better solution would be to figure out how to just save the faces directly
but at this point I cannot even seem to be able to save the complete frame
properly.
Any advice on either? My attempt to store the complete frame is in
dump_frame in the code below. I also saving the frames using a multisink
and if I could correlate that stored frame to the frame associated with my
face regions ... that would work too.
#include <gst/gst.h>
#include <stdio.h>
#include <gst/video/video.h>
#include <gst/video/gstvideometa.h>
#include <gdk-pixbuf/gdk-pixbuf.h>
/**
Play a simple RTSP Stream to a screen.
We use the gst_parse_launch to
run the pipeline
https://gstreamer.freedesktop.org/documentation/application-development/advanced/pipeline-manipulation.html?gi-language=python
**/
typedef struct __AllElements
{
GstElement *pipeline;
GstElement *rtspsrc;
GstElement *detect;
GstElement *watermark;
GstElement *appsink;
} Allelements;
static GstFlowReturn new_sample (GstElement *sink, Allelements *data);
void print_meta(GstSample * pSample, GstBuffer *buffer) ;
int main (int argc, char *argv[])
{
GstElement *pipeline;
GstBus *bus;
/* Initialize GStreamer */
gst_init (&argc, &argv);
/* Build the pipeline */
gchar *input_file = "rtsp://root:pass@192.168.1.14/ufirststream";
gchar *detection_model =
"/home/dldevmax/projects/gva/data/models/intel/face/FP32/face-detection-retail-0004.xml";
gchar *device = "cpu";
gchar *model_proc =
"/home/dldevmax/projects/gva/data/models/intel/face/FP32/face-detection-retail-0004.json";
gchar const *preprocess_pipeline = "decodebin ! videoconvert n-threads=4 !
videoscale n-threads=4 ";
gchar const *capfilter = "video/x-raw";
gchar const *screensink = " videoconvert ! fpsdisplaysink
video-sink=xvimagesink sync=false";
//gchar const *filesink = " videoconvert ! pngenc ! multifilesink
location=frame%05d.png";
gchar const *filesink = " videoconvert ! jpegenc ! multifilesink
location=out/img1_%03d.jpeg";
gchar const *appsink = " videoconvert ! appsink name=application";
// Build the pipeline
char *launch_str = g_strdup_printf("rtspsrc location=%s ! %s ! capsfilter
caps=\"%s\" ! "
"gvadetect model=%s model-proc=%s !
queue ! "
"gvawatermark name=gvawatermark ! tee
name=tp "
"tp. ! queue ! %s tp. ! queue ! %s tp. !
queue ! %s",
input_file, preprocess_pipeline,
capfilter, detection_model, model_proc, screensink, filesink, appsink);
g_print("PIPELINE: %s \n", launch_str);
// sleep(10);
pipeline = gst_parse_launch(launch_str, NULL);
g_free(launch_str);
Allelements element;
GstElement *sink = gst_bin_get_by_name (GST_BIN (pipeline),
"application");
g_object_set (sink, "sync", TRUE, NULL);
g_object_set (sink, "emit-signals", TRUE, NULL);
g_signal_connect (sink, "new-sample", G_CALLBACK (new_sample), &element);
/* Start playing */
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* Wait until error or EOS */
bus = gst_element_get_bus (pipeline);
int ret_code = 0;
GstMessage *msg = gst_bus_poll(bus, (GstMessageType)(GST_MESSAGE_ERROR |
GST_MESSAGE_EOS), -1);
if (msg && GST_MESSAGE_TYPE(msg) == GST_MESSAGE_ERROR) {
GError *err = NULL;
gchar *dbg_info = NULL;
gst_message_parse_error(msg, &err, &dbg_info);
g_printerr("ERROR from element %s: %s\n", GST_OBJECT_NAME(msg->src),
err->message);
g_printerr("Debugging info: %s\n", (dbg_info) ? dbg_info : "none");
g_error_free(err);
g_free(dbg_info);
ret_code = -1;
}
if (msg)
gst_message_unref(msg);
// Free resources
gst_object_unref(bus);
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);
return ret_code;
}
static GstFlowReturn new_sample(GstElement *sink, Allelements *data) {
GstBuffer *pBuffer;
GstSample *pSample = gst_base_sink_get_last_sample(GST_BASE_SINK(sink));
if(pSample != NULL) {
pBuffer = gst_sample_get_buffer(pSample);
if(pBuffer != NULL) {
print_meta(pSample, pBuffer);
gst_buffer_unref(pBuffer);
}
gst_sample_unref(pSample);
}
return 0;
}
void dump_frame(GstSample *sample, guint8 *pixels) {
GstBuffer *buffer;
GstCaps *caps;
GstStructure *s;
GstMapInfo map;
GError *pError = NULL;
caps = gst_sample_get_caps (sample);
if (!caps) {
g_print ("could not get snapshot format\n");
return;
}
s = gst_caps_get_structure (caps, 0);
int width, height;
gst_structure_get_int (s, "width", &width);
gst_structure_get_int (s, "height", &height);
buffer = gst_sample_get_buffer (sample);
gst_buffer_map(buffer, &map, GST_MAP_READ);
g_print("This buffer has %lu bytes and is off %d x %d\n", map.size, width,
height);
int rowstride = GST_ROUND_UP_4(width * 3);
printf("rowstride is %d\n", rowstride);
GdkPixbuf *pixbuf = gdk_pixbuf_new_from_data
(pixels,GDK_COLORSPACE_RGB,FALSE,8,width, height,rowstride,NULL,NULL);
gdk_pixbuf_save(pixbuf, "frame.jpg", "jpeg", &pError, NULL);
gst_buffer_unmap (buffer, &map);
}
void copy_face(GstSample * pSample, GstBuffer * pBuffer) {
GstVideoFrame videoFrame;
GstVideoInfo videoInfo;
GstCaps *caps = gst_sample_get_caps(pSample);
gst_video_info_init(&videoInfo);
gst_video_info_from_caps(&videoInfo, caps);
gst_video_frame_map(&videoFrame, &videoInfo, pBuffer, 0);
g_print("width == %d\n", videoInfo.width);
g_print("height == %d\n", videoInfo.height);
g_print("size == %ld\n", videoInfo.size);
guint8 *pixels = GST_VIDEO_FRAME_PLANE_DATA(&videoFrame, 0);
dump_frame(pSample, pixels);
gst_video_frame_unmap(&videoFrame);
}
void print_meta(GstSample * pSample, GstBuffer *buffer) {
gpointer state = NULL;
GstMeta *meta = NULL;
while ((meta = gst_buffer_iterate_meta_filtered(buffer, &state,
GST_VIDEO_REGION_OF_INTEREST_META_API_TYPE)) != NULL) {
GstVideoRegionOfInterestMeta *roi_meta =
(GstVideoRegionOfInterestMeta*)meta;
printf("Object bounding box %d,%d,%d,%d\n", roi_meta->x, roi_meta->y,
roi_meta->w, roi_meta->h);
for (GList *l = roi_meta->params; l; l = g_list_next(l)) {
GstStructure *structure = (GstStructure *) l->data;
printf(" Attribute %s\n", gst_structure_get_name(structure));
if (gst_structure_has_field(structure, "label")) {
printf(" label=%s\n", gst_structure_get_string(structure,
"label"));
}
if (gst_structure_has_field(structure, "confidence")) {
double confidence;
gst_structure_get_double(structure, "confidence", &confidence);
printf(" confidence=%.2f\n", confidence);
}
}
copy_face(pSample, buffer);
}
}
--
Sent from: http://gstreamer-devel.966125.n4.nabble.com/
More information about the gstreamer-devel
mailing list