RTSP视频在gtk中的应用

RTSP video in a GTK application (tags: gtk, rtsp, gstreamer). I developed a GTK application that plays an RTSP stream from an IP camera: `pipeline = gst_parse_launch("rtspsrc location=rtsp://192.168.127.100:554/moxa-cgi/udpStream latency=0 ! decodebin ! ximagesink", NULL); gst_element_set_state(pipeline, GST_STATE_PLAYING);` — I pasted this code into my program and it works fine, but the video opens in a separate window. How can I display the video inside a drawing area or another widget? Tristan has a good article on his blog about embedding and fullscreening video; his updated example follows below.

我开发了一个gtk应用程序,用于从IP摄像机播放RTSP流

pipeline = gst_parse_launch("rtspsrc location=rtsp://192.168.127.100:554/moxa-cgi/udpStream latency=0 ! decodebin ! ximagesink", NULL); gst_element_set_state(pipeline, GST_STATE_PLAYING);

我把这段代码粘贴到我的程序中,它工作得很好,但是另一个窗口。
如何在绘图区域或其他小部件上显示视频

特里斯坦在他的博客上有一篇关于嵌入视频和全屏视频的好文章

特里斯坦的更新示例如下:

#include <gdk/gdk.h>
#include <gdk/gdkx.h>
#include <gst/gst.h>
#include <gst/interfaces/xoverlay.h>
#include <gtk/gtk.h>
/* Handle the "prepare-xwindow-id" element message from the video sink:
 * hand the sink our X window id so the video is rendered inside our
 * GTK window instead of a standalone window.
 * Returns TRUE if the message was consumed, FALSE otherwise. */
gboolean handleBusMsg(GstMessage *message, GtkWidget *window)
{
    /* ignore anything but 'prepare-xwindow-id' element messages */
    if (GST_MESSAGE_TYPE(message) != GST_MESSAGE_ELEMENT)
        return FALSE;

    if (!gst_structure_has_name(message->structure, "prepare-xwindow-id"))
        return FALSE;

    g_print("Got prepare-xwindow-id msg\n");
    /* FIXME: see https://bugzilla.gnome.org/show_bug.cgi?id=599885 */
    gst_x_overlay_set_xwindow_id(GST_X_OVERLAY(GST_MESSAGE_SRC(message)),
                                 GDK_WINDOW_XWINDOW(window->window));
    return TRUE;
}
gboolean总线调用(GstBus*总线、GstMessage*消息、gpointer数据)
{
GtkWidget*窗口=(GtkWidget*)数据;
开关(GST\信息\类型(msg))
{
案例GST_消息_元素:
{
把手味精(味精,窗户);
打破
}
违约:
打破
}
返回TRUE;
}
/* Paint the window background black so any letterboxing around the
 * video (from force-aspect-ratio) is black rather than the theme color. */
static void makeWindowBlack(GtkWidget *window)
{
    GdkColor color;

    gdk_color_parse("black", &color);
    gtk_widget_modify_bg(window, GTK_STATE_NORMAL, &color); /* needed to ensure black background */
}
静态gboolean
按键事件cb(GtkWidget*小部件、GdkEventKey*事件、gpointer数据)
{
如果(事件->键值!=“f”)
返回TRUE;
其他的
g_print(“您点击f\n”);
gboolean isFullscreen=(gdk_窗口_获取_状态(gdk_窗口(小部件->窗口))==gdk_窗口_状态_全屏);
如果(全屏显示)
gtk_窗口_未满屏幕(gtk_窗口(小部件));
其他的
gtk_窗口_全屏(gtk_窗口(小部件));
返回TRUE;
}
void destroy_cb(GtkWidget*小部件,gpointer数据)
{
GMainLoop*循环=(GMainLoop*)数据;
g_print(“窗口被破坏”);
g_主循环退出(循环);
}
gint干管(gint-argc,gchar*argv[])
{
GSTStateChangeRet;
GstElement*管道、*src、*水槽;
GMainLoop*循环;
GtkWidget*窗口;
/*初始化*/
gst_init(&argc,&argv);
gtk_init(&argc,&argv);
loop=g_main_loop_new(NULL,FALSE);
//生成窗口并将expose事件附加到expose回调
窗口=gtk_窗口_新建(gtk_窗口_顶层);
/*创建元素*/
管道=gst_管道新(“我的管道”);
gst_总线添加_手表(gst_管道获取_总线(gst_管道(管道)),
(GstBusFunc)总线调用,窗口);
src=gst元素工厂制造(“videotestsrc”,空);
接收器=gst元件工厂制造(“xImage接收器”、“视频接收器”);
如果(!接收器)
g_print(“找不到输出-检查安装\n”);
gst_bin_add_many(gst_bin(管道)、src、sink、NULL);
g_对象集(g_对象(接收器),“力纵横比”,真,空);
/*把一切联系在一起*/
如果(!gst_元素_链接(src,接收器)){
g_print(“链接一个或多个元素失败!\n”);
返回-1;
}
//将按键信号附加到按键回叫
gtk_控件_设置_事件(窗口、GDK_键_按_掩码);
g_信号连接(g_对象(窗口),“按键事件”,g_回调(按键事件),接收器);
g_信号连接(g_对象(窗口),“销毁”,g_回调(销毁cb),循环);
/*跑*/
使窗口变黑(窗口);
gtk_小部件_全部显示(窗口);
ret=gst\u元素\u集合\u状态(管道、gst\u状态\u播放);
如果(ret==GST\u状态\u变化\u失败)
{
g_print(“启动管道失败!\n”);
返回1;
}
g_主循环运行(循环);
/*清理*/
gst\元素\集合\状态(管道,gst\状态\空);
gst_对象_unref(管道);
返回0;
}

我做了几乎相同的工作,并在Gtk3中为其创建了一个类。希望能有帮助

class Camera(Gtk.DrawingArea):
    """A Gtk.DrawingArea that renders an RTSP H.264 stream via GStreamer.

    Pipeline: rtspsrc -> rtph264depay -> fluh264dec -> xvimagesink.

    NOTE(review): ``self.xid_Camera`` is read in ``on_sync_message_Camera``
    but never assigned here; the owner must set it after the widget is
    realized, e.g. ``cam.xid_Camera = cam.get_property('window').get_xid()``
    — confirm against the caller.
    """

    def __init__(self, type, filepath, ui_table, callback):
        # `type` kept as-is (shadows the builtin) to preserve the interface;
        # `callback` is invoked on click with the last character of `type`.
        self.ui_table = ui_table
        self.type = type
        self.callback = callback

        # create GStreamer pipeline
        pipeline = Gst.Pipeline()

        # create bus to get events from GStreamer pipeline
        bus_camera = pipeline.get_bus()
        bus_camera.add_signal_watch()
        bus_camera.connect('message::eos', self.on_eos_Camera)
        bus_camera.connect('message::error', self.on_error_Camera)

        # rtspsrc creates its source pads dynamically; link them in on_pad()
        source = Gst.ElementFactory.make("rtspsrc", "source")
        source.set_property("location", filepath)
        source.set_property("latency", 0)
        source.connect("pad-added", self.on_pad)
        pipeline.add(source)

        depay = Gst.ElementFactory.make("rtph264depay", "depay")
        pipeline.add(depay)
        source.link(depay)

        dec = Gst.ElementFactory.make("fluh264dec", "dec")
        pipeline.add(dec)
        depay.link(dec)

        sink = Gst.ElementFactory.make("xvimagesink", "sink")
        sink.set_property("sync", False)  # live source: render frames as they arrive
        pipeline.add(sink)
        dec.link(sink)

        Gtk.DrawingArea.__init__(self)
        # main drawingarea: clicks are forwarded to `callback`
        self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
        self.connect("button-press-event", self.drawingarea_methode)

        # size the widget to fill the whole screen
        screen = Gdk.Screen.get_default()
        screen_size_x = screen.get_width()
        screen_size_y = screen.get_height()
        self.set_size_request(screen_size_x, screen_size_y)

        # This is needed to make the video output appear in our DrawingArea:
        bus_camera.enable_sync_message_emission()
        bus_camera.connect('message', self.on_message_Camera)
        bus_camera.connect('sync-message::element', self.on_sync_message_Camera)

        self.source = source
        self.depay = depay
        self.pipeline = pipeline

    def on_pad(self, rtspsrc, pad):
        # Link the dynamically created rtspsrc pad to the depayloader.
        depaySinkPad = self.depay.get_static_pad('sink')
        pad.link(depaySinkPad)

    # clicked event onto drawingarea results in hiding the buttons
    def drawingarea_methode(self, widget, event):
        self.callback(self.type[-1])

    def on_eos_Camera(self, bus, msg):
        # BUG FIX: the original referenced self.player_camera, which is never
        # assigned anywhere; rewind the pipeline created in __init__ instead.
        self.pipeline.seek_simple(
            Gst.Format.TIME,
            Gst.SeekFlags.FLUSH | Gst.SeekFlags.KEY_UNIT,
            0
        )

    def on_error_Camera(self, bus, msg):
        print('on_error():', msg.parse_error())

    def on_message_Camera(self, bus, message):
        # Placeholder kept for debugging (uncomment the print to trace messages).
        t = message.type
        #print(t)

    def on_sync_message_Camera(self, bus, msg):
        # Tell the video sink to render into our X window.
        if msg.get_structure().get_name() == 'prepare-window-handle':
            msg.src.set_window_handle(self.xid_Camera)
class Camera(Gtk.DrawingArea):
    """A Gtk.DrawingArea that renders an RTSP H.264 stream via GStreamer.

    Pipeline: rtspsrc -> rtph264depay -> fluh264dec -> xvimagesink.

    NOTE(review): this class is an exact duplicate of the one defined above
    (likely a paste error in the page) — consider deleting one copy.
    NOTE(review): ``self.xid_Camera`` is read in ``on_sync_message_Camera``
    but never assigned here; the owner must set it after the widget is
    realized, e.g. ``cam.xid_Camera = cam.get_property('window').get_xid()``
    — confirm against the caller.
    """

    def __init__(self, type, filepath, ui_table, callback):
        # `type` kept as-is (shadows the builtin) to preserve the interface;
        # `callback` is invoked on click with the last character of `type`.
        self.ui_table = ui_table
        self.type = type
        self.callback = callback

        # create GStreamer pipeline
        pipeline = Gst.Pipeline()

        # create bus to get events from GStreamer pipeline
        bus_camera = pipeline.get_bus()
        bus_camera.add_signal_watch()
        bus_camera.connect('message::eos', self.on_eos_Camera)
        bus_camera.connect('message::error', self.on_error_Camera)

        # rtspsrc creates its source pads dynamically; link them in on_pad()
        source = Gst.ElementFactory.make("rtspsrc", "source")
        source.set_property("location", filepath)
        source.set_property("latency", 0)
        source.connect("pad-added", self.on_pad)
        pipeline.add(source)

        depay = Gst.ElementFactory.make("rtph264depay", "depay")
        pipeline.add(depay)
        source.link(depay)

        dec = Gst.ElementFactory.make("fluh264dec", "dec")
        pipeline.add(dec)
        depay.link(dec)

        sink = Gst.ElementFactory.make("xvimagesink", "sink")
        sink.set_property("sync", False)  # live source: render frames as they arrive
        pipeline.add(sink)
        dec.link(sink)

        Gtk.DrawingArea.__init__(self)
        # main drawingarea: clicks are forwarded to `callback`
        self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK)
        self.connect("button-press-event", self.drawingarea_methode)

        # size the widget to fill the whole screen
        screen = Gdk.Screen.get_default()
        screen_size_x = screen.get_width()
        screen_size_y = screen.get_height()
        self.set_size_request(screen_size_x, screen_size_y)

        # This is needed to make the video output appear in our DrawingArea:
        bus_camera.enable_sync_message_emission()
        bus_camera.connect('message', self.on_message_Camera)
        bus_camera.connect('sync-message::element', self.on_sync_message_Camera)

        self.source = source
        self.depay = depay
        self.pipeline = pipeline

    def on_pad(self, rtspsrc, pad):
        # Link the dynamically created rtspsrc pad to the depayloader.
        depaySinkPad = self.depay.get_static_pad('sink')
        pad.link(depaySinkPad)

    # clicked event onto drawingarea results in hiding the buttons
    def drawingarea_methode(self, widget, event):
        self.callback(self.type[-1])

    def on_eos_Camera(self, bus, msg):
        # BUG FIX: the original referenced self.player_camera, which is never
        # assigned anywhere; rewind the pipeline created in __init__ instead.
        self.pipeline.seek_simple(
            Gst.Format.TIME,
            Gst.SeekFlags.FLUSH | Gst.SeekFlags.KEY_UNIT,
            0
        )

    def on_error_Camera(self, bus, msg):
        print('on_error():', msg.parse_error())

    def on_message_Camera(self, bus, message):
        # Placeholder kept for debugging (uncomment the print to trace messages).
        t = message.type
        #print(t)

    def on_sync_message_Camera(self, bus, msg):
        # Tell the video sink to render into our X window.
        if msg.get_structure().get_name() == 'prepare-window-handle':
            msg.src.set_window_handle(self.xid_Camera)