C++ 如何修复Gstreamer以捕获麦克风音频并将其缓冲或转储为原始文件,当我说话时,它不会保存任何内容

C/C++/GStreamer:如何修复 GStreamer,以捕获麦克风音频并将其缓冲或转储为原始文件?当我说话时,它不会保存任何内容。

我正在尝试捕获麦克风音频并将其保存为文件。但它不工作,我目前只能播放指定的文件。如何启用麦克风并对其进行缓冲,或将其保存/转储为 raw、ogg 或 vorbis 格式?

#include <gst/gst.h>
#include <glib.h>


/* Bus watch callback: reacts to EOS and ERROR messages posted on the
 * pipeline bus and stops the main loop in both cases.  Returning TRUE
 * keeps the watch installed for further messages. */
static gboolean
bus_call (GstBus     *bus,
          GstMessage *msg,
          gpointer    data)
{
  GMainLoop *main_loop = (GMainLoop *) data;
  GstMessageType type = GST_MESSAGE_TYPE (msg);

  if (type == GST_MESSAGE_EOS) {
    /* Normal end of the stream: leave the main loop. */
    g_print ("End of stream\n");
    g_main_loop_quit (main_loop);
  } else if (type == GST_MESSAGE_ERROR) {
    GError *err = NULL;
    gchar *dbg_info = NULL;

    /* Extract the error; the debug string is not shown, only freed. */
    gst_message_parse_error (msg, &err, &dbg_info);
    g_free (dbg_info);

    g_printerr ("Error: %s\n", err->message);
    g_error_free (err);

    g_main_loop_quit (main_loop);
  }

  return TRUE;
}


/* "pad-added" handler: links a freshly created demuxer source pad to the
 * decoder's sink pad.  The demuxer creates its pads only at runtime, once
 * it has inspected the streams, so this link cannot be made statically.
 *
 * Fixes over the original: the static sink pad lookup is NULL-checked and
 * the result of gst_pad_link() is verified — previously a failed link was
 * silent, leaving a pipeline that plays nothing without any diagnostic. */
static void
on_pad_added (GstElement *element,
              GstPad     *pad,
              gpointer    data)
{
  GstPad *sinkpad;
  GstElement *decoder = (GstElement *) data;

  /* We can now link this pad with the vorbis-decoder sink pad */
  g_print ("Dynamic pad created, linking demuxer/decoder\n");

  sinkpad = gst_element_get_static_pad (decoder, "sink");
  if (sinkpad == NULL) {
    g_printerr ("Decoder has no static \"sink\" pad; cannot link\n");
    return;
  }

  if (GST_PAD_LINK_FAILED (gst_pad_link (pad, sinkpad)))
    g_printerr ("Failed to link demuxer pad to decoder sink pad\n");

  gst_object_unref (sinkpad);
}



int
main (int   argc,
      char *argv[])
{
  GMainLoop *loop;

  GstElement *pipeline, *source, *demuxer, *decoder, *conv, *sink;
  GstBus *bus;

  /* Initialisation */
  gst_init (&argc, &argv);

  loop = g_main_loop_new (NULL, FALSE);


  /* Check input arguments */
  if (argc != 2) {
    g_printerr ("Usage: %s <Ogg/Vorbis filename>\n", argv[0]);
    return -1;
  }


  /* Create gstreamer elements */
  pipeline = gst_pipeline_new ("audio-player");
  source   = gst_element_factory_make ("filesrc",       "file-source");
  demuxer  = gst_element_factory_make ("oggdemux",      "ogg-demuxer");
  decoder  = gst_element_factory_make ("vorbisdec",     "vorbis-decoder");
  conv     = gst_element_factory_make ("audioconvert",  "converter");
  sink     = gst_element_factory_make ("autoaudiosink", "audio-output");

  if (!pipeline || !source || !demuxer || !decoder || !conv || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  /* Set up the pipeline */

  /* we set the input filename to the source element */
  g_object_set (G_OBJECT (source), "location", argv[1], NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* we add all elements into the pipeline */
  /* file-source | ogg-demuxer | vorbis-decoder | converter | alsa-output */
  gst_bin_add_many (GST_BIN (pipeline),
                    source, demuxer, decoder, conv, sink, NULL);

  /* we link the elements together */
  /* file-source -> ogg-demuxer ~> vorbis-decoder -> converter -> alsa-output */
  gst_element_link (source, demuxer);
  gst_element_link_many (decoder, conv, sink, NULL);
  g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), decoder);

  /* note that the demuxer will be linked to the decoder dynamically.
     The reason is that Ogg may contain various streams (for example
     audio and video). The source pad(s) will be created at run time,
     by the demuxer when it detects the amount and nature of streams.
     Therefore we connect a callback function which will be executed
     when the "pad-added" is emitted.*/


  /* Set the pipeline to "playing" state*/
  g_print ("Now playing: %s\n", argv[1]);
  gst_element_set_state (pipeline, GST_STATE_PLAYING);


  /* Iterate */
  g_print ("Running...\n");
  g_main_loop_run (loop);


  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);

  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));

  return 0;
}
(此处原为上方 C 代码清单的逐行机器翻译乱码副本,内容与 L7–L149 的代码完全重复且无法编译,已替换为本说明;完整代码请参见上面的原始清单。)

问题到底是什么

在使用pulseaudio的linux上,它非常简单

$ gst-launch pulsesrc ! filesink location=dump.raw
$ gst-launch pulsesrc ! audioconvert ! vorbisenc ! oggmux ! filesink location=dump.ogg

您还可以使用以下管道:

gst-launch osssrc device=<mic i/p dev> ! audioconvert ! vorbisenc ! oggmux ! filesink location=dump.ogg

哈哈,天才,它起作用了!你正好回答了这个问题。但不幸的是,我必须编写一个跨平台的应用程序——如果不是在 Linux 上,我该怎么做呢?—— @Stackfan:你可以使用 `autoaudiosrc` 代替 `pulsesrc`(我希望如此)。