
A Detailed Look at Several Approaches to Muxing Audio and Video on Android


Preface

I recently ran into an audio/video-processing requirement at work: muxing audio and video on Android. The approaches I surveyed fall into three main categories: MediaMuxer (hardware codec), mp4parser, and FFmpeg. All three can do the job, but each comes with its own limitations and problems, so I am recording the implementations and their issues here for later study. Without further ado, here are the details.

Method 1 (fail)

Use MediaMuxer to mux the audio and video.

Result: the merge itself works, and playback is normal in Android's native VideoView and SurfaceView as well as in most players. However, after uploading to YouTube a problem appears: the audio is discontinuous. YouTube re-compresses every upload, and the audio frame timing apparently goes wrong during that re-compression.

Analysis: the presentationTimeUs written into MediaCodec.BufferInfo is computed incorrectly, so YouTube's re-compression scrambles the audio. (Method 2 below fixes exactly this by stamping each sample with its actual getSampleTime().)

public static void muxVideoAndAudio(String videoPath, String audioPath, String muxPath) {
    try {
        // Find the video track in the source file.
        MediaExtractor videoExtractor = new MediaExtractor();
        videoExtractor.setDataSource(videoPath);
        MediaFormat videoFormat = null;
        int videoTrackIndex = -1;
        int videoTrackCount = videoExtractor.getTrackCount();
        for (int i = 0; i < videoTrackCount; i++) {
            videoFormat = videoExtractor.getTrackFormat(i);
            String mimeType = videoFormat.getString(MediaFormat.KEY_MIME);
            if (mimeType.startsWith("video/")) {
                videoTrackIndex = i;
                break;
            }
        }
        // Find the audio track in the source file.
        MediaExtractor audioExtractor = new MediaExtractor();
        audioExtractor.setDataSource(audioPath);
        MediaFormat audioFormat = null;
        int audioTrackIndex = -1;
        int audioTrackCount = audioExtractor.getTrackCount();
        for (int i = 0; i < audioTrackCount; i++) {
            audioFormat = audioExtractor.getTrackFormat(i);
            String mimeType = audioFormat.getString(MediaFormat.KEY_MIME);
            if (mimeType.startsWith("audio/")) {
                audioTrackIndex = i;
                break;
            }
        }
        videoExtractor.selectTrack(videoTrackIndex);
        audioExtractor.selectTrack(audioTrackIndex);
        MediaCodec.BufferInfo videoBufferInfo = new MediaCodec.BufferInfo();
        MediaCodec.BufferInfo audioBufferInfo = new MediaCodec.BufferInfo();
        MediaMuxer mediaMuxer = new MediaMuxer(muxPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
        int writeVideoTrackIndex = mediaMuxer.addTrack(videoFormat);
        int writeAudioTrackIndex = mediaMuxer.addTrack(audioFormat);
        mediaMuxer.start();
        ByteBuffer byteBuffer = ByteBuffer.allocate(500 * 1024);
        long sampleTime = 0;
        {
            // Estimate a fixed frame interval from the gap between two consecutive video samples.
            videoExtractor.readSampleData(byteBuffer, 0);
            if (videoExtractor.getSampleFlags() == MediaExtractor.SAMPLE_FLAG_SYNC) {
                videoExtractor.advance();
            }
            videoExtractor.readSampleData(byteBuffer, 0);
            long secondTime = videoExtractor.getSampleTime();
            videoExtractor.advance();
            long thirdTime = videoExtractor.getSampleTime();
            sampleTime = Math.abs(thirdTime - secondTime);
        }
        // Rewind the video track to the beginning.
        videoExtractor.unselectTrack(videoTrackIndex);
        videoExtractor.selectTrack(videoTrackIndex);
        while (true) {
            int readVideoSampleSize = videoExtractor.readSampleData(byteBuffer, 0);
            if (readVideoSampleSize < 0) {
                break;
            }
            videoBufferInfo.size = readVideoSampleSize;
            // This is the flaw: the timestamp advances by a fixed interval
            // instead of each sample's real getSampleTime().
            videoBufferInfo.presentationTimeUs += sampleTime;
            videoBufferInfo.offset = 0;
            //noinspection WrongConstant
            videoBufferInfo.flags = MediaCodec.BUFFER_FLAG_SYNC_FRAME; // videoExtractor.getSampleFlags()
            mediaMuxer.writeSampleData(writeVideoTrackIndex, byteBuffer, videoBufferInfo);
            videoExtractor.advance();
        }
        while (true) {
            int readAudioSampleSize = audioExtractor.readSampleData(byteBuffer, 0);
            if (readAudioSampleSize < 0) {
                break;
            }
            audioBufferInfo.size = readAudioSampleSize;
            // Same flaw: audio samples are paced at the *video* frame interval.
            audioBufferInfo.presentationTimeUs += sampleTime;
            audioBufferInfo.offset = 0;
            //noinspection WrongConstant
            audioBufferInfo.flags = MediaCodec.BUFFER_FLAG_SYNC_FRAME; // audioExtractor.getSampleFlags()
            mediaMuxer.writeSampleData(writeAudioTrackIndex, byteBuffer, audioBufferInfo);
            audioExtractor.advance();
        }
        mediaMuxer.stop();
        mediaMuxer.release();
        videoExtractor.release();
        audioExtractor.release();
    } catch (IOException e) {
        e.printStackTrace();
    }
}

Method 2 (success)
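This version differs from Method 1 in exactly the spot the analysis above pointed at: every sample is written with its real timestamp from getSampleTime() and its real flags from getSampleFlags(), rather than an accumulated fixed interval.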

public static void muxVideoAudio(String videoFilePath, String audioFilePath, String outputFile) {
    try {
        MediaExtractor videoExtractor = new MediaExtractor();
        videoExtractor.setDataSource(videoFilePath);
        MediaExtractor audioExtractor = new MediaExtractor();
        audioExtractor.setDataSource(audioFilePath);
        MediaMuxer muxer = new MediaMuxer(outputFile, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
        videoExtractor.selectTrack(0);
        MediaFormat videoFormat = videoExtractor.getTrackFormat(0);
        int videoTrack = muxer.addTrack(videoFormat);
        audioExtractor.selectTrack(0);
        MediaFormat audioFormat = audioExtractor.getTrackFormat(0);
        int audioTrack = muxer.addTrack(audioFormat);
        LogUtil.d(TAG, "Video format " + videoFormat.toString());
        LogUtil.d(TAG, "Audio format " + audioFormat.toString());
        boolean sawEOS = false;
        int frameCount = 0;
        int offset = 100;
        int sampleSize = 256 * 1024;
        ByteBuffer videoBuf = ByteBuffer.allocate(sampleSize);
        ByteBuffer audioBuf = ByteBuffer.allocate(sampleSize);
        MediaCodec.BufferInfo videoBufferInfo = new MediaCodec.BufferInfo();
        MediaCodec.BufferInfo audioBufferInfo = new MediaCodec.BufferInfo();
        videoExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC);
        audioExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC);
        muxer.start();
        // Copy every video sample, preserving its real timestamp and flags.
        while (!sawEOS) {
            videoBufferInfo.offset = offset;
            videoBufferInfo.size = videoExtractor.readSampleData(videoBuf, offset);
            if (videoBufferInfo.size < 0) {
                sawEOS = true;
                videoBufferInfo.size = 0;
            } else {
                videoBufferInfo.presentationTimeUs = videoExtractor.getSampleTime();
                //noinspection WrongConstant
                videoBufferInfo.flags = videoExtractor.getSampleFlags();
                muxer.writeSampleData(videoTrack, videoBuf, videoBufferInfo);
                videoExtractor.advance();
                frameCount++;
            }
        }

        // Then copy every audio sample the same way.
        boolean sawEOS2 = false;
        int frameCount2 = 0;
        while (!sawEOS2) {
            frameCount2++;
            audioBufferInfo.offset = offset;
            audioBufferInfo.size = audioExtractor.readSampleData(audioBuf, offset);
            if (audioBufferInfo.size < 0) {
                sawEOS2 = true;
                audioBufferInfo.size = 0;
            } else {
                audioBufferInfo.presentationTimeUs = audioExtractor.getSampleTime();
                //noinspection WrongConstant
                audioBufferInfo.flags = audioExtractor.getSampleFlags();
                muxer.writeSampleData(audioTrack, audioBuf, audioBufferInfo);
                audioExtractor.advance();
            }
        }
        muxer.stop();
        muxer.release();
        LogUtil.d(TAG, "Output: " + outputFile);
    } catch (IOException e) {
        LogUtil.d(TAG, "Mixer error 1 " + e.getMessage());
    } catch (Exception e) {
        LogUtil.d(TAG, "Mixer error 2 " + e.getMessage());
    }
}
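A minimal call site, as a sketch: the paths below are hypothetical, and since extraction and muxing block until finished, the call belongs on a background thread. MediaMuxer's MPEG-4 output also expects the inputs to already carry compatible compressed tracks (typically H.264 video and AAC audio).

// Hypothetical paths; muxing blocks, so keep it off the UI thread.
new Thread(new Runnable() {
    @Override
    public void run() {
        muxVideoAudio("/sdcard/input_video.mp4",
                "/sdcard/input_audio.m4a",
                "/sdcard/muxed_output.mp4");
    }
}).start();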

Method 3

Implemented with mp4parser.

mp4parser is an open-source video-processing toolbox. Its methods rely on other pieces of the toolbox, so you have to bring the whole library into your own project before you can call them:

compile "com.googlecode.mp4parser:isoparser:1.1.21"
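(On newer versions of the Android Gradle plugin, compile is deprecated in favor of implementation, so the same dependency would read: implementation "com.googlecode.mp4parser:isoparser:1.1.21".)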

Problem: after YouTube's re-compression, the video data is badly truncated; in most cases only about one second survives, effectively turning the video into a still image. (sigh)

public boolean mux(String videoFile, String audioFile, final String outputFile) {
    if (isStopMux) {
        return false;
    }
    Movie video;
    try {
        video = MovieCreator.build(videoFile);
    } catch (RuntimeException e) {
        e.printStackTrace();
        return false;
    } catch (IOException e) {
        e.printStackTrace();
        return false;
    }
    Movie audio;
    try {
        audio = MovieCreator.build(audioFile);
    } catch (IOException e) {
        e.printStackTrace();
        return false;
    } catch (NullPointerException e) {
        e.printStackTrace();
        return false;
    }
    // Append the audio track to the video movie and build the merged container.
    Track audioTrack = audio.getTracks().get(0);
    video.addTrack(audioTrack);
    Container out = new DefaultMp4Builder().build(video);
    FileOutputStream fos;
    try {
        fos = new FileOutputStream(outputFile);
    } catch (FileNotFoundException e) {
        e.printStackTrace();
        return false;
    }
    BufferedWritableFileByteChannel byteBufferByteChannel =
            new BufferedWritableFileByteChannel(fos);
    try {
        out.writeContainer(byteBufferByteChannel);
        byteBufferByteChannel.close();
        fos.close();
        if (isStopMux) {
            return false;
        }
        runOnUiThread(new Runnable() {
            @Override
            public void run() {
                mCustomProgressDialog.setProgress(100);
                goShareActivity(outputFile);
//              FileUtils.insertMediaDB(AddAudiosActivity.this, outputFile);
            }
        });
    } catch (IOException e) {
        e.printStackTrace();
        if (mCustomProgressDialog.isShowing()) {
            mCustomProgressDialog.dismiss();
        }
        ToastUtil.showShort(getString(R.string.process_failed));
        return false;
    }
    return true;
}
private static class BufferedWritableFileByteChannel implements WritableByteChannel {
    private static final int BUFFER_CAPACITY = 2000000;
    private boolean isOpen = true;
    private final OutputStream outputStream;
    private final ByteBuffer byteBuffer;
    private final byte[] rawBuffer = new byte[BUFFER_CAPACITY];

    private BufferedWritableFileByteChannel(OutputStream outputStream) {
        this.outputStream = outputStream;
        this.byteBuffer = ByteBuffer.wrap(rawBuffer);
    }

    @Override
    public int write(ByteBuffer inputBuffer) throws IOException {
        int inputBytes = inputBuffer.remaining();
        // Flush the internal buffer to disk when the incoming chunk will not fit.
        if (inputBytes > byteBuffer.remaining()) {
            dumpToFile();
            byteBuffer.clear();
            if (inputBytes > byteBuffer.remaining()) {
                throw new BufferOverflowException();
            }
        }
        byteBuffer.put(inputBuffer);
        return inputBytes;
    }

    @Override
    public boolean isOpen() {
        return isOpen;
    }

    @Override
    public void close() throws IOException {
        dumpToFile();
        isOpen = false;
    }

    private void dumpToFile() {
        try {
            outputStream.write(rawBuffer, 0, byteBuffer.position());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
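The BufferedWritableFileByteChannel wrapper exists because writeContainer() issues a very large number of small writes; batching them into 2 MB chunks (BUFFER_CAPACITY) means the underlying FileOutputStream is not hit once per tiny write.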

Method 4

Use FFmpeg.

FFmpeg has a rich set of codec plugins and detailed documentation, and debugging a single FFmpeg command line is far simpler than debugging the large volume of complex codec code the alternative requires (yes, implementing this with MediaCodec is genuinely long-winded and tedious).

Merge video/audio and retain both audio tracks.
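A sketch of the kind of command this refers to (file names are placeholders): the amix filter mixes the video's original audio with the new track, while -c:v copy passes the video stream through untouched, so only the audio needs re-encoding.

ffmpeg -i input_video.mp4 -i extra_audio.mp3 \
    -filter_complex "[0:a][1:a]amix=inputs=2:duration=first[aout]" \
    -map 0:v -map "[aout]" -c:v copy output.mp4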

This works and is highly compatible, but because FFmpeg does everything in software, the merge is unbearably slow, and I don't yet know enough about FFmpeg optimization to fix that. (sigh)

Summary

That's the whole article. I hope its content is a useful reference for your study or work. If you have any questions, feel free to leave a comment. Thank you for supporting 移动技术网.
