Pure audio playback with FFmpeg + javacpp
- 1. Java Sound playback
- 2. Consolidating audio information into AudioInfo
- 3. Playing back via an ExecutorService
Related articles and API references:
- FFmpeg+javacpp+javacv使用
- FFmpeg+javacpp中FFmpegFrameGrabber
- FFmpeg+javacpp中仿ffplay播放
- JavaCV 1.5.12 API
- JavaCPP Presets for FFmpeg 7.1.1-1.5.12 API
1. Java Sound playback
Following section 2.1 grabSamples() of FFmpeg+javacpp中仿ffplay播放, grabSamples() pulls decoded audio frames from FFmpegFrameGrabber and their samples are written to a javax.sound.sampled.SourceDataLine. A playback example:
gitee: xhbruce/JavacvAddFFmpegDemo
public class AudioPlayer implements Runnable {
    private final String filePath;

    public AudioPlayer(String filePath) {
        this.filePath = filePath;
        //AvLog.close();
    }

    public void playAudio() throws Exception {
        FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(filePath);
        grabber.start();
        int audioBitrate = grabber.getAudioBitrate();               // bitrate of the audio stream
        int audioChannels = grabber.getAudioChannels();             // number of audio channels
        int audioCodec = grabber.getAudioCodec();                   // codec id of the audio stream
        String audioCodecName = grabber.getAudioCodecName();        // name of the audio codec in use
        double audioFrameRate = grabber.getAudioFrameRate();        // audio frame rate
        Map<String, String> audioMetadata = grabber.getAudioMetadata();
        Map<String, Buffer> audioSideData = grabber.getAudioSideData();
        int lengthInAudioFrames = grabber.getLengthInAudioFrames(); // total length in audio frames
        boolean hasAudio = grabber.hasAudio();                      // whether an audio stream is present
        int sampleFormat = grabber.getSampleFormat();               // sample format
        int sampleRate = grabber.getSampleRate();                   // sample rate
        XLog.i("audioBitrate: " + audioBitrate + ", audioChannels: " + audioChannels + ", audioCodec: " + audioCodec + ", audioCodecName: " + audioCodecName
                + ", audioFrameRate: " + audioFrameRate + ", audioMetadata=" + audioMetadata + ", audioSideData=" + audioSideData + ", lengthInAudioFrames: " + lengthInAudioFrames
                + ", hasAudio=" + hasAudio + ", sampleFormat: " + sampleFormat + ", sampleRate: " + sampleRate);

        AudioFormat audioFormat = AudioUtil.toAudioFormat(sampleFormat, sampleRate, audioChannels);
        XLog.d("audioFormat: " + audioFormat);
        DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat);
        SourceDataLine audioLine = (SourceDataLine) AudioSystem.getLine(info);
        audioLine.open(audioFormat);
        audioLine.start();

        Frame frame;
        long totalDurationMs = grabber.getLengthInTime(); // total duration (getLengthInTime() is in microseconds)
        XLog.d("total audio duration: " + totalDurationMs + " -- " + AudioUtil.getDurationString(totalDurationMs));
        while ((frame = grabber.grabSamples()) != null) {
            if (frame.samples == null || frame.samples.length == 0) continue;
            Buffer buffer = frame.samples[0];
            byte[] audioBytes = AudioUtil.toByteArrayApplySourceDataLine(buffer);
            audioLine.write(audioBytes, 0, audioBytes.length);
        }
        audioLine.drain();
        audioLine.stop();
        audioLine.close();
        grabber.stop();
    }

    @Override
    public void run() {
        try {
            playAudio();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] args) {
        String musicUrl = "F:\\Music\\Let Me Down Slowly.mp3";
//        String musicUrl = "F:\\Music\\张碧晨 - 凉凉.flac";
//        String musicUrl = "F:\\Music\\爱得比你深 - 张学友.ape";
        new Thread(new AudioPlayer(musicUrl)).start();
    }
}
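The example depends on two helpers from the repository that are not listed in this post: AudioUtil.toAudioFormat(...), which maps FFmpeg's sample format onto a javax.sound.sampled.AudioFormat, and AudioUtil.toByteArrayApplySourceDataLine(...), which turns the sample Buffer from grabSamples() into the byte[] that SourceDataLine.write() expects. The sketch below is only a guess at what such helpers could look like, not the repo's actual AudioUtil: the class and method names (SimpleAudioConvert, toBytes) are made up, only signed 16-bit and float formats are handled, and, like the loop above, it only consumes the first sample plane.

import javax.sound.sampled.AudioFormat;

import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;
import java.nio.ShortBuffer;

import static org.bytedeco.ffmpeg.global.avutil.*;

public class SimpleAudioConvert {

    // Map an FFmpeg sample format id onto a PCM AudioFormat a SourceDataLine can open.
    public static AudioFormat toAudioFormat(int sampleFormat, int sampleRate, int channels) {
        switch (sampleFormat) {
            case AV_SAMPLE_FMT_S16:
            case AV_SAMPLE_FMT_S16P:
                return new AudioFormat(sampleRate, 16, channels, true, false); // signed 16-bit PCM, little-endian
            case AV_SAMPLE_FMT_FLT:
            case AV_SAMPLE_FMT_FLTP:
                // Java Sound has no float PCM line, so float samples are rescaled to 16-bit in toBytes()
                return new AudioFormat(sampleRate, 16, channels, true, false);
            default:
                throw new IllegalArgumentException("unsupported sample format: " + sampleFormat);
        }
    }

    // Convert the Buffer returned by grabSamples() into bytes for SourceDataLine.write().
    // For planar multi-channel audio this simplification drops every plane but the first.
    public static byte[] toBytes(Buffer samples) {
        if (samples instanceof ShortBuffer) {
            ShortBuffer sb = (ShortBuffer) samples;
            ByteBuffer out = ByteBuffer.allocate(sb.remaining() * 2).order(ByteOrder.LITTLE_ENDIAN);
            while (sb.hasRemaining()) out.putShort(sb.get());
            return out.array();
        } else if (samples instanceof FloatBuffer) {
            FloatBuffer fb = (FloatBuffer) samples;
            ByteBuffer out = ByteBuffer.allocate(fb.remaining() * 2).order(ByteOrder.LITTLE_ENDIAN);
            while (fb.hasRemaining()) {
                float v = Math.max(-1f, Math.min(1f, fb.get())); // clamp to [-1, 1]
                out.putShort((short) (v * Short.MAX_VALUE));
            }
            return out.array();
        }
        throw new IllegalArgumentException("unsupported sample buffer: " + samples);
    }
}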
2. Consolidating audio information into AudioInfo
AudioInfo wraps an FFmpegFrameGrabber and gathers the key properties of the audio stream in one place:

audioBitrate = grabber.getAudioBitrate();               // bitrate of the audio stream
audioChannels = grabber.getAudioChannels();             // number of audio channels
audioCodec = grabber.getAudioCodec();                   // codec id of the audio stream
audioCodecName = grabber.getAudioCodecName();           // name of the audio codec in use
audioFrameRate = grabber.getAudioFrameRate();           // audio frame rate
metadata = new Metadata(grabber.getMetadata());         // container-level tag information
audioMetadata = grabber.getAudioMetadata();             // metadata of the audio stream
audioSideData = grabber.getAudioSideData();             // side data of the audio stream
lengthInAudioFrames = grabber.getLengthInAudioFrames(); // total length in audio frames
sampleFormat = grabber.getSampleFormat();               // sample format
sampleRate = grabber.getSampleRate();                   // sample rate
ext = grabber.getFormat();                              // container format, cf. grabber.getFormatContext().iformat().name().getString()
totalDurationMs = grabber.getLengthInTime();            // total duration (getLengthInTime() is in microseconds)

Full source: https://gitee.com/xhbruce/JavacvAddFFmpegDemo/blob/master/src/main/java/org/xhbruce/ffmpeg/info/AudioInfo.java
import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.xhbruce.utils.AudioUtil;

import java.nio.Buffer;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Map;

/**
 * @author xhbruce
 */
public class AudioInfo {
    private static final String TAG = "AudioInfo";
    public FFmpegFrameGrabber grabber;
    public Path filePath;
    public String ext;                  // container/file format
    public int audioBitrate;
    public int audioChannels;
    public int audioCodec;
    public String audioCodecName;
    public double audioFrameRate;
    Metadata metadata;                  // container-level tag information
    Map<String, String> audioMetadata;
    Map<String, Buffer> audioSideData;
    public int lengthInAudioFrames;
    public int sampleFormat;
    public int sampleRate;
    public long totalDurationMs;
    public long startTime;

    public AudioInfo(String url) {
        grabber = new FFmpegFrameGrabber(url);
        filePath = Paths.get(url);
        try {
            init();
        } catch (FFmpegFrameGrabber.Exception e) {
            throw new RuntimeException(e);
        }
    }

    private void init() throws FFmpegFrameGrabber.Exception {
        grabber.start();
        if (grabber.hasAudio() /* has an audio stream */) {
            audioBitrate = grabber.getAudioBitrate();               // bitrate of the audio stream
            audioChannels = grabber.getAudioChannels();             // number of audio channels
            audioCodec = grabber.getAudioCodec();                   // codec id of the audio stream
            audioCodecName = grabber.getAudioCodecName();           // name of the audio codec in use
            audioFrameRate = grabber.getAudioFrameRate();           // audio frame rate
            metadata = new Metadata(grabber.getMetadata());         // container-level tags
            audioMetadata = grabber.getAudioMetadata();             // audio stream metadata
            audioSideData = grabber.getAudioSideData();             // audio stream side data
            lengthInAudioFrames = grabber.getLengthInAudioFrames(); // total length in audio frames
            sampleFormat = grabber.getSampleFormat();               // sample format
            sampleRate = grabber.getSampleRate();                   // sample rate
            ext = grabber.getFormat();                              // container format; cf. grabber.getFormatContext().iformat().name().getString()
            totalDurationMs = grabber.getLengthInTime();            // total duration (microseconds)
            startTime = grabber.getFormatContext().start_time();
        }
        grabber.stop();
    }

    public String getMetadata(String key) {
        return metadata.getValue(key);
    }

    @Override
    public String toString() {
        StringBuilder stringBuffer = new StringBuilder("AudioInfo [\nInput #0, " + ext + "(" + audioCodecName + "), from '" + filePath.toAbsolutePath()
                + "': title: " + getMetadata(Metadata.TITLE) + ", artist: " + getMetadata(Metadata.ARTIST) + "\n");
        stringBuffer.append(metadata.toString());
        stringBuffer.append("Duration: ").append(AudioUtil.getDurationString(totalDurationMs))
                .append(", start: ").append(AudioUtil.getStartTimeString(startTime))
                .append(", sampleRate: ").append(sampleRate).append(" HZ")
                .append(", bitrate: ").append(audioBitrate / 1000).append(" kb/s\n");
        if (!audioMetadata.isEmpty()) stringBuffer.append("AudioMetadata: ").append(audioMetadata.size()).append("\n");
        for (Map.Entry<String, String> entry : audioMetadata.entrySet()) {
            stringBuffer.append(String.format("%-14s%s", "\t" + entry.getKey(), ":\t" + entry.getValue())).append("\n");
        }
        if (!audioSideData.isEmpty()) stringBuffer.append("AudioSideData: ").append(audioSideData.size()).append("\n");
        for (Map.Entry<String, Buffer> entry : audioSideData.entrySet()) {
            stringBuffer.append(String.format("%-14s%s", "\t" + entry.getKey(), ":\t" + entry.getValue())).append("\n");
        }
        stringBuffer.append("]");
        return stringBuffer.toString();
    }
}
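AudioInfo keeps the container-level tags in a small Metadata helper, which wraps the tag map (or the underlying AVDictionary) and defines constants for the common keys documented in libavformat/avformat.h: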
import org.bytedeco.ffmpeg.avformat.AVFormatContext;
import org.bytedeco.ffmpeg.avutil.AVDictionary;
import org.bytedeco.ffmpeg.avutil.AVDictionaryEntry;

import java.util.HashMap;
import java.util.Map;

import static org.bytedeco.ffmpeg.global.avutil.*;

/**
 * @author xhbruce
 *
 * ffmpeg-6.0\libavformat\avformat.h
 * album            -- name of the set this work belongs to
 * album_artist     -- main creator of the set/album, if different from artist.
 *                     e.g. "Various Artists" for compilation albums.
 * artist           -- main creator of the work
 * comment          -- any additional description of the file.
 * composer         -- who composed the work, if different from artist.
 * copyright        -- name of copyright holder.
 * creation_time    -- date when the file was created, preferably in ISO 8601.
 * date             -- date when the work was created, preferably in ISO 8601.
 * disc             -- number of a subset, e.g. disc in a multi-disc collection.
 * encoder          -- name/settings of the software/hardware that produced the file.
 * encoded_by       -- person/group who created the file.
 * filename         -- original name of the file.
 * genre            -- <self-evident>.
 * language         -- main language in which the work is performed, preferably
 *                     in ISO 639-2 format. Multiple languages can be specified by
 *                     separating them with commas.
 * performer        -- artist who performed the work, if different from artist.
 *                     E.g for "Also sprach Zarathustra", artist would be "Richard
 *                     Strauss" and performer "London Philharmonic Orchestra".
 * publisher        -- name of the label/publisher.
 * service_name     -- name of the service in broadcasting (channel name).
 * service_provider -- name of the service provider in broadcasting.
 * title            -- name of the work.
 * track            -- number of this work in the set, can be in form current/total.
 * variant_bitrate  -- the total bitrate of the bitrate variant that the current stream is part of
 */
public class Metadata {
    private AVDictionary avDictionary;
    private Map<String, String> metadata = new HashMap<String, String>();

    public static final String ALBUM = "album";
    public static final String ALBUM_ARTIST = "album_artist";
    public static final String ARTIST = "artist";
    public static final String COMMENT = "comment";
    public static final String COMPOSER = "composer";
    public static final String COMPYRIGHT = "copyright";
    public static final String CREATION_TIME = "creation_time";
    public static final String DATE = "date";
    public static final String DISC = "disc";
    public static final String ENCODER = "encoder";
    public static final String ENCODER_BY = "encoded_by";
    public static final String FILENAME = "filename";
    public static final String GENRE = "genre";
    public static final String LANGUAGE = "language";
    public static final String PERFORMER = "performer";
    public static final String PUBLISHER = "publisher";
    public static final String SERVICE_NAME = "service_name";
    public static final String SERVICE_PROVIDER = "service_provider";
    public static final String TITLE = "title";
    public static final String TRACK = "track";
    public static final String VARIANT_BITRATE = "variant_bitrate";

    private int dictCount;

    public Metadata(AVFormatContext formatContext) {
        this(formatContext.metadata());
    }

    public Metadata(AVDictionary dictionary) {
        avDictionary = dictionary;
        dictCount = av_dict_count(avDictionary);
        initMetadate();
    }

    private void initMetadate() {
        metadata = new HashMap<String, String>();
        AVDictionaryEntry tag = null;
        while ((tag = av_dict_iterate(avDictionary, tag)) != null) {
            String key = tag.key().getString();
            String value = tag.value().getString();
            metadata.put(key, value);
        }
    }

    public Metadata(Map<String, String> metadata) {
        this.metadata = metadata;
        dictCount = metadata.size();
    }

    /**
     * or av_dict_get(avDictionary, key, null, 0);
     */
    public String getValue(String key) {
        return metadata.get(key);
    }

    public int size() {
        return dictCount;
    }

    @Override
    public String toString() {
        StringBuilder stringBuffer = new StringBuilder();
        stringBuffer.append("Metadata: ").append(dictCount).append("\n");
        for (Map.Entry<String, String> entry : metadata.entrySet()) {
            stringBuffer.append(String.format("%-14s%s", "\t" + entry.getKey(), ":\t" + entry.getValue())).append("\n");
        }
        return stringBuffer.toString();
    }
}
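To try the two classes out, construct an AudioInfo for a file and print it; toString() produces an ffprobe-style summary from the fields collected above. A minimal sketch (the class name AudioInfoDemo is illustrative; the path is the sample file used earlier):

public class AudioInfoDemo {
    public static void main(String[] args) {
        // Point this at any local audio file.
        AudioInfo info = new AudioInfo("F:\\Music\\Let Me Down Slowly.mp3");
        System.out.println(info);                                  // Input #0, duration, bitrate, tags...
        System.out.println("title : " + info.getMetadata(Metadata.TITLE));
        System.out.println("artist: " + info.getMetadata(Metadata.ARTIST));
    }
}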
3. Playing back via an ExecutorService
The java-stream-player library (implementation 'com.github.goxr3plus:java-stream-player:+') is, at its core, also Java Sound playback and is used here as a reference. Two single-thread executors are added: audioPlayerExecutorService drives the audio playback itself, and eventsExecutorService handles audio playback state updates.
public class AudioPlayer implements AudioPlayerInterface, Callable<Void> {
    private AudioInfo audioInfo;
    private volatile AudioInputStream audioInputStream;
    private AudioFormat audioFormat;
    private DataLine.Info info;
    private SourceDataLine audioLine;
    private final Object audioLock = new Object();

    /** executor that runs audio playback */
    private final ExecutorService audioPlayerExecutorService;
    /** executor that publishes playback state updates */
    private final ExecutorService eventsExecutorService;
    /** listeners to notify of playback events */
    private final ArrayList<AudioPlayerListener> listeners;

    public AudioPlayer() {
        audioPlayerExecutorService = Executors.newSingleThreadExecutor();
        eventsExecutorService = Executors.newSingleThreadExecutor();
        listeners = new ArrayList<>();
    }

    @Override
    public void addStreamPlayerListener(AudioPlayerListener audioPlayerListener) {
        listeners.add(audioPlayerListener);
    }

    @Override
    public void removeStreamPlayerListener(AudioPlayerListener audioPlayerListener) {
        if (listeners != null) {
            listeners.remove(audioPlayerListener);
        }
    }

    public void open(String filePath) {
        open(new File(filePath));
    }

    @Override
    public void open(File file) {
        audioInfo = new AudioInfo(file.getAbsolutePath());
        initAudioInputStream();
        try {
            audioInfo.grabber.start();
        } catch (FFmpegFrameGrabber.Exception e) {
            throw new RuntimeException(e);
        }
    }

    private void initAudioInputStream() {
        audioFormat = AudioUtil.toAudioFormat(audioInfo.sampleFormat, audioInfo.sampleRate, audioInfo.audioChannels);
        createAudioLine();
    }

    private void createAudioLine() {
        try {
            info = new DataLine.Info(SourceDataLine.class, audioFormat);
            audioLine = (SourceDataLine) AudioSystem.getLine(info);
            XLog.i("Line : " + audioLine);
            XLog.i("Line Info : " + info);
            XLog.i("Line AudioFormat: " + audioFormat);
            audioLine.open(audioFormat);
        } catch (LineUnavailableException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void play() {
        audioLine.start();
        audioPlayerExecutorService.submit(this);
    }

    @Override
    public boolean pause() {
        return false;
    }

    @Override
    public boolean resume() {
        return false;
    }

    @Override
    public void stop() {
    }

    @Override
    public Void call() throws FFmpegFrameGrabber.Exception {
        XLog.d("running on audioPlayerExecutorService");
        synchronized (audioLock) {
            Frame frame;
            while ((frame = audioInfo.grabber.grabSamples()) != null) {
                if (frame.samples == null || frame.samples.length == 0) continue;
                Buffer buffer = frame.samples[0];
                byte[] audioBytes = AudioUtil.toByteArrayApplySourceDataLine(buffer);
                audioLine.write(audioBytes, 0, audioBytes.length);
            }
        }
        audioLine.drain();
        audioLine.stop();
        audioLine.close();
        audioInfo.grabber.stop();
        return null;
    }
}
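With pause(), resume() and stop() still stubbed out, the player can already be driven end to end. A minimal sketch (the class name AudioPlayerDemo is illustrative; the sample path is the one used earlier):

public class AudioPlayerDemo {
    public static void main(String[] args) {
        AudioPlayer player = new AudioPlayer();
        // open() builds the AudioInfo, creates the SourceDataLine and restarts the grabber
        player.open("F:\\Music\\Let Me Down Slowly.mp3");
        // play() starts the line and submits call() to audioPlayerExecutorService;
        // decoding and writing to the line then happen on that executor thread
        player.play();
    }
}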