当前位置: 首页 > news >正文

FFmpeg+javacpp中纯音频播放

FFmpeg+javacpp中纯音频播放

  • 1. Java Sound播放
  • 2、整合音频信息AudioInfo
  • 3、添加ExecutorService执行播放

FFmpeg+javacpp+javacv使用
FFmpeg+javacpp中FFmpegFrameGrabber
FFmpeg+javacpp中仿ffplay播放

JavaCV 1.5.12 API
JavaCPP Presets for FFmpeg 7.1.1-1.5.12 API

1. Java Sound播放

如FFmpeg+javacpp中仿ffplay播放中 2.1 grabSamples() 播放案例
gitee: xhbruce/JavacvAddFFmpegDemo

public class AudioPlayer implements Runnable {private final String filePath;public AudioPlayer(String filePath) {this.filePath = filePath;//AvLog.close();}public void playAudio() throws Exception {FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(filePath);grabber.start();int audioBitrate = grabber.getAudioBitrate(); // 获取音频流的比特率(bitrate)信息int audioChannels = grabber.getAudioChannels(); // 获取当前音频流的通道数量(声道数)int audioCodec = grabber.getAudioCodec(); //String audioCodecName = grabber.getAudioCodecName(); // 获取当前音频流所使用的音频编码器(Codec)名称double audioFrameRate = grabber.getAudioFrameRate(); //Map<String, String> audioMetadata = grabber.getAudioMetadata(); //Map<String, Buffer> audioSideData = grabber.getAudioSideData(); //int lengthInAudioFrames = grabber.getLengthInAudioFrames(); // 获取音频流的总帧数(以音频帧为单位)boolean hasAudio = grabber.hasAudio(); // 是否有音频流int sampleFormat = grabber.getSampleFormat(); // 获取音频采样格式(Sample Format)int sampleRate = grabber.getSampleRate(); // 获取音频流的采样率(Sample Rate)XLog.i("audioBitrate: " + audioBitrate + ", audioChannels: " + audioChannels + ", audioCodec: " + audioCodec + ", audioCodecName: " + audioCodecName+ ", audioFrameRate: " + audioFrameRate + ", audioMetadata=" + audioMetadata + ", audioSideData=" + audioSideData + ", lengthInAudioFrames: " + lengthInAudioFrames+ ", hasAudio=" + hasAudio + ", sampleFormat: " + sampleFormat + ",sampleRate: " + sampleRate);AudioFormat audioFormat = AudioUtil.toAudioFormat(sampleFormat, sampleRate, audioChannels);XLog.d("audioFormat: " + audioFormat);DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat);SourceDataLine audioLine = (SourceDataLine) AudioSystem.getLine(info);audioLine.open(audioFormat);audioLine.start();Frame frame;long totalDurationMs = grabber.getLengthInTime(); // 获取总时长(毫秒)XLog.d("音频总时长: " + totalDurationMs + " ms -- " + AudioUtil.getDurationString(totalDurationMs));while ((frame = grabber.grabSamples()) != null) {if (frame.samples == null || frame.samples.length == 0) 
continue;Buffer buffer = frame.samples[0];byte[] audioBytes = AudioUtil.toByteArrayApplySourceDataLine(buffer);audioLine.write(audioBytes, 0, audioBytes.length);}audioLine.drain();audioLine.stop();audioLine.close();grabber.stop();}@Overridepublic void run() {try {playAudio();} catch (Exception e) {throw new RuntimeException(e);}}public static void main(String[] args) {String musicUrl = "F:\\Music\\Let Me Down Slowly.mp3";
//        String musicUrl = "F:\\Music\\张碧晨 - 凉凉.flac";
//        String musicUrl = "F:\\Music\\爱得比你深 - 张学友.ape";new Thread(new AudioPlayer(musicUrl)).start();}
}

2、整合音频信息AudioInfo

audioBitrate = grabber.getAudioBitrate(); // 获取音频流的比特率(bitrate)信息
audioChannels = grabber.getAudioChannels(); // 获取当前音频流的通道数量(声道数)
audioCodec = grabber.getAudioCodec(); //
audioCodecName = grabber.getAudioCodecName(); // 获取当前音频流所使用的音频编码器(Codec)名称
audioFrameRate = grabber.getAudioFrameRate(); //
metadata = new Metadata(grabber.getMetadata()); //
audioMetadata = grabber.getAudioMetadata(); //
audioSideData = grabber.getAudioSideData(); //
lengthInAudioFrames = grabber.getLengthInAudioFrames(); // 获取音频流的总帧数(以音频帧为单位)
sampleFormat = grabber.getSampleFormat(); // 获取音频采样格式(Sample Format)
sampleRate = grabber.getSampleRate(); // 获取音频流的采样率(Sample Rate)
ext = grabber.getFormat(); //grabber.getFormatContext().iformat().name().getString();
totalDurationMs = grabber.getLengthInTime(); // 获取总时长(微秒,AV_TIME_BASE 单位,并非毫秒)

https://gitee.com/xhbruce/JavacvAddFFmpegDemo/blob/master/src/main/java/org/xhbruce/ffmpeg/info/AudioInfo.java

import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.xhbruce.utils.AudioUtil;import java.nio.Buffer;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Map;/*** @author xhbruce*/
public class AudioInfo {private static final String TAG = "AudioInfo";public FFmpegFrameGrabber grabber;public Path filePath;public String ext; //文件格式public int audioBitrate;public int audioChannels;public int audioCodec;public String audioCodecName;public double audioFrameRate;Metadata metadata;// tag信息Map<String, String> audioMetadata;Map<String, Buffer> audioSideData;public int lengthInAudioFrames;public int sampleFormat;public int sampleRate;public long totalDurationMs;public long startTime;public AudioInfo(String url) {grabber = new FFmpegFrameGrabber(url);filePath = Paths.get(url);try {init();} catch (FFmpegFrameGrabber.Exception e) {throw new RuntimeException(e);}}private void init() throws FFmpegFrameGrabber.Exception {grabber.start();if (grabber.hasAudio()/*是否有音频流*/) {audioBitrate = grabber.getAudioBitrate(); // 获取音频流的比特率(bitrate)信息audioChannels = grabber.getAudioChannels(); // 获取当前音频流的通道数量(声道数)audioCodec = grabber.getAudioCodec(); //audioCodecName = grabber.getAudioCodecName(); // 获取当前音频流所使用的音频编码器(Codec)名称audioFrameRate = grabber.getAudioFrameRate(); //metadata = new Metadata(grabber.getMetadata()); //audioMetadata = grabber.getAudioMetadata(); //audioSideData = grabber.getAudioSideData(); //lengthInAudioFrames = grabber.getLengthInAudioFrames(); // 获取音频流的总帧数(以音频帧为单位)sampleFormat = grabber.getSampleFormat(); // 获取音频采样格式(Sample Format)sampleRate = grabber.getSampleRate(); // 获取音频流的采样率(Sample Rate)ext = grabber.getFormat(); //grabber.getFormatContext().iformat().name().getString();totalDurationMs = grabber.getLengthInTime(); // 获取总时长(毫秒)startTime = grabber.getFormatContext().start_time();}grabber.stop();}public String getMetadata(String key) {return metadata.getValue(key);}@Overridepublic String toString() {StringBuilder stringBuffer = new StringBuilder("AudioInfo [\nInput #0, " + ext + "(" + audioCodecName + "), from '" + filePath.toAbsolutePath() + "': title: " + getMetadata(Metadata.TITLE) + ", artist: " + getMetadata(Metadata.ARTIST) + 
"\n");stringBuffer.append(metadata.toString());stringBuffer.append("Duration: ").append(AudioUtil.getDurationString(totalDurationMs)).append(", start: ").append(AudioUtil.getStartTimeString(startTime)).append(", sampleRate: ").append(sampleRate).append(" HZ").append(", bitrate: ").append(audioBitrate / 1000).append(" kb/s\n");if (!audioMetadata.isEmpty()) stringBuffer.append("AudioMetadata: ").append(audioMetadata.size()).append("\n");for (Map.Entry<String, String> entry : audioMetadata.entrySet()) {stringBuffer.append(String.format("%-14s%s", "\t" + entry.getKey(), ":\t" + entry.getValue())).append("\n");}if (!audioSideData.isEmpty()) stringBuffer.append("AudioSideData: ").append(audioSideData.size()).append("\n");for (Map.Entry<String, Buffer> entry : audioSideData.entrySet()) {stringBuffer.append(String.format("%-14s%s", "\t" + entry.getKey(), ":\t" + entry.getValue())).append("\n");}stringBuffer.append("]");return stringBuffer.toString();}
}
import org.bytedeco.ffmpeg.avformat.AVFormatContext;
import org.bytedeco.ffmpeg.avutil.AVDictionary;
import org.bytedeco.ffmpeg.avutil.AVDictionaryEntry;import java.util.HashMap;
import java.util.Map;import static org.bytedeco.ffmpeg.global.avutil.*;/*** @author xhbruce** ffmpeg-6.0\libavformat\avformat.h* album        -- name of the set this work belongs to* album_artist -- main creator of the set/album, if different from artist.* e.g. "Various Artists" for compilation albums.* artist       -- main creator of the work* comment      -- any additional description of the file.* composer     -- who composed the work, if different from artist.* copyright    -- name of copyright holder.* creation_time-- date when the file was created, preferably in ISO 8601.* date         -- date when the work was created, preferably in ISO 8601.* disc         -- number of a subset, e.g. disc in a multi-disc collection.* encoder      -- name/settings of the software/hardware that produced the file.* encoded_by   -- person/group who created the file.* filename     -- original name of the file.* genre        -- <self-evident>.* language     -- main language in which the work is performed, preferably* in ISO 639-2 format. Multiple languages can be specified by* separating them with commas.* performer    -- artist who performed the work, if different from artist.* E.g for "Also sprach Zarathustra", artist would be "Richard* Strauss" and performer "London Philharmonic Orchestra".* publisher    -- name of the label/publisher.* service_name     -- name of the service in broadcasting (channel name).* service_provider -- name of the service provider in broadcasting.* title        -- name of the work.* track        -- number of this work in the set, can be in form current/total.* variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of*/
public class Metadata {private AVDictionary avDictionary;private Map<String, String> metadata = new HashMap<String, String>();public static final String ALBUM = "album";public static final String ALBUM_ARTIST = "album_artist";public static final String ARTIST = "artist";public static final String COMMENT = "comment";public static final String COMPOSER = "composer";public static final String COMPYRIGHT = "copyright";public static final String CREATION_TIME = "creation_time";public static final String DATE = "date";public static final String DISC = "disc";public static final String ENCODER = "encoder";public static final String ENCODER_BY = "encoded_by";public static final String FILENAME = "filename";public static final String GENRE = "genre";public static final String LANGUAGE = "language";public static final String PERFORMER = "performer";public static final String PUBLISHER = "publisher";public static final String SERVICE_NAME = "service_name";public static final String SERVICE_PROVIDER = "service_provider";public static final String TITLE = "title";public static final String TRACK = "track";public static final String VARIANT_BITRATE = "variant_bitrate";private int dictCount;public Metadata(AVFormatContext formatContext) {this(formatContext.metadata());}public Metadata(AVDictionary dictionary) {avDictionary = dictionary;dictCount = av_dict_count(avDictionary);initMetadate();}private void initMetadate() {metadata = new HashMap<String, String>();AVDictionaryEntry tag = null;while ((tag = av_dict_iterate(avDictionary, tag)) != null) {String key = tag.key().getString();String value = tag.value().getString();metadata.put(key, value);}}public Metadata(Map<String, String> metadata) {this.metadata = metadata;dictCount = metadata.size();}/*** or av_dict_get(avDictionary, key, null, 0);*/public String getValue(String key) {return metadata.get(key);}public int size() {return dictCount;}@Overridepublic String toString() {StringBuilder stringBuffer = new 
StringBuilder();stringBuffer.append("Metadata: ").append(dictCount).append("\n");for (Map.Entry<String, String> entry : metadata.entrySet()) {stringBuffer.append(String.format("%-14s%s", "\t" + entry.getKey(), ":\t" + entry.getValue())).append("\n");}return stringBuffer.toString();}
}

3、添加ExecutorService执行播放

implementation 'com.github.goxr3plus:java-stream-player:+' 实质也是Java Sound播放,作为参考。

  • ExecutorService audioPlayerExecutorService处理音频播放
  • ExecutorService eventsExecutorService处理音频播放状态
public class AudioPlayer implements AudioPlayerInterface, Callable<Void> {private AudioInfo audioInfo;private volatile AudioInputStream audioInputStream;private AudioFormat audioFormat;private DataLine.Info info;private SourceDataLine audioLine;private final Object audioLock = new Object();/*** audio 播放服务*/private final ExecutorService audioPlayerExecutorService;/*** audio 播放状态更新服务*/private final ExecutorService eventsExecutorService;/*** 通知监听事件*/private final ArrayList<AudioPlayerListener> listeners;public AudioPlayer() {audioPlayerExecutorService = Executors.newSingleThreadExecutor();eventsExecutorService = Executors.newSingleThreadExecutor();listeners = new ArrayList<>();}@Overridepublic void addStreamPlayerListener(AudioPlayerListener audioPlayerListener) {listeners.add(audioPlayerListener);}@Overridepublic void removeStreamPlayerListener(AudioPlayerListener audioPlayerListener) {if (listeners != null) {listeners.remove(audioPlayerListener);}}public void open(String filePath) {open(new File(filePath));}@Overridepublic void open(File file) {audioInfo = new AudioInfo(file);initAudioInputStream();try {audioInfo.grabber.start();} catch (FFmpegFrameGrabber.Exception e) {throw new RuntimeException(e);}}private void initAudioInputStream() {audioFormat = AudioUtil.toAudioFormat(audioInfo.sampleFormat, audioInfo.sampleRate, audioInfo.audioChannels);createAudioLine();}private void createAudioLine() {try {info = new DataLine.Info(SourceDataLine.class, audioFormat);audioLine = (SourceDataLine) AudioSystem.getLine(info);XLog.i("Line : " + audioLine);XLog.i("Line Info : " + info);XLog.i("Line AudioFormat: " + audioFormat);audioLine.open(audioFormat);} catch (LineUnavailableException e) {throw new RuntimeException(e);}}@Overridepublic void play() {audioLine.start();audioPlayerExecutorService.submit(this);}@Overridepublic boolean pause() {return false;}@Overridepublic boolean resume() {return false;}@Overridepublic void stop() {}@Overridepublic Void call() throws 
FFmpegFrameGrabber.Exception {XLog.d("响应 audioPlayerExecutorService");synchronized (audioLock) {Frame frame;while ((frame = audioInfo.grabber.grabSamples()) != null) {if (frame.samples == null || frame.samples.length == 0) continue;Buffer buffer = frame.samples[0];byte[] audioBytes = AudioUtil.toByteArrayApplySourceDataLine(buffer);audioLine.write(audioBytes, 0, audioBytes.length);}}audioLine.drain();audioLine.stop();audioLine.close();audioInfo.grabber.stop();return null;}
}
http://www.lryc.cn/news/608351.html

相关文章:

  • 互联网医院系统,互联网医院好处有哪些?
  • 音视频学习(四十八):PCM和WAV
  • CatBoost 完整解析:类别特征友好的梯度提升框架
  • 基于单片机智能雨刷器/汽车刮水器设计
  • zset 中特殊的操作
  • nodejs读写文件
  • 【redis】基于工业界技术分享的内容总结
  • C++ 模板初阶
  • 阿里云:Ubuntu系统部署宝塔
  • 回归预测 | Matlab实现CNN-LSTM-self-Attention多变量回归预测
  • ventoy 是一个非常棒的开源工具,可以制作多系统的usb启动盘
  • 基于落霞归雁思维框架的软件需求管理实践指南
  • Vulnhub ELECTRICAL靶机复现(附提权)
  • 计算机技术与软件专业技术资格(水平)考试简介
  • Dispersive Loss:为生成模型引入表示学习 | 如何分析kaiming新提出的dispersive loss,对扩散模型和aigc会带来什么影响?
  • 《React+TypeScript实战:前端状态管理的安全架构与性能优化深解》
  • 【Unity3D实例-功能-移动】小兵移动-通过鼠标点击进行
  • 咨询进阶——解读57页企业发展战略咨询常用工具【附全文阅读】
  • Java Optional 类教程详解
  • C++ vector底层实现与迭代器失效问题
  • 【智能体cooragent】新智能体创建相关代码解析
  • Node.js 操作 MongoDB
  • Linux系统编程Day3-- Linux常用操作(终)
  • 2025-08 安卓开发面试拷打记录(面试题)
  • 3 使用 Jenkins 构建镜像:将你的应用打包成镜像
  • K8S部署ELK(三):部署Elasticsearch搜索引擎
  • 【机器学习】非线性分类算法(上):KNN(基于距离相似度)与朴素(特征独立)贝叶斯(基于概率统计)
  • 排序算法-堆排序
  • SQL 四大语言分类详解:DDL、DML、DCL、DQL
  • 分布在内侧内嗅皮层的层Ⅱ或层Ⅲ的头部方向细胞(head direction cells)对NLP中的深层语义分析的积极影响和启示