This commit is contained in:
zpc 2025-03-29 11:15:22 +08:00
parent ef17f5e10b
commit a39c5af74c
2 changed files with 443 additions and 302 deletions

View File

@ -43,6 +43,9 @@ namespace ShengShengBuXi.Hubs
/// </summary>
private static Timer _cleanupTimer;
private static bool isInitialized = false;
/// <summary>
/// 显示文本队列
/// </summary>
@ -265,6 +268,9 @@ namespace ShengShengBuXi.Hubs
_cleanupTimer = new Timer(CleanupOldProcessedPaths, null, TimeSpan.FromMinutes(30), TimeSpan.FromMinutes(30));
}
if (!isInitialized)
{
isInitialized = true;
// 从文件加载预设句子
LoadPresetSentencesFromFile();
@ -278,6 +284,7 @@ namespace ShengShengBuXi.Hubs
AppDomain.CurrentDomain.ProcessExit += (sender, e) => SaveMonitorTextQueueToFile();
AppDomain.CurrentDomain.DomainUnload += (sender, e) => SaveMonitorTextQueueToFile();
}
}
/// <summary>
/// 清理过期的处理记录
@ -513,8 +520,8 @@ namespace ShengShengBuXi.Hubs
if (_clients.TryAdd(Context.ConnectionId, clientInfo))
{
_logger.LogInformation($"已自动注册音频源客户端: {Context.ConnectionId}");
}
//return;
}
if (clientInfo.ClientType != ClientType.Controller)
@ -523,53 +530,90 @@ namespace ShengShengBuXi.Hubs
return;
}
// 处理音频数据
var config = _configurationService.CurrentConfig.Recording;
await _audioProcessingService.ProcessAudioDataAsync(audioData, config.SampleRate, config.Channels, Context.ConnectionId);
// 处理语音识别
if (_configurationService.CurrentConfig.Network.EnableSpeechToText)
// 音频数据基本验证
if (audioData == null || audioData.Length < 2)
{
await _speechToTextService.ProcessAudioAsync(audioData, Context.ConnectionId);
return; // 忽略无效数据
}
// 转发音频数据到管理端
if (_configurationService.CurrentConfig.Network.EnableAudioStreaming)
{
// await Clients.Group("webadmin").SendAsync("ReceiveAudioData", Context.ConnectionId, audioData);
// 获取配置,避免多次读取
var config = _configurationService.CurrentConfig.Recording;
bool enableSpeechToText = _configurationService.CurrentConfig.Network.EnableSpeechToText;
bool enableAudioStreaming = _configurationService.CurrentConfig.Network.EnableAudioStreaming;
bool enableNoiseReduction = _configurationService.CurrentConfig.Network.EnableAudioNoiseReduction;
// 转发音频到正在监听的显示端
// 并行处理不同的任务
var tasks = new List<Task>();
// 处理音频数据(总是执行)
tasks.Add(_audioProcessingService.ProcessAudioDataAsync(audioData, config.SampleRate, config.Channels, Context.ConnectionId));
// 处理语音识别(如果启用)
if (enableSpeechToText)
{
tasks.Add(_speechToTextService.ProcessAudioAsync(audioData, Context.ConnectionId));
}
// 音频流转发(如果启用)
if (enableAudioStreaming)
{
var monitoringClients = _clients.Values
.Where(c => (c.ClientType == ClientType.Monitor || c.ClientType == ClientType.WebAdmin))
.Where(c => c.ClientType == ClientType.Monitor)
.Select(c => c.ClientId)
.ToList();
if (monitoringClients.Any())
if (monitoringClients.Count > 0)
{
// 音频数据处理和发送速率控制
try
{
byte[] dataToSend;
// 基于当前时间创建节流标识
var now = DateTime.Now;
var throttleKey = now.ToString("yyyyMMddHHmmss") + (now.Millisecond / 100).ToString();
if (_configurationService.CurrentConfig.Network.EnableAudioNoiseReduction)
// 使用当前客户端连接ID和时间标识作为处理标记
var processingKey = $"{Context.ConnectionId}:{throttleKey}";
// 获取降噪或原始数据
byte[] dataToSend;
if (enableNoiseReduction)
{
dataToSend = _audioProcessingService.ApplyNoiseReduction(audioData, config.SampleRate, config.Channels);
_logger.LogDebug($"转发音频数据到{monitoringClients.Count}个监听客户端,数据长度: {audioData.Length},降噪后长度:{dataToSend.Length}");
}
else
{
dataToSend = audioData;
_logger.LogDebug($"转发音频数据到{monitoringClients.Count}个监听客户端,数据长度: {audioData.Length}");
}
// 始终使用二进制格式发送数据,避免字符串转换
await Clients.Clients(monitoringClients).SendAsync("ReceiveAudioData", dataToSend);
// 为每个监控客户端创建单独的发送任务,确保数据流的独立性
foreach (var clientId in monitoringClients)
{
// 添加音频数据发送任务,使用单独的客户端连接
tasks.Add(Clients.Client(clientId).SendAsync("ReceiveAudioData", dataToSend));
}
// 只在日志级别为Debug时输出详细信息
if (_logger.IsEnabled(LogLevel.Debug) && monitoringClients.Count > 0)
{
_logger.LogDebug($"分发音频数据到{monitoringClients.Count}个监听客户端,数据长度: {dataToSend.Length}字节,标识: {throttleKey}");
}
}
catch (Exception ex)
{
_logger.LogError($"转发音频数据到监听端失败: {ex.Message}");
_logger.LogError($"准备音频数据分发时出错: {ex.Message}");
}
}
}
// 等待所有任务完成
try
{
await Task.WhenAll(tasks);
}
catch (Exception ex)
{
_logger.LogError($"处理音频数据时发生错误: {ex.Message}");
}
}
/// <summary>
@ -1062,7 +1106,9 @@ namespace ShengShengBuXi.Hubs
// 如果是真实用户的发言,持久化保存到文件
if (textToDisplay.IsRealUser)
{
SaveRealUserDisplayToFile(textToDisplay);
_presetSentences.Add(textToDisplay.Text);
}
}
else

View File

@ -4,6 +4,9 @@
ViewData["Title"] = "清竹园-中控页面";
}
<!-- 添加Bootstrap Icons库的引用 -->
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.10.5/font/bootstrap-icons.css">
<div class="container-fluid">
<!-- 头部区域 (100%, 20%) -->
<div class="row mb-3">
@ -108,8 +111,9 @@
<div id="input-text-area" style="height: 40%;">
<div class="card h-100">
<div class="card-header">
<h5 class="mb-0">文本编辑 <span
style="color:#6c757d;font-size:12px;">(最多输入30个文字)</span></h5>
<h5 class="mb-0">
文本编辑 <span style="color:#6c757d;font-size:12px;">(最多输入30个文字)</span>
</h5>
</div>
<div class="card-body">
<textarea id="text-input" class="form-control h-100" placeholder="请输入要显示的文本..."
@ -210,7 +214,36 @@
</div>
</div>
@section Scripts {
<div class="card mb-3">
<div class="card-header">
<h5>音频控制</h5>
</div>
<div class="card-body">
<div class="form-check form-switch mb-3">
<input class="form-check-input" type="checkbox" id="audioStreamToggle">
<label class="form-check-label" for="audioStreamToggle">音频流接收</label>
</div>
<!-- 音量控制 -->
<div class="mb-3">
<label for="volumeSlider" class="form-label">音量控制 <span id="volumeDisplay">100%</span></label>
<div class="d-flex align-items-center">
<i class="bi bi-volume-down me-2"></i>
<input type="range" class="form-range flex-grow-1" id="volumeSlider" min="0" max="100" value="100">
<i class="bi bi-volume-up ms-2"></i>
</div>
<small class="text-muted">音量已增强3倍可按需调整</small>
</div>
<div class="mt-3" id="callStatus">
<div class="alert alert-secondary">
未检测到通话
</div>
</div>
</div>
</div>
@section Scripts {
<script src="~/lib/microsoft-signalr/signalr.min.js"></script>
<script>
@ -230,15 +263,24 @@
let audioStreamSource = null;
let isAudioStreamEnabled = false;
let audioGainNode = null;
let currentVolume = 1.0; // 默认音量为1.0 (100%)
let currentVolume = 1.0; // 默认音量
let volumeBoost = 3.0; // 音量增益倍数,提高接收到的音频音量
// 添加音频缓冲区变量
let audioBufferQueue = [];
const MAX_BUFFER_SIZE = 15; // 最大缓冲队列大小调整为15帧以适应网络延迟
const MAX_BUFFER_SIZE = 400; // 增加缓冲队列大小,提供更充足的数据缓冲
let isAudioPlaying = false;
let audioBufferTimeout = null;
const BUFFER_PROCESS_INTERVAL = 33; // 缓冲区处理间隔(毫秒)调整为约30fps的处理速率
let bufferStartSizeThreshold = 5; // 开始播放所需的最小缓冲数据量增加到5帧以减少卡顿
const BUFFER_PROCESS_INTERVAL = 85; // 显著增加处理间隔,确保音频帧有足够的播放时间
let bufferStartSizeThreshold = 25; // 增加开始阈值,确保有足够数据开始播放
let lastAudioTimestamp = 0;
let audioJitterBuffer = 60; // 增加抖动缓冲,避免播放过快
let sampleRate = 16000; // 采样率固定为16kHz
let frameSize = 320; // 每帧样本数 (20ms @@ 16kHz)
let targetFrameDuration = 20; // 目标帧时长(毫秒)
let receivedPacketCounter = 0;
let lastPacketTime = 0;
let receivedPacketRate = 0;
// 调试日志
function log(message) {
@ -247,7 +289,7 @@
console.log(logMsg);
const logElem = document.getElementById("debug-log");
if (logElem) {
if (logElem&&false) {
const logLine = document.createElement("div");
logLine.textContent = logMsg;
logElem.insertBefore(logLine, logElem.firstChild);
@ -1030,34 +1072,42 @@
// 初始化页面
document.addEventListener('DOMContentLoaded', function () {
// 初始化音频上下文
initAudioContext();
// 初始化SignalR连接
initSignalR();
// 设置显示模式监听器
// 设置各种事件监听器
setupEventListeners();
setupDisplayModeListeners();
// 设置音频流开关监听器
setupAudioStreamingListeners();
// 设置控屏开关监听器
setupScreenControlListeners();
// 设置音量控制监听器
setupVolumeControlListener();
// 初始化音量控制
setupVolumeControl();
// 注册为监控端
registerAsMonitor();
// 初始化工具提示
setTimeout(initTooltips, 1000);
// 默认设置为手动显示模式
document.getElementById("displayMode1").checked = true;
// 默认开启音频传输
document.getElementById("audioStreaming1").checked = true;
isAudioStreamEnabled = true;
// 显示当前数据处理模式的提示消息
setTimeout(() => {
// 显示提示消息
setTimeout(function () {
// 检查控屏模式
if (isManualScreenControlEnabled) {
showMessage("当前为手动控屏模式,需要您手动确认要显示的文本", "info", 10000);
}
// 检查显示模式
if (displayType === 1) {
showMessage("当前为手动处理数据模式,需要您手动审核并添加文本到显示队列", "info", 10000);
}
}, 1500);
});
@ -1348,89 +1398,114 @@
// 设置音量控制监听器
function setupVolumeControl() {
const volumeControl = document.getElementById('volumeControl');
if (volumeControl) {
volumeControl.addEventListener('input', function (e) {
currentVolume = parseFloat(e.target.value);
log(`音量已调整为: ${currentVolume * 100}%`);
const volumeSlider = document.getElementById("volumeSlider");
if (volumeSlider) {
// 初始化滑块值为当前音量 * 音量增益系数(转换为百分比)
volumeSlider.value = Math.floor(currentVolume * 100);
// 如果已经创建了增益节点,立即应用音量设置
// 更新显示
const volumeDisplay = document.getElementById("volumeDisplay");
if (volumeDisplay) {
volumeDisplay.textContent = `${Math.floor(currentVolume * 100)}%`;
}
// 监听音量变化
volumeSlider.addEventListener("input", function() {
const newVolume = parseFloat(this.value) / 100;
currentVolume = newVolume;
// 应用音量设置(包含增益)
if (audioGainNode) {
audioGainNode.gain.value = currentVolume;
audioGainNode.gain.value = newVolume * volumeBoost;
}
// 同时调整已播放的录音音量
if (currentAudio) {
currentAudio.volume = currentVolume;
// 更新显示
if (volumeDisplay) {
volumeDisplay.textContent = `${Math.floor(newVolume * 100)}%`;
}
log(`音量已调整: ${Math.floor(newVolume * 100)}%, 实际增益: ${(newVolume * volumeBoost).toFixed(1)}`);
});
}
}
// 初始化音频上下文
function initAudioContext() {
if (audioContext) {
log("音频上下文已存在,使用现有上下文");
// 确保音频上下文已激活
if (audioContext.state === 'suspended') {
audioContext.resume().then(() => {
log("已恢复暂停的音频上下文");
}).catch(err => {
log("恢复音频上下文失败: " + err);
});
}
// 启动缓冲处理
startBufferProcessing();
return;
}
try {
// 创建音频上下文
const AudioContext = window.AudioContext || window.webkitAudioContext;
audioContext = new AudioContext();
// 关闭现有的音频上下文
if (audioContext) {
try {
if (audioGainNode) {
audioGainNode.disconnect();
audioGainNode = null;
}
audioContext.close().catch(e => log("关闭音频上下文失败: " + e));
audioContext = null;
} catch (e) {
log("重置音频上下文失败: " + e);
}
}
// 创建增益节点用于控制音量
// 创建新的音频上下文
const AudioContext = window.AudioContext || window.webkitAudioContext;
audioContext = new AudioContext({
sampleRate: sampleRate, // 固定使用16kHz采样率
latencyHint: 'interactive' // 低延迟设置
});
// 创建增益节点并设置更高的音量
audioGainNode = audioContext.createGain();
audioGainNode.gain.value = currentVolume; // 设置初始音量
audioGainNode.gain.value = currentVolume * volumeBoost; // 应用音量增益
audioGainNode.connect(audioContext.destination);
log("音频上下文已初始化,创建了增益节点,初始音量: " + currentVolume);
log(`音频上下文已初始化: 采样率=${audioContext.sampleRate}Hz, 状态=${audioContext.state}, 音量增益=${volumeBoost}倍`);
// 如果音频上下文处于挂起状态,需要用户交互来激活
// 重置音频缓冲处理
startBufferProcessing();
// 恢复音频上下文
if (audioContext.state === 'suspended') {
const resumeAudio = function () {
const resumeAudio = function() {
if (audioContext && audioContext.state === 'suspended') {
audioContext.resume().then(() => {
log("用户交互已激活音频上下文");
document.removeEventListener('click', resumeAudio);
document.removeEventListener('touchstart', resumeAudio);
document.removeEventListener('touchend', resumeAudio);
log("音频上下文已激活");
}).catch(err => {
log("激活音频上下文失败: " + err);
});
}
};
document.addEventListener('click', resumeAudio);
document.addEventListener('touchstart', resumeAudio);
document.addEventListener('touchend', resumeAudio);
// 设置单次事件监听
document.addEventListener('click', resumeAudio, { once: true });
document.addEventListener('touchstart', resumeAudio, { once: true });
document.addEventListener('keydown', resumeAudio, { once: true });
// 尝试立即恢复
resumeAudio();
}
log("音频上下文已初始化,状态: " + audioContext.state + ", 采样率: " + audioContext.sampleRate);
// 启动音频缓冲处理
startBufferProcessing();
} catch (e) {
log("无法创建音频上下文: " + e);
log("初始化音频上下文失败: " + e);
showMessage("无法初始化音频播放: " + e, "danger");
}
}
// 启动缓冲区处理
function startBufferProcessing() {
// 停止现有的处理
if (audioBufferTimeout) {
clearInterval(audioBufferTimeout);
}
// 重置状态
audioBufferQueue = [];
isAudioPlaying = false;
lastAudioTimestamp = 0;
receivedPacketCounter = 0;
lastPacketTime = 0;
// 启动处理间隔
audioBufferTimeout = setInterval(processAudioBuffer, BUFFER_PROCESS_INTERVAL);
log("音频缓冲处理已启动");
log(`音频处理已启动: 间隔=${BUFFER_PROCESS_INTERVAL}ms, 缓冲阈值=${bufferStartSizeThreshold}帧`);
}
// 停止缓冲区处理
@ -1439,10 +1514,12 @@
clearInterval(audioBufferTimeout);
audioBufferTimeout = null;
}
// 清空缓冲区
// 清空状态
audioBufferQueue = [];
isAudioPlaying = false;
log("音频缓冲处理已停止");
lastAudioTimestamp = 0;
log("音频处理已停止");
}
// 处理音频缓冲区
@ -1451,7 +1528,7 @@
return;
}
// 没有足够的缓冲数据
// 没有数据时等待
if (audioBufferQueue.length === 0) {
if (isAudioPlaying) {
log("缓冲区已空,等待更多数据...");
@ -1462,217 +1539,130 @@
// 初始播放需要达到阈值
if (!isAudioPlaying && audioBufferQueue.length < bufferStartSizeThreshold) {
log(`缓冲中,当前数据量: ${audioBufferQueue.length}/${bufferStartSizeThreshold}`);
// 只在达到特定比例时记录
if (audioBufferQueue.length === 1 ||
audioBufferQueue.length === Math.floor(bufferStartSizeThreshold/2) ||
audioBufferQueue.length === bufferStartSizeThreshold-1) {
log(`缓冲中: ${audioBufferQueue.length}/${bufferStartSizeThreshold}`);
}
return;
}
// 从队列取出一个音频数据并播放
const audioData = audioBufferQueue.shift();
playBufferedAudio(audioData);
// 当前时间和上次播放的时间间隔检查
const now = Date.now();
const elapsed = now - lastAudioTimestamp;
// 如果上次播放时间太近,等待足够的时间间隔
if (isAudioPlaying && elapsed < audioJitterBuffer) {
return;
}
// 从队列获取下一个音频包
const packet = audioBufferQueue.shift();
if (packet && !packet.processed) {
playBufferedAudio(packet.data, packet.estimatedDuration);
packet.processed = true;
isAudioPlaying = true;
lastAudioTimestamp = now;
// 自适应调整缓冲区大小
if (audioBufferQueue.length > MAX_BUFFER_SIZE * 0.8) {
log("缓冲区接近上限,增加处理频率");
// 增加处理频率
// 自适应调整缓冲区处理间隔
const bufferRatio = audioBufferQueue.length / MAX_BUFFER_SIZE;
if (bufferRatio > 0.7) {
// 数据充足,可以稍微加快处理
if (audioBufferTimeout) {
clearInterval(audioBufferTimeout);
const newInterval = Math.max(BUFFER_PROCESS_INTERVAL * 0.8, 60);
audioBufferTimeout = setInterval(processAudioBuffer, newInterval);
log(`缓冲区数据充足 (${audioBufferQueue.length}/${MAX_BUFFER_SIZE}), 调整间隔为 ${newInterval.toFixed(0)}ms`);
}
audioBufferTimeout = setInterval(processAudioBuffer, BUFFER_PROCESS_INTERVAL / 2);
} else if (audioBufferQueue.length < MAX_BUFFER_SIZE * 0.2 && audioBufferQueue.length > 0) {
log("缓冲区数据较少,减少处理频率");
// 减少处理频率
} else if (bufferRatio < 0.1 && audioBufferQueue.length > 0) {
// 数据不足,需要减缓处理速度
if (audioBufferTimeout) {
clearInterval(audioBufferTimeout);
}
audioBufferTimeout = setInterval(processAudioBuffer, BUFFER_PROCESS_INTERVAL * 1.5);
const newInterval = BUFFER_PROCESS_INTERVAL * 1.5;
audioBufferTimeout = setInterval(processAudioBuffer, newInterval);
log(`缓冲区数据不足 (${audioBufferQueue.length}/${MAX_BUFFER_SIZE}), 调整间隔为 ${newInterval.toFixed(0)}ms`);
}
}
// 播放实时音频
function playRealTimeAudio(audioData) {
if (!audioContext || !isAudioStreamEnabled) return;
try {
log("接收到实时音频数据...");
// 检查音频数据类型
log(`音频数据类型: ${Object.prototype.toString.call(audioData)}, 长度: ${audioData ? (audioData.length || audioData.byteLength || 'unknown') : 'null'}`);
// 处理接收到的音频数据
let pcmData;
if (audioData instanceof Uint8Array) {
log("接收到 Uint8Array 类型数据");
pcmData = audioData;
} else if (audioData instanceof ArrayBuffer) {
log("接收到 ArrayBuffer 类型数据");
pcmData = new Uint8Array(audioData);
} else if (Array.isArray(audioData)) {
log("接收到数组类型数据,尝试转换");
// 尝试将数组转换为 Uint8Array
pcmData = new Uint8Array(audioData);
} else if (typeof audioData === 'object' && audioData !== null) {
log("接收到对象类型数据,尝试解析");
// 尝试解析对象
if (audioData.data && (audioData.data instanceof Uint8Array || audioData.data instanceof ArrayBuffer)) {
pcmData = audioData.data instanceof Uint8Array ? audioData.data : new Uint8Array(audioData.data);
} else if (audioData.buffer && audioData.buffer instanceof ArrayBuffer) {
pcmData = new Uint8Array(audioData.buffer);
} else {
// 尝试将对象转换为JSON并记录以便调试
try {
log("对象数据:" + JSON.stringify(audioData).substring(0, 100));
} catch (e) {
log("无法序列化对象: " + e);
}
log("无法识别的对象类型数据");
return;
}
} else {
log(`不支持的音频数据格式: ${typeof audioData}`);
// 尝试更详细地记录数据内容
if (typeof audioData === 'string') {
log("字符串数据前20字符: " + audioData.substring(0, 20));
// 尝试从Base64字符串解码
try {
// 尝试使用更健壮的Base64解码先规范化字符串格式
const base64Str = audioData.trim().replace(/^data:[^;]+;base64,/, '');
// 创建具有适当长度的Uint8Array
const binary = atob(base64Str);
pcmData = new Uint8Array(binary.length);
for (let i = 0; i < binary.length; i++) {
pcmData[i] = binary.charCodeAt(i);
}
log("已从Base64字符串转换为Uint8Array");
} catch (e) {
log("Base64转换失败: " + e);
// 尝试直接解码二进制字符串
try {
pcmData = new Uint8Array(audioData.length);
for (let i = 0; i < audioData.length; i++) {
pcmData[i] = audioData.charCodeAt(i);
}
log("已从二进制字符串转换为Uint8Array");
} catch (e2) {
log("二进制字符串转换也失败: " + e2);
return;
}
}
} else {
return;
}
}
// 检查数据是否为空或过小
if (!pcmData || pcmData.length < 2) {
log("音频数据无效或过小");
return;
}
log(`处理音频数据: 长度=${pcmData.length} 字节`);
// 确保数据长度是偶数16位样本需要2个字节
const validLength = Math.floor(pcmData.length / 2) * 2;
if (validLength < pcmData.length) {
log(`调整音频数据长度从 ${pcmData.length} 到 ${validLength} 字节`);
pcmData = pcmData.slice(0, validLength);
}
// 添加到缓冲队列
if (audioBufferQueue.length < MAX_BUFFER_SIZE) {
audioBufferQueue.push(pcmData);
log(`已添加到缓冲区,当前缓冲数量: ${audioBufferQueue.length}`);
} else {
log("缓冲区已满,丢弃数据");
// 队列满时,移除最早的,添加最新的
audioBufferQueue.shift();
audioBufferQueue.push(pcmData);
}
} catch (e) {
log("处理实时音频失败: " + e);
}
}
// 播放缓冲区中的音频
async function playBufferedAudio(pcmData) {
async function playBufferedAudio(pcmData, estimatedDuration) {
try {
// 获取有效的DataView
// 确保音频上下文正常
if (!audioContext || audioContext.state === 'closed') {
initAudioContext();
if (!audioContext) return;
}
if (audioContext.state === 'suspended') {
try {
await audioContext.resume();
} catch (e) {
return;
}
}
// 数据准备
let dataView;
try {
dataView = new DataView(pcmData.buffer, pcmData.byteOffset, pcmData.byteLength);
} catch (e) {
log("创建DataView失败: " + e);
// 尝试创建新的ArrayBuffer
try {
const newBuffer = new ArrayBuffer(pcmData.length);
const newBufferView = new Uint8Array(newBuffer);
newBufferView.set(pcmData);
dataView = new DataView(newBuffer);
log("通过创建新缓冲区成功获取DataView");
} catch (e2) {
log("创建替代DataView也失败: " + e2);
return;
}
}
// 将PCM数据16位整数转换为32位浮点数组
// 转换为音频数据并增强音量
const floatData = new Float32Array(pcmData.length / 2);
// 转换16位PCM到32位浮点使用try-catch保护读取操作
try {
for (let i = 0; i < floatData.length; i++) {
// 确保我们不会读取超出范围的数据
if ((i * 2) + 1 < pcmData.length) {
// 读取16位整数小端序
const int16 = dataView.getInt16(i * 2, true);
// 转换为-1.0到1.0的浮点数
floatData[i] = int16 / 32768.0;
} else {
// 如果到达数据末尾使用0填充
floatData[i] = 0;
for (let i = 0, j = 0; i < pcmData.length; i += 2, j++) {
if (i + 1 < pcmData.length) {
const int16 = dataView.getInt16(i, true);
// 标准化16位PCM数据到-1.0到1.0,但不进行音量增益
// 音量增益由audioGainNode处理避免信号失真
floatData[j] = int16 / 32768.0;
}
}
} catch (e) {
log("转换音频数据失败: " + e);
return;
// 实际帧长度计算和调整
const actualFrameDuration = (floatData.length / sampleRate) * 1000; // 毫秒
let outputFloatData = floatData;
// 确保音频帧至少有目标时长
if (actualFrameDuration < targetFrameDuration && floatData.length > 0) {
const targetLength = Math.ceil(targetFrameDuration * sampleRate / 1000);
outputFloatData = new Float32Array(targetLength);
outputFloatData.set(floatData);
// 其余部分为0即静音填充
}
// 创建一个包含音频数据的AudioBuffer
const sampleRate = 16000; // 采样率固定为16kHz
const buffer = audioContext.createBuffer(1, floatData.length, sampleRate);
// 将浮点数据复制到AudioBuffer的第一个通道
try {
// 创建音频缓冲区
const buffer = audioContext.createBuffer(1, outputFloatData.length, sampleRate);
const channel = buffer.getChannelData(0);
channel.set(floatData);
} catch (e) {
log("设置音频通道数据失败: " + e);
return;
}
channel.set(outputFloatData);
// 创建音频源并连接到音频输出
// 创建音频源
const source = audioContext.createBufferSource();
source.buffer = buffer;
// 连接到增益节点而不是直接连接到输出
// 应用音量控制
source.connect(audioGainNode);
// 确保音量设置被应用
if (audioGainNode) {
audioGainNode.gain.value = currentVolume;
audioGainNode.gain.value = currentVolume * volumeBoost; // 应用音量增益
}
// 播放前检查音频上下文状态
if (audioContext.state === 'suspended') {
log("音频上下文处于暂停状态,尝试恢复...");
await audioContext.resume();
}
// 确保音频播放完成时进行清理
source.onended = () => {
// 在音频片段播放完成时可以执行一些操作
};
// 使用精确调度启动播放
const startTime = audioContext.currentTime;
source.start(startTime);
// 播放
source.start(0);
} catch (e) {
log("播放缓冲音频失败: " + e);
}
@ -1806,5 +1796,110 @@
updateScreenControlUI(false);
});
}
// Play real-time audio (packet-based variant, added by this commit): measures
// the incoming packet rate, normalizes the payload to a Uint8Array of 16-bit
// PCM, wraps it in a timestamped packet object, and enqueues it for buffered
// playback. Relies on page globals: audioContext, isAudioStreamEnabled,
// receivedPacketCounter, lastPacketTime, receivedPacketRate, sampleRate,
// audioBufferQueue, MAX_BUFFER_SIZE, bufferStartSizeThreshold, log.
function playRealTimeAudio(audioData) {
    if (!audioContext || !isAudioStreamEnabled) return;
    try {
        // Compute the data receive rate (packets per second, updated once a second).
        const now = Date.now();
        receivedPacketCounter++;
        if (now - lastPacketTime > 1000) {
            receivedPacketRate = receivedPacketCounter;
            receivedPacketCounter = 0;
            lastPacketTime = now;
            log(`音频数据接收速率: ${receivedPacketRate} 包/秒`);
        }
        // Normalize the received audio data to a Uint8Array.
        let pcmData;
        // Handle the different payload shapes that may arrive.
        if (audioData instanceof Uint8Array) {
            pcmData = audioData;
        } else if (audioData instanceof ArrayBuffer) {
            pcmData = new Uint8Array(audioData);
        } else if (Array.isArray(audioData)) {
            pcmData = new Uint8Array(audioData);
        } else if (typeof audioData === 'object' && audioData !== null) {
            if (audioData.data && (audioData.data instanceof Uint8Array || audioData.data instanceof ArrayBuffer)) {
                pcmData = audioData.data instanceof Uint8Array ? audioData.data : new Uint8Array(audioData.data);
            } else if (audioData.buffer && audioData.buffer instanceof ArrayBuffer) {
                pcmData = new Uint8Array(audioData.buffer);
            } else {
                log("无法识别的对象类型数据");
                return;
            }
        } else if (typeof audioData === 'string') {
            try {
                // Strip a possible data-URL prefix.
                const base64Str = audioData.trim().replace(/^data:[^;]+;base64,/, '');
                // Base64 decode.
                const binary = atob(base64Str);
                pcmData = new Uint8Array(binary.length);
                for (let i = 0; i < binary.length; i++) {
                    pcmData[i] = binary.charCodeAt(i);
                }
            } catch (e) {
                // Fall back to decoding as a raw binary string.
                try {
                    pcmData = new Uint8Array(audioData.length);
                    for (let i = 0; i < audioData.length; i++) {
                        pcmData[i] = audioData.charCodeAt(i);
                    }
                } catch (e2) {
                    return;
                }
            }
        } else {
            return;
        }
        // Validity check: drop empty or too-small payloads.
        if (!pcmData || pcmData.length < 2) {
            return;
        }
        // Ensure an even byte count (a 16-bit sample needs 2 bytes).
        const validLength = Math.floor(pcmData.length / 2) * 2;
        if (validLength < pcmData.length) {
            pcmData = pcmData.slice(0, validLength);
        }
        // Estimate this frame's duration from its sample count.
        const numSamples = validLength / 2; // 16-bit PCM = 2 bytes per sample
        const frameDuration = (numSamples / sampleRate) * 1000; // milliseconds
        // Timestamp the packet for fixed-rate pacing by the buffer processor.
        const timestamp = Date.now();
        const packet = {
            data: pcmData,
            timestamp: timestamp,
            estimatedDuration: frameDuration,
            processed: false
        };
        // Enqueue into the jitter buffer.
        if (audioBufferQueue.length < MAX_BUFFER_SIZE) {
            audioBufferQueue.push(packet);
            // Log only at notable thresholds to limit noise.
            if (audioBufferQueue.length === bufferStartSizeThreshold ||
                audioBufferQueue.length % 20 === 0) {
                log(`缓冲区状态: ${audioBufferQueue.length}/${MAX_BUFFER_SIZE}, 估计时长: ${frameDuration.toFixed(1)}ms`);
            }
        } else {
            // Queue-full strategy: drop the oldest quarter, keep the newest frames.
            const keepFrames = Math.floor(MAX_BUFFER_SIZE * 0.75);
            audioBufferQueue.splice(0, MAX_BUFFER_SIZE - keepFrames);
            audioBufferQueue.push(packet);
            log(`缓冲区已满 (${MAX_BUFFER_SIZE}), 丢弃旧数据, 保留${keepFrames}帧`);
        }
    } catch (e) {
        log("处理实时音频失败: " + e);
    }
}
</script>
}
}