This commit is contained in:
zpc 2025-03-28 15:55:39 +08:00
parent 53da11d416
commit e6816d49b7
5 changed files with 515 additions and 104 deletions

View File

@ -559,7 +559,7 @@ public class PhoneBoothService : IPhoneBoothService, IDisposable
{
await PlayAudioAndWait(
_config.AudioFiles.GetFullPath(_config.AudioFiles.BeepPromptFile),
null, false, recordingCts.Token, 0.3f);
null, false, recordingCts.Token, 0.1f);
}
catch (OperationCanceledException)
{
@ -1307,7 +1307,11 @@ public class PhoneBoothService : IPhoneBoothService, IDisposable
device.Volume = volume.Value;
}
device.Init(reader);
//device.Volume
if (volume != null)
{
device.Volume = volume.Value;
}
// 如果需要循环播放,创建一个循环播放的任务
if (loop && maxDuration.HasValue)
{

View File

@ -1,5 +1,5 @@
{
"SignalRHubUrl": "http://115.159.44.16/audiohub",
"SignalRHubUrl": "http://localhost:81/audiohub",
"ConfigBackupPath": "config.json",
"AutoConnectToServer": true
}

View File

@ -92,6 +92,17 @@ namespace ShengShengBuXi.Hubs
private static readonly string ConfigDirectory = Path.Combine(Directory.GetCurrentDirectory(), "config");
private static readonly string DisplayConfigPath = Path.Combine(ConfigDirectory, "display.json");
private static DateTime _lastRealTimeResultSentTime = DateTime.MinValue;
private static int _realTimeResultCounter = 0;
private static readonly TimeSpan _realTimeResultThrottleWindow = TimeSpan.FromSeconds(0.3);
private static readonly int _maxRealTimeResultsPerSecond = 1;
// 增加对最终结果的限流变量
private static DateTime _lastFinalResultSentTime = DateTime.MinValue;
private static int _finalResultCounter = 0;
private static TimeSpan _finalResultThrottleWindow = TimeSpan.FromSeconds(0.3);
private static readonly int _maxFinalResultsPerSecond = 1;
// 用于初始化配置
static AudioHub()
{
@ -480,18 +491,21 @@ namespace ShengShengBuXi.Hubs
{
try
{
byte[] dataToSend;
if (_configurationService.CurrentConfig.Network.EnableAudioNoiseReduction)
{
var jiangzao = _audioProcessingService.ApplyNoiseReduction(audioData, config.SampleRate, config.Channels);
_logger.LogDebug($"转发音频数据到{monitoringClients.Count}个监听客户端,数据长度: {audioData.Length},降噪后长度:{jiangzao.Length}");
// 尝试直接发送数据
await Clients.Clients(monitoringClients).SendAsync("ReceiveAudioData", jiangzao);
dataToSend = _audioProcessingService.ApplyNoiseReduction(audioData, config.SampleRate, config.Channels);
_logger.LogDebug($"转发音频数据到{monitoringClients.Count}个监听客户端,数据长度: {audioData.Length},降噪后长度:{dataToSend.Length}");
}
else
{
// 尝试直接发送数据
await Clients.Clients(monitoringClients).SendAsync("ReceiveAudioData", audioData);
dataToSend = audioData;
_logger.LogDebug($"转发音频数据到{monitoringClients.Count}个监听客户端,数据长度: {audioData.Length}");
}
// 始终使用二进制格式发送数据,避免字符串转换
await Clients.Clients(monitoringClients).SendAsync("ReceiveAudioData", dataToSend);
}
catch (Exception ex)
{
@ -736,19 +750,62 @@ namespace ShengShengBuXi.Hubs
{
_logger.LogInformation($"接收到语音识别最终结果: {e.Text}");
// 将结果添加到显示文本队列
//AddRecognizedTextToDisplay(e.Text, true, e.Id);
// 发送频率限制每秒最多发送3次最终识别结果
DateTime now = DateTime.Now;
TimeSpan elapsed = now - _lastFinalResultSentTime;
// 将最终结果发送给所有监听中的客户端
await _hubContext.Clients.Groups(new[] { "webadmin", "monitor" })
.SendAsync("ReceiveSpeechToEndTextResult", e.Text);
// 检查是否已进入新的时间窗口
if (elapsed >= _finalResultThrottleWindow)
{
// 重置计数器和时间窗口
_finalResultCounter = 0;
_lastFinalResultSentTime = now;
}
// 检查当前时间窗口内是否已达到发送上限
if (_finalResultCounter < _maxFinalResultsPerSecond)
{
// 将最终结果发送给所有监听中的客户端
await _hubContext.Clients.Groups(new[] { "webadmin", "monitor" })
.SendAsync("ReceiveSpeechToEndTextResult", e.Text);
// 增加计数器
_finalResultCounter++;
}
else
{
_logger.LogDebug($"已达到最终结果发送频率限制,跳过发送: {e.Text}");
}
}
else
{
// 发送实时识别结果给客户端
_logger.LogInformation($"接收到语音识别实时结果: {e.Text}");
await _hubContext.Clients.Groups(new[] { "webadmin", "monitor" })
.SendAsync("ReceiveSpeechToTextResult", e.Text);
// 发送频率限制每秒最多发送3次实时识别结果
DateTime now = DateTime.Now;
TimeSpan elapsed = now - _lastRealTimeResultSentTime;
// 检查是否已进入新的时间窗口
if (elapsed >= _realTimeResultThrottleWindow)
{
// 重置计数器和时间窗口
_realTimeResultCounter = 0;
_lastRealTimeResultSentTime = now;
}
// 检查当前时间窗口内是否已达到发送上限
if (_realTimeResultCounter < _maxRealTimeResultsPerSecond)
{
// 发送实时识别结果给客户端
_logger.LogInformation($"接收到语音识别实时结果: {e.Text}");
await _hubContext.Clients.Groups(new[] { "webadmin", "monitor" })
.SendAsync("ReceiveSpeechToTextResult", e.Text);
// 增加计数器
_realTimeResultCounter++;
}
else
{
_logger.LogDebug($"已达到实时结果发送频率限制,跳过发送: {e.Text}");
}
}
}
catch (Exception ex)

View File

@ -187,6 +187,14 @@
let isAudioStreamEnabled = false;
let audioGainNode = null;
let currentVolume = 1.0; // 默认音量为1.0 (100%)
// 添加音频缓冲区变量
let audioBufferQueue = [];
const MAX_BUFFER_SIZE = 15; // 最大缓冲队列大小调整为15帧以适应网络延迟
let isAudioPlaying = false;
let audioBufferTimeout = null;
const BUFFER_PROCESS_INTERVAL = 33; // 缓冲区处理间隔(毫秒)调整为约30fps的处理速率
let bufferStartSizeThreshold = 5; // 开始播放所需的最小缓冲数据量增加到5帧以减少卡顿
// 调试日志
function log(message) {
@ -357,15 +365,30 @@
// 接收实时语音识别结果
connection.on("ReceiveSpeechToTextResult", (result) => {
log(`接收到实时语音识别结果: ${result.substring(0, 30)}${result.length > 30 ? '...' : ''}`);
// 显示实时识别结果
showRealtimeTextResult(result);
// 使用防抖动技术显示实时识别结果,避免频繁更新导致页面卡顿
if (window.realtimeTextDebounceTimer) {
clearTimeout(window.realtimeTextDebounceTimer);
}
window.realtimeTextDebounceTimer = setTimeout(() => {
showRealtimeTextResult(result);
}, 300); // 300毫秒的防抖动延迟
});
// 接收最终语音识别结果
connection.on("ReceiveSpeechToEndTextResult", (result) => {
log(`接收到最终语音识别结果: ${result.substring(0, 30)}${result.length > 30 ? '...' : ''}`);
// 清除任何正在进行的实时文本更新定时器
if (window.realtimeTextDebounceTimer) {
clearTimeout(window.realtimeTextDebounceTimer);
}
// 显示最终识别结果
showFinalTextResult(result);
// 同时添加到监控列表的顶部(如果不存在于列表中)
addToMonitorList(result);
});
// 显示模式更新消息
@ -424,12 +447,33 @@
statusText.textContent = "正在通话中";
// 初始化音频上下文(如果需要且启用了音频流)
if (isAudioStreamEnabled && !audioContext) {
if (isAudioStreamEnabled) {
// 如果存在音频上下文,则先尝试关闭,然后重新创建
if (audioContext) {
log("重置音频上下文以确保新的通话正常播放");
try {
// 释放旧的增益节点
if (audioGainNode) {
audioGainNode.disconnect();
audioGainNode = null;
}
// 关闭旧的音频上下文
audioContext.close().catch(e => log("关闭音频上下文失败: " + e));
audioContext = null;
} catch (e) {
log("重置音频上下文失败: " + e);
}
}
// 创建新的音频上下文
initAudioContext();
}
} else {
indicator.style.backgroundColor = "red";
statusText.textContent = "未检测到通话";
// 停止缓冲区处理
stopBufferProcessing();
}
// 有新通话时刷新监控列表
@ -845,9 +889,13 @@
clearInterval(refreshDisplayInterval);
}
// 停止缓冲区处理
stopBufferProcessing();
// 关闭音频上下文
if (audioContext) {
audioContext.close().catch(e => console.log("关闭音频上下文失败: " + e));
audioContext = null;
}
});
@ -1143,7 +1191,22 @@
// 初始化音频上下文
function initAudioContext() {
if (audioContext) return; // 避免重复初始化
if (audioContext) {
log("音频上下文已存在,使用现有上下文");
// 确保音频上下文已激活
if (audioContext.state === 'suspended') {
audioContext.resume().then(() => {
log("已恢复暂停的音频上下文");
}).catch(err => {
log("恢复音频上下文失败: " + err);
});
}
// 启动缓冲处理
startBufferProcessing();
return;
}
try {
// 创建音频上下文
@ -1174,12 +1237,80 @@
}
log("音频上下文已初始化,状态: " + audioContext.state + ", 采样率: " + audioContext.sampleRate);
// 启动音频缓冲处理
startBufferProcessing();
} catch (e) {
log("无法创建音频上下文: " + e);
showMessage("无法初始化音频播放: " + e, "danger");
}
}
// Start periodic draining of the audio buffer queue.
// Any previously installed drain timer is cleared first so that at most
// one interval timer is ever active.
function startBufferProcessing() {
    if (audioBufferTimeout) clearInterval(audioBufferTimeout);
    audioBufferTimeout = setInterval(processAudioBuffer, BUFFER_PROCESS_INTERVAL);
    log("音频缓冲处理已启动");
}
// Stop the periodic buffer-drain timer (if any is running), discard all
// queued audio frames, and reset the playback flag.
function stopBufferProcessing() {
    // Drop pending frames and mark playback as idle.
    audioBufferQueue = [];
    isAudioPlaying = false;
    // Tear down the drain timer when one is active.
    if (audioBufferTimeout) {
        clearInterval(audioBufferTimeout);
        audioBufferTimeout = null;
    }
    log("音频缓冲处理已停止");
}
// Process one tick of the audio buffer queue: pop a buffered PCM frame,
// play it, and adaptively retune the drain interval based on queue depth.
// Called on a setInterval installed by startBufferProcessing().
function processAudioBuffer() {
    // Do nothing without an active audio context, enabled stream, or live call.
    if (!audioContext || !isAudioStreamEnabled || !callInProgress) {
        return;
    }
    // 没有足够的缓冲数据
    if (audioBufferQueue.length === 0) {
        if (isAudioPlaying) {
            log("缓冲区已空,等待更多数据...");
            isAudioPlaying = false;
        }
        return;
    }
    // 初始播放需要达到阈值
    if (!isAudioPlaying && audioBufferQueue.length < bufferStartSizeThreshold) {
        log(`缓冲中,当前数据量: ${audioBufferQueue.length}/${bufferStartSizeThreshold}`);
        return;
    }
    // 从队列取出一个音频数据并播放
    const audioData = audioBufferQueue.shift();
    playBufferedAudio(audioData);
    isAudioPlaying = true;
    // Pick the target drain interval from the current queue depth.
    let targetInterval;
    if (audioBufferQueue.length > MAX_BUFFER_SIZE * 0.8) {
        // Queue near the cap: drain faster to avoid dropping frames.
        targetInterval = BUFFER_PROCESS_INTERVAL / 2;
    } else if (audioBufferQueue.length < MAX_BUFFER_SIZE * 0.2 && audioBufferQueue.length > 0) {
        // Queue running low: drain slower to avoid an underrun.
        targetInterval = BUFFER_PROCESS_INTERVAL * 1.5;
    } else {
        // FIX: the original never returned to the baseline rate once changed.
        targetInterval = BUFFER_PROCESS_INTERVAL;
    }
    // FIX: the original cleared and re-installed the interval on EVERY tick
    // while the queue stayed in the high/low zone, resetting the timer phase
    // each time and spamming the log. Only rebuild when the rate changes.
    if (audioBufferTimeout && processAudioBuffer._currentInterval !== targetInterval) {
        if (targetInterval < BUFFER_PROCESS_INTERVAL) {
            log("缓冲区接近上限,增加处理频率");
        } else if (targetInterval > BUFFER_PROCESS_INTERVAL) {
            log("缓冲区数据较少,减少处理频率");
        }
        clearInterval(audioBufferTimeout);
        audioBufferTimeout = setInterval(processAudioBuffer, targetInterval);
        processAudioBuffer._currentInterval = targetInterval;
    }
}
// 播放实时音频
function playRealTimeAudio(audioData) {
if (!audioContext || !isAudioStreamEnabled) return;
@ -1226,15 +1357,30 @@
log("字符串数据前20字符: " + audioData.substring(0, 20));
// 尝试从Base64字符串解码
try {
const binary = atob(audioData);
// 尝试使用更健壮的Base64解码先规范化字符串格式
const base64Str = audioData.trim().replace(/^data:[^;]+;base64,/, '');
// 创建具有适当长度的Uint8Array
const binary = atob(base64Str);
pcmData = new Uint8Array(binary.length);
for (let i = 0; i < binary.length; i++) {
pcmData[i] = binary.charCodeAt(i);
}
log("已从Base64字符串转换为Uint8Array");
} catch (e) {
} catch (e) {
log("Base64转换失败: " + e);
return;
// 尝试直接解码二进制字符串
try {
pcmData = new Uint8Array(audioData.length);
for (let i = 0; i < audioData.length; i++) {
pcmData[i] = audioData.charCodeAt(i);
}
log("已从二进制字符串转换为Uint8Array");
} catch (e2) {
log("二进制字符串转换也失败: " + e2);
return;
}
}
} else {
return;
@ -1256,6 +1402,25 @@
pcmData = pcmData.slice(0, validLength);
}
// 添加到缓冲队列
if (audioBufferQueue.length < MAX_BUFFER_SIZE) {
audioBufferQueue.push(pcmData);
log(`已添加到缓冲区,当前缓冲数量: ${audioBufferQueue.length}`);
} else {
log("缓冲区已满,丢弃数据");
// 队列满时,移除最早的,添加最新的
audioBufferQueue.shift();
audioBufferQueue.push(pcmData);
}
} catch (e) {
log("处理实时音频失败: " + e);
}
}
// 播放缓冲区中的音频
async function playBufferedAudio(pcmData) {
try {
// 获取有效的DataView
let dataView;
try {
@ -1322,11 +1487,16 @@
audioGainNode.gain.value = currentVolume;
}
// 播放前检查音频上下文状态
if (audioContext.state === 'suspended') {
log("音频上下文处于暂停状态,尝试恢复...");
await audioContext.resume();
}
// 播放
source.start(0);
log(`实时音频播放中...音量: ${currentVolume * 100}%`);
} catch (e) {
log("处理实时音频失败: " + e);
log("播放缓冲音频失败: " + e);
}
}
@ -1358,5 +1528,75 @@
log("显示最终语音识别结果");
}
// Append a recognized-text entry to the top of the monitoring list.
// `text` comes from speech-recognition output and is therefore untrusted:
// it must never be interpolated into innerHTML (see FIX below).
function addToMonitorList(text) {
    if (!text || text.trim() === "") return;
    // Skip duplicates: an item with the same full text already exists.
    const existingItems = document.querySelectorAll("#monitor-text-list .list-group-item");
    for (let i = 0; i < existingItems.length; i++) {
        if (existingItems[i].dataset.fullText === text) {
            return;
        }
    }
    // Create the new list item.
    const container = document.getElementById("monitor-text-list");
    const listItem = document.createElement("div");
    listItem.className = "list-group-item";
    // Temporary id; the server assigns the real id later.
    listItem.dataset.id = "temp-" + Date.now();
    listItem.dataset.fullText = text;
    // Clicking the item (outside its buttons) selects it and shows its text.
    listItem.addEventListener('click', function (e) {
        // Button/icon clicks are handled by their own handlers.
        if (e.target.tagName === 'BUTTON' || e.target.tagName === 'I' ||
            e.target.closest('button')) {
            return;
        }
        // Move the active highlight to this item.
        document.querySelectorAll("#monitor-text-list .list-group-item").forEach(item => {
            item.classList.remove("active");
        });
        this.classList.add("active");
        // Show the full text in the center display.
        displayTextInCenter(this.dataset.fullText);
    });
    // Local timestamp for display.
    const date = new Date();
    const formattedDate = `${date.getFullYear()}-${String(date.getMonth() + 1).padStart(2, '0')}-${String(date.getDate()).padStart(2, '0')} ${String(date.getHours()).padStart(2, '0')}:${String(date.getMinutes()).padStart(2, '0')}:${String(date.getSeconds()).padStart(2, '0')}`;
    // Preview = first 10 characters of the recognized text.
    const shortText = text.length > 10 ? text.substring(0, 10) : text;
    // Build the static markup first; the text preview is injected separately.
    // dataset.id and formattedDate are self-generated and safe to interpolate.
    listItem.innerHTML = `
        <div class="d-flex justify-content-between align-items-start mb-1">
            <small class="text-muted"></small>
            <div class="btn-group btn-group-sm">
                <button class="btn btn-outline-danger btn-sm" onclick="deleteMonitorText('${listItem.dataset.id}')">
                    <i class="bi bi-trash"></i>
                </button>
            </div>
        </div>
        <div>${formattedDate} (本地)</div>
    `;
    // FIX: the original interpolated `shortText` directly into innerHTML, so
    // markup inside recognized speech text was parsed as HTML (DOM XSS).
    // textContent renders it as plain text instead.
    listItem.querySelector("small.text-muted").textContent = `【${shortText}】`;
    // Insert at the top of the list.
    if (container.firstChild) {
        container.insertBefore(listItem, container.firstChild);
    } else {
        container.appendChild(listItem);
    }
    log(`已添加文本到监控列表: ${shortText}...`);
}
</script>
}

View File

@ -102,15 +102,23 @@ public class AudioProcessingService : IAudioProcessingService
throw new ArgumentNullException(nameof(clientId));
}
// 检查是否有活动的录音
if (!_activeRecordings.TryGetValue(clientId, out var writer))
{
_logger.LogWarning($"客户端没有活动的录音会话: {clientId}");
return;
}
try
{
// 判断采样率如果高于16kHz则降采样
if (sampleRate > 16000)
{
_logger.LogInformation($"原始采样率为{sampleRate}Hz进行降采样到16000Hz");
audioData = ResampleAudio(audioData, sampleRate, 16000, 16, channels);
sampleRate = 16000; // 更新采样率为降采样后的值
}
// 检查是否有活动的录音
if (!_activeRecordings.TryGetValue(clientId, out var writer))
{
_logger.LogWarning($"客户端没有活动的录音会话: {clientId}");
return;
}
// 异步写入音频数据
await Task.Run(() => { writer.Write(audioData, 0, audioData.Length); writer.Flush(); }, token);
}
@ -121,7 +129,67 @@ public class AudioProcessingService : IAudioProcessingService
}
/// <summary>
/// Resamples raw interleaved PCM audio to a new sample rate using
/// nearest-neighbor sample picking.
/// </summary>
/// <param name="audioData">Raw PCM audio bytes (interleaved channels).</param>
/// <param name="originalSampleRate">Sample rate of <paramref name="audioData"/> in Hz.</param>
/// <param name="targetSampleRate">Desired sample rate in Hz.</param>
/// <param name="bitsPerSample">Bit depth per sample, e.g. 16.</param>
/// <param name="channels">Number of interleaved channels.</param>
/// <returns>Resampled PCM bytes; the original data when no conversion is
/// needed, the parameters are invalid, or resampling fails.</returns>
private byte[] ResampleAudio(byte[] audioData, int originalSampleRate, int targetSampleRate, int bitsPerSample, int channels)
{
    if (originalSampleRate == targetSampleRate)
    {
        return audioData;
    }
    // FIX: guard against invalid parameters. In the original, bitsPerSample < 8
    // made bytesPerSample 0 and caused a DivideByZeroException below; channels
    // or rates <= 0 similarly broke the length arithmetic.
    if (audioData == null || audioData.Length == 0 ||
        originalSampleRate <= 0 || targetSampleRate <= 0 ||
        bitsPerSample < 8 || channels <= 0)
    {
        _logger.LogWarning("无效的重采样参数,返回原始数据");
        return audioData;
    }
    try
    {
        // Nearest-neighbor resampling: each output sample copies the closest
        // input sample. NOTE(review): no anti-aliasing low-pass filter is
        // applied before downsampling, so aliasing is possible — presumably
        // acceptable for speech; confirm if used for other audio.
        double ratio = (double)originalSampleRate / targetSampleRate;
        int bytesPerSample = bitsPerSample / 8;
        int samplesPerChannel = audioData.Length / (bytesPerSample * channels);
        int newSamplesPerChannel = (int)(samplesPerChannel / ratio);
        int newDataLength = newSamplesPerChannel * bytesPerSample * channels;
        byte[] result = new byte[newDataLength];
        for (int i = 0; i < newSamplesPerChannel; i++)
        {
            // Clamp so the last output sample never reads past the input.
            int originalIndex = Math.Min((int)(i * ratio), samplesPerChannel - 1);
            for (int ch = 0; ch < channels; ch++)
            {
                // Copy every byte of this sample for this channel.
                int originalOffset = (originalIndex * channels + ch) * bytesPerSample;
                int newOffset = (i * channels + ch) * bytesPerSample;
                for (int b = 0; b < bytesPerSample; b++)
                {
                    if (originalOffset + b < audioData.Length && newOffset + b < result.Length)
                    {
                        result[newOffset + b] = audioData[originalOffset + b];
                    }
                }
            }
        }
        _logger.LogInformation($"音频降采样完成: {audioData.Length}字节 -> {result.Length}字节");
        return result;
    }
    catch (Exception ex)
    {
        _logger.LogError($"音频降采样失败: {ex.Message}");
        return audioData; // Fall back to the original data on failure.
    }
}
/// <summary>
/// 应用降噪处理
/// </summary>
/// <param name="audioData">音频数据</param>
/// <param name="sampleRate">采样率</param>
@ -129,41 +197,53 @@ public class AudioProcessingService : IAudioProcessingService
/// <param name="noiseThreshold">噪声门限值</param>
/// <param name="attackSeconds">攻击时间</param>
/// <param name="releaseSeconds">释放时间</param>
/// <param name="highPassCutoff">高通滤波器截止频率(Hz)</param>
/// <param name="highPassCutoff">高通滤波器截止频率</param>
/// <param name="q">滤波器Q值</param>
/// <returns></returns>
public byte[] ApplyNoiseReduction(byte[] audioData, int sampleRate = 16000, int channels = 1, float noiseThreshold = 0.015f, float attackSeconds = 0.01f, float releaseSeconds = 0.1f, int highPassCutoff = 80, float q = 1.0f)
{
using (var inputStream = new MemoryStream(audioData))
using (var waveStream = new RawSourceWaveStream(inputStream, new WaveFormat(16000, 16, 1)))
// 调用内部实现
return ApplyNoiseReductionInternal(audioData, noiseThreshold, attackSeconds, releaseSeconds, highPassCutoff, q);
}
private byte[] ApplyNoiseReductionInternal(
byte[] audioData,
float noiseThreshold = 0.02f, // 噪声门限值
float attackSeconds = 0.01f, // 攻击时间
float releaseSeconds = 0.1f, // 释放时间
int highPassCutoff = 80, // 高通滤波器截止频率(Hz)
float q = 1.0f) // 滤波器Q值
{
var sampleProvider = waveStream.ToSampleProvider();
// 改进1更温和的噪声门参数
var noiseGate = new ImprovedNoiseGate(sampleProvider)
// 1. 将字节数组转换为 WaveStream
using (var inputStream = new MemoryStream(audioData))
using (var waveStream = new RawSourceWaveStream(inputStream, new WaveFormat(16000, 16, 1)))
{
Threshold = 0.015f, // 降低阈值(原0.02)
AttackSeconds = 0.05f, // 延长Attack时间(原0.01)
ReleaseSeconds = 0.3f, // 延长Release时间(原0.1)
HoldSeconds = 0.2f // 新增保持时间
};
// 改进2更平缓的高通滤波
var highPassFilter = new BiQuadFilterSampleProvider(noiseGate);
highPassFilter.Filter = BiQuadFilter.HighPassFilter(
sampleProvider.WaveFormat.SampleRate,
60, // 降低截止频率(原80)
0.707f); // 使用更平缓的Q值(原1.0)
// 改进3添加平滑处理
var smoothedProvider = new SmoothingSampleProvider(highPassFilter);
var outputStream = new MemoryStream();
WaveFileWriter.WriteWavFileToStream(outputStream, smoothedProvider.ToWaveProvider16());
return outputStream.ToArray();
}
// 2. 转换为浮点样本便于处理
var sampleProvider = waveStream.ToSampleProvider();
// 3. 应用噪声门(Noise Gate)
var noiseGate = new NoiseGateSampleProvider(sampleProvider)
{
Threshold = noiseThreshold,
AttackSeconds = attackSeconds,
ReleaseSeconds = releaseSeconds
};
// 4. 应用高通滤波器去除低频噪音
var highPassFilter = new BiQuadFilterSampleProvider(noiseGate);
highPassFilter.Filter = BiQuadFilter.HighPassFilter(
sampleProvider.WaveFormat.SampleRate,
highPassCutoff,
q);
// 5. 处理后的音频转回字节数组
var outputStream = new MemoryStream();
WaveFileWriter.WriteWavFileToStream(outputStream, highPassFilter.ToWaveProvider16());
return outputStream.ToArray();
}
}
/// <summary>
/// 开始新的音频流处理
/// </summary>
@ -447,67 +527,97 @@ public class AudioProcessingService : IAudioProcessingService
}
}
// 简单的噪声门实现
// 自定义噪声门实现
public class NoiseGateSampleProvider : ISampleProvider
{
private readonly ISampleProvider source;
private float threshold;
private float attackSeconds;
private float releaseSeconds;
private float envelope;
private float gain;
private float threshold = 0.02f;
private float attackSeconds = 0.01f;
private float releaseSeconds = 0.1f;
private float currentLevel = 0;
private float attackRate;
private float releaseRate;
private bool open = false;
public NoiseGateSampleProvider(ISampleProvider source)
{
this.source = source;
this.WaveFormat = source.WaveFormat;
CalculateAttackAndReleaseRates();
}
public float Threshold
public WaveFormat WaveFormat => source.WaveFormat;
public float Threshold
{
get => threshold;
set
{
threshold = value;
}
}
public float AttackSeconds
{
get => attackSeconds;
set
{
attackSeconds = value;
CalculateAttackAndReleaseRates();
}
}
public float ReleaseSeconds
{
get => releaseSeconds;
set
{
releaseSeconds = value;
CalculateAttackAndReleaseRates();
}
}
private void CalculateAttackAndReleaseRates()
{
get => threshold;
set => threshold = Math.Max(0, Math.Min(1, value));
attackRate = 1.0f / (WaveFormat.SampleRate * attackSeconds);
releaseRate = 1.0f / (WaveFormat.SampleRate * releaseSeconds);
}
public float AttackSeconds
{
get => attackSeconds;
set => attackSeconds = Math.Max(0.001f, value);
}
public float ReleaseSeconds
{
get => releaseSeconds;
set => releaseSeconds = Math.Max(0.001f, value);
}
public WaveFormat WaveFormat { get; }
public int Read(float[] buffer, int offset, int count)
{
int samplesRead = source.Read(buffer, offset, count);
float attackCoeff = (float)Math.Exp(-1.0 / (WaveFormat.SampleRate * attackSeconds));
float releaseCoeff = (float)Math.Exp(-1.0 / (WaveFormat.SampleRate * releaseSeconds));
for (int n = 0; n < samplesRead; n++)
for (int i = 0; i < samplesRead; i++)
{
float sample = buffer[offset + n];
float absSample = Math.Abs(sample);
// 包络跟踪
if (absSample > envelope)
envelope = absSample;
float currentSample = Math.Abs(buffer[offset + i]);
// 更新当前电平
if (currentSample > currentLevel)
{
// 攻击:升高比较快
currentLevel += attackRate * (currentSample - currentLevel);
}
else
envelope *= (absSample > threshold) ? attackCoeff : releaseCoeff;
{
// 释放:降低比较慢
currentLevel -= releaseRate * (currentLevel - currentSample);
if (currentLevel < 0) currentLevel = 0;
}
// 应用增益
if (envelope > threshold)
gain = 1.0f;
else
gain = 0.0f;
// 应用噪声门
if (currentLevel >= threshold)
{
open = true;
}
else if (open && currentLevel < threshold * 0.5f) // 加入一点滞后效应
{
open = false;
}
buffer[offset + n] = sample * gain;
// 根据噪声门状态保留或衰减信号
if (!open)
{
buffer[offset + i] *= 0.05f; // 不完全消除仅保留5%的信号强度
}
}
return samplesRead;