// Learn.VideoAnalysis/VideoAnalysisCore/AICore/SherpaOnnx/SenseVoice.cs

using Microsoft.Extensions.Options;
using SherpaOnnx;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using VideoAnalysisCore.AICore.Whisper;
using VideoAnalysisCore.Common;
namespace VideoAnalysisCore.AICore.SherpaOnnx
{
public class SenseVoice
{
static OfflineRecognizer OR = default!;
static VoiceActivityDetector VAD = default!;
static VadModelConfig VADModelConfig = default!;
/// <summary>
/// Initialize the SenseVoice offline recognizer and the Silero voice-activity detector.
/// </summary>
/// <param name="numThreads">Number of threads for the ONNX runtime.</param>
/// <param name="useGPU">Whether to run on the GPU (CUDA). If this errors, see the CUDA setup guide:
/// <see href="https://k2-fsa.github.io/sherpa/onnx/pretrained_models/whisper/large-v3.html#run-with-gpu-float32"/></param>
public static void Init(int numThreads = 4, bool useGPU = false)
{
Console.WriteLine("初始化 SenseVoice");
OfflineRecognizerConfig config = new OfflineRecognizerConfig();
// Sample rate the model expects.
config.FeatConfig.SampleRate = 16000;
// Feature dimension the model was trained with.
config.FeatConfig.FeatureDim = 80;
// Path to tokens.txt.
config.ModelConfig.Tokens = Path.Combine(AppCommon.AIModelFile, "sherpa-onnx-sense-voice-24-07-17", "tokens.txt");
// SenseVoice model file.
config.ModelConfig.SenseVoice.Model = Path.Combine(AppCommon.AIModelFile, "sherpa-onnx-sense-voice-24-07-17", "model.onnx");
// 1 = apply inverse text normalization to the recognized text.
config.ModelConfig.SenseVoice.UseInverseTextNormalization = 1;
config.ModelConfig.SenseVoice.Language = "zh";
// Model type (empty = auto-detect).
config.ModelConfig.ModelType = string.Empty;
config.ModelConfig.NumThreads = numThreads;
// BUG FIX: condition was inverted (`if (!useGPU)`), selecting CUDA exactly
// when the caller asked for CPU. Only use the CUDA provider when requested.
if (useGPU)
    config.ModelConfig.Provider = "cuda";
#region
// Decoding method: greedy_search or modified_beam_search.
config.DecodingMethod = "greedy_search";
////config.DecodingMethod = "modified_beam_search";
//// Only used when DecodingMethod is modified_beam_search:
//// number of active paths kept during the search.
//config.MaxActivePaths =4;
#endregion
// Hotwords file path (none configured).
config.HotwordsFile = string.Empty;
// Bonus score applied to hotwords.
config.HotwordsScore = 1.5f;
// Path to inverse-text-normalization rule FSTs (none configured).
config.RuleFsts = string.Empty;
config.ModelConfig.Debug = 1;
OR = new OfflineRecognizer(config);
VADModelConfig = new VadModelConfig();
VADModelConfig.SileroVad.Model = Path.Combine(AppCommon.AIModelFile, "sherpa-onnx-sense-voice-24-07-17", "silero_VAD.onnx");
VADModelConfig.Debug = 0;
// 60 = VAD buffer size (seconds of audio the detector may hold).
VAD = new VoiceActivityDetector(VADModelConfig, 60);
}
/// <summary>
/// Transcribe the task's WAV audio into timed caption segments, report progress
/// while decoding, then store the captions in Redis and notify the speaker-parsing channel.
/// </summary>
/// <param name="task">Task id; also the base name of the WAV file under the task's local path.</param>
/// <exception cref="Exception">Thrown when the task's audio file cannot be found.</exception>
public static async Task RunTask(string task)
{
// Lazy one-time initialization with default settings.
if (OR is null)
    Init();
var filePath = Path.Combine(task.LocalPath(), task + ".wav");
if (string.IsNullOrEmpty(filePath) || !File.Exists(filePath))
    throw new Exception("task 音频路径未找到");
WaveReader reader = new WaveReader(filePath);
int numSamples = reader.Samples.Length;
int windowSize = VADModelConfig.SileroVad.WindowSize;
int sampleRate = VADModelConfig.SampleRate;
int numIter = numSamples / windowSize;
var totalSecond = numSamples / (double)sampleRate;
var res = new List<SenseVoiceRes>(500);
// Feed audio to the VAD one window at a time. The trailing partial window
// (numSamples % windowSize samples) is not fed; Flush below closes out
// whatever segment is still open.
for (int i = 0; i != numIter; ++i)
{
    int start = i * windowSize;
    float[] samples = new float[windowSize];
    Array.Copy(reader.Samples, start, samples, 0, windowSize);
    VAD.AcceptWaveform(samples);
    // Decode any completed speech segments as soon as they become available.
    if (VAD.IsSpeechDetected())
        DrainSegments(task, sampleRate, totalSecond, res, reportProgress: true);
}
// Flush so the final (possibly still-open) speech segment is emitted,
// then decode whatever remains. The flush pass matches the original
// behavior of not reporting progress.
VAD.Flush();
DrainSegments(task, sampleRate, totalSecond, res, reportProgress: false);
await RedisExpand.Redis.HMSetAsync(RedisExpandKey.Task(task), "Captions", res);
RedisExpand.InsertChannel(Enum.RedisChannelEnum.ParsingSpeaker, task);
}
/// <summary>
/// Decode every speech segment currently queued in the VAD and append the
/// recognized text (with start/end times in seconds) to <paramref name="res"/>.
/// Extracted to remove the duplicated decode loop that existed in both the
/// streaming pass and the flush pass of <see cref="RunTask"/>.
/// </summary>
/// <param name="task">Task id, used for progress reporting.</param>
/// <param name="sampleRate">Audio sample rate, converts sample offsets to seconds.</param>
/// <param name="totalSecond">Total audio duration in seconds, used for the progress percentage.</param>
/// <param name="res">Accumulator the decoded captions are appended to.</param>
/// <param name="reportProgress">Whether to publish decode progress to Redis (skipped on the final flush).</param>
private static void DrainSegments(string task, int sampleRate, double totalSecond, List<SenseVoiceRes> res, bool reportProgress)
{
while (!VAD.IsEmpty())
{
    // Oldest pending speech segment.
    SpeechSegment segment = VAD.Front();
    float startTime = segment.Start / (float)sampleRate;
    float duration = segment.Samples.Length / (float)sampleRate;
    // BUG FIX: OfflineStream wraps a native handle and is IDisposable;
    // the original leaked one stream per segment. Dispose after decoding.
    using (OfflineStream stream = OR.CreateStream())
    {
        stream.AcceptWaveform(sampleRate, segment.Samples);
        OR.Decode(stream);
        if (!string.IsNullOrEmpty(stream.Result.Text))
        {
            res.Add(new()
            {
                Text = stream.Result.Text,
                Start = startTime,
                End = startTime + duration
            });
            if (reportProgress)
            {
                var progress = (double)(startTime + duration) / (totalSecond) * 100;
                RedisExpand.SetTaskProgress(task, progress);
            }
        }
    }
    VAD.Pop();
}
}
}
}