diff --git a/VideoAnalysisCore/AICore/GPT/DeepSeek/DeepSeek_GPT.cs b/VideoAnalysisCore/AICore/GPT/DeepSeek/DeepSeek_GPT.cs index f1cc766..fe87210 100644 --- a/VideoAnalysisCore/AICore/GPT/DeepSeek/DeepSeek_GPT.cs +++ b/VideoAnalysisCore/AICore/GPT/DeepSeek/DeepSeek_GPT.cs @@ -69,16 +69,24 @@ namespace VideoAnalysisCore.AICore.GPT.DeepSeek || s.Depth == 2)) .Select(s => s.Name).ToArrayAsync(); string title = taskInfo.MediaName; + var speakerArr = JsonSerializer.Deserialize(taskInfo.Speaker); + var captionsArr = JsonSerializer.Deserialize(taskInfo.Captions); + var fileNameResFormat = "{授课章节: string|null}"; + //var fileNamePostMessages = title + + // " 这是一堂课的标题,请你基于标题帮我分析出这堂课所讲授的内容与最恰当的授课章节(关联最贴切的章节,保留一个章节!)." + + // $"章节范围限定在[{string.Join(',', xkwKnows)}]范围内." + + // $"输出格式 json字符串 对象格式{fileNameResFormat}"; + var fileNamePostMessages = title + - " 这是一堂课的标题,请你基于标题帮我分析出这堂课所讲授的内容与最恰当的授课章节(关联最贴切的章节,保留一个章节!)." + + " 这是一堂课的部分授课字幕,请你基于字幕内容帮我分析出这堂课所讲授的内容与最恰当的授课章节(关联最贴切的章节,保留一个章节!)." + $"章节范围限定在[{string.Join(',', xkwKnows)}]范围内." + $"输出格式 json字符串 对象格式{fileNameResFormat}"; + var fileNameInfoRes = await ChatAsync (task, fileNamePostMessages, null);//, "deepseek-chat"); - var speakerArr = JsonSerializer.Deserialize(taskInfo.Speaker); - var captionsArr = JsonSerializer.Deserialize(taskInfo.Captions); + var captions = ExpandFunction.GetSpeakerCaptions(captionsArr, speakerArr); var maxVideoTime = captions?.TimeBase?.LastOrDefault()?.End ?? 
0; var criteriaBuilder = new StringBuilder(); diff --git a/VideoAnalysisCore/AICore/SherpaOnnx/SenseVoice.cs b/VideoAnalysisCore/AICore/SherpaOnnx/SenseVoice.cs index 1961817..de8f980 100644 --- a/VideoAnalysisCore/AICore/SherpaOnnx/SenseVoice.cs +++ b/VideoAnalysisCore/AICore/SherpaOnnx/SenseVoice.cs @@ -17,10 +17,10 @@ using static System.Runtime.InteropServices.JavaScript.JSType; namespace VideoAnalysisCore.AICore.SherpaOnnx { - public class SenseVoice + public static class SenseVoice { const string TransducerStr = "sherpa-onnx-streaming-zipformer-bilingual-zh-en-2023-02-20"; - static OfflineRecognizer OR =default!; + static OfflineRecognizer OR = default!; //static VoiceActivityDetector VAD = default!; static VadModelConfig VADModelConfig = default!; /// @@ -28,7 +28,7 @@ namespace VideoAnalysisCore.AICore.SherpaOnnx /// /// 默认6线程 /// 是否使用gpu 报错请看安装CUDA环境 - public static void Init(int numThreads =6,bool useGPU=false,bool useHotwords = false) + public static void Init(int numThreads = 6, bool useGPU = false, bool useHotwords = false) { Console.WriteLine("初始化 SenseVoice"); OfflineRecognizerConfig config = new OfflineRecognizerConfig(); @@ -41,14 +41,14 @@ namespace VideoAnalysisCore.AICore.SherpaOnnx //SenseVoice 模型 config.ModelConfig.SenseVoice.Model = Path.Combine(AppCommon.AIModelFile, "sherpa-onnx-sense-voice-24-07-17", "model.onnx"); //1 使用逆文本规范化处理感官语音。 - config.ModelConfig.SenseVoice.UseInverseTextNormalization =1; + config.ModelConfig.SenseVoice.UseInverseTextNormalization = 1; config.ModelConfig.SenseVoice.Language = "zh"; //模型类型 config.ModelConfig.ModelType = string.Empty; config.ModelConfig.NumThreads = numThreads; config.ModelConfig.Provider = "cpu"; //需要使用GPU - if (!useGPU) + if (useGPU) config.ModelConfig.Provider = "cuda"; #region 有效的解码方法
TaskHandle(new WaveReader(s)); - WaveReader reader = new WaveReader(s); - int numSamples = reader.Samples.Length; - int windowSize = VADModelConfig.SileroVad.WindowSize; - int sampleRate = VADModelConfig.SampleRate; - int numIter = numSamples / windowSize; - var totalSecond = numSamples / (float)sampleRate; - var res = new List(500); - - //缓冲区大小 - var VAD = new VoiceActivityDetector(VADModelConfig, 60); - //var VAD = new VoiceActivityDetector(VADModelConfig, 60); - for (int i = 0; i != numIter; ++i) - { - int start = i * windowSize; - float[] samples = new float[windowSize]; - Array.Copy(reader.Samples, start, samples, 0, windowSize); - VAD.AcceptWaveform(samples); - //是否检测到语音 - if (VAD.IsSpeechDetected()) - { - while (!VAD.IsEmpty()) - { - //获取最新的发言片段 - SpeechSegment segment = VAD.Front(); - float startTime = segment.Start / (float)sampleRate; - float duration = segment.Samples.Length / (float)sampleRate; - using OfflineStream stream = OR.CreateStream(); - stream.AcceptWaveform(sampleRate, segment.Samples); - OR.Decode(stream); - if (!string.IsNullOrEmpty(stream.Result.Text)) - { - res.Add(new() - { - Text = stream.Result.Text, - Start = (float)Math.Round(startTime, 2, MidpointRounding.AwayFromZero), - End = (float)Math.Round(startTime + duration, 2, MidpointRounding.AwayFromZero), - }); - } - VAD.Pop(); - } - } - } - VAD.Flush(); - - while (!VAD.IsEmpty()) - { - SpeechSegment segment = VAD.Front(); - float startTime = segment.Start / (float)sampleRate; - float duration = segment.Samples.Length / (float)sampleRate; - - OfflineStream stream = OR.CreateStream(); - stream.AcceptWaveform(sampleRate, segment.Samples); - OR.Decode(stream); - if (!string.IsNullOrEmpty(stream.Result.Text)) - { - res.Add(new() - { - Text = stream.Result.Text, - Start = (float)Math.Round(startTime, 2, MidpointRounding.AwayFromZero), - End = (float)Math.Round(startTime + duration, 2, MidpointRounding.AwayFromZero), - }); - } - - VAD.Pop(); - } - VAD.Reset(); - return res; } /// @@ -185,95 
+117,108 @@ namespace VideoAnalysisCore.AICore.SherpaOnnx /// public static async Task RunTask(string task) { - if (OR is null) - Init(); var filePath = Path.Combine(task.LocalPath(), task + ".wav"); if (string.IsNullOrEmpty(filePath) || !File.Exists(filePath)) throw new Exception("task 音频路径未找到"); + await TaskHandle(new WaveReader(filePath), task); + } - WaveReader reader = new WaveReader(filePath); + /// + /// 任务处理 + /// + /// Wave + /// 任务id [默认Null] + /// + /// + public static async Task> TaskHandle(WaveReader reader, string? task = null) + { + if (OR is null) + Init(); int numSamples = reader.Samples.Length; int windowSize = VADModelConfig.SileroVad.WindowSize; int sampleRate = VADModelConfig.SampleRate; int numIter = numSamples / windowSize; var totalSecond = numSamples / (float)sampleRate; var res = new List(500); - var VAD = new VoiceActivityDetector(VADModelConfig, 60); + using var VAD = new VoiceActivityDetector(VADModelConfig, 30); for (int i = 0; i != numIter; ++i) { int start = i * windowSize; - float[] samples = new float[windowSize]; - Array.Copy(reader.Samples, start, samples, 0, windowSize); - VAD.AcceptWaveform(samples); + //float[] samples = new float[windowSize]; + //Array.Copy(reader.Samples, start, samples, 0, windowSize); + //VAD.AcceptWaveform(samples); + + Memory<float> samples = new float[windowSize]; + Memory<float> sourceSpan = reader.Samples.AsMemory(start, windowSize); + sourceSpan.CopyTo(samples); + VAD.AcceptWaveform(samples.ToArray()); + + //是否检测到语音 if (VAD.IsSpeechDetected()) { + //获取最新的发言片段 while (!VAD.IsEmpty()) - { - //获取最新的发言片段 - SpeechSegment segment = VAD.Front(); - float startTime = segment.Start / (float)sampleRate; - float duration = segment.Samples.Length / (float)sampleRate; - OfflineStream stream = OR.CreateStream(); - stream.AcceptWaveform(sampleRate, segment.Samples); - OR.Decode(stream); - if (!string.IsNullOrEmpty(stream.Result.Text)) - { - res.Add(new() - { - Text = stream.Result.Text, - //Text =
ExpandFunction.HandleFormula(stream.Result.Text), - Start = (float)Math.Round(startTime, 2, MidpointRounding.AwayFromZero), - End = (float)Math.Round(startTime + duration, 2, MidpointRounding.AwayFromZero), - }); - var progress = (float)(startTime + duration) / (totalSecond) * 100; - RedisExpand.SetTaskProgress(task, progress); - - } - VAD.Pop(); - } + await VAD.ReadNext(res, totalSecond, task); } } - VAD.Flush(); - while (!VAD.IsEmpty()) + VAD.Flush(); + while (!VAD.IsEmpty()) await VAD.ReadNext(res, totalSecond, task); + //如果携带任务ID + if (!string.IsNullOrEmpty(task)) { - SpeechSegment segment = VAD.Front(); - float startTime = segment.Start / (float)sampleRate; - float duration = segment.Samples.Length / (float)sampleRate; + Console.WriteLine(DateTime.Now + "=> SenseVoice 字幕数量" + res.Count); + var captionsStr = JsonSerializer.Serialize(res); + await DbScoped.Sugar + .Updateable() + .SetColumns(it => it.Captions == captionsStr) + .Where(it => it.Id == long.Parse(task)) + .ExecuteCommandAsync(); + await RedisExpand.Redis.HMSetAsync(RedisExpandKey.Task(task), "Captions", res); + //RedisExpand.InsertChannel(Enum.RedisChannelEnum.ParsingSpeaker, task); + //分析完成视频字幕后继续接收任务 + RedisExpand.NewTask(); - OfflineStream stream = OR.CreateStream(); - stream.AcceptWaveform(sampleRate, segment.Samples); - OR.Decode(stream); - if (!string.IsNullOrEmpty(stream.Result.Text)) - { - res.Add(new() - { - - Text = stream.Result.Text, - //Text = ExpandFunction.HandleFormula(stream.Result.Text), - Start = (float)Math.Round(startTime, 2, MidpointRounding.AwayFromZero), - End = (float)Math.Round(startTime + duration, 2, MidpointRounding.AwayFromZero), - }); - } - VAD.Pop(); + RedisExpand.InsertChannel(RedisChannelEnum.ChatModelAnalysis, task); } - - Console.WriteLine(DateTime.Now + "=> SenseVoice 字幕数量"+ res.Count); - - var captionsStr = JsonSerializer.Serialize(res); - await DbScoped.Sugar - .Updateable() - .SetColumns(it => it.Captions == captionsStr) - .Where(it => it.Id == long.Parse(task)) -
.ExecuteCommandAsync(); - await RedisExpand.Redis.HMSetAsync(RedisExpandKey.Task(task), "Captions", res); - //RedisExpand.InsertChannel(Enum.RedisChannelEnum.ParsingSpeaker, task); - //分析完成视频字幕后继续接收任务 - RedisExpand.NewTask(); - - RedisExpand.InsertChannel(RedisChannelEnum.ChatModelAnalysis, task); - + return res; + } + /// + /// 处理vad 下一个切片 + /// + /// + /// 字幕处理后写入数组 + /// 总时长 + /// 所属任务id + /// + public static async Task ReadNext(this VoiceActivityDetector VAD, List res, float totalSecond, string? task = null) + { + var segment = VAD.Front(); + var sampleRate = VADModelConfig.SampleRate; + var sampleRateF = (float)VADModelConfig.SampleRate; + float startTime = segment.Start / sampleRateF; + float duration = segment.Samples.Length / sampleRateF; + using var stream = OR.CreateStream(); + stream.AcceptWaveform(sampleRate, segment.Samples); + OR.Decode(stream); + if (!string.IsNullOrEmpty(stream.Result.Text)) + { + var text = stream.Result.Text.Trim(); + if (text.Length == 1 && text.First() >= '\uFF00' && text.First() <= '\uFFEF') // 检查字符是否在全角半角字符集的标点符号范围内 + { + VAD.Pop(); + return; + } + res.Add(new() + { + Text = stream.Result.Text, + Start = (float)Math.Round(startTime, 2, MidpointRounding.AwayFromZero), + End = (float)Math.Round(startTime + duration, 2, MidpointRounding.AwayFromZero), + }); + if (!string.IsNullOrEmpty(task)) + RedisExpand.SetTaskProgress(task, (double)(startTime + duration) / (totalSecond) * 100); + } + VAD.Pop(); } } } diff --git a/VideoAnalysisCore/AICore/SherpaOnnx/Speaker.cs b/VideoAnalysisCore/AICore/SherpaOnnx/Speaker.cs index 13983e8..c4b0725 100644 --- a/VideoAnalysisCore/AICore/SherpaOnnx/Speaker.cs +++ b/VideoAnalysisCore/AICore/SherpaOnnx/Speaker.cs @@ -9,6 +9,7 @@ using VideoAnalysisCore.Model; using System.Text.Json; using VideoAnalysisCore.Model.Enum; using Microsoft.Extensions.DependencyInjection; +using UserCenter.Model.Enum; namespace VideoAnalysisCore.AICore.SherpaOnnx {