using Microsoft.Extensions.Hosting;
using System.Net.Http.Headers;
using System.Net.Http;
using System.Net;
using System.Text;
using VideoAnalysisCore.AICore.GPT.Dto;
using VideoAnalysisCore.AICore.SherpaOnnx;
using VideoAnalysisCore.Common;
using Whisper.net;
using Microsoft.Extensions.DependencyInjection;
using VideoAnalysisCore.AICore.GPT.ChatGPT;
using VideoAnalysisCore.AICore.GPT.DeepSeek;
using System.Text.Json.Serialization;
namespace VideoAnalysisCore.AICore.GPT
{
/// <summary>
/// GPT workflow contract: runs chat-completion calls for a given analysis task.
/// NOTE(review): XML doc tags and, likely, generic return types (Task&lt;T&gt;) appear
/// to have been stripped from this file (angle-bracket content lost) — confirm the
/// original signatures against version control. The name "IBserGPTWorkflow" also
/// looks like a typo (IBaseGPTWorkflow?) but renaming would break implementers.
/// </summary>
public interface IBserGPTWorkflow
{
    /// <summary>
    /// Calls GPT (knowledge extraction for the task).
    /// </summary>
    /// <param name="task">Task id.</param>
    public Task GetKnow(string task);

    /// <summary>
    /// Gets the exam questions contained within the video segments.
    /// </summary>
    /// <param name="task">Task id.</param>
    public Task GetVideoQuestion(string task);

    /// <summary>
    /// Gets the video type.
    /// </summary>
    /// <param name="task">Task id.</param>
    public Task GetVideoType(string task);
}
/// <summary>
/// Chat-completion request payload, serialized as-is to the GPT/DeepSeek HTTP API
/// (property names intentionally match the wire format — do not rename).
/// </summary>
public class ChatRequest
{
    /// <summary>
    /// Conversation messages.
    /// </summary>
    public Message[] messages { get; set; }

    /// <summary>
    /// Sampling seed, used so that identical requests return identical results where
    /// possible. Default: null. This feature is in Beta: if specified, the backend
    /// samples deterministically on a best-effort basis only — determinism is not
    /// guaranteed; monitor the response's system_fingerprint for backend changes.
    /// </summary>
    public int? seed { get; set; } = null;

    // Local-only label; [JsonIgnore] keeps it out of the serialized request.
    [JsonIgnore]
    public string? title { get; set; } = null;

    /// <summary>
    /// Model id; defaults to the reasoning model (deepseek-reasoner).
    /// </summary>
    public string model { get; set; } = "deepseek-reasoner";

    // NOTE(review): token limits are integral in the API schema — float? looks
    // unintended (8000 serializes as 8000 either way, but int? would be correct);
    // confirm before changing, as callers may assign fractional values.
    public float? max_tokens { get; set; } = 8000;
    public float? max_completion_tokens { get; set; } = 8000;

    /// <summary>
    /// Sampling temperature, between 0 and 2. Higher values (e.g. 0.8) make output
    /// more random; lower values (e.g. 0.2) make it more focused and deterministic.
    /// API default is 1. Generally tune either this or top_p, not both.
    /// </summary>
    public float? temperature { get; set; } = 0.2f;

    /// <summary>
    /// Nucleus sampling alternative to temperature: the model considers only the
    /// tokens comprising the top_p probability mass (0.1 = top 10%). Tune jointly
    /// with temperature.
    /// </summary>
    public float? top_p { get; set; } = 0.1f;

    /// <summary>
    /// Output format specification; when enabled, forces structured output matching
    /// a supplied JSON schema. Currently disabled.
    /// </summary>
    //public object response_format { get; set; } = new { type = "json_object" };

    /// <summary>
    /// Stream the response as server-sent events.
    /// </summary>
    public bool stream { get; set; } = true;

    /// <summary>
    /// Output modalities the model should generate. Most models produce text, which
    /// is the API default (["text"]); audio-capable models (e.g. gpt-4o-audio-preview)
    /// accept ["text", "audio"].
    /// NOTE(review): "json" is not a documented modality, and this is a plain string
    /// rather than a JSON array — verify against the target API before relying on it.
    /// </summary>
    public string modalities { get; set; } = "[\"json\"]";

    /// <summary>
    /// Task id (application-side correlation value).
    /// </summary>
    public string taskId { get; set; }

    // Asks the server to include token usage in the final streamed chunk.
    public object stream_options { get; set; } = new { include_usage = true };
}
/// <summary>
/// GPT chat-completion response (non-streaming).
/// </summary>
public class ChatRes
{
    /// <summary>Unique completion id.</summary>
    public string id { get; set; }

    /// <summary>
    /// The API field is named "object" (reserved word in C#), so the mapping must be
    /// explicit: without [JsonPropertyName("object")] System.Text.Json looks for a
    /// field literally named "_object" and this property always deserializes as null.
    /// </summary>
    [JsonPropertyName("object")]
    public string _object { get; set; }

    /// <summary>Creation timestamp as reported by the API.</summary>
    public int created { get; set; }

    /// <summary>Model id that produced this completion.</summary>
    public string model { get; set; }

    /// <summary>Error payload; non-null when the request failed.</summary>
    public ChatResError error { get; set; }

    /// <summary>Completion candidates.</summary>
    public Choice[] choices { get; set; }

    /// <summary>Token accounting for this request.</summary>
    public Usage usage { get; set; }

    /// <summary>
    /// System fingerprint (backend configuration marker; use with seed to detect
    /// backend changes between otherwise-identical requests).
    /// </summary>
    public string system_fingerprint { get; set; }
}
/// <summary>
/// Token accounting reported by the API.
/// </summary>
public class Usage
{
    /// <summary>Tokens consumed by the prompt.</summary>
    public int prompt_tokens { get; set; }

    /// <summary>Tokens generated in the completion.</summary>
    public int completion_tokens { get; set; }

    /// <summary>Total tokens (prompt + completion).</summary>
    public int total_tokens { get; set; }
}
/// <summary>
/// One completion candidate in a non-streaming response.
/// </summary>
public class Choice
{
    /// <summary>Position of this choice within the choices array.</summary>
    public int index { get; set; }

    /// <summary>The generated message.</summary>
    public Message message { get; set; }

    /// <summary>Log-probability payload; shape not modeled here, kept opaque.</summary>
    public object logprobs { get; set; }

    /// <summary>Why generation stopped (e.g. "stop", "length").</summary>
    public string finish_reason { get; set; }
}
/// <summary>
/// Error payload returned by the API in place of a normal completion.
/// </summary>
public class ChatResError
{
    /// <summary>Human-readable error description.</summary>
    public string message { get; set; }

    /// <summary>Error category/type code.</summary>
    public string type { get; set; }
}
/// <summary>
/// One server-sent-events (streaming) response chunk.
/// </summary>
public class ChatResSSE
{
    /// <summary>Completion id.</summary>
    public string id { get; set; }

    /// <summary>Creation timestamp as reported by the API.</summary>
    public int created { get; set; }

    /// <summary>
    /// Model id.
    /// </summary>
    public string model { get; set; }

    /// <summary>
    /// Incremental choices carried by this chunk.
    /// </summary>
    public ChoiceSSE[] choices { get; set; }

    /// <summary>
    /// Token usage — presumably populated only on the final chunk when
    /// stream_options.include_usage is requested; confirm against the API.
    /// </summary>
    public Usage usage { get; set; }
}
/// <summary>
/// One streamed choice; delta carries only the newly generated fragment.
/// </summary>
public class ChoiceSSE
{
    /// <summary>Position of this choice within the choices array.</summary>
    public int index { get; set; }

    /// <summary>Incremental message fragment for this chunk.</summary>
    public Message delta { get; set; }

    /// <summary>Why generation stopped (null while streaming continues).</summary>
    public string finish_reason { get; set; }
}
/// <summary>
/// A single chat message; shared by request bodies (messages) and responses
/// (message / delta). Property names match the wire format — do not rename.
/// </summary>
public class Message
{
    /// <summary>Parameterless constructor, required for deserialization.</summary>
    public Message()
    {
    }

    /// <summary>Builds a message from its text and the speaking role.</summary>
    /// <param name="content">Message text.</param>
    /// <param name="role">Speaker role (e.g. "system", "user", "assistant").</param>
    public Message(string content, string role)
        => (this.role, this.content) = (role, content);

    /// <summary>Speaker role.</summary>
    public string role { get; set; }

    /// <summary>Message text.</summary>
    public string content { get; set; }

    /// <summary>
    /// Reasoning content (chain-of-thought text emitted by reasoning models).
    /// </summary>
    public string reasoning_content { get; set; }

    /// <summary>Refusal text, when the model declines to answer.</summary>
    public string refusal { get; set; }
}
/// <summary>
/// Dependency-injection registration helpers for the GPT services.
/// </summary>
public static class GPTExpand
{
    /// <summary>
    /// Registers the GPT workflow services as singletons.
    /// NOTE(review): the generic type arguments of these AddSingleton calls have been
    /// lost (angle-bracket content stripped from this file) — as written,
    /// AddSingleton() with no type arguments does not compile. Restore the original
    /// &lt;TService, TImplementation&gt; pairs (presumably types from the ChatGPT /
    /// DeepSeek namespaces imported above) from version control; they cannot be
    /// reconstructed from this file alone.
    /// </summary>
    /// <param name="services">The service collection to register into.</param>
    public static void AddGPTService(this IServiceCollection services)
    {
        services.AddSingleton();
        services.AddSingleton();
        services.AddSingleton();
        services.AddSingleton();
    }
}
}