// NOTE(review): removed pasted extraction metadata ("200 lines / 6.5 KiB / C#")
// that was not valid C# and would not compile.
using Microsoft.Extensions.Hosting;
|
||
using System.Net.Http.Headers;
|
||
using System.Net.Http;
|
||
using System.Net;
|
||
using System.Text;
|
||
using VideoAnalysisCore.AICore.GPT.Dto;
|
||
using VideoAnalysisCore.AICore.SherpaOnnx;
|
||
using VideoAnalysisCore.Common;
|
||
using Whisper.net;
|
||
using Microsoft.Extensions.DependencyInjection;
|
||
using VideoAnalysisCore.AICore.GPT.ChatGPT;
|
||
using VideoAnalysisCore.AICore.GPT.DeepSeek;
|
||
using System.Text.Json.Serialization;
|
||
|
||
namespace VideoAnalysisCore.AICore.GPT
|
||
{
|
||
|
||
/// <summary>
/// GPT access contract for analyzing a video task.
/// </summary>
public interface IBserGPT
{
    /// <summary>
    /// Queries GPT for a task (访问GPT).
    /// </summary>
    /// <param name="task">Task id.</param>
    /// <returns>The task result.</returns>
    public Task<TaskRes> GetKnow(string task);

    /// <summary>
    /// Gets the exam questions contained in the video segments
    /// (获取视频分段内的试题).
    /// </summary>
    /// <param name="task">Task id.</param>
    /// <returns>A task representing the asynchronous operation.</returns>
    public Task GetVideoQuestion(string task);

    /// <summary>
    /// Gets the video type (获取视频类型).
    /// </summary>
    /// <param name="task">Task id.</param>
    /// <returns>A task representing the asynchronous operation.</returns>
    public Task GetVideoType(string task);
}
|
||
|
||
/// <summary>
/// Request payload for a chat-completion call (请求数据).
/// Property names are intentionally lower-case: they are serialized verbatim
/// as the JSON field names expected by the API.
/// </summary>
public class ChatRequest
{
    /// <summary>
    /// Conversation history sent to the model (对话).
    /// </summary>
    public Message[] messages { get; set; }

    /// <summary>
    /// Sampling seed. Default: null.
    /// <para>Beta feature: when set, the backend samples deterministically on a
    /// best-effort basis, so repeated requests with the same seed and parameters
    /// should return the same result. Determinism is not guaranteed; monitor the
    /// response's system_fingerprint for backend changes.</para>
    /// </summary>
    public int? seed { get; set; } = null;

    /// <summary>
    /// Local display title; excluded from the serialized request body.
    /// </summary>
    [JsonIgnore]
    public string? title { get; set; } = null;

    /// <summary>
    /// Model id; defaults to the reasoning model "deepseek-reasoner".
    /// </summary>
    public string model { get; set; } = "deepseek-reasoner";

    // NOTE(review): the API documents max_tokens as an integer; float? only
    // works while the serialized value has no fractional part — confirm.
    public float? max_tokens { get; set; } = 8000;

    public float? max_completion_tokens { get; set; } = 8000;

    /// <summary>
    /// Sampling temperature, between 0 and 2 (API default: 1). Higher values
    /// (e.g. 0.8) make output more random; lower values (e.g. 0.2) make it more
    /// focused and deterministic. Usually tune either this or
    /// <see cref="ChatRequest.top_p"/>, not both.
    /// </summary>
    public float? temperature { get; set; } = 0.2f;

    /// <summary>
    /// Nucleus sampling: the model considers only the tokens comprising the
    /// top_p probability mass (0.1 = top 10%). Recommended to tune together
    /// with <see cref="ChatRequest.temperature"/>.
    /// </summary>
    public float? top_p { get; set; } = 0.1f;

    /// <summary>
    /// Structured-output format specifier; currently disabled.
    /// </summary>
    //public object response_format { get; set; } = new { type = "json_object" };

    /// <summary>
    /// Whether the response is streamed via server-sent events (流式返回).
    /// </summary>
    public bool stream { get; set; } = true;

    /// <summary>
    /// Output modalities the model should generate. API default: ["text"];
    /// audio-capable models (e.g. gpt-4o-audio-preview) also accept
    /// ["text", "audio"].
    /// NOTE(review): the value here is a pre-serialized JSON string and "json"
    /// is not a documented modality — confirm the backend actually accepts it.
    /// </summary>
    public string modalities { get; set; } = "[\"json\"]";

    /// <summary>
    /// Task id (任务id).
    /// NOTE(review): unlike <see cref="title"/> this is NOT [JsonIgnore]-d, so
    /// it is serialized into the API request as an extra field — confirm that
    /// is intended.
    /// </summary>
    public string taskId { get; set; }

    /// <summary>
    /// Stream options; include_usage=true asks the API to report token usage
    /// in the final streamed chunk.
    /// </summary>
    public object stream_options { get; set; } = new { include_usage = true };
}
|
||
/// <summary>
/// Non-streaming chat-completion response returned by the GPT API (gpt返回值).
/// </summary>
public class ChatRes
{
    /// <summary>Unique id of the completion.</summary>
    public string id { get; set; }

    /// <summary>
    /// Object type reported by the API (e.g. "chat.completion").
    /// The JSON field is named "object", which is a C# keyword and cannot be a
    /// property name; without this explicit mapping System.Text.Json would
    /// look for a field literally named "_object" and this property would
    /// always deserialize as null.
    /// </summary>
    [JsonPropertyName("object")]
    public string _object { get; set; }

    /// <summary>Unix timestamp (seconds) of when the completion was created.</summary>
    public int created { get; set; }

    /// <summary>Model that produced the completion.</summary>
    public string model { get; set; }

    /// <summary>Error payload when the request failed; null on success.</summary>
    public ChatResError error { get; set; }

    /// <summary>Generated completion choices.</summary>
    public Choice[] choices { get; set; }

    /// <summary>Token usage statistics for the request.</summary>
    public Usage usage { get; set; }

    /// <summary>
    /// System fingerprint (系统指纹) of the backend configuration; useful with
    /// the seed parameter to detect backend changes.
    /// </summary>
    public string system_fingerprint { get; set; }
}
|
||
/// <summary>
/// Token accounting for a completion.
/// </summary>
public class Usage
{
    /// <summary>Tokens consumed by the prompt.</summary>
    public int prompt_tokens { get; set; }

    /// <summary>Tokens generated in the completion.</summary>
    public int completion_tokens { get; set; }

    /// <summary>Total tokens (prompt + completion).</summary>
    public int total_tokens { get; set; }
}
|
||
|
||
|
||
/// <summary>
/// One generated alternative in a non-streaming response.
/// </summary>
public class Choice
{
    /// <summary>Index of this choice within the response.</summary>
    public int index { get; set; }

    /// <summary>The complete message produced by the model.</summary>
    public Message message { get; set; }

    /// <summary>Log-probability payload; left untyped as it is not consumed here.</summary>
    public object logprobs { get; set; }

    /// <summary>Why generation stopped (e.g. "stop", "length").</summary>
    public string finish_reason { get; set; }
}
|
||
/// <summary>
/// Error payload embedded in a failed API response.
/// </summary>
public class ChatResError
{
    /// <summary>Human-readable error description.</summary>
    public string message { get; set; }

    /// <summary>Machine-readable error category.</summary>
    public string type { get; set; }
}
|
||
|
||
/// <summary>
/// One server-sent-event chunk of a streaming chat response.
/// </summary>
public class ChatResSSE
{
    /// <summary>Completion id.</summary>
    public string id { get; set; }

    /// <summary>Unix timestamp (seconds) of creation.</summary>
    public int created { get; set; }

    /// <summary>
    /// Model id (模型id).
    /// </summary>
    public string model { get; set; }

    /// <summary>
    /// Incremental choices carried by this chunk (对话).
    /// </summary>
    public ChoiceSSE[] choices { get; set; }

    /// <summary>
    /// Token usage (token使用情况); presumably populated only on the final
    /// chunk when stream_options.include_usage is requested — confirm.
    /// </summary>
    public Usage usage { get; set; }
}
|
||
/// <summary>
/// Incremental choice inside a streaming chunk.
/// </summary>
public class ChoiceSSE
{
    /// <summary>Index of this choice within the response.</summary>
    public int index { get; set; }

    /// <summary>Partial message fragment (delta) for this chunk.</summary>
    public Message delta { get; set; }

    /// <summary>Why generation stopped; null until the choice's final chunk.</summary>
    public string finish_reason { get; set; }
}
|
||
|
||
/// <summary>
/// A single chat message exchanged with the model, in both requests
/// (<see cref="ChatRequest.messages"/>) and responses.
/// </summary>
public class Message
{
    /// <summary>Creates an empty message (used by deserialization).</summary>
    public Message()
    {
    }

    /// <summary>Creates a message with the given content and role.</summary>
    /// <param name="content">Text content of the message.</param>
    /// <param name="role">Author role of the message.</param>
    public Message(string content, string role)
    {
        (this.content, this.role) = (content, role);
    }

    /// <summary>Author role (e.g. "user" or "assistant").</summary>
    public string role { get; set; }

    /// <summary>Text content of the message.</summary>
    public string content { get; set; }

    /// <summary>
    /// Reasoning trace (推理内容) returned alongside the content by reasoning
    /// models such as deepseek-reasoner.
    /// </summary>
    public string reasoning_content { get; set; }

    /// <summary>Refusal text, when the model declines to answer.</summary>
    public string refusal { get; set; }
}
|
||
|
||
|
||
/// <summary>
/// Dependency-injection registration helpers for the GPT services.
/// </summary>
public static class GPTExpand
{
    /// <summary>
    /// Registers the GPT clients and the default <see cref="IBserGPT"/>
    /// implementation as singletons (注册GPT服务).
    /// </summary>
    /// <param name="services">The service collection to register into.</param>
    public static void AddGPTService(this IServiceCollection services)
    {
        // AddSingleton returns the collection, so the registrations chain.
        services
            .AddSingleton<DeepSeekGPTClient>()
            .AddSingleton<ChatGPTClient>()
            .AddSingleton<IBserGPT, DeepSeek_GPT>();
    }
}
|
||
}
|