using System.Net;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Net.Http.Json;
using System.Text;
using System.Text.Json;
using Microsoft.Extensions.Logging;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;
using VideoAnalysisCore.AICore.GPT.DeepSeek;
using VideoAnalysisCore.Common;

namespace VideoAnalysisCore.AICore.GPT.Gemini
{
    /// <summary>
    /// GPT client implementation targeting the Gemini chat backend.
    /// Builds a streaming <c>ChatRequest</c> for a single user message and
    /// delegates the actual HTTP exchange to the base <see cref="GPTClient"/>.
    /// </summary>
    public class GeminiGPTClient : GPTClient
    {
        /// <summary>Active GPT configuration, taken from the application-wide ChatGpt settings.</summary>
        public override GptConfig Config { get; set; } = AppCommon.Config.ChatGpt.ChatGpt;

        private readonly IHttpClientFactory _httpClientFactory;
        private readonly RedisManager _redisManager;

        /// <summary>
        /// Creates the client and forwards the shared services to the base class.
        /// </summary>
        /// <param name="httpClientFactory">Factory used to create pooled <see cref="HttpClient"/> instances.</param>
        /// <param name="redisManager">Redis access manager shared with the base client.</param>
        /// <param name="workflowManager">Workflow manager required by the base <see cref="GPTClient"/>.</param>
        public GeminiGPTClient(IHttpClientFactory httpClientFactory, RedisManager redisManager, VideoSliceWorkflowManager workflowManager)
            : base(httpClientFactory, redisManager, workflowManager)
        {
            _httpClientFactory = httpClientFactory;
            _redisManager = redisManager;
        }

        /// <summary>
        /// Sends a single-turn chat request to the AI backend.
        /// </summary>
        /// <param name="task">Task id used to correlate the request.</param>
        /// <param name="postMessages">Prompt text sent as the sole "user" message.</param>
        /// <param name="title">Task type / title attached to the request.</param>
        /// <param name="model">GPT model version; defaults to <c>ChatGPTType.Gemini_3_Chat</c> when null.</param>
        /// <param name="max_tokens">
        /// Declared maximum token budget. NOTE(review): this parameter is currently NOT forwarded —
        /// the request's <c>max_tokens</c> is deliberately left null and only
        /// <c>max_completion_tokens</c> (12288) is sent; confirm this is intended for the Gemini API.
        /// </param>
        /// <returns>The task returned by the base <c>ChatAsync(ChatRequest)</c> overload.</returns>
        public override async Task ChatAsync(string task, string postMessages, string title, string model = null, int max_tokens = 32_000)
        {
            // Single-turn conversation: exactly one non-null user message.
            Message[] messageArr =
            [
                new Message(postMessages, "user"),
            ];

            model ??= ChatGPTType.Gemini_3_Chat;

            var chatReq = new ChatRequest
            {
                taskId = task,
                title = title,
                model = model,
                stream = true,
                messages = messageArr,
                // Gemini path uses the completion-token cap instead of max_tokens.
                max_completion_tokens = 12288,
                // Explicitly omitted fields so they are not serialized into the payload.
                max_tokens = null,
                modalities = null,
                top_p = null,
            };

            return await ChatAsync(chatReq);
        }
    }
}