// Learn.VideoAnalysis/VideoAnalysisCore/AICore/GPT/Gemini/GeminiGPTClient.cs
// (70 lines, 2.3 KiB, C#)

using VideoAnalysisCore.Common;
using System.Net.Http.Headers;
using System.Text;
using Microsoft.Extensions.Logging;
using Newtonsoft.Json.Linq;
using System.Net.Http;
using Newtonsoft.Json;
using System.Net.Http.Json;
using System.Net;
using VideoAnalysisCore.AICore.GPT.DeepSeek;
using System.Text.Json;
namespace VideoAnalysisCore.AICore.GPT.Gemini
{
    /// <summary>
    /// GPT client implementation targeting the Gemini chat models.
    /// Reuses the shared ChatGpt configuration block from <see cref="AppCommon"/>.
    /// </summary>
    public class GeminiGPTClient : GPTClient
    {
        public override GptConfig Config { get; set; } = AppCommon.Config.ChatGpt.ChatGpt;

        private readonly IHttpClientFactory _httpClientFactory;
        private readonly RedisManager _redisManager;

        public GeminiGPTClient(IHttpClientFactory httpClientFactory, RedisManager redisManager)
            : base(httpClientFactory, redisManager)
        {
            _httpClientFactory = httpClientFactory;
            _redisManager = redisManager;
        }

        /// <summary>
        /// Sends a chat request to the AI and deserializes the response.
        /// </summary>
        /// <typeparam name="T">Type the JSON response is deserialized into.</typeparam>
        /// <param name="task">Task id.</param>
        /// <param name="postMessages">Prompt text, sent as a single "user" message.</param>
        /// <param name="title">Task type / title.</param>
        /// <param name="model">
        /// GPT model name; defaults to <c>ChatGPTType.Gemini_3_Chat_flash</c> when null.
        /// </param>
        /// <param name="max_tokens">
        /// NOTE(review): currently ignored — the request always sends
        /// <c>max_completion_tokens = 12288</c> and leaves <c>max_tokens</c> null.
        /// Kept in the signature for interface compatibility with the base class.
        /// </param>
        /// <returns>The deserialized response of type <typeparamref name="T"/>.</returns>
        /// <exception cref="Exception">Propagated from the underlying chat call.</exception>
        public override async Task<T> ChatAsync<T>(string task, string postMessages, string title,
            string model = null, int max_tokens = 32_000)
        {
            // Single user message built inline; elements cannot be null, so the
            // original Where(s => s != null) pass was a no-op and has been removed.
            Message[] messageArr = [new Message(postMessages, "user")];

            model ??= ChatGPTType.Gemini_3_Chat_flash;

            var chatReq = new ChatRequest
            {
                taskId = task,
                title = title,
                model = model,
                stream = true,
                messages = messageArr,
                // Gemini path sends a fixed completion budget. The fields below are
                // explicitly set to null — presumably so the serializer omits them
                // from the payload; verify the serializer's null-value handling.
                max_completion_tokens = 12288,
                modalities = null,
                max_tokens = null,
                top_p = null,
            };

            return await ChatAsync<T>(chatReq);
        }
    }
}