diff --git a/VideoAnalysis/WebUI/src/components/hTable/index.vue b/VideoAnalysis/WebUI/src/components/hTable/index.vue
index 65d75a0..3d67f19 100644
--- a/VideoAnalysis/WebUI/src/components/hTable/index.vue
+++ b/VideoAnalysis/WebUI/src/components/hTable/index.vue
@@ -64,9 +64,7 @@ onBeforeMount(() => {
});
});
onMounted(() => {
- nextTick(async () => {
- // appStyle();
- });
+ window.addEventListener("resize", appStyle);
});
-onUnmounted(() => {});
+onUnmounted(() => { window.removeEventListener("resize", appStyle); });
// defineExpose({
diff --git a/VideoAnalysisCore/AICore/GPT/BserGPT.cs b/VideoAnalysisCore/AICore/GPT/BserGPT.cs
index 6f248f6..97957fa 100644
--- a/VideoAnalysisCore/AICore/GPT/BserGPT.cs
+++ b/VideoAnalysisCore/AICore/GPT/BserGPT.cs
@@ -51,6 +51,8 @@ namespace VideoAnalysisCore.AICore.GPT
/// 此功能处于 Beta 阶段。 如果指定,我们的系统将尽最大努力确定性地采样,这样具有相同 and 参数的重复请求应该返回相同的结果。 无法保证确定性,您应该参考 response 参数来监控后端的变化
///
public int? seed { get; set; } = null;
+ [JsonIgnore]
+ public string? title { get; set; } = null;
///
/// 推理模型 (deepseek-reasoner)
///
diff --git a/VideoAnalysisCore/AICore/GPT/ChatGPT/ChatGPTClient.cs b/VideoAnalysisCore/AICore/GPT/ChatGPT/ChatGPTClient.cs
index 982ad63..4f82c08 100644
--- a/VideoAnalysisCore/AICore/GPT/ChatGPT/ChatGPTClient.cs
+++ b/VideoAnalysisCore/AICore/GPT/ChatGPT/ChatGPTClient.cs
@@ -23,7 +23,7 @@ namespace VideoAnalysisCore.AICore.GPT.ChatGPT
private readonly IHttpClientFactory _httpClientFactory;
private readonly RedisManager redisManager;
- public ChatGPTClient(IHttpClientFactory httpClientFactory, RedisManager redisManager):base(httpClientFactory, redisManager)
+ public ChatGPTClient(IHttpClientFactory httpClientFactory, RedisManager redisManager) : base(httpClientFactory, redisManager)
{
_httpClientFactory = httpClientFactory;
this.redisManager = redisManager;
@@ -41,15 +41,27 @@ namespace VideoAnalysisCore.AICore.GPT.ChatGPT
/// 最大token 不设置默认最大值 16000/8000
///
///
- public async Task ChatAsync(string task, string postMessages, string title, string model =null, int max_tokens = 8000)
+ public async Task ChatAsync(string task, string postMessages, string title, string model = null, int max_tokens = 8000)
{
+ Message[] messageArr = [
+ new Message(postMessages,"user"),
+ ];
+ messageArr = messageArr.Where(s => s != null).ToArray();
+ var chatReq = new ChatRequest
+ {
+ taskId = task,
+ title = title,
+ model = model ?? ChatGPTType.GPT5_mini,
+ stream = true,
+ temperature = 0.2f,
+ messages = messageArr
+ };
+ chatReq.modalities = null;
+ chatReq.max_tokens = null;
+ chatReq.top_p = null;
- //chatReq.modalities = null;
- //chatReq.max_tokens = null;
- //chatReq.top_p = null;
-
- return await base.ChatAsync(task, postMessages, title, model ?? ChatGPTType.GPT5_mini, max_tokens);
+ return await base.ChatAsync(chatReq);
}
}
diff --git a/VideoAnalysisCore/AICore/GPT/DeepSeek/DeepSeekGPTClient.cs b/VideoAnalysisCore/AICore/GPT/DeepSeek/DeepSeekGPTClient.cs
index 0a63966..3544f07 100644
--- a/VideoAnalysisCore/AICore/GPT/DeepSeek/DeepSeekGPTClient.cs
+++ b/VideoAnalysisCore/AICore/GPT/DeepSeek/DeepSeekGPTClient.cs
@@ -44,9 +44,20 @@ namespace VideoAnalysisCore.AICore.GPT.ChatGPT
///
public async Task ChatAsync(string task, string postMessages, string title, string model =null, int max_tokens = 8000)
{
- model = model ?? ChatGPTType.Deepseek_Reasoner;
- max_tokens = model == ChatGPTType.Deepseek_Reasoner ? 16000 : max_tokens;
- return await base.ChatAsync(task, postMessages, title, model, max_tokens);
+ Message[] messageArr = [
+ new Message(postMessages,"user"),
+ ];
+ messageArr = messageArr.Where(s => s != null).ToArray();
+ var chatReq = new ChatRequest
+ {
+ taskId = task,
+ title = title,
+ model = model ?? ChatGPTType.Deepseek_Reasoner,
+ max_tokens = (model ?? ChatGPTType.Deepseek_Reasoner) == ChatGPTType.Deepseek_Reasoner ? 16000 : max_tokens,
+ stream = true, temperature = 0.2f,
+ messages = messageArr
+ };
+ return await base.ChatAsync(chatReq);
}
}
diff --git a/VideoAnalysisCore/AICore/GPT/GPTClient.cs b/VideoAnalysisCore/AICore/GPT/GPTClient.cs
index 393ee86..d35326a 100644
--- a/VideoAnalysisCore/AICore/GPT/GPTClient.cs
+++ b/VideoAnalysisCore/AICore/GPT/GPTClient.cs
@@ -150,31 +150,16 @@ namespace VideoAnalysisCore.AICore.GPT
/// 最大token 不设置默认最大值 16000/8000
///
///
- public async Task ChatAsync(string task, string postMessages, string title, string model = "deepseek-reasoner", int max_tokens = -1)
+ public async Task ChatAsync(ChatRequest chatRep)
{
- Message[] messageArr = [
- new Message(postMessages,"user"),
- ];
- messageArr = messageArr.Where(s => s != null).ToArray();
- var chatRep = new ChatRequest
- {
- taskId = task,
- model = model,
- max_tokens = model == "deepseek-reasoner" ? 16000 : 8000,
- stream = true,
- temperature = 0.2f,
- messages = messageArr
- };
- if (max_tokens != -1)
- chatRep.max_tokens = max_tokens;
var tryCount = 10;
while (tryCount-- > 0)
{
try
{
- var time = title + DateTime.Now.ToString("MMddHHmmss");
+ var time = chatRep.title + DateTime.Now.ToString("MMddHHmmss");
var redisCached = new object[2] { chatRep, null };
- redisManager.SetTaskGPTCached(task, time, chatRep);
+ redisManager.SetTaskGPTCached(chatRep.taskId, time, chatRep);
var chatResp = await Chat(chatRep);
var chatResContent = chatResp?.res;
if (string.IsNullOrEmpty(chatResContent))
@@ -182,7 +167,7 @@ namespace VideoAnalysisCore.AICore.GPT
if (chatResp != null)
{
redisCached[1] = new object[] { chatResp.Value.res, chatResp.Value.u, chatResp.Value.reasoning };
- redisManager.SetTaskGPTCached(task, time, redisCached);
+ redisManager.SetTaskGPTCached(chatRep.taskId, time, redisCached);
}
chatResContent = chatResContent?.ExtractJsonStrings()?.FirstOrDefault();
chatResContent = chatResContent?.Replace("\n", "");
@@ -214,10 +199,10 @@ namespace VideoAnalysisCore.AICore.GPT
}
catch (Exception ex)
{
- await redisManager.AddTaskLog(task, $"=>GPT结果解析错误 重试剩余{tryCount} {ex.Message}");
+ await redisManager.AddTaskLog(chatRep.taskId, $"=>GPT结果解析错误 重试剩余{tryCount} {ex.Message}");
}
}
- await redisManager.AddTaskLog(task, $"=>GPT请求失败次数过多!!!");
+ await redisManager.AddTaskLog(chatRep.taskId, $"=>GPT请求失败次数过多!!!");
throw new Exception(DateTime.Now + "=>GPT请求失败次数过多!!!");
}
diff --git a/VideoAnalysisCore/Common/RedisExpand.cs b/VideoAnalysisCore/Common/RedisExpand.cs
index 01c76c2..6b442b9 100644
--- a/VideoAnalysisCore/Common/RedisExpand.cs
+++ b/VideoAnalysisCore/Common/RedisExpand.cs
@@ -215,6 +215,9 @@ namespace VideoAnalysisCore.Common
/// 内容
public async Task AddTaskLog(object taskId, string msg)
{
+#if DEBUG
+ Console.WriteLine($"{DateTime.Now.ToString("MM-dd HH:mm:ss")} => {taskId} \r\n{msg}\r\n");
+#endif
await Redis.RPushAsync(RedisExpandKey.TaskLog,
new TaskLog()
{
@@ -222,17 +225,18 @@ namespace VideoAnalysisCore.Common
CreateTime = DateTime.Now,
Message = msg
});
+ const int count = 50;
lock (RedisExpandKey.TaskLog)
{
var oldTaskCount = Redis.LLen(RedisExpandKey.TaskLog);
- if (oldTaskCount > 100)
+ if (oldTaskCount > count)
{
try
{
- var insertData = Redis.LRange(RedisExpandKey.TaskLog, 0, 99);
+ var insertData = Redis.LRange(RedisExpandKey.TaskLog, 0, count - 1);
taskLogDB.AsInsertable(insertData).ExecuteCommand();
//同步删除redis
- Redis.LTrim(RedisExpandKey.TaskLog, 100, 1000);
+ Redis.LTrim(RedisExpandKey.TaskLog, count, 1000);
}
catch (Exception ex)
{