// ChatGPT chat-completion request/response data contracts.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Text.Json.Serialization;
using System.Threading.Tasks;
namespace VideoAnalysisCore.AICore.GPT.ChatGPT
{
/// <summary>
|
||
/// 请求数据
|
||
/// </summary>
|
||
public class ChatRequest
|
||
{
|
||
/// <summary>
|
||
/// 对话
|
||
/// </summary>
|
||
public Message[] messages { get; set; }
|
||
/// <summary>
|
||
/// 提问种子值[用来确保 相同参数请求尽可能返回相同参数]
|
||
/// <para>默认:null</para>
|
||
/// <para>此功能处于 Beta 阶段。 如果指定,我们的系统将尽最大努力确定性地采样,这样具有相同 and 参数的重复请求应该返回相同的结果。 无法保证确定性,您应该参考 response 参数来监控后端的变化</para>
|
||
/// </summary>
|
||
public int? seed { get; set; } =null;
|
||
public string model { get; set; } = ChatGPTType.GPT4o;
|
||
/// <summary>
|
||
/// 要使用的采样温度,介于 0 和 2 之间。较高的值(如 0.8)将使输出更加随机,而较低的值(如 0.2)将使其更加集中和确定。 我们通常建议更改此项或同时更改两者。top_p
|
||
/// <para> 默认为 1</para>
|
||
/// <para> <see cref="ChatRequest.top_p"/>联动</para>
|
||
/// </summary>
|
||
public float temperature { get; set; } = 0.2f;
|
||
/// <summary>
|
||
/// 一种替代温度采样的方法,称为原子核采样, 其中,模型考虑具有top_p概率的标记的结果 质量。所以 0.1 表示仅包含前 10% 概率质量的代币 被考
|
||
/// <para>建议与<see cref="ChatRequest.temperature"/>联动</para>
|
||
/// </summary>
|
||
public float top_p { get; set; } = 0.5f;
|
||
public float max_completion_tokens { get; set; } = 5000;
|
||
/// <summary>
|
||
/// 一个对象,用于指定模型必须输出的格式。设置为 enable 结构化输出,确保模型与您提供的 JSON 匹配 图式。
|
||
/// </summary>
|
||
public object response_format = new { type = "json_object" };
|
||
/// <summary>
|
||
/// 流式返回
|
||
/// </summary>
|
||
public bool stream =false;
|
||
/// <summary>
|
||
/// 您希望模型为此请求生成的 Output types。 大多数模型都能够生成文本,这是
|
||
/// <para>默认设置: ["text"]</para>
|
||
/// <para>该模型还可用于生成音频。自 请求此模型同时生成文本和音频响应,您可以 用:gpt-4o-audio-preview["text", "audio"]</para>
|
||
/// </summary>
|
||
public string modalities = "[\"json\"]";
|
||
/// <summary>
|
||
/// ai引导新话题
|
||
/// <para>默认-2 范围[-2~2]</para>
|
||
/// </summary>
|
||
public int presence_penalty = -2;
|
||
|
||
}
|
||
public class Message
|
||
{
|
||
public Message(string content, string role)
|
||
{
|
||
this.role = role;
|
||
this.content = content;
|
||
}
|
||
public string role { get; set; }
|
||
public string content { get; set; }
|
||
public string refusal { get; set; }
|
||
}
|
||
|
||
|
||
/// <summary>
|
||
/// gpt返回值
|
||
/// </summary>
|
||
public class ChatRes
|
||
{
|
||
public string id { get; set; }
|
||
public string _object { get; set; }
|
||
public int created { get; set; }
|
||
public string model { get; set; }
|
||
public ChatResError error { get; set; }
|
||
public Choice[] choices { get; set; }
|
||
public Usage usage { get; set; }
|
||
/// <summary>
|
||
/// 系统指纹
|
||
/// </summary>
|
||
public string system_fingerprint { get; set; }
|
||
}
|
||
|
||
public class Usage
|
||
{
|
||
public int prompt_tokens { get; set; }
|
||
public int completion_tokens { get; set; }
|
||
public int total_tokens { get; set; }
|
||
}
|
||
|
||
|
||
public class Choice
|
||
{
|
||
public int index { get; set; }
|
||
public Message message { get; set; }
|
||
public object logprobs { get; set; }
|
||
public string finish_reason { get; set; }
|
||
}
|
||
public class ChatResError
|
||
{
|
||
public string message { get; set; }
|
||
public string type { get; set; }
|
||
}
}