瀏覽代碼

:art: AI supports configuration randomness and context number https://github.com/siyuan-note/siyuan/issues/10660

Daniel 1 年之前
父節點
當前提交
a7e1e3abac

+ 2 - 0
app/appearance/langs/en_US.json

@@ -295,6 +295,8 @@
   "apiModelTip": "The <code class='fn__code'>model</code> parameter passed in when requesting the API is used to control the generated text style (the Deployment ID needs to be filled in when using the Azure OpenAI service)",
   "apiMaxTokens": "Maximum number of Tokens",
   "apiMaxTokensTip": "The <code class='fn__code'>max_tokens</code> parameter passed in when requesting the API is used to control the length of the generated text",
+  "apiTemperature": "Temperature",
+  "apiTemperatureTip": "The <code class='fn__code'>temperature</code> parameter passed in when requesting the API is used to control the randomness of the generated text",
   "apiBaseURL": "API Base URL",
   "apiBaseURLTip": "The base address of the request, such as <code class='fn__code'>https://api.openai.com/v1</code>",
   "apiUserAgentTip": "The user agent that initiated the request, that is, the HTTP header <code class='fn__code'>User-Agent</code>",

+ 2 - 0
app/appearance/langs/es_ES.json

@@ -295,6 +295,8 @@
   "apiModelTip": "El parámetro <code class='fn__code'>model</code> pasado al solicitar la API se usa para controlar el estilo del texto generado (el ID de implementación debe completarse cuando se usa el servicio Azure OpenAI)",
   "apiMaxTokens": "Número máximo de tokens",
   "apiMaxTokensTip": "El parámetro <code class='fn__code'>max_tokens</code> que se pasa al solicitar la API se usa para controlar la longitud del texto generado",
+  "apiTemperature": "Temperatura",
+  "apiTemperatureTip": "El parámetro <code class='fn__code'>temperature</code> pasado al solicitar la API se utiliza para controlar la aleatoriedad del texto generado",
   "apiBaseURL": "URL base de la API",
   "apiBaseURLTip": "La dirección base de la solicitud, como <code class='fn__code'>https://api.openai.com/v1</code>",
   "apiUserAgentTip": "El agente de usuario que inició la solicitud, es decir, el encabezado HTTP <code class='fn__code'>User-Agent</code>",

+ 2 - 0
app/appearance/langs/fr_FR.json

@@ -295,6 +295,8 @@
   "apiModelTip": "Le paramètre <code class='fn__code'>model</code> transmis lors de la demande de l'API est utilisé pour contrôler le style de texte généré (l'ID de déploiement doit être renseigné lors de l'utilisation du service Azure OpenAI)",
   "apiMaxTokens": "Nombre maximum de jetons",
   "apiMaxTokensTip": "Le paramètre <code class='fn__code'>max_tokens</code> transmis lors de la demande de l'API est utilisé pour contrôler la longueur du texte généré",
+  "apiTemperature": "Température",
+  "apiTemperatureTip": "Le paramètre <code class='fn__code'>temperature</code> transmis lors de la requête à l'API est utilisé pour contrôler le caractère aléatoire du texte généré",
   "apiBaseURL": "URL de base de l'API",
   "apiBaseURLTip": "L'adresse de base de la requête, telle que <code class='fn__code'>https://api.openai.com/v1</code>",
   "apiUserAgentTip": "L'agent utilisateur qui a initié la requête, c'est-à-dire l'en-tête HTTP <code class='fn__code'>User-Agent</code>",

+ 2 - 0
app/appearance/langs/zh_CHT.json

@@ -295,6 +295,8 @@
   "apiModelTip": "請求 API 時傳入的 <code class='fn__code'>model</code> 參數,用於控制產生的文字風格(使用 Azure OpenAI 服務時需填入 Deployment ID)",
   "apiMaxTokens": "最大 Token 數",
   "apiMaxTokensTip": "請求 API 時傳入的 <code class='fn__code'>max_tokens</code> 參數,用於控制生成的文字長度",
+  "apiTemperature": "溫度",
+  "apiTemperatureTip": "請求 API 時傳入的 <code class='fn__code'>temperature</code> 參數,用來控制產生的文字隨機性",
   "apiBaseURL": "API 基礎地址",
   "apiBaseURLTip": "發起請求的基礎地址,如 <code class='fn__code'>https://api.openai.com/v1</code>",
   "apiUserAgentTip": "發起請求的使用者代理,即 HTTP 標頭 <code class='fn__code'>User-Agent</code>",

+ 2 - 0
app/appearance/langs/zh_CN.json

@@ -295,6 +295,8 @@
   "apiModelTip": "请求 API 时传入的 <code class='fn__code'>model</code> 参数,用于控制生成的文本风格(使用 Azure OpenAI 服务时需填入 Deployment ID)",
   "apiMaxTokens": "最大 Token 数",
   "apiMaxTokensTip": "请求 API 时传入的 <code class='fn__code'>max_tokens</code> 参数,用于控制生成的文本长度",
+  "apiTemperature": "温度",
+  "apiTemperatureTip": "请求 API 时传入的 <code class='fn__code'>temperature</code> 参数,用于控制生成的文本随机性",
   "apiBaseURL": "API 基础地址",
   "apiBaseURLTip": "发起请求的基础地址,如 <code class='fn__code'>https://api.openai.com/v1</code>",
   "apiVersion": "API 版本",

+ 15 - 0
app/src/config/ai.ts

@@ -29,6 +29,12 @@ export const ai = {
     <input class="b3-text-field fn__flex-center fn__block" type="number" step="1" min="0" id="apiMaxTokens" value="${window.siyuan.config.ai.openAI.apiMaxTokens}"/>
     <div class="b3-label__text">${window.siyuan.languages.apiMaxTokensTip}</div>
 </div>
+<div class="b3-label">
+    ${window.siyuan.languages.apiTemperature}
+    <div class="fn__hr"></div>
+    <input class="b3-text-field fn__flex-center fn__block" type="number" step="0.1" min="0" max="2" id="apiTemperature" value="${window.siyuan.config.ai.openAI.apiTemperature}"/>
+    <div class="b3-label__text">${window.siyuan.languages.apiTemperatureTip}</div>
+</div>
 <div class="b3-label">
     ${window.siyuan.languages.apiModel}
     <div class="fn__hr"></div>
@@ -96,6 +102,14 @@ export const ai = {
     <span class="fn__space"></span>
     <input class="b3-text-field fn__flex-center fn__size200" type="number" step="1" min="0" id="apiMaxTokens" value="${window.siyuan.config.ai.openAI.apiMaxTokens}"/>
 </div>
+<div class="fn__flex b3-label">
+    <div class="fn__flex-1">
+        ${window.siyuan.languages.apiTemperature}
+        <div class="b3-label__text">${window.siyuan.languages.apiTemperatureTip}</div>
+    </div>
+    <span class="fn__space"></span>
+    <input class="b3-text-field fn__flex-center fn__size200" type="number" step="0.1" min="0" max="2" id="apiTemperature" value="${window.siyuan.config.ai.openAI.apiTemperature}"/>
+</div>
 <div class="fn__flex b3-label">
     <div class="fn__block">
         ${window.siyuan.languages.apiModel}
@@ -176,6 +190,7 @@ export const ai = {
                         apiKey: (ai.element.querySelector("#apiKey") as HTMLInputElement).value,
                         apiModel: (ai.element.querySelector("#apiModel") as HTMLSelectElement).value,
                         apiMaxTokens: parseInt((ai.element.querySelector("#apiMaxTokens") as HTMLInputElement).value),
+                        apiTemperature: parseFloat((ai.element.querySelector("#apiTemperature") as HTMLInputElement).value),
                         apiProxy: (ai.element.querySelector("#apiProxy") as HTMLInputElement).value,
                         apiTimeout: parseInt((ai.element.querySelector("#apiTimeout") as HTMLInputElement).value),
                         apiProvider: (ai.element.querySelector("#apiProvider") as HTMLSelectElement).value,

+ 1 - 0
app/src/types/index.d.ts

@@ -738,6 +738,7 @@ interface IConfig {
             apiKey: string
             apiModel: string
             apiMaxTokens: number
+            apiTemperature: number
             apiProxy: string
             apiTimeout: number
         },

+ 4 - 0
kernel/api/setting.go

@@ -194,6 +194,10 @@ func setAI(c *gin.Context) {
 		ai.OpenAI.APIMaxTokens = 0
 	}
 
+	if 0 >= ai.OpenAI.APITemperature || 2 < ai.OpenAI.APITemperature {
+		ai.OpenAI.APITemperature = 1.0
+	}
+
 	model.Conf.AI = ai
 	model.Conf.Save()
 

+ 17 - 9
kernel/conf/ai.go

@@ -29,15 +29,16 @@ type AI struct {
 }
 
 type OpenAI struct {
-	APIKey       string `json:"apiKey"`
-	APITimeout   int    `json:"apiTimeout"`
-	APIProxy     string `json:"apiProxy"`
-	APIModel     string `json:"apiModel"`
-	APIMaxTokens int    `json:"apiMaxTokens"`
-	APIBaseURL   string `json:"apiBaseURL"`
-	APIUserAgent string `json:"apiUserAgent"`
-	APIProvider  string `json:"apiProvider"` // OpenAI, Azure
-	APIVersion   string `json:"apiVersion"`  // Azure API version
+	APIKey         string  `json:"apiKey"`
+	APITimeout     int     `json:"apiTimeout"`
+	APIProxy       string  `json:"apiProxy"`
+	APIModel       string  `json:"apiModel"`
+	APIMaxTokens   int     `json:"apiMaxTokens"`
+	APITemperature float64 `json:"apiTemperature"`
+	APIBaseURL     string  `json:"apiBaseURL"`
+	APIUserAgent   string  `json:"apiUserAgent"`
+	APIProvider    string  `json:"apiProvider"` // OpenAI, Azure
+	APIVersion     string  `json:"apiVersion"`  // Azure API version
 }
 
 func NewAI() *AI {
@@ -69,6 +70,13 @@ func NewAI() *AI {
 		}
 	}
 
+	if temperature := os.Getenv("SIYUAN_OPENAI_API_TEMPERATURE"); "" != temperature {
+		temperatureFloat, err := strconv.ParseFloat(temperature, 64)
+		if nil == err {
+			openAI.APITemperature = temperatureFloat
+		}
+	}
+
 	if baseURL := os.Getenv("SIYUAN_OPENAI_API_BASE_URL"); "" != baseURL {
 		openAI.APIBaseURL = baseURL
 	}

+ 1 - 1
kernel/model/ai.go

@@ -170,7 +170,7 @@ type OpenAIGPT struct {
 }
 
 func (gpt *OpenAIGPT) chat(msg string, contextMsgs []string) (partRet string, stop bool, err error) {
-	return util.ChatGPT(msg, contextMsgs, gpt.c, Conf.AI.OpenAI.APIModel, Conf.AI.OpenAI.APIMaxTokens, Conf.AI.OpenAI.APITimeout)
+	return util.ChatGPT(msg, contextMsgs, gpt.c, Conf.AI.OpenAI.APIModel, Conf.AI.OpenAI.APIMaxTokens, Conf.AI.OpenAI.APITemperature, Conf.AI.OpenAI.APITimeout)
 }
 
 type CloudGPT struct {

+ 13 - 2
kernel/model/conf.go

@@ -408,9 +408,18 @@ func InitConf() {
 	if "" == Conf.AI.OpenAI.APIUserAgent {
 		Conf.AI.OpenAI.APIUserAgent = util.UserAgent
 	}
+	if strings.HasPrefix(Conf.AI.OpenAI.APIUserAgent, "SiYuan/") {
+		Conf.AI.OpenAI.APIUserAgent = util.UserAgent
+	}
 	if "" == Conf.AI.OpenAI.APIProvider {
 		Conf.AI.OpenAI.APIProvider = "OpenAI"
 	}
+	if 0 > Conf.AI.OpenAI.APIMaxTokens {
+		Conf.AI.OpenAI.APIMaxTokens = 0
+	}
+	if 0 >= Conf.AI.OpenAI.APITemperature || 2 < Conf.AI.OpenAI.APITemperature {
+		Conf.AI.OpenAI.APITemperature = 1.0
+	}
 
 	if "" != Conf.AI.OpenAI.APIKey {
 		logging.LogInfof("OpenAI API enabled\n"+
@@ -419,13 +428,15 @@ func InitConf() {
 			"    timeout=%ds\n"+
 			"    proxy=%s\n"+
 			"    model=%s\n"+
-			"    maxTokens=%d",
+			"    maxTokens=%d\n"+
+			"    temperature=%.1f",
 			Conf.AI.OpenAI.APIUserAgent,
 			Conf.AI.OpenAI.APIBaseURL,
 			Conf.AI.OpenAI.APITimeout,
 			Conf.AI.OpenAI.APIProxy,
 			Conf.AI.OpenAI.APIModel,
-			Conf.AI.OpenAI.APIMaxTokens)
+			Conf.AI.OpenAI.APIMaxTokens,
+			Conf.AI.OpenAI.APITemperature)
 	}
 
 	Conf.ReadOnly = util.ReadOnly

+ 5 - 4
kernel/util/openai.go

@@ -27,7 +27,7 @@ import (
 	"github.com/siyuan-note/logging"
 )
 
-func ChatGPT(msg string, contextMsgs []string, c *openai.Client, model string, maxTokens, timeout int) (ret string, stop bool, err error) {
+func ChatGPT(msg string, contextMsgs []string, c *openai.Client, model string, maxTokens int, temperature float64, timeout int) (ret string, stop bool, err error) {
 	var reqMsgs []openai.ChatCompletionMessage
 
 	for _, ctxMsg := range contextMsgs {
@@ -42,9 +42,10 @@ func ChatGPT(msg string, contextMsgs []string, c *openai.Client, model string, m
 	})
 
 	req := openai.ChatCompletionRequest{
-		Model:     model,
-		MaxTokens: maxTokens,
-		Messages:  reqMsgs,
+		Model:       model,
+		MaxTokens:   maxTokens,
+		Temperature: float32(temperature),
+		Messages:    reqMsgs,
 	}
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
 	defer cancel()