🎨 AI supports configuring randomness and the maximum number of contexts https://github.com/siyuan-note/siyuan/issues/10660

Daniel 2024-03-20 11:45:22 +08:00
parent 41b4984de3
commit 5300638622
GPG key ID: 86211BA83DF03017
11 changed files with 55 additions and 10 deletions

View file

@@ -297,6 +297,8 @@
"apiMaxTokensTip": "The <code class='fn__code'>max_tokens</code> parameter passed in when requesting the API is used to control the length of the generated text",
"apiTemperature": "Temperature",
"apiTemperatureTip": "The <code class='fn__code'>temperature</code> parameter passed in when requesting the API is used to control the randomness of the generated text",
"apiMaxContexts": "Maximum number of contexts",
"apiMaxContextsTip": "The maximum number of contexts passed in when requesting the API",
"apiBaseURL": "API Base URL",
"apiBaseURLTip": "The base address of the request, such as <code class='fn__code'>https://api.openai.com/v1</code>",
"apiUserAgentTip": "The user agent that initiated the request, that is, the HTTP header <code class='fn__code'>User-Agent</code>",

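For reference, the three tips above map onto ordinary chat-completion request parameters. A minimal Go sketch using the go-openai client the kernel already depends on; the prompt text and numeric values are illustrative assumptions, not taken from this commit:

package main

import (
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	// max_tokens caps the length of the generated text and temperature
	// controls its randomness; both are plain fields on the request.
	req := openai.ChatCompletionRequest{
		Model:       openai.GPT3Dot5Turbo,
		MaxTokens:   512, // apiMaxTokens (illustrative value)
		Temperature: 1.0, // apiTemperature (illustrative value)
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Continue writing ..."},
		},
	}
	// apiMaxContexts is not a request field; it limits how many earlier
	// messages are kept in req.Messages (see chatGPTContinueWrite below).
	fmt.Println(req.MaxTokens, req.Temperature, len(req.Messages))
}
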
View file

@@ -297,6 +297,8 @@
"apiMaxTokensTip": "El parámetro <code class='fn__code'>max_tokens</code> que se pasa al solicitar la API se usa para controlar la longitud del texto generado",
"apiTemperature": "Temperatura",
"apiTemperatureTip": "El parámetro <code class='fn__code'>temperature</code> pasado al solicitar la API se utiliza para controlar la aleatoriedad del texto generado",
"apiMaxContexts": "Número máximo de contextos",
"apiMaxContextsTip": "El número máximo de contextos pasados al solicitar la API",
"apiBaseURL": "URL base de la API",
"apiBaseURLTip": "La dirección base de la solicitud, como <code class='fn__code'>https://api.openai.com/v1</code>",
"apiUserAgentTip": "El agente de usuario que inició la solicitud, es decir, el encabezado HTTP <code class='fn__code'>User-Agent</code>",

View file

@@ -297,6 +297,8 @@
"apiMaxTokensTip": "Le paramètre <code class='fn__code'>max_tokens</code> transmis lors de la demande de l'API est utilisé pour contrôler la longueur du texte généré",
"apiTemperature": "Température",
"apiTemperatureTip": "Le paramètre <code class='fn__code'>temperature</code> transmis lors de la requête à l'API est utilisé pour contrôler le caractère aléatoire du texte généré",
"apiMaxContexts": "Nombre maximum de contextes",
"apiMaxContextsTip": "Le nombre maximum de contextes transmis lors de la requête de l'API",
"apiBaseURL": "URL de base de l'API",
"apiBaseURLTip": "L'adresse de base de la requête, telle que <code class='fn__code'>https://api.openai.com/v1</code>",
"apiUserAgentTip": "L'agent utilisateur qui a initié la requête, c'est-à-dire l'en-tête HTTP <code class='fn__code'>User-Agent</code>",

View file

@@ -297,6 +297,8 @@
"apiMaxTokensTip": "請求 API 時傳入的 <code class='fn__code'>max_tokens</code> 參數,用於控制生成的文字長度",
"apiTemperature": "溫度",
"apiTemperatureTip": "請求 API 時傳入的 <code class='fn__code'>temperature</code> 參數,用來控制產生的文字隨機性",
"apiMaxContexts": "最大上下文數",
"apiMaxContextsTip": "請求 API 時傳入的最大上下文數",
"apiBaseURL": "API 基礎地址",
"apiBaseURLTip": "發起請求的基礎地址,如 <code class='fn__code'>https://api.openai.com/v1</code>",
"apiUserAgentTip": "發起請求的使用者代理,即 HTTP 標頭 <code class='fn__code'>User-Agent</code>",

View file

@@ -297,6 +297,8 @@
"apiMaxTokensTip": "请求 API 时传入的 <code class='fn__code'>max_tokens</code> 参数,用于控制生成的文本长度",
"apiTemperature": "温度",
"apiTemperatureTip": "请求 API 时传入的 <code class='fn__code'>temperature</code> 参数,用于控制生成的文本随机性",
"apiMaxContexts": "最大上下文数",
"apiMaxContextsTip": "请求 API 时传入的最大上下文数",
"apiBaseURL": "API 基础地址",
"apiBaseURLTip": "发起请求的基础地址,如 <code class='fn__code'>https://api.openai.com/v1</code>",
"apiVersion": "API 版本",

View file

@@ -35,6 +35,12 @@ export const ai = {
<input class="b3-text-field fn__flex-center fn__block" type="number" step="0.1" min="0" max="2" id="apiTemperature" value="${window.siyuan.config.ai.openAI.apiTemperature}"/>
<div class="b3-label__text">${window.siyuan.languages.apiTemperatureTip}</div>
</div>
<div class="b3-label">
${window.siyuan.languages.apiMaxContexts}
<div class="fn__hr"></div>
<input class="b3-text-field fn__flex-center fn__block" type="number" step="1" min="1" max="64" id="apiMaxContexts" value="${window.siyuan.config.ai.openAI.apiMaxContexts}"/>
<div class="b3-label__text">${window.siyuan.languages.apiMaxContextsTip}</div>
</div>
<div class="b3-label">
${window.siyuan.languages.apiModel}
<div class="fn__hr"></div>
@@ -110,6 +116,14 @@ export const ai = {
<span class="fn__space"></span>
<input class="b3-text-field fn__flex-center fn__size200" type="number" step="0.1" min="0" max="2" id="apiTemperature" value="${window.siyuan.config.ai.openAI.apiTemperature}"/>
</div>
<div class="fn__flex b3-label">
<div class="fn__flex-1">
${window.siyuan.languages.apiMaxContexts}
<div class="b3-label__text">${window.siyuan.languages.apiMaxContextsTip}</div>
</div>
<span class="fn__space"></span>
<input class="b3-text-field fn__flex-center fn__size200" type="number" step="1" min="1" max="64" id="apiMaxContexts" value="${window.siyuan.config.ai.openAI.apiMaxContexts}"/>
</div>
<div class="fn__flex b3-label">
<div class="fn__block">
${window.siyuan.languages.apiModel}
@@ -191,6 +205,7 @@ export const ai = {
apiModel: (ai.element.querySelector("#apiModel") as HTMLSelectElement).value,
apiMaxTokens: parseInt((ai.element.querySelector("#apiMaxTokens") as HTMLInputElement).value),
apiTemperature: parseFloat((ai.element.querySelector("#apiTemperature") as HTMLInputElement).value),
apiMaxContexts: parseInt((ai.element.querySelector("#apiMaxContexts") as HTMLInputElement).value),
apiProxy: (ai.element.querySelector("#apiProxy") as HTMLInputElement).value,
apiTimeout: parseInt((ai.element.querySelector("#apiTimeout") as HTMLInputElement).value),
apiProvider: (ai.element.querySelector("#apiProvider") as HTMLSelectElement).value,

View file

@@ -739,6 +739,7 @@ interface IConfig {
apiModel: string
apiMaxTokens: number
apiTemperature: number
apiMaxContexts: number
apiProxy: string
apiTimeout: number
},

View file

@@ -198,6 +198,10 @@ func setAI(c *gin.Context) {
ai.OpenAI.APITemperature = 1.0
}
if 1 > ai.OpenAI.APIMaxContexts || 64 < ai.OpenAI.APIMaxContexts {
ai.OpenAI.APIMaxContexts = 7
}
model.Conf.AI = ai
model.Conf.Save()

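The handler above clamps the submitted value the same way the startup check later in this commit does: anything outside 1..64 falls back to 7, matching the min/max on the settings input. A standalone sketch of that validation; the helper name normalizeMaxContexts is made up for illustration:

package main

import "fmt"

// normalizeMaxContexts applies the bounds used in this commit: values
// outside 1..64 fall back to the default of 7 contexts.
func normalizeMaxContexts(v int) int {
	if 1 > v || 64 < v {
		return 7
	}
	return v
}

func main() {
	fmt.Println(normalizeMaxContexts(0))   // 7
	fmt.Println(normalizeMaxContexts(16))  // 16
	fmt.Println(normalizeMaxContexts(100)) // 7
}
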
View file

@@ -35,6 +35,7 @@ type OpenAI struct {
APIModel string `json:"apiModel"`
APIMaxTokens int `json:"apiMaxTokens"`
APITemperature float64 `json:"apiTemperature"`
APIMaxContexts int `json:"apiMaxContexts"`
APIBaseURL string `json:"apiBaseURL"`
APIUserAgent string `json:"apiUserAgent"`
APIProvider string `json:"apiProvider"` // OpenAI, Azure
@@ -43,11 +44,13 @@ type OpenAI struct {
func NewAI() *AI {
openAI := &OpenAI{
- APITimeout:   30,
- APIModel:     openai.GPT3Dot5Turbo,
- APIBaseURL:   "https://api.openai.com/v1",
- APIUserAgent: util.UserAgent,
- APIProvider:  "OpenAI",
+ APITemperature: 1.0,
+ APIMaxContexts: 7,
+ APITimeout:     30,
+ APIModel:       openai.GPT3Dot5Turbo,
+ APIBaseURL:     "https://api.openai.com/v1",
+ APIUserAgent:   util.UserAgent,
+ APIProvider:    "OpenAI",
}
openAI.APIKey = os.Getenv("SIYUAN_OPENAI_API_KEY")
@@ -77,6 +80,13 @@ func NewAI() *AI {
}
}
if maxContexts := os.Getenv("SIYUAN_OPENAI_API_MAX_CONTEXTS"); "" != maxContexts {
maxContextsInt, err := strconv.Atoi(maxContexts)
if nil == err {
openAI.APIMaxContexts = maxContextsInt
}
}
if baseURL := os.Getenv("SIYUAN_OPENAI_API_BASE_URL"); "" != baseURL {
openAI.APIBaseURL = baseURL
}

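Besides the settings UI, the default can be overridden through the SIYUAN_OPENAI_API_MAX_CONTEXTS environment variable shown above. A small sketch of the same parse-or-fall-back pattern in isolation; readMaxContexts and the example value 16 are illustrative, not part of the repository:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// readMaxContexts mirrors the env override above: use the variable when it
// parses as an integer, otherwise keep the supplied default.
func readMaxContexts(def int) int {
	v := os.Getenv("SIYUAN_OPENAI_API_MAX_CONTEXTS")
	if "" == v {
		return def
	}
	n, err := strconv.Atoi(v)
	if nil != err {
		return def
	}
	return n
}

func main() {
	os.Setenv("SIYUAN_OPENAI_API_MAX_CONTEXTS", "16")
	fmt.Println(readMaxContexts(7)) // prints 16
}
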
View file

@@ -84,8 +84,8 @@ func chatGPTContinueWrite(msg string, contextMsgs []string, cloud bool) (ret str
util.PushEndlessProgress("Requesting...")
defer util.ClearPushProgress(100)
- if 7 < len(contextMsgs) {
- contextMsgs = contextMsgs[len(contextMsgs)-7:]
+ if Conf.AI.OpenAI.APIMaxContexts < len(contextMsgs) {
+ contextMsgs = contextMsgs[len(contextMsgs)-Conf.AI.OpenAI.APIMaxContexts:]
}
var gpt GPT
@@ -96,7 +96,7 @@ func chatGPTContinueWrite(msg string, contextMsgs []string, cloud bool) (ret str
}
buf := &bytes.Buffer{}
- for i := 0; i < 7; i++ {
+ for i := 0; i < Conf.AI.OpenAI.APIMaxContexts; i++ {
part, stop, chatErr := gpt.chat(msg, contextMsgs)
buf.WriteString(part)

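With the hardcoded 7 gone, both the number of retained context messages and the loop bound below it follow apiMaxContexts. A self-contained sketch of the slicing used above to keep only the most recent messages; trimContexts and the sample data are illustrative:

package main

import "fmt"

// trimContexts keeps at most max of the most recent context messages,
// the same slice expression used in chatGPTContinueWrite.
func trimContexts(contextMsgs []string, max int) []string {
	if max < len(contextMsgs) {
		contextMsgs = contextMsgs[len(contextMsgs)-max:]
	}
	return contextMsgs
}

func main() {
	msgs := []string{"m1", "m2", "m3", "m4", "m5"}
	fmt.Println(trimContexts(msgs, 3)) // [m3 m4 m5]
}
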
View file

@@ -420,6 +420,9 @@ func InitConf() {
if 0 >= Conf.AI.OpenAI.APITemperature || 2 < Conf.AI.OpenAI.APITemperature {
Conf.AI.OpenAI.APITemperature = 1.0
}
if 1 > Conf.AI.OpenAI.APIMaxContexts || 64 < Conf.AI.OpenAI.APIMaxContexts {
Conf.AI.OpenAI.APIMaxContexts = 7
}
if "" != Conf.AI.OpenAI.APIKey {
logging.LogInfof("OpenAI API enabled\n"+
@@ -429,14 +432,16 @@
" proxy=%s\n"+
" model=%s\n"+
" maxTokens=%d\n"+
- " temperature=%.1f",
+ " temperature=%.1f\n"+
+ " maxContexts=%d",
Conf.AI.OpenAI.APIUserAgent,
Conf.AI.OpenAI.APIBaseURL,
Conf.AI.OpenAI.APITimeout,
Conf.AI.OpenAI.APIProxy,
Conf.AI.OpenAI.APIModel,
Conf.AI.OpenAI.APIMaxTokens,
- Conf.AI.OpenAI.APITemperature)
+ Conf.AI.OpenAI.APITemperature,
+ Conf.AI.OpenAI.APIMaxContexts)
}
Conf.ReadOnly = util.ReadOnly