This commit is contained in:
Liang Ding 2023-04-30 21:47:24 +08:00
parent f6f3ebbcb2
commit 7daafcb3af
No known key found for this signature in database
GPG key ID: 136F30F901A2231D
13 changed files with 63 additions and 19 deletions

View file

@@ -35,6 +35,8 @@
"apiTimeoutTip": "The timeout period for initiating a request, unit: second",
"apiProxy": "Network Proxy",
"apiProxyTip": "The network proxy that initiates the request, such as <code class='fn__code'>socks://127.0.0.1:1080</code>",
"apiModel": "Model",
"apiModelTip": "The <code class='fn__code'>model</code> parameter passed in when requesting the API is used to control the style of the generated text",
"apiMaxTokens": "Maximum number of Tokens",
"apiMaxTokensTip": "The <code class='fn__code'>max_tokens</code> parameter passed in when requesting the API is used to control the length of the generated text",
"apiBaseURL": "API Base URL",

View file

@@ -35,6 +35,8 @@
"apiTimeoutTip": "El tiempo de espera para iniciar una solicitud, unidad: segundo",
"apiProxy": "Proxy web",
"apiProxyTip": "El proxy de red que inicia la solicitud, como <code class='fn__code'>socks://127.0.0.1:1080</code>",
"apiModel": "Modelo",
"apiModelTip": "El parámetro <code class='fn__code'>model</code> pasado al solicitar la API se usa para controlar el estilo del texto generado",
"apiMaxTokens": "Número máximo de tokens",
"apiMaxTokensTip": "El parámetro <code class='fn__code'>max_tokens</code> que se pasa al solicitar la API se usa para controlar la longitud del texto generado",
"apiBaseURL": "URL base de la API",

View file

@@ -35,6 +35,8 @@
"apiTimeoutTip": "Le délai d'attente pour lancer une requête, unité : seconde",
"apiProxy": "Proxy Web",
"apiProxyTip": "Le proxy réseau qui lance la requête, tel que <code class='fn__code'>socks://127.0.0.1:1080</code>",
"apiModel": "Modelo",
"apiModelTip": "El parámetro <code class='fn__code'>model</code> pasado al solicitar la API se usa para controlar el estilo del texto generado",
"apiMaxTokens": "Nombre maximum de jetons",
"apiMaxTokensTip": "Le paramètre <code class='fn__code'>max_tokens</code> transmis lors de la demande de l'API est utilisé pour contrôler la longueur du texte généré",
"apiBaseURL": "URL de base de l'API",

View file

@@ -35,6 +35,8 @@
"apiTimeoutTip": "發起請求的超時時間,單位:秒",
"apiProxy": "網絡代理",
"apiProxyTip": "發起請求的網絡代理,如 <code class='fn__code'>socks://127.0.0.1:1080</code>",
"apiModel": "模型",
"apiModelTip": "請求 API 時傳入的 <code class='fn__code'>model</code> 參數,用於控制生成的文本風格",
"apiMaxTokens": "最大 Token 數",
"apiMaxTokensTip": "請求 API 時傳入的 <code class='fn__code'>max_tokens</code> 參數,用於控制生成的文本長度",
"apiBaseURL": "API 基礎地址",

View file

@@ -35,6 +35,8 @@
"apiTimeoutTip": "发起请求的超时时间,单位:秒",
"apiProxy": "网络代理",
"apiProxyTip": "发起请求的网络代理,如 <code class='fn__code'>socks://127.0.0.1:1080</code>",
"apiModel": "模型",
"apiModelTip": "请求 API 时传入的 <code class='fn__code'>model</code> 参数,用于控制生成的文本风格",
"apiMaxTokens": "最大 Token 数",
"apiMaxTokensTip": "请求 API 时传入的 <code class='fn__code'>max_tokens</code> 参数,用于控制生成的文本长度",
"apiBaseURL": "API 基础地址",

View file

@@ -11,6 +11,19 @@ export const ai = {
<input class="b3-text-field fn__flex-center fn__block" type="number" step="1" min="5" max="600" id="apiTimeout" value="${window.siyuan.config.ai.openAI.apiTimeout}"/>
<div class="b3-label__text">${window.siyuan.languages.apiTimeoutTip}</div>
</div>
<div class="b3-label">
${window.siyuan.languages.apiModel}
<div class="b3-label__text">
${window.siyuan.languages.apiModelTip}
</div>
<div class="b3-label__text fn__flex config__item" style="padding: 4px 0 4px 4px;">
<select id="apiModel" class="b3-select">
<option value="gpt-4" ${window.siyuan.config.ai.openAI.apiModel === "gpt-4" ? "selected" : ""}>gpt-4</option>
<option value="gpt-4-32k" ${window.siyuan.config.ai.openAI.apiModel === "gpt-4-32k" ? "selected" : ""}>gpt-4-32k</option>
<option value="gpt-3.5-turbo" ${window.siyuan.config.ai.openAI.apiModel === "gpt-3.5-turbo" ? "selected" : ""}>gpt-3.5-turbo</option>
</select>
</div>
</div>
<div class="b3-label">
${window.siyuan.languages.apiMaxTokens}
<div class="fn__hr"></div>
@@ -44,6 +57,18 @@ export const ai = {
<span class="fn__space"></span>
<input class="b3-text-field fn__flex-center fn__size200" type="number" step="1" min="5" max="600" id="apiTimeout" value="${window.siyuan.config.ai.openAI.apiTimeout}"/>
</label>
<label class="fn__flex b3-label config__item">
<div class="fn__flex-1">
${window.siyuan.languages.apiModel}
<div class="b3-label__text">${window.siyuan.languages.apiModelTip}</div>
</div>
<span class="fn__space"></span>
<select id="apiModel" class="b3-select fn__flex-center fn__size200">
<option value="gpt-4" ${window.siyuan.config.ai.openAI.apiModel === "gpt-4" ? "selected" : ""}>gpt-4</option>
<option value="gpt-4-32k" ${window.siyuan.config.ai.openAI.apiModel === "gpt-4-32k" ? "selected" : ""}>gpt-4-32k</option>
<option value="gpt-3.5-turbo" ${window.siyuan.config.ai.openAI.apiModel === "gpt-3.5-turbo" ? "selected" : ""}>gpt-3.5-turbo</option>
</select>
</label>
<label class="fn__flex b3-label">
<div class="fn__flex-1">
${window.siyuan.languages.apiMaxTokens}
@@ -89,12 +114,13 @@ export const ai = {
</div>`;
},
bindEvent: () => {
ai.element.querySelectorAll("input").forEach((item) => {
ai.element.querySelectorAll("input,select").forEach((item) => {
item.addEventListener("change", () => {
fetchPost("/api/setting/setAI", {
openAI: {
apiBaseURL: (ai.element.querySelector("#apiBaseURL") as HTMLInputElement).value,
apiKey: (ai.element.querySelector("#apiKey") as HTMLInputElement).value,
apiModel: (ai.element.querySelector("#apiModel") as HTMLSelectElement).value,
apiMaxTokens: parseInt((ai.element.querySelector("#apiMaxTokens") as HTMLInputElement).value),
apiProxy: (ai.element.querySelector("#apiProxy") as HTMLInputElement).value,
apiTimeout: parseInt((ai.element.querySelector("#apiTimeout") as HTMLInputElement).value),

View file

@@ -424,6 +424,7 @@ declare interface IConfig {
openAI: {
apiBaseURL: string
apiKey: string
apiModel: string
apiMaxTokens: number
apiProxy: string
apiTimeout: number

View file

@@ -29,6 +29,7 @@ type OpenAI struct {
APIKey string `json:"apiKey"`
APITimeout int `json:"apiTimeout"`
APIProxy string `json:"apiProxy"`
APIModel string `json:"apiModel"`
APIMaxTokens int `json:"apiMaxTokens"`
APIBaseURL string `json:"apiBaseURL"`
}
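For reference, a minimal standalone sketch of how the new apiModel field round-trips through the JSON tags above; the struct is copied here purely for illustration and the values are placeholders, not SiYuan defaults:

package main

import (
	"encoding/json"
	"fmt"
)

// Copy of the kernel's OpenAI settings struct, reproduced only to show the JSON shape.
type OpenAI struct {
	APIKey       string `json:"apiKey"`
	APITimeout   int    `json:"apiTimeout"`
	APIProxy     string `json:"apiProxy"`
	APIModel     string `json:"apiModel"`
	APIMaxTokens int    `json:"apiMaxTokens"`
	APIBaseURL   string `json:"apiBaseURL"`
}

func main() {
	c := OpenAI{APIModel: "gpt-4", APITimeout: 30, APIBaseURL: "https://api.openai.com/v1"}
	b, _ := json.MarshalIndent(c, "", "  ")
	fmt.Println(string(b)) // "apiModel": "gpt-4" now appears alongside the existing keys
}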

View file

@@ -42,7 +42,7 @@ require (
github.com/panjf2000/ants/v2 v2.7.3
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/radovskyb/watcher v1.0.7
github.com/sashabaranov/go-gpt3 v1.4.0
github.com/sashabaranov/go-openai v1.9.0
github.com/shirou/gopsutil/v3 v3.23.2
github.com/siyuan-note/dejavu v0.0.0-20230425070132-9eeaf90cb5ba
github.com/siyuan-note/encryption v0.0.0-20220713091850-5ecd92177b75
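The dependency swap reflects the upstream rename of go-gpt3 to go-openai; a minimal sketch of constructing a client under the new import path, assuming only the public go-openai API and a placeholder key:

package main

import "github.com/sashabaranov/go-openai"

func main() {
	// go-openai exposes the same client under the new module path;
	// "sk-..." is a placeholder, not a real key.
	client := openai.NewClient("sk-...")
	_ = client
}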

View file

@@ -267,8 +267,8 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po
github.com/rwtodd/Go.Sed v0.0.0-20210816025313-55464686f9ef/go.mod h1:8AEUvGVi2uQ5b24BIhcr0GCcpd/RNAFWaN2CJFrWIIQ=
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI=
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs=
github.com/sashabaranov/go-gpt3 v1.4.0 h1:UqHYdXgJNtNvTtbzDnnQgkQ9TgTnHtCXx966uFTYXvU=
github.com/sashabaranov/go-gpt3 v1.4.0/go.mod h1:BIZdbwdzxZbCrcKGMGH6u2eyGe1xFuX9Anmh3tCP8lQ=
github.com/sashabaranov/go-openai v1.9.0 h1:NoiO++IISxxJ1pRc0n7uZvMGMake0G+FJ1XPwXtprsA=
github.com/sashabaranov/go-openai v1.9.0/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg=
github.com/shirou/gopsutil/v3 v3.23.2 h1:PAWSuiAszn7IhPMBtXsbSCafej7PqUOvY6YywlQUExU=
github.com/shirou/gopsutil/v3 v3.23.2/go.mod h1:gv0aQw33GLo3pG8SiWKiQrbDzbRY1K80RyZJ7V4Th1M=

View file

@@ -22,7 +22,7 @@ import (
"github.com/88250/lute/ast"
"github.com/88250/lute/parse"
gogpt "github.com/sashabaranov/go-gpt3"
"github.com/sashabaranov/go-openai"
"github.com/siyuan-note/siyuan/kernel/treenode"
"github.com/siyuan-note/siyuan/kernel/util"
)
@@ -154,11 +154,11 @@ type GPT interface {
}
type OpenAIGPT struct {
c *gogpt.Client
c *openai.Client
}
func (gpt *OpenAIGPT) chat(msg string, contextMsgs []string) (partRet string, stop bool, err error) {
return util.ChatGPT(msg, contextMsgs, gpt.c, Conf.AI.OpenAI.APIMaxTokens, Conf.AI.OpenAI.APITimeout)
return util.ChatGPT(msg, contextMsgs, gpt.c, Conf.AI.OpenAI.APIModel, Conf.AI.OpenAI.APIMaxTokens, Conf.AI.OpenAI.APITimeout)
}
type CloudGPT struct {

View file

@@ -19,6 +19,7 @@ package model
import (
"bytes"
"fmt"
"github.com/sashabaranov/go-openai"
"os"
"path/filepath"
"runtime"
@@ -337,14 +338,18 @@ func InitConf() {
if nil == Conf.AI {
Conf.AI = conf.NewAI()
}
if "" == Conf.AI.OpenAI.APIModel {
Conf.AI.OpenAI.APIModel = openai.GPT4
}
if "" != Conf.AI.OpenAI.APIKey {
logging.LogInfof("OpenAI API enabled\n"+
" baseURL=%s\n"+
" timeout=%ds\n"+
" proxy=%s\n"+
" model=%s\n"+
" maxTokens=%d",
Conf.AI.OpenAI.APIBaseURL, Conf.AI.OpenAI.APITimeout, Conf.AI.OpenAI.APIProxy, Conf.AI.OpenAI.APIMaxTokens)
Conf.AI.OpenAI.APIBaseURL, Conf.AI.OpenAI.APITimeout, Conf.AI.OpenAI.APIProxy, Conf.AI.OpenAI.APIModel, Conf.AI.OpenAI.APIMaxTokens)
}
Conf.ReadOnly = util.ReadOnly
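A minimal sketch of the fallback introduced above, assuming only go-openai's exported model constants; the empty string stands in for a conf.json written before apiModel existed:

package main

import (
	"fmt"

	"github.com/sashabaranov/go-openai"
)

func main() {
	model := "" // e.g. loaded from an older conf.json that predates apiModel
	if "" == model {
		model = openai.GPT4 // "gpt-4", matching the default applied in InitConf
	}
	fmt.Println(model)
}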

View file

@@ -18,30 +18,31 @@ package util
import (
"context"
gogpt "github.com/sashabaranov/go-gpt3"
"github.com/siyuan-note/logging"
"net/http"
"net/url"
"strings"
"time"
"github.com/sashabaranov/go-openai"
"github.com/siyuan-note/logging"
)
func ChatGPT(msg string, contextMsgs []string, c *gogpt.Client, maxTokens, timeout int) (ret string, stop bool, err error) {
var reqMsgs []gogpt.ChatCompletionMessage
func ChatGPT(msg string, contextMsgs []string, c *openai.Client, model string, maxTokens, timeout int) (ret string, stop bool, err error) {
var reqMsgs []openai.ChatCompletionMessage
for _, ctxMsg := range contextMsgs {
reqMsgs = append(reqMsgs, gogpt.ChatCompletionMessage{
reqMsgs = append(reqMsgs, openai.ChatCompletionMessage{
Role: "user",
Content: ctxMsg,
})
}
reqMsgs = append(reqMsgs, gogpt.ChatCompletionMessage{
reqMsgs = append(reqMsgs, openai.ChatCompletionMessage{
Role: "user",
Content: msg,
})
req := gogpt.ChatCompletionRequest{
Model: gogpt.GPT3Dot5Turbo,
req := openai.ChatCompletionRequest{
Model: model,
MaxTokens: maxTokens,
Messages: reqMsgs,
}
@@ -74,8 +75,8 @@ func ChatGPT(msg string, contextMsgs []string, c *gogpt.Client, maxTokens, timeo
return
}
func NewOpenAIClient(apiKey, apiProxy, apiBaseURL string) *gogpt.Client {
config := gogpt.DefaultConfig(apiKey)
func NewOpenAIClient(apiKey, apiProxy, apiBaseURL string) *openai.Client {
config := openai.DefaultConfig(apiKey)
if "" != apiProxy {
proxyUrl, err := url.Parse(apiProxy)
if nil != err {
@@ -86,5 +87,5 @@ func NewOpenAIClient(apiKey, apiProxy, apiBaseURL string) *gogpt.Client {
}
config.BaseURL = apiBaseURL
return gogpt.NewClientWithConfig(config)
return openai.NewClientWithConfig(config)
}
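End to end, the configured model now travels from the settings select into the ChatGPT request; a minimal usage sketch of the two exported helpers above, with a placeholder key, model, and limits rather than values taken from Conf:

package main

import (
	"fmt"

	"github.com/siyuan-note/siyuan/kernel/util"
)

func main() {
	// Placeholder arguments; the kernel passes Conf.AI.OpenAI.* here instead.
	client := util.NewOpenAIClient("sk-...", "", "https://api.openai.com/v1")
	ret, stop, err := util.ChatGPT("Summarize this note.", nil, client, "gpt-4", 1024, 30)
	fmt.Println(ret, stop, err)
}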