-
Notifications
You must be signed in to change notification settings - Fork 84
Expand file tree
/
Copy pathsettings.ts
More file actions
130 lines (126 loc) · 4.68 KB
/
settings.ts
File metadata and controls
130 lines (126 loc) · 4.68 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
import { SettingSchemaDesc } from "@logseq/libs/dist/LSPlugin";
import { DalleImageSize, OpenAIOptions } from "./openai";
/**
 * All settings this plugin reads: the OpenAI client options plus
 * Logseq-specific extras layered on top.
 */
interface PluginOptions extends OpenAIOptions {
  // Text prepended to every generated output block (e.g. a tag like
  // "[[gpt3]] " or "> " for a blockquote); may contain real newlines
  // after unescapeNewlines() has expanded "\n" sequences.
  injectPrefix?: string;
}
/**
 * Declarative schema for the plugin's settings panel in Logseq.
 * Each entry's `key` must match the lookup key used in getOpenaiSettings().
 */
export const settingsSchema: SettingSchemaDesc[] = [
  {
    key: "openAIKey",
    type: "string",
    default: "",
    title: "OpenAI API Key",
    description:
      "Your OpenAI API key. You can get one at https://beta.openai.com",
  },
  {
    key: "openAICompletionEngine",
    type: "string",
    default: "gpt-3.5-turbo",
    title: "OpenAI Completion Engine",
    description: "See Engines in OpenAI docs.",
  },
  {
    key: "chatCompletionEndpoint",
    type: "string",
    // FIX: was "http://..." — the OpenAI API is HTTPS-only; a plain-HTTP
    // default either fails outright or sends the API key in cleartext.
    default: "https://api.openai.com/v1",
    title: "OpenAI API Completion Endpoint",
    description:
      "The endpoint to use for OpenAI API completion requests. You shouldn't need to change this.",
  },
  {
    key: "useChatCompletionRequestMessage",
    type: "boolean",
    default: false,
    title: "Use OpenAI Chat Completion Request Message",
    description:
      "Send chat completion message using ChatCompletionRequestMessage interface rather than raw string. Useful for Oobabooga Text-Generation-WebUI. See https://github.com/oobabooga/text-generation-webui for more info.",
  },
  {
    key: "chatCompletionCharacter",
    type: "string",
    default: "Assistant",
    title: "Completion Character",
    description:
      "Only used for Oobabooga Text-Generation-WebUI. See https://github.com/oobabooga/text-generation-webui for more info.",
  },
  {
    key: "chatPrompt",
    type: "string",
    default:
      "Do not refer to yourself in your answers. Do not say as an AI language model...",
    title: "OpenAI Chat Prompt",
    description:
      "Initial message that tells ChatGPT how to answer. Only used for gpt-3.5. See https://platform.openai.com/docs/guides/chat/introduction for more info.",
  },
  {
    key: "openAITemperature",
    type: "number",
    default: 1.0,
    title: "OpenAI Temperature",
    description:
      "The temperature controls how much randomness is in the output.<br/>" +
      "You can set a different temperature in your own prompt templates by adding a 'prompt-template' property to the block.",
  },
  {
    key: "openAIMaxTokens",
    type: "number",
    default: 1000,
    title: "OpenAI Max Tokens",
    description:
      "The maximum amount of tokens to generate. Tokens can be words or just chunks of characters. The number of tokens processed in a given API request depends on the length of both your inputs and outputs. As a rough rule of thumb, 1 token is approximately 4 characters or 0.75 words for English text. One limitation to keep in mind is that your text prompt and generated completion combined must be no more than the model's maximum context length (for most models this is 2048 tokens, or about 1500 words).",
  },
  {
    key: "injectPrefix",
    type: "string",
    default: "",
    title: "Output prefix",
    description:
      "Prepends the output with this string. Such as a tag like [[gpt3]] or markdown like > to blockquote. Add a space at the end if you want a space between the prefix and the output or \\n for a linebreak.",
  },
  {
    key: "dalleImageSize",
    type: "number",
    default: 1024,
    title: "DALL-E Image Size",
    description:
      "Size of the image to generate. Can be 256, 512, or 1024. Smaller images are faster to generate.",
  },
  {
    key: "shortcutBlock",
    type: "string",
    default: "mod+j",
    title: "Keyboard Shortcut for /gpt-block",
    description: "",
  },
  {
    key: "popupShortcut",
    type: "string",
    default: "mod+g",
    title: "Keyboard Shortcut for /gpt popup",
    description: "",
  },
];
/**
 * Expand every literal two-character "\n" escape sequence in the given
 * string into a real newline character.
 */
function unescapeNewlines(s: string) {
  return s.split("\\n").join("\n");
}
/**
 * Read the plugin's current settings from Logseq and assemble them into
 * a typed PluginOptions object for the OpenAI client.
 *
 * Numeric settings are parsed explicitly because Logseq may return them
 * as strings; injectPrefix has its literal "\n" escapes expanded so users
 * can configure multi-line prefixes from the single-line settings UI.
 *
 * @returns the options currently configured in `logseq.settings`.
 */
export function getOpenaiSettings(): PluginOptions {
  const apiKey = logseq.settings!["openAIKey"];
  const completionEngine = logseq.settings!["openAICompletionEngine"];
  const completionCharacter = logseq.settings!["chatCompletionCharacter"];
  const useChatCompletionRequestMessage =
    logseq.settings!["useChatCompletionRequestMessage"];
  const injectPrefix = unescapeNewlines(logseq.settings!["injectPrefix"]);
  const temperature = Number.parseFloat(logseq.settings!["openAITemperature"]);
  // Explicit radix: never rely on parseInt's default base inference.
  const maxTokens = Number.parseInt(logseq.settings!["openAIMaxTokens"], 10);
  const dalleImageSize = Number.parseInt(
    logseq.settings!["dalleImageSize"],
    10
  ) as DalleImageSize;
  const chatPrompt = logseq.settings!["chatPrompt"];
  const completionEndpoint = logseq.settings!["chatCompletionEndpoint"];
  return {
    apiKey,
    completionEngine,
    completionCharacter,
    useChatCompletionRequestMessage,
    temperature,
    maxTokens,
    dalleImageSize,
    injectPrefix,
    chatPrompt,
    completionEndpoint,
  };
}