adjust /model command priority
@@ -6,6 +6,14 @@ import { log } from "../utils/log";
 const enc = get_encoding("cl100k_base");
 
 const getUseModel = (req: Request, tokenCount: number) => {
+  const [provider, model] = req.body.model.split(",");
+  if (provider && model) {
+    return {
+      provider,
+      model,
+    };
+  }
+
   // if tokenCount is greater than 32K, use the long context model
   if (tokenCount > 1000 * 32) {
     log("Using long context model due to token count:", tokenCount);
@@ -33,13 +41,6 @@ const getUseModel = (req: Request, tokenCount: number) => {
       model,
     };
   }
-  const [provider, model] = req.body.model.split(",");
-  if (provider && model) {
-    return {
-      provider,
-      model,
-    };
-  }
   return {
     provider: "default",
     model: req.config.OPENAI_MODEL,
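
Net effect of the two hunks, shown as a sketch of getUseModel after this commit (the body of the long-context branch is elided, and the Request and req.config typings are assumed from the surrounding file): an explicit "provider,model" pair set via the /model command is now checked first, so it takes priority over the token-count-based long-context routing.

    const getUseModel = (req: Request, tokenCount: number) => {
      // /model command: an explicit "provider,model" pair wins outright
      const [provider, model] = req.body.model.split(",");
      if (provider && model) {
        return {
          provider,
          model,
        };
      }

      // if tokenCount is greater than 32K, use the long context model
      if (tokenCount > 1000 * 32) {
        log("Using long context model due to token count:", tokenCount);
        // ...long-context provider/model selection elided...
      }

      // otherwise fall back to the configured default model
      return {
        provider: "default",
        model: req.config.OPENAI_MODEL,
      };
    };

Before this change the split on req.body.model ran only after the 32K check, so a long prompt could force the long-context model even when the user had selected a provider and model explicitly with /model.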