feat: support deepseek (#69)
The DeepSeek API reduces costs through disk caching, and DeepSeek-Coder-V2-0724 achieves GPT-4-Turbo-0409 level code capabilities with excellent math and reasoning skills.
parent b979b3d8ce · commit 6bef72e287
@@ -81,8 +81,8 @@ _See [config.lua#L9](./lua/avante/config.lua) for the full config_
 
 ```lua
 {
-  ---@alias Provider "openai" | "claude" | "azure"
-  provider = "claude", -- "claude" or "openai" or "azure"
+  ---@alias Provider "openai" | "claude" | "azure" | "deepseek"
+  provider = "claude", -- "claude" or "openai" or "azure" or "deepseek"
   openai = {
     endpoint = "https://api.openai.com",
     model = "gpt-4o",
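
The practical effect of this README change is one extra legal value for `provider`. A minimal opt-in sketch, assuming avante.nvim exposes the conventional `setup()` entry point that merges user options over the defaults (the `setup` call itself is not shown in this diff):

```lua
-- Minimal sketch: switch the active provider to DeepSeek.
-- Assumption: require("avante").setup() merges these options over M.defaults.
require("avante").setup({
  provider = "deepseek", -- newly valid alongside "openai" | "claude" | "azure"
})
```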
@@ -167,6 +167,12 @@ Given its early stage, `avante.nvim` currently supports the following basic func
 > ```sh
 > export AZURE_OPENAI_API_KEY=your-api-key
 > ```
+>
+> For DeepSeek
+>
+> ```sh
+> export DEEPSEEK_API_KEY=your-api-key
+> ```
 
 1. Open a code file in Neovim.
 2. Use the `:AvanteAsk` command to query the AI about the code.
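
A missing key is the most common setup failure, so it is worth confirming the variable actually reached the editor's environment. A throwaway sketch, assuming it is run with `:lua` in a Neovim started from a shell where the key was exported:

```lua
-- Prints whether the DeepSeek key is visible to this Neovim process.
local key = os.getenv("DEEPSEEK_API_KEY")
print(key and "DEEPSEEK_API_KEY is set" or "DEEPSEEK_API_KEY is missing")
```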
@@ -17,6 +17,7 @@ local E = {
     openai = "OPENAI_API_KEY",
     claude = "ANTHROPIC_API_KEY",
     azure = "AZURE_OPENAI_API_KEY",
+    deepseek = "DEEPSEEK_API_KEY",
   },
   _once = false,
 }
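
The streaming code below resolves the variable name through `E.key(...)`, whose implementation is not part of this diff. A plausible sketch, assuming it is a plain lookup into the provider-to-variable table extended above (the real table is nested inside `E` alongside `_once`; it is flattened here for brevity):

```lua
-- Hypothetical reconstruction for illustration only: maps a provider name
-- to the environment variable holding its API key.
local keys = {
  openai = "OPENAI_API_KEY",
  claude = "ANTHROPIC_API_KEY",
  azure = "AZURE_OPENAI_API_KEY",
  deepseek = "DEEPSEEK_API_KEY",
}

local function key(provider)
  return keys[provider]
end

print(key("deepseek")) -- DEEPSEEK_API_KEY
```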
@@ -298,6 +299,23 @@ local function call_openai_api_stream(question, code_lang, code_content, selected_content_content, on_chunk, on_complete)
       max_tokens = Config.azure.max_tokens,
       stream = true,
     }
+  elseif Config.provider == "deepseek" then
+    api_key = os.getenv(E.key("deepseek"))
+    url = Utils.trim_suffix(Config.deepseek.endpoint, "/") .. "/chat/completions"
+    headers = {
+      ["Content-Type"] = "application/json",
+      ["Authorization"] = "Bearer " .. api_key,
+    }
+    body = {
+      model = Config.deepseek.model,
+      messages = {
+        { role = "system", content = system_prompt },
+        { role = "user", content = user_prompt },
+      },
+      temperature = Config.deepseek.temperature,
+      max_tokens = Config.deepseek.max_tokens,
+      stream = true,
+    }
   else
     url = Utils.trim_suffix(Config.openai.endpoint, "/") .. "/v1/chat/completions"
     headers = {
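
The `body` table built in the new branch is serialized to JSON before being sent to `https://api.deepseek.com/chat/completions`. A standalone sketch of what that payload looks like with the defaults from `config.lua` (the two prompts are placeholders standing in for the plugin's real `system_prompt` and `user_prompt`; `vim.json.encode` is the JSON encoder bundled with Neovim):

```lua
-- Illustrative payload only; prompt strings are placeholders.
local body = {
  model = "deepseek-coder",
  messages = {
    { role = "system", content = "You are a careful coding assistant." },
    { role = "user", content = "Explain what this Lua function does." },
  },
  temperature = 0,
  max_tokens = 4096,
  stream = true, -- the server then responds with server-sent-event chunks
}
print(vim.json.encode(body))
```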
@@ -364,7 +382,7 @@ end
 ---@param on_chunk fun(chunk: string): any
 ---@param on_complete fun(err: string|nil): any
 function M.call_ai_api_stream(question, code_lang, code_content, selected_content_content, on_chunk, on_complete)
-  if Config.provider == "openai" or Config.provider == "azure" then
+  if Config.provider == "openai" or Config.provider == "azure" or Config.provider == "deepseek" then
     call_openai_api_stream(question, code_lang, code_content, selected_content_content, on_chunk, on_complete)
   elseif Config.provider == "claude" then
     call_claude_api_stream(question, code_lang, code_content, selected_content_content, on_chunk, on_complete)
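
The dispatcher keeps one streaming contract across all providers: `on_chunk` receives text fragments as they arrive, and `on_complete` receives an error string or nil. A hypothetical call site, where `M` is the module shown in the hunk above; the argument values, and treating `selected_content_content` as optional, are assumptions for illustration:

```lua
-- Hypothetical invocation; accumulates streamed chunks, then reports.
local chunks = {}
M.call_ai_api_stream(
  "What does this snippet do?",    -- question
  "lua",                           -- code_lang
  "local x = 1\nreturn x + 1",     -- code_content
  nil,                             -- selected_content_content (assumed optional)
  function(chunk)                  -- on_chunk
    table.insert(chunks, chunk)
  end,
  function(err)                    -- on_complete
    if err then
      vim.notify("stream failed: " .. err, vim.log.levels.ERROR)
    else
      vim.notify(table.concat(chunks))
    end
  end
)
```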
@@ -6,8 +6,8 @@ local M = {}
 
 ---@class avante.Config
 M.defaults = {
-  ---@alias Provider "openai" | "claude" | "azure"
-  provider = "claude", -- "claude" or "openai" or "azure"
+  ---@alias Provider "openai" | "claude" | "azure" | "deepseek"
+  provider = "claude", -- "claude" or "openai" or "azure" or "deepseek"
   openai = {
     endpoint = "https://api.openai.com",
     model = "gpt-4o",
@@ -27,6 +27,12 @@ M.defaults = {
     temperature = 0,
     max_tokens = 4096,
   },
+  deepseek = {
+    endpoint = "https://api.deepseek.com",
+    model = "deepseek-coder",
+    temperature = 0,
+    max_tokens = 4096,
+  },
   behaviour = {
     auto_apply_diff_after_generation = false, -- Whether to automatically apply diff after LLM response.
   },
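
Putting the pieces together: a user who wants DeepSeek end to end only needs the provider switch, plus optional overrides of the new defaults above. A sketch under the same assumption as before that avante.nvim exposes a conventional `setup()`:

```lua
-- Sketch of a full DeepSeek configuration; every value below mirrors a
-- default introduced by this commit, so the inner table could be omitted.
require("avante").setup({
  provider = "deepseek",
  deepseek = {
    endpoint = "https://api.deepseek.com",
    model = "deepseek-coder",
    temperature = 0,
    max_tokens = 4096,
  },
})
```

With `DEEPSEEK_API_KEY` exported, `:AvanteAsk` then routes through the new `elseif` branch in `call_openai_api_stream`.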