From 5f84e0dcff412f06d3d59d062506d05a951175fe Mon Sep 17 00:00:00 2001
From: m3tam3re
Date: Tue, 8 Oct 2024 11:11:01 +0200
Subject: [PATCH] nvim changes

---
 nvim/init.lua | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/nvim/init.lua b/nvim/init.lua
index 75b45c8..efceb8c 100644
--- a/nvim/init.lua
+++ b/nvim/init.lua
@@ -94,7 +94,9 @@ vim.g.maplocalleader = ' '
 vim.g.have_nerd_font = true
 
 -- [[ Setting options ]]
+-- l
 -- See `:help vim.opt`
+--
 -- NOTE: You can change these options as you wish!
 --  For more options, you can see `:help option-list`
 
@@ -829,13 +831,18 @@ require('lazy').setup({
   {
     'David-Kunz/gen.nvim',
     opts = {
-      model = 'dolphin-llama3', -- The default model to use.
+      model = 'dolphin-llama3:latest', -- The default model to use.
+      quit_map = 'q', -- set keymap to close the response window
+      retry_map = '<c-r>', -- set keymap to re-send the current prompt
+      accept_map = '<c-cr>', -- set keymap to replace the previous selection with the last result
       host = 'localhost', -- The host running the Ollama service.
       port = '11434', -- The port on which the Ollama service is listening.
-      display_mode = 'float', -- The display mode. Can be "float" or "split".
-      show_prompt = true, -- Shows the Prompt submitted to Ollama.
-      show_model = true, -- Displays which model you are using at the beginning of your chat session.
+      display_mode = 'float', -- The display mode. Can be "float" or "split" or "horizontal-split".
+      show_prompt = false, -- Shows the prompt submitted to Ollama.
+      show_model = false, -- Displays which model you are using at the beginning of your chat session.
       no_auto_close = false, -- Never closes the window automatically.
+      file = false, -- Write the payload to a temporary file to keep the command short.
+      hidden = false, -- Hide the generation window (if true, will implicitly set `prompt.replace = true`), requires Neovim >= 0.10
       init = function(options)
         pcall(io.popen, 'ollama serve > /dev/null 2>&1 &')
       end,
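
A minimal usage sketch for the options added above: gen.nvim is driven through its :Gen command, and the quit/retry/accept maps ('q', <c-r>, <c-cr>) only apply inside the response window. The <leader>] binding below is an assumed example, not something the patch itself adds.

    -- Open the gen.nvim prompt menu for the current buffer or visual selection
    -- (assumed keymap; any free mapping works)
    vim.keymap.set({ 'n', 'v' }, '<leader>]', ':Gen<CR>', { desc = 'gen.nvim: select prompt' })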