got rid of --claude and --local. everything is in --model

commit d7fb8fe92d (parent d2152b7da6)
@@ -197,12 +197,11 @@ Once you have it all set up, here's how to use it.
 `fabric -h`
 
 ```bash
-fabric -h
-usage: fabric [-h] [--text TEXT] [--copy] [--agents {trip_planner,ApiKeys}]
+fabric [-h] [--text TEXT] [--copy] [--agents {trip_planner,ApiKeys}]
        [--output [OUTPUT]] [--stream] [--list] [--update]
        [--pattern PATTERN] [--setup]
-       [--changeDefaultModel CHANGEDEFAULTMODEL] [--local] [--claude]
-       [--model MODEL] [--listmodels] [--context]
+       [--changeDefaultModel CHANGEDEFAULTMODEL] [--model MODEL]
+       [--listmodels] [--context]
 
 An open source framework for augmenting humans using AI.
 
@@ -228,8 +227,6 @@ options:
                         Change the default model. Your choice will be saved in
                         ~/.config/fabric/.env). For a list of available
                         models, use the --listmodels flag.
-  --local, -L           Use local LLM. Default is llama2
-  --claude              Use Claude AI
   --model MODEL, -m MODEL
                         Select the model to use (GPT-4 by default for chatGPT
                         and llama2 for Ollama)
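The README change summarizes the new interface: backend selection now flows through a single flag. An invocation that used `fabric --pattern <pattern> --local` becomes `fabric --pattern <pattern> --model llama2`, and `--claude` becomes `--model claude-3-opus-20240229` (llama2 and claude-3-opus-20240229 being the defaults named elsewhere in this commit); any other name reported by `--listmodels` works the same way.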
@@ -45,11 +45,6 @@ def main():
     )
     parser.add_argument('--changeDefaultModel',
                         help="Change the default model. Your choice will be saved in ~/.config/fabric/.env). For a list of available models, use the --listmodels flag.")
-    parser.add_argument(
-        '--local', '-L', help="Use local LLM. Default is llama2", action="store_true")
-
-    parser.add_argument(
-        "--claude", help="Use Claude AI", action="store_true")
 
     parser.add_argument(
         "--model", "-m", help="Select the model to use (GPT-4 by default for chatGPT and llama2 for Ollama)", default="gpt-4-turbo-preview"
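As a minimal sketch of what the consolidated CLI accepts after this hunk (stdlib argparse only; everything outside the --model argument is omitted):

import argparse

# Mirror of the one surviving model argument from the hunk above.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--model", "-m",
    help="Select the model to use (GPT-4 by default for chatGPT and llama2 for Ollama)",
    default="gpt-4-turbo-preview",
)

print(parser.parse_args([]).model)                # gpt-4-turbo-preview
print(parser.parse_args(["-m", "llama2"]).model)  # llama2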
@@ -81,6 +76,7 @@ def main():
         sys.exit()
     if args.changeDefaultModel:
         Setup().default_model(args.changeDefaultModel)
+        print(f"Default model changed to {args.changeDefaultModel}")
         sys.exit()
     if args.agents:
         # Handle the agents logic
@@ -101,12 +97,6 @@ def main():
         if not os.path.exists(os.path.join(config, "context.md")):
             print("Please create a context.md file in ~/.config/fabric")
             sys.exit()
-    standalone = None
-    if args.local:
-        standalone = Standalone(args, args.pattern, local=True)
-    elif args.claude:
-        standalone = Standalone(args, args.pattern, claude=True)
-    else:
-        standalone = Standalone(args, args.pattern)
+    standalone = Standalone(args, args.pattern)
     if args.list:
         try:
@@ -118,9 +108,15 @@ def main():
             print("No patterns found")
             sys.exit()
     if args.listmodels:
-        setup = Setup()
-        allmodels = setup.fetch_available_models()
-        for model in allmodels:
+        gptmodels, localmodels, claudemodels = standalone.fetch_available_models()
+        print("GPT Models:")
+        for model in gptmodels:
+            print(model)
+        print("\nLocal Models:")
+        for model in localmodels:
+            print(model)
+        print("\nClaude Models:")
+        for model in claudemodels:
             print(model)
         sys.exit()
     if args.text is not None:
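After this change `fabric --listmodels` prints three labelled sections rather than one flat list. With one model of each kind available (using only names that appear in this commit), the output would look like:

GPT Models:
gpt-4-turbo-preview

Local Models:
llama2

Claude Models:
claude-3-opus-20240229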
@@ -17,7 +17,7 @@ env_file = os.path.join(config_directory, ".env")
 
 
 class Standalone:
-    def __init__(self, args, pattern="", env_file="~/.config/fabric/.env", local=False, claude=False):
+    def __init__(self, args, pattern="", env_file="~/.config/fabric/.env"):
         """ Initialize the class with the provided arguments and environment file.
 
         Args:
@@ -46,21 +46,19 @@ class Standalone:
         except FileNotFoundError:
             print("No API key found. Use the --apikey option to set the key")
             sys.exit()
-        self.local = local
+        self.local = False
         self.config_pattern_directory = config_directory
         self.pattern = pattern
         self.args = args
         self.model = args.model
-        self.claude = claude
+        self.claude = False
+        sorted_gpt_models, ollamaList, claudeList = self.fetch_available_models()
         try:
             self.model = os.environ["DEFAULT_MODEL"]
         except:
-            if self.local:
-                if self.args.model == 'gpt-4-turbo-preview':
-                    self.model = 'llama2'
-            if self.claude:
-                if self.args.model == 'gpt-4-turbo-preview':
-                    self.model = 'claude-3-opus-20240229'
+            pass
+        self.local = self.model.strip() in ollamaList
+        self.claude = self.model.strip() in claudeList
 
     async def localChat(self, messages):
         from ollama import AsyncClient
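The net effect of the reworked __init__: instead of dedicated flags, the chosen model name is tested for membership in the Ollama and Claude lists returned by fetch_available_models(), and that sets self.local and self.claude. A self-contained sketch of the rule (the list contents are illustrative; only claude-3-opus-20240229 is hardcoded in the commit):

def route(model: str, ollama_models: list, claude_models: list) -> str:
    # Same membership test __init__ now performs on the --model value.
    if model.strip() in claude_models:
        return "claude"
    if model.strip() in ollama_models:
        return "local"
    return "openai"  # anything else goes to the OpenAI client

assert route("llama2", ["llama2"], ["claude-3-opus-20240229"]) == "local"
assert route("claude-3-opus-20240229", ["llama2"], ["claude-3-opus-20240229"]) == "claude"
assert route("gpt-4-turbo-preview", ["llama2"], ["claude-3-opus-20240229"]) == "openai"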
@@ -259,6 +257,9 @@ class Standalone:
             f.write(response.choices[0].message.content)
 
     def fetch_available_models(self):
+        gptlist = []
+        fullOllamaList = []
+        claudeList = ['claude-3-opus-20240229']
         headers = {
             "Authorization": f"Bearer {self.client.api_key}"
         }
@@ -267,25 +268,27 @@ class Standalone:
             "https://api.openai.com/v1/models", headers=headers)
 
         if response.status_code == 200:
-            print("OpenAI GPT models:\n")
             models = response.json().get("data", [])
             # Filter only gpt models
             gpt_models = [model for model in models if model.get(
                 "id", "").startswith(("gpt"))]
             # Sort the models alphabetically by their ID
-            sorted_gpt_models = sorted(gpt_models, key=lambda x: x.get("id"))
+            sorted_gpt_models = sorted(
+                gpt_models, key=lambda x: x.get("id"))
 
             for model in sorted_gpt_models:
-                print(model.get("id"))
-            print("\nLocal Ollama models:")
-            import ollama
-            ollamaList = ollama.list()['models']
-            for model in ollamaList:
-                print(model['name'].rstrip(":latest"))
-            print("\nClaude models:")
-            print("claude-3-opus-20240229")
+                gptlist.append(model.get("id"))
         else:
             print(f"Failed to fetch models: HTTP {response.status_code}")
+            sys.exit()
+        import ollama
+        try:
+            default_modelollamaList = ollama.list()['models']
+            for model in default_modelollamaList:
+                fullOllamaList.append(model['name'].rstrip(":latest"))
+        except:
+            fullOllamaList = []
+        return gptlist, fullOllamaList, claudeList
 
     def get_cli_input(self):
         """ aided by ChatGPT; uses platform library
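One caveat worth flagging in the Ollama branch above: str.rstrip(":latest") strips any trailing run of the characters :, l, a, t, e, s, not the literal ":latest" suffix, so a hypothetical model name like "mistral:latest" comes back as "mistr". A suffix-safe variant would look like this (a sketch, not part of the commit):

def strip_tag(name: str, tag: str = ":latest") -> str:
    # Remove the exact tag suffix; rstrip treats its argument as a set of
    # characters to strip, not as a literal suffix.
    return name[:-len(tag)] if name.endswith(tag) else name

print("llama2:latest".rstrip(":latest"))   # llama2  (works here by luck)
print("mistral:latest".rstrip(":latest"))  # mistr   (tail of the name eaten)
print(strip_tag("mistral:latest"))         # mistral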
@@ -520,17 +523,14 @@ class Setup:
 
     def update_fabric_command(self, line, model):
         fabric_command_regex = re.compile(
-            r"(alias.*fabric --pattern\s+\S+.*?)( --claude| --local)?'")
+            r"(alias.*fabric --pattern\s+\S+.*?)( --model.*)?'")
         match = fabric_command_regex.search(line)
         if match:
             base_command = match.group(1)
             # Provide a default value for current_flag
             current_flag = match.group(2) if match.group(2) else ""
             new_flag = ""
-            if model in self.claudeList:
-                new_flag = " --claude"
-            elif model in self.fullOllamaList:
-                new_flag = " --local"
+            new_flag = f" --model {model}"
             # Update the command if the new flag is different or to remove an existing flag.
             # Ensure to add the closing quote that was part of the original regex
             return f"{base_command}{new_flag}'\n"
@@ -539,15 +539,11 @@ class Setup:
 
     def update_fabric_alias(self, line, model):
         fabric_alias_regex = re.compile(
-            r"(alias fabric='[^']+?)( --claude| --local)?'")
+            r"(alias fabric='[^']+?)( --model.*)?'")
         match = fabric_alias_regex.search(line)
         if match:
             base_command, current_flag = match.groups()
-            new_flag = ""
-            if model in self.claudeList:
-                new_flag = " --claude"
-            elif model in self.fullOllamaList:
-                new_flag = " --local"
+            new_flag = f" --model {model}"
             # Update the alias if the new flag is different or to remove an existing flag.
             return f"{base_command}{new_flag}'\n"
         else:
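To make the rewrite concrete, here is a small standalone demo of the new alias regex (the alias body is a hypothetical example; update_fabric_command's regex behaves the same way for per-pattern aliases). Whether or not the line already carries a --model suffix, the old flag is captured by the optional second group and replaced:

import re

fabric_alias_regex = re.compile(r"(alias fabric='[^']+?)( --model.*)?'")

for line in ["alias fabric='fabric --stream'",
             "alias fabric='fabric --stream --model gpt-4-turbo-preview'"]:
    base_command, current_flag = fabric_alias_regex.search(line).groups()
    new_flag = " --model llama2"   # in Setup this is f" --model {model}"
    print(f"{base_command}{new_flag}'")

# Both lines print: alias fabric='fabric --stream --model llama2'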