@@ -12,7 +12,7 @@
 
 COHERE_API_KEY = config("COHERE_API_KEY", default="")
 KH_MODE = "dev"
-KH_FEATURE_USER_MANAGEMENT = True
+KH_FEATURE_USER_MANAGEMENT = False
 KH_FEATURE_USER_MANAGEMENT_ADMIN = str(
     config("KH_FEATURE_USER_MANAGEMENT_ADMIN", default="admin")
 )
@@ -21,6 +21,8 @@
 )
 KH_ENABLE_ALEMBIC = False
 KH_DATABASE = f"sqlite:///{user_cache_dir / 'sql.db'}"
+KH_FILESTORAGE_PATH = str(user_cache_dir / "files")
+
 KH_DOCSTORE = {
     "__type__": "kotaemon.storages.SimpleFileDocumentStore",
     "path": str(user_cache_dir / "docstore"),
@@ -29,51 +31,68 @@
     "__type__": "kotaemon.storages.ChromaVectorStore",
     "path": str(user_cache_dir / "vectorstore"),
 }
-KH_FILESTORAGE_PATH = str(user_cache_dir / "files")
 KH_LLMS = {
-    "gpt4": {
+    # example for using Azure OpenAI, the config variables can set as environment
+    # variables or in the .env file
+    # "gpt4": {
+    #     "def": {
+    #         "__type__": "kotaemon.llms.AzureChatOpenAI",
+    #         "temperature": 0,
+    #         "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
+    #         "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
+    #         "openai_api_version": config("OPENAI_API_VERSION", default=""),
+    #         "deployment_name": "<your deployment name>",
+    #         "stream": True,
+    #     },
+    #     "accuracy": 10,
+    #     "cost": 10,
+    #     "default": False,
+    # },
+    # "gpt35": {
+    #     "def": {
+    #         "__type__": "kotaemon.llms.AzureChatOpenAI",
+    #         "temperature": 0,
+    #         "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
+    #         "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
+    #         "openai_api_version": config("OPENAI_API_VERSION", default=""),
+    #         "deployment_name": "<your deployment name>",
+    #         "request_timeout": 10,
+    #         "stream": False,
+    #     },
+    #     "accuracy": 5,
+    #     "cost": 5,
+    #     "default": False,
+    # },
+    "local": {
         "def": {
-            "__type__": "kotaemon.llms.AzureChatOpenAI",
-            "temperature": 0,
-            "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
-            "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
-            "openai_api_version": config("OPENAI_API_VERSION", default=""),
-            "deployment_name": "dummy-q2",
-            "stream": True,
+            "__type__": "kotaemon.llms.EndpointChatLLM",
+            "endpoint_url": "http://localhost:31415/v1/chat/completions",
        },
-        "accuracy": 10,
-        "cost": 10,
         "default": False,
     },
-    "gpt35": {
-        "def": {
-            "__type__": "kotaemon.llms.AzureChatOpenAI",
-            "temperature": 0,
-            "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
-            "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
-            "openai_api_version": config("OPENAI_API_VERSION", default=""),
-            "deployment_name": "dummy-q2",
-            "request_timeout": 10,
-            "stream": False,
-        },
-        "accuracy": 5,
-        "cost": 5,
-        "default": True,
-    },
 }
 KH_EMBEDDINGS = {
-    "ada": {
+    # example for using Azure OpenAI, the config variables can set as environment
+    # variables or in the .env file
+    # "ada": {
+    #     "def": {
+    #         "__type__": "kotaemon.embeddings.AzureOpenAIEmbeddings",
+    #         "model": "text-embedding-ada-002",
+    #         "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
+    #         "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
+    #         "deployment": "<your deployment name>",
+    #         "chunk_size": 16,
+    #     },
+    #     "accuracy": 5,
+    #     "cost": 5,
+    #     "default": True,
+    # },
+    "local": {
         "def": {
-            "__type__": "kotaemon.embeddings.AzureOpenAIEmbeddings",
-            "model": "text-embedding-ada-002",
-            "azure_endpoint": config("AZURE_OPENAI_ENDPOINT", default=""),
-            "openai_api_key": config("AZURE_OPENAI_API_KEY", default=""),
-            "deployment": "dummy-q2-text-embedding",
-            "chunk_size": 16,
+            "__type__": "kotaemon.embeddings.EndpointEmbeddings",
+            "endpoint_url": "http://localhost:31415/v1/embeddings",
         },
-        "accuracy": 5,
-        "cost": 5,
-        "default": True,
+        "default": False,
     },
 }
 KH_REASONINGS = ["ktem.reasoning.simple.FullQAPipeline"]
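With this change the default "local" LLM and embedding entries both point at an OpenAI-compatible server on http://localhost:31415. Below is a minimal smoke test you can run before starting the app, assuming such a server is actually listening and implements the standard OpenAI chat-completions and embeddings schema; the model name "local-model" is a placeholder that many local servers ignore.

    # Smoke test for the local endpoints configured above (assumption: the
    # server at localhost:31415 speaks the OpenAI-compatible API).
    import requests

    chat = requests.post(
        "http://localhost:31415/v1/chat/completions",
        json={
            "model": "local-model",  # placeholder; often ignored by local servers
            "messages": [{"role": "user", "content": "Hello"}],
        },
        timeout=30,
    )
    chat.raise_for_status()
    print(chat.json()["choices"][0]["message"]["content"])

    emb = requests.post(
        "http://localhost:31415/v1/embeddings",
        json={"model": "local-model", "input": ["Hello"]},
        timeout=30,
    )
    emb.raise_for_status()
    print(len(emb.json()["data"][0]["embedding"]), "dimensions")

If both requests return 200, the "local" entries in KH_LLMS and KH_EMBEDDINGS should work as configured.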
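The commented-out Azure OpenAI entries read their credentials through config(...), which, as the in-file comment notes, resolves values from environment variables or a .env file. If the Azure examples are re-enabled, a .env roughly like the following would be needed; every value here is a placeholder for your own deployment, and COHERE_API_KEY is only required if Cohere models are used.

    COHERE_API_KEY=<optional Cohere key>
    AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com/
    AZURE_OPENAI_API_KEY=<your Azure OpenAI key>
    OPENAI_API_VERSION=<API version, e.g. 2023-05-15>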