@@ -23,25 +23,31 @@
         "download_url": "https://huggingface.co/QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct.Q4_K_M.gguf",
         "sha256": "c57380038ea85d8bec586ec2af9c91abc2f2b332d41d6cf180581d7bdffb93c1",
         "n_ctx": 8192,
-        "supportes_system_message": True,
+        "supports_system_message": True,
     },
     "gemma2-9b-q4": {
         "download_url": "https://huggingface.co/bartowski/gemma-2-9b-it-GGUF/resolve/main/gemma-2-9b-it-Q4_K_M.gguf",
         "sha256": "13b2a7b4115bbd0900162edcebe476da1ba1fc24e718e8b40d32f6e300f56dfe",
         "n_ctx": 8192,
-        "supportes_system_message": False,
+        "supports_system_message": False,
     },
     "phi3-mini-q4": {
         "download_url": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf/resolve/main/Phi-3-mini-4k-instruct-q4.gguf",
         "sha256": "8a83c7fb9049a9b2e92266fa7ad04933bb53aa1e85136b7b30f1b8000ff2edef",
         "n_ctx": 4096,
-        "supportes_system_message": True,
+        "supports_system_message": False,
     },
     "mistral0.3-7b-q4": {
         "download_url": "https://huggingface.co/lmstudio-community/Mistral-7B-Instruct-v0.3-GGUF/resolve/main/Mistral-7B-Instruct-v0.3-Q4_K_M.gguf",
         "sha256": "1270d22c0fbb3d092fb725d4d96c457b7b687a5f5a715abe1e818da303e562b6",
         "n_ctx": 32768,
-        "supportes_system_message": True,
+        "supports_system_message": False,
     },
+    "gemma2-2b-q6": {
+        "download_url": "https://huggingface.co/bartowski/gemma-2-2b-it-GGUF/resolve/main/gemma-2-2b-it-Q6_K_L.gguf",
+        "sha256": "b2ef9f67b38c6e246e593cdb9739e34043d84549755a1057d402563a78ff2254",
+        "n_ctx": 8192,
+        "supports_system_message": False,
+    },
 }

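For context on how these registry fields fit together: each entry pairs a GGUF `download_url` with a `sha256` checksum and a context length. A minimal sketch of verifying that checksum after download is shown below; the helper name is hypothetical and the repository's actual download code is not part of this diff.

```python
import hashlib
from pathlib import Path


def verify_gguf_checksum(path: Path, expected_sha256: str) -> bool:
    """Hash a downloaded GGUF file in chunks and compare it against the
    registry's sha256 field. Hypothetical helper, not code from this diff."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256


# Usage idea (assumes the file from entry["download_url"] was saved to model_path):
#   entry = supported_models["gemma2-2b-q6"]
#   assert verify_gguf_checksum(model_path, entry["sha256"])
```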
@@ -113,7 +119,7 @@ def __init__(self, model: str):
         sha256 = supported_models[model]["sha256"]
         n_ctx = supported_models[model]["n_ctx"]
         self.supports_system_message = supported_models[model][
-            "supportes_system_message"
+            "supports_system_message"
         ]
         if not self.supports_system_message:
             warn(
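This hunk reads the renamed `supports_system_message` flag and warns when it is False. One common workaround for models whose chat template lacks a system role is to fold the system prompt into the first user turn; the sketch below illustrates that idea only, with a hypothetical helper name and message format, since the diff does not show how the class handles the prompt after the warning.

```python
from warnings import warn


def prepare_messages(messages: list[dict], supports_system_message: bool) -> list[dict]:
    """Fold a leading system message into the first user turn when the model's
    chat template has no system role. Hypothetical helper for illustration."""
    if supports_system_message or not messages or messages[0]["role"] != "system":
        return messages
    warn("Model has no system role; prepending the system prompt to the first user message.")
    system, rest = messages[0], messages[1:]
    if rest and rest[0]["role"] == "user":
        merged = {"role": "user", "content": f"{system['content']}\n\n{rest[0]['content']}"}
        return [merged, *rest[1:]]
    return [{"role": "user", "content": system["content"]}, *rest]
```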