|
1 | 1 | --- |
| 2 | +- name: "qwen3.6-35b-a3b" |
| 3 | + url: "github:mudler/LocalAI/gallery/virtual.yaml@master" |
| 4 | + urls: |
| 5 | + - https://huggingface.co/unsloth/Qwen3.6-35B-A3B-GGUF |
| 6 | + description: | |
| 7 | + # Qwen3.6-35B-A3B |
| 8 | + |
| 9 | + [Qwen Chat](https://chat.qwen.ai) |
| 10 | + |
| 11 | + > [!NOTE] |
| 12 | + > This repository contains model weights and configuration files for the post-trained model in the Hugging Face Transformers format. |
| 13 | + > |
| 14 | + > These artifacts are compatible with Hugging Face Transformers, vLLM, SGLang, KTransformers, etc. |
| 15 | + |
| 16 | + Following the February release of the Qwen3.5 series, we're pleased to share the first open-weight variant of Qwen3.6. Built on direct feedback from the community, Qwen3.6 prioritizes stability and real-world utility, offering developers a more intuitive, responsive, and genuinely productive coding experience. |
| 17 | + |
| 18 | + ## Qwen3.6 Highlights |
| 19 | + |
| 20 | + This release delivers substantial upgrades, particularly in |
| 21 | + |
| 22 | + - **Agentic Coding:** the model now handles frontend workflows and repository-level reasoning with greater fluency and precision. |
| 23 | + - **Thinking Preservation:** we've introduced a new option to retain reasoning context from historical messages, streamlining iterative development and reducing overhead. |
| 24 | + |
| 25 | + For more details, please refer to our blog post, "Qwen3.6-35B-A3B". |
| 26 | + |
| 27 | + ## Model Overview |
| 28 | + |
| 29 | + ... |
| 30 | + license: "apache-2.0" |
| 31 | + tags: |
| 32 | + - llm |
| 33 | + - gguf |
| 34 | + - qwen |
| 35 | + icon: https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3.6/Figures/qwen3.6_35b_a3b_score.png |
| 36 | + overrides: |
| 37 | + backend: llama-cpp |
| 38 | + function: |
| 39 | + automatic_tool_parsing_fallback: true |
| 40 | + grammar: |
| 41 | + disable: true |
| 42 | + known_usecases: |
| 43 | + - chat |
| 44 | + mmproj: llama-cpp/mmproj/Qwen3.6-35B-A3B-GGUF/mmproj-F32.gguf |
| 45 | + options: |
| 46 | + - use_jinja:true |
| 47 | + parameters: |
| 48 | + min_p: 0 |
| 49 | + model: llama-cpp/models/Qwen3.6-35B-A3B-GGUF/Qwen3.6-35B-A3B-UD-Q4_K_M.gguf |
| 50 | + presence_penalty: 1.5 |
| 51 | + repeat_penalty: 1 |
| 52 | + temperature: 0.7 |
| 53 | + top_k: 20 |
| 54 | + top_p: 0.8 |
| 55 | + template: |
| 56 | + use_tokenizer_template: true |
| 57 | + files: |
| 58 | + - filename: llama-cpp/models/Qwen3.6-35B-A3B-GGUF/Qwen3.6-35B-A3B-UD-Q4_K_M.gguf |
| 59 | + sha256: ac0e2c1189e055faa36eff361580e79c5bd6f8e76bffb4ce547f167d53e31a61 |
| 60 | + uri: https://huggingface.co/unsloth/Qwen3.6-35B-A3B-GGUF/resolve/main/Qwen3.6-35B-A3B-UD-Q4_K_M.gguf |
| 61 | + - filename: llama-cpp/mmproj/Qwen3.6-35B-A3B-GGUF/mmproj-F32.gguf |
| 62 | + sha256: 0a1c1cd2772ae6de5e87e023cea454720924675f11fe2b0e7bb7648e48debdc0 |
| 63 | + uri: https://huggingface.co/unsloth/Qwen3.6-35B-A3B-GGUF/resolve/main/mmproj-F32.gguf |
2 | 64 | - name: "gemma-4-26b-a4b-it-apex" |
3 | 65 | url: "github:mudler/LocalAI/gallery/virtual.yaml@master" |
4 | 66 | urls: |
|
0 commit comments