diff --git a/invokeai/app/invocations/flux2_denoise.py b/invokeai/app/invocations/flux2_denoise.py
index 1b5ea372d68..0a5d854f534 100644
--- a/invokeai/app/invocations/flux2_denoise.py
+++ b/invokeai/app/invocations/flux2_denoise.py
@@ -53,8 +53,8 @@
"flux2_denoise",
title="FLUX2 Denoise",
tags=["image", "flux", "flux2", "klein", "denoise"],
- category="latents",
- version="1.4.0",
+ category="image",
+ version="1.5.0",
classification=Classification.Prototype,
)
class Flux2DenoiseInvocation(BaseInvocation):
@@ -101,6 +101,13 @@ class Flux2DenoiseInvocation(BaseInvocation):
description="Negative conditioning tensor. Can be None if cfg_scale is 1.0.",
input=Input.Connection,
)
+ guidance: float = InputField(
+ default=4.0,
+ ge=0,
+ le=20,
+ description="The guidance strength. Only used by undistilled models (Klein 9B Base). "
+ "Ignored by distilled models (Klein 4B, Klein 9B).",
+ )
cfg_scale: float = InputField(
default=1.0,
description=FieldDescriptions.cfg_scale,
@@ -467,6 +474,7 @@ def _run_diffusion(self, context: InvocationContext) -> torch.Tensor:
txt_ids=txt_ids,
timesteps=timesteps,
step_callback=self._build_step_callback(context),
+ guidance=self.guidance,
cfg_scale=cfg_scale_list,
neg_txt=neg_txt,
neg_txt_ids=neg_txt_ids,
diff --git a/invokeai/backend/flux/util.py b/invokeai/backend/flux/util.py
index da6590c7573..81f8caf46a1 100644
--- a/invokeai/backend/flux/util.py
+++ b/invokeai/backend/flux/util.py
@@ -133,11 +133,26 @@ def get_flux_ae_params() -> AutoEncoderParams:
axes_dim=[16, 56, 56],
theta=10_000,
qkv_bias=True,
- guidance_embed=True,
+ guidance_embed=False,
),
# Flux2 Klein 9B uses Qwen3 8B text encoder with stacked embeddings from layers [9, 18, 27]
# The context_in_dim is 3 * hidden_size of Qwen3 (3 * 4096 = 12288)
Flux2VariantType.Klein9B: FluxParams(
+ in_channels=64,
+ vec_in_dim=4096, # Qwen3-8B hidden size (used for pooled output)
+ context_in_dim=12288, # 3 layers * 4096 = 12288 for Qwen3-8B
+ hidden_size=3072,
+ mlp_ratio=4.0,
+ num_heads=24,
+ depth=19,
+ depth_single_blocks=38,
+ axes_dim=[16, 56, 56],
+ theta=10_000,
+ qkv_bias=True,
+ guidance_embed=False,
+ ),
+ # Flux2 Klein 9B Base is the undistilled foundation model with guidance_embeds=True
+ Flux2VariantType.Klein9BBase: FluxParams(
in_channels=64,
vec_in_dim=4096, # Qwen3-8B hidden size (used for pooled output)
context_in_dim=12288, # 3 layers * 4096 = 12288 for Qwen3-8B
diff --git a/invokeai/backend/flux2/denoise.py b/invokeai/backend/flux2/denoise.py
index 7b5bd6194e0..47a1af68023 100644
--- a/invokeai/backend/flux2/denoise.py
+++ b/invokeai/backend/flux2/denoise.py
@@ -26,6 +26,7 @@ def denoise(
# sampling parameters
timesteps: list[float],
step_callback: Callable[[PipelineIntermediateState], None],
+ guidance: float,
cfg_scale: list[float],
# Negative conditioning for CFG
neg_txt: torch.Tensor | None = None,
@@ -45,7 +46,9 @@ def denoise(
This is a simplified denoise function for FLUX.2 Klein models that uses
the diffusers Flux2Transformer2DModel interface.
- Note: FLUX.2 Klein has guidance_embeds=False, so no guidance parameter is used.
+ Distilled models (Klein 4B, Klein 9B) have guidance_embeds=False, so the guidance
+ value is passed but ignored by the model. Undistilled models (Klein 9B Base) have
+ guidance_embeds=True and use the guidance value for generation.
CFG is applied externally using negative conditioning when cfg_scale != 1.0.
Args:
@@ -56,6 +59,8 @@ def denoise(
txt_ids: Text position IDs tensor.
timesteps: List of timesteps for denoising schedule (linear sigmas from 1.0 to 1/n).
step_callback: Callback function for progress updates.
+ guidance: Guidance strength. Used by undistilled models (Klein 9B Base),
+ ignored by distilled models (Klein 4B, Klein 9B).
cfg_scale: List of CFG scale values per step.
neg_txt: Negative text embeddings for CFG (optional).
neg_txt_ids: Negative text position IDs (optional).
@@ -76,9 +81,10 @@ def denoise(
img = torch.cat([img, img_cond_seq], dim=1)
img_ids = torch.cat([img_ids, img_cond_seq_ids], dim=1)
- # Klein has guidance_embeds=False, but the transformer forward() still requires a guidance tensor
- # We pass a dummy value (1.0) since it won't affect the output when guidance_embeds=False
- guidance = torch.full((img.shape[0],), 1.0, device=img.device, dtype=img.dtype)
+ # The transformer forward() requires a guidance tensor.
+ # For distilled models (guidance_embeds=False), this value is ignored by the model.
+ # For undistilled models (Klein 9B Base, guidance_embeds=True), it controls guidance strength.
+ guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
# Use scheduler if provided
use_scheduler = scheduler is not None
@@ -121,7 +127,7 @@ def denoise(
timestep=t_vec,
img_ids=img_ids,
txt_ids=txt_ids,
- guidance=guidance,
+ guidance=guidance_vec,
return_dict=False,
)
@@ -141,7 +147,7 @@ def denoise(
timestep=t_vec,
img_ids=img_ids,
txt_ids=neg_txt_ids if neg_txt_ids is not None else txt_ids,
- guidance=guidance,
+ guidance=guidance_vec,
return_dict=False,
)
@@ -222,7 +228,7 @@ def denoise(
timestep=t_vec,
img_ids=img_ids,
txt_ids=txt_ids,
- guidance=guidance,
+ guidance=guidance_vec,
return_dict=False,
)
@@ -242,7 +248,7 @@ def denoise(
timestep=t_vec,
img_ids=img_ids,
txt_ids=neg_txt_ids if neg_txt_ids is not None else txt_ids,
- guidance=guidance,
+ guidance=guidance_vec,
return_dict=False,
)
diff --git a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
index e123d0ebd06..105ad3dfd67 100644
--- a/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
+++ b/invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx
@@ -63,6 +63,8 @@ export const ImageMetadataActions = memo((props: Props) => {
+
+
);
diff --git a/invokeai/frontend/web/src/features/metadata/parsing.test.tsx b/invokeai/frontend/web/src/features/metadata/parsing.test.tsx
new file mode 100644
index 00000000000..01e33ee2dbb
--- /dev/null
+++ b/invokeai/frontend/web/src/features/metadata/parsing.test.tsx
@@ -0,0 +1,128 @@
+import type { AppStore } from 'app/store/store';
+import type * as paramsSliceModule from 'features/controlLayers/store/paramsSlice';
+import { ImageMetadataHandlers } from 'features/metadata/parsing';
+import type * as modelsApiModule from 'services/api/endpoints/models';
+import { beforeEach, describe, expect, it, vi } from 'vitest';
+
+// ---------------------------------------------------------------------------
+// Module mocks
+//
+// We are testing only the *gating* logic of the model-related metadata
+// handlers (`VAEModel`, `KleinVAEModel`, `KleinQwen3EncoderModel`). The actual
+// model lookup goes through `parseModelIdentifier`, which dispatches RTK
+// Query thunks. We stub the models endpoint so that any lookup resolves to a
+// canned model identifier — the parse step then succeeds and the assertions
+// inside each handler become observable.
+// ---------------------------------------------------------------------------
+
+let currentBase: string | null = 'flux2';
+
+vi.mock('features/controlLayers/store/paramsSlice', async (importOriginal) => {
+ const mod = await importOriginal<typeof paramsSliceModule>();
+ return { ...mod, selectBase: () => currentBase };
+});
+
+const fakeModel = (type: 'vae' | 'qwen3_encoder', base: string) => ({
+ key: `${type}-key`,
+ hash: 'hash',
+ name: `Some ${type}`,
+ base,
+ type,
+});
+
+let nextResolved: ReturnType<typeof fakeModel> = fakeModel('vae', 'flux2');
+
+vi.mock('services/api/endpoints/models', async (importOriginal) => {
+ const mod = await importOriginal();
+ return {
+ ...mod,
+ modelsApi: {
+ ...mod.modelsApi,
+ endpoints: {
+ ...mod.modelsApi.endpoints,
+ getModelConfig: { initiate: (key: string) => ({ type: 'rtkq/initiate', key }) },
+ },
+ },
+ };
+});
+
+const makeStore = (): AppStore =>
+ ({
+ dispatch: vi.fn(() => ({
+ unwrap: () => Promise.resolve(nextResolved),
+ })),
+ getState: () => ({}),
+ }) as unknown as AppStore;
+
+beforeEach(() => {
+ currentBase = 'flux2';
+ nextResolved = fakeModel('vae', 'flux2');
+});
+
+describe('ImageMetadataHandlers — Klein recall gating', () => {
+ describe('KleinVAEModel', () => {
+ it('parses metadata.vae when the current main model is FLUX.2 Klein', async () => {
+ currentBase = 'flux2';
+ nextResolved = fakeModel('vae', 'flux2');
+ const store = makeStore();
+
+ const parsed = await ImageMetadataHandlers.KleinVAEModel.parse({ vae: nextResolved }, store);
+
+ expect(parsed.key).toBe('vae-key');
+ expect(parsed.type).toBe('vae');
+ });
+
+ it('rejects parsing when the current main model is not FLUX.2 Klein', async () => {
+ currentBase = 'sdxl';
+ nextResolved = fakeModel('vae', 'flux2');
+ const store = makeStore();
+
+ await expect(ImageMetadataHandlers.KleinVAEModel.parse({ vae: nextResolved }, store)).rejects.toThrow();
+ });
+ });
+
+ describe('KleinQwen3EncoderModel', () => {
+ it('parses metadata.qwen3_encoder when the current main model is FLUX.2 Klein', async () => {
+ currentBase = 'flux2';
+ nextResolved = fakeModel('qwen3_encoder', 'flux2');
+ const store = makeStore();
+
+ const parsed = await ImageMetadataHandlers.KleinQwen3EncoderModel.parse({ qwen3_encoder: nextResolved }, store);
+
+ expect(parsed.key).toBe('qwen3_encoder-key');
+ expect(parsed.type).toBe('qwen3_encoder');
+ });
+
+ it('rejects parsing when the current main model is not FLUX.2 Klein', async () => {
+ currentBase = 'sdxl';
+ nextResolved = fakeModel('qwen3_encoder', 'flux2');
+ const store = makeStore();
+
+ await expect(
+ ImageMetadataHandlers.KleinQwen3EncoderModel.parse({ qwen3_encoder: nextResolved }, store)
+ ).rejects.toThrow();
+ });
+ });
+
+ describe('VAEModel (generic)', () => {
+ // The generic VAEModel handler must NOT also fire for FLUX.2 / Z-Image
+ // images, otherwise the metadata viewer renders duplicate VAE rows next
+ // to the dedicated KleinVAEModel / ZImageVAEModel handlers.
+ it.each(['flux2', 'z-image'])('rejects parsing when current base is %s', async (base) => {
+ currentBase = base;
+ nextResolved = fakeModel('vae', base);
+ const store = makeStore();
+
+ await expect(ImageMetadataHandlers.VAEModel.parse({ vae: nextResolved }, store)).rejects.toThrow();
+ });
+
+ it('parses successfully for non-Klein, non-Z-Image bases', async () => {
+ currentBase = 'sdxl';
+ nextResolved = fakeModel('vae', 'sdxl');
+ const store = makeStore();
+
+ const parsed = await ImageMetadataHandlers.VAEModel.parse({ vae: nextResolved }, store);
+ expect(parsed.key).toBe('vae-key');
+ });
+ });
+});
diff --git a/invokeai/frontend/web/src/features/metadata/parsing.tsx b/invokeai/frontend/web/src/features/metadata/parsing.tsx
index fc8f3f7f6ca..059807bdb8c 100644
--- a/invokeai/frontend/web/src/features/metadata/parsing.tsx
+++ b/invokeai/frontend/web/src/features/metadata/parsing.tsx
@@ -952,6 +952,9 @@ const VAEModel: SingleMetadataHandler = {
const parsed = await parseModelIdentifier(raw, store, 'vae');
assert(parsed.type === 'vae');
assert(isCompatibleWithMainModel(parsed, store));
+ // Z-Image and FLUX.2 Klein have dedicated VAE handlers; avoid rendering a duplicate row.
+ const base = selectBase(store.getState());
+ assert(base !== 'z-image' && base !== 'flux2', 'VAEModel handler does not apply to Z-Image or FLUX.2 Klein');
return Promise.resolve(parsed);
},
recall: (value, store) => {
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFLUXGraph.test.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFLUXGraph.test.ts
new file mode 100644
index 00000000000..a8cdcadbf72
--- /dev/null
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFLUXGraph.test.ts
@@ -0,0 +1,205 @@
+import { buildFLUXGraph } from 'features/nodes/util/graph/generation/buildFLUXGraph';
+import type { Graph } from 'features/nodes/util/graph/generation/Graph';
+import type { GraphBuilderArg } from 'features/nodes/util/graph/types';
+import type { Invocation } from 'services/api/types';
+import { beforeEach, describe, expect, it, vi } from 'vitest';
+
+// ---------------------------------------------------------------------------
+// Module mocks
+//
+// `buildFLUXGraph` pulls in a large slice of the app: redux selectors, every
+// `add*` helper, validators, the canvas manager, etc. The function itself only
+// orchestrates these; the unit under test here is the orchestration logic
+// (variant-gated guidance, scheduler propagation, metadata persistence). So we
+// stub out every collaborator and assert against the resulting `Graph` object.
+// ---------------------------------------------------------------------------
+
+const mockState = {
+ // buildFLUXGraph reads `state.system.shouldUse{NSFWChecker,Watermarker}` directly,
+ // every other access is funneled through the mocked selectors below.
+ system: { shouldUseNSFWChecker: false, shouldUseWatermarker: false },
+} as unknown as Parameters<typeof buildFLUXGraph>[0]['state'];
+
+const mockParams = {
+ guidance: 3.5,
+ steps: 28,
+ fluxScheduler: 'euler' as const,
+ fluxDypePreset: 'off' as const,
+ fluxDypeScale: 1,
+ fluxDypeExponent: 1,
+ fluxVAE: null,
+ t5EncoderModel: null,
+ clipEmbedModel: null,
+};
+
+let currentModel: { key: string; hash: string; name: string; base: string; type: string; variant?: string } | null;
+let currentKleinVae: { key: string; hash: string; name: string; base: string; type: string } | null;
+let currentKleinQwen3: { key: string; hash: string; name: string; base: string; type: string } | null;
+
+vi.mock('features/controlLayers/store/paramsSlice', () => ({
+ selectMainModelConfig: () => currentModel,
+ selectParamsSlice: () => mockParams,
+ selectKleinVaeModel: () => currentKleinVae,
+ selectKleinQwen3EncoderModel: () => currentKleinQwen3,
+}));
+
+vi.mock('features/controlLayers/store/refImagesSlice', () => ({
+ selectRefImagesSlice: () => ({ entities: [] }),
+}));
+
+vi.mock('features/controlLayers/store/selectors', () => ({
+ selectCanvasSlice: () => ({
+ bbox: { rect: { x: 0, y: 0, width: 1024, height: 1024 } },
+ controlLayers: { entities: [] },
+ regionalGuidance: { entities: [] },
+ }),
+ selectCanvasMetadata: () => ({}),
+}));
+
+vi.mock('features/controlLayers/store/types', () => ({
+ isFlux2ReferenceImageConfig: () => false,
+ isFluxKontextReferenceImageConfig: () => false,
+}));
+
+vi.mock('features/controlLayers/store/validators', () => ({
+ getGlobalReferenceImageWarnings: () => [],
+}));
+
+vi.mock('features/ui/store/uiSelectors', () => ({
+ selectActiveTab: () => 'generate',
+}));
+
+vi.mock('features/nodes/util/graph/graphBuilderUtils', () => ({
+ selectCanvasOutputFields: () => ({}),
+}));
+
+// Helper add* functions: each test cares only that the FLUX.2 orchestration
+// path produces the right metadata + denoise inputs. The actual node graph
+// produced by these helpers is irrelevant here.
+vi.mock('features/nodes/util/graph/generation/addTextToImage', () => ({
+ addTextToImage: ({ l2i }: { l2i: Invocation<'flux2_vae_decode'> }) => l2i,
+}));
+vi.mock('features/nodes/util/graph/generation/addImageToImage', () => ({
+ addImageToImage: vi.fn(),
+}));
+vi.mock('features/nodes/util/graph/generation/addInpaint', () => ({ addInpaint: vi.fn() }));
+vi.mock('features/nodes/util/graph/generation/addOutpaint', () => ({ addOutpaint: vi.fn() }));
+vi.mock('features/nodes/util/graph/generation/addNSFWChecker', () => ({ addNSFWChecker: vi.fn() }));
+vi.mock('features/nodes/util/graph/generation/addWatermarker', () => ({ addWatermarker: vi.fn() }));
+vi.mock('features/nodes/util/graph/generation/addRegions', () => ({ addRegions: vi.fn(() => []) }));
+vi.mock('features/nodes/util/graph/generation/addFLUXLoRAs', () => ({ addFLUXLoRAs: vi.fn() }));
+vi.mock('features/nodes/util/graph/generation/addFlux2KleinLoRAs', () => ({ addFlux2KleinLoRAs: vi.fn() }));
+vi.mock('features/nodes/util/graph/generation/addFLUXFill', () => ({ addFLUXFill: vi.fn() }));
+vi.mock('features/nodes/util/graph/generation/addFLUXRedux', () => ({
+ addFLUXReduxes: () => ({ addedFLUXReduxes: 0 }),
+}));
+vi.mock('features/nodes/util/graph/generation/addControlAdapters', () => ({
+ addControlNets: vi.fn(() => Promise.resolve({ addedControlNets: 0 })),
+ addControlLoRA: vi.fn(),
+}));
+vi.mock('features/nodes/util/graph/generation/addIPAdapters', () => ({
+ addIPAdapters: () => ({ addedIPAdapters: 0 }),
+}));
+
+// ---------------------------------------------------------------------------
+// Test harness
+// ---------------------------------------------------------------------------
+
+const makeFlux2Model = (variant: string) => ({
+ key: `flux2-${variant}`,
+ hash: 'hash',
+ name: `FLUX.2 Klein ${variant}`,
+ base: 'flux2',
+ type: 'main',
+ variant,
+});
+
+const buildArg = (): GraphBuilderArg =>
+ ({
+ generationMode: 'txt2img',
+ state: mockState,
+ manager: null,
+ }) as unknown as GraphBuilderArg;
+
+const findFlux2Denoise = (g: Graph): Invocation<'flux2_denoise'> | undefined => {
+ // The Graph object stores nodes on `_graph.nodes` keyed by id.
+ const nodes = (g as unknown as { _graph: { nodes: Record<string, { type: string }> } })._graph.nodes;
+ return Object.values(nodes).find((n) => n.type === 'flux2_denoise') as Invocation<'flux2_denoise'> | undefined;
+};
+
+const getMetadata = (g: Graph): Record<string, unknown> =>
+ (g as unknown as { getMetadataNode: () => Record<string, unknown> }).getMetadataNode();
+
+beforeEach(() => {
+ currentModel = null;
+ currentKleinVae = null;
+ currentKleinQwen3 = null;
+});
+
+describe('buildFLUXGraph (FLUX.2 Klein)', () => {
+ describe('guidance gating', () => {
+ it('writes guidance into metadata and the denoise node for klein_9b_base', async () => {
+ currentModel = makeFlux2Model('klein_9b_base');
+
+ const { g } = await buildFLUXGraph(buildArg());
+
+ const metadata = getMetadata(g);
+ expect(metadata.guidance).toBe(mockParams.guidance);
+
+ const denoise = findFlux2Denoise(g);
+ expect(denoise).toBeDefined();
+ expect(denoise?.guidance).toBe(mockParams.guidance);
+ });
+
+ it.each(['klein_9b', 'klein_4b'])(
+ 'omits guidance from metadata and denoise for distilled variant %s',
+ async (variant) => {
+ currentModel = makeFlux2Model(variant);
+
+ const { g } = await buildFLUXGraph(buildArg());
+
+ const metadata = getMetadata(g);
+ expect(metadata.guidance).toBeUndefined();
+
+ const denoise = findFlux2Denoise(g);
+ expect(denoise).toBeDefined();
+ expect(denoise?.guidance).toBeUndefined();
+ }
+ );
+ });
+
+ describe('scheduler persistence', () => {
+ it('writes the FLUX scheduler into metadata and the denoise node for FLUX.2', async () => {
+ currentModel = makeFlux2Model('klein_9b_base');
+
+ const { g } = await buildFLUXGraph(buildArg());
+
+ expect(getMetadata(g).scheduler).toBe(mockParams.fluxScheduler);
+ expect(findFlux2Denoise(g)?.scheduler).toBe(mockParams.fluxScheduler);
+ });
+ });
+
+ describe('Klein VAE / Qwen3 metadata', () => {
+ it('persists separately selected Klein VAE and Qwen3 encoder into metadata', async () => {
+ currentModel = makeFlux2Model('klein_9b_base');
+ currentKleinVae = { key: 'vae-1', hash: 'h', name: 'Klein VAE', base: 'flux2', type: 'vae' };
+ currentKleinQwen3 = { key: 'q3-1', hash: 'h', name: 'Qwen3', base: 'flux2', type: 'qwen3_encoder' };
+
+ const { g } = await buildFLUXGraph(buildArg());
+
+ const metadata = getMetadata(g);
+ expect(metadata.vae).toEqual(currentKleinVae);
+ expect(metadata.qwen3_encoder).toEqual(currentKleinQwen3);
+ });
+
+ it('omits vae / qwen3_encoder when none are selected', async () => {
+ currentModel = makeFlux2Model('klein_9b_base');
+
+ const { g } = await buildFLUXGraph(buildArg());
+
+ const metadata = getMetadata(g);
+ expect(metadata.vae).toBeUndefined();
+ expect(metadata.qwen3_encoder).toBeUndefined();
+ });
+ });
+});
diff --git a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFLUXGraph.ts b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFLUXGraph.ts
index ba27e5dbf6e..d03924d4979 100644
--- a/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFLUXGraph.ts
+++ b/invokeai/frontend/web/src/features/nodes/util/graph/generation/buildFLUXGraph.ts
@@ -160,6 +160,10 @@ export const buildFLUXGraph = async (arg: GraphBuilderArg): Promise<GraphBuilderReturn> => {
 const flux2Metadata: Record<string, unknown> = {
model: Graph.getModelMetadataField(model),
steps,
+ scheduler: fluxScheduler,
};
+ if (model.variant === 'klein_9b_base') {
+ flux2Metadata.guidance = guidance;
+ }
if (kleinVaeModel) {
flux2Metadata.vae = kleinVaeModel;
}
diff --git a/invokeai/frontend/web/src/features/settingsAccordions/components/GenerationSettingsAccordion/GenerationSettingsAccordion.tsx b/invokeai/frontend/web/src/features/settingsAccordions/components/GenerationSettingsAccordion/GenerationSettingsAccordion.tsx
index 2b21c823084..5b4b6ca2611 100644
--- a/invokeai/frontend/web/src/features/settingsAccordions/components/GenerationSettingsAccordion/GenerationSettingsAccordion.tsx
+++ b/invokeai/frontend/web/src/features/settingsAccordions/components/GenerationSettingsAccordion/GenerationSettingsAccordion.tsx
@@ -92,11 +92,14 @@ export const GenerationSettingsAccordion = memo(() => {
{!isFLUX && !isFlux2 && !isSD3 && !isCogView4 && !isZImage && !isQwenImage && !isAnima && (
)}
- {isFLUX && }
+ {(isFLUX || isFlux2) && }
{isZImage && }
{isAnima && }
- {(isFLUX || isFlux2) && modelConfig && !isFluxFillMainModelModelConfig(modelConfig) && }
+ {isFLUX && modelConfig && !isFluxFillMainModelModelConfig(modelConfig) && }
+ {isFlux2 && modelConfig && 'variant' in modelConfig && modelConfig.variant === 'klein_9b_base' && (
+ <ParamGuidance />
+ )}
{!isFLUX && !isFlux2 && }
{isZImage && }
{isQwenImage && }
diff --git a/invokeai/frontend/web/src/services/api/schema.ts b/invokeai/frontend/web/src/services/api/schema.ts
index 2e93e98ad56..5e79d10da09 100644
--- a/invokeai/frontend/web/src/services/api/schema.ts
+++ b/invokeai/frontend/web/src/services/api/schema.ts
@@ -9475,6 +9475,12 @@ export type components = {
* @default null
*/
negative_text_conditioning?: components["schemas"]["FluxConditioningField"] | null;
+ /**
+ * Guidance
+ * @description The guidance strength. Only used by undistilled models (Klein 9B Base). Ignored by distilled models (Klein 4B, Klein 9B).
+ * @default 4
+ */
+ guidance?: number;
/**
* CFG Scale
* @description Classifier-Free Guidance scale