Skip to content

Commit 92be83b

Browse files
committed
feat(api-nodes): add Luma UNI-1 model
Signed-off-by: bigcat88 <bigcat88@icloud.com>
1 parent fce0398 commit 92be83b

2 files changed

Lines changed: 280 additions & 14 deletions

File tree

comfy_api_nodes/apis/luma.py

Lines changed: 32 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,12 @@
11
from __future__ import annotations
22

3-
4-
import torch
5-
63
from enum import Enum
74
from typing import Optional, Union
85

6+
import torch
97
from pydantic import BaseModel, Field, confloat
108

119

12-
1310
class LumaIO:
1411
LUMA_REF = "LUMA_REF"
1512
LUMA_CONCEPTS = "LUMA_CONCEPTS"
@@ -183,13 +180,13 @@ class LumaAssets(BaseModel):
183180

184181

185182
class LumaImageRef(BaseModel):
186-
'''Used for image gen'''
183+
"""Used for image gen"""
187184
url: str = Field(..., description='The URL of the image reference')
188185
weight: confloat(ge=0.0, le=1.0) = Field(..., description='The weight of the image reference')
189186

190187

191188
class LumaImageReference(BaseModel):
192-
'''Used for video gen'''
189+
"""Used for video gen"""
193190
type: Optional[str] = Field('image', description='Input type, defaults to image')
194191
url: str = Field(..., description='The URL of the image')
195192

@@ -251,3 +248,32 @@ class LumaGeneration(BaseModel):
251248
assets: Optional[LumaAssets] = Field(None, description='The assets of the generation')
252249
model: str = Field(..., description='The model used for the generation')
253250
request: Union[LumaGenerationRequest, LumaImageGenerationRequest] = Field(..., description="The request used for the generation")
251+
252+
253+
class Luma2ImageRef(BaseModel):
    """An image reference in a Luma 2 (UNI-1) request.

    All fields are optional at the model level; callers in nodes_luma.py
    populate ``url`` with an uploaded image URL.
    """

    # Publicly reachable URL of the image.
    url: Optional[str] = None
    # Inline image payload — presumably base64; confirm against the Luma API docs.
    data: Optional[str] = None
    # MIME type of the inline payload (e.g. "image/png") — verify with the API.
    media_type: Optional[str] = None
257+
258+
259+
class Luma2GenerationRequest(BaseModel):
    """Request payload for the Luma 2 (UNI-1) generations endpoint."""

    # Required text prompt; length 1-6000 is enforced by pydantic.
    prompt: str = Field(..., min_length=1, max_length=6000)
    model: Optional[str] = None
    # Generation kind; the calling nodes set "image" or "image_edit".
    type: Optional[str] = None
    aspect_ratio: Optional[str] = None
    style: Optional[str] = None
    output_format: Optional[str] = None
    web_search: Optional[bool] = None
    # Reference images for style/content guidance.
    image_ref: Optional[list[Luma2ImageRef]] = None
    # Source image for edit-type requests.
    source: Optional[Luma2ImageRef] = None
269+
270+
271+
class Luma2Generation(BaseModel):
    """Response payload describing a Luma 2 (UNI-1) generation job."""

    id: Optional[str] = None
    type: Optional[str] = None
    # Job state string; clients poll on this until the job is terminal.
    state: Optional[str] = None
    model: Optional[str] = None
    created_at: Optional[str] = None
    # Generated results; each entry carries an output URL.
    output: Optional[list[LumaImageReference]] = None
    failure_reason: Optional[str] = None
    failure_code: Optional[str] = None

comfy_api_nodes/nodes_luma.py

Lines changed: 248 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,11 @@
1-
from typing import Optional
2-
31
import torch
42
from typing_extensions import override
53

6-
from comfy_api.latest import IO, ComfyExtension
4+
from comfy_api.latest import IO, ComfyExtension, Input
75
from comfy_api_nodes.apis.luma import (
6+
Luma2Generation,
7+
Luma2GenerationRequest,
8+
Luma2ImageRef,
89
LumaAspectRatio,
910
LumaCharacterRef,
1011
LumaConceptChain,
@@ -30,6 +31,7 @@
3031
download_url_to_video_output,
3132
poll_op,
3233
sync_op,
34+
upload_image_to_comfyapi,
3335
upload_images_to_comfyapi,
3436
validate_string,
3537
)
@@ -212,9 +214,9 @@ async def execute(
212214
aspect_ratio: str,
213215
seed,
214216
style_image_weight: float,
215-
image_luma_ref: Optional[LumaReferenceChain] = None,
216-
style_image: Optional[torch.Tensor] = None,
217-
character_image: Optional[torch.Tensor] = None,
217+
image_luma_ref: LumaReferenceChain | None = None,
218+
style_image: torch.Tensor | None = None,
219+
character_image: torch.Tensor | None = None,
218220
) -> IO.NodeOutput:
219221
validate_string(prompt, strip_whitespace=True, min_length=3)
220222
# handle image_luma_ref
@@ -434,7 +436,7 @@ async def execute(
434436
duration: str,
435437
loop: bool,
436438
seed,
437-
luma_concepts: Optional[LumaConceptChain] = None,
439+
luma_concepts: LumaConceptChain | None = None,
438440
) -> IO.NodeOutput:
439441
validate_string(prompt, strip_whitespace=False, min_length=3)
440442
duration = duration if model != LumaVideoModel.ray_1_6 else None
@@ -533,7 +535,6 @@ def define_schema(cls) -> IO.Schema:
533535
],
534536
is_api_node=True,
535537
price_badge=PRICE_BADGE_VIDEO,
536-
537538
)
538539

539540
@classmethod
@@ -644,6 +645,243 @@ async def _convert_to_keyframes(
644645
)
645646

646647

648+
def _luma2_uni1_common_inputs(max_image_refs: int) -> list:
    """Build the schema inputs shared by the UNI-1 image and image-edit nodes.

    Args:
        max_image_refs: Upper bound on reference images (differs per node).

    Returns:
        Input definitions in fixed order: style combo, web-search toggle,
        and an optional autogrow group of reference images.
    """
    style_input = IO.Combo.Input(
        "style",
        options=["auto", "manga"],
        default="auto",
        tooltip="Style preset. 'auto' picks based on the prompt; "
        "'manga' applies a manga/anime aesthetic and requires a portrait "
        "aspect ratio (2:3, 9:16, 1:2, 1:3).",
    )
    web_search_input = IO.Boolean.Input(
        "web_search",
        default=False,
        tooltip="Search the web for visual references before generating.",
    )
    image_ref_input = IO.Autogrow.Input(
        "image_ref",
        template=IO.Autogrow.TemplateNames(
            IO.Image.Input("image"),
            names=[f"image_{i}" for i in range(1, max_image_refs + 1)],
            min=0,
        ),
        optional=True,
        tooltip=f"Up to {max_image_refs} reference images for style/content guidance.",
    )
    return [style_input, web_search_input, image_ref_input]
674+
675+
676+
async def _luma2_upload_image_refs(
677+
cls: type[IO.ComfyNode],
678+
refs: dict | None,
679+
max_count: int,
680+
) -> list[Luma2ImageRef] | None:
681+
if not refs:
682+
return None
683+
out: list[Luma2ImageRef] = []
684+
for key in refs:
685+
url = await upload_image_to_comfyapi(cls, refs[key])
686+
out.append(Luma2ImageRef(url=url))
687+
if len(out) > max_count:
688+
raise ValueError(f"Maximum {max_count} reference images are allowed.")
689+
return out or None
690+
691+
692+
async def _luma2_submit_and_poll(
    cls: type[IO.ComfyNode],
    request: Luma2GenerationRequest,
) -> Input.Image:
    """Submit a Luma 2 generation, poll until done, and download the result.

    Args:
        cls: Node class, forwarded to the sync/poll helpers.
        request: Fully populated generation request.

    Returns:
        The first generated output downloaded as an image tensor.

    Raises:
        RuntimeError: If the API returns no generation id, the job finishes
            without output, or the output entry carries no URL.
    """
    created = await sync_op(
        cls,
        ApiEndpoint(path="/proxy/luma_2/generations", method="POST"),
        response_model=Luma2Generation,
        data=request,
    )
    if not created.id:
        raise RuntimeError("Luma 2 API did not return a generation id.")
    finished = await poll_op(
        cls,
        ApiEndpoint(path=f"/proxy/luma_2/generations/{created.id}", method="GET"),
        response_model=Luma2Generation,
        status_extractor=lambda r: r.state,
        progress_extractor=lambda r: None,
    )
    if not finished.output:
        reason = finished.failure_reason or "no output returned"
        raise RuntimeError(f"Luma 2 generation failed: {reason}")
    first_url = finished.output[0].url
    if not first_url:
        raise RuntimeError("Luma 2 generation completed without an output URL.")
    return await download_url_to_image_tensor(first_url)
718+
719+
720+
class LumaImageNode(IO.ComfyNode):
    """Text-to-image node backed by the Luma UNI-1 model."""

    # Portrait aspect ratios accepted by the 'manga' style preset.
    _MANGA_RATIOS = frozenset({"2:3", "9:16", "1:2", "1:3"})

    @classmethod
    def define_schema(cls) -> IO.Schema:
        """Declare the node's inputs, outputs, and API metadata."""
        ratio_options = [
            "auto",
            "3:1",
            "2:1",
            "16:9",
            "3:2",
            "1:1",
            "2:3",
            "9:16",
            "1:2",
            "1:3",
        ]
        prompt_input = IO.String.Input(
            "prompt",
            multiline=True,
            default="",
            tooltip="Text description of the desired image. 1–6000 characters.",
        )
        uni1_option = IO.DynamicCombo.Option(
            "uni-1",
            [
                IO.Combo.Input(
                    "aspect_ratio",
                    options=ratio_options,
                    default="auto",
                    tooltip="Output image aspect ratio. 'auto' lets "
                    "the model pick based on the prompt.",
                ),
                *_luma2_uni1_common_inputs(max_image_refs=9),
            ],
        )
        model_input = IO.DynamicCombo.Input(
            "model",
            options=[uni1_option],
            tooltip="Model to use for generation.",
        )
        seed_input = IO.Int.Input(
            "seed",
            default=0,
            min=0,
            max=2147483647,
            control_after_generate=True,
            tooltip="Seed controls whether the node should re-run; "
            "results are non-deterministic regardless of seed.",
        )
        return IO.Schema(
            node_id="LumaImageNode2",
            display_name="Luma UNI-1 Image",
            category="api node/image/Luma",
            description="Generate images from text using the Luma UNI-1 model.",
            inputs=[prompt_input, model_input, seed_input],
            outputs=[IO.Image.Output()],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )

    @classmethod
    async def execute(
        cls,
        prompt: str,
        model: dict,
        seed: int,
    ) -> IO.NodeOutput:
        """Validate inputs, submit a UNI-1 text-to-image request, return the image.

        Raises:
            ValueError: If 'manga' style is combined with a non-portrait,
                non-'auto' aspect ratio.
        """
        validate_string(prompt, min_length=1, max_length=6000)
        aspect_ratio = model["aspect_ratio"]
        style = model["style"]
        # 'manga' is portrait-only; 'auto' defers the choice to the model.
        if style == "manga" and aspect_ratio != "auto" and aspect_ratio not in cls._MANGA_RATIOS:
            raise ValueError(
                f"'manga' style requires a portrait aspect ratio "
                f"({', '.join(sorted(cls._MANGA_RATIOS))}) or 'auto'; got '{aspect_ratio}'."
            )
        # Upload any connected reference images before building the request.
        image_refs = await _luma2_upload_image_refs(cls, model.get("image_ref"), max_count=9)
        request = Luma2GenerationRequest(
            prompt=prompt,
            model=model["model"],
            type="image",
            # "auto" means "omit the field and let the API decide".
            aspect_ratio=None if aspect_ratio == "auto" else aspect_ratio,
            style=None if style == "auto" else style,
            output_format="png",
            web_search=model["web_search"],
            image_ref=image_refs,
        )
        return IO.NodeOutput(await _luma2_submit_and_poll(cls, request))
812+
813+
814+
class LumaImageEditNode(IO.ComfyNode):
    """Prompt-driven image-editing node backed by the Luma UNI-1 model."""

    @classmethod
    def define_schema(cls) -> IO.Schema:
        """Declare the node's inputs, outputs, and API metadata."""
        source_input = IO.Image.Input(
            "source",
            tooltip="Source image to edit.",
        )
        prompt_input = IO.String.Input(
            "prompt",
            multiline=True,
            default="",
            tooltip="Description of the desired edit. 1–6000 characters.",
        )
        model_input = IO.DynamicCombo.Input(
            "model",
            options=[
                IO.DynamicCombo.Option(
                    "uni-1",
                    _luma2_uni1_common_inputs(max_image_refs=8),
                ),
            ],
            tooltip="Model to use for editing.",
        )
        seed_input = IO.Int.Input(
            "seed",
            default=0,
            min=0,
            max=2147483647,
            control_after_generate=True,
            tooltip="Seed controls whether the node should re-run; "
            "results are non-deterministic regardless of seed.",
        )
        return IO.Schema(
            node_id="LumaImageEditNode2",
            display_name="Luma UNI-1 Image Edit",
            category="api node/image/Luma",
            description="Edit an existing image with a text prompt using the Luma UNI-1 model.",
            inputs=[source_input, prompt_input, model_input, seed_input],
            outputs=[IO.Image.Output()],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )

    @classmethod
    async def execute(
        cls,
        source: Input.Image,
        prompt: str,
        model: dict,
        seed: int,
    ) -> IO.NodeOutput:
        """Validate the prompt, upload the source and refs, and run the edit."""
        validate_string(prompt, min_length=1, max_length=6000)
        # Upload the source image first, then any reference images
        # (same order as the request fields are evaluated).
        source_url = await upload_image_to_comfyapi(cls, source)
        chosen_style = model["style"]
        request = Luma2GenerationRequest(
            prompt=prompt,
            model=model["model"],
            type="image_edit",
            source=Luma2ImageRef(url=source_url),
            # "auto" means "omit the field and let the API decide".
            style=None if chosen_style == "auto" else chosen_style,
            output_format="png",
            web_search=model["web_search"],
            image_ref=await _luma2_upload_image_refs(cls, model.get("image_ref"), max_count=8),
        )
        return IO.NodeOutput(await _luma2_submit_and_poll(cls, request))
883+
884+
647885
class LumaExtension(ComfyExtension):
648886
@override
649887
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
@@ -654,6 +892,8 @@ async def get_node_list(self) -> list[type[IO.ComfyNode]]:
654892
LumaImageToVideoGenerationNode,
655893
LumaReferenceNode,
656894
LumaConceptsNode,
895+
LumaImageNode,
896+
LumaImageEditNode,
657897
]
658898

659899

0 commit comments

Comments
 (0)