Training a LoRA in ComfyUI
Video Lecture
| Section | Video Links |
|---|---|
| Train Lora | ![]() |
Description
LoRA is an abbreviation for Low Rank Adaptation. It is a method of fine-tuning a model with a small set of extra weights, which can then be used to guide image generation toward a particular style or subject.
To create your own LoRA, you will need some training images.
Download and extract these images into your ComfyUI/input folder.
You will also need a workflow. Copy & paste this JSON into your ComfyUI.
ComfyUI v0.3.49
{
"id": "64f1bf8f-d1c6-405a-b920-430211113eb7",
"revision": 0,
"last_node_id": 16,
"last_link_id": 20,
"nodes": [
{
"id": 15,
"type": "TrainLoraNode",
"pos": [548.8584594726562, 113.40888214111328],
"size": [301.08331298828125, 430],
"flags": {},
"order": 5,
"mode": 0,
"inputs": [
{
"localized_name": "model",
"name": "model",
"type": "MODEL",
"link": 18
},
{
"localized_name": "latents",
"name": "latents",
"type": "LATENT",
"link": 19
},
{
"localized_name": "positive",
"name": "positive",
"type": "CONDITIONING",
"link": 20
},
{
"localized_name": "batch_size",
"name": "batch_size",
"type": "INT",
"widget": { "name": "batch_size" },
"link": null
},
{
"localized_name": "grad_accumulation_steps",
"name": "grad_accumulation_steps",
"type": "INT",
"widget": { "name": "grad_accumulation_steps" },
"link": null
},
{
"localized_name": "steps",
"name": "steps",
"type": "INT",
"widget": { "name": "steps" },
"link": null
},
{
"localized_name": "learning_rate",
"name": "learning_rate",
"type": "FLOAT",
"widget": { "name": "learning_rate" },
"link": null
},
{
"localized_name": "rank",
"name": "rank",
"type": "INT",
"widget": { "name": "rank" },
"link": null
},
{
"localized_name": "optimizer",
"name": "optimizer",
"type": "COMBO",
"widget": { "name": "optimizer" },
"link": null
},
{
"localized_name": "loss_function",
"name": "loss_function",
"type": "COMBO",
"widget": { "name": "loss_function" },
"link": null
},
{
"localized_name": "seed",
"name": "seed",
"type": "INT",
"widget": { "name": "seed" },
"link": null
},
{
"localized_name": "training_dtype",
"name": "training_dtype",
"type": "COMBO",
"widget": { "name": "training_dtype" },
"link": null
},
{
"localized_name": "lora_dtype",
"name": "lora_dtype",
"type": "COMBO",
"widget": { "name": "lora_dtype" },
"link": null
},
{
"localized_name": "algorithm",
"name": "algorithm",
"type": "COMBO",
"widget": { "name": "algorithm" },
"link": null
},
{
"localized_name": "gradient_checkpointing",
"name": "gradient_checkpointing",
"type": "BOOLEAN",
"widget": { "name": "gradient_checkpointing" },
"link": null
},
{
"localized_name": "existing_lora",
"name": "existing_lora",
"type": "COMBO",
"widget": { "name": "existing_lora" },
"link": null
}
],
"outputs": [
{
"localized_name": "model_with_lora",
"name": "model_with_lora",
"type": "MODEL",
"links": null
},
{
"localized_name": "lora",
"name": "lora",
"type": "LORA_MODEL",
"links": [16]
},
{
"localized_name": "loss",
"name": "loss",
"type": "LOSS_MAP",
"links": [11]
},
{
"localized_name": "steps",
"name": "steps",
"type": "INT",
"links": [17]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.49",
"Node name for S&R": "TrainLoraNode"
},
"widgets_values": [
1,
1,
640,
0.0005,
16,
"AdamW",
"MSE",
794161203312319,
"randomize",
"bf16",
"bf16",
"LoRA",
true,
"[None]"
]
},
{
"id": 10,
"type": "VAEEncode",
"pos": [215.4689178466797, 110.34325408935547],
"size": [140, 46],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"localized_name": "pixels",
"name": "pixels",
"type": "IMAGE",
"link": 12
},
{ "localized_name": "vae", "name": "vae", "type": "VAE", "link": 13 }
],
"outputs": [
{
"localized_name": "LATENT",
"name": "LATENT",
"type": "LATENT",
"links": [19]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.49",
"Node name for S&R": "VAEEncode"
},
"widgets_values": []
},
{
"id": 11,
"type": "CheckpointLoaderSimple",
"pos": [-298.9602355957031, 101.03082275390625],
"size": [270, 98],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [
{
"localized_name": "ckpt_name",
"name": "ckpt_name",
"type": "COMBO",
"widget": { "name": "ckpt_name" },
"link": null
}
],
"outputs": [
{
"localized_name": "MODEL",
"name": "MODEL",
"type": "MODEL",
"links": [18]
},
{
"localized_name": "CLIP",
"name": "CLIP",
"type": "CLIP",
"links": [15]
},
{
"localized_name": "VAE",
"name": "VAE",
"type": "VAE",
"links": [13]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.49",
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": ["sd_xl_base_1.0.safetensors"]
},
{
"id": 14,
"type": "SaveLoRANode",
"pos": [968.5587768554688, 5.361778736114502],
"size": [285.263427734375, 232.21673583984375],
"flags": {},
"order": 7,
"mode": 0,
"inputs": [
{
"localized_name": "lora",
"name": "lora",
"type": "LORA_MODEL",
"link": 16
},
{
"localized_name": "steps",
"name": "steps",
"shape": 7,
"type": "INT",
"link": 17
},
{
"localized_name": "prefix",
"name": "prefix",
"type": "STRING",
"widget": { "name": "prefix" },
"link": null
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.49",
"Node name for S&R": "SaveLoRANode"
},
"widgets_values": ["loras/SBCODE-LORA-TEST-16"]
},
{
"id": 9,
"type": "LossGraphNode",
"pos": [971.4131469726562, 313.1512756347656],
"size": [278.72186279296875, 272.18048095703125],
"flags": {},
"order": 6,
"mode": 0,
"inputs": [
{
"localized_name": "loss",
"name": "loss",
"type": "LOSS_MAP",
"link": 11
},
{
"localized_name": "filename_prefix",
"name": "filename_prefix",
"type": "STRING",
"widget": { "name": "filename_prefix" },
"link": null
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.49",
"Node name for S&R": "LossGraphNode"
},
"widgets_values": ["loss_graph"]
},
{
"id": 12,
"type": "PreviewImage",
"pos": [-154.1158905029297, 588.8074340820312],
"size": [1085.2469482421875, 328.8590087890625],
"flags": {},
"order": 4,
"mode": 0,
"inputs": [
{
"localized_name": "images",
"name": "images",
"type": "IMAGE",
"link": 14
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.49",
"Node name for S&R": "PreviewImage"
},
"widgets_values": []
},
{
"id": 13,
"type": "CLIPTextEncode",
"pos": [175.48941040039062, 241.1298370361328],
"size": [211.06666564941406, 88],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"localized_name": "clip",
"name": "clip",
"type": "CLIP",
"link": 15
},
{
"localized_name": "text",
"name": "text",
"type": "STRING",
"widget": { "name": "text" },
"link": null
}
],
"outputs": [
{
"localized_name": "CONDITIONING",
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [20]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.49",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": ["SBCODE-LORA-TEST"]
},
{
"id": 16,
"type": "LoadImageSetFromFolderNode",
"pos": [-275.1855163574219, 338.2147216796875],
"size": [302.29583740234375, 82],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [
{
"localized_name": "folder",
"name": "folder",
"type": "COMBO",
"widget": { "name": "folder" },
"link": null
},
{
"localized_name": "resize_method",
"name": "resize_method",
"shape": 7,
"type": "COMBO",
"widget": { "name": "resize_method" },
"link": null
}
],
"outputs": [
{
"localized_name": "IMAGE",
"name": "IMAGE",
"type": "IMAGE",
"links": [12, 14]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.49",
"Node name for S&R": "LoadImageSetFromFolderNode"
},
"widgets_values": ["SBCODE-LORA-TEST", "None"]
}
],
"links": [
[11, 15, 2, 9, 0, "LOSS_MAP"],
[12, 16, 0, 10, 0, "IMAGE"],
[13, 11, 2, 10, 1, "VAE"],
[14, 16, 0, 12, 0, "IMAGE"],
[15, 11, 1, 13, 0, "CLIP"],
[16, 15, 1, 14, 0, "LORA_MODEL"],
[17, 15, 3, 14, 1, "INT"],
[18, 11, 0, 15, 0, "MODEL"],
[19, 10, 0, 15, 1, "LATENT"],
[20, 13, 0, 15, 2, "CONDITIONING"]
],
"groups": [],
"config": {},
"extra": {
"ds": {
"scale": 0.7337764350453186,
"offset": [577.8390328956199, 24.932266278186997]
}
},
"version": 0.4
}
Decide what rank and how many steps you want to train on. See the video for ideas. A higher rank and more steps produce better results, but are slower to train.
Press the run button to start the training.
If the training was successful, then you will find the generated LoRA file in your ComfyUI/output/loras folder.
Using the LoRA
To use a LoRA, you will need to load the model into your workflow and use the trigger word in the prompt.
Example LoRA trigger word : SBCODE-LORA-TEST
To see an example using this trigger word, drag this image into ComfyUI.

Examples from the Video


































