Training a LoRA in ComfyUI
Warning
This workflow doesn't work for SDXL based models with ComfyUI versions between 0.3.68 and 0.6.0
Update your portable ComfyUI using the update_comfyui_stable.bat in your .\ComfyUI_windows_portable\update\ folder.
Video Lecture
| Section | Video Links |
|---|---|
| Train Lora | ![]() |
Description
LoRA is an abbreviation for Low Rank Adaptation. It is a lightweight fine-tuning method used to guide image generation toward a trained concept or style.
To create your own LoRA, you will need some training images.
Download and extract these images into your ComfyUI/input folder.
You will also need a workflow. Copy & paste this JSON into your ComfyUI.
ComfyUI v0.6.0
{
"id": "58ce460f-d3b5-454a-9d32-6a4bc81820d6",
"revision": 0,
"last_node_id": 16,
"last_link_id": 29,
"nodes": [
{
"id": 2,
"type": "CheckpointLoaderSimple",
"pos": [-77.89600085533732, 74.82955645066255],
"size": [270, 98],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [
{
"localized_name": "ckpt_name",
"name": "ckpt_name",
"type": "COMBO",
"widget": { "name": "ckpt_name" },
"link": null
}
],
"outputs": [
{
"localized_name": "MODEL",
"name": "MODEL",
"type": "MODEL",
"links": [16]
},
{
"localized_name": "CLIP",
"name": "CLIP",
"type": "CLIP",
"links": [6]
},
{ "localized_name": "VAE", "name": "VAE", "type": "VAE", "links": [9] }
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": ["sd_xl_base_1.0.safetensors"]
},
{
"id": 7,
"type": "VAEEncode",
"pos": [284.53127310505045, 128.3471545186551],
"size": [140, 46],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"localized_name": "pixels",
"name": "pixels",
"type": "IMAGE",
"link": 26
},
{ "localized_name": "vae", "name": "vae", "type": "VAE", "link": 9 }
],
"outputs": [
{
"localized_name": "LATENT",
"name": "LATENT",
"type": "LATENT",
"links": [17]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "VAEEncode"
},
"widgets_values": []
},
{
"id": 4,
"type": "CLIPTextEncode",
"pos": [253.2194438349094, 247.68916664261795],
"size": [371.92805908203127, 88],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"localized_name": "clip",
"name": "clip",
"type": "CLIP",
"link": 6
},
{
"localized_name": "text",
"name": "text",
"type": "STRING",
"widget": { "name": "text" },
"link": null
}
],
"outputs": [
{
"localized_name": "CONDITIONING",
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [18]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": ["SBCODE-LORA-TEST"]
},
{
"id": 9,
"type": "PreviewImage",
"pos": [202.33304052932849, 398.8137349625583],
"size": [562.8320621276857, 258],
"flags": {},
"order": 4,
"mode": 0,
"inputs": [
{
"localized_name": "images",
"name": "images",
"type": "IMAGE",
"link": 27
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "PreviewImage"
},
"widgets_values": []
},
{
"id": 10,
"type": "LossGraphNode",
"pos": [1177.5620997865326, 204.39181912515647],
"size": [270, 270],
"flags": {},
"order": 6,
"mode": 0,
"inputs": [
{
"localized_name": "loss",
"name": "loss",
"type": "LOSS_MAP",
"link": 21
},
{
"localized_name": "filename_prefix",
"name": "filename_prefix",
"type": "STRING",
"widget": { "name": "filename_prefix" },
"link": null
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.76",
"Node name for S&R": "LossGraphNode"
},
"widgets_values": ["loss_graph"]
},
{
"id": 15,
"type": "LoadImageDataSetFromFolder",
"pos": [-150.48169446897805, 273.84137321366734],
"size": [313.1083282470703, 58],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [
{
"localized_name": "folder",
"name": "folder",
"type": "COMBO",
"widget": { "name": "folder" },
"link": null
}
],
"outputs": [
{
"localized_name": "images",
"name": "images",
"shape": 6,
"type": "IMAGE",
"links": [26, 27]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.5.1",
"Node name for S&R": "LoadImageDataSetFromFolder"
},
"widgets_values": ["SBCODE-LORA-TEST"]
},
{
"id": 16,
"type": "SaveLoRA",
"pos": [1182.897281116961, -7.243057282548512],
"size": [270, 82],
"flags": {},
"order": 7,
"mode": 0,
"inputs": [
{
"localized_name": "lora",
"name": "lora",
"type": "LORA_MODEL",
"link": 28
},
{
"localized_name": "prefix",
"name": "prefix",
"type": "STRING",
"widget": { "name": "prefix" },
"link": null
},
{
"localized_name": "steps",
"name": "steps",
"shape": 7,
"type": "INT",
"widget": { "name": "steps" },
"link": 29
}
],
"outputs": [],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.5.1",
"Node name for S&R": "SaveLoRA"
},
"widgets_values": ["loras/ComfyUI_trained_lora", 0]
},
{
"id": 12,
"type": "TrainLoraNode",
"pos": [808.3840809293013, 89.02255541469303],
"size": [301.0833282470703, 454],
"flags": {},
"order": 5,
"mode": 0,
"inputs": [
{
"localized_name": "model",
"name": "model",
"type": "MODEL",
"link": 16
},
{
"localized_name": "latents",
"name": "latents",
"type": "LATENT",
"link": 17
},
{
"localized_name": "positive",
"name": "positive",
"type": "CONDITIONING",
"link": 18
},
{
"localized_name": "batch_size",
"name": "batch_size",
"type": "INT",
"widget": { "name": "batch_size" },
"link": null
},
{
"localized_name": "grad_accumulation_steps",
"name": "grad_accumulation_steps",
"type": "INT",
"widget": { "name": "grad_accumulation_steps" },
"link": null
},
{
"localized_name": "steps",
"name": "steps",
"type": "INT",
"widget": { "name": "steps" },
"link": null
},
{
"localized_name": "learning_rate",
"name": "learning_rate",
"type": "FLOAT",
"widget": { "name": "learning_rate" },
"link": null
},
{
"localized_name": "rank",
"name": "rank",
"type": "INT",
"widget": { "name": "rank" },
"link": null
},
{
"localized_name": "optimizer",
"name": "optimizer",
"type": "COMBO",
"widget": { "name": "optimizer" },
"link": null
},
{
"localized_name": "loss_function",
"name": "loss_function",
"type": "COMBO",
"widget": { "name": "loss_function" },
"link": null
},
{
"localized_name": "seed",
"name": "seed",
"type": "INT",
"widget": { "name": "seed" },
"link": null
},
{
"localized_name": "training_dtype",
"name": "training_dtype",
"type": "COMBO",
"widget": { "name": "training_dtype" },
"link": null
},
{
"localized_name": "lora_dtype",
"name": "lora_dtype",
"type": "COMBO",
"widget": { "name": "lora_dtype" },
"link": null
},
{
"localized_name": "algorithm",
"name": "algorithm",
"type": "COMBO",
"widget": { "name": "algorithm" },
"link": null
},
{
"localized_name": "gradient_checkpointing",
"name": "gradient_checkpointing",
"type": "BOOLEAN",
"widget": { "name": "gradient_checkpointing" },
"link": null
},
{
"localized_name": "existing_lora",
"name": "existing_lora",
"type": "COMBO",
"widget": { "name": "existing_lora" },
"link": null
},
{
"localized_name": "bucket_mode",
"name": "bucket_mode",
"type": "BOOLEAN",
"widget": { "name": "bucket_mode" },
"link": null
}
],
"outputs": [
{
"localized_name": "model",
"name": "model",
"type": "MODEL",
"links": null
},
{
"localized_name": "lora",
"name": "lora",
"type": "LORA_MODEL",
"links": [28]
},
{
"localized_name": "loss_map",
"name": "loss_map",
"type": "LOSS_MAP",
"links": [21]
},
{
"localized_name": "steps",
"name": "steps",
"type": "INT",
"links": [29]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.4.0",
"Node name for S&R": "TrainLoraNode"
},
"widgets_values": [
1,
1,
1000,
0.0005,
16,
"AdamW",
"MSE",
59646795917277,
"randomize",
"bf16",
"bf16",
"LoRA",
true,
"[None]",
false
]
}
],
"links": [
[6, 2, 1, 4, 0, "CLIP"],
[9, 2, 2, 7, 1, "VAE"],
[16, 2, 0, 12, 0, "MODEL"],
[17, 7, 0, 12, 1, "LATENT"],
[18, 4, 0, 12, 2, "CONDITIONING"],
[21, 12, 2, 10, 0, "LOSS_MAP"],
[26, 15, 0, 7, 0, "IMAGE"],
[27, 15, 0, 9, 0, "IMAGE"],
[28, 12, 1, 16, 0, "LORA_MODEL"],
[29, 12, 3, 16, 2, "INT"]
],
"groups": [],
"config": {},
"extra": {
"ds": {
"scale": 0.8264462809917354,
"offset": [177.2748562881364, 110.96146021791975]
},
"workflowRendererVersion": "LG"
},
"version": 0.4
}
Decide what rank and how many steps you want to train on. See the video for ideas. A higher rank and more steps produce better results, but are slower to train.
Press the Run button to start training.
If the training was successful, then you will find the generated LoRA file in your ComfyUI/output/loras folder.
Using the LoRA
To use a LoRA, you will need to load the model into your workflow and use the trigger word in the prompt.
Example LoRA trigger word : SBCODE-LORA-TEST
To see an example using this trigger word, drag this image into ComfyUI.

Examples from the Video



Troubleshooting
Error: "Allocation on device" — this error means you ran out of memory on your GPU.
You may need to toggle the gradient_checkpointing flag. Set it to false and run the workflow again; you will likely get an error. Then set it back to true, and this time it should work.
































