Nekochu committed
Commit 57df0f6
1 Parent(s): 6d9fb39

fix: save PEFT adapter (not full model), remove random suffix from LoRA names, fix epoch cap to 1000

Files changed (2):
  1. app.py +1 -5
  2. train_engine.py +4 -1
app.py CHANGED
@@ -573,13 +573,9 @@ def gradio_main():
        return

    lora_name = (lora_name or "").strip() or "my-lora"
-   # Sanitize: alphanumeric, dash, underscore only
    lora_name = "".join(c if c.isalnum() or c in "-_" else "-" for c in lora_name)
-   # Append random suffix to prevent naming collisions between users
-   suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=4))
-   lora_name = f"{lora_name}-{suffix}"

-   epochs = max(1, min(int(epochs), 10))
+   epochs = max(1, min(int(epochs), 1000))
    lr = float(lr)
    rank = max(1, min(int(rank), 64))
 
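For context, a minimal standalone sketch of what the kept sanitization line and the new epoch clamp do. This is illustrative only, not the app's actual handler; raw_name and the sample values are made up:

    raw_name = "my lora! v2"                 # hypothetical user input
    lora_name = (raw_name or "").strip() or "my-lora"
    lora_name = "".join(c if c.isalnum() or c in "-_" else "-" for c in lora_name)
    print(lora_name)                         # -> "my-lora--v2"

    epochs = max(1, min(int("5000"), 1000))  # out-of-range input clamped to [1, 1000]
    print(epochs)                            # -> 1000

With the random suffix removed, re-training under the same name presumably reuses one output path instead of producing a fresh "my-lora-x7qk"-style directory on every run.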
train_engine.py CHANGED
@@ -442,7 +442,10 @@ def inject_lora(model, lora_cfg: LoRAConfig) -> Tuple[Any, Dict[str, Any]]:

def save_lora_adapter(model, output_dir: str) -> None:
    os.makedirs(output_dir, exist_ok=True)
-   decoder = _unwrap_decoder(model)
+   # Use the PEFT-wrapped decoder (model.decoder), NOT the unwrapped base model.
+   # _unwrap_decoder strips the PEFT wrapper, causing save_pretrained to save
+   # the full model instead of just the LoRA adapter weights.
+   decoder = model.decoder if hasattr(model, "decoder") else model

    if hasattr(decoder, "save_pretrained"):
        decoder.save_pretrained(output_dir)
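For reference, a minimal sketch of why the wrapper matters, assuming a standard Hugging Face PEFT setup (gpt2 and the LoraConfig values here are illustrative, and _unwrap_decoder is this repo's own helper, not a PEFT API):

    from peft import LoraConfig, get_peft_model
    from transformers import AutoModelForCausalLM

    base = AutoModelForCausalLM.from_pretrained("gpt2")   # stand-in base model
    peft_model = get_peft_model(base, LoraConfig(r=8, target_modules=["c_attn"]))

    # On the PEFT wrapper, save_pretrained writes only the adapter files
    # (adapter_config.json + adapter_model.safetensors), a few MB at most.
    peft_model.save_pretrained("out/adapter")

    # On the unwrapped base model, save_pretrained writes the full weights
    # (hundreds of MB even for gpt2), which is the behavior this commit fixes.
    peft_model.get_base_model().save_pretrained("out/full_model")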