#!/usr/bin/env python
"""
DeepSeek-V3-Base 671B · QLoRA NF4 · ZeRO-3 (CPU off-load)
Loads **locally** from /workspace/DeepSeek-V3-Base and force-casts FP8 → FP16
before 4-bit quantisation, so bitsandbytes is happy.
"""
# ── 1 · ENV flags ──────────────────────────────────────────────────────────
import os, gc, time, logging
from pathlib import Path
os.environ["TRANSFORMERS_NO_FLASH_ATT N"] = "1" # disable FlashAttention
os.environ["DEEPSEEK_MOE_EXPERTS"] = "2" # fewer experts/token
os.environ["HF_HUB_OFFLINE"] = "1" # never hit the Hub
# ── 2 · Imports ────────────────────────────────────────────────────────────
import torch
from accelerate import Accelerator
from datasets import load_dataset, load_from_disk, DatasetDict
from transformers import (
AutoTokenizer, AutoConfig, AutoModelForCausalLM, BitsAndBytesConfig
)
from peft import LoraConfig, get_peft_model
import deepspeed.utils.groups as ds_groups
from tqdm.auto import tqdm
# ── 2.1 · DeepSpeed EP-group helper (works on ≤0.14 and ≥0.15) ─────────────
def ensure_ep_groups(ep=8):
    if hasattr(ds_groups, "is_initialized"):
        if not ds_groups.is_initialized():
            ds_groups.initialize(ep_size=ep)
    elif hasattr(ds_groups, "initialized"):
        if not ds_groups.initialized():
            ds_groups.initialize(ep_size=ep)
ensure_ep_groups(8)
# ── 3 · Logging & paths ────────────────────────────────────────────────────
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
log = logging.getLogger("train_qlora")
MODEL_DIR = Path("/workspace/DeepSeek-V3-Base")   # ← change if needed
DATA_JSONL = "/workspace/data/splits/train.jsonl"
CACHE_DIR = "/workspace/data/processed_10k_nf4"
if not MODEL_DIR.exists():
    raise FileNotFoundError(f"Local model dir {MODEL_DIR} not found.")
# ── 4 · Hyper-params (edit freely) ─────────────────────────────────────────
epochs = 0.1 # smoke-test
batch_size = 1
grad_accum = 16
lr = 1e-4
num_samples = 10_000
# ── 5 · Main ───────────────────────────────────────────────────────────────
def main():
    acc = Accelerator(mixed_precision="bf16")
    log.info(f"Rank {acc.process_index} on {acc.device}")
    # 5.1 Tokeniser
    tok = AutoTokenizer.from_pretrained(MODEL_DIR, use_fast=False, trust_remote_code=True)
    tok.pad_token = tok.eos_token
    tok.padding_side = "right"
    # 5.2 4-bit config (bnb_4bit_preprocess_dtype is not a BitsAndBytesConfig
    # argument; the FP8 → FP16 cast is done via torch_dtype at load time below,
    # before bitsandbytes quantises)
    bnb_cfg = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    # 5.3 Model load: **force FP16 cast to dodge float-8**
    log.info("Loading DeepSeek-V3 locally (FP8 → FP16 → 4-bit)…")
    t0 = time.time()
    cfg = AutoConfig.from_pretrained(MODEL_DIR, trust_remote_code=True)
    cfg.ep_size = 8
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_DIR,
        config=cfg,
        device_map="balanced_low_0",
        torch_dtype=torch.float16,        # ← key line: cast FP8 → FP16 before quantising
        quantization_config=bnb_cfg,
        trust_remote_code=True,
    )
    model.gradient_checkpointing_enable()
    log.info(f"Model ready in {time.time()-t0:.1f}s")
    # 5.4 LoRA adapters
    lora_cfg = LoraConfig(
        r=16, lora_alpha=16, bias="none", task_type="CAUSAL_LM",
        target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                        "gate", "up_proj", "down_proj"],
    )
    model = get_peft_model(model, lora_cfg)
    # 5.5 Dataset
    if Path(CACHE_DIR).exists():
        ds = load_from_disk(CACHE_DIR)
    else:
        raw = load_dataset("json", data_files={"train": DATA_JSONL}, split="train")
        raw = raw.shuffle(seed=42).select(range(num_samples))
        split = int(0.95 * num_samples)
        tr_raw, va_raw = raw.select(range(split)), raw.select(range(split, num_samples))
        def _tok(b):
            x = [f"<|begin_of_sentence|>User: {p}\nAssistant:" for p in b["prompt"]]
            y = [f"{r}<|end_of_sentence|>" for r in b["response"]]
            enc = tok(x, max_length=1024, truncation=True, padding="max_length")
            dec = tok(y, max_length=1024, truncation=True, padding="max_length")
            enc["labels"] = dec["input_ids"]
            return enc
        tr_ds = tr_raw.map(_tok, batched=True, remove_columns=["prompt", "response"])
        va_ds = va_raw.map(_tok, batched=True, remove_columns=["prompt", "response"])
        tr_ds.set_format("torch"); va_ds.set_format("torch")
        ds = DatasetDict(train=tr_ds, validation=va_ds)   # save_to_disk returns None, so keep the dict
        ds.save_to_disk(CACHE_DIR)
    # 5.6 Dataloaders & optimiser
    from torch.utils.data import DataLoader
    tr = DataLoader(ds["train"], batch_size=batch_size, shuffle=True)
    va = DataLoader(ds["validation"], batch_size=batch_size)
    opt = torch.optim.AdamW(model.parameters(), lr=lr)
    model, tr, va, opt = acc.prepare(model, tr, va, opt)
    steps = int(epochs * len(tr) // grad_accum)
    log.info(f"Training {epochs} epochs (≈{steps} steps)")
    # 5.7 Training loop
    model.train(); g_step = 0
    for ep in range(int(epochs + 0.999)):
        t_ep, loss_sum = time.time(), 0.0
        for i, batch in enumerate(tqdm(tr, disable=not acc.is_local_main_process)):
            loss = model(**batch).loss / grad_accum
            acc.backward(loss)
            if (i + 1) % grad_accum == 0:
                opt.step(); opt.zero_grad(); g_step += 1
            loss_sum += loss.item() * grad_accum
            if g_step >= steps: break
        log.info(f"Epoch {ep} done ({time.time()-t_ep:.1f}s) "
                 f"avg loss {loss_sum/(i+1):.4f}")   # average over micro-batches actually seen
        gc.collect(); torch.cuda.empty_cache()
        if g_step >= steps: break
    # 5.8 Save LoRA
    if acc.is_main_process:
        out = Path("ckpt/lora_n16_ep0p1"); out.mkdir(parents=True, exist_ok=True)
        acc.unwrap_model(model).save_pretrained(out, safe_serialization=True)   # unwrap the accelerate/DeepSpeed wrapper first
        log.info(f"LoRA saved → {out}")
if __name__ == "__main__":
    main()
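# Launch note: a minimal sketch of how this script is typically started, assuming
# an Accelerate config file that enables DeepSpeed ZeRO-3 with CPU off-load. The
# config file name, process count, and script file name below are placeholders,
# not values taken from this repo:
#
#   accelerate launch --num_processes 8 --config_file zero3_cpu_offload.yaml train_qlora.py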