W0406 05:48:52.138000 4182076 torch/distributed/run.py:803]
W0406 05:48:52.138000 4182076 torch/distributed/run.py:803] *****************************************
W0406 05:48:52.138000 4182076 torch/distributed/run.py:803] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0406 05:48:52.138000 4182076 torch/distributed/run.py:803] *****************************************
Hyperparameters:
  adam_eps: 1e-08
  adam_wd: 0.02
  beta1: 0.9
  beta2: 0.95
  compressor: brotli
  data_dir: ./data/
  datasets_dir: ./data/datasets/fineweb10B_sp8192
  distributed: True
  ema_decay: 0.997
  embed_bits: 8
  embed_clip_sigmas: 20.0
  embed_lr: 0.6
  embed_wd: 0.085
  embedding_dim: 512
  enable_looping_at: 0.5
  eval_seq_len: 2048
  eval_stride: 64
  gptq_calibration_batches: 64
  gptq_reserve_seconds: 12.0
  grad_accum_steps: 1
  grad_clip_norm: 0.3
  head_lr: 0.008
  hessian_clip_lambda: 0.175
  is_main_process: True
  iterations: 20000
  ln_scale: True
  local_rank: 0
  logfile: logs/f3971278-d577-499b-8fde-755434809ba9.txt
  logit_softcap: 30.0
  loop_end: 5
  loop_layer_bits: 0
  loop_layer_clip_sigmas: 0.0
  loop_phase2_at: 0.65
  loop_start: 4
  matrix_bits: 6
  matrix_clip_sigmas: 12.85
  matrix_lr: 0.02
  max_wallclock_seconds: 600.0
  min_lr: 0.0
  mlp_mult: 4.0
  model_dim: 512
  model_path: final_model.pt
  muon_backend_steps: 5
  muon_beta2: 0.95
  muon_momentum: 0.99
  muon_momentum_warmup_start: 0.92
  muon_momentum_warmup_steps: 1500
  muon_row_normalize: True
  muon_wd: 0.085
  num_heads: 8
  num_kv_heads: 4
  num_layers: 11
  num_loops: 2
  parallel_residual_start: 7
  qk_gain_init: 4.0
  quantized_model_path: final_model.int6.ptz
  rank: 0
  rope_base: 10000.0
  rope_dims: 16
  rope_train_seq_len: 2048
  run_id: f3971278-d577-499b-8fde-755434809ba9
  scalar_lr: 0.02
  seed: 1337
  skip_gates_enabled: True
  sliding_window_enabled: True
  tie_embeddings: True
  tied_embed_init_std: 0.005
  tied_embed_lr: 0.03
  tokenizer_path: ./data/tokenizers/fineweb_8192_bpe.model
  train_batch_tokens: 786432
  train_files: ./data/datasets/fineweb10B_sp8192/fineweb_train_*.bin
  train_log_every: 500
  train_seq_len: 2048
  ttt_chunk_tokens: 32768
  ttt_enabled: False
  ttt_entropy_high: 2.1
  ttt_entropy_low: 1.75
  ttt_epochs: 4
  ttt_freeze_blocks: 2
  ttt_lr: 0.0005
  ttt_ns_steps: 3
  untie_loop_mlps: False
  val_batch_tokens: 524288
  val_files: ./data/datasets/fineweb10B_sp8192/fineweb_val_*.bin
  val_loss_every: 4000
  vocab_size: 8192
  warmdown_frac: 0.667
  warmup_steps: 20
  world_size: 8
  xsa_last_n: 11
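Note: the config mixes Muon-style matrix settings with Adam settings. A minimal sketch of how these knobs might map onto optimizer parameter groups, following the common Muon convention (2-D weight matrices to Muon, everything else to Adam); the run's actual grouping code is not in this log, and the finer splits implied by embed_lr, head_lr, and tied_embed_lr are not reconstructable from it:

    from torch import nn

    def param_groups(model: nn.Module):
        # Illustrative only: 2-D matrices get the Muon settings,
        # embeddings/gains/scalars get the Adam settings.
        matrices = [p for p in model.parameters() if p.ndim == 2]
        others   = [p for p in model.parameters() if p.ndim != 2]
        muon_group = dict(params=matrices, lr=0.02,            # matrix_lr
                          momentum=0.99, weight_decay=0.085)   # muon_momentum, muon_wd
        adam_group = dict(params=others, lr=0.02,              # scalar_lr
                          betas=(0.9, 0.95), eps=1e-8,         # beta1, beta2, adam_eps
                          weight_decay=0.02)                   # adam_wd
        return muon_group, adam_group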
train_shards: 128
val_tokens: 40540160
model_params:35943512
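Note: the parameter count is consistent with the config above. A rough check, assuming GQA key/value projections sized by num_kv_heads and the embedding counted once because it is tied with the output head:

    d, layers, vocab = 512, 11, 8192        # model_dim, num_layers, vocab_size
    head_dim = d // 8                       # num_heads = 8
    kv_dim = 4 * head_dim                   # num_kv_heads = 4 (GQA) -> 256
    attn = d*d + 2*d*kv_dim + d*d           # c_q, c_k, c_v, proj
    mlp = 2 * d * int(4.0 * d)              # fc + proj, mlp_mult = 4.0
    embed = vocab * d                       # tied with the head
    print(layers * (attn + mlp) + embed)    # 35913728; the ~30k remainder
                                            # plausibly gains/gates/scales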
hessian_clip: lambda=0.175
parallel_residuals: ON (layers 7-10)
progressive_recurrence: phase1=0.5 phase2=0.65
gptq:reserving 12s, effective=588000ms
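Note: the effective budget is direct arithmetic over two hyperparameters above:

    effective_ms = int((600.0 - 12.0) * 1000)  # max_wallclock_seconds - gptq_reserve_seconds
    assert effective_ms == 588000              # matches the line above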
warmup_step: 1/20
warmup_step: 2/20
warmup_step: 3/20
warmup_step: 4/20
warmup_step: 5/20
warmup_step: 6/20
warmup_step: 10/20
warmup_step: 20/20
loop_warmup_phase1: encoder:[0, 1, 2, 3, 4, 5] decoder:[4, 5, 6, 7, 8, 9, 10]
loop_warmup_p1_step: 1/20
loop_warmup_p1_step: 2/20
loop_warmup_p1_step: 3/20
loop_warmup_p1_step: 4/20
loop_warmup_p1_step: 5/20
loop_warmup_p1_step: 6/20
loop_warmup_p1_step: 10/20
loop_warmup_p1_step: 20/20
loop_warmup_phase2: encoder:[0, 1, 2, 3, 4, 5, 4] decoder:[5, 4, 5, 6, 7, 8, 9, 10]
loop_warmup_p2_step: 1/20
loop_warmup_p2_step: 2/20
loop_warmup_p2_step: 3/20
loop_warmup_p2_step: 4/20
loop_warmup_p2_step: 5/20
loop_warmup_p2_step: 6/20
loop_warmup_p2_step: 10/20
loop_warmup_p2_step: 20/20
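Note: concatenated (encoder + decoder), the phase-1 and phase-2 index lists above are consistent with replaying the looped segment loop_start..loop_end (layers 4-5) one and two extra times respectively, matching num_loops: 2. A minimal reconstruction, assumed since the scheduling code isn't in the log:

    def layer_schedule(num_layers=11, loop_start=4, loop_end=5, extra_loops=1):
        # One straight pass through all layers, with the looped segment
        # replayed `extra_loops` additional times after its first pass.
        seg = list(range(loop_start, loop_end + 1))
        layers = list(range(num_layers))
        return layers[:loop_end + 1] + seg * extra_loops + layers[loop_end + 1:]

    # extra_loops=1 -> [0,1,2,3,4,5, 4,5, 6,7,8,9,10]      (phase 1)
    # extra_loops=2 -> [0,1,2,3,4,5, 4,5,4,5, 6,7,8,9,10]  (phase 2)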
0/20000 val_loss: 9.0052 val_bpb: 3.4862
1/20000 train_loss: 9.0086 train_time: 0.0m tok/s: 8101558 looping:False
2/20000 train_loss: 12.1911 train_time: 0.0m tok/s: 8047236 looping:False
3/20000 train_loss: 11.0242 train_time: 0.0m tok/s: 7763017 looping:False
4/20000 train_loss: 9.5010 train_time: 0.0m tok/s: 7744899 looping:False
5/20000 train_loss: 8.3911 train_time: 0.0m tok/s: 7764479 looping:False
500/20000 train_loss: 3.3106 train_time: 0.8m tok/s: 7721577 looping:False
1000/20000 train_loss: 3.2012 train_time: 1.7m tok/s: 7711891 looping:False
1500/20000 train_loss: 3.1827 train_time: 2.5m tok/s: 7711479 looping:False
2000/20000 train_loss: 2.9936 train_time: 3.4m tok/s: 7711845 looping:False
2500/20000 train_loss: 3.0679 train_time: 4.2m tok/s: 7713114 looping:False
layer_loop:phase1 step:2884 frac:0.500
3000/20000 train_loss: 3.1068 train_time: 5.1m tok/s: 7668374 looping:True
3500/20000 train_loss: 2.9483 train_time: 6.1m tok/s: 7510155 looping:True
layer_loop:phase2 step:3634 frac:0.650
4000/20000 train_loss: 2.9482 train_time: 7.2m tok/s: 7297673 looping:True
4000/20000 val_loss: 2.9279 val_bpb: 1.1335
4500/20000 train_loss: 2.8499 train_time: 8.3m tok/s: 7110716 looping:True
5000/20000 train_loss: 2.8598 train_time: 9.4m tok/s: 6967320 looping:True
5178/20000 val_loss: 2.8121 val_bpb: 1.0887
stopping_early: wallclock_cap train_time: 588103ms step: 5178/20000
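Note: the early stop is consistent with the 588 s effective budget, and the overall throughput checks out against the per-step log lines:

    tokens = 5178 * 786432          # steps taken * train_batch_tokens
    print(tokens / 588.103)         # ~6.92M tok/s, in line with the ~7.0M
                                    # reported at step 5000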
peak memory allocated: 34604 MiB reserved: 34634 MiB
ema:applying EMA weights
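Note: a minimal sketch of what "applying EMA weights" with ema_decay: 0.997 usually means (illustrative; the run's actual EMA code is not shown):

    import torch

    @torch.no_grad()
    def apply_ema(model, ema_params, decay=0.997):
        # Shadow weights are assumed maintained during training as
        #   ema_p = decay * ema_p + (1 - decay) * p   (per step)
        # and swapped in for the final eval, as this log line does.
        for p, ema_p in zip(model.parameters(), ema_params):
            p.copy_(ema_p)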
pre-quantization post-ema val_loss:2.80947408 val_bpb:1.08764765 eval_time:6554ms
Serialized model: 135426937 bytes
Code size: 78688 bytes
GPTQ:collecting Hessians from calibration data...
GPTQ:collected 67 Hessians in 11.3s
GPTQ:saved Hessian diagnostics to hessian_diagnostics.pt (67 matrices)
Quantized weights:
  gptq (int6): blocks.attn.c_k.weight, blocks.attn.c_q.weight, blocks.attn.c_v.weight, blocks.attn.proj.weight, blocks.mlp.fc.weight, blocks.mlp.proj.weight
  gptq (int8): tok_emb.weight
  passthrough (float16): blocks.attn.q_gain, blocks.attn_scale, blocks.mlp_scale, blocks.resid_mix, skip_gates, skip_weights
Serialized model quantized+brotli: 15976275 bytes
Total submission size quantized+brotli: 16054963 bytes
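Note: the totals add up directly, and the shrink factor is what int6/int8 weights plus brotli would suggest:

    assert 15976275 + 78688 == 16054963   # model bytes + code bytes = submission
    print(135426937 / 15976275)           # ~8.5x smaller than the fp checkpoint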
quantized val_loss:2.84032998 val_bpb:1.09959307 eval_time:8134ms
quantized_sliding_window val_loss:2.79749368 val_bpb:1.08300961 eval_time:82837ms
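Note: val_bpb tracks val_loss by a fixed factor throughout the log, which looks like the usual nats-to-bits-per-byte conversion with a dataset-wide bytes-per-token ratio (≈3.73 here, plausible for an 8192-token BPE on FineWeb); this interpretation is inferred from the numbers, not stated in the log:

    import math
    bpt = 9.0052 / (3.4862 * math.log(2))   # implied bytes/token ≈ 3.727 (step-0 eval)
    def bpb(loss_nats):
        return loss_nats / (math.log(2) * bpt)
    print(round(bpb(2.79749368), 4))        # 1.083, matching the sliding-window eval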