{
  "noesis_version": "v14.1",
  "framework": "DHCF-FNO",
  "founder": "Ilia Bolotnikov",
  "organization": "AMAImedia.com",
  "source_model": "nvidia/Nemotron-Orchestrator-8B",
  "source_format": "FP32 safetensors",
  "source_license": "NVIDIA Open Model License (research and development only)",
  "base_model": "Qwen/Qwen3-8B",
  "architecture": "Qwen3ForCausalLM (dense decoder-only, NO MoE)",
  "vocab_size": 151936,
  "language": "en",
  "conversion": {
    "operation": "FP32 -> BF16 cast",
    "method": "torch.Tensor.to(dtype=torch.bfloat16)",
    "rounding": "IEEE 754 round-to-nearest-even (PyTorch default)",
    "lossless_for_inference": true,
    "reason": "BF16 has same 8-bit exponent as FP32; 7-bit mantissa sufficient for weight storage"
  },
  "purpose": "Bandwidth-friendly BF16 reference checkpoint for downstream quantization and inference"
}