初始化项目,由ModelHub XC社区提供模型
Model: AMAImedia/Qwen3-8B-Nemotron-Orchestrator-NOESIS-BF16 Source: Original Platform
This commit is contained in:
21
noesis_provenance.json
Normal file
21
noesis_provenance.json
Normal file
@@ -0,0 +1,21 @@
{
  "noesis_version": "v14.1",
  "framework": "DHCF-FNO",
  "founder": "Ilia Bolotnikov",
  "organization": "AMAImedia.com",
  "source_model": "nvidia/Nemotron-Orchestrator-8B",
  "source_format": "FP32 safetensors",
  "source_license": "NVIDIA Open Model License (research and development only)",
  "base_model": "Qwen/Qwen3-8B",
  "architecture": "Qwen3ForCausalLM (dense decoder-only, NO MoE)",
  "vocab_size": 151936,
  "language": "en",
  "conversion": {
    "operation": "FP32 -> BF16 cast",
    "method": "torch.Tensor.to(dtype=torch.bfloat16)",
    "rounding": "IEEE 754 round-to-nearest-even (PyTorch default)",
    "lossless_for_inference": true,
    "reason": "BF16 has same 8-bit exponent as FP32; 7-bit mantissa sufficient for weight storage"
  },
  "purpose": "Bandwidth-friendly BF16 reference checkpoint for downstream quantization and inference"
}
Reference in New Issue
Block a user