{ "architectures": [ "CustomLLM" ], "hidden_size": 768, "model_type": "custom_llm", "num_attention_heads": 12, "num_layers": 6, "torch_dtype": "float32", "transformers_version": "4.48.3", "vocab_size": 50257 }