mirror of
https://github.com/xai-org/grok-1.git
synced 2025-07-12 18:55:00 +03:00
Compare commits
9 Commits
download-i
...
d129df04a6
Author | SHA1 | Date | |
---|---|---|---|
d129df04a6 | |||
6ed2d78bea | |||
f57a3e2619 | |||
7050ed204b | |||
d6d9447e2d | |||
7207216386 | |||
310e19eee2 | |||
1ff4435d25 | |||
b0e77734fe |
2
.gitignore
vendored
Normal file
2
.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
checkpoints/*
|
||||
!checkpoints/README.md
|
27
README.md
27
README.md
@@ -2,7 +2,8 @@
|
||||
|
||||
This repository contains JAX example code for loading and running the Grok-1 open-weights model.
|
||||
|
||||
Make sure to download the checkpoint and place `ckpt-0` directory in `checkpoint`.
|
||||
Make sure to download the checkpoint and place the `ckpt-0` directory in `checkpoints` - see [Downloading the weights](#downloading-the-weights)
|
||||
|
||||
Then, run
|
||||
|
||||
```shell
|
||||
@@ -17,13 +18,37 @@ The script loads the checkpoint and samples from the model on a test input.
|
||||
Due to the large size of the model (314B parameters), a machine with enough GPU memory is required to test the model with the example code.
|
||||
The implementation of the MoE layer in this repository is not efficient. The implementation was chosen to avoid the need for custom kernels to validate the correctness of the model.
|
||||
|
||||
# Model Specifications
|
||||
|
||||
Grok-1 is currently designed with the following specifications:
|
||||
|
||||
- **Parameters:** 314B
|
||||
- **Architecture:** Mixture of 8 Experts (MoE)
|
||||
- **Experts Utilization:** 2 experts used per token
|
||||
- **Layers:** 64
|
||||
- **Attention Heads:** 48 for queries, 8 for keys/values
|
||||
- **Embedding Size:** 6,144
|
||||
- **Tokenization:** SentencePiece tokenizer with 131,072 tokens
|
||||
- **Additional Features:**
|
||||
- Rotary embeddings (RoPE)
|
||||
- Supports activation sharding and 8-bit quantization
|
||||
- **Maximum Sequence Length (context):** 8,192 tokens
|
||||
|
||||
# Downloading the weights
|
||||
|
||||
You can download the weights using a torrent client and this magnet link:
|
||||
|
||||
```
|
||||
magnet:?xt=urn:btih:5f96d43576e3d386c9ba65b883210a393b68210e&tr=https%3A%2F%2Facademictorrents.com%2Fannounce.php&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce
|
||||
```
|
||||
|
||||
or directly using [HuggingFace 🤗 Hub](https://huggingface.co/xai-org/grok-1):
|
||||
```
|
||||
git clone https://github.com/xai-org/grok-1.git && cd grok-1
|
||||
pip install huggingface_hub[hf_transfer]
|
||||
huggingface-cli download xai-org/grok-1 --repo-type model --include ckpt-0/* --local-dir checkpoints --local-dir-use-symlinks False
|
||||
```
|
||||
|
||||
# License
|
||||
|
||||
The code and associated Grok-1 weights in this release are licensed under the
|
||||
|
@@ -1,4 +1,4 @@
|
||||
dm_haiku==0.0.12
|
||||
jax[cuda12_pip]==0.4.25 -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
|
||||
jax[cuda12-pip]==0.4.25 -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
|
||||
numpy==1.26.4
|
||||
sentencepiece==0.2.0
|
||||
|
114
run.py
114
run.py
@@ -13,60 +13,86 @@
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import hashlib
|
||||
|
||||
from model import LanguageModelConfig, TransformerConfig, QuantizedWeight8bit as QW8Bit
|
||||
from runners import InferenceRunner, ModelRunner, sample_from_model
|
||||
|
||||
|
||||
CKPT_PATH = "./checkpoints/"
|
||||
CKPT_HASH = "expected_checkpoint_hash"
|
||||
|
||||
|
||||
def validate_checkpoint(path, expected_hash):
    """Verify a checkpoint file's integrity against a known SHA-256 digest.

    Args:
        path: Filesystem path of the checkpoint file to check.
        expected_hash: Hex-encoded SHA-256 digest the file must match.

    Raises:
        ValueError: If the computed digest differs from ``expected_hash``.
        OSError: If ``path`` cannot be opened (e.g. it is a directory).
    """
    hasher = hashlib.sha256()
    # Stream in fixed-size chunks so a multi-GB checkpoint never has to fit
    # in RAM; the original one-shot read() also leaked the file handle.
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            hasher.update(chunk)
    if hasher.hexdigest() != expected_hash:
        raise ValueError("Invalid checkpoint file!")
|
||||
|
||||
|
||||
def main():
    """Build the Grok-1 config, load the checkpoint, and sample one prompt.

    Constructs the 314B-parameter model configuration (64 layers, 8 experts
    with 2 selected per token, 48 query / 8 KV heads), wires it into an
    ``InferenceRunner`` pointed at ``CKPT_PATH``, and prints a greedy-ish
    (temperature 0.01) 100-token completion for a fixed test prompt.

    NOTE(review): this span was a garbled diff view. Removed from it:
      - a duplicated copy of the whole config/runner setup (diff residue);
      - ``inference_runner.rate_limit = 100`` injected inside the
        ``ModelRunner(...)`` argument list — a SyntaxError as written;
      - Flask fragments (``@app.route``/``@auth.login_required``) that
        reference names never defined in this file;
      - a ``validate_checkpoint(CKPT_PATH, CKPT_HASH)`` call that can never
        succeed: CKPT_PATH is a directory (open() would raise) and CKPT_HASH
        is the placeholder string "expected_checkpoint_hash". Re-enable it
        against a real checkpoint file and digest if integrity checking is
        wanted.
    """
    grok_1_model = LanguageModelConfig(
        vocab_size=128 * 1024,
        pad_token=0,
        eos_token=2,
        sequence_len=8192,
        embedding_init_scale=1.0,
        output_multiplier_scale=0.5773502691896257,
        embedding_multiplier_scale=78.38367176906169,
        model=TransformerConfig(
            emb_size=48 * 128,
            widening_factor=8,
            key_size=128,
            num_q_heads=48,
            num_kv_heads=8,
            num_layers=64,
            attn_output_multiplier=0.08838834764831845,
            shard_activations=True,
            # MoE.
            num_experts=8,
            num_selected_experts=2,
            # Activation sharding.
            data_axis="data",
            model_axis="model",
        ),
    )
    inference_runner = InferenceRunner(
        pad_sizes=(1024,),
        runner=ModelRunner(
            model=grok_1_model,
            # Fractional batch size: one sample shared across 8 devices.
            bs_per_device=0.125,
            checkpoint_path=CKPT_PATH,
        ),
        name="local",
        load=CKPT_PATH,
        tokenizer_path="./tokenizer.model",
        # One host with 8 local devices; no cross-host sharding.
        local_mesh_config=(1, 8),
        between_hosts_config=(1, 1),
    )
    inference_runner.initialize()
    gen = inference_runner.run()

    inp = "The answer to life the universe and everything is of course"
    print(f"Output for prompt: {inp}", sample_from_model(gen, inp, max_len=100, temperature=0.01))


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    main()
|
||||
|
Reference in New Issue
Block a user