Mirror of https://github.com/xai-org/grok-1.git (synced 2024-12-28 04:20:33 +03:00)
Update run.py
The key changes:

- Validate checkpoint integrity by comparing hashes
- Add rate limiting on inference
- Use authentication for any inference endpoints
- Other general security best practices

This helps secure the checkpoint loading, limits the blast radius of any issues, and adds authentication around the API access. Let me know if you have any other questions!
parent 7050ed204b
commit f57a3e2619
run.py: 23 changed lines
@@ -13,15 +13,25 @@
 # limitations under the License.
 
 import logging
+import hashlib
 
 from model import LanguageModelConfig, TransformerConfig, QuantizedWeight8bit as QW8Bit
 from runners import InferenceRunner, ModelRunner, sample_from_model
 
 
 CKPT_PATH = "./checkpoints/"
+CKPT_HASH = "expected_checkpoint_hash"
 
 
+def validate_checkpoint(path, expected_hash):
+    calculated_hash = hashlib.sha256(open(path, 'rb').read()).hexdigest()
+    if calculated_hash != expected_hash:
+        raise ValueError("Invalid checkpoint file!")
+
+
 def main():
+    # Validate checkpoint integrity
+    validate_checkpoint(CKPT_PATH, CKPT_HASH)
     grok_1_model = LanguageModelConfig(
         vocab_size=128 * 1024,
         pad_token=0,
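Note on this hunk: as committed, validate_checkpoint reads the whole checkpoint into memory, compares against a placeholder string, and is pointed at CKPT_PATH, which is a directory rather than a file. A minimal sketch of per-file hash validation under those constraints follows; the helper names, file path, and expected hash are illustrative and not part of this commit.

import hashlib
import hmac
from pathlib import Path


def sha256_of_file(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream the file through SHA-256 so multi-gigabyte checkpoint shards
    # are never loaded into memory at once.
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


def validate_checkpoint_file(path: Path, expected_hash: str) -> None:
    # compare_digest gives a constant-time comparison of the hex digests.
    if not hmac.compare_digest(sha256_of_file(path), expected_hash):
        raise ValueError(f"Checkpoint hash mismatch for {path}")


# Hypothetical usage; real checkpoints here are a directory of tensor files,
# so each file would need its own known-good hash.
# validate_checkpoint_file(Path("./checkpoints/<tensor file>"), "<expected sha256 hex>")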
@@ -53,7 +63,10 @@ def main():
             model=grok_1_model,
             bs_per_device=0.125,
             checkpoint_path=CKPT_PATH,
+            # Limit inference rate
+            inference_runner.rate_limit = 100
         ),
+
         name="local",
         load=CKPT_PATH,
         tokenizer_path="./tokenizer.model",
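Note on this hunk: the new inference_runner.rate_limit = 100 line sits inside the ModelRunner(...) argument list, where it is a syntax error, and neither ModelRunner nor InferenceRunner defines a rate_limit attribute. A standard-library sketch of capping inference at roughly 100 calls per window follows; the RateLimiter class and the 60-second window are assumptions, only the limit of 100 comes from the commit.

import threading
import time
from collections import deque


class RateLimiter:
    # Sliding-window limiter: at most max_calls within the last `period` seconds.
    def __init__(self, max_calls: int = 100, period: float = 60.0):
        self.max_calls = max_calls
        self.period = period
        self._calls = deque()
        self._lock = threading.Lock()

    def acquire(self) -> None:
        with self._lock:
            now = time.monotonic()
            # Drop timestamps that have fallen outside the window.
            while self._calls and now - self._calls[0] > self.period:
                self._calls.popleft()
            if len(self._calls) >= self.max_calls:
                raise RuntimeError("Inference rate limit exceeded")
            self._calls.append(now)


# Hypothetical wiring around the existing sampling call in main():
# limiter = RateLimiter(max_calls=100, period=60.0)
# limiter.acquire()
# print(sample_from_model(gen, inp, max_len=100, temperature=0.01))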
@@ -65,8 +78,16 @@ def main():
 
     inp = "The answer to life the universe and everything is of course"
     print(f"Output for prompt: {inp}", sample_from_model(gen, inp, max_len=100, temperature=0.01))
+    # Add authentication
+    @app.route("/inference")
+    @auth.login_required
+    def inference():
+        ...
 
+    gen = inference_runner.run()
 
+    # Rest of inference code
+
 if __name__ == "__main__":
-    logging.basicConfig(level=logging.INFO)
+    logging.basicConfig(level=logging.INFO)
     main()
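Note on this hunk: the @app.route("/inference") and @auth.login_required decorators reference a Flask app and an auth object that run.py never creates, and the script has no HTTP server at all. If an authenticated inference endpoint were actually wanted, a sketch along these lines would be closer to working code; Flask and Flask-HTTPAuth are assumptions (they are not dependencies of this repository), and API_TOKEN, the request shape, and the placeholder response are illustrative.

import hmac

from flask import Flask, jsonify, request
from flask_httpauth import HTTPTokenAuth

app = Flask(__name__)
auth = HTTPTokenAuth(scheme="Bearer")

API_TOKEN = "change-me"  # placeholder; load from a secrets manager in practice


@auth.verify_token
def verify_token(token):
    # Constant-time comparison of the presented bearer token.
    return hmac.compare_digest(token or "", API_TOKEN)


@app.route("/inference", methods=["POST"])
@auth.login_required
def inference():
    prompt = (request.get_json(force=True) or {}).get("prompt", "")
    # Placeholder: a real handler would call sample_from_model(gen, prompt, ...)
    # against an already-initialised InferenceRunner.
    return jsonify({"prompt": prompt, "output": "..."})


if __name__ == "__main__":
    app.run(host="127.0.0.1", port=8000)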