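"""Minimal CLI agent: turns a natural-language instruction into a plan using
TinyLlama-1.1B-Chat with a LoRA adapter, echoes a dry-run for recognized shell
commands, and appends each interaction to logs/trace.jsonl."""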
import sys
import json
import os
from datetime import datetime
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from peft import PeftModel, PeftConfig

# Load model and tokenizer
def load_model():
    base_model = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
    adapter_path = "Harish2002/cli-lora-tinyllama"  # ✅ fixed path
    tokenizer = AutoTokenizer.from_pretrained(base_model)
    model = AutoModelForCausalLM.from_pretrained(base_model)
    model = PeftModel.from_pretrained(model, adapter_path)
    return tokenizer, model
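
# Note (an assumption, not part of the original script): from_pretrained loads the
# base model on CPU in full precision by default; for GPU inference you could pass
# device_map="auto" (requires the `accelerate` package) and a torch_dtype.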

# Generate plan from input instruction
def generate_plan(prompt, tokenizer, model):
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=256,
        return_full_text=False,  # return only the completion, not the echoed prompt
    )
    output = pipe(prompt)[0]["generated_text"]
    return output.strip()

# Heuristic: treat a line as a shell command if it starts with a known prefix
def is_shell_command(line):
    return line.startswith(("git", "bash", "tar", "gzip", "grep", "python", "./", "cd", "ls"))

# Log to logs/trace.jsonl
def log_trace(prompt, response):
    os.makedirs("logs", exist_ok=True)
    trace = {
        "timestamp": datetime.utcnow().isoformat(),
        "input": prompt,
        "response": response,
    }
    with open("logs/trace.jsonl", "a") as f:
        f.write(json.dumps(trace) + "\n")
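
# Each appended line is one JSON object, for example (illustrative values):
# {"timestamp": "2025-01-01T12:00:00", "input": "...", "response": "..."}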

# Main
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage: python agent.py \"Your instruction here\"")
        sys.exit(1)
    user_input = sys.argv[1]
    tokenizer, model = load_model()
    result = generate_plan(user_input, tokenizer, model)

    # Print result and echo dry-run if it's a shell command
    print("\nGenerated Plan:\n")
    print(result)
    lines = result.splitlines()
    if lines and is_shell_command(lines[0]):  # guard against an empty response
        print("\nDry-run:")
        print(f"echo {lines[0]}")
    log_trace(user_input, result)
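
# Example invocation (hypothetical instruction; model output will vary):
#   python agent.py "compress the logs directory into a tarball"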