biswanath2.roul committed
Commit · 6f40440
1 Parent(s): e54fd17

Update documentation and remove deprecated files
- README.md +12 -12
- docs/advanced_features.md +10 -10
- docs/api_reference.md +9 -9
- docs/cli_usage.md +27 -27
- docs/getting_started.md +4 -4
- docs/integration_examples.md +6 -6
- hello.py +2 -0
- {promptlab → llmpromptkit}/__init__.py +2 -2
- {promptlab → llmpromptkit}/cli/__init__.py +0 -0
- {promptlab → llmpromptkit}/cli/commands.py +2 -2
- {promptlab → llmpromptkit}/core/__init__.py +0 -0
- {promptlab → llmpromptkit}/core/evaluation.py +0 -0
- {promptlab → llmpromptkit}/core/prompt_manager.py +1 -1
- {promptlab → llmpromptkit}/core/testing.py +0 -0
- {promptlab → llmpromptkit}/core/version_control.py +0 -0
- {promptlab → llmpromptkit}/examples/__init__.py +0 -0
- {promptlab → llmpromptkit}/examples/ab_testing.py +4 -4
- {promptlab → llmpromptkit}/examples/basic_usage.py +4 -4
- {promptlab → llmpromptkit}/examples/evaluation_example.py +4 -4
- {promptlab → llmpromptkit}/tests/__init__.py +0 -0
- {promptlab → llmpromptkit}/tests/test_evaluation.py +0 -0
- {promptlab → llmpromptkit}/tests/test_prompt_manager.py +1 -1
- {promptlab → llmpromptkit}/tests/test_testing.py +0 -0
- {promptlab → llmpromptkit}/tests/test_version_control.py +0 -0
- {promptlab → llmpromptkit}/utils/__init__.py +0 -0
- {promptlab → llmpromptkit}/utils/metrics.py +0 -0
- {promptlab → llmpromptkit}/utils/storage.py +1 -1
- {promptlab → llmpromptkit}/utils/templating.py +0 -0
- llmpromptkit_storage/prompts/0a6614b65f.json +15 -0
- llmpromptkit_storage/prompts/4e9d34fb56.json +15 -0
- llmpromptkit_storage/tests/test_cases/6c30b375-7.json +12 -0
- llmpromptkit_storage/versions/0a6614b65f/v1.json +8 -0
- llmpromptkit_storage/versions/0a6614b65f/v2.json +8 -0
- pyproject.toml +7 -7
- simple_test.py +15 -0
- test_script.py +81 -0
README.md
CHANGED
@@ -1,6 +1,6 @@
+# LLMPromptKit: LLM Prompt Management System

+LLMPromptKit is a comprehensive library for managing, versioning, testing, and evaluating prompts for Large Language Models (LLMs). It provides a structured framework to help data scientists and developers create, optimize, and maintain high-quality prompts.

## Features

@@ -24,11 +24,11 @@ For detailed documentation, see the [docs](./docs) directory:
## Installation

```bash
+pip install llmpromptkit

Quick Start

+from llmpromptkit import PromptManager, VersionControl, PromptTesting, Evaluator

# Initialize components
prompt_manager = PromptManager()

@@ -91,25 +91,25 @@ evaluation_result = asyncio.run(evaluator.evaluate_prompt(
print(f"Evaluation metrics: {evaluation_result['aggregated_metrics']}")

Command-line Interface
+LLMPromptKit comes with a powerful CLI for managing prompts:

# Create a prompt
+llmpromptkit prompt create "Summarization" --content "Summarize: {text}" --tags "summarization,basic"

# List all prompts
+llmpromptkit prompt list

# Create a new version
+llmpromptkit version commit <prompt_id> --message "Updated prompt"

# Run tests
+llmpromptkit test run-all <prompt_id> --llm openai

Advanced Usage
Advanced Templating
+LLMPromptKit supports advanced templating with conditionals and loops:

+from llmpromptkit import PromptTemplate

template = PromptTemplate("""
{system_message}

@@ -134,7 +134,7 @@ rendered = template.render(

Custom Evaluation Metrics
Create custom metrics to evaluate prompt performance:
+from llmpromptkit import EvaluationMetric, Evaluator

class CustomMetric(EvaluationMetric):
    def __init__(self):
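The README's custom-metric snippet breaks off at the constructor. A minimal sketch of how such a subclass might be completed; the `compute` hook name and signature are assumptions, since the `EvaluationMetric` base class is not shown in this commit:

```python
from llmpromptkit import EvaluationMetric

class CustomMetric(EvaluationMetric):
    """Toy metric: fraction of expected keywords found in the output."""

    def __init__(self):
        self.keywords = ["temperature", "precipitation", "wind"]

    # Assumed hook name; adapt the method name/signature to the real base class.
    def compute(self, output: str, expected: str = "") -> float:
        text = output.lower()
        hits = sum(1 for kw in self.keywords if kw in text)
        return hits / len(self.keywords)
```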
docs/advanced_features.md
CHANGED
@@ -1,15 +1,15 @@
# Advanced Features

+LLMPromptKit provides several advanced features for sophisticated prompt engineering.

## Advanced Templating

+LLMPromptKit's templating system goes beyond simple variable substitution, offering conditionals and loops.

### Basic Variable Substitution

```python
+from llmpromptkit import PromptTemplate

# Simple variable substitution
template = PromptTemplate("Hello, {name}!")

@@ -101,7 +101,7 @@ You can create custom metrics to evaluate prompt outputs based on your specific
### Creating a Custom Metric

```python
+from llmpromptkit import EvaluationMetric

class RelevanceMetric(EvaluationMetric):
    """Evaluates relevance of output to a given topic."""

@@ -129,7 +129,7 @@ class RelevanceMetric(EvaluationMetric):
### Using Custom Metrics

```python
+from llmpromptkit import Evaluator, PromptManager

# Initialize components
prompt_manager = PromptManager()

@@ -156,7 +156,7 @@ print(f"Relevance score: {results['aggregated_metrics']['relevance']}")

## Customizing Storage

+LLMPromptKit allows you to customize where and how prompts and related data are stored.

### Custom Storage Locations

@@ -180,13 +180,13 @@ with open("exported_prompt.json", "r") as f:

## LLM Integration

+LLMPromptKit is designed to work with any LLM through callback functions. Here are examples of integrating with popular LLM APIs.

### OpenAI Integration

```python
import openai
+from llmpromptkit import PromptManager, PromptTesting

prompt_manager = PromptManager()
testing = PromptTesting(prompt_manager)

@@ -212,7 +212,7 @@ test_results = await testing.run_all_tests("abc123", openai_callback)

```python
import anthropic
+from llmpromptkit import PromptManager, Evaluator

prompt_manager = PromptManager()
evaluator = Evaluator(prompt_manager)

@@ -242,7 +242,7 @@ eval_results = await evaluator.evaluate_prompt(
```python
from transformers import pipeline
import asyncio
+from llmpromptkit import PromptManager, VersionControl

prompt_manager = PromptManager()
version_control = VersionControl(prompt_manager)
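The OpenAI integration hunk above only shows the setup lines. A minimal sketch of the callback shape that `run_all_tests` is documented to accept, using the `(prompt, vars)` async signature from the bundled examples; the model call itself is left as a stub rather than assuming a specific OpenAI client API:

```python
import asyncio
from llmpromptkit import PromptManager, PromptTesting

prompt_manager = PromptManager()
testing = PromptTesting(prompt_manager)

async def openai_callback(prompt, vars):
    # Replace this stub with a real OpenAI (or other) API call; it should
    # return the model's text completion for the rendered prompt.
    return f"[stub completion for: {prompt}]"

# "abc123" is the placeholder prompt id used throughout the docs.
results = asyncio.run(testing.run_all_tests("abc123", openai_callback))
print(results)
```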
docs/api_reference.md
CHANGED
@@ -1,13 +1,13 @@
# API Reference

+This document provides detailed API documentation for the main components of LLMPromptKit.

## PromptManager

The `PromptManager` class is the core component for managing prompts.

```python
+from llmpromptkit import PromptManager
```

### Methods

@@ -15,7 +15,7 @@ from promptlab import PromptManager
#### `__init__(storage_path=None)`
- **Description**: Initialize a new PromptManager.
- **Parameters**:
+  - `storage_path` (str, optional): Path to store prompts. Defaults to "~/llmpromptkit_storage".

#### `create(content, name, description='', tags=None, metadata=None)`
- **Description**: Create a new prompt.

@@ -66,7 +66,7 @@ from promptlab import PromptManager
The `VersionControl` class manages prompt versions.

```python
+from llmpromptkit import VersionControl
```

### Methods

@@ -117,7 +117,7 @@ from promptlab import VersionControl
The `PromptTesting` class provides testing capabilities.

```python
+from llmpromptkit import PromptTesting
```

### Methods

@@ -166,7 +166,7 @@ from promptlab import PromptTesting
The `Evaluator` class handles prompt evaluation.

```python
+from llmpromptkit import Evaluator
```

### Methods

@@ -196,7 +196,7 @@ from promptlab import Evaluator
The `PromptTemplate` class provides advanced templating.

```python
+from llmpromptkit import PromptTemplate
```

### Methods

@@ -217,7 +217,7 @@ from promptlab import PromptTemplate
The `EvaluationMetric` is the base class for evaluation metrics.

```python
+from llmpromptkit import EvaluationMetric
```

### Methods

@@ -243,5 +243,5 @@ from promptlab import EvaluationMetric
- `LengthMetric`: Scores based on output length.

```python
+from llmpromptkit import ExactMatchMetric, ContainsKeywordsMetric, LengthMetric
```
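Putting the documented `create(content, name, description='', tags=None, metadata=None)` signature to use, a short sketch; the returned object exposing an `id` attribute follows the test script added in this commit:

```python
from llmpromptkit import PromptManager

prompt_manager = PromptManager()  # uses the default storage location

prompt = prompt_manager.create(
    content="Summarize the following text: {text}",
    name="Summarizer",
    description="One-line summary prompt",
    tags=["summarization"],
)
print(f"Created prompt {prompt.id}")
```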
docs/cli_usage.md
CHANGED
@@ -1,6 +1,6 @@
# CLI Usage

+LLMPromptKit provides a command-line interface (CLI) for managing prompts, versions, tests, and evaluations.

## Basic Commands

@@ -8,78 +8,78 @@ PromptLab provides a command-line interface (CLI) for managing prompts, versions

```bash
# Create a prompt
+llmpromptkit prompt create "Weather Forecast" --content "Provide a weather forecast for {location} on {date}" --tags "weather,forecast"

# List all prompts
+llmpromptkit prompt list

# Get prompt details
+llmpromptkit prompt get <prompt_id>

# Update a prompt
+llmpromptkit prompt update <prompt_id> --content "New content" --tags "new,tags"

# Delete a prompt
+llmpromptkit prompt delete <prompt_id>
```

### Version Control

```bash
# Commit a version
+llmpromptkit version commit <prompt_id> --message "Version description"

# List versions
+llmpromptkit version list <prompt_id>

# Check out (revert to) a specific version
+llmpromptkit version checkout <prompt_id> <version_number>

# Compare versions
+llmpromptkit version diff <prompt_id> <version1> <version2>
```

### Testing

```bash
# Create a test case
+llmpromptkit test create <prompt_id> --input '{"location": "New York", "date": "tomorrow"}' --expected "Expected output"

# List test cases
+llmpromptkit test list <prompt_id>

# Run a specific test case
+llmpromptkit test run <test_case_id> --llm openai

# Run all test cases for a prompt
+llmpromptkit test run-all <prompt_id> --llm openai

# Run an A/B test between two prompts
+llmpromptkit test ab <prompt_id_a> <prompt_id_b> --inputs '[{"var": "value1"}, {"var": "value2"}]' --llm openai
```

### Evaluation

```bash
# Evaluate a prompt
+llmpromptkit eval run <prompt_id> --inputs '[{"var": "value1"}, {"var": "value2"}]' --llm openai

# List available metrics
+llmpromptkit eval metrics

# Register a custom metric
+llmpromptkit eval register-metric <metric_file.py>
```

## Environment Configuration

The CLI supports environment variables for configuration:

+- `LLMPROMPTKIT_STORAGE`: Path to store prompts and related data
+- `LLMPROMPTKIT_OPENAI_API_KEY`: OpenAI API key for built-in LLM support
+- `LLMPROMPTKIT_DEFAULT_LLM`: Default LLM to use for testing and evaluation

+You can also create a config file at `~/.llmpromptkit/config.json`:

```json
{

@@ -97,22 +97,22 @@ You can also create a config file at `~/.promptlab/config.json`:

```bash
# Specify a storage location for a command
+llmpromptkit --storage /path/to/storage prompt list

# Export a prompt to another storage
+llmpromptkit prompt export <prompt_id> --output /path/to/output.json

# Import a prompt from a file
+llmpromptkit prompt import /path/to/prompt.json
```

### Automation and Scripting

```bash
# Get output in JSON format
+llmpromptkit --json prompt list

# Use in shell scripts
+PROMPT_ID=$(llmpromptkit --json prompt create "Script Prompt" --content "Content" | jq -r '.id')
echo "Created prompt with ID: $PROMPT_ID"
```
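For driving the CLI from Python instead of a shell script, a minimal sketch built on the `--json` flag shown above; the exact shape of the JSON that `prompt list` emits is not documented here, so treat the parsing step as an assumption:

```python
import json
import subprocess

# Assumes the llmpromptkit console script is on PATH (installed via pip).
result = subprocess.run(
    ["llmpromptkit", "--json", "prompt", "list"],
    capture_output=True, text=True, check=True,
)
prompts = json.loads(result.stdout)  # structure of this payload is an assumption
print(prompts)
```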
docs/getting_started.md
CHANGED
@@ -1,11 +1,11 @@
+# Getting Started with LLMPromptKit

+This guide will help you get started with LLMPromptKit, a comprehensive library for managing LLM prompts.

## Installation

```bash
+pip install llmpromptkit
```

## Basic Usage

@@ -13,7 +13,7 @@ pip install promptlab
### Initialize Components

```python
+from llmpromptkit import PromptManager, VersionControl, PromptTesting, Evaluator

# Initialize with default storage location
prompt_manager = PromptManager()
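Continuing from the initialization above, a short sketch of creating and rendering a first prompt; the `create` and `render` calls mirror the test script added in this commit:

```python
from llmpromptkit import PromptManager

prompt_manager = PromptManager()

greeting = prompt_manager.create(
    content="Write a short greeting for {name} in a {tone} tone.",
    name="Greeting",
    tags=["example"],
)

# render() substitutes the {placeholders} in the prompt content
print(greeting.render(name="Ada", tone="friendly"))
```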
docs/integration_examples.md
CHANGED
@@ -1,13 +1,13 @@
# Integration Examples

+This document provides concrete examples of integrating LLMPromptKit into various applications and workflows.

## Customer Support Chatbot

### Setup

```python
+from llmpromptkit import PromptManager, VersionControl
import openai

# Initialize components

@@ -71,7 +71,7 @@ def handle_customer_message(customer_name, message, is_new_conversation):
### Setup

```python
+from llmpromptkit import PromptManager, PromptTesting, Evaluator
import asyncio

# Initialize components

@@ -155,7 +155,7 @@ async def generate_content(content_type, parameters):
### Setup

```python
+from llmpromptkit import PromptManager, VersionControl
import json
import openai

@@ -261,7 +261,7 @@ def save_research_data(research_project, data_type, content):
### Setup

```python
+from llmpromptkit import PromptManager, PromptTemplate
import asyncio
import aiohttp

@@ -409,7 +409,7 @@ async def generate_quiz(topic, difficulty, num_questions, grade_level, include_e
### Setup

```python
+from llmpromptkit import PromptManager, PromptTesting
import asyncio
import subprocess
import tempfile
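The integration examples all revolve around passing an async callback into the testing or evaluation components. A minimal end-to-end sketch of an evaluation run; note that the keyword arguments to `evaluate_prompt` are assumptions made for illustration, since the diff only shows that it is awaited and returns a dict with an `aggregated_metrics` key:

```python
import asyncio
from llmpromptkit import PromptManager, Evaluator

prompt_manager = PromptManager()
evaluator = Evaluator(prompt_manager)

async def llm_callback(prompt, vars):
    # Stand-in for a real model call (OpenAI, Anthropic, a local pipeline, ...).
    return "stub output"

# Argument names below are illustrative assumptions, not the confirmed signature.
result = asyncio.run(evaluator.evaluate_prompt(
    prompt_id="0a6614b65f",  # the Weather Forecast prompt added in this commit
    inputs=[{"location": "New York", "date": "tomorrow"}],
    llm_callback=llm_callback,
))
print(result["aggregated_metrics"])
```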
hello.py
ADDED
@@ -0,0 +1,2 @@
+#!/usr/bin/env python3
+print("Hello World test!")
{promptlab → llmpromptkit}/__init__.py
RENAMED
@@ -1,7 +1,7 @@
"""
+LLMPromptKit - A comprehensive LLM Prompt Management System

+LLMPromptKit is a Python library that provides tools for managing, versioning,
testing, and evaluating prompts for Large Language Models.

Features:
{promptlab → llmpromptkit}/cli/__init__.py
RENAMED
File without changes
{promptlab → llmpromptkit}/cli/commands.py
RENAMED
@@ -12,14 +12,14 @@ from ..core.evaluation import Evaluator, ContainsKeywordsMetric, LengthMetric


class CLI:
+    """Command-line interface for LLMPromptKit."""
    def __init__(self):
        self.prompt_manager = PromptManager()
        self.version_control = VersionControl(self.prompt_manager)
        self.testing = PromptTesting(self.prompt_manager)
        self.evaluator = Evaluator(self.prompt_manager)

+        self.parser = argparse.ArgumentParser(description="LLMPromptKit - LLM Prompt Management System")
        self._setup_commands()

    def _setup_commands(self) -> None:
{promptlab → llmpromptkit}/core/__init__.py
RENAMED
File without changes

{promptlab → llmpromptkit}/core/evaluation.py
RENAMED
File without changes
{promptlab → llmpromptkit}/core/prompt_manager.py
RENAMED
@@ -80,7 +80,7 @@ class Prompt:

class PromptManager:
    def __init__(self, storage_path: Optional[str] = None):
+        self.storage_path = storage_path or os.path.join(os.getcwd(), "llmpromptkit_storage")
        self.prompts: Dict[str, Prompt] = {}
        self._ensure_storage_dir()
        self._load_prompts()
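As the constructor above shows, the default store is now a `llmpromptkit_storage` directory under the current working directory; a two-line sketch of overriding it with an explicit path:

```python
import os
from llmpromptkit import PromptManager

# An explicit path overrides the llmpromptkit_storage default shown above.
pm = PromptManager(storage_path=os.path.expanduser("~/prompts/store"))
print(pm.storage_path)
```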
{promptlab → llmpromptkit}/core/testing.py
RENAMED
File without changes

{promptlab → llmpromptkit}/core/version_control.py
RENAMED
File without changes

{promptlab → llmpromptkit}/examples/__init__.py
RENAMED
File without changes
{promptlab → llmpromptkit}/examples/ab_testing.py
RENAMED
@@ -1,13 +1,13 @@
"""
+A/B testing example for LLMPromptKit.

+This example demonstrates how to use LLMPromptKit to perform A/B testing
on different prompt variations to find the most effective one.
"""

import asyncio
import os
+from llmpromptkit import PromptManager, PromptTesting

async def llm_callback(prompt, vars):
    """

@@ -25,7 +25,7 @@ async def llm_callback(prompt, vars):

async def main():
    # Initialize the prompt manager with a custom storage path
+    storage_path = os.path.join(os.getcwd(), "llmpromptkit_storage")
    prompt_manager = PromptManager(storage_path)

    # Initialize testing
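The A/B testing example's `llm_callback` is cut off at its docstring in this view. A stub showing the shape such a callback can take so the example runs without an API key; only the `(prompt, vars)` signature comes from the diff, the body is illustrative:

```python
async def llm_callback(prompt, vars):
    """Stand-in LLM: echo the rendered prompt instead of calling a real model."""
    # vars carries the variable values used for this test case; a real callback
    # would send the prompt to an LLM API and return its completion text.
    return f"Simulated response to: {prompt}"
```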
{promptlab → llmpromptkit}/examples/basic_usage.py
RENAMED
@@ -1,18 +1,18 @@

"""
+Basic usage example for LLMPromptKit.

+This example demonstrates the fundamental features of LLMPromptKit
including creating prompts, versioning, and rendering.
"""

import asyncio
import os
+from llmpromptkit import PromptManager, VersionControl

async def main():
    # Initialize the prompt manager with a custom storage path
+    storage_path = os.path.join(os.getcwd(), "llmpromptkit_storage")
    prompt_manager = PromptManager(storage_path)

    # Initialize version control
{promptlab → llmpromptkit}/examples/evaluation_example.py
RENAMED
@@ -1,13 +1,13 @@
"""
+Evaluation example for LLMPromptKit.

+This example demonstrates how to use LLMPromptKit's evaluation framework
to measure the quality of prompts using various metrics.
"""

import asyncio
import os
+from llmpromptkit import PromptManager, Evaluator, ContainsKeywordsMetric, LengthMetric

async def llm_callback(prompt, vars):
    """

@@ -29,7 +29,7 @@ async def llm_callback(prompt, vars):

async def main():
    # Initialize the prompt manager with a custom storage path
+    storage_path = os.path.join(os.getcwd(), "llmpromptkit_storage")
    prompt_manager = PromptManager(storage_path)

    # Initialize evaluator
{promptlab → llmpromptkit}/tests/__init__.py
RENAMED
File without changes

{promptlab → llmpromptkit}/tests/test_evaluation.py
RENAMED
File without changes
{promptlab → llmpromptkit}/tests/test_prompt_manager.py
RENAMED
@@ -2,7 +2,7 @@ import unittest
import os
import shutil
import tempfile
+from llmpromptkit.core.prompt_manager import PromptManager, Prompt

class TestPromptManager(unittest.TestCase):
    def setUp(self):
{promptlab → llmpromptkit}/tests/test_testing.py
RENAMED
File without changes

{promptlab → llmpromptkit}/tests/test_version_control.py
RENAMED
File without changes

{promptlab → llmpromptkit}/utils/__init__.py
RENAMED
File without changes

{promptlab → llmpromptkit}/utils/metrics.py
RENAMED
File without changes
{promptlab → llmpromptkit}/utils/storage.py
RENAMED
@@ -4,7 +4,7 @@ import shutil
from typing import Dict, Any, Optional, List

class Storage:
+    """Handles persistent storage for LLMPromptKit."""
    def __init__(self, base_path: str):
        self.base_path = base_path
        os.makedirs(base_path, exist_ok=True)
{promptlab → llmpromptkit}/utils/templating.py
RENAMED
File without changes
llmpromptkit_storage/prompts/0a6614b65f.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "id": "0a6614b65f",
+  "name": "Weather Forecast",
+  "content": "Provide a detailed weather forecast for {location} on {date}. Include temperature, precipitation chances, and wind information.",
+  "description": "A comprehensive weather forecast prompt",
+  "tags": [
+    "weather",
+    "forecast",
+    "testing"
+  ],
+  "metadata": {},
+  "created_at": "2025-05-21T10:55:45.716023",
+  "updated_at": "2025-05-21T11:06:17.983763",
+  "version": 1
+}
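The stored prompt above keeps its template in the `content` field with plain `{placeholder}` fields. A minimal sketch of reading the file directly and filling it in with `str.format` (normally `PromptManager` handles this):

```python
import json

# Path as added in this commit; adjust if your storage root differs.
with open("llmpromptkit_storage/prompts/0a6614b65f.json") as f:
    prompt = json.load(f)

print(prompt["content"].format(location="New York", date="tomorrow"))
```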
llmpromptkit_storage/prompts/4e9d34fb56.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "id": "4e9d34fb56",
+  "name": "Test Prompt",
+  "content": "This is a test prompt for {parameter}",
+  "description": "",
+  "tags": [
+    "test",
+    "cli",
+    "llmpromptkit"
+  ],
+  "metadata": {},
+  "created_at": "2025-05-21T10:19:04.907396",
+  "updated_at": "2025-05-21T10:19:04.907396",
+  "version": 1
+}
llmpromptkit_storage/tests/test_cases/6c30b375-7.json
ADDED
@@ -0,0 +1,12 @@
+{
+  "id": "6c30b375-7",
+  "prompt_id": "0a6614b65f",
+  "input_vars": {
+    "location": "New York",
+    "date": "tomorrow"
+  },
+  "expected_output": "Weather forecast for New York tomorrow should include temperature, precipitation, wind, UV index, air quality, and alerts.",
+  "name": "Test case 6c30b375-7",
+  "description": "",
+  "created_at": "2025-05-21T11:01:26.603328"
+}
llmpromptkit_storage/versions/0a6614b65f/v1.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "prompt_id": "0a6614b65f",
+  "version": 1,
+  "content": "Provide a detailed weather forecast for {location} on {date}. Include temperature, precipitation chances, and wind information.",
+  "metadata": {},
+  "commit_message": "Initial detailed weather forecast version",
+  "created_at": "2025-05-21T10:56:43.548028"
+}
llmpromptkit_storage/versions/0a6614b65f/v2.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "prompt_id": "0a6614b65f",
+  "version": 2,
+  "content": "Provide a comprehensive weather forecast for {location} on {date}. Please include the following information:\\n- High and low temperatures (in both Celsius and Fahrenheit)\\n- Precipitation chances and amount\\n- Wind speed and direction\\n- UV index\\n- Air quality\\n- Any weather warnings or alerts",
+  "metadata": {},
+  "commit_message": "Enhanced with more detailed weather parameters",
+  "created_at": "2025-05-21T11:00:51.632374"
+}
pyproject.toml
CHANGED
@@ -3,7 +3,7 @@ requires = ["setuptools>=42", "wheel"]
build-backend = "setuptools.build_meta"

[project]
+name = "llmpromptkit"
version = "0.1.0"
description = "A comprehensive LLM Prompt Management System"
readme = "README.md"

@@ -33,13 +33,13 @@ dependencies = [
]

[project.urls]
+"Homepage" = "https://github.com/biswanathroul/llmpromptkit"
+"Bug Tracker" = "https://github.com/biswanathroul/llmpromptkit/issues"
+"Documentation" = "https://github.com/biswanathroul/llmpromptkit/wiki"
+"Source Code" = "https://github.com/biswanathroul/llmpromptkit"

[project.scripts]
+llmpromptkit = "llmpromptkit.cli.commands:main"

[tool.setuptools]
+packages = ["llmpromptkit", "llmpromptkit.core", "llmpromptkit.cli", "llmpromptkit.utils"]
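The `[project.scripts]` table above wires the `llmpromptkit` command to `llmpromptkit.cli.commands:main`. A small sketch for confirming, after installation, that the console script was registered (standard library only; the `group` argument requires Python 3.10+):

```python
from importlib.metadata import entry_points

# Console scripts registered by installed distributions.
matches = [ep for ep in entry_points(group="console_scripts") if ep.name == "llmpromptkit"]
print(matches[0].value if matches else "llmpromptkit console script not found")
```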
simple_test.py
ADDED
@@ -0,0 +1,15 @@
+#!/usr/bin/env python3
+"""
+A simple test script for the llmpromptkit library.
+"""
+
+from llmpromptkit import PromptManager
+
+# Initialize the prompt manager
+prompt_manager = PromptManager()
+
+# List all prompts
+prompts = prompt_manager.list()
+print(f"Found {len(prompts)} prompts:")
+for prompt in prompts:
+    print(f"ID: {prompt.id} | Name: {prompt.name} | Tags: {', '.join(prompt.tags)}")
test_script.py
ADDED
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+"""
+A test script to demonstrate using the llmpromptkit library programmatically.
+"""
+
+import os
+import asyncio
+from llmpromptkit import PromptManager, VersionControl, PromptTesting, Evaluator
+
+async def main():
+    # Initialize components with a custom storage path
+    storage_path = os.path.join(os.getcwd(), "llmpromptkit_storage")
+    prompt_manager = PromptManager(storage_path)
+    version_control = VersionControl(prompt_manager)
+    testing = PromptTesting(prompt_manager)
+    evaluator = Evaluator(prompt_manager)
+
+    # Print all existing prompts
+    print("===== Existing Prompts =====")
+    prompts = prompt_manager.list()
+    for prompt in prompts:
+        print(f"ID: {prompt.id} | Name: {prompt.name} | Tags: {', '.join(prompt.tags)}")
+
+    # Create a new prompt
+    print("\n===== Creating a New Prompt =====")
+    translation_prompt = prompt_manager.create(
+        content="Translate the following text from {source_language} to {target_language}: {text}",
+        name="Translation Prompt",
+        description="A prompt for translating text between languages",
+        tags=["translation", "multilingual", "test"]
+    )
+    print(f"Created prompt with ID: {translation_prompt.id}")
+
+    # Render the prompt with variables
+    print("\n===== Rendering the Prompt =====")
+    rendered = translation_prompt.render(
+        source_language="English",
+        target_language="French",
+        text="Hello, how are you today?"
+    )
+    print(f"Rendered prompt: {rendered}")
+
+    # Create a version
+    print("\n===== Creating a Version =====")
+    version = version_control.commit(
+        prompt_id=translation_prompt.id,
+        commit_message="Initial translation prompt"
+    )
+    print(f"Created version {version.version} for prompt {translation_prompt.id}")
+
+    # List all versions for this prompt
+    print("\n===== Listing Versions =====")
+    versions = version_control.list_versions(translation_prompt.id)
+    print(f"Found {len(versions)} versions for prompt {translation_prompt.id}:")
+    for v in versions:
+        print(f"Version: {v.version} | Created: {v.created_at} | Message: {v.commit_message}")
+
+    # Create a test case
+    print("\n===== Creating a Test Case =====")
+    test_case = testing.create_test_case(
+        prompt_id=translation_prompt.id,
+        input_vars={
+            "source_language": "English",
+            "target_language": "French",
+            "text": "Hello, world!"
+        },
+        expected_output="Bonjour, monde!"
+    )
+    print(f"Created test case with ID: {test_case.id}")
+
+    # List all test cases
+    print("\n===== Listing Test Cases =====")
+    test_cases = testing.list_test_cases()
+    print(f"Found {len(test_cases)} test cases:")
+    for tc in test_cases:
+        print(f"ID: {tc.id} | Prompt ID: {tc.prompt_id}")
+
+    print("\n===== Test Complete =====")
+
+if __name__ == "__main__":
+    asyncio.run(main())