async def run_warm_start_task():
    """Run the warm-start demo task and report skill reuse.

    Executes a CSV-analysis task through OpenSpace; since it resembles the
    earlier cold-start task, previously evolved skills should be reused.

    Returns:
        The result dict from ``OpenSpace.execute`` on success, or ``None``
        when execution fails (e.g. the ``openspace`` package is unavailable
        in this environment).
    """
    print("=" * 60)
    print("🔥 WARM START: Reusing previously evolved skills")
    print("=" * 60)

    task = (
        "Create a Python script that analyzes a CSV file containing "
        "inventory data with columns: date, item, quantity, price. "
        "The script should compute monthly spending, identify the top "
        "5 most purchased items, and output a formatted summary report."
    )
    print(f"\n📝 Task: {task[:100]}...")
    print("   (Similar to cold start task — skills should be reused)\n")

    start_time = time.time()
    try:
        # Imported lazily so the demo degrades gracefully when the
        # package is not installed.
        from openspace import OpenSpace

        async with OpenSpace() as cs:
            result = await cs.execute(task)

        elapsed = time.time() - start_time
        print(f"\n⏱️ Execution time: {elapsed:.1f}s")

        response_text = result.get("response", str(result))
        print(f"\n📄 Response (first 500 chars):")
        print("-" * 40)
        print(response_text[:500])

        evolved = result.get("evolved_skills", [])
        reused = result.get("reused_skills", [])
        if reused:
            print(f"\n♻️ Skills Reused: {len(reused)}")
            for skill in reused:
                print(f"   • {skill.get('name', 'unnamed')}")
        if evolved:
            print(f"\n🧬 New Skills Evolved: {len(evolved)}")
            for skill in evolved:
                print(f"   • {skill.get('name', 'unnamed')} ({skill.get('origin', '')})")
        return result
    except Exception as e:
        # Sandboxed / package-less environments land here; the notebook
        # then simulates the comparison instead.
        print(f"\n⚠️ Execution error: {type(e).__name__}: {e}")
        print("We'll simulate the comparison below.")
        return None
warm_start_result = await run_warm_start_task()
async def demo_skill_search():
    """Demonstrate semantic skill search over a few sample queries.

    Prints the top matches for each query to stdout and returns ``None``.
    Degrades gracefully when the ``openspace`` package or its skill engine
    is unavailable.
    """
    print("=" * 60)
    print("🔎 SKILL SEARCH & DISCOVERY")
    print("=" * 60)
    try:
        from openspace import OpenSpace

        async with OpenSpace() as cs:
            queries = [
                "CSV data analysis with pandas",
                "PDF report generation",
                "web scraping with error handling",
            ]
            for query in queries:
                print(f"\n🔍 Query: '{query}'")
                # The skill engine only exists after at least one task
                # execution has evolved skills.
                if hasattr(cs, 'skill_engine') and cs.skill_engine:
                    results = await cs.skill_engine.search(query)
                    if results:
                        for r in results[:3]:
                            print(f"   📋 {r.get('name', 'unnamed')} "
                                  f"(score: {r.get('score', 'N/A')})")
                    else:
                        print("   (no matching skills found)")
                else:
                    print("   (skill engine not initialized — "
                          "skills accumulate after task executions)")
    except Exception as e:
        print(f"\n⚠️ Search demo: {e}")

    print("\n💡 Skill search becomes available after skills are evolved.")
    print("   In production, run multiple tasks first to build up the skill database.")
await demo_skill_search()
def create_custom_skill(skill_name, description, directions, triggers):
    """Write a manually authored skill to disk as a SKILL.md file.

    Args:
        skill_name: Skill identifier; also used as the directory name
            under the module-level ``SKILLS_DIR``.
        description: One-line summary stored in the frontmatter and body.
        directions: Markdown instructions body for the skill.
        triggers: List of trigger keyword strings, serialized as JSON
            into the frontmatter.

    Returns:
        pathlib.Path to the written SKILL.md file.
    """
    skill_dir = SKILLS_DIR / skill_name
    skill_dir.mkdir(parents=True, exist_ok=True)

    skill_md = f"""---
name: {skill_name}
description: {description}
version: 1.0.0
origin: manual
triggers: {json.dumps(triggers)}
---

# {skill_name}

{description}

## Instructions

{directions}

## Quality Metrics
- Applied Rate: 0% (new skill)
- Completion Rate: N/A
- Effective Rate: N/A
"""

    skill_path = skill_dir / "SKILL.md"
    skill_path.write_text(skill_md)
    print(f"✅ Created skill: {skill_name}")
    print(f"   Path: {skill_path}")
    return skill_path
# Register a manually authored CSV-validation skill.
create_custom_skill(
    skill_name="data-validation-csv",
    description="Validate CSV files for common issues before processing: check encoding, detect delimiter, handle missing values, verify column types.",
    directions="""When working with CSV files:

1. **Encoding Detection**: Try UTF-8 first, then fall back to latin-1, cp1252
2. **Delimiter Detection**: Use csv.Sniffer() to auto-detect delimiter
3. **Missing Values**: Count NaN/null per column, report percentage
4. **Type Inference**: Check if numeric columns are actually numeric
5. **Duplicate Check**: Identify duplicate rows

```python
import pandas as pd
import csv
import chardet

def validate_csv(filepath):
    with open(filepath, 'rb') as f:
        result = chardet.detect(f.read(10000))
    encoding = result['encoding']
    df = pd.read_csv(filepath, encoding=encoding)
    report = {
        'rows': len(df),
        'columns': list(df.columns),
        'missing': df.isnull().sum().to_dict(),
        'duplicates': df.duplicated().sum(),
        'dtypes': df.dtypes.astype(str).to_dict()
    }
    return report
```""",
    triggers=["csv", "data validation", "data quality", "pandas"]
)
print()
# Register a report-generation skill with progressive output fallbacks.
create_custom_skill(
    skill_name="report-gen-fallback",
    description="Generate reports with multiple fallback strategies: try reportlab PDF first, fall back to HTML, then plain text.",
    directions="""When generating reports:

1. **Try reportlab PDF** first for professional output
2. **Fall back to HTML** if reportlab fails (common in sandboxed envs)
3. **Final fallback: plain text** with formatted tables

Always verify the output file exists and has non-zero size after generation.

```python
def generate_report(data, output_path):
    try:
        from reportlab.lib.pagesizes import letter
        from reportlab.platypus import SimpleDocTemplate
        return output_path
    except ImportError:
        pass
    try:
        html_path = output_path.replace('.pdf', '.html')
        return html_path
    except Exception:
        pass
    txt_path = output_path.replace('.pdf', '.txt')
    return txt_path
```""",
    triggers=["report", "PDF", "document generation", "reportlab"]
)
print()
# Register an execution-recovery skill distilled from real benchmark failures.
create_custom_skill(
    skill_name="execution-recovery",
    description="Multi-layer execution recovery: handle sandbox failures, shell errors, and file write issues with progressive fallbacks.",
    directions="""When code execution fails:

1. **Capture the full error** including traceback
2. **Identify the failure type**: ImportError, PermissionError, TimeoutError, etc.
3. **Apply targeted fix**:
   - ImportError → pip install the missing package
   - PermissionError → change output directory to /tmp
   - TimeoutError → reduce data size or add chunking
   - MemoryError → process in batches
4. **Retry with fix applied**
5. **Log the fix** for future skill evolution

This skill was captured from 28 real execution failures in the GDPVal benchmark.""",
    triggers=["error", "failure", "recovery", "fallback", "retry"]
)
# Summarize every skill registered on disk by reading each SKILL.md
# frontmatter and printing its `name:` field.
print("\n" + "=" * 60)
print("📋 All registered skills:")
print("=" * 60)
for skill_dir in sorted(SKILLS_DIR.iterdir()):
    if skill_dir.is_dir():
        skill_md = skill_dir / "SKILL.md"
        if skill_md.exists():
            content = skill_md.read_text()
            for line in content.split('\n'):
                if line.startswith('name:'):
                    name = line.split(':', 1)[1].strip()
                    print(f"   🧩 {name}")
                    break
