In this tutorial, we implement IWE: an open-source, Rust-powered personal knowledge management system that treats markdown notes as a navigable knowledge graph. Because IWE itself is a CLI/LSP tool designed for native editors, we recreate its core concepts in pure Python so everything runs in a notebook. We construct a realistic developer knowledge base from scratch, wire up wiki-links and markdown hyperlinks into a directed graph, and then walk through each major IWE operation: fuzzy search with find, context-aware retrieval with retrieve, hierarchy display with tree, document consolidation with squash, statistics with stats, and DOT graph export for visualization. We then go beyond the CLI by integrating OpenAI to power IWE-style AI transforms: summarization, link suggestion, and todo extraction, directly against our knowledge graph. Finally, we assemble a full agentic RAG pipeline where an AI agent navigates the graph using function-calling tools, performs multi-hop reasoning across interconnected documents, identifies knowledge gaps, and even generates new notes that slot into the existing structure.
import subprocess, sys

def _install(pkg):
    subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", pkg])

_install("openai")
_install("graphviz")

import re, json, textwrap, os, getpass
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Optional
from datetime import datetime

# Load the API key from Colab secrets if available, otherwise prompt for it.
try:
    from google.colab import userdata
    OPENAI_API_KEY = userdata.get("OPENAI_API_KEY")
    if not OPENAI_API_KEY:
        raise ValueError
    print("✅ Loaded OPENAI_API_KEY from Colab secrets.")
except Exception:
    OPENAI_API_KEY = getpass.getpass("🔑 Enter your OpenAI API key: ")
    print("✅ API key received.")

os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY

from openai import OpenAI
client = OpenAI(api_key=OPENAI_API_KEY)

print("\n" + "=" * 72)
print(" IWE Advanced Tutorial — Knowledge Graph + AI Agents")
print("=" * 72)
@dataclass
class Section:
    level: int
    title: str
    content: str
    children: list = field(default_factory=list)

@dataclass
class Document:
    key: str
    title: str
    raw_content: str
    sections: list = field(default_factory=list)
    outgoing_links: list = field(default_factory=list)
    tags: list = field(default_factory=list)
    created: str = ""
    modified: str = ""

class KnowledgeGraph:
    def __init__(self):
        self.documents: dict[str, Document] = {}
        self.backlinks: dict[str, set] = defaultdict(set)

    # Wiki-links: [[target]] or [[target|alias]]
    _WIKI_LINK = re.compile(r"\[\[([^\]|]+)(?:\|([^\]]+))?\]\]")
    # Markdown links: [label](target)
    _MD_LINK = re.compile(r"\[([^\]]+)\]\(([^)]+)\)")
    _HEADER = re.compile(r"^(#{1,6})\s+(.+)", re.MULTILINE)
    _TAG = re.compile(r"#([a-zA-Z][\w/-]*)")
    def _extract_links(self, text: str) -> list[str]:
        links = []
        for match in self._WIKI_LINK.finditer(text):
            links.append(match.group(1).strip())
        for match in self._MD_LINK.finditer(text):
            target = match.group(2).strip()
            if not target.startswith("http"):  # skip external URLs
                target = target.replace(".md", "")
                links.append(target)
        return links
    def _parse_sections(self, text: str) -> list[Section]:
        sections = []
        # re.split with two capture groups yields [preamble, hashes, title, body, ...]
        parts = self._HEADER.split(text)
        i = 1
        while i < len(parts) - 1:
            level = len(parts[i])
            title = parts[i + 1].strip()
            body = parts[i + 2] if i + 2 < len(parts) else ""
            sections.append(Section(level=level, title=title, content=body.strip()))
            i += 3
        return sections
    def _extract_tags(self, text: str) -> list[str]:
        tags = set()
        for line in text.split("\n"):
            if line.strip().startswith("#") and " " in line.strip():
                # Probably a markdown header: drop it before scanning for #tags
                stripped = re.sub(r"^#{1,6}\s+.*", "", line)
                for m in self._TAG.finditer(stripped):
                    tags.add(m.group(1))
            else:
                for m in self._TAG.finditer(line):
                    tags.add(m.group(1))
        return sorted(tags)
    def add_document(self, key: str, content: str) -> Document:
        sections = self._parse_sections(content)
        title = sections[0].title if sections else key
        links = self._extract_links(content)
        tags = self._extract_tags(content)
        now = datetime.now().strftime("%Y-%m-%d %H:%M")
        doc = Document(
            key=key, title=title, raw_content=content,
            sections=sections, outgoing_links=links, tags=tags,
            created=now, modified=now,
        )
        self.documents[key] = doc
        for target in links:
            self.backlinks[target].add(key)  # maintain the reverse index
        return doc

    def get(self, key: str) -> Optional[Document]:
        return self.documents.get(key)
    def find(self, query: str, roots_only: bool = False, limit: int = 10) -> list[str]:
        q = query.lower()
        scored = []
        for key, doc in self.documents.items():
            score = 0
            if q in doc.title.lower():
                score += 10
            if q in doc.raw_content.lower():
                score += doc.raw_content.lower().count(q)
            if q in key.lower():
                score += 5
            for tag in doc.tags:
                if q in tag.lower():
                    score += 3
            if score > 0:
                scored.append((key, score))
        scored.sort(key=lambda x: -x[1])
        results = [k for k, _ in scored[:limit]]
        if roots_only:
            # Roots are documents nothing links to (no backlinks)
            results = [k for k in results if not self.backlinks.get(k)]
        return results
    def retrieve(self, key: str, depth: int = 1, context: int = 1,
                 exclude: set = None) -> str:
        exclude = exclude or set()
        parts = []
        if context > 0:
            # Prepend a snippet from parent documents (backlinks) as context
            parents_of = list(self.backlinks.get(key, set()) - exclude)
            for p in parents_of[:context]:
                pdoc = self.get(p)
                if pdoc:
                    parts.append(f"[CONTEXT: {pdoc.title}]\n{pdoc.raw_content[:300]}...\n")
                    exclude.add(p)
        doc = self.get(key)
        if not doc:
            return f"⚠ Document '{key}' not found."
        parts.append(doc.raw_content)
        exclude.add(key)
        if depth > 0:
            # Follow outgoing links, recursing with reduced depth
            for link in doc.outgoing_links:
                if link not in exclude:
                    child = self.get(link)
                    if child:
                        parts.append(f"\n---\n[LINKED: {child.title}]\n")
                        parts.append(
                            self.retrieve(link, depth=depth - 1,
                                          context=0, exclude=exclude)
                        )
        return "\n".join(parts)
    def tree(self, key: str, indent: int = 0, _visited: set = None) -> str:
        _visited = _visited if _visited is not None else set()
        doc = self.get(key)
        if not doc:
            return ""
        prefix = "  " * indent + ("└─ " if indent else "")
        if key in _visited:
            return f"{prefix}{doc.title} ({key}) ↩ (circular ref)"
        _visited.add(key)
        lines = [f"{prefix}{doc.title} ({key})"]
        for link in doc.outgoing_links:
            if self.get(link):
                lines.append(self.tree(link, indent + 1, _visited))
        return "\n".join(lines)
    def squash(self, key: str, visited: set = None) -> str:
        visited = visited or set()
        doc = self.get(key)
        if not doc or key in visited:
            return ""
        visited.add(key)
        parts = [doc.raw_content]
        for link in doc.outgoing_links:
            child_content = self.squash(link, visited)
            if child_content:
                parts.append(f"\n{'─' * 40}\n")
                parts.append(child_content)
        return "\n".join(parts)
    def stats(self) -> dict:
        total_words = sum(len(d.raw_content.split()) for d in self.documents.values())
        total_links = sum(len(d.outgoing_links) for d in self.documents.values())
        # Orphans have neither backlinks nor outgoing links
        orphans = [k for k in self.documents if not self.backlinks.get(k)
                   and not self.documents[k].outgoing_links]
        all_tags = set()
        for d in self.documents.values():
            all_tags.update(d.tags)
        return {
            "total_documents": len(self.documents),
            "total_words": total_words,
            "total_links": total_links,
            "unique_tags": len(all_tags),
            "tags": sorted(all_tags),
            "orphan_notes": orphans,
            "avg_words_per_doc": total_words // max(len(self.documents), 1),
        }
    def export_dot(self, highlight_key: str = None) -> str:
        lines = ['digraph KnowledgeGraph {',
                 '  rankdir=LR;',
                 '  node [shape=box, style="rounded,filled", fillcolor="#f0f4ff", '
                 'fontname="Helvetica", fontsize=10];',
                 '  edge [color="#666666", arrowsize=0.7];']
        for key, doc in self.documents.items():
            label = doc.title[:30]
            color = "#ffe4b5" if highlight_key == key else "#f0f4ff"
            lines.append(f'  "{key}" [label="{label}", fillcolor="{color}"];')
        for key, doc in self.documents.items():
            for link in doc.outgoing_links:
                if link in self.documents:
                    lines.append(f'  "{key}" -> "{link}";')
        lines.append("}")
        return "\n".join(lines)
print("n✅ Part 1 full — KnowledgeGraph class outlined.n")We set up the required dependencies, securely settle for the OpenAI API key via Colab secrets and techniques or a password immediate, and initialize the OpenAI shopper. We then outline the three foundational knowledge courses, Part, Doc, and KnowledgeGraph, that mirror IWE’s arena-based graph structure the place each markdown file is a node and each hyperlink is a directed edge. We implement the complete suite of IWE CLI operations on the KnowledgeGraph class, together with markdown parsing for wiki-links and headers, fuzzy search with discover, context-aware retrieval with retrieve, cycle-safe hierarchy show with tree, doc consolidation with squash, data base analytics with stats, and DOT graph export for Graphviz visualization.
kg = KnowledgeGraph()

kg.add_document("project-index", """# Web App Project
This is the **Map of Content** for our web application project.
## Architecture
- [Authentication System](authentication)
- [Database Design](database-design)
- [API Design](api-design)
## Development
- [Frontend Stack](frontend-stack)
- [Deployment Pipeline](deployment)
## Research
- [[caching-strategies]]
- [[performance-notes]]
""")
kg.add_document("authentication", """# Authentication System
Our app makes use of **JWT-based authentication** with refresh tokens.
## Stream
1. Consumer submits credentials to `/api/auth/login`
2. Server validates towards [Database Design](database-design) person desk
3. Returns short-lived entry token (15 min) + refresh token (7 days)
4. Consumer shops refresh token in HTTP-only cookie
## Safety Issues
- Passwords hashed with bcrypt (price issue 12)
- Fee limiting on login endpoint: 5 makes an attempt / minute
- Refresh token rotation on every use
- See [[caching-strategies]] for session caching
#safety #jwt #auth
""")
kg.add_document("database-design", """# Database Design
We use **PostgreSQL 16** with the next core tables.
## Customers Desk
```sql
CREATE TABLE customers (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
electronic mail VARCHAR(255) UNIQUE NOT NULL,
password VARCHAR(255) NOT NULL,
created_at TIMESTAMPTZ DEFAULT NOW()
);
```
## Periods Desk
```sql
CREATE TABLE periods (
id UUID PRIMARY KEY,
user_id UUID REFERENCES customers(id),
token_hash VARCHAR(255) NOT NULL,
expires_at TIMESTAMPTZ NOT NULL
);
```
## Indexing Technique
- B-tree on `customers.electronic mail` for login lookups
- B-tree on `periods.token_hash` for token validation
- See [[performance-notes]] for question optimization
#database #postgresql #schema
""")
kg.add_document("api-design", """# API Design
RESTful API following OpenAPI 3.0 specification.
## Endpoints
| Technique | Path | Description |
|--------|------|-------------|
| POST | /api/auth/login | Authenticate person |
| POST | /api/auth/refresh | Refresh entry token |
| GET | /api/customers/me | Get present person profile |
| PUT | /api/customers/me | Replace profile |
## Error Dealing with
All errors return JSON with `{ "error": "code", "message": "..." }`.
Authentication endpoints documented in [Authentication System](authentication).
Knowledge fashions align with [Database Design](database-design).
#api #relaxation #openapi
""")
kg.add_document("frontend-stack", """# Frontend Stack
## Know-how Decisions
- **Framework**: React 19 with Server Parts
- **Styling**: Tailwind CSS v4
- **State Administration**: Zustand for shopper state
- **Knowledge Fetching**: TanStack Question v5
## Auth Integration
The frontend consumes the [API Design](api-design) endpoints.
Entry tokens are saved in reminiscence (not localStorage) for safety.
Refresh dealt with transparently through Axios interceptors.
#frontend #react #tailwind
""")
kg.add_document("deployment", """# Deployment Pipeline
## Infrastructure
- **Container Runtime**: Docker with multi-stage builds
- **Orchestration**: Kubernetes on GKE
- **CI/CD**: GitHub Actions → Google Artifact Registry → GKE
## Pipeline Phases
1. Lint & type-check
2. Unit assessments (Jest + pytest)
3. Construct Docker photos
4. Push to Artifact Registry
5. Deploy to staging (auto)
6. Deploy to manufacturing (guide approval)
## Monitoring
- Prometheus + Grafana for metrics
- Structured logging with correlation IDs
- See [[performance-notes]] for SLOs
#devops #kubernetes #cicd
""")
kg.add_document("caching-strategies", """# Caching Methods
## Software-Degree Caching
- **Redis** for session storage and charge limiting
- Cache-aside sample for continuously accessed person profiles
- TTL: 5 minutes for profiles, quarter-hour for config
## HTTP Caching
- `Cache-Management: non-public, max-age=0` for authenticated endpoints
- `Cache-Management: public, max-age=3600` for static property
- ETag assist for conditional requests
## Cache Invalidation
- Occasion-driven invalidation through pub/sub
- Versioned cache keys: `person:{id}:v{model}`
Associated: [Authentication System](authentication) makes use of Redis for refresh tokens.
#caching #redis #efficiency
""")
kg.add_document("performance-notes", """# Efficiency Notes
## Database Question Optimization
- Use `EXPLAIN ANALYZE` earlier than deploying new queries
- Connection pooling with PgBouncer (max 50 connections)
- Keep away from N+1 queries — use JOINs or DataLoader sample
## SLO Targets
| Metric | Goal | Present |
|--------|--------|---------|
| p99 latency | < 200ms | 180ms |
| Availability | 99.9% | 99.95% |
| Error charge | < 0.1% | 0.05% |
## Load Testing
- k6 scripts in `/assessments/load/`
- Baseline: 1000 RPS sustained
- Spike: 5000 RPS for 60 seconds
Associated to [Database Design](database-design) indexing and [[caching-strategies]].
#efficiency #slo #monitoring
""")
print("✅ Part 2 full — 8 paperwork loaded into data graph.n")
print("─" * 72)
print(" 3A · iwe discover — Search the Data Graph")
print("─" * 72)
outcomes = kg.discover("authentication")
print(f"n🔍 discover('authentication'): {outcomes}")
outcomes = kg.discover("efficiency")
print(f"🔍 discover('efficiency'): {outcomes}")
outcomes = kg.discover("cache", roots_only=True)
print(f"🔍 discover('cache', roots_only=True): {outcomes}")
print("n" + "─" * 72)
print(" 3B · iwe tree — Doc Hierarchy")
print("─" * 72)
print()
print(kg.tree("project-index"))
print("n" + "─" * 72)
print(" 3C · iwe stats — Data Base Statistics")
print("─" * 72)
stats = kg.stats()
for okay, v in stats.gadgets():
print(f" {okay:>25s}: {v}")
print("n" + "─" * 72)
print(" 3D · iwe retrieve — Context-Conscious Retrieval")
print("─" * 72)
print("n📄 Retrieving 'authentication' with depth=1, context=1:n")
retrieved = kg.retrieve("authentication", depth=1, context=1)
print(retrieved[:800] + "n... (truncated)")
print("n" + "─" * 72)
print(" 3E · iwe squash — Mix Paperwork")
print("─" * 72)
squashed = kg.squash("project-index")
print(f"n📋 Squashed 'project-index': {len(squashed)} characters, "
f"{len(squashed.cut up())} phrases")
print("n" + "─" * 72)
print(" 3F · iwe export dot — Graph Visualization")
print("─" * 72)
dot_output = kg.export_dot(highlight_key="project-index")
print(f"n🎨 DOT output ({len(dot_output)} chars):n")
print(dot_output[:500] + "n...")
attempt:
import graphviz
src = graphviz.Supply(dot_output)
src.render("knowledge_graph", format="png", cleanup=True)
print("n✅ Graph rendered to 'knowledge_graph.png'")
attempt:
from IPython.show import Picture, show
show(Picture("knowledge_graph.png"))
besides ImportError:
print(" (Run in Colab/Jupyter to see the picture inline)")
besides Exception as e:
print(f" ⚠ Graphviz rendering skipped: {e}")
print("n✅ Part 3 full — all graph operations demonstrated.n")We instantiate the KnowledgeGraph and populate it with eight interconnected markdown paperwork that type a practical developer data base, spanning authentication, database design, API design, frontend, deployment, caching, and efficiency, all organized beneath a Map of Content material entry level, precisely as we might construction notes in IWE. We then train each graph operation towards this information base: we search with discover, show the complete doc hierarchy with tree, pull statistics with stats, carry out context-aware retrieval that follows hyperlinks with retrieve, consolidate all the graph right into a single doc with squash, and export the construction as a DOT graph. We render the graph visually utilizing Graphviz and show it inline, giving us a transparent image of how all our notes join to one another.
print("─" * 72)
print(" 4 · AI-Powered Doc Transforms")
print("─" * 72)
def ai_transform(textual content: str, motion: str, context: str = "",
mannequin: str = "gpt-4o-mini") -> str:
prompts = {
"rewrite": (
"Rewrite the next textual content to enhance readability and readability. "
"Maintain the markdown formatting. Return ONLY the rewritten textual content."
),
"summarize": (
"Summarize the next textual content in 2-3 concise bullet factors. "
"Concentrate on the important thing selections and technical selections."
),
"develop": (
"Increase the next textual content with extra technical element and examples. "
"Maintain the identical construction and add depth."
),
"extract_todos": (
"Extract all actionable gadgets from this textual content and format them as "
"a markdown todo record. If there aren't any actionable gadgets, recommend "
"related subsequent steps primarily based on the content material."
),
"generate_links": (
"Analyze the next word and recommend associated subjects that ought to "
"be linked. Format as a markdown record of wiki-links: [[topic-name]]. "
"Solely recommend subjects which might be genuinely associated."
),
}
system_msg = prompts.get(motion, prompts["rewrite"])
if context:
system_msg += f"nnDocument context:n{context[:500]}"
messages = [
{"role": "system", "content": system_msg},
{"role": "user", "content": text},
]
response = shopper.chat.completions.create(
mannequin=mannequin, messages=messages, temperature=0.3, max_tokens=1000,
)
return response.selections[0].message.content material.strip()
auth_doc = kg.get("authentication")
print("n🔄 Rework: SUMMARIZE — Authentication Systemn")
abstract = ai_transform(auth_doc.raw_content, "summarize")
print(abstract)
print("nn🔗 Rework: GENERATE_LINKS — Authentication Systemn")
hyperlinks = ai_transform(auth_doc.raw_content, "generate_links")
print(hyperlinks)
print("nn✅ Rework: EXTRACT_TODOS — Efficiency Notesn")
perf_doc = kg.get("performance-notes")
todos = ai_transform(perf_doc.raw_content, "extract_todos")
print(todos)
print("n✅ Part 4 full — AI transforms demonstrated.n")We outline the ai_transform perform that mirrors IWE’s config.toml motion system, supporting 5 rework varieties: rewrite, summarize, develop, extract_todos, and generate_links, every backed by a tailor-made system immediate despatched to OpenAI. We run three stay demonstrations towards our data base: we summarize the Authentication System doc into concise bullet factors, analyze it for recommended wiki hyperlinks to associated subjects, and extract actionable to-do gadgets from the Efficiency Notes doc. We see how IWE’s AI motion sample, deciding on a doc, selecting a rework, and making use of it in-place, interprets immediately right into a reusable Python perform that works with any word in our graph.
print("─" * 72)
print(" 5 · Agentic RAG — AI Navigates Your Data Graph")
print("─" * 72)
AGENT_TOOLS = [
{
"type": "function",
"function": {
"name": "iwe_find",
"description": "Search the knowledge graph for documents matching a query. Returns a list of document keys.",
"parameters": {
"type": "object",
"properties": {
"query": {"type": "string", "description": "Search query"},
"roots_only": {"type": "boolean", "description": "Only return root/MOC documents", "default": False},
},
"required": ["query"],
},
},
},
{
"kind": "perform",
"perform": {
"title": "iwe_retrieve",
"description": "Retrieve a doc's content material with linked context. Use depth>0 to comply with outgoing hyperlinks, context>0 to incorporate father or mother paperwork.",
"parameters": {
"kind": "object",
"properties": {
"key": {"kind": "string", "description": "Doc key to retrieve"},
"depth": {"kind": "integer", "description": "What number of ranges of kid hyperlinks to comply with (0-2)", "default": 1},
"context": {"kind": "integer", "description": "What number of ranges of father or mother context (0-1)", "default": 0},
},
"required": ["key"],
},
},
},
{
"kind": "perform",
"perform": {
"title": "iwe_tree",
"description": "Present the doc hierarchy ranging from a given key.",
"parameters": {
"kind": "object",
"properties": {
"key": {"kind": "string", "description": "Root doc key"},
},
"required": ["key"],
},
},
},
{
"kind": "perform",
"perform": {
"title": "iwe_stats",
"description": "Get statistics about all the data base.",
"parameters": {"kind": "object", "properties": {}},
},
},
]
def execute_tool(name: str, args: dict) -> str:
    if name == "iwe_find":
        results = kg.find(args["query"], roots_only=args.get("roots_only", False))
        return json.dumps({"results": results})
    elif name == "iwe_retrieve":
        content = kg.retrieve(
            args["key"],
            depth=args.get("depth", 1),
            context=args.get("context", 0),
        )
        return content[:3000]  # keep tool output within a modest token budget
    elif name == "iwe_tree":
        return kg.tree(args["key"])
    elif name == "iwe_stats":
        return json.dumps(kg.stats(), indent=2)
    return "Unknown tool"
def run_agent(question: str, max_turns: int = 6, model: str = "gpt-4o-mini") -> str:
    system_prompt = textwrap.dedent("""
        You are an AI assistant with access to a personal knowledge graph (IWE).
        Use the provided tools to navigate the graph and answer questions.
        Workflow:
        1. Use iwe_find to locate relevant documents
        2. Use iwe_retrieve to read content (set depth=1 to follow links)
        3. Follow relationships to build a comprehensive understanding
        4. Synthesize information from multiple documents
        Be specific and cite which documents you found information in.
        If you cannot find enough information, say so clearly.
    """)
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": question},
    ]
    for turn in range(max_turns):
        response = client.chat.completions.create(
            model=model, messages=messages, tools=AGENT_TOOLS,
            tool_choice="auto",
        )
        msg = response.choices[0].message
        if msg.tool_calls:
            messages.append(msg)
            for tc in msg.tool_calls:
                fn_name = tc.function.name
                fn_args = json.loads(tc.function.arguments)
                print(f" 🔧 Agent calls: {fn_name}({fn_args})")
                result = execute_tool(fn_name, fn_args)
                messages.append({
                    "role": "tool",
                    "tool_call_id": tc.id,
                    "content": result,
                })
        else:
            return msg.content
    return "Agent reached maximum turns without completing."
questions = [
    "How does our authentication system work, and what database tables does it depend on?",
    "What is our deployment pipeline, and what are the performance SLO targets?",
    "Give me a high-level overview of the entire project architecture.",
]
for i, q in enumerate(questions, 1):
    print(f"\n{'═' * 72}")
    print(f" Question {i}: {q}")
    print(f"{'═' * 72}\n")
    answer = run_agent(q)
    print(f"\n💡 Agent Answer:\n{answer}\n")

print("\n✅ Section 5 complete — Agentic RAG demonstrated.\n")

We build the full agentic retrieval pipeline that embodies IWE's "Context Bridge" idea: an AI agent that navigates our knowledge graph using OpenAI function calling with four tools: iwe_find for discovery, iwe_retrieve for context-aware content fetching, iwe_tree for hierarchy exploration, and iwe_stats for knowledge-base analytics. We wire up the tool executor that dispatches each function call to our KnowledgeGraph instance, and we implement the agent loop that iterates through search-retrieve-synthesize cycles until it assembles a complete answer. We then run three progressively complex demo questions, asking about authentication dependencies, deployment and SLO targets, and a full project architecture overview, and watch the agent autonomously call tools, follow links between documents, and produce comprehensive answers grounded in our notes.
print("─" * 72)
print(" 6 · AI-Powered Data Graph Upkeep")
print("─" * 72)
def analyze_knowledge_gaps(mannequin: str = "gpt-4o-mini") -> str:
stats_info = json.dumps(kg.stats(), indent=2)
titles = [f"- {d.title} ({k}): links to {d.outgoing_links}"
for k, d in kg.documents.items()]
graph_overview = "n".be part of(titles)
response = shopper.chat.completions.create(
mannequin=mannequin,
messages=[
{"role": "system", "content": (
"You are a knowledge management consultant. Analyze this "
"knowledge graph and identify: (1) missing topics that should "
"exist, (2) documents that should be linked but aren't, "
"(3) areas that need more detail. Be specific and actionable."
)},
{"role": "user", "content": (
f"Knowledge base stats:n{stats_info}nn"
f"Document structure:n{graph_overview}"
)},
],
temperature=0.4, max_tokens=1000,
)
return response.selections[0].message.content material.strip()
def generate_new_note(topic: str, related_keys: list[str],
                      model: str = "gpt-4o-mini") -> str:
    context_parts = []
    for key in related_keys[:3]:
        doc = kg.get(key)
        if doc:
            context_parts.append(f"## {doc.title}\n{doc.raw_content[:400]}")
    context = "\n\n".join(context_parts)
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": (
                "You are a technical writer. Generate a new markdown note "
                "about the given topic. Use wiki-links [[like-this]] to "
                "reference related existing documents. Include relevant "
                "headers, code examples where appropriate, and hashtag tags."
            )},
            {"role": "user", "content": (
                f"Topic: {topic}\n\n"
                f"Related existing notes for context:\n{context}\n\n"
                f"Available documents to link to: {list(kg.documents.keys())}"
            )},
        ],
        temperature=0.5, max_tokens=1200,
    )
    return response.choices[0].message.content.strip()
print("n🔍 Analyzing data gaps...n")
gaps = analyze_knowledge_gaps()
print(gaps)
print("nn📝 Producing a brand new word: 'Error Dealing with Technique'...n")
new_note = generate_new_note(
"Error Dealing with Technique",
related_keys=["api-design", "authentication", "frontend-stack"],
)
print(new_note[:1000] + "n... (truncated)")
kg.add_document("error-handling", new_note)
print(f"n✅ Added 'error-handling' to data graph. "
f"Complete paperwork: {len(kg.paperwork)}")
dot_output = kg.export_dot(highlight_key="error-handling")
attempt:
import graphviz
src = graphviz.Supply(dot_output)
src.render("knowledge_graph_v2", format="png", cleanup=True)
print("✅ Up to date graph rendered to 'knowledge_graph_v2.png'")
attempt:
from IPython.show import Picture, show
show(Picture("knowledge_graph_v2.png"))
besides ImportError:
move
besides Exception as e:
print(f" ⚠ Graphviz rendering skipped: {e}")
print("n✅ Part 6 full — AI-powered upkeep demonstrated.n")
print("─" * 72)
print(" 7 · Multi-Hop Reasoning Throughout the Data Graph")
print("─" * 72)
complex_question = (
"If we enhance our site visitors from 1000 RPS to 5000 RPS sustained, "
"what modifications could be wanted throughout all the stack — from database "
"connection pooling, to caching, to authentication token dealing with, "
"to deployment infrastructure?"
)
print(f"n🧠 Complicated multi-hop query:n {complex_question}n")
reply = run_agent(complex_question, max_turns=8)
print(f"n💡 Agent Reply:n{reply}")
print("nn" + "=" * 72)
print(" ✅ TUTORIAL COMPLETE")
print("=" * 72)
print("""
You've got explored all of the core ideas of IWE:
1. Data Graph — Paperwork as nodes, hyperlinks as edges
2. Markdown Parsing — Wiki-links, headers, tags
3. Maps of Content material — Hierarchical organisation (MOC)
4. Graph Operations — discover, retrieve, tree, squash, stats, export
5. AI Transforms — Rewrite, summarize, develop, extract todos
6. Agentic Retrieval — AI agent navigating your data graph
7. Graph Upkeep — AI-powered hole evaluation and word technology
8. Multi-Hop Reasoning — Cross-document synthesis
To make use of IWE for actual (along with your editor):
→ https://github.com/iwe-org/iwe
→ https://iwe.md/quick-start
IWE helps VS Code, Neovim, Zed, and Helix through LSP.
""")We use AI to research our data graph for structural gaps, figuring out lacking subjects, unlinked paperwork, and areas that want extra depth. We then mechanically generate a brand new “Error Dealing with Technique” word that references current paperwork through wiki hyperlinks and add it to the stay graph. We re-render the up to date Graphviz visualization, highlighting the brand new node to point out how the data base grows organically as AI and human contributions layer on prime of one another. We shut with a posh multi-hop reasoning problem, asking what modifications are wanted throughout all the stack if we scale from 1000 to 5000 RPS, the place the agent should traverse database, caching, authentication, and deployment paperwork to synthesize a cross-cutting reply that no single word may present alone.
In conclusion, we now have a complete, working implementation of IWE's core ideas running in a Colab environment. We have seen how structuring notes as a graph, rather than treating them as flat files, unlocks powerful capabilities: relationships become navigable paths, context flows naturally from parent to child documents, and AI agents can discover, traverse, and synthesize knowledge exactly as we organize it. We have built the full pipeline, from markdown parsing and backlink indexing to graph traversal operations, AI-powered document transforms, agentic retrieval with tool-calling, knowledge gap analysis, and multi-hop reasoning spanning the entire knowledge base. Everything we built here maps directly onto IWE's real features: the find, retrieve, tree, squash, and export commands; the config.toml AI actions; and the Context Bridge philosophy, which positions your personal knowledge graph as shared memory between you and your AI agents.
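Since every node already stores its raw markdown, bridging from this notebook to the real IWE is just file I/O. A minimal sketch (the iwe_notes directory name is our own choice; point your IWE-enabled editor at the resulting folder afterwards):

```python
# Sketch: persist the in-memory graph as plain markdown files on disk.
import pathlib
out = pathlib.Path("iwe_notes")
out.mkdir(exist_ok=True)
for key, doc in kg.documents.items():
    (out / f"{key}.md").write_text(doc.raw_content, encoding="utf-8")
print(f"Wrote {len(kg.documents)} markdown files to {out}/")
```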
Michal Sutter is a data science professional with a Master of Science in Data Science from the University of Padova. With a solid foundation in statistical analysis, machine learning, and data engineering, Michal excels at transforming complex datasets into actionable insights.
