#!/usr/bin/env python3
"""
Test script for LocalAI Content Engine blog generation.
Uses phi4:14b on Gaming PC via Tailscale.
"""
import httpx
import json
import sys
from datetime import datetime
# Gaming PC Ollama endpoint
OLLAMA_URL = "http://matt-pc.tail864e81.ts.net:11434/api/generate"  # Ollama REST /api/generate endpoint, reached over Tailscale
MODEL = "phi4:14b"  # model tag as pulled in Ollama on the Gaming PC
# Sovereign Operator voice — banned words
# Generic "AI voice" filler vocabulary the model is told never to use.
# Interpolated into SYSTEM_PROMPT below at import time.
BANNED_WORDS = [
    "delve",
    "tapestry",
    "moreover",
    "leveraging",
    "holistic",
    "paradigm",
    "synergy",
    "unlock",
    "potential",
    "seamless",
    "robust",
    "streamline",
    "utilize",
    "embark",
    "journey",
    "transformative",
    "cutting-edge",
    "innovative",
    "groundbreaking",
]
# Voice/style instructions prepended to every generation request.
# The banned-word list is baked in once at import time via .format(),
# so SYSTEM_PROMPT is a plain str by the time generate_content() uses it.
SYSTEM_PROMPT = """You are a technical writer with a sovereign, pragmatic voice.
Your style is direct, specific, and human. You write about home infrastructure,
AI systems, and the messy reality of building things.
Rules:
- Start with the human moment, not the solution
- Admit wrong turns before revealing fixes
- Use specific versions, error messages, timestamps
- End with actionable takeaways
- NEVER use these words: {banned}
- Write like a competent engineer talking to another engineer
""".format(banned=", ".join(BANNED_WORDS))
def generate_content(prompt: str, max_tokens: int = 2000) -> str:
    """Send a prompt to the Ollama endpoint and return the generated text.

    Args:
        prompt: Task-specific instructions appended to the system prompt.
        max_tokens: Upper bound on tokens the model may generate
            (Ollama's ``num_predict`` option).

    Returns:
        The raw text from the model's ``response`` field.

    Raises:
        httpx.HTTPStatusError: If the server replies with a 4xx/5xx status.
    """
    composed = f"{SYSTEM_PROMPT}\n\n{prompt}\n\nWrite 800-1200 words. Be specific. No fluff."
    payload = {
        "model": MODEL,
        "prompt": composed,
        "stream": False,  # single JSON reply, not a token stream
        "options": {
            "temperature": 0.7,
            "num_predict": max_tokens,
            "top_p": 0.9,
        },
    }
    # Generous timeout: a 14B model on consumer hardware can take minutes.
    reply = httpx.post(OLLAMA_URL, json=payload, timeout=300.0)
    reply.raise_for_status()
    return reply.json()["response"]
def generate_titles(topic: str) -> list:
    """Generate 3-5 blog-post title options for *topic*.

    Asks the model for a JSON array of strings. If the reply is not
    valid JSON (or not a list), falls back to parsing one title per line.

    Args:
        topic: Subject of the blog post.

    Returns:
        A list of up to 5 candidate titles (possibly empty).
    """
    prompt = f"""Topic: {topic}
Generate 5 title options for a blog post. Mix of styles:
- Direct/how-to
- Story-based
- Specific technical angle
Return ONLY a JSON array of strings. No markdown, no explanation.
"""
    response = generate_content(prompt, max_tokens=500)
    # Models often wrap JSON in prose/markdown; extract the outermost array.
    start = response.find("[")
    end = response.rfind("]") + 1
    if start >= 0 and end > start:
        try:
            titles = json.loads(response[start:end])
            # Guard against valid JSON that isn't a list (e.g. an object).
            if isinstance(titles, list):
                return titles
        except json.JSONDecodeError:
            # Was a bare `except:` — narrowed so real errors still surface.
            pass
    # Fallback: treat each non-empty line as a candidate title, stripping
    # common bullet/quote decoration and skipping stray JSON fragments.
    lines = [l.strip("- •\"' ") for l in response.split("\n") if l.strip()]
    return [l for l in lines if l and not l.startswith("{")][:5]
def generate_draft(title: str, outline: list) -> str:
    """Produce a full blog-post draft for *title* following *outline*.

    Args:
        title: The chosen post title.
        outline: Ordered outline points, one string each.

    Returns:
        The generated draft text.
    """
    bullets = "\n".join(f"- {point}" for point in outline)
    prompt = f"""Title: {title}
Outline:
{bullets}
Write the full blog post following the outline. Match the voice rules.
Include specific details, error messages, timestamps where relevant.
"""
    # Drafts are the longest output; allow a bigger token budget.
    return generate_content(prompt, max_tokens=4000)
def generate_seo(content: str) -> dict:
    """Generate SEO metadata for a blog post.

    Args:
        content: The full draft text; only the first 500 chars are sent
            to the model.

    Returns:
        A dict with keys ``excerpt``, ``tags``, ``meta_description``.
        Falls back to content-derived defaults if the model's JSON is
        missing or unusable.
    """
    prompt = f"""Given this blog post content, generate:
- A 1-2 sentence excerpt/summary
- 5-7 relevant tags (comma-separated)
- A meta description (under 160 characters)
Content excerpt (first 500 chars):
{content[:500]}
Return ONLY JSON in this format:
{{"excerpt": "...", "tags": "tag1, tag2, tag3", "meta_description": "..."}}
"""
    response = generate_content(prompt, max_tokens=500)
    # Extract the outermost JSON object from a possibly prose-wrapped reply.
    start = response.find("{")
    end = response.rfind("}") + 1
    if start >= 0 and end > start:
        try:
            seo = json.loads(response[start:end])
            # Guard against valid JSON that isn't an object.
            if isinstance(seo, dict):
                return seo
        except json.JSONDecodeError:
            # Was a bare `except:` — narrowed so real errors still surface.
            pass
    # Fallback: derive passable metadata directly from the content.
    return {
        "excerpt": "Blog post about " + content[:50],
        "tags": "home-lab, ai, infrastructure",
        "meta_description": content[:160],
    }
def test_pipeline():
    """Run a full content-generation smoke test and save the results.

    Exercises title generation, draft generation, and SEO metadata
    generation in sequence, printing progress to stdout and writing a
    JSON summary of the run to the workspace. Each stage degrades
    gracefully: a failed stage is reported and the test continues with
    fallback data where possible.
    """
    print("=" * 60)
    print("LOCALAI CONTENT ENGINE TEST")
    print(f"Model: {MODEL}")
    print(f"Time: {datetime.now().isoformat()}")
    print("=" * 60)

    # Test 1: Title Generation
    print("\n📝 TEST 1: Title Generation")
    print("-" * 40)
    topic = "How I migrated from Google Calendar to a self-hosted CalDAV server"
    print(f"Topic: {topic}")
    try:
        titles = generate_titles(topic)
        print(f"\nGenerated {len(titles)} titles:")
        for i, title in enumerate(titles, 1):
            print(f" {i}. {title}")
    except Exception as e:
        print(f"❌ Title generation failed: {e}")
        titles = []

    # Test 2: Draft Generation
    print("\n📝 TEST 2: Draft Generation")
    print("-" * 40)
    if titles:
        title = titles[0]
    else:
        # Hard-coded fallback title so the draft stage can still run.
        title = "The Night I Broke DNS: A CalDAV Migration Story"
    outline = [
        "The trigger: Aundrea asks why the family calendar is empty",
        "The Google Calendar dependency audit",
        "Choosing Radicale over NextCloud (simplicity wins)",
        "The migration: icalendar + caldav Python scripts",
        "Setting up Cloudflare Email Worker for webhook parsing",
        "The moment it worked: seeing events sync to phones",
        "What I'd do differently next time"
    ]
    print(f"Title: {title}")
    print(f"Outline: {len(outline)} points")
    try:
        start = datetime.now()
        draft = generate_draft(title, outline)
        elapsed = (datetime.now() - start).total_seconds()
        print(f"\n✅ Draft generated in {elapsed:.1f}s")
        print(f"Length: {len(draft)} chars, ~{len(draft.split())} words")
        print("\n--- DRAFT PREVIEW (first 800 chars) ---")
        print(draft[:800])
        print("...")
        print("\n--- END PREVIEW ---")

        # Test 3: SEO Generation
        print("\n📝 TEST 3: SEO Metadata Generation")
        print("-" * 40)
        # Pre-initialize so the saved output never depends on whether the
        # SEO stage succeeded (replaces the fragile `'seo' in dir()` probe).
        seo = {}
        try:
            seo = generate_seo(draft)
            print(f"Excerpt: {seo.get('excerpt', 'N/A')}")
            print(f"Tags: {seo.get('tags', 'N/A')}")
            print(f"Meta: {seo.get('meta_description', 'N/A')}")
        except Exception as e:
            print(f"❌ SEO generation failed: {e}")

        # Save output
        output = {
            "title": title,
            "titles": titles,
            "outline": outline,
            "draft": draft,
            "seo": seo,
            "metadata": {
                "model": MODEL,
                "generated_at": datetime.now().isoformat(),
                "elapsed_seconds": elapsed,
                "draft_length": len(draft),
                "word_count": len(draft.split())
            }
        }
        output_file = f"/home/hoffmann_admin/.openclaw/workspace-socrates/test_output_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
        with open(output_file, 'w') as f:
            json.dump(output, f, indent=2)
        print(f"\n💾 Full output saved to: {output_file}")
    except Exception as e:
        print(f"❌ Draft generation failed: {e}")
        import traceback
        traceback.print_exc()

    print("\n" + "=" * 60)
    print("TEST COMPLETE")
    print("=" * 60)
# Entry-point guard: the original `if name == "main":` raised NameError at
# runtime (missing dunder underscores); restored to the standard idiom.
if __name__ == "__main__":
    test_pipeline()