"""Content generation v2 — struggle-first pipeline with style injection.
Triggered when a brief is approved. Builds prompt from brief + style reference,
calls phi4:14b on the Gaming PC, calculates struggle score, stores output.
"""
import json
import os
import re
from datetime import datetime, timezone
from typing import Optional
import requests
from .brief_service import update_generation_status, get_brief, calculate_struggle_score
from .style_loader import build_style_prompt
# Local LLM endpoint on the Gaming PC (via Tailscale); expects an Ollama-style /api/generate API
LLM_URL = os.getenv("LLM_URL", "http://100.104.147.116:11434/api/generate")
DEFAULT_MODEL = os.getenv("BLOG_GENERATION_MODEL", "phi4:14b")
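# The endpoint is assumed to speak an Ollama-style /api/generate protocol in
# non-streaming mode; this module only relies on the "response" field of the
# reply. A rough sketch of the exchange (field values illustrative):
#
#   POST /api/generate
#     {"model": "phi4:14b", "prompt": "...", "stream": false,
#      "options": {"temperature": 0.7, "num_predict": 2400}}
#   reply:
#     {"response": "# Title\n\n...", "done": true, ...}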
def build_generation_prompt(brief_id: str) -> tuple[str, dict]:
"""Build the v2 generation prompt from a brief.
Returns (prompt_string, brief_data_dict).
"""
brief = get_brief(brief_id)
# Build style injection
style_prompt = build_style_prompt(brief.style_reference)
# Build the core prompt
prompt = f"""You are a technical writer who blogs about homelab, infrastructure, and personal projects. You write in first person with honesty, humor, and specific details. Your voice is conversational but precise — like you're explaining a late-night debugging session to a friend over coffee.
{style_prompt}
Today's Topic: {brief.title}
## What Broke
{brief.struggle_angle}
## Origin Story
{brief.origin_story}
## Attempts
"""
for i, attempt in enumerate(brief.attempts, 1):
prompt += f"{i}. **{attempt.attempt}** — Why it failed: {attempt.why_failed}\n"
prompt += f"""
## The Moment
{brief.the_moment}
## The Fix
{brief.the_fix}
## Reflection
{brief.reflection}
## Instructions
Write a blog post of approximately {brief.target_length} words. Follow the structural template above. Use first person throughout. Include specific timestamps, names, and costs. Be honest about mistakes. End with a clear lesson.
Output only the article text in Markdown format. No preamble, no "Here's the article:", just the content starting with the title as an H1.
"""
return prompt, {
"title": brief.title,
"target_length": brief.target_length,
"style_reference": brief.style_reference,
}
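# Illustrative usage (the brief id and field values are hypothetical):
#
#   prompt, meta = build_generation_prompt("brief-2025-01-nas-rebuild")
#   # meta -> {"title": "...", "target_length": 1200, "style_reference": "..."}
#   # prompt contains the voice preamble, the injected style block, then the
#   # struggle sections in the order laid out above.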
def generate_content(brief_id: str, model: Optional[str] = None) -> dict:
"""Generate content for an approved brief.
Returns dict with success status, content, score, and metadata.
"""
brief = get_brief(brief_id)
if brief.status != "approved":
return {"success": False, "error": f"Brief status is '{brief.status}', expected 'approved'"}
# Set generating status
update_generation_status(brief_id, "generating")
try:
# Build prompt
prompt, meta = build_generation_prompt(brief_id)
# Call local LLM
payload = {
"model": model or DEFAULT_MODEL,
"prompt": prompt,
"stream": False,
"options": {
"temperature": 0.7,
"num_predict": brief.target_length * 2, # ~2 tokens per word, generous
"stop": ["\n\n---", "## End"],
},
}
resp = requests.post(LLM_URL, json=payload, timeout=300)
resp.raise_for_status()
result = resp.json()
content = result.get("response", "").strip()
# Clean up common LLM artifacts
content = _clean_generation_output(content, brief.title)
# Calculate struggle score
score_result = calculate_struggle_score(content)
struggle_score = score_result["score"]
# Convert to HTML (simple Markdown → HTML)
content_html = _md_to_html(content)
# Save to brief
update_generation_status(brief_id, "completed", content=content, html=content_html, struggle_score=struggle_score)
return {
"success": True,
"brief_id": brief_id,
"content_length": len(content),
"struggle_score": struggle_score,
"interpretation": score_result["interpretation"],
"checks": score_result["checks"],
}
except Exception as e:
# Revert to approved status on error
update_generation_status(brief_id, "approved")
return {"success": False, "error": str(e)}
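# Shape of the returned dict, roughly (values illustrative; the exact keys of
# "checks" come from calculate_struggle_score in brief_service and are not
# assumed here):
#
#   {"success": True, "brief_id": "...", "content_length": 6200,
#    "struggle_score": 7, "interpretation": "...", "checks": {...}}
#
# or on failure:
#
#   {"success": False, "error": "Brief status is 'draft', expected 'approved'"}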
def _clean_generation_output(content: str, title: str) -> str:
"""Clean up common LLM generation artifacts."""
# Remove markdown code fences if present
content = re.sub(r'^```(?:markdown)?\s*', '', content)
content = re.sub(r'\s*```\s*$', '', content)
# Ensure title is H1
if not content.startswith('# '):
content = f"# {title}\n\n{content}"
# Remove "Title:" prefix if LLM added one
content = re.sub(r'^#\s*Title:\s*', '# ', content, count=1)
return content.strip()
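# Example of the cleanup on a typical artifact-laden response (input is
# illustrative):
#
#   raw = "```markdown\n# Title: Why My NAS Ate Itself\n\nIt started at 2am...\n```"
#   _clean_generation_output(raw, "Why My NAS Ate Itself")
#   # -> "# Why My NAS Ate Itself\n\nIt started at 2am..."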
def _md_to_html(content: str) -> str:
"""Simple Markdown to HTML conversion.
Uses regex — sufficient for blog content. No external deps.
"""
html = content
# Protect fenced code blocks and inline code first so the header, emphasis,
# blockquote, and link regexes below can't rewrite their contents.
protected = []

def _stash(fragment: str) -> str:
    protected.append(fragment)
    return f"\x00{len(protected) - 1}\x00"

html = re.sub(r'```(\w+)?\n(.*?)```',
              lambda m: _stash(f'<pre><code>{m.group(2)}</code></pre>'),
              html, flags=re.DOTALL)
html = re.sub(r'`(.+?)`', lambda m: _stash(f'<code>{m.group(1)}</code>'), html)
# Headers
html = re.sub(r'^### (.+)$', r'<h3>\1</h3>', html, flags=re.MULTILINE)
html = re.sub(r'^## (.+)$', r'<h2>\1</h2>', html, flags=re.MULTILINE)
html = re.sub(r'^# (.+)$', r'<h1>\1</h1>', html, flags=re.MULTILINE)
# Bold/italic
html = re.sub(r'\*\*\*(.+?)\*\*\*', r'<strong><em>\1</em></strong>', html)
html = re.sub(r'\*\*(.+?)\*\*', r'<strong>\1</strong>', html)
html = re.sub(r'\*(.+?)\*', r'<em>\1</em>', html)
# Blockquotes
html = re.sub(r'^> (.+)$', r'<blockquote>\1</blockquote>', html, flags=re.MULTILINE)
# Links
html = re.sub(r'\[(.+?)\]\((.+?)\)', r'<a href="\2">\1</a>', html)
# Restore code fragments before paragraph wrapping so <pre> blocks keep their
# whitespace and are treated as block elements below.
for i, fragment in enumerate(protected):
    html = html.replace(f"\x00{i}\x00", fragment)
# Paragraphs (simple: split on double newlines, wrap in <p>)
paragraphs = html.split('\n\n')
wrapped = []
for p in paragraphs:
p = p.strip()
if not p:
continue
# Skip if already wrapped in a block element
if p.startswith('<'):
wrapped.append(p)
else:
# Handle line breaks within paragraphs
p = p.replace('\n', '<br>')
wrapped.append(f'<p>{p}</p>')
return '\n'.join(wrapped)
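# Rough example of the conversion (input is hypothetical blog Markdown):
#
#   _md_to_html("## The Fix\n\nSwap the **bad** SATA cable:\n\n```bash\nsmartctl -a /dev/sda\n```")
#   # -> '<h2>The Fix</h2>\n'
#   #    '<p>Swap the <strong>bad</strong> SATA cable:</p>\n'
#   #    '<pre><code>smartctl -a /dev/sda\n</code></pre>'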
# ─── Async wrapper for background generation ───────────────────────────────
def trigger_generation(brief_id: str, model: Optional[str] = None) -> dict:
"""Trigger content generation and notify on completion.
This is designed to be called synchronously in the request handler
but could be moved to a background task in production.
"""
from .notifications import on_generation_complete
result = generate_content(brief_id, model=model)
if result["success"]:
brief = get_brief(brief_id)
output_url = f"https://notes.hoffdesk.com/admin/content/briefs/{brief_id}/output"
on_generation_complete(brief_id, brief.title, result["struggle_score"], output_url)
return result
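# Illustrative call site (the route decorator, router object, and handler name
# are hypothetical and not part of this module):
#
#   @router.post("/admin/content/briefs/{brief_id}/generate")
#   def generate_endpoint(brief_id: str):
#       return trigger_generation(brief_id)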