site11/backup-services/ai-writer/backend/app/main.py
Commit 070032006e by jungwoo choi
feat: Implement async queue-based news pipeline with microservices
Major architectural transformation from synchronous to asynchronous processing:

## Pipeline Services (8 microservices)
- pipeline-scheduler: APScheduler for 30-minute periodic job triggers (sketch after this list)
- pipeline-rss-collector: RSS feed collection with deduplication (7-day TTL)
- pipeline-google-search: Content enrichment via Google Search API
- pipeline-ai-summarizer: AI summarization using Claude API (claude-sonnet-4-20250514)
- pipeline-translator: Translation using DeepL Pro API
- pipeline-image-generator: Image generation with Replicate API (Stable Diffusion)
- pipeline-article-assembly: Final article assembly and MongoDB storage
- pipeline-monitor: Real-time monitoring dashboard (port 8100)

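A minimal sketch of the 30-minute trigger in pipeline-scheduler; the logger name and the `trigger_collection` job body are illustrative assumptions, not the actual service code:

```python
# Hypothetical sketch of the pipeline-scheduler trigger (not the service's real code).
import asyncio
import logging

from apscheduler.schedulers.asyncio import AsyncIOScheduler

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("pipeline-scheduler")

async def trigger_collection() -> None:
    # In the real service this step would enqueue a collection job for
    # pipeline-rss-collector via the shared Redis queue manager.
    logger.info("Triggering RSS collection cycle")

async def main() -> None:
    scheduler = AsyncIOScheduler()
    # Fire every 30 minutes, matching the pipeline-scheduler interval.
    scheduler.add_job(trigger_collection, "interval", minutes=30)
    scheduler.start()
    await asyncio.Event().wait()  # keep the process alive

if __name__ == "__main__":
    asyncio.run(main())
```
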
## Key Features
- Redis-based job queue with deduplication (sketch after this list)
- Asynchronous processing with Python asyncio
- Shared models and queue manager for inter-service communication
- Docker containerization for all services
- Container names standardized with site11_ prefix

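A minimal sketch of the deduplication idea behind the Redis job queue, assuming jobs are keyed by a hash of their RSS link and that dedup keys expire after the 7-day TTL; the key layout and the `enqueue_if_new` helper are illustrative, not the shared queue manager's actual API:

```python
# Hypothetical dedup sketch (illustrative key names, not the shared queue manager's API).
import hashlib
import json

import redis.asyncio as redis

DEDUP_TTL_SECONDS = 7 * 24 * 60 * 60  # 7-day deduplication window

async def enqueue_if_new(client: redis.Redis, queue: str, item: dict) -> bool:
    """Enqueue the item unless the same rss_link was seen within the TTL."""
    digest = hashlib.sha256(item["rss_link"].encode()).hexdigest()
    dedup_key = f"dedup:{queue}:{digest}"
    # SET with NX and EX only succeeds when the key does not already exist.
    is_new = await client.set(dedup_key, "1", nx=True, ex=DEDUP_TTL_SECONDS)
    if is_new:
        await client.lpush(queue, json.dumps(item))
    return bool(is_new)
```

Under these assumptions, enqueueing the same `rss_link` twice within seven days adds the job only once; the second call returns False.
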
## Removed Services
- Moved to backup: google-search, rss-feed, news-aggregator, ai-writer

## Configuration
- DeepL Pro API: 3abbc796-2515-44a8-972d-22dcf27ab54a
- Claude Model: claude-sonnet-4-20250514
- Redis Queue TTL: 7 days for deduplication

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-09-13 19:22:14 +09:00


"""
AI Writer Service
Professional news article generation service powered by the Claude API
"""
from fastapi import FastAPI, HTTPException, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from typing import List, Dict, Any, Optional
from datetime import datetime
from pydantic import BaseModel, Field
import httpx
import asyncio
import logging
import json
import uuid
from anthropic import AsyncAnthropic
import os
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = FastAPI(
title="AI Writer Service",
description="Claude API를 사용한 전문적인 뉴스 기사 생성 서비스",
version="1.0.0"
)
# CORS configuration
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Configuration
NEWS_AGGREGATOR_URL = os.getenv("NEWS_AGGREGATOR_URL", "http://news-aggregator-backend:8000")
CLAUDE_API_KEY = os.getenv("CLAUDE_API_KEY", "")  # must be supplied via environment; no hardcoded fallback
MONGODB_URL = os.getenv("MONGODB_URL", "mongodb://mongodb:27017")
DB_NAME = os.getenv("DB_NAME", "ai_writer_db")
# Claude client
claude_client = AsyncAnthropic(api_key=CLAUDE_API_KEY)
# HTTP Client
http_client = httpx.AsyncClient(timeout=120.0)
# Queue Manager
from app.queue_manager import RedisQueueManager
from app.queue_models import NewsJobData, JobResult, JobStatus, QueueStats
queue_manager = RedisQueueManager(
redis_url=os.getenv("REDIS_URL", "redis://redis:6379")
)
# MongoDB client (optional for storing generated articles)
from motor.motor_asyncio import AsyncIOMotorClient
mongo_client = None
db = None
# Data Models
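# GeneratedArticle is the top-level output model; it nests SubTopic sections,
# a NewsEntities block (people, organizations, groups, countries, events, keywords)
# and the NewsSource references collected while the Claude prompt is assembled.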
class NewsSource(BaseModel):
"""참고한 뉴스 소스 정보"""
title: str = Field(..., description="뉴스 제목")
url: str = Field(..., description="뉴스 URL")
published_date: Optional[str] = Field(None, description="발행일")
source_site: Optional[str] = Field(None, description="출처 사이트")
class SubTopic(BaseModel):
"""기사 소주제"""
title: str = Field(..., description="소주제 제목")
content: List[str] = Field(..., description="소주제 내용 (문단 리스트)", min_items=1, max_items=10)
class Event(BaseModel):
"""이벤트 정보"""
name: str = Field(..., description="이벤트명")
date: Optional[str] = Field(None, description="일시")
location: Optional[str] = Field(None, description="장소")
class NewsEntities(BaseModel):
"""뉴스에 포함된 개체들"""
people: List[str] = Field(default_factory=list, description="뉴스에 포함된 인물")
organizations: List[str] = Field(default_factory=list, description="뉴스에 포함된 기관")
groups: List[str] = Field(default_factory=list, description="뉴스에 포함된 단체")
countries: List[str] = Field(default_factory=list, description="뉴스에 포함된 나라")
events: List[Event] = Field(default_factory=list, description="뉴스에 포함된 일정/이벤트 (일시와 장소 포함)")
keywords: List[str] = Field(default_factory=list, description="핵심 키워드 (최대 10개)", max_items=10)
class GeneratedArticle(BaseModel):
"""생성된 기사"""
news_id: str = Field(..., description="뉴스 아이디")
title: str = Field(..., description="뉴스 제목")
created_at: str = Field(..., description="생성년월일시분초")
summary: str = Field(..., description="한 줄 요약")
subtopics: List[SubTopic] = Field(..., description="소주제 리스트", min_items=2, max_items=6)
categories: List[str] = Field(..., description="카테고리 리스트")
entities: NewsEntities = Field(..., description="뉴스에 포함된 개체들")
source_keyword: Optional[str] = Field(None, description="원본 검색 키워드")
source_count: Optional[int] = Field(None, description="참조한 소스 수")
sources: List[NewsSource] = Field(default_factory=list, description="참고한 뉴스 소스 목록")
class ArticleGenerationRequest(BaseModel):
"""기사 생성 요청"""
keyword: str = Field(..., description="검색 키워드")
limit: int = Field(5, description="처리할 RSS 항목 수", ge=1, le=20)
google_results_per_title: int = Field(3, description="각 제목당 구글 검색 결과 수", ge=1, le=10)
lang: str = Field("ko", description="언어 코드")
country: str = Field("KR", description="국가 코드")
style: str = Field("professional", description="기사 스타일 (professional/analytical/investigative)")
class PerItemGenerationRequest(BaseModel):
"""개별 아이템별 기사 생성 요청"""
keyword: str = Field(..., description="검색 키워드")
limit: Optional[int] = Field(None, description="처리할 RSS 항목 수 (None이면 전체)")
google_results_per_title: int = Field(3, description="각 제목당 구글 검색 결과 수", ge=1, le=10)
lang: str = Field("ko", description="언어 코드")
country: str = Field("KR", description="국가 코드")
style: str = Field("professional", description="기사 스타일 (professional/analytical/investigative)")
skip_existing: bool = Field(True, description="이미 생성된 기사는 건너뛰기")
@app.on_event("startup")
async def startup():
"""서비스 시작"""
global mongo_client, db
try:
mongo_client = AsyncIOMotorClient(MONGODB_URL)
db = mongo_client[DB_NAME]
logger.info("AI Writer Service starting...")
logger.info(f"Connected to MongoDB: {MONGODB_URL}")
# Connect to the Redis queue
await queue_manager.connect()
logger.info("Connected to Redis queue")
except Exception as e:
logger.error(f"Failed to connect to services: {e}")
@app.on_event("shutdown")
async def shutdown():
"""서비스 종료"""
await http_client.aclose()
if mongo_client:
mongo_client.close()
await queue_manager.disconnect()
logger.info("AI Writer Service stopped")
@app.get("/")
async def root():
return {
"service": "AI Writer Service",
"version": "1.0.0",
"description": "Claude API를 사용한 전문적인 뉴스 기사 생성 서비스",
"endpoints": {
"generate_article": "POST /api/generate",
"generate_per_item": "POST /api/generate/per-item",
"generate_from_aggregated": "POST /api/generate/from-aggregated",
"get_article": "GET /api/articles/{article_id}",
"list_articles": "GET /api/articles",
"health": "GET /health"
}
}
@app.get("/health")
async def health_check():
"""헬스 체크"""
try:
# Check News Aggregator service
aggregator_response = await http_client.get(f"{NEWS_AGGREGATOR_URL}/health")
aggregator_healthy = aggregator_response.status_code == 200
# Check MongoDB
mongo_healthy = False
if db is not None:
await db.command("ping")
mongo_healthy = True
return {
"status": "healthy" if (aggregator_healthy and mongo_healthy) else "degraded",
"services": {
"news_aggregator": "healthy" if aggregator_healthy else "unhealthy",
"mongodb": "healthy" if mongo_healthy else "unhealthy",
"claude_api": "configured"
},
"timestamp": datetime.now().isoformat()
}
except Exception as e:
return {
"status": "unhealthy",
"error": str(e),
"timestamp": datetime.now().isoformat()
}
async def generate_article_with_claude(news_data: Dict[str, Any], style: str = "professional") -> GeneratedArticle:
"""Claude API를 사용하여 기사 생성"""
# Collect source information
sources_info = []
# Prepare the prompt
system_prompt = """당신은 전문적인 한국 언론사의 수석 기자입니다.
제공된 데이터를 기반으로 깊이 있고 통찰력 있는 기사를 작성해야 합니다.
기사는 다음 요구사항을 충족해야 합니다:
1. 소주제는 최소 2개, 최대 6개로 구성해야 합니다
2. 각 소주제는 최소 1개, 최대 10개의 문단으로 구성해야 합니다
3. 전문적이고 객관적인 어조를 유지해야 합니다
4. 사실에 기반한 분석과 통찰을 제공해야 합니다
5. 한국 독자를 대상으로 작성되어야 합니다
6. 이벤트 정보는 가능한 일시와 장소를 포함해야 합니다
7. 핵심 키워드를 최대 10개까지 추출해야 합니다
반드시 다음 JSON 형식으로 응답하세요:
{
"title": "기사 제목",
"summary": "한 줄 요약 (100자 이내)",
"subtopics": [
{
"title": "소주제 제목",
"content": ["문단1", "문단2", ...] // 1-10개 문단
}
], // 2-6개 소주제
"categories": ["카테고리1", "카테고리2"],
"entities": {
"people": ["인물1", "인물2"],
"organizations": ["기관1", "기관2"],
"groups": ["단체1", "단체2"],
"countries": ["나라1", "나라2"],
"events": [
{
"name": "이벤트명",
"date": "2025년 1월 15일", // 선택사항
"location": "서울 코엑스" // 선택사항
}
],
"keywords": ["키워드1", "키워드2", ...] // 최대 10개
}
}"""
# Prepare news content for Claude and collect sources
news_content = []
for item in news_data.get("news_items", []):
# Add RSS source info
rss_title = item.get('rss_title', '')
rss_link = item.get('rss_link', '')
rss_published = item.get('rss_published', '')
if rss_title and rss_link:
sources_info.append(NewsSource(
title=rss_title,
url=rss_link,
published_date=rss_published,
source_site="RSS Feed"
))
item_text = f"제목: {rss_title}\n"
for result in item.get("google_results", []):
# Add Google search result sources
if "title" in result and "link" in result:
sources_info.append(NewsSource(
title=result.get('title', ''),
url=result.get('link', ''),
published_date=None,
source_site="Google Search"
))
if "full_content" in result and result["full_content"]:
content = result["full_content"]
if isinstance(content, dict):
item_text += f"출처: {content.get('url', '')}\n"
item_text += f"내용: {content.get('content', '')[:1000]}...\n\n"
else:
item_text += f"내용: {str(content)[:1000]}...\n\n"
news_content.append(item_text)
combined_content = "\n".join(news_content[:10]) # Limit to prevent token overflow
user_prompt = f"""다음 뉴스 데이터를 기반으로 종합적인 기사를 작성하세요:
키워드: {news_data.get('keyword', '')}
수집된 뉴스 수: {len(news_data.get('news_items', []))}
뉴스 내용:
{combined_content}
스타일: {style}
- professional: 전통적인 뉴스 기사 스타일
- analytical: 분석적이고 심층적인 스타일
- investigative: 탐사보도 스타일
위의 데이터를 종합하여 통찰력 있는 기사를 JSON 형식으로 작성해주세요."""
try:
# Call Claude API
response = await claude_client.messages.create(
model="claude-3-5-sonnet-20241022", # Latest Claude model
max_tokens=4000,
temperature=0.7,
system=system_prompt,
messages=[
{"role": "user", "content": user_prompt}
]
)
# Parse Claude's response
content = response.content[0].text
# Extract JSON from response
import re
json_match = re.search(r'\{.*\}', content, re.DOTALL)
if json_match:
article_data = json.loads(json_match.group())
else:
# If no JSON found, try to parse the entire content
article_data = json.loads(content)
# Create GeneratedArticle object
entities_data = article_data.get("entities", {})
events_data = entities_data.get("events", [])
# Parse events - handle both old string format and new object format
parsed_events = []
for event in events_data:
if isinstance(event, str):
# Old format: just event name as string
parsed_events.append(Event(name=event))
elif isinstance(event, dict):
# New format: event object with name, date, location
parsed_events.append(Event(
name=event.get("name", ""),
date=event.get("date"),
location=event.get("location")
))
article = GeneratedArticle(
news_id=str(uuid.uuid4()),
title=article_data.get("title", "제목 없음"),
created_at=datetime.now().isoformat(),
summary=article_data.get("summary", ""),
subtopics=[
SubTopic(
title=st.get("title", ""),
content=st.get("content", [])
) for st in article_data.get("subtopics", [])
],
categories=article_data.get("categories", []),
entities=NewsEntities(
people=entities_data.get("people", []),
organizations=entities_data.get("organizations", []),
groups=entities_data.get("groups", []),
countries=entities_data.get("countries", []),
events=parsed_events,
keywords=entities_data.get("keywords", [])
),
source_keyword=news_data.get("keyword"),
source_count=len(news_data.get("news_items", [])),
sources=sources_info
)
return article
except Exception as e:
logger.error(f"Error generating article with Claude: {e}")
raise HTTPException(status_code=500, detail=f"Failed to generate article: {str(e)}")
@app.post("/api/generate")
async def generate_article(request: ArticleGenerationRequest):
"""
Run the full pipeline from news collection to article generation:
RSS → Google Search → AI article generation.
Produces a single comprehensive article (legacy approach).
"""
try:
# Step 1: Get aggregated news from News Aggregator service
logger.info(f"Fetching aggregated news for keyword: {request.keyword}")
aggregator_response = await http_client.get(
f"{NEWS_AGGREGATOR_URL}/api/aggregate",
params={
"q": request.keyword,
"limit": request.limit,
"google_results_per_title": request.google_results_per_title,
"lang": request.lang,
"country": request.country
}
)
aggregator_response.raise_for_status()
news_data = aggregator_response.json()
if not news_data.get("news_items"):
raise HTTPException(status_code=404, detail="No news items found for the given keyword")
# Step 2: Generate article using Claude
logger.info(f"Generating article with Claude for {len(news_data['news_items'])} news items")
article = await generate_article_with_claude(news_data, request.style)
# Step 3: Store article in MongoDB (optional)
if db is not None:
try:
article_dict = article.dict()
await db.articles.insert_one(article_dict)
logger.info(f"Article saved with ID: {article.news_id}")
except Exception as e:
logger.error(f"Failed to save article to MongoDB: {e}")
return article
except httpx.HTTPStatusError as e:
logger.error(f"HTTP error from aggregator service: {e}")
raise HTTPException(status_code=e.response.status_code, detail=str(e))
except Exception as e:
logger.error(f"Error in generate_article: {e}")
raise HTTPException(status_code=500, detail=str(e))
@app.post("/api/generate/from-aggregated", response_model=GeneratedArticle)
async def generate_from_aggregated_data(news_data: Dict[str, Any], style: str = "professional"):
"""
Generate an article directly from already-collected news data
(accepts News Aggregator output as direct input).
"""
try:
if not news_data.get("news_items"):
raise HTTPException(status_code=400, detail="No news items in provided data")
# Generate article using Claude
logger.info(f"Generating article from {len(news_data['news_items'])} news items")
article = await generate_article_with_claude(news_data, style)
# Store article in MongoDB
if db is not None:
try:
article_dict = article.dict()
await db.articles.insert_one(article_dict)
logger.info(f"Article saved with ID: {article.news_id}")
except Exception as e:
logger.error(f"Failed to save article to MongoDB: {e}")
return article
except Exception as e:
logger.error(f"Error in generate_from_aggregated_data: {e}")
raise HTTPException(status_code=500, detail=str(e))
@app.get("/api/articles/{article_id}", response_model=GeneratedArticle)
async def get_article(article_id: str):
"""저장된 기사 조회"""
if db is None:
raise HTTPException(status_code=503, detail="Database not available")
article = await db.articles.find_one({"news_id": article_id})
if not article:
raise HTTPException(status_code=404, detail="Article not found")
# Convert MongoDB document to GeneratedArticle
article.pop("_id", None)
return GeneratedArticle(**article)
@app.get("/api/articles")
async def list_articles(
skip: int = 0,
limit: int = 10,
keyword: Optional[str] = None,
category: Optional[str] = None
):
"""저장된 기사 목록 조회"""
if db is None:
raise HTTPException(status_code=503, detail="Database not available")
query = {}
if keyword:
query["source_keyword"] = {"$regex": keyword, "$options": "i"}
if category:
query["categories"] = category
cursor = db.articles.find(query).skip(skip).limit(limit).sort("created_at", -1)
articles = []
async for article in cursor:
article.pop("_id", None)
articles.append(article)
total = await db.articles.count_documents(query)
return {
"articles": articles,
"total": total,
"skip": skip,
"limit": limit
}
@app.post("/api/generate/batch")
async def generate_batch_articles(keywords: List[str], style: str = "professional"):
"""여러 키워드에 대한 기사 일괄 생성"""
results = []
errors = []
for keyword in keywords[:5]: # Limit to 5 keywords to prevent overload
try:
request = ArticleGenerationRequest(
keyword=keyword,
style=style
)
article = await generate_article(request)
results.append({
"keyword": keyword,
"status": "success",
"article_id": article.news_id,
"title": article.title
})
except Exception as e:
errors.append({
"keyword": keyword,
"status": "error",
"error": str(e)
})
return {
"success": results,
"errors": errors,
"total_processed": len(results) + len(errors)
}
@app.post("/api/generate/per-item")
async def generate_articles_per_rss_item(request: PerItemGenerationRequest):
"""
Generate an individual article for each RSS feed item.
Each RSS item becomes an independent article.
Includes duplicate-generation prevention.
"""
try:
# Step 1: Get aggregated news from News Aggregator service
logger.info(f"Fetching aggregated news for keyword: {request.keyword}")
# If limit is None, process all items (capped at 100)
actual_limit = request.limit if request.limit is not None else 100
aggregator_response = await http_client.get(
f"{NEWS_AGGREGATOR_URL}/api/aggregate",
params={
"q": request.keyword,
"limit": actual_limit,
"google_results_per_title": request.google_results_per_title,
"lang": request.lang,
"country": request.country
}
)
aggregator_response.raise_for_status()
news_data = aggregator_response.json()
if not news_data.get("news_items"):
raise HTTPException(status_code=404, detail="No news items found for the given keyword")
# Step 2: Check for existing articles if skip_existing is True
existing_titles = set()
skipped_count = 0
if request.skip_existing and db is not None:
# Check for duplicates by RSS title (within the last 24 hours)
from datetime import datetime, timedelta
cutoff_time = (datetime.now() - timedelta(hours=24)).isoformat()
existing_cursor = db.articles.find(
{
"source_keyword": request.keyword,
"created_at": {"$gte": cutoff_time}
},
{"sources": 1}
)
async for doc in existing_cursor:
for source in doc.get("sources", []):
if source.get("source_site") == "RSS Feed":
existing_titles.add(source.get("title", ""))
# Step 3: Generate individual article for each RSS item
generated_articles = []
for item in news_data["news_items"]:
try:
rss_title = item.get('rss_title', '')
# Skip if already exists
if request.skip_existing and rss_title in existing_titles:
logger.info(f"Skipping already generated article: {rss_title}")
skipped_count += 1
continue
logger.info(f"Generating article for RSS item: {rss_title or 'Unknown'}")
# Create individual news_data for this item
individual_news_data = {
"keyword": news_data.get("keyword"),
"news_items": [item] # Single item only
}
# Generate article for this single item
article = await generate_article_with_claude(individual_news_data, request.style)
# Store in MongoDB
if db is not None:
try:
article_dict = article.dict()
await db.articles.insert_one(article_dict)
logger.info(f"Article saved with ID: {article.news_id}")
except Exception as e:
logger.error(f"Failed to save article to MongoDB: {e}")
generated_articles.append(article)
except Exception as e:
logger.error(f"Failed to generate article for item: {e}")
# Continue with next item even if one fails
continue
if not generated_articles and skipped_count == 0:
raise HTTPException(status_code=500, detail="Failed to generate any articles")
# Return all generated articles
return {
"total_generated": len(generated_articles),
"total_items": len(news_data["news_items"]),
"skipped_duplicates": skipped_count,
"articles": generated_articles
}
except httpx.HTTPStatusError as e:
logger.error(f"HTTP error from aggregator service: {e}")
raise HTTPException(status_code=e.response.status_code, detail=str(e))
except Exception as e:
logger.error(f"Error in generate_articles_per_rss_item: {e}")
raise HTTPException(status_code=500, detail=str(e))
# Queue Management Endpoints
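# These endpoints fetch aggregated news, build NewsJobData payloads and push them
# onto the Redis queue; pipeline worker services consume the jobs asynchronously.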
@app.post("/api/queue/enqueue")
async def enqueue_items(request: PerItemGenerationRequest):
"""
Add RSS items to the queue (asynchronous processing).
Consumer workers handle them in the background.
"""
try:
# Step 1: Get aggregated news from News Aggregator service
logger.info(f"Fetching aggregated news for enqueue: {request.keyword}")
actual_limit = request.limit if request.limit is not None else 100
aggregator_response = await http_client.get(
f"{NEWS_AGGREGATOR_URL}/api/aggregate",
params={
"q": request.keyword,
"limit": actual_limit,
"google_results_per_title": request.google_results_per_title,
"lang": request.lang,
"country": request.country
}
)
aggregator_response.raise_for_status()
news_data = aggregator_response.json()
if not news_data.get("news_items"):
raise HTTPException(status_code=404, detail="No news items found for the given keyword")
# Step 2: Check for existing articles if skip_existing is True
existing_titles = set()
skipped_count = 0
if request.skip_existing and db is not None:
from datetime import datetime, timedelta
cutoff_time = (datetime.now() - timedelta(hours=24)).isoformat()
existing_cursor = db.articles.find(
{
"source_keyword": request.keyword,
"created_at": {"$gte": cutoff_time}
},
{"sources": 1}
)
async for doc in existing_cursor:
for source in doc.get("sources", []):
if source.get("source_site") == "RSS Feed":
existing_titles.add(source.get("title", ""))
# Step 3: Enqueue items for processing
enqueued_jobs = []
for item in news_data["news_items"]:
rss_title = item.get('rss_title', '')
# Skip if already exists
if request.skip_existing and rss_title in existing_titles:
logger.info(f"Skipping already generated article: {rss_title}")
skipped_count += 1
continue
# Create job data
job_data = NewsJobData(
job_id=str(uuid.uuid4()),
keyword=request.keyword,
rss_title=rss_title,
rss_link=item.get('rss_link'),
rss_published=item.get('rss_published'),
google_results=item.get('google_results', []),
style=request.style,
created_at=datetime.now()
)
# Enqueue job
job_id = await queue_manager.enqueue(job_data)
enqueued_jobs.append({
"job_id": job_id,
"title": rss_title[:100]
})
logger.info(f"Enqueued job {job_id} for: {rss_title}")
return {
"total_enqueued": len(enqueued_jobs),
"total_items": len(news_data["news_items"]),
"skipped_duplicates": skipped_count,
"jobs": enqueued_jobs,
"message": f"{len(enqueued_jobs)} jobs added to queue for processing"
}
except httpx.HTTPStatusError as e:
logger.error(f"HTTP error from aggregator service: {e}")
raise HTTPException(status_code=e.response.status_code, detail=str(e))
except Exception as e:
logger.error(f"Error in enqueue_items: {e}")
raise HTTPException(status_code=500, detail=str(e))
@app.get("/api/queue/stats", response_model=QueueStats)
async def get_queue_stats():
"""큐 상태 및 통계 조회"""
try:
stats = await queue_manager.get_stats()
return stats
except Exception as e:
logger.error(f"Error getting queue stats: {e}")
raise HTTPException(status_code=500, detail=str(e))
@app.delete("/api/queue/clear")
async def clear_queue():
"""큐 초기화 (관리자용)"""
try:
await queue_manager.clear_queue()
return {"message": "Queue cleared successfully"}
except Exception as e:
logger.error(f"Error clearing queue: {e}")
raise HTTPException(status_code=500, detail=str(e))