Coverage for apps/recipes/api.py: 93%

177 statements  

coverage.py v7.13.1, created at 2026-01-11 00:40 +0000

1""" 

2Recipe API endpoints. 

3""" 

4 

5import asyncio 

6import logging 

7from typing import List, Optional 

8 

9logger = logging.getLogger(__name__) 

10 

11from asgiref.sync import sync_to_async 

12from django.shortcuts import get_object_or_404 

13from ninja import Router, Schema 

14 

15from apps.profiles.utils import aget_current_profile_or_none, get_current_profile_or_none 

16 

17from .models import Recipe 

18from .services.image_cache import SearchImageCache 

19from .services.scraper import RecipeScraper, FetchError, ParseError 

20from .services.search import RecipeSearch 

21 

22router = Router(tags=['recipes']) 


# Schemas

class RecipeOut(Schema):
    id: int
    source_url: Optional[str]
    canonical_url: str
    host: str
    site_name: str
    title: str
    author: str
    description: str
    image_url: str
    image: Optional[str]  # Local image path
    ingredients: list
    ingredient_groups: list
    instructions: list
    instructions_text: str
    prep_time: Optional[int]
    cook_time: Optional[int]
    total_time: Optional[int]
    yields: str
    servings: Optional[int]
    category: str
    cuisine: str
    cooking_method: str
    keywords: list
    dietary_restrictions: list
    equipment: list
    nutrition: dict
    rating: Optional[float]
    rating_count: Optional[int]
    language: str
    links: list
    ai_tips: list
    is_remix: bool
    remix_profile_id: Optional[int]
    scraped_at: str
    updated_at: str
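
    # django-ninja resolvers: the resolve_<field> staticmethods below derive
    # values at serialization time (local image URL, ISO-8601 timestamps).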

    @staticmethod
    def resolve_image(obj):
        if obj.image:
            return obj.image.url
        return None

    @staticmethod
    def resolve_scraped_at(obj):
        return obj.scraped_at.isoformat()

    @staticmethod
    def resolve_updated_at(obj):
        return obj.updated_at.isoformat()


class RecipeListOut(Schema):
    """Condensed recipe output for list views."""
    id: int
    title: str
    host: str
    image_url: str
    image: Optional[str]
    total_time: Optional[int]
    rating: Optional[float]
    is_remix: bool
    scraped_at: str

    @staticmethod
    def resolve_image(obj):
        if obj.image:
            return obj.image.url
        return None

    @staticmethod
    def resolve_scraped_at(obj):
        return obj.scraped_at.isoformat()


class ScrapeIn(Schema):
    url: str


class ErrorOut(Schema):
    detail: str


class SearchResultOut(Schema):
    url: str
    title: str
    host: str
    image_url: str  # External URL (fallback)
    cached_image_url: Optional[str] = None  # Local cached URL
    description: str
    rating_count: Optional[int] = None


class SearchOut(Schema):
    results: List[SearchResultOut]
    total: int
    page: int
    has_more: bool
    sites: dict


# Endpoints
# NOTE: Static routes must come before dynamic routes (e.g., /search/ before /{recipe_id}/)

@router.get('/', response=List[RecipeListOut])
def list_recipes(
    request,
    host: Optional[str] = None,
    is_remix: Optional[bool] = None,
    limit: int = 50,
    offset: int = 0,
):
    """
    List saved recipes with optional filters.

    - **host**: Filter by source host (e.g., "allrecipes.com")
    - **is_remix**: Filter by remix status
    - **limit**: Number of recipes to return (default 50)
    - **offset**: Offset for pagination

    Returns only recipes owned by the current profile.
    """
    profile = get_current_profile_or_none(request)
    if not profile:
        return []

    # Only show recipes owned by this profile
    qs = Recipe.objects.filter(profile=profile).order_by('-scraped_at')

    if host:
        qs = qs.filter(host=host)
    if is_remix is not None:
        qs = qs.filter(is_remix=is_remix)

    return qs[offset:offset + limit]
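
# Example call for list_recipes (the path prefix depends on where this router
# is mounted; '/api/recipes/' is assumed here for illustration):
#   GET /api/recipes/?host=allrecipes.com&is_remix=false&limit=10&offset=0
#   -> JSON array of RecipeListOut objects owned by the current profile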


@router.post('/scrape/', response={201: RecipeOut, 400: ErrorOut, 403: ErrorOut, 502: ErrorOut})
async def scrape_recipe(request, payload: ScrapeIn):
    """
    Scrape a recipe from a URL.

    The URL is fetched, parsed for recipe data, and saved to the database.
    If the recipe has an image, it will be downloaded and stored locally.
    The recipe will be owned by the current profile.

    Note: Re-scraping the same URL will create a new recipe record.
    """
    profile = await aget_current_profile_or_none(request)
    if not profile:
        return 403, {'detail': 'Profile required to scrape recipes'}

    scraper = RecipeScraper()
    logger.info(f'Scrape request: {payload.url}')

    try:
        recipe = await scraper.scrape_url(payload.url, profile)
        logger.info(f'Scrape success: {payload.url} -> recipe {recipe.id} "{recipe.title}"')
        return 201, recipe
    except FetchError as e:
        logger.warning(f'Scrape fetch error: {payload.url} - {e}')
        return 502, {'detail': str(e)}
    except ParseError as e:
        logger.warning(f'Scrape parse error: {payload.url} - {e}')
        return 400, {'detail': str(e)}
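
# Example call for scrape_recipe (prefix and URL are illustrative):
#   POST /api/recipes/scrape/   {"url": "https://example.com/some-recipe"}
#   -> 201 RecipeOut on success, 403 without a profile,
#      400 on parse failure, 502 on fetch failure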


@router.get('/search/', response=SearchOut)
async def search_recipes(
    request,
    q: str,
    sources: Optional[str] = None,
    page: int = 1,
    per_page: int = 20,
):
    """
    Search for recipes across multiple sites.

    - **q**: Search query
    - **sources**: Comma-separated list of hosts to search (optional)
    - **page**: Page number (default 1)
    - **per_page**: Results per page (default 20)

    Returns recipe URLs from enabled search sources.
    Uses cached images when available for iOS 9 compatibility.
    Use the scrape endpoint to save a recipe from the results.
    """
    source_list = None
    if sources:
        source_list = [s.strip() for s in sources.split(',') if s.strip()]

    search = RecipeSearch()
    results = await search.search(
        query=q,
        sources=source_list,
        page=page,
        per_page=per_page,
    )

    # Extract image URLs from search results
    image_urls = [r['image_url'] for r in results['results'] if r.get('image_url')]

    # Look up already-cached images
    image_cache = SearchImageCache()
    cached_urls = await image_cache.get_cached_urls_batch(image_urls)

    # Add cached_image_url to results
    for result in results['results']:
        external_url = result.get('image_url', '')
        result['cached_image_url'] = cached_urls.get(external_url)

    # Cache uncached images in a background thread (fire-and-forget)
    uncached_urls = [url for url in image_urls if url not in cached_urls]
    if uncached_urls:
        import threading

        def cache_in_background():
            """Run async cache_images in a new event loop (thread-safe)."""
            # Create the loop before the try block so it is always bound in finally
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            try:
                loop.run_until_complete(image_cache.cache_images(uncached_urls))
            except Exception as e:
                logger.error(f"Background image caching failed: {e}")
            finally:
                loop.close()

        # Start background thread (daemon=True so it doesn't block shutdown)
        thread = threading.Thread(target=cache_in_background, daemon=True)
        thread.start()

    return results
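
# Example call for search_recipes (prefix and host list are illustrative):
#   GET /api/recipes/search/?q=pad+thai&sources=allrecipes.com&page=1&per_page=20
#   -> SearchOut: {"results": [...], "total": ..., "page": 1, "has_more": ..., "sites": {...}}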


@router.get('/cache/health/', response={200: dict})
def cache_health(request):
    """
    Health check endpoint for image cache monitoring.

    Returns cache statistics and status for monitoring the background
    image caching system. Use this to verify caching is working correctly
    and to track cache hit rates.
    """
    from apps.recipes.models import CachedSearchImage

    total = CachedSearchImage.objects.count()
    success = CachedSearchImage.objects.filter(status=CachedSearchImage.STATUS_SUCCESS).count()
    pending = CachedSearchImage.objects.filter(status=CachedSearchImage.STATUS_PENDING).count()
    failed = CachedSearchImage.objects.filter(status=CachedSearchImage.STATUS_FAILED).count()

    return {
        'status': 'healthy',
        'cache_stats': {
            'total': total,
            'success': success,
            'pending': pending,
            'failed': failed,
            'success_rate': f"{(success / total * 100):.1f}%" if total > 0 else "N/A",
        },
    }
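
# Example response from cache_health (numbers are illustrative):
#   {"status": "healthy",
#    "cache_stats": {"total": 120, "success": 110, "pending": 6, "failed": 4,
#                    "success_rate": "91.7%"}}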


# Dynamic routes with {recipe_id} must come last

@router.get('/{recipe_id}/', response={200: RecipeOut, 404: ErrorOut})
def get_recipe(request, recipe_id: int):
    """
    Get a recipe by ID.

    Only returns recipes owned by the current profile.
    """
    profile = get_current_profile_or_none(request)
    if not profile:
        return 404, {'detail': 'Recipe not found'}

    # Only allow access to recipes owned by this profile
    recipe = get_object_or_404(Recipe, id=recipe_id, profile=profile)
    return recipe
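
# Example call for get_recipe (prefix and id are illustrative):
#   GET /api/recipes/42/  -> 200 RecipeOut, or 404 if missing or not owned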


@router.delete('/{recipe_id}/', response={204: None, 404: ErrorOut})
def delete_recipe(request, recipe_id: int):
    """
    Delete a recipe by ID.

    Only the owning profile can delete a recipe.
    """
    profile = get_current_profile_or_none(request)
    if not profile:
        return 404, {'detail': 'Recipe not found'}

    # Only allow deletion of recipes owned by this profile
    recipe = get_object_or_404(Recipe, id=recipe_id, profile=profile)
    recipe.delete()
    return 204, None
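
# Example call for delete_recipe (prefix and id are illustrative):
#   DELETE /api/recipes/42/  -> 204 on success, 404 if missing or not owned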
