1 change: 1 addition & 0 deletions README.md
@@ -34,6 +34,7 @@ Handles file system operations and provides a secure bridge between the frontend

- Smart tagging of photos based on object detection and face recognition
- Traditional gallery features such as album management
- **Memories**: Auto-generated photo memories grouped by time and location (similar to Google Photos "On this day")
- Advanced image analysis with object detection and facial recognition
- Privacy-focused design with offline functionality
- Efficient data handling and parallel processing
171 changes: 171 additions & 0 deletions backend/app/routes/memories.py
@@ -0,0 +1,171 @@
"""
Memories API routes for retrieving auto-generated photo memories.
"""

from fastapi import APIRouter, HTTPException, status, Query
from typing import List, Optional
from pydantic import BaseModel
from app.utils.memories import generate_memories
from app.schemas.images import ErrorResponse
from app.logging.setup_logging import get_logger

logger = get_logger(__name__)
router = APIRouter()


# Response Models
class RepresentativeMedia(BaseModel):
"""Representative media thumbnail for a memory."""
id: str
thumbnailPath: str


class DateRange(BaseModel):
"""Date range for a memory."""
start: str
end: str


class Memory(BaseModel):
"""A memory object containing clustered photos."""
id: str
title: str
type: str # "on_this_day", "trip", "date_cluster", etc.
date_range: DateRange
location: Optional[str] = None
media_count: int
representative_media: List[RepresentativeMedia]
media_ids: List[str]


class GetMemoriesResponse(BaseModel):
"""Response model for GET /memories endpoint."""
success: bool
message: str
data: List[Memory]


@router.get(
"/",
response_model=GetMemoriesResponse,
responses={500: {"model": ErrorResponse}},
)
def get_memories(
limit: Optional[int] = Query(None, description="Maximum number of memories to return", ge=1, le=100)
):
"""
Get all auto-generated memories.

Memories are automatically generated by clustering photos based on:
- Date similarity (same day, month, year, or "on this day" from past years)
- Geographic proximity (nearby locations)

Returns memories sorted by date (most recent first).
"""
try:
memories = generate_memories()

# Apply limit if specified
if limit is not None:
memories = memories[:limit]

# Convert to response models
memory_models = [
Memory(
id=mem["id"],
title=mem["title"],
type=mem["type"],
date_range=DateRange(
start=mem["date_range"]["start"],
end=mem["date_range"]["end"],
),
location=mem.get("location"),
media_count=mem["media_count"],
representative_media=[
RepresentativeMedia(
id=media["id"],
thumbnailPath=media["thumbnailPath"],
)
for media in mem["representative_media"]
],
media_ids=mem["media_ids"],
)
for mem in memories
]

return GetMemoriesResponse(
success=True,
message=f"Successfully retrieved {len(memory_models)} memories",
data=memory_models,
)

except Exception as e:
logger.error(f"Error retrieving memories: {e}", exc_info=True)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=ErrorResponse(
success=False,
error="Internal server error",
message=f"Unable to retrieve memories: {str(e)}",
).model_dump(),
)

Comment on lines +48 to +112
⚠️ Potential issue | 🟠 Major

Error responses won’t match ErrorResponse schema (wrapped under detail) + leak internals.
HTTPException(detail=ErrorResponse(...).model_dump()) yields {"detail": {...}}, not an ErrorResponse body, and message echoes str(e) to clients.

-from fastapi import APIRouter, HTTPException, status, Query
+from fastapi import APIRouter, HTTPException, status, Query
+from fastapi.responses import JSONResponse
@@
-    except Exception as e:
+    except Exception as e:
         logger.error(f"Error retrieving memories: {e}", exc_info=True)
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=ErrorResponse(
-                success=False,
-                error="Internal server error",
-                message=f"Unable to retrieve memories: {str(e)}",
-            ).model_dump(),
-        )
+        err = ErrorResponse(
+            success=False,
+            error="Internal server error",
+            message="Unable to retrieve memories",
+        )
+        return JSONResponse(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            content=err.model_dump(),
+        )
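
To make the shape mismatch concrete, the raw body a client receives differs as follows (a sketch; field values are illustrative, not captured from a real response):

# Current code: FastAPI's default HTTPException handler serializes exc.detail
# under a "detail" key, so the ErrorResponse fields arrive nested and the raw
# exception text is echoed to the client.
wrapped_body = {
    "detail": {
        "success": False,
        "error": "Internal server error",
        "message": "Unable to retrieve memories: <str(e) leaks here>",
    }
}

# With the suggested JSONResponse change, the body is the ErrorResponse itself
# and no exception text reaches the client.
flat_body = {
    "success": False,
    "error": "Internal server error",
    "message": "Unable to retrieve memories",
}
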
🤖 Prompt for AI Agents
In backend/app/routes/memories.py around lines 48 to 112, the exception handler
currently wraps an ErrorResponse into HTTPException(detail=...) which results in
a {"detail": {...}} envelope and also exposes internal error text to clients;
instead, log the full exception (keep logger.error with exc_info=True) but
return the ErrorResponse as the actual response body (not nested under "detail")
and avoid leaking internals by using a generic client-facing message; implement
this by returning a JSONResponse (status_code=500,
content=ErrorResponse(success=False, error="Internal server error",
message="Unable to retrieve memories").model_dump()) or equivalent so the
response matches ErrorResponse exactly and do not include str(e) in the response
content.


@router.get(
    "/{memory_id}",
    response_model=Memory,
    responses={404: {"model": ErrorResponse}, 500: {"model": ErrorResponse}},
)
def get_memory_by_id(memory_id: str):
    """
    Get a specific memory by ID.
    """
    try:
        memories = generate_memories()

        # Find memory by ID
        memory = next((m for m in memories if m["id"] == memory_id), None)

        if not memory:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=ErrorResponse(
                    success=False,
                    error="Not Found",
                    message=f"Memory with ID '{memory_id}' not found",
                ).model_dump(),
            )

        return Memory(
            id=memory["id"],
            title=memory["title"],
            type=memory["type"],
            date_range=DateRange(
                start=memory["date_range"]["start"],
                end=memory["date_range"]["end"],
            ),
            location=memory.get("location"),
            media_count=memory["media_count"],
            representative_media=[
                RepresentativeMedia(
                    id=media["id"],
                    thumbnailPath=media["thumbnailPath"],
                )
                for media in memory["representative_media"]
            ],
            media_ids=memory["media_ids"],
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error retrieving memory {memory_id}: {e}", exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=ErrorResponse(
                success=False,
                error="Internal server error",
                message=f"Unable to retrieve memory: {str(e)}",
            ).model_dump(),
        )

Comment on lines +114 to +171
⚠️ Potential issue | 🟠 Major

Same error-shape + info-leak problem on 404/500 for GET /{memory_id}.
The 404 path also returns {"detail": ErrorResponse} rather than an ErrorResponse body, and the 500 path echoes str(e).

@@
-        if not memory:
-            raise HTTPException(
-                status_code=status.HTTP_404_NOT_FOUND,
-                detail=ErrorResponse(
-                    success=False,
-                    error="Not Found",
-                    message=f"Memory with ID '{memory_id}' not found",
-                ).model_dump(),
-            )
+        if not memory:
+            err = ErrorResponse(
+                success=False,
+                error="Not Found",
+                message=f"Memory with ID '{memory_id}' not found",
+            )
+            return JSONResponse(
+                status_code=status.HTTP_404_NOT_FOUND,
+                content=err.model_dump(),
+            )
@@
-    except HTTPException:
-        raise
     except Exception as e:
         logger.error(f"Error retrieving memory {memory_id}: {e}", exc_info=True)
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=ErrorResponse(
-                success=False,
-                error="Internal server error",
-                message=f"Unable to retrieve memory: {str(e)}",
-            ).model_dump(),
-        )
+        err = ErrorResponse(
+            success=False,
+            error="Internal server error",
+            message="Unable to retrieve memory",
+        )
+        return JSONResponse(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            content=err.model_dump(),
+        )
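
If the suggested change is applied, a small test can lock in the corrected shape on the 404 path. This is only a sketch: the app.main import path and the /memories mount prefix are assumptions and do not appear in this diff.

from fastapi.testclient import TestClient

from app.main import app  # hypothetical entry point; adjust to the project's real app module

client = TestClient(app)


def test_missing_memory_returns_flat_error_response():
    # Assumes the memories router is mounted with prefix="/memories".
    resp = client.get("/memories/does-not-exist")
    assert resp.status_code == 404
    body = resp.json()
    # The body should match ErrorResponse directly: no "detail" wrapper
    # and no internal exception text.
    assert "detail" not in body
    assert body["success"] is False
    assert body["error"] == "Not Found"
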
🤖 Prompt for AI Agents
In backend/app/routes/memories.py around lines 114-171, the 404 and 500 branches
currently return an HTTPException with its ErrorResponse put into the exception
"detail" (causing the API response to be {"detail": {...}}) and the 500 path
echoes str(e) (information leak); change both to return proper JSON body
ErrorResponse objects and avoid exposing internal errors: import and use
fastapi.responses.JSONResponse (or return Response with JSON) to return
JSONResponse(status_code=404, content=ErrorResponse(...).model_dump()) for the
not-found case, and for unexpected errors log the exception (keep
logger.error(..., exc_info=True)) but return JSONResponse(status_code=500,
content=ErrorResponse(success=False, error="Internal server error",
message="Unable to retrieve memory").model_dump()) so the client gets a clean
ErrorResponse body and no internal exception text is leaked.
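
One thing the diff does not show is how the new router is registered with the application. A minimal sketch of a plausible wiring, assuming the app is assembled in a module like app.main and the router is mounted under /memories (both are assumptions, not confirmed by this PR):

from fastapi import FastAPI

from app.routes import memories  # the module added in this PR

app = FastAPI()
# Prefix and tags are illustrative; follow whatever convention the project already uses.
app.include_router(memories.router, prefix="/memories", tags=["memories"])

# With that prefix, the new endpoints would be reachable as:
#   GET /memories/?limit=10     -> GetMemoriesResponse
#   GET /memories/{memory_id}   -> Memory (ErrorResponse on 404/500)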
