feat: add resource library and real image workflow
This commit is contained in:
45
app/infra/db/models/library_resource.py
Normal file
45
app/infra/db/models/library_resource.py
Normal file
@@ -0,0 +1,45 @@
|
||||
"""Library resource ORM model."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from sqlalchemy import Enum, Integer, JSON, String, Text
|
||||
from sqlalchemy.orm import Mapped, mapped_column, relationship
|
||||
|
||||
from app.domain.enums import LibraryResourceStatus, LibraryResourceType
|
||||
from app.infra.db.base import Base, TimestampMixin
|
||||
|
||||
|
||||
class LibraryResourceORM(TimestampMixin, Base):
    """Persisted library resource independent from order-generated assets."""

    __tablename__ = "library_resources"

    # Surrogate primary key.
    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    # Kind of resource (model / scene / garment). Stored as a plain VARCHAR
    # (native_enum=False) so the column is portable across database backends.
    resource_type: Mapped[LibraryResourceType] = mapped_column(
        Enum(LibraryResourceType, native_enum=False),
        nullable=False,
        index=True,
    )
    name: Mapped[str] = mapped_column(String(255), nullable=False)
    description: Mapped[str | None] = mapped_column(Text, nullable=True)
    # Tag list serialized as JSON; the callable default (`list`) gives every
    # new row its own empty list instead of a shared mutable default.
    tags: Mapped[list[str]] = mapped_column(JSON, nullable=False, default=list)
    # Lifecycle status; new rows start ACTIVE.
    status: Mapped[LibraryResourceStatus] = mapped_column(
        Enum(LibraryResourceStatus, native_enum=False),
        nullable=False,
        default=LibraryResourceStatus.ACTIVE,
        index=True,
    )
    # Optional, indexed filter facets. Which ones are populated presumably
    # depends on resource_type (e.g. gender/age_group for models) — TODO confirm.
    gender: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    age_group: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    pose_id: Mapped[int | None] = mapped_column(Integer, nullable=True)
    environment: Mapped[str | None] = mapped_column(String(32), nullable=True, index=True)
    category: Mapped[str | None] = mapped_column(String(128), nullable=True, index=True)
    # Plain integer references to file rows (no FK constraint declared here).
    cover_file_id: Mapped[int | None] = mapped_column(Integer, nullable=True)
    original_file_id: Mapped[int | None] = mapped_column(Integer, nullable=True)

    # Child file records: eagerly loaded via SELECT IN and removed together
    # with the resource (delete-orphan cascade).
    files = relationship(
        "LibraryResourceFileORM",
        back_populates="resource",
        lazy="selectin",
        cascade="all, delete-orphan",
    )
|
||||
37
app/infra/db/models/library_resource_file.py
Normal file
37
app/infra/db/models/library_resource_file.py
Normal file
@@ -0,0 +1,37 @@
|
||||
"""Library resource file ORM model."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from sqlalchemy import Enum, ForeignKey, Integer, String
|
||||
from sqlalchemy.orm import Mapped, mapped_column, relationship
|
||||
|
||||
from app.domain.enums import LibraryFileRole
|
||||
from app.infra.db.base import Base, TimestampMixin
|
||||
|
||||
|
||||
class LibraryResourceFileORM(TimestampMixin, Base):
    """Persisted uploaded file metadata for a library resource."""

    __tablename__ = "library_resource_files"

    # Surrogate primary key.
    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    # Owning library resource row.
    resource_id: Mapped[int] = mapped_column(
        ForeignKey("library_resources.id"),
        nullable=False,
        index=True,
    )
    # Role this file plays for the resource; stored as a portable VARCHAR
    # (native_enum=False) rather than a database-native enum type.
    file_role: Mapped[LibraryFileRole] = mapped_column(
        Enum(LibraryFileRole, native_enum=False),
        nullable=False,
        index=True,
    )
    # Object-store location plus the public URL derived from it.
    storage_key: Mapped[str] = mapped_column(String(500), nullable=False)
    public_url: Mapped[str] = mapped_column(String(500), nullable=False)
    bucket: Mapped[str] = mapped_column(String(255), nullable=False)
    # Upload metadata.
    mime_type: Mapped[str] = mapped_column(String(255), nullable=False)
    size_bytes: Mapped[int] = mapped_column(Integer, nullable=False)
    # Display ordering among sibling files of one resource (0 = first).
    sort_order: Mapped[int] = mapped_column(Integer, nullable=False, default=0)
    # Pixel dimensions when known — presumably image files only; TODO confirm.
    width: Mapped[int | None] = mapped_column(Integer, nullable=True)
    height: Mapped[int | None] = mapped_column(Integer, nullable=True)

    # Back-reference to the owning resource.
    resource = relationship("LibraryResourceORM", back_populates="files")
|
||||
@@ -27,12 +27,11 @@ class OrderORM(TimestampMixin, Base):
|
||||
default=OrderStatus.CREATED,
|
||||
)
|
||||
model_id: Mapped[int] = mapped_column(Integer, nullable=False)
|
||||
pose_id: Mapped[int] = mapped_column(Integer, nullable=False)
|
||||
pose_id: Mapped[int | None] = mapped_column(Integer, nullable=True)
|
||||
garment_asset_id: Mapped[int] = mapped_column(Integer, nullable=False)
|
||||
scene_ref_asset_id: Mapped[int] = mapped_column(Integer, nullable=False)
|
||||
scene_ref_asset_id: Mapped[int | None] = mapped_column(Integer, nullable=True)
|
||||
final_asset_id: Mapped[int | None] = mapped_column(Integer, nullable=True)
|
||||
|
||||
assets = relationship("AssetORM", back_populates="order", lazy="selectin")
|
||||
review_tasks = relationship("ReviewTaskORM", back_populates="order", lazy="selectin")
|
||||
workflow_runs = relationship("WorkflowRunORM", back_populates="order", lazy="selectin")
|
||||
|
||||
|
||||
@@ -44,12 +44,14 @@ async def init_database() -> None:
|
||||
"""Create database tables when running the MVP without migrations."""
|
||||
|
||||
from app.infra.db.models.asset import AssetORM
|
||||
from app.infra.db.models.library_resource import LibraryResourceORM
|
||||
from app.infra.db.models.library_resource_file import LibraryResourceFileORM
|
||||
from app.infra.db.models.order import OrderORM
|
||||
from app.infra.db.models.review_task import ReviewTaskORM
|
||||
from app.infra.db.models.workflow_run import WorkflowRunORM
|
||||
from app.infra.db.models.workflow_step import WorkflowStepORM
|
||||
|
||||
del AssetORM, OrderORM, ReviewTaskORM, WorkflowRunORM, WorkflowStepORM
|
||||
del AssetORM, LibraryResourceORM, LibraryResourceFileORM, OrderORM, ReviewTaskORM, WorkflowRunORM, WorkflowStepORM
|
||||
|
||||
async with get_async_engine().begin() as connection:
|
||||
await connection.run_sync(Base.metadata.create_all)
|
||||
|
||||
38
app/infra/image_generation/base.py
Normal file
38
app/infra/image_generation/base.py
Normal file
@@ -0,0 +1,38 @@
|
||||
"""Shared types for image-generation providers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Protocol
|
||||
|
||||
|
||||
@dataclass(slots=True)
class SourceImage:
    """A binary source image passed into an image-generation provider."""

    # Original location of the image. Providers send `data` inline, so the
    # URL appears to be informational/provenance only — TODO confirm.
    url: str
    # MIME type describing `data`, e.g. "image/png".
    mime_type: str
    # Raw image bytes.
    data: bytes
|
||||
|
||||
|
||||
@dataclass(slots=True)
class GeneratedImageResult:
    """Normalized image-generation output returned by providers."""

    # Decoded image payload.
    image_bytes: bytes
    # MIME type of `image_bytes`, e.g. "image/png".
    mime_type: str
    # Identifier of the provider that produced the image (e.g. "gemini").
    provider: str
    # Model name used for generation.
    model: str
    # The prompt that produced this image.
    prompt: str
|
||||
|
||||
|
||||
class ImageGenerationProvider(Protocol):
    """Contract implemented by concrete image-generation providers.

    Structural (duck-typed) protocol: any object exposing a matching
    ``generate_tryon_image`` coroutine satisfies it without inheriting.
    """

    async def generate_tryon_image(
        self,
        *,
        prompt: str,
        person_image: SourceImage,
        garment_image: SourceImage,
    ) -> GeneratedImageResult:
        """Generate a try-on image from a person image and a garment image.

        Args:
            prompt: Natural-language instruction guiding the generation.
            person_image: Prepared person/model source image.
            garment_image: Garment source image to apply.
        """
        ...
|
||||
149
app/infra/image_generation/gemini_provider.py
Normal file
149
app/infra/image_generation/gemini_provider.py
Normal file
@@ -0,0 +1,149 @@
|
||||
"""Gemini-backed image-generation provider."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
|
||||
import httpx
|
||||
|
||||
from app.infra.image_generation.base import GeneratedImageResult, SourceImage
|
||||
|
||||
|
||||
class GeminiImageProvider:
    """Image-generation provider backed by Gemini's ``generateContent`` REST API."""

    provider_name = "gemini"

    def __init__(
        self,
        *,
        api_key: str,
        base_url: str,
        model: str,
        timeout_seconds: int,
        max_attempts: int = 2,
        http_client: httpx.AsyncClient | None = None,
    ) -> None:
        self.api_key = api_key
        self.base_url = base_url.rstrip("/")  # normalize so URL joins stay clean
        self.model = model
        self.timeout_seconds = timeout_seconds
        self.max_attempts = max(1, max_attempts)  # always make at least one attempt
        self._http_client = http_client  # injected client; caller keeps ownership

    async def generate_tryon_image(
        self,
        *,
        prompt: str,
        person_image: SourceImage,
        garment_image: SourceImage,
    ) -> GeneratedImageResult:
        """Generate a try-on image from a prepared person image and a garment image."""
        return await self._generate_two_image_edit(
            prompt=prompt,
            first_image=person_image,
            second_image=garment_image,
        )

    async def generate_scene_image(
        self,
        *,
        prompt: str,
        source_image: SourceImage,
        scene_image: SourceImage,
    ) -> GeneratedImageResult:
        """Generate a scene-composited image from a rendered subject and a scene reference."""
        return await self._generate_two_image_edit(
            prompt=prompt,
            first_image=source_image,
            second_image=scene_image,
        )

    @staticmethod
    def _encode_inline(image: SourceImage) -> dict:
        """Wrap one source image as a Gemini ``inline_data`` request part."""
        return {
            "inline_data": {
                "mime_type": image.mime_type,
                "data": base64.b64encode(image.data).decode("utf-8"),
            }
        }

    def _build_payload(self, prompt: str, first_image: SourceImage, second_image: SourceImage) -> dict:
        """Assemble a generateContent body: one text part followed by two image parts."""
        return {
            "contents": [
                {
                    "parts": [
                        {"text": prompt},
                        self._encode_inline(first_image),
                        self._encode_inline(second_image),
                    ]
                }
            ],
            "generationConfig": {
                # Request both modalities so an image part is included in the reply.
                "responseModalities": ["TEXT", "IMAGE"],
            },
        }

    async def _generate_two_image_edit(
        self,
        *,
        prompt: str,
        first_image: SourceImage,
        second_image: SourceImage,
    ) -> GeneratedImageResult:
        """Send a prompt plus two inline images and return the generated image."""
        payload = self._build_payload(prompt, first_image, second_image)

        owns_client = self._http_client is None
        client = self._http_client or httpx.AsyncClient(timeout=self.timeout_seconds)
        try:
            response = None
            # Retry transport-level failures only; HTTP error statuses raised by
            # raise_for_status() propagate immediately without a retry.
            for attempt in range(1, self.max_attempts + 1):
                try:
                    response = await client.post(
                        f"{self.base_url}/models/{self.model}:generateContent",
                        headers={
                            "x-goog-api-key": self.api_key,
                            "Content-Type": "application/json",
                        },
                        json=payload,
                        timeout=self.timeout_seconds,
                    )
                    response.raise_for_status()
                    break
                except httpx.TransportError:
                    if attempt >= self.max_attempts:
                        raise
        finally:
            # Close only clients we created; injected ones belong to the caller.
            if owns_client:
                await client.aclose()

        if response is None:
            raise RuntimeError("Gemini provider did not receive a response")

        return self._parse_response(response.json(), prompt)

    def _parse_response(self, body: dict, prompt: str) -> GeneratedImageResult:
        """Extract the inline image from a generateContent response body."""
        part = self._find_image_part(body)
        # The REST API may return either camelCase or snake_case field names.
        inline = part.get("inlineData") or part.get("inline_data")
        mime_type = inline.get("mimeType") or inline.get("mime_type") or "image/png"
        encoded = inline.get("data")
        if not encoded:
            raise ValueError("Gemini response did not include image bytes")

        return GeneratedImageResult(
            image_bytes=base64.b64decode(encoded),
            mime_type=mime_type,
            provider=self.provider_name,
            model=self.model,
            prompt=prompt,
        )

    @staticmethod
    def _find_image_part(body: dict) -> dict:
        """Return the first candidate part that carries inline image data."""
        for candidate in body.get("candidates") or []:
            parts = (candidate.get("content") or {}).get("parts") or []
            for part in parts:
                if part.get("inlineData") or part.get("inline_data"):
                    return part
        raise ValueError("Gemini response did not contain an image part")
|
||||
107
app/infra/storage/s3.py
Normal file
107
app/infra/storage/s3.py
Normal file
@@ -0,0 +1,107 @@
|
||||
"""S3 direct-upload helpers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from uuid import uuid4
|
||||
|
||||
import boto3
|
||||
|
||||
from app.config.settings import get_settings
|
||||
from app.domain.enums import LibraryResourceType, WorkflowStepName
|
||||
|
||||
# Storage-key path segment (under "library/") used for each uploadable
# resource type; consumed by S3PresignService._build_storage_key.
RESOURCE_PREFIXES: dict[LibraryResourceType, str] = {
    LibraryResourceType.MODEL: "models",
    LibraryResourceType.SCENE: "scenes",
    LibraryResourceType.GARMENT: "garments",
}
|
||||
|
||||
|
||||
class S3PresignService:
    """Generate presigned upload URLs and derived public URLs."""

    def __init__(self) -> None:
        self.settings = get_settings()
        # Empty settings fall back to botocore's own resolution chain
        # (environment variables, shared config, instance profile).
        self._client = boto3.client(
            "s3",
            region_name=self.settings.s3_region or None,
            endpoint_url=self.settings.s3_endpoint or None,
            aws_access_key_id=self.settings.s3_access_key or None,
            aws_secret_access_key=self.settings.s3_secret_key or None,
        )

    def create_upload(self, resource_type: LibraryResourceType, file_name: str, content_type: str) -> tuple[str, str]:
        """Return a storage key and presigned PUT URL for a resource file.

        Args:
            resource_type: Library category deciding the key prefix.
            file_name: Client-supplied name; only its stem/suffix are reused.
            content_type: MIME type the client must send with the PUT.
        """
        storage_key = self._build_storage_key(resource_type, file_name)
        upload_url = self._client.generate_presigned_url(
            "put_object",
            Params={
                "Bucket": self.settings.s3_bucket,
                "Key": storage_key,
                "ContentType": content_type,
            },
            ExpiresIn=self.settings.s3_presign_expiry_seconds,
            HttpMethod="PUT",
        )
        return storage_key, upload_url

    def get_public_url(self, storage_key: str) -> str:
        """Return the public CDN URL for an uploaded object.

        Prefers the configured CNAME; otherwise builds ``endpoint/bucket/key``.

        Raises:
            ValueError: If neither ``s3_cname`` nor ``s3_endpoint`` is configured.
        """
        if self.settings.s3_cname:
            base = self.settings.s3_cname
            # Tolerate a bare host name in configuration.
            if not base.startswith(("http://", "https://")):
                base = f"https://{base}"
            return f"{base.rstrip('/')}/{storage_key}"

        # Fix: previously this crashed with an opaque AttributeError
        # (None.rstrip) when neither CNAME nor endpoint was configured.
        if not self.settings.s3_endpoint:
            raise ValueError("Either s3_cname or s3_endpoint must be configured to build public URLs")
        endpoint = self.settings.s3_endpoint.rstrip("/")
        return f"{endpoint}/{self.settings.s3_bucket}/{storage_key}"

    def _build_storage_key(self, resource_type: LibraryResourceType, file_name: str) -> str:
        """Build a collision-free key: ``library/<type>/<uuid>-<stem><suffix>``."""
        suffix = Path(file_name).suffix or ".bin"
        stem = Path(file_name).stem.replace(" ", "-").lower() or "file"
        return f"library/{RESOURCE_PREFIXES[resource_type]}/{uuid4().hex}-{stem}{suffix.lower()}"
|
||||
|
||||
|
||||
class S3ObjectStorageService:
    """Upload generated workflow artifacts to the configured object store."""

    # File extension per known image MIME type; anything else gets ".bin".
    _SUFFIX_BY_MIME = {
        "image/png": ".png",
        "image/jpeg": ".jpg",
        "image/webp": ".webp",
    }

    def __init__(self) -> None:
        self.settings = get_settings()
        self._client = boto3.client(
            "s3",
            region_name=self.settings.s3_region or None,
            endpoint_url=self.settings.s3_endpoint or None,
            aws_access_key_id=self.settings.s3_access_key or None,
            aws_secret_access_key=self.settings.s3_secret_key or None,
        )
        # Reused only for deriving public URLs from storage keys.
        self._presign = S3PresignService()

    async def upload_generated_image(
        self,
        *,
        order_id: int,
        step_name: WorkflowStepName,
        image_bytes: bytes,
        mime_type: str,
    ) -> tuple[str, str]:
        """Upload bytes and return the storage key plus public URL."""
        key = self._build_storage_key(order_id=order_id, step_name=step_name, mime_type=mime_type)
        self._client.put_object(
            Bucket=self.settings.s3_bucket,
            Key=key,
            Body=image_bytes,
            ContentType=mime_type,
        )
        return key, self._presign.get_public_url(key)

    @staticmethod
    def _build_storage_key(*, order_id: int, step_name: WorkflowStepName, mime_type: str) -> str:
        """Derive a unique per-order object key for one workflow step output."""
        extension = S3ObjectStorageService._SUFFIX_BY_MIME.get(mime_type, ".bin")
        return f"orders/{order_id}/{step_name.value}/{uuid4().hex}{extension}"
|
||||
Reference in New Issue
Block a user