# Configuration for Florence-2 caption pipeline

[model]
# HuggingFace model id (replace with your model)
id = "your-org/your-model"
# Device to run model on (cuda/cpu)
device = "cuda"
# Prompt token used for captioning (replace with your token)
prompt_token = "<PROMPT_TOKEN>"

[preprocessing]
# Max side for resizing (longest dimension)
image_max_side = 768

[pipeline]
# Batch sizes and queue sizing
gpu_batch_size = 8
download_concurrency = 16
image_queue_max_size = 64
result_queue_max_size = 128
db_write_batch_size = 64

[database]
# Async DB connection string
dsn = ""
# Table and column names
table = "images"
id_column = "id"
url_column = "url"
caption_column = "caption"
# Default WHERE clause used to fetch rows to caption
query_where = "caption IS NULL"

[debug]
# Dry run disables writing to DB for quick local tests
dry_run = true