feat: make version deployable
This commit is contained in:
parent
4aa8759514
commit
49bd94cda2
22 changed files with 7785 additions and 10962 deletions
|
|
@ -1,27 +1,14 @@
|
|||
# Memory Sharing for ML Models
|
||||
# ============================
|
||||
# This app is designed to run with Gunicorn's --preload flag, which loads the
|
||||
# SentenceTransformer models once in the master process before forking workers.
|
||||
# On Linux, fork uses copy-on-write (COW) semantics, so workers share the
|
||||
# read-only model weights in memory rather than each loading their own copy.
|
||||
# This is critical for keeping memory usage reasonable with large transformer models.
|
||||
#
|
||||
# ResourceTracker errors on shutdown (Python 3.14):
|
||||
# When you Ctrl+C the Gunicorn process, you may see
|
||||
# "ChildProcessError: [Errno 10] No child processes"
|
||||
# from multiprocessing.resource_tracker.
|
||||
#
|
||||
# I think this is harmless. I think what happens is each forked worker gets a
|
||||
# copy of the ResourceTracker object, then each copy tries to deallocate the
|
||||
# same resources. The process still shuts down reasonably quickly, so I'm not
|
||||
# concerned.
|
||||
# Salience API
|
||||
# ============
|
||||
# Uses a worker thread for model inference to avoid fork() issues with Metal/MPS.
|
||||
# The worker thread owns all model instances; HTTP handlers submit work via queue.
|
||||
|
||||
print("Starting salience __init__.py...")
|
||||
|
||||
from flask import Flask, request
|
||||
from flask_cors import CORS
|
||||
import numpy as np
|
||||
from .salience import extract, AVAILABLE_MODELS
|
||||
from .salience import submit_work, AVAILABLE_MODELS
|
||||
import json
|
||||
import time
|
||||
from collections import deque
|
||||
|
|
@ -117,7 +104,7 @@ def salience_view_default():
|
|||
if model_name not in AVAILABLE_MODELS:
|
||||
return json.dumps({'error': f'Invalid model: {model_name}'}), 400
|
||||
|
||||
sentence_ranges, adjacency = extract(default_source_text, model_name)
|
||||
sentence_ranges, adjacency = submit_work(default_source_text, model_name)
|
||||
|
||||
end_time = time.time()
|
||||
stats_tracker.add_processing_span(start_time, end_time)
|
||||
|
|
@ -146,7 +133,7 @@ def salience_view_custom():
|
|||
if not source_text:
|
||||
return json.dumps({'error': 'No text provided'}), 400
|
||||
|
||||
sentence_ranges, adjacency = extract(source_text, model_name)
|
||||
sentence_ranges, adjacency = submit_work(source_text, model_name)
|
||||
|
||||
end_time = time.time()
|
||||
stats_tracker.add_processing_span(start_time, end_time)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue