Use sessionmaker instead of context block
Essentially there were two bugs: 1) Logging lines placed before the explicit `Session.begin` caused the root error, "A transaction is already begun on this Session."; this is resolved by moving the logging inside the explicit session block, as intended. 2) We were reusing a single Session across cron runs. The architecture was never meant to do that, but cron had no actual sessionmaker available to it. We probably ought to get rid of scoped_session elsewhere as well, but baby steps to resolve the immediate issue. In testing, this works correctly together with the fix to `db.begin`, and when the logging bug is deliberately reintroduced, the changes to session creation prevent the every-15-seconds stack trace spam.
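For context, a minimal sketch of the pattern this change moves to, using the names from the diff below; the DATABASE_URL value, the 15-second sleep, and the placeholder task body are stand-ins for illustration, not the project's actual code:

import logging
import time

from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.orm import Session, sessionmaker

DATABASE_URL = "postgresql://localhost/example"  # stand-in; the real value comes from config

engine: Engine = create_engine(DATABASE_URL)
db_session_factory: sessionmaker = sessionmaker(
    bind=engine,
    autoflush=False,
    future=True,
)

def _run_tasks(db_session_factory: sessionmaker) -> None:
    # Each call opens a fresh Session; the context manager closes it on exit,
    # so nothing leaks into the next cron iteration.
    db: Session
    with db_session_factory() as db:
        with db.begin():
            # Any work (including ORM-touching logging) belongs inside the
            # explicit transaction; doing it before begin() is what raised
            # "A transaction is already begun on this Session."
            pass

def cron_app_worker() -> None:
    # Mirrors the worker loop in the diff: the factory is passed in, and a
    # Session only ever lives for a single iteration.
    while True:
        try:
            _run_tasks(db_session_factory)
        except Exception:
            logging.exception("An unhandled exception occurred while running tasks")
        time.sleep(15)  # stand-in interval, matching the every-15-seconds spam noted above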
parent 46a392ba31
commit 26acca47b9
2 changed files with 51 additions and 51 deletions
@@ -194,11 +194,12 @@ limiter = flask_limiter.Limiter(
 # ...and then after that we can load the database.
 
 engine: Engine = create_engine(DATABASE_URL)
 
-db_session: scoped_session = scoped_session(sessionmaker(
+db_session_factory: sessionmaker = sessionmaker(
     bind=engine,
     autoflush=False,
     future=True,
-))
+)
+db_session: scoped_session = scoped_session(db_session_factory)
 
 # now that we've that, let's add the cache, compression, and mail extensions to our app...
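The practical difference between the two objects defined above is what fixes bug 2. A tiny illustration (not part of the diff), assuming the db_session and db_session_factory from the hunk above:

from sqlalchemy.orm import Session

# scoped_session hands back the same thread-local Session on every call, which
# is how the old cron code ended up reusing one Session across runs.
s1: Session = db_session()
s2: Session = db_session()
assert s1 is s2

# A plain sessionmaker constructs a new, independent Session each time.
f1: Session = db_session_factory()
f2: Session = db_session_factory()
assert f1 is not f2
f1.close()
f2.close()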
@@ -4,9 +4,9 @@ import time
 from datetime import datetime, timezone
 from typing import Final
 
-from sqlalchemy.orm import scoped_session, Session
+from sqlalchemy.orm import sessionmaker, Session
 
-from files.__main__ import app, db_session
+from files.__main__ import app, db_session_factory
 from files.classes.cron.tasks import (DayOfWeek, RepeatableTask,
     RepeatableTaskRun, ScheduledTaskState)
 
@@ -41,7 +41,7 @@ def cron_app_worker():
     logging.info("Starting scheduler worker process")
     while True:
         try:
-            _run_tasks(db_session)
+            _run_tasks(db_session_factory)
         except Exception as e:
             logging.exception(
                 "An unhandled exception occurred while running tasks",
@@ -77,7 +77,7 @@ def _acquire_lock_exclusive(db: Session, table: str):
         raise
 
 
-def _run_tasks(db_session_factory: scoped_session):
+def _run_tasks(db_session_factory: sessionmaker):
     '''
     Runs tasks, attempting to guarantee that a task is ran once and only once.
     This uses postgres to lock the table containing our tasks at key points in
@@ -87,9 +87,8 @@ def _run_tasks(db_session_factory: scoped_session):
     running task does not lock the entire table for its entire run, which would
     for example, prevent any statistics about status from being gathered.
     '''
-    db: Session = db_session_factory()
-
+    db: Session
+    with db_session_factory() as db:
         with _acquire_lock_exclusive(db, RepeatableTask.__tablename__):
             now: datetime = datetime.now(tz=timezone.utc)
 
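_acquire_lock_exclusive is only referenced in this diff; its body isn't shown. For readers following the locking discussion in the docstring, one plausible shape for such a helper, offered as an assumption rather than the project's actual implementation, is a contextmanager that takes a Postgres table lock inside an explicit transaction:

import contextlib
import logging

from sqlalchemy import text
from sqlalchemy.orm import Session

@contextlib.contextmanager
def _acquire_lock_exclusive(db: Session, table: str):
    # A LOCK TABLE statement holds until the surrounding transaction ends, so
    # the lock is taken inside an explicit begin() and released on commit or
    # rollback. The table name comes from our own models, never user input.
    with db.begin():
        db.execute(text(f"LOCK TABLE {table} IN ACCESS EXCLUSIVE MODE"))
        try:
            yield
        except Exception:
            logging.exception("Failed while holding exclusive lock on %s", table)
            raise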