pierrejeambrun commented on code in PR #45062:
URL: https://github.com/apache/airflow/pull/45062#discussion_r1904194885
##########
airflow/models/backfill.py:
##########
@@ -47,13 +38,15 @@
from airflow.settings import json
from airflow.utils import timezone
from airflow.utils.session import create_session
-from airflow.utils.sqlalchemy import UtcDateTime, nulls_first, with_row_locks
+from airflow.utils.sqlalchemy import UtcDateTime, with_row_locks
from airflow.utils.state import DagRunState
from airflow.utils.types import DagRunTriggeredByType, DagRunType
if TYPE_CHECKING:
from datetime import datetime
+ from typing_extensions import Literal
Review Comment:
Import Literal from `typing`
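A minimal sketch of the suggested change (assuming the codebase
targets Python 3.8+, where `Literal` lives in the standard-library
`typing` module):

```python
if TYPE_CHECKING:
    from datetime import datetime
    from typing import Literal  # stdlib since Python 3.8, no typing_extensions needed
```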
##########
airflow/models/backfill.py:
##########
@@ -158,72 +151,125 @@ def validate_sort_ordinal(self, key, val):
def _create_backfill_dag_run(
*,
dag,
- info,
- reprocess_behavior: ReprocessBehavior,
+ dagrun_info_list,
+ reprocess_behavior: ReprocessBehavior | None = None,
backfill_id,
- dag_run_conf,
- backfill_sort_ordinal,
+ dag_run_conf: dict | None,
session,
-):
+ dry_run,
+) -> list[datetime]:
from airflow.models import DagRun
- with session.begin_nested() as nested:
- dr = session.scalar(
- with_row_locks(
- select(DagRun)
- .where(DagRun.logical_date == info.logical_date)
- .order_by(nulls_first(desc(DagRun.start_date), session=session))
- .limit(1),
- session=session,
+ backfill_sort_ordinal = 0
+ logical_dates = []
+ dagrun_infos = list(dagrun_info_list)
+
+ if reprocess_behavior is None:
+ reprocess_behavior = ReprocessBehavior.NONE
+ if dag_run_conf is None:
+ dag_run_conf = {}
+
+ dag_run_ranked = (
+ select(
+ DagRun.logical_date,
+ DagRun.start_date,
+ DagRun.dag_id,
+ func.row_number()
+ .over(
+ partition_by=DagRun.logical_date,
+ order_by=(case([(DagRun.start_date.is_(None), 0)], else_=1), DagRun.start_date.desc()),
)
+ .label("row_number"),
)
- if dr:
- non_create_reason = None
- if dr.state not in (DagRunState.SUCCESS, DagRunState.FAILED):
- non_create_reason = BackfillDagRunExceptionReason.IN_FLIGHT
- elif reprocess_behavior is ReprocessBehavior.NONE:
- non_create_reason = BackfillDagRunExceptionReason.ALREADY_EXISTS
- elif reprocess_behavior is ReprocessBehavior.FAILED:
- if dr.state != DagRunState.FAILED:
+ .where(DagRun.dag_id == dag.dag_id)
+ .where(DagRun.logical_date.in_([info.logical_date for info in dagrun_infos]))
+ .subquery()
+ )
+
+ existing_dag_runs = {
+ dr.logical_date: dr
+ for dr in session.scalars(
+ select(DagRun)
+ .join(
+ dag_run_ranked,
+ (DagRun.logical_date == dag_run_ranked.c.logical_date)
+ & (
+ (DagRun.start_date == dag_run_ranked.c.start_date)
+ | ((DagRun.start_date.is_(None)) & (dag_run_ranked.c.start_date.is_(None)))
+ )
+ & (DagRun.dag_id == dag_run_ranked.c.dag_id),
+ )
+ .where(dag_run_ranked.c.row_number == 1)
+ ).all()
+ }
+
+ print(existing_dag_runs)
Review Comment:
Remove the `print` statements.
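If that output is still useful for troubleshooting, a hedged sketch
of a replacement (assuming a module-level logger is available or
added in `backfill.py`):

```python
import logging

log = logging.getLogger(__name__)

# Instead of print(existing_dag_runs): emit at debug level so the output
# goes through the normal logging configuration and can be filtered out.
log.debug("Existing dag runs for backfill %s: %s", backfill_id, existing_dag_runs)
```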
##########
airflow/models/backfill.py:
##########
@@ -158,72 +151,125 @@ def validate_sort_ordinal(self, key, val):
def _create_backfill_dag_run(
*,
dag,
- info,
- reprocess_behavior: ReprocessBehavior,
+ dagrun_info_list,
+ reprocess_behavior: ReprocessBehavior | None = None,
backfill_id,
- dag_run_conf,
- backfill_sort_ordinal,
+ dag_run_conf: dict | None,
session,
-):
+ dry_run,
+) -> list[datetime]:
from airflow.models import DagRun
- with session.begin_nested() as nested:
- dr = session.scalar(
- with_row_locks(
- select(DagRun)
- .where(DagRun.logical_date == info.logical_date)
- .order_by(nulls_first(desc(DagRun.start_date), session=session))
- .limit(1),
- session=session,
+ backfill_sort_ordinal = 0
+ logical_dates = []
+ dagrun_infos = list(dagrun_info_list)
Review Comment:
I think we need to slightly modify `_get_info_list` so we don't have
to explicitly cast to dict here. We can also add type hints
(`-> list[DagRunInfo]`).
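A rough sketch of the kind of change I have in mind (the parameter
names are illustrative and assume `_get_info_list` wraps
`DAG.iter_dagrun_infos_between`):

```python
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from datetime import datetime

    from airflow.models.dag import DAG
    from airflow.timetables.base import DagRunInfo


def _get_info_list(*, dag: DAG, from_date: datetime, to_date: datetime, reverse: bool) -> list[DagRunInfo]:
    """Return a concrete list of DagRunInfo so callers no longer need to cast."""
    infos = list(dag.iter_dagrun_infos_between(from_date, to_date))
    return list(reversed(infos)) if reverse else infos
```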
##########
airflow/models/backfill.py:
##########
@@ -158,72 +151,125 @@ def validate_sort_ordinal(self, key, val):
def _create_backfill_dag_run(
*,
dag,
- info,
- reprocess_behavior: ReprocessBehavior,
+ dagrun_info_list,
+ reprocess_behavior: ReprocessBehavior | None = None,
backfill_id,
- dag_run_conf,
- backfill_sort_ordinal,
+ dag_run_conf: dict | None,
session,
-):
+ dry_run,
+) -> list[datetime]:
from airflow.models import DagRun
- with session.begin_nested() as nested:
- dr = session.scalar(
- with_row_locks(
- select(DagRun)
- .where(DagRun.logical_date == info.logical_date)
- .order_by(nulls_first(desc(DagRun.start_date), session=session))
- .limit(1),
- session=session,
+ backfill_sort_ordinal = 0
+ logical_dates = []
+ dagrun_infos = list(dagrun_info_list)
+
+ if reprocess_behavior is None:
+ reprocess_behavior = ReprocessBehavior.NONE
+ if dag_run_conf is None:
+ dag_run_conf = {}
+
+ dag_run_ranked = (
+ select(
+ DagRun.logical_date,
+ DagRun.start_date,
+ DagRun.dag_id,
+ func.row_number()
+ .over(
+ partition_by=DagRun.logical_date,
+ order_by=(case([(DagRun.start_date.is_(None), 0)], else_=1), DagRun.start_date.desc()),
)
+ .label("row_number"),
)
- if dr:
- non_create_reason = None
- if dr.state not in (DagRunState.SUCCESS, DagRunState.FAILED):
- non_create_reason = BackfillDagRunExceptionReason.IN_FLIGHT
- elif reprocess_behavior is ReprocessBehavior.NONE:
- non_create_reason = BackfillDagRunExceptionReason.ALREADY_EXISTS
- elif reprocess_behavior is ReprocessBehavior.FAILED:
- if dr.state != DagRunState.FAILED:
+ .where(DagRun.dag_id == dag.dag_id)
+ .where(DagRun.logical_date.in_([info.logical_date for info in dagrun_infos]))
+ .subquery()
+ )
+
+ existing_dag_runs = {
+ dr.logical_date: dr
+ for dr in session.scalars(
+ select(DagRun)
+ .join(
+ dag_run_ranked,
+ (DagRun.logical_date == dag_run_ranked.c.logical_date)
+ & (
+ (DagRun.start_date == dag_run_ranked.c.start_date)
+ | ((DagRun.start_date.is_(None)) & (dag_run_ranked.c.start_date.is_(None)))
+ )
+ & (DagRun.dag_id == dag_run_ranked.c.dag_id),
+ )
+ .where(dag_run_ranked.c.row_number == 1)
+ ).all()
+ }
Review Comment:
I assume all of this is necessary because of the removal of the
unique constraint on `logical_date`?
What is the update on that?
cc: @dstandish
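If I read the diff correctly, the ranked subquery exists to pick a
single candidate run per `logical_date` now that several runs can
share one. A small pure-Python sketch of the selection rule encoded
by the `row_number()` ranking (illustrative only, not the query
itself): a run that has not started yet wins, otherwise the most
recently started run, mirroring the old
`nulls_first(desc(DagRun.start_date))` ordering.

```python
def _pick_existing_run(runs):
    """Illustrative helper: the run the ranked subquery keeps per logical_date."""
    # Runs with no start_date rank first (the NULLS FIRST behaviour) ...
    not_started = [dr for dr in runs if dr.start_date is None]
    if not_started:
        return not_started[0]
    # ... otherwise the most recently started run is kept.
    return max(runs, key=lambda dr: dr.start_date)
```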
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]