anishgirianish commented on code in PR #63491:
URL: https://github.com/apache/airflow/pull/63491#discussion_r3031199891


##########
airflow-core/src/airflow/executors/base_executor.py:
##########
@@ -212,57 +214,70 @@ def __repr__(self):
         _repr += ")"
         return _repr
 
+    @property
+    def queued_tasks(self) -> dict:
+        """Backward-compat property: delegates to 
``executor_queues[WorkloadType.EXECUTE_TASK]``."""
+        warnings.warn(
+            "queued_tasks is deprecated. Use 
executor_queues[WorkloadType.EXECUTE_TASK] instead.",
+            RemovedInAirflow4Warning,
+            stacklevel=2,
+        )
+        return self.executor_queues[WorkloadType.EXECUTE_TASK]
+
+    @property
+    def queued_callbacks(self) -> dict:
+        """Backward-compat property: delegates to 
``executor_queues[WorkloadType.EXECUTE_CALLBACK]``."""
+        warnings.warn(
+            "queued_callbacks is deprecated. Use 
executor_queues[WorkloadType.EXECUTE_CALLBACK] instead.",
+            RemovedInAirflow4Warning,
+            stacklevel=2,
+        )
+        return self.executor_queues[WorkloadType.EXECUTE_CALLBACK]
+
+    @property
+    def supports_callbacks(self) -> bool:
+        """Backward-compat property: True if EXECUTE_CALLBACK is in 
supported_workload_types."""
+        warnings.warn(
+            "supports_callbacks is deprecated. "
+            "Use WorkloadType.EXECUTE_CALLBACK in supported_workload_types 
instead.",
+            RemovedInAirflow4Warning,
+            stacklevel=2,
+        )
+        return WorkloadType.EXECUTE_CALLBACK in self.supported_workload_types
+
     def start(self):  # pragma: no cover
         """Executors may need to get things started."""
 
     def log_task_event(self, *, event: str, extra: str, ti_key: 
TaskInstanceKey):
         """Add an event to the log table."""
         self._task_event_logs.append(Log(event=event, task_instance=ti_key, 
extra=extra))
 
-    def queue_workload(self, workload: workloads.All, session: Session) -> 
None:
-        if isinstance(workload, workloads.ExecuteTask):
-            ti = workload.ti
-            self.queued_tasks[ti.key] = workload
-        elif isinstance(workload, workloads.ExecuteCallback):
-            if not self.supports_callbacks:
-                raise NotImplementedError(
-                    f"{type(self).__name__} does not support ExecuteCallback 
workloads. "
-                    f"Set supports_callbacks = True and implement callback 
handling in _process_workloads(). "
-                    f"See LocalExecutor or CeleryExecutor for reference 
implementation."
-                )
-            self.queued_callbacks[workload.callback.id] = workload
-        else:
-            raise ValueError(
-                f"Un-handled workload type {type(workload).__name__!r} in 
{type(self).__name__}. "
-                f"Workload must be one of: ExecuteTask, ExecuteCallback."
+    def queue_workload(self, workload: QueueableWorkload, session: Session) -> 
None:
+        if workload.type not in self.supported_workload_types:
+            raise NotImplementedError(
+                f"{type(self).__name__} does not support {workload.type!r} 
workloads. "
+                f"Add {workload.type!r} to supported_workload_types and 
implement handling "
+                f"in _process_workloads()."
             )
+        self.executor_queues[workload.type][workload.queue_key] = workload
 
-    def _get_workloads_to_schedule(self, open_slots: int) -> 
list[tuple[WorkloadKey, workloads.All]]:
+    def _get_workloads_to_schedule(self, open_slots: int) -> 
list[tuple[WorkloadKey, QueueableWorkload]]:
         """
         Select and return the next batch of workloads to schedule, respecting 
priority policy.
 
-        Priority Policy: Callbacks are scheduled before tasks (callbacks 
complete existing work).
-        Callbacks are processed in FIFO order. Tasks are sorted by 
priority_weight (higher priority first).
+        Workloads are sorted by ``WORKLOAD_TYPE_TIER`` (tier assigned by 
workload type) first,
+        then by ``sort_key`` within the same tier.  Lower tier values are 
scheduled first;
+        within the same tier, lower ``sort_key`` values come first 
(``sort_key=0`` gives FIFO).
 
         :param open_slots: Number of available execution slots
         """
-        workloads_to_schedule: list[tuple[WorkloadKey, workloads.All]] = []
-
-        if self.queued_callbacks:
-            for key, workload in self.queued_callbacks.items():
-                if len(workloads_to_schedule) >= open_slots:
-                    break
-                workloads_to_schedule.append((key, workload))
-
-        if open_slots > len(workloads_to_schedule) and self.queued_tasks:
-            for task_key, task_workload in 
self.order_queued_tasks_by_priority():
-                if len(workloads_to_schedule) >= open_slots:
-                    break
-                workloads_to_schedule.append((task_key, task_workload))
+        all_workloads: list[tuple[WorkloadKey, QueueableWorkload]] = [
+            (key, workload) for queue in self.executor_queues.values() for 
key, workload in queue.items()
+        ]
+        all_workloads.sort(key=lambda item: 
(workloads.WORKLOAD_TYPE_TIER[item[1].type], item[1].sort_key))
+        return all_workloads[:open_slots]

Review Comment:
   Already handled: `queue_workload` validates `workload.type` against 
`supported_workload_types` and raises a clear `NotImplementedError` before anything 
reaches the queue, so `_get_workloads_to_schedule` can never encounter an 
unregistered type.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to