prefect.server.orchestration.core_policy

Orchestration logic that fires on state transitions.

CoreFlowPolicy and CoreTaskPolicy contain all default orchestration rules that Prefect enforces on a state transition.

BackgroundTaskPolicy

Bases: BaseOrchestrationPolicy

Orchestration rules that run against task-run-state transitions in priority order.

Source code in src/prefect/server/orchestration/core_policy.py
class BackgroundTaskPolicy(BaseOrchestrationPolicy):
    """
    Orchestration rules that run against task-run-state transitions in priority order.
    """

    @staticmethod
    def priority():
        return [
            PreventPendingTransitions,
            CacheRetrieval,
            HandleTaskTerminalStateTransitions,
            # SecureTaskConcurrencySlots,  # retrieve cached states even if slots are full
            CopyScheduledTime,
            CopyTaskParametersID,
            WaitForScheduledTime,
            RetryFailedTasks,
            RenameReruns,
            UpdateFlowRunTrackerOnTasks,
            CacheInsertion,
            ReleaseTaskConcurrencySlots,
            EnqueueScheduledTasks,
        ]
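
For orientation, a policy is simply an ordered list of rule classes: the server applies each rule in `priority()` order to every state transition the policy governs. Below is a minimal sketch of a custom policy composed from a subset of the rules documented on this page, assuming the base class lives at `prefect.server.orchestration.policies` (illustrative only, not part of Prefect's public API surface):

from prefect.server.orchestration.core_policy import (
    CacheInsertion,
    CacheRetrieval,
    RetryFailedTasks,
    WaitForScheduledTime,
)
from prefect.server.orchestration.policies import BaseOrchestrationPolicy


class MinimalTaskPolicy(BaseOrchestrationPolicy):
    """Illustrative policy: caching, scheduling, and retries only."""

    @staticmethod
    def priority():
        # Rules fire in this order on every governed task-run transition
        return [
            CacheRetrieval,
            WaitForScheduledTime,
            RetryFailedTasks,
            CacheInsertion,
        ]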

BypassCancellingFlowRunsWithNoInfra

Bases: BaseOrchestrationRule

Rejects transitions from Scheduled to Cancelling, and instead sets the state to Cancelled, if the flow run has no associated infrastructure process ID. Also rejects transitions from Paused to Cancelling if the Paused state's details indicate the flow run has been suspended, exiting the flow and tearing down infrastructure.

The Cancelling state is used to clean up infrastructure. If there is no infrastructure to clean up, we can transition directly to Cancelled. Runs that are Resuming are in a Scheduled state, were previously Suspended, and do not yet have infrastructure.

Runs that are AwaitingRetry are in a Scheduled state and may have associated infrastructure.

Source code in src/prefect/server/orchestration/core_policy.py
class BypassCancellingFlowRunsWithNoInfra(BaseOrchestrationRule):
    """Rejects transitions from Scheduled to Cancelling, and instead sets the state to Cancelled,
    if the flow run has no associated infrastructure process ID. Also Rejects transitions from
    Paused to Cancelling if the Paused state's details indicates the flow run has been suspended,
    exiting the flow and tearing down infra.

    The `Cancelling` state is used to clean up infrastructure. If there is not infrastructure
    to clean up, we can transition directly to `Cancelled`. Runs that are `Resuming` are in a
    `Scheduled` state that were previously `Suspended` and do not yet have infrastructure.

    Runs that are `AwaitingRetry` are a `Scheduled` state that may have associated infrastructure.
    """

    FROM_STATES = {StateType.SCHEDULED, StateType.PAUSED}
    TO_STATES = {StateType.CANCELLING}

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: FlowOrchestrationContext,
    ) -> None:
        if (
            initial_state.type == states.StateType.SCHEDULED
            and not context.run.infrastructure_pid
            or initial_state.name == "Resuming"
        ):
            await self.reject_transition(
                state=states.Cancelled(),
                reason="Scheduled flow run has no infrastructure to terminate.",
            )
        elif (
            initial_state.type == states.StateType.PAUSED
            and initial_state.state_details.pause_reschedule
        ):
            await self.reject_transition(
                state=states.Cancelled(),
                reason="Suspended flow run has no infrastructure to terminate.",
            )

CacheInsertion

Bases: BaseOrchestrationRule

Caches completed states with cache keys after they are validated.

Source code in src/prefect/server/orchestration/core_policy.py
class CacheInsertion(BaseOrchestrationRule):
    """
    Caches completed states with cache keys after they are validated.
    """

    FROM_STATES = ALL_ORCHESTRATION_STATES
    TO_STATES = [StateType.COMPLETED]

    @inject_db
    async def after_transition(
        self,
        initial_state: Optional[states.State],
        validated_state: Optional[states.State],
        context: TaskOrchestrationContext,
        db: PrefectDBInterface,
    ) -> None:
        if not validated_state or not context.session:
            return

        cache_key = validated_state.state_details.cache_key
        if cache_key:
            new_cache_item = db.TaskRunStateCache(
                cache_key=cache_key,
                cache_expiration=validated_state.state_details.cache_expiration,
                task_run_state_id=validated_state.id,
            )
            context.session.add(new_cache_item)

CacheRetrieval

Bases: BaseOrchestrationRule

Rejects running states if a completed state has been cached.

This rule rejects transitions into a running state with a cache key if the key has already been associated with a completed state in the cache table. The client will be instructed to transition into the cached completed state instead.

Source code in src/prefect/server/orchestration/core_policy.py
class CacheRetrieval(BaseOrchestrationRule):
    """
    Rejects running states if a completed state has been cached.

    This rule rejects transitions into a running state with a cache key if the key
    has already been associated with a completed state in the cache table. The client
    will be instructed to transition into the cached completed state instead.
    """

    FROM_STATES = ALL_ORCHESTRATION_STATES
    TO_STATES = [StateType.RUNNING]

    @inject_db
    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: TaskOrchestrationContext,
        db: PrefectDBInterface,
    ) -> None:
        cache_key = proposed_state.state_details.cache_key
        if cache_key and not proposed_state.state_details.refresh_cache:
            # Check for cached states matching the cache key
            cached_state_id = (
                select(db.TaskRunStateCache.task_run_state_id)
                .where(
                    sa.and_(
                        db.TaskRunStateCache.cache_key == cache_key,
                        sa.or_(
                            db.TaskRunStateCache.cache_expiration.is_(None),
                            db.TaskRunStateCache.cache_expiration > pendulum.now("utc"),
                        ),
                    ),
                )
                .order_by(db.TaskRunStateCache.created.desc())
                .limit(1)
            ).scalar_subquery()
            query = select(db.TaskRunState).where(db.TaskRunState.id == cached_state_id)
            cached_state = (await context.session.execute(query)).scalar()
            if cached_state:
                new_state = cached_state.as_state().fresh_copy()
                new_state.name = "Cached"
                await self.reject_transition(
                    state=new_state, reason="Retrieved state from cache"
                )
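
On the client side, CacheInsertion and CacheRetrieval are exercised whenever a task is given a cache key. A minimal sketch using the standard `cache_key_fn` and `cache_expiration` task options (`task_input_hash` is provided by `prefect.tasks`):

from datetime import timedelta

from prefect import flow, task
from prefect.tasks import task_input_hash


@task(cache_key_fn=task_input_hash, cache_expiration=timedelta(hours=1))
def expensive(x: int) -> int:
    return x * 2


@flow
def pipeline():
    expensive(1)  # runs, and CacheInsertion records the Completed state
    expensive(1)  # CacheRetrieval rejects Running and returns the "Cached" state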

ClientSideTaskOrchestrationPolicy

Bases: BaseOrchestrationPolicy

Orchestration rules that run against task-run-state transitions in priority order, specifically for clients doing client-side orchestration.

Source code in src/prefect/server/orchestration/core_policy.py
class ClientSideTaskOrchestrationPolicy(BaseOrchestrationPolicy):
    """
    Orchestration rules that run against task-run-state transitions in priority order,
    specifically for clients doing client-side orchestration.
    """

    @staticmethod
    def priority():
        return [
            CacheRetrieval,
            HandleTaskTerminalStateTransitions,
            PreventRunningTasksFromStoppedFlows,
            CopyScheduledTime,
            WaitForScheduledTime,
            RetryFailedTasks,
            RenameReruns,
            UpdateFlowRunTrackerOnTasks,
            CacheInsertion,
            ReleaseTaskConcurrencySlots,
        ]

CopyScheduledTime

Bases: BaseOrchestrationRule

Ensures scheduled time is copied from scheduled states to pending states.

If a new scheduled time has been proposed on the pending state, the scheduled time on the scheduled state will be ignored.

Source code in src/prefect/server/orchestration/core_policy.py
class CopyScheduledTime(BaseOrchestrationRule):
    """
    Ensures scheduled time is copied from scheduled states to pending states.

    If a new scheduled time has been proposed on the pending state, the scheduled time
    on the scheduled state will be ignored.
    """

    FROM_STATES = [StateType.SCHEDULED]
    TO_STATES = [StateType.PENDING]

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: OrchestrationContext,
    ) -> None:
        if not proposed_state.state_details.scheduled_time:
            proposed_state.state_details.scheduled_time = (
                initial_state.state_details.scheduled_time
            )

CopyTaskParametersID

Bases: BaseOrchestrationRule

Ensures a task's parameters ID is copied from Scheduled to Pending and from Pending to Running states.

If a parameters ID has been included on the proposed state, the parameters ID on the initial state will be ignored.

Source code in src/prefect/server/orchestration/core_policy.py
class CopyTaskParametersID(BaseOrchestrationRule):
    """
    Ensures a task's parameters ID is copied from Scheduled to Pending and from
    Pending to Running states.

    If a parameters ID has been included on the proposed state, the parameters ID
    on the initial state will be ignored.
    """

    FROM_STATES = [StateType.SCHEDULED, StateType.PENDING]
    TO_STATES = [StateType.PENDING, StateType.RUNNING]

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: OrchestrationContext,
    ) -> None:
        if not proposed_state.state_details.task_parameters_id:
            proposed_state.state_details.task_parameters_id = (
                initial_state.state_details.task_parameters_id
            )

CoreFlowPolicy

Bases: BaseOrchestrationPolicy

Orchestration rules that run against flow-run-state transitions in priority order.

Source code in src/prefect/server/orchestration/core_policy.py
class CoreFlowPolicy(BaseOrchestrationPolicy):
    """
    Orchestration rules that run against flow-run-state transitions in priority order.
    """

    @staticmethod
    def priority():
        return [
            PreventDuplicateTransitions,
            HandleFlowTerminalStateTransitions,
            EnforceCancellingToCancelledTransition,
            BypassCancellingFlowRunsWithNoInfra,
            PreventPendingTransitions,
            SecureFlowConcurrencySlots,
            EnsureOnlyScheduledFlowsMarkedLate,
            HandlePausingFlows,
            HandleResumingPausedFlows,
            CopyScheduledTime,
            WaitForScheduledTime,
            RetryFailedFlows,
            InstrumentFlowRunStateTransitions,
            ReleaseFlowConcurrencySlots,
        ]

CoreFlowPolicyWithoutDeploymentConcurrency

Bases: BaseOrchestrationPolicy

Orchestration rules that run against flow-run-state transitions in priority order.

Source code in src/prefect/server/orchestration/core_policy.py
class CoreFlowPolicyWithoutDeploymentConcurrency(BaseOrchestrationPolicy):
    """
    Orchestration rules that run against flow-run-state transitions in priority order.
    """

    @staticmethod
    def priority():
        return [
            PreventDuplicateTransitions,
            HandleFlowTerminalStateTransitions,
            EnforceCancellingToCancelledTransition,
            BypassCancellingFlowRunsWithNoInfra,
            PreventPendingTransitions,
            EnsureOnlyScheduledFlowsMarkedLate,
            HandlePausingFlows,
            HandleResumingPausedFlows,
            CopyScheduledTime,
            WaitForScheduledTime,
            RetryFailedFlows,
            InstrumentFlowRunStateTransitions,
        ]

CoreTaskPolicy

Bases: BaseOrchestrationPolicy

Orchestration rules that run against task-run-state transitions in priority order.

Source code in src/prefect/server/orchestration/core_policy.py
class CoreTaskPolicy(BaseOrchestrationPolicy):
    """
    Orchestration rules that run against task-run-state transitions in priority order.
    """

    @staticmethod
    def priority():
        return [
            CacheRetrieval,
            HandleTaskTerminalStateTransitions,
            PreventRunningTasksFromStoppedFlows,
            SecureTaskConcurrencySlots,  # retrieve cached states even if slots are full
            CopyScheduledTime,
            WaitForScheduledTime,
            RetryFailedTasks,
            RenameReruns,
            UpdateFlowRunTrackerOnTasks,
            CacheInsertion,
            ReleaseTaskConcurrencySlots,
        ]

EnforceCancellingToCancelledTransition

Bases: BaseOrchestrationRule

Rejects transitions from Cancelling to any terminal state except for Cancelled.

Source code in src/prefect/server/orchestration/core_policy.py
class EnforceCancellingToCancelledTransition(BaseOrchestrationRule):
    """
    Rejects transitions from Cancelling to any terminal state except for Cancelled.
    """

    FROM_STATES = {StateType.CANCELLED, StateType.CANCELLING}
    TO_STATES = ALL_ORCHESTRATION_STATES - {StateType.CANCELLED}

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: TaskOrchestrationContext,
    ) -> None:
        await self.reject_transition(
            state=None,
            reason=(
                "Cannot transition flows that are cancelling to a state other "
                "than Cancelled."
            ),
        )
        return

EnqueueScheduledTasks

Bases: BaseOrchestrationRule

Enqueues background task runs when they are scheduled

Source code in src/prefect/server/orchestration/core_policy.py
class EnqueueScheduledTasks(BaseOrchestrationRule):
    """
    Enqueues background task runs when they are scheduled
    """

    FROM_STATES = ALL_ORCHESTRATION_STATES
    TO_STATES = [StateType.SCHEDULED]

    async def after_transition(
        self,
        initial_state: Optional[states.State],
        validated_state: Optional[states.State],
        context: TaskOrchestrationContext,
    ) -> None:
        if not validated_state:
            # Only if the transition was valid
            return

        if not validated_state.state_details.deferred:
            # Only for tasks that are deferred
            return

        task_run: core.TaskRun = core.TaskRun.model_validate(context.run)
        queue: TaskQueue = TaskQueue.for_key(task_run.task_key)

        if validated_state.name == "AwaitingRetry":
            await queue.retry(task_run)
        else:
            await queue.enqueue(task_run)
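
This rule only applies to deferred ("background") task runs. A sketch of how such runs are produced and consumed, assuming a Prefect version with background-task support where `Task.delay` and `prefect.task_worker.serve` are the entry points (treat both as assumptions for your version):

from prefect import task
from prefect.task_worker import serve  # assumed import path


@task
def shout(message: str) -> str:
    return message.upper()


if __name__ == "__main__":
    # Deferring creates a Scheduled task run with state_details.deferred=True,
    # which this rule then enqueues on the TaskQueue for the task's key.
    shout.delay("hello")

    # A task worker pops queued runs and executes them.
    serve(shout)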

HandleFlowTerminalStateTransitions

Bases: BaseOrchestrationRule

We do not allow flows to leave terminal states if:
- The flow is completed and has a persisted result
- The flow is going to CANCELLING / PAUSED / CRASHED
- The flow is going to scheduled and has no deployment

We reset the pause metadata when a flow leaves a terminal state for a non-terminal state. This resets pause behavior during manual flow run retries.

Source code in src/prefect/server/orchestration/core_policy.py
class HandleFlowTerminalStateTransitions(BaseOrchestrationRule):
    """
    We do not allow flows to leave terminal states if:
    - The flow is completed and has a persisted result
    - The flow is going to CANCELLING / PAUSED / CRASHED
    - The flow is going to scheduled and has no deployment

    We reset the pause metadata when a flow leaves a terminal state for a non-terminal
    state. This resets pause behavior during manual flow run retries.
    """

    FROM_STATES = TERMINAL_STATES
    TO_STATES = ALL_ORCHESTRATION_STATES

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: FlowOrchestrationContext,
    ) -> None:
        self.original_flow_policy = context.run.empirical_policy.model_dump()

        # Do not allow runs to be marked as crashed, paused, or cancelling if already terminal
        if proposed_state.type in {
            StateType.CANCELLING,
            StateType.PAUSED,
            StateType.CRASHED,
        }:
            await self.abort_transition(
                f"Run is already in terminal state {initial_state.type.value}."
            )
            return

        # Only allow departure from a happily completed state if the result is not
        # persisted and a rerun is being proposed
        if (
            initial_state.is_completed()
            and not proposed_state.is_final()
            and initial_state.data
            and initial_state.data.get("type") != "unpersisted"
        ):
            await self.reject_transition(None, "Run is already COMPLETED.")
            return

        # Do not allow runs to be rescheduled without a deployment
        if proposed_state.is_scheduled() and not context.run.deployment_id:
            await self.abort_transition(
                "Cannot reschedule a run without an associated deployment."
            )
            return

        if not proposed_state.is_final():
            # Reset pause metadata when leaving a terminal state
            api_version = context.parameters.get("api-version", None)
            if api_version is None or api_version >= Version("0.8.4"):
                updated_policy = context.run.empirical_policy.model_dump()
                updated_policy["resuming"] = False
                updated_policy["pause_keys"] = set()
                context.run.empirical_policy = core.FlowRunPolicy(**updated_policy)

    async def cleanup(
        self,
        initial_state: Optional[states.State],
        validated_state: Optional[states.State],
        context: OrchestrationContext,
    ):
        context.run.empirical_policy = core.FlowRunPolicy(**self.original_flow_policy)

HandlePausingFlows

Bases: BaseOrchestrationRule

Governs runs attempting to enter a Paused/Suspended state

Source code in src/prefect/server/orchestration/core_policy.py
class HandlePausingFlows(BaseOrchestrationRule):
    """
    Governs runs attempting to enter a Paused/Suspended state
    """

    FROM_STATES = ALL_ORCHESTRATION_STATES
    TO_STATES = [StateType.PAUSED]

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: TaskOrchestrationContext,
    ) -> None:
        verb = "suspend" if proposed_state.name == "Suspended" else "pause"

        if initial_state is None:
            await self.abort_transition(f"Cannot {verb} flows with no state.")
            return

        if not initial_state.is_running():
            await self.reject_transition(
                state=None,
                reason=f"Cannot {verb} flows that are not currently running.",
            )
            return

        self.key = proposed_state.state_details.pause_key
        if self.key is None:
            # if no pause key is provided, default to a UUID
            self.key = str(uuid4())

        if self.key in context.run.empirical_policy.pause_keys:
            await self.reject_transition(
                state=None, reason=f"This {verb} has already fired."
            )
            return

        if proposed_state.state_details.pause_reschedule:
            if context.run.parent_task_run_id:
                await self.abort_transition(
                    reason=f"Cannot {verb} subflows.",
                )
                return

            if context.run.deployment_id is None:
                await self.abort_transition(
                    reason=f"Cannot {verb} flows without a deployment.",
                )
                return

    async def after_transition(
        self,
        initial_state: Optional[states.State],
        validated_state: Optional[states.State],
        context: TaskOrchestrationContext,
    ) -> None:
        updated_policy = context.run.empirical_policy.model_dump()
        updated_policy["pause_keys"].add(self.key)
        context.run.empirical_policy = core.FlowRunPolicy(**updated_policy)
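
Client code triggers this rule whenever a running flow proposes a Paused or Suspended state. A minimal sketch (the `pause_flow_run` import path may vary between Prefect versions):

from prefect import flow, pause_flow_run  # import path assumed; may vary by version


@flow
def approval_flow():
    # Proposes Running -> Paused; the rule records the pause key on the run's
    # empirical policy so the same pause cannot fire twice.
    pause_flow_run(timeout=600)
    print("resumed and continuing")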

HandleResumingPausedFlows

Bases: BaseOrchestrationRule

Governs runs attempting to leave a Paused state

Source code in src/prefect/server/orchestration/core_policy.py
class HandleResumingPausedFlows(BaseOrchestrationRule):
    """
    Governs runs attempting to leave a Paused state
    """

    FROM_STATES = [StateType.PAUSED]
    TO_STATES = ALL_ORCHESTRATION_STATES

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: TaskOrchestrationContext,
    ) -> None:
        if not (
            proposed_state
            and (
                proposed_state.is_running()
                or proposed_state.is_scheduled()
                or proposed_state.is_final()
            )
        ):
            await self.reject_transition(
                state=None,
                reason=(
                    f"This run cannot transition to the {proposed_state.type} state"
                    f" from the {initial_state.type} state."
                ),
            )
            return

        verb = "suspend" if proposed_state.name == "Suspended" else "pause"

        if initial_state.state_details.pause_reschedule:
            if not context.run.deployment_id:
                await self.reject_transition(
                    state=None,
                    reason=(
                        f"Cannot reschedule a {proposed_state.name.lower()} flow run"
                        " without a deployment."
                    ),
                )
                return
        pause_timeout = initial_state.state_details.pause_timeout
        if pause_timeout and pause_timeout < pendulum.now("UTC"):
            pause_timeout_failure = states.Failed(
                message=(
                    f"The flow was {proposed_state.name.lower()} and never resumed."
                ),
            )
            await self.reject_transition(
                state=pause_timeout_failure,
                reason=f"The flow run {verb} has timed out and can no longer resume.",
            )
            return

    async def after_transition(
        self,
        initial_state: Optional[states.State],
        validated_state: Optional[states.State],
        context: TaskOrchestrationContext,
    ) -> None:
        updated_policy = context.run.empirical_policy.model_dump()
        updated_policy["resuming"] = True
        context.run.empirical_policy = core.FlowRunPolicy(**updated_policy)
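
Resuming is usually initiated through the client or UI. A sketch using the orchestration client, assuming `PrefectClient.resume_flow_run` is available and returns the orchestration result for the proposed transition:

import asyncio

from prefect.client.orchestration import get_client


async def resume(flow_run_id: str):
    # Proposes leaving Paused; this rule rejects the transition with a Failed
    # state if the pause_timeout has already elapsed.
    async with get_client() as client:
        return await client.resume_flow_run(flow_run_id)


# asyncio.run(resume("your-flow-run-id"))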

HandleTaskTerminalStateTransitions

Bases: BaseOrchestrationRule

We do not allow tasks to leave terminal states if:
- The task is completed and has a persisted result
- The task is going to CANCELLING / PAUSED / CRASHED

We reset the run count when a task leaves a terminal state for a non-terminal state which resets task run retries; this is particularly relevant for flow run retries.

Source code in src/prefect/server/orchestration/core_policy.py
class HandleTaskTerminalStateTransitions(BaseOrchestrationRule):
    """
    We do not allow tasks to leave terminal states if:
    - The task is completed and has a persisted result
    - The task is going to CANCELLING / PAUSED / CRASHED

    We reset the run count when a task leaves a terminal state for a non-terminal state
    which resets task run retries; this is particularly relevant for flow run retries.
    """

    FROM_STATES = TERMINAL_STATES
    TO_STATES = ALL_ORCHESTRATION_STATES

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: TaskOrchestrationContext,
    ) -> None:
        self.original_run_count = context.run.run_count

        # Do not allow runs to be marked as crashed, paused, or cancelling if already terminal
        if proposed_state.type in {
            StateType.CANCELLING,
            StateType.PAUSED,
            StateType.CRASHED,
        }:
            await self.abort_transition(f"Run is already {initial_state.type.value}.")
            return

        # Only allow departure from a happily completed state if the result is not persisted
        if (
            initial_state.is_completed()
            and initial_state.data
            and initial_state.data.get("type") != "unpersisted"
        ):
            await self.reject_transition(None, "This run is already completed.")
            return

        if not proposed_state.is_final():
            # Reset run count to reset retries
            context.run.run_count = 0

        # Change the name of the state to Retrying if it's a flow run retry
        if proposed_state.is_running() and context.run.flow_run_id is not None:
            self.flow_run = await context.flow_run()
            flow_retrying = context.run.flow_run_run_count < self.flow_run.run_count
            if flow_retrying:
                await self.rename_state("Retrying")

    async def cleanup(
        self,
        initial_state: Optional[states.State],
        validated_state: Optional[states.State],
        context: OrchestrationContext,
    ):
        # reset run count
        context.run.run_count = self.original_run_count

PreventDuplicateTransitions

Bases: BaseOrchestrationRule

Prevent duplicate transitions from being made right after one another.

This rule allows for clients to set an optional transition_id on a state. If the run's next transition has the same transition_id, the transition will be rejected and the existing state will be returned.

This allows clients to make state transition requests without worrying about the following case:
- A client makes a state transition request
- The server accepts and commits the transition
- The client never receives the response and retries the request

Source code in src/prefect/server/orchestration/core_policy.py
class PreventDuplicateTransitions(BaseOrchestrationRule):
    """
    Prevent duplicate transitions from being made right after one another.

    This rule allows for clients to set an optional transition_id on a state. If the
    run's next transition has the same transition_id, the transition will be
    rejected and the existing state will be returned.

    This allows for clients to make state transition requests without worrying about
    the following case:
    - A client making a state transition request
    - The server accepts transition and commits the transition
    - The client is unable to receive the response and retries the request
    """

    FROM_STATES = ALL_ORCHESTRATION_STATES
    TO_STATES = ALL_ORCHESTRATION_STATES

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: OrchestrationContext,
    ) -> None:
        if (
            initial_state is None
            or proposed_state is None
            or initial_state.state_details is None
            or proposed_state.state_details is None
        ):
            return

        initial_transition_id = getattr(
            initial_state.state_details, "transition_id", None
        )
        proposed_transition_id = getattr(
            proposed_state.state_details, "transition_id", None
        )
        if (
            initial_transition_id is not None
            and proposed_transition_id is not None
            and initial_transition_id == proposed_transition_id
        ):
            await self.reject_transition(
                # state=None will return the initial (current) state
                state=None,
                reason="This run has already made this state transition.",
            )
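
A client can make a state-transition request idempotent by stamping the proposed state with a transition_id. A sketch, assuming the standard client call `set_flow_run_state` and that `state_details.transition_id` can be set directly as shown:

from uuid import uuid4

from prefect.client.orchestration import get_client
from prefect.states import Completed


async def complete_idempotently(flow_run_id):
    state = Completed()
    # If this request is retried after a dropped response, the server sees the
    # same transition_id and returns the already-committed state instead of
    # applying the transition twice.
    state.state_details.transition_id = uuid4()

    async with get_client() as client:
        return await client.set_flow_run_state(flow_run_id=flow_run_id, state=state)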

PreventPendingTransitions

Bases: BaseOrchestrationRule

Prevents transitions to PENDING.

This rule is only used for flow runs.

This is intended to prevent race conditions during duplicate submissions of runs. Before a run is submitted to its execution environment, it should be placed in a PENDING state. If two workers attempt to submit the same run, one of them should encounter a PENDING -> PENDING transition and abort orchestration of the run.

Similarly, if the execution environment starts quickly the run may be in a RUNNING state when the second worker attempts the PENDING transition. We deny these state changes as well to prevent duplicate submission. If a run has transitioned to a RUNNING state a worker should not attempt to submit it again unless it has moved into a terminal state.

CANCELLING and CANCELLED runs should not be allowed to transition to PENDING. For re-runs of deployed runs, they should transition to SCHEDULED first. For re-runs of ad-hoc runs, they should transition directly to RUNNING.

Source code in src/prefect/server/orchestration/core_policy.py
class PreventPendingTransitions(BaseOrchestrationRule):
    """
    Prevents transitions to PENDING.

    This rule is only used for flow runs.

    This is intended to prevent race conditions during duplicate submissions of runs.
    Before a run is submitted to its execution environment, it should be placed in a
    PENDING state. If two workers attempt to submit the same run, one of them should
    encounter a PENDING -> PENDING transition and abort orchestration of the run.

    Similarly, if the execution environment starts quickly the run may be in a RUNNING
    state when the second worker attempts the PENDING transition. We deny these state
    changes as well to prevent duplicate submission. If a run has transitioned to a
    RUNNING state a worker should not attempt to submit it again unless it has moved
    into a terminal state.

    CANCELLING and CANCELLED runs should not be allowed to transition to PENDING.
    For re-runs of deployed runs, they should transition to SCHEDULED first.
    For re-runs of ad-hoc runs, they should transition directly to RUNNING.
    """

    FROM_STATES = [
        StateType.PENDING,
        StateType.CANCELLING,
        StateType.RUNNING,
        StateType.CANCELLED,
    ]
    TO_STATES = [StateType.PENDING]

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: OrchestrationContext,
    ) -> None:
        await self.abort_transition(
            reason=(
                f"This run is in a {initial_state.type.name} state and cannot"
                " transition to a PENDING state."
            )
        )

PreventRunningTasksFromStoppedFlows

Bases: BaseOrchestrationRule

Prevents running tasks from stopped flows.

A running state implies execution, but also the converse. This rule ensures that a flow's tasks cannot be run unless the flow is also running.

Source code in src/prefect/server/orchestration/core_policy.py
class PreventRunningTasksFromStoppedFlows(BaseOrchestrationRule):
    """
    Prevents running tasks from stopped flows.

    A running state implies execution, but also the converse. This rule ensures that a
    flow's tasks cannot be run unless the flow is also running.
    """

    FROM_STATES = ALL_ORCHESTRATION_STATES
    TO_STATES = [StateType.RUNNING]

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: TaskOrchestrationContext,
    ) -> None:
        flow_run = await context.flow_run()
        if flow_run is not None:
            if flow_run.state is None:
                await self.abort_transition(
                    reason="The enclosing flow must be running to begin task execution."
                )
            elif flow_run.state.type == StateType.PAUSED:
                # Use the flow run's Paused state details to preserve data like
                # timeouts.
                paused_state = states.Paused(
                    name="NotReady",
                    pause_expiration_time=flow_run.state.state_details.pause_timeout,
                    reschedule=flow_run.state.state_details.pause_reschedule,
                )
                await self.reject_transition(
                    state=paused_state,
                    reason=(
                        "The flow is paused, new tasks can execute after resuming flow"
                        f" run: {flow_run.id}."
                    ),
                )
            elif not flow_run.state.type == StateType.RUNNING:
                # task runners should abort task run execution
                await self.abort_transition(
                    reason=(
                        "The enclosing flow must be running to begin task execution."
                    ),
                )

ReleaseFlowConcurrencySlots

Bases: BaseUniversalTransform

Releases deployment concurrency slots held by a flow run.

This rule releases a concurrency slot for a deployment when a flow run transitions out of the Running or Cancelling state.

Source code in src/prefect/server/orchestration/core_policy.py
class ReleaseFlowConcurrencySlots(BaseUniversalTransform):
    """
    Releases deployment concurrency slots held by a flow run.

    This rule releases a concurrency slot for a deployment when a flow run
    transitions out of the Running or Cancelling state.
    """

    async def after_transition(
        self,
        context: FlowOrchestrationContext,
    ):
        if self.nullified_transition():
            return

        initial_state_type = (
            context.initial_state.type if context.initial_state else None
        )
        proposed_state_type = (
            context.proposed_state.type if context.proposed_state else None
        )

        # Check if the transition is valid for releasing concurrency slots.
        # This should happen within `after_transition` because BaseUniversalTransforms
        # don't know how to "fizzle" themselves if they encounter a transition that
        # shouldn't apply to them, even if they use FROM_STATES and TO_STATES.
        if not (
            initial_state_type
            in {
                states.StateType.RUNNING,
                states.StateType.CANCELLING,
                states.StateType.PENDING,
            }
            and proposed_state_type
            not in {
                states.StateType.PENDING,
                states.StateType.RUNNING,
                states.StateType.CANCELLING,
            }
        ):
            return
        if not context.session or not context.run.deployment_id:
            return

        deployment = await deployments.read_deployment(
            session=context.session,
            deployment_id=context.run.deployment_id,
        )
        if not deployment or not deployment.concurrency_limit_id:
            return

        await concurrency_limits_v2.bulk_decrement_active_slots(
            session=context.session,
            concurrency_limit_ids=[deployment.concurrency_limit_id],
            slots=1,
        )

ReleaseTaskConcurrencySlots

Bases: BaseUniversalTransform

Releases any concurrency slots held by a run upon exiting a Running or Cancelling state.

Source code in src/prefect/server/orchestration/core_policy.py
class ReleaseTaskConcurrencySlots(BaseUniversalTransform):
    """
    Releases any concurrency slots held by a run upon exiting a Running or
    Cancelling state.
    """

    async def after_transition(
        self,
        context: OrchestrationContext,
    ):
        if self.nullified_transition():
            return

        if context.validated_state and context.validated_state.type not in [
            states.StateType.RUNNING,
            states.StateType.CANCELLING,
        ]:
            filtered_limits = (
                await concurrency_limits.filter_concurrency_limits_for_orchestration(
                    context.session, tags=context.run.tags
                )
            )
            run_limits = {limit.tag: limit for limit in filtered_limits}
            for tag, cl in run_limits.items():
                active_slots = set(cl.active_slots)
                active_slots.discard(str(context.run.id))
                cl.active_slots = list(active_slots)

RenameReruns

Bases: BaseOrchestrationRule

Name the states if they have run more than once.

In the special case where the initial state is an "AwaitingRetry" scheduled state, the proposed state will be renamed to "Retrying" instead.

Source code in src/prefect/server/orchestration/core_policy.py
class RenameReruns(BaseOrchestrationRule):
    """
    Name the states if they have run more than once.

    In the special case where the initial state is an "AwaitingRetry" scheduled state,
    the proposed state will be renamed to "Retrying" instead.
    """

    FROM_STATES = ALL_ORCHESTRATION_STATES
    TO_STATES = [StateType.RUNNING]

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: TaskOrchestrationContext,
    ) -> None:
        run_count = context.run.run_count
        if run_count > 0:
            if initial_state.name == "AwaitingRetry":
                await self.rename_state("Retrying")
            else:
                await self.rename_state("Rerunning")

RetryFailedFlows

Bases: BaseOrchestrationRule

Rejects failed states and schedules a retry if the retry limit has not been reached.

This rule rejects transitions into a failed state if retries has been set and the run count has not reached the specified limit. The client will be instructed to transition into a scheduled state to retry flow execution.

Source code in src/prefect/server/orchestration/core_policy.py
class RetryFailedFlows(BaseOrchestrationRule):
    """
    Rejects failed states and schedules a retry if the retry limit has not been reached.

    This rule rejects transitions into a failed state if `retries` has been
    set and the run count has not reached the specified limit. The client will be
    instructed to transition into a scheduled state to retry flow execution.
    """

    FROM_STATES = [StateType.RUNNING]
    TO_STATES = [StateType.FAILED]

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: FlowOrchestrationContext,
    ) -> None:
        run_settings = context.run_settings
        run_count = context.run.run_count

        if run_settings.retries is None or run_count > run_settings.retries:
            return  # Retry count exceeded, allow transition to failed

        scheduled_start_time = pendulum.now("UTC").add(
            seconds=run_settings.retry_delay or 0
        )

        # support old-style flow run retries for older clients
        # older flow retries require us to loop over failed tasks to update their state
        # this is not required after API version 0.8.3
        api_version = context.parameters.get("api-version", None)
        if api_version and api_version < Version("0.8.3"):
            failed_task_runs = await models.task_runs.read_task_runs(
                context.session,
                flow_run_filter=filters.FlowRunFilter(id={"any_": [context.run.id]}),
                task_run_filter=filters.TaskRunFilter(
                    state={"type": {"any_": ["FAILED"]}}
                ),
            )
            for run in failed_task_runs:
                await models.task_runs.set_task_run_state(
                    context.session,
                    run.id,
                    state=states.AwaitingRetry(scheduled_time=scheduled_start_time),
                    force=True,
                )
                # Reset the run count so that the task run retries still work correctly
                run.run_count = 0

        # Reset pause metadata on retry
        # Pauses as a concept only exist after API version 0.8.4
        api_version = context.parameters.get("api-version", None)
        if api_version is None or api_version >= Version("0.8.4"):
            updated_policy = context.run.empirical_policy.model_dump()
            updated_policy["resuming"] = False
            updated_policy["pause_keys"] = set()
            context.run.empirical_policy = core.FlowRunPolicy(**updated_policy)

        # Generate a new state for the flow
        retry_state = states.AwaitingRetry(
            scheduled_time=scheduled_start_time,
            message=proposed_state.message,
            data=proposed_state.data,
        )
        await self.reject_transition(state=retry_state, reason="Retrying")
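
The `retries` and `retry_delay` settings this rule reads come from the flow definition. A minimal sketch:

from prefect import flow


@flow(retries=2, retry_delay_seconds=30)
def fragile_flow():
    # A Failed proposal is rejected and replaced with an AwaitingRetry state
    # until run_count exceeds `retries`, after which the failure stands.
    raise RuntimeError("transient error")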

RetryFailedTasks

Bases: BaseOrchestrationRule

Rejects failed states and schedules a retry if the retry limit has not been reached.

This rule rejects transitions into a failed state if retries has been set, the run count has not reached the specified limit, and the client asserts it is a retriable task run. The client will be instructed to transition into a scheduled state to retry task execution.

Source code in src/prefect/server/orchestration/core_policy.py
class RetryFailedTasks(BaseOrchestrationRule):
    """
    Rejects failed states and schedules a retry if the retry limit has not been reached.

    This rule rejects transitions into a failed state if `retries` has been
    set, the run count has not reached the specified limit, and the client
    asserts it is a retriable task run. The client will be instructed to
    transition into a scheduled state to retry task execution.
    """

    FROM_STATES = [StateType.RUNNING]
    TO_STATES = [StateType.FAILED]

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: TaskOrchestrationContext,
    ) -> None:
        run_settings = context.run_settings
        run_count = context.run.run_count
        delay = run_settings.retry_delay

        if isinstance(delay, list):
            base_delay = delay[min(run_count - 1, len(delay) - 1)]
        else:
            base_delay = run_settings.retry_delay or 0

        # guard against negative relative jitter inputs
        if run_settings.retry_jitter_factor:
            delay = clamped_poisson_interval(
                base_delay, clamping_factor=run_settings.retry_jitter_factor
            )
        else:
            delay = base_delay

        # set by user to conditionally retry a task using @task(retry_condition_fn=...)
        if getattr(proposed_state.state_details, "retriable", True) is False:
            return

        if run_settings.retries is not None and run_count <= run_settings.retries:
            retry_state = states.AwaitingRetry(
                scheduled_time=pendulum.now("UTC").add(seconds=delay),
                message=proposed_state.message,
                data=proposed_state.data,
            )
            await self.reject_transition(state=retry_state, reason="Retrying")
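
The per-attempt delays, jitter, and retriability check above map onto the `@task` options. A sketch, where the `(task, task_run, state) -> bool` signature for `retry_condition_fn` is the assumed form:

from prefect import task


def is_retriable(task, task_run, state) -> bool:
    # Returning False sets state_details.retriable to False on the proposed
    # Failed state, so RetryFailedTasks lets the failure stand.
    return "timeout" in (state.message or "")


@task(
    retries=3,
    retry_delay_seconds=[1, 10, 60],  # per-attempt delays, indexed by run count
    retry_jitter_factor=0.5,          # adds clamped Poisson jitter to each delay
    retry_condition_fn=is_retriable,
)
def call_api() -> None:
    ...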

SecureFlowConcurrencySlots

Bases: BaseOrchestrationRule

Enforce deployment concurrency limits.

This rule enforces concurrency limits on deployments. If a deployment has a concurrency limit, this rule will prevent more than that number of flow runs from being submitted concurrently based on the concurrency limit behavior configured for the deployment.

We use the PENDING state as the target transition because this allows workers to secure a slot before provisioning dynamic infrastructure to run a flow. If a slot isn't available, the worker won't provision infrastructure.

Source code in src/prefect/server/orchestration/core_policy.py
class SecureFlowConcurrencySlots(BaseOrchestrationRule):
    """
    Enforce deployment concurrency limits.

    This rule enforces concurrency limits on deployments. If a deployment has a concurrency limit,
    this rule will prevent more than that number of flow runs from being submitted concurrently
    based on the concurrency limit behavior configured for the deployment.

    We use the PENDING state as the target transition because this allows workers to secure a slot
    before provisioning dynamic infrastructure to run a flow. If a slot isn't available, the worker
    won't provision infrastructure.
    """

    FROM_STATES = ALL_ORCHESTRATION_STATES - {
        states.StateType.PENDING,
        states.StateType.RUNNING,
        states.StateType.CANCELLING,
    }
    TO_STATES = [states.StateType.PENDING]

    async def before_transition(  # type: ignore
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: FlowOrchestrationContext,
    ) -> None:
        if not context.session or not context.run.deployment_id:
            return

        deployment = await deployments.read_deployment(
            session=context.session,
            deployment_id=context.run.deployment_id,
        )
        if not deployment:
            await self.abort_transition("Deployment not found.")
            return

        if not deployment.global_concurrency_limit:
            return

        if deployment.global_concurrency_limit.limit == 0:
            await self.abort_transition(
                "The deployment concurrency limit is 0. The flow will deadlock if submitted again."
            )
            return

        acquired = await concurrency_limits_v2.bulk_increment_active_slots(
            session=context.session,
            concurrency_limit_ids=[deployment.concurrency_limit_id],
            slots=1,
        )

        if not acquired:
            concurrency_options = (
                deployment.concurrency_options
                or core.ConcurrencyOptions(
                    collision_strategy=core.ConcurrencyLimitStrategy.ENQUEUE
                )
            )

            if (
                concurrency_options.collision_strategy
                == core.ConcurrencyLimitStrategy.ENQUEUE
            ):
                await self.reject_transition(
                    state=states.Scheduled(
                        name="AwaitingConcurrencySlot",
                        scheduled_time=pendulum.now("UTC").add(
                            seconds=PREFECT_DEPLOYMENT_CONCURRENCY_SLOT_WAIT_SECONDS.value()
                        ),
                    ),
                    reason="Deployment concurrency limit reached.",
                )
            elif (
                concurrency_options.collision_strategy
                == core.ConcurrencyLimitStrategy.CANCEL_NEW
            ):
                await self.reject_transition(
                    state=states.Cancelled(
                        message="Deployment concurrency limit reached."
                    ),
                    reason="Deployment concurrency limit reached.",
                )

    async def cleanup(  # type: ignore
        self,
        initial_state: Optional[states.State],
        validated_state: Optional[states.State],
        context: FlowOrchestrationContext,
    ) -> None:
        logger = get_logger()
        if not context.session or not context.run.deployment_id:
            return

        try:
            deployment = await deployments.read_deployment(
                session=context.session,
                deployment_id=context.run.deployment_id,
            )

            if not deployment or not deployment.concurrency_limit_id:
                return

            await concurrency_limits_v2.bulk_decrement_active_slots(
                session=context.session,
                concurrency_limit_ids=[deployment.concurrency_limit_id],
                slots=1,
            )
        except Exception as e:
            logger.error(f"Error releasing concurrency slots on cleanup: {e}")
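
The limit and collision strategy this rule consults live on the deployment itself. A sketch of configuring them at deploy time, assuming a Prefect version where `deploy` accepts a `concurrency_limit` argument and that the referenced repository, entrypoint, and work pool exist (all illustrative):

from prefect import flow

if __name__ == "__main__":
    flow.from_source(
        source="https://github.com/example/repo",  # assumed repository
        entrypoint="flows/etl.py:etl",             # assumed entrypoint
    ).deploy(
        name="etl-limited",
        work_pool_name="my-work-pool",             # assumed existing work pool
        concurrency_limit=2,                       # argument name assumed; check your version
    )
    # With the default ENQUEUE strategy, an over-limit PENDING transition is
    # rescheduled as "AwaitingConcurrencySlot"; CANCEL_NEW would cancel it instead.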

SecureTaskConcurrencySlots

Bases: BaseOrchestrationRule

Checks relevant concurrency slots are available before entering a Running state.

This rule checks if concurrency limits have been set on the tags associated with a TaskRun. If so, a concurrency slot will be secured against each concurrency limit before being allowed to transition into a running state. If a concurrency limit has been reached, the client will be instructed to delay the transition for the duration specified by the "PREFECT_TASK_RUN_TAG_CONCURRENCY_SLOT_WAIT_SECONDS" setting before trying again. If the concurrency limit set on a tag is 0, the transition will be aborted to prevent deadlocks.

Source code in src/prefect/server/orchestration/core_policy.py
class SecureTaskConcurrencySlots(BaseOrchestrationRule):
    """
    Checks relevant concurrency slots are available before entering a Running state.

    This rule checks if concurrency limits have been set on the tags associated with a
    TaskRun. If so, a concurrency slot will be secured against each concurrency limit
    before being allowed to transition into a running state. If a concurrency limit has
    been reached, the client will be instructed to delay the transition for the duration
    specified by the "PREFECT_TASK_RUN_TAG_CONCURRENCY_SLOT_WAIT_SECONDS" setting
    before trying again. If the concurrency limit set on a tag is 0, the transition will
    be aborted to prevent deadlocks.
    """

    FROM_STATES = ALL_ORCHESTRATION_STATES
    TO_STATES = [StateType.RUNNING]

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        validated_state: Optional[states.State],
        context: TaskOrchestrationContext,
    ) -> None:
        self._applied_limits = []
        filtered_limits = (
            await concurrency_limits.filter_concurrency_limits_for_orchestration(
                context.session, tags=context.run.tags
            )
        )
        run_limits = {limit.tag: limit for limit in filtered_limits}
        for tag, cl in run_limits.items():
            limit = cl.concurrency_limit
            if limit == 0:
                # limits of 0 will deadlock, and the transition needs to abort
                for stale_tag in self._applied_limits:
                    stale_limit = run_limits.get(stale_tag, None)
                    active_slots = set(stale_limit.active_slots)
                    active_slots.discard(str(context.run.id))
                    stale_limit.active_slots = list(active_slots)

                await self.abort_transition(
                    reason=(
                        f'The concurrency limit on tag "{tag}" is 0 and will deadlock'
                        " if the task tries to run again."
                    ),
                )
            elif len(cl.active_slots) >= limit:
                # if the limit has already been reached, delay the transition
                for stale_tag in self._applied_limits:
                    stale_limit = run_limits.get(stale_tag, None)
                    active_slots = set(stale_limit.active_slots)
                    active_slots.discard(str(context.run.id))
                    stale_limit.active_slots = list(active_slots)

                await self.delay_transition(
                    PREFECT_TASK_RUN_TAG_CONCURRENCY_SLOT_WAIT_SECONDS.value(),
                    f"Concurrency limit for the {tag} tag has been reached",
                )
            else:
                # log the TaskRun ID to active_slots
                self._applied_limits.append(tag)
                active_slots = set(cl.active_slots)
                active_slots.add(str(context.run.id))
                cl.active_slots = list(active_slots)

    async def cleanup(
        self,
        initial_state: Optional[states.State],
        validated_state: Optional[states.State],
        context: OrchestrationContext,
    ) -> None:
        for tag in self._applied_limits:
            cl = await concurrency_limits.read_concurrency_limit_by_tag(
                context.session, tag
            )
            active_slots = set(cl.active_slots)
            active_slots.discard(str(context.run.id))
            cl.active_slots = list(active_slots)
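
Tag-based limits are created ahead of time and then picked up by this rule for any task run carrying the tag. A sketch, where the CLI command in the comment is the assumed way to create the limit:

from prefect import flow, task


@task(tags=["small-db"])
def query(n: int) -> int:
    return n


@flow
def batch():
    # With a limit of 3 on the "small-db" tag (e.g. created via
    # `prefect concurrency-limit create small-db 3`), at most three of these
    # task runs hold a slot at once; additional runs are delayed by
    # PREFECT_TASK_RUN_TAG_CONCURRENCY_SLOT_WAIT_SECONDS.
    return query.map(range(10))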

UpdateFlowRunTrackerOnTasks

Bases: BaseOrchestrationRule

Tracks the flow run attempt a task run state is associated with.

Source code in src/prefect/server/orchestration/core_policy.py
class UpdateFlowRunTrackerOnTasks(BaseOrchestrationRule):
    """
    Tracks the flow run attempt a task run state is associated with.
    """

    FROM_STATES = ALL_ORCHESTRATION_STATES
    TO_STATES = [StateType.RUNNING]

    async def after_transition(
        self,
        initial_state: Optional[states.State],
        validated_state: Optional[states.State],
        context: TaskOrchestrationContext,
    ) -> None:
        if context.run.flow_run_id is not None:
            self.flow_run = await context.flow_run()
            if self.flow_run:
                context.run.flow_run_run_count = self.flow_run.run_count
            else:
                raise ObjectNotFoundError(
                    (
                        "Unable to read flow run associated with task run:"
                        f" {context.run.id}, this flow run might have been deleted"
                    ),
                )

WaitForScheduledTime

Bases: BaseOrchestrationRule

Prevents transitions to running states from happening too early.

This rule enforces that scheduled states only begin running once their scheduled time has passed, according to the machine clock used by the Prefect REST API instance. It identifies transitions from scheduled states that arrive too early and nullifies them: no state is written to the database, and the client is instructed to wait for delay_seconds before attempting the transition again.

Source code in src/prefect/server/orchestration/core_policy.py
class WaitForScheduledTime(BaseOrchestrationRule):
    """
    Prevents transitions to running states from happening too early.

    This rule enforces that all scheduled states will only start with the machine clock
    used by the Prefect REST API instance. This rule will identify transitions from scheduled
    states that are too early and nullify them. Instead, no state will be written to the
    database and the client will be sent an instruction to wait for `delay_seconds`
    before attempting the transition again.
    """

    FROM_STATES = [StateType.SCHEDULED, StateType.PENDING]
    TO_STATES = [StateType.RUNNING]

    async def before_transition(
        self,
        initial_state: Optional[states.State],
        proposed_state: Optional[states.State],
        context: OrchestrationContext,
    ) -> None:
        scheduled_time = initial_state.state_details.scheduled_time
        if not scheduled_time:
            return

        # At this moment, we round delay to the nearest second as the API schema
        # specifies an integer return value.
        delay = scheduled_time - pendulum.now("UTC")
        delay_seconds = delay.in_seconds()
        delay_seconds += round(delay.microseconds / 1e6)
        if delay_seconds > 0:
            await self.delay_transition(
                delay_seconds, reason="Scheduled time is in the future"
            )
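
The delay returned to the client is rounded to a whole number of seconds because the API schema specifies an integer. A small sketch mirroring the computation above:

import pendulum

scheduled_time = pendulum.now("UTC").add(seconds=90, microseconds=600_000)
delay = scheduled_time - pendulum.now("UTC")

# Whole seconds, plus the fractional remainder rounded to the nearest second.
delay_seconds = delay.in_seconds() + round(delay.microseconds / 1e6)
print(delay_seconds)  # ~91: the client waits this long before retrying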