import math
# Packages for data manipulation
import numpy as np
import pandas as pd
# Packages for graphing
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Model functions
from examples.example_12_resourceless_with_back_and_forth.model_classes import Scenario, generate_seed_vector
from examples.example_12_resourceless_with_back_and_forth.simulation_execution_functions import single_run
# Animation functions
from vidigi.prep import reshape_for_animations, generate_animation_df
from vidigi.animation import generate_animation
import plotly.io as pio
= "notebook" pio.renderers.default
More Resourceless Queues - Community Service Repeat Appointment Booking Model with Variable Follow-ups
This model is designed to mimic a simple community-based appointment service where clients have an initial appointment and then a variable number of follow-ups over an extended period of time.
A client can have their initial appointment with any clinician - in practice, whoever has capacity and the soonest available appointment - but all follow-on appointments will be with the same clinician.
Instead of using simpy resources, an appointment book is set up. The model looks for an appointment slot that meets the relevant criteria, books it in, and reduces the remaining slots accordingly. This allows for continuity of care in a way that is harder to achieve with a simpy resource, and also allows finer control over the number of appointments a clinician can undertake in a day.
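To make the mechanism concrete, here is a minimal, hypothetical sketch of appointment-book logic (illustrative only - the real implementation lives in the imported model_classes module and also handles pooling, priorities and slot weights; the names appointment_book and book_first_available are invented for this sketch). The book is simply a table of remaining slots per day and clinician, and booking means finding the first day that satisfies the criteria and decrementing that cell.

import numpy as np

# Hypothetical appointment book: rows are days, columns are clinicians,
# values are the number of bookable slots remaining on that day
rng = np.random.default_rng(42)
appointment_book = rng.integers(low=0, high=5, size=(30, 8)).astype(float)

def book_first_available(book, clinician, earliest_day, slots_needed=1.0):
    """Return the first day from earliest_day onwards where the clinician has
    enough free slots, decrementing the book in place; None if no capacity."""
    for day in range(earliest_day, book.shape[0]):
        if book[day, clinician] >= slots_needed:
            book[day, clinician] -= slots_needed
            return day
    return None

# e.g. book a follow-up with clinician 3 no earlier than day 10
booked_day = book_first_available(appointment_book, clinician=3, earliest_day=10)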
Note that some issues seem to be present with the caseload calculations, leading to some unexpected behaviour within the model over time.
The default is to aim to have as many people on caseload as there are maximum theoretical slots per week. This can be adjusted up or down (via CASELOAD_TARGET_MULTIPLIER) to see the impact of changing the policy.
Note that, in this model, low intensity patients take up 0.5 slots on the caseload and high intensity patients take up 1 slot.
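As a rough worked example of how the caseload multiplier and slot weights interact (the weekly slot count below is purely illustrative, not taken from the shift data):

import numpy as np

# Illustrative only: a clinician offering 25 slots per week
weekly_slots = 25
CASELOAD_TARGET_MULTIPLIER = 1.3

default_caseload_target = weekly_slots                                               # 25
adjusted_caseload_target = int(np.floor(weekly_slots * CASELOAD_TARGET_MULTIPLIER))  # 32

# High intensity clients occupy 1 slot and low intensity clients 0.5 slots,
# so the adjusted target could hold 32 high intensity clients,
# 64 low intensity clients, or any mix summing to 32 slots
max_high_intensity_only = adjusted_caseload_target / 1.0   # 32.0
max_low_intensity_only = adjusted_caseload_target / 0.5    # 64.0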
number_of_clinicians = 8

CASELOAD_TARGET_MULTIPLIER = 1.3
# caseload_default_adjusted = pd.concat(
# [shifts.sum(),
# np.floor(shifts.sum() * CASELOAD_TARGET_MULTIPLIER)],
# axis=1
# )
# caseload_default_adjusted.columns = ["Default Caseload (Total Slots Per Week)",
# "Adjusted Caseload"]
ANNUAL_DEMAND = 700

PROP_HIGH_PRIORITY = 0.03

WARM_UP = 60
RESULTS_COLLECTION = 180
RUN_LENGTH = RESULTS_COLLECTION + WARM_UP

PROP_REFERRED_OUT = 0.12

SEED = 42

PROP_HIGH_PRIORITY_ONGOING_APPOINTMENTS = 0.95
PROP_LOW_PRIORITY_ONGOING_APPOINTMENTS = 0.8

# What proportion of people initially graded as *high*
# priority go on to have high intensity therapy?
PROP_HIGH_PRIORITY_HIGH_INTENSITY = 0.7

# What proportion of people initially graded as *low*
# priority go on to have high intensity therapy?
PROP_LOW_PRIORITY_HIGH_INTENSITY = 0.2

MEAN_FOLLOW_UPS_HIGH_INTENSITY = 10
MEAN_FOLLOW_UPS_LOW_INTENSITY = 6

SD_FOLLOW_UPS_HIGH_INTENSITY = 18
SD_FOLLOW_UPS_HIGH_INTENSITY = SD_FOLLOW_UPS_HIGH_INTENSITY / 3
SD_FOLLOW_UPS_LOW_INTENSITY = 9
SD_FOLLOW_UPS_LOW_INTENSITY = SD_FOLLOW_UPS_LOW_INTENSITY / 3

scenarios = {}
We define the parameters of the clinics in CSV files.
= (pd.read_csv("data/shifts.csv")
shifts
.iloc[:,:number_of_clinicians])
shifts
|   | clinic_1 | clinic_2 | clinic_3 | clinic_4 | clinic_5 | clinic_6 | clinic_7 | clinic_8 |
|---|---|---|---|---|---|---|---|---|
| 0 | 0 | 5 | 4 | 4 | 0 | 5 | 3 | 3 |
| 1 | 0 | 3 | 5 | 4 | 5 | 5 | 3 | 3 |
| 2 | 4 | 3 | 5 | 5 | 3 | 5 | 4 | 3 |
| 3 | 4 | 5 | 2 | 1 | 5 | 5 | 3 | 3 |
| 4 | 4 | 0 | 1 | 0 | 5 | 4 | 3 | 1 |
| 5 | 5 | 0 | 0 | 0 | 3 | 0 | 0 | 0 |
| 6 | 0 | 5 | 0 | 3 | 0 | 0 | 0 | 0 |
= (pd.read_csv("data/caseload.csv")
caseload +1])
.iloc[:,:number_of_clinicians
caseload
|   | Unnamed: 0 | clinic_1 | clinic_2 | clinic_3 | clinic_4 | clinic_5 | clinic_6 | clinic_7 | clinic_8 |
|---|---|---|---|---|---|---|---|---|---|
| 0 | current_caseload | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
= (pd.read_csv("data/referrals.csv")
referrals
.iloc[:number_of_clinicians])
referrals
|   | clinic | prop | referred_out | dna |
|---|---|---|---|---|
| 0 | 1 | 1.0 | 0.120 | 0.20 |
| 1 | 2 | 0.0 | 0.428 | 0.25 |
| 2 | 3 | 0.0 | 0.489 | 0.25 |
| 3 | 4 | 0.0 | 0.296 | 0.20 |
| 4 | 5 | 0.0 | 0.275 | 0.23 |
| 5 | 6 | 0.0 | 0.091 | 0.21 |
| 6 | 7 | 0.0 | 0.162 | 0.24 |
| 7 | 8 | 0.0 | 0.129 | 0.17 |
= (pd.read_csv("data/partial_pooling.csv")
pooling +1])
.iloc[:number_of_clinicians,:number_of_clinicians
'pooled'] = Scenario(RUN_LENGTH,
scenarios[
WARM_UP,# prop_carve_out=prop_carve_out,
=generate_seed_vector(SEED),
seeds=shifts,
slots_file=pooling,
pooling_file=caseload,
existing_caseload_file=referrals,
demand_file=CASELOAD_TARGET_MULTIPLIER,
caseload_multiplier=PROP_HIGH_PRIORITY,
prop_high_priority=PROP_HIGH_PRIORITY_ONGOING_APPOINTMENTS,
prop_high_priority_ongoing_appointments=PROP_LOW_PRIORITY_ONGOING_APPOINTMENTS,
prop_low_priority_ongoing_appointments=PROP_HIGH_PRIORITY_HIGH_INTENSITY,
prop_high_priority_assessed_high_intensity=PROP_LOW_PRIORITY_HIGH_INTENSITY,
prop_low_priority_assessed_high_intensity=MEAN_FOLLOW_UPS_HIGH_INTENSITY,
mean_follow_ups_high_intensity=SD_FOLLOW_UPS_HIGH_INTENSITY,
sd_follow_ups_high_intensity=MEAN_FOLLOW_UPS_LOW_INTENSITY,
mean_follow_ups_low_intensity=SD_FOLLOW_UPS_LOW_INTENSITY,
sd_follow_ups_low_intensity=ANNUAL_DEMAND,
annual_demand=PROP_REFERRED_OUT) prop_referred_out
# Run the model and unpack the outputs
results_all, results_low, results_high, event_log, \
    bookings, available_slots, daily_caseload_snapshots, \
    daily_waiting_for_booking_snapshots, daily_arrivals = single_run(args=scenarios['pooled'])
event_log_df = pd.DataFrame(event_log)

# Keep the original event name, then append the booked clinic's ID to the event name
# so that each clinic gets its own position in the animation
event_log_df['event_original'] = event_log_df['event']
event_log_df['event'] = event_log_df.apply(
    lambda x: f"{x['event']}{f'_{int(x.booked_clinic)}' if pd.notna(x['booked_clinic']) and x['event'] != 'waiting_appointment_to_be_scheduled' else ''}",
    axis=1
)
full_patient_df = reshape_for_animations(event_log_df,
                                         entity_col_name="patient",
                                         limit_duration=WARM_UP+RESULTS_COLLECTION,
                                         every_x_time_units=1,
                                         step_snapshot_max=30)

# Remove the warm-up period from the event log
full_patient_df = full_patient_df[full_patient_df["snapshot_time"] >= WARM_UP]
We will automatically create a reasonable positioning dataframe that reflects the number of available clinicians.
#####################################################
# Create the positioning dataframe for the animation
#####################################################

# Create a list of clinics
clinics = [x for x
           in event_log_df['booked_clinic'].sort_values().unique().tolist()
           if not math.isnan(x)]

# Create a column of positions for people waiting for their initial appointment with the clinic
clinic_waits = [{'event': f'appointment_booked_waiting_{int(clinic)}',
                 'y': 950-(clinic+1)*80,
                 'x': 360,
                 'label': f"Booked for<br>assessment with<br>clinician {int(clinic)}",
                 'clinic': int(clinic)}
                for clinic in clinics]

# Create a column of positions for people having an appointment with the clinic
clinic_attends = [{'event': f'have_appointment_{int(clinic)}',
                   'y': 950-(clinic+1)*80,
                   'x': 625,
                   'label': f"Attending appointment<br>with clinician {int(clinic)}"}
                  for clinic in clinics]

# Join these dataframes
event_position_df = pd.concat(
    [pd.DataFrame(clinic_waits),
     pd.DataFrame(clinic_attends)]
)

# Create a column of positions for people who are put on a waiting list before being given their future
# appointment
wait_for_booking = [{
    'event': 'waiting_appointment_to_be_scheduled',
    'y': 150,
    'x': 325,
    'label': "Waiting to be<br>scheduled with <br>clinician "
}]

event_position_df = pd.concat([event_position_df, pd.DataFrame(wait_for_booking)])

# Create a column of positions for people being referred to another service (triaged as inappropriate
# for this service after their initial referral and before an appointment is booked)
referred_out = [{
    'event': 'referred_out',
    'y': 150,
    'x': 625,
    'label': "Referred Out:<br>Unsuitable for Service"
}]

event_position_df = pd.concat([event_position_df, pd.DataFrame(referred_out)])

# Create a column of positions for people who have had their initial appointment and are now waiting for a
# booked follow-up appointment to take place
follow_up_waiting = [{'event': f'follow_up_appointment_booked_waiting_{int(clinic)}',
                      'y': 950-(clinic+1)*80,
                      'x': 1000,
                      'label': f"On books - awaiting <br>next appointment<br>with clinician {int(clinic)}"}
                     for clinic in clinics]

event_position_df = pd.concat([event_position_df, pd.DataFrame(follow_up_waiting)])

event_position_df = event_position_df.drop(columns="clinic")
full_patient_df_plus_pos = generate_animation_df(
    full_entity_df=full_patient_df,
    entity_col_name="patient",
    event_position_df=event_position_df,
    wrap_queues_at=15,
    step_snapshot_max=30,
    gap_between_entities=15,
    gap_between_queue_rows=15,
    debug_mode=True
)
Placement dataframe finished construction at 12:15:03
def show_priority_icon(row):
    if "more" not in row["icon"]:
        if row["pathway"] == 2:
            return "🚨"
        else:
            return f"{row['icon']}"
    else:
        return row["icon"]

def add_los_to_icon(row):
    if row["event_original"] == "have_appointment":
        return f'{row["icon"]}<br>{int(row["wait"])}'
    else:
        return row["icon"]

full_patient_df_plus_pos = full_patient_df_plus_pos.assign(
    icon=full_patient_df_plus_pos.apply(show_priority_icon, axis=1)
)
fig = generate_animation(
    full_entity_df_plus_pos=full_patient_df_plus_pos,
    event_position_df=event_position_df,
    entity_col_name="patient",
    scenario=None,
    plotly_height=900,
    plotly_width=1000,
    override_x_max=1200,
    override_y_max=1000,
    entity_icon_size=10,
    text_size=10,
    include_play_button=True,
    add_background_image=None,
    display_stage_labels=True,
    time_display_units="d",
    simulation_time_unit="days",
    start_date="2022-06-27",
    setup_mode=False,
    frame_duration=1500,  # milliseconds
    frame_transition_duration=1000,  # milliseconds
    debug_mode=False
)

fig
Making additional plots from the event log
We can also use the event log to make a wide range of additional plots for exploring our model. Here are just a few examples for this particular system.
daily_position_counts = []

for day in range(RUN_LENGTH):
    # First limit to anyone who hasn't left the system yet.
    # Get a list of all people who have departed on or before the day
    # of interest as we can then remove them from the dataframe
    # at the next step
    departed = event_log_df[
        (event_log_df["time"] <= day) &
        (event_log_df["event"] == "depart")]["patient"].tolist()

    # Filter down to events that have occurred at or before this day
    upto_now = event_log_df[(event_log_df["time"] <= day)
                            & (event_log_df["event"] != "arrival")
                            & (~event_log_df["patient"].isin(departed))]

    # Now take the latest event for each person
    latest_event_upto_now = upto_now.sort_values("time").groupby("patient").tail(1)

    for event_type in event_log_df["event_original"].unique():
        snapshot_count = len(latest_event_upto_now[latest_event_upto_now["event_original"] == event_type])

        daily_position_counts.append(
            {"day": day,
             "event": event_type,
             "count": snapshot_count}
        )

daily_position_counts = pd.DataFrame(daily_position_counts)
fig_daily_position_counts = px.line(
    daily_position_counts[(daily_position_counts["event"] == "waiting_appointment_to_be_scheduled") |
                          (daily_position_counts["event"] == "appointment_booked_waiting") |
                          (daily_position_counts["event"] == "follow_up_appointment_booked_waiting") |
                          (daily_position_counts["event"] == "have_appointment")],
    x="day",
    y="count",
    color="event"
)

fig_daily_position_counts.update_layout(legend=dict(
    orientation="h",
    yanchor="bottom",
    y=1.02,
    xanchor="right",
    x=1
))

fig_daily_position_counts
= event_log_df[(event_log_df["event"] == "arrival") |
arrival_depart_df "event"] == "depart")][["time", "event"]].value_counts().reset_index(drop=False).sort_values('time')
(event_log_df[
= arrival_depart_df.pivot(index="time", columns="event", values="count")
arrival_depart_df_pivot "difference (arrival-depart) - positive is more more arriving than departing"] = arrival_depart_df_pivot["arrival"] - arrival_depart_df_pivot["depart"]
arrival_depart_df_pivot[
arrival_depart_balance_fig = px.scatter(
    arrival_depart_df,
    x="time",
    y="count",
    color="event",
    trendline="rolling",
    color_discrete_sequence=['#636EFA', '#EF553B'],
    opacity=0.1,
    trendline_options=dict(window=100)
)

arrival_depart_balance_fig
assessment_booking_waits = (event_log_df
                            .dropna(subset='assessment_booking_wait')
                            .drop_duplicates(subset='patient')
                            [['time', 'pathway', 'assessment_booking_wait']]
                            )

px.box(
    assessment_booking_waits,
    y="assessment_booking_wait", x="pathway", color="pathway"
)
px.line(
    assessment_booking_waits,
    y="assessment_booking_wait", x="time", color="pathway", line_group="pathway"
)
px.box(
    event_log_df
    .dropna(subset='wait')
    .drop_duplicates(subset='patient')[['pathway', 'wait']],
    y="wait", x="pathway", color="pathway"
)
px.line(
    event_log_df
    .dropna(subset='wait')
    .drop_duplicates(subset='patient')[['time', 'pathway', 'wait']],
    y="wait", x="time", color="pathway", line_group="pathway"
)
inter_appointment_gaps = (event_log_df
                          .dropna(subset='interval')
                          .drop_duplicates('patient')
                          # .query('event_original == "have_appointment"')
                          [['time', 'follow_up_intensity', 'interval']]
                          )
px.box(
    inter_appointment_gaps,
    y="interval", x="follow_up_intensity", color="follow_up_intensity"
)
px.line(
    inter_appointment_gaps,
    y="interval", x="time", color="follow_up_intensity", line_group="follow_up_intensity"
)
fig_arrivals = go.Figure(make_subplots(rows=1, cols=1))

fig_arrivals_1 = px.scatter(
    pd.DataFrame(pd.Series(daily_arrivals).value_counts()).reset_index(drop=False),
    x="index",
    y="count",
    trendline="rolling",
    opacity=0.4,
    trendline_options=dict(window=7)
)

fig_arrivals_2 = px.scatter(
    pd.DataFrame(pd.Series(daily_arrivals).value_counts()).reset_index(drop=False),
    x="index",
    y="count",
    trendline="rolling",
    trendline_options=dict(window=60),
    color_discrete_sequence=['red']
)

# Keep only the trendline from the second figure
fig_arrivals_2.data = [t for t in fig_arrivals_2.data if t.mode == "lines"]

fig_trace = []
for trace in range(len(fig_arrivals_1["data"])):
    fig_trace.append(fig_arrivals_1["data"][trace])
for trace in range(len(fig_arrivals_2["data"])):
    fig_trace.append(fig_arrivals_2["data"][trace])

for traces in fig_trace:
    fig_arrivals.append_trace(traces, row=1, col=1)

fig_arrivals
px.bar(
    event_log_df
    .dropna(subset='follow_ups_intended')
    .drop_duplicates(subset='patient')[['pathway', 'follow_ups_intended']]
    .value_counts()
    .reset_index(drop=False),
    x="follow_ups_intended", y="count", facet_row="pathway"
)
px.bar(
    event_log_df
    .dropna(subset='assessment_booking_wait')
    .drop_duplicates(subset='patient')
    .groupby('pathway')[['pathway', 'assessment_booking_wait']]
    .value_counts()
    .reset_index(drop=False),
    x="assessment_booking_wait", y="count", facet_row="pathway"
)
cl = pd.DataFrame(daily_caseload_snapshots["caseload_day_end"].tolist())
cl_filtered = cl.iloc[WARM_UP:RUN_LENGTH, :]
cl_plotting = cl_filtered.reset_index(drop=False).melt(id_vars=["index"], var_name="clinician", value_name="caseload")

px.line(
    cl_plotting,
    x="index",
    y="caseload",
    color="clinician",
    range_y=[0, max(cl_plotting["caseload"])]
)
px.line((cl_filtered.sum(axis=1)/(np.floor(shifts.sum() * CASELOAD_TARGET_MULTIPLIER).sum())).reset_index(),
        x="index", y=0)
px.bar(
    daily_position_counts[daily_position_counts["event"] != "depart"],
    x="event",
    y="count",
    animation_frame="day",
    range_y=[0, max(daily_position_counts["count"])]
)