Situation:
I have a plot and an audio-file from a device that need to interact in dash on a localhost.
In dash, when playing the audio file (dBA_sound.wav), the annotation on the plot should move along the time axis.
In that way, for example, the maximum decibels in the audio-file can be read from the plot.
I experimented with a simple dcc.Input (textbox) to simulate the principle: by manually incrementing the elapsed seconds in the text box, the annotation moves in the plot.
So far the good news.
Question:
How can I capture the "elapsed time" from the html.Audio component and pass it to the plot to update the annotation?
My code:
import dash
from dash import dcc, html, Input, Output
import plotly.express as px
import pandas as pd
import base64
import datetime
def create_fig_time_vs_db(x_plot_field: str, y_plot_field: str, data=None):
    """
    Create a Plotly Express line chart of a decibel time series.

    :param
    x_plot_field: string, field name of x-axis -> time
    y_plot_field: string, field name of y-axis -> decibels
    data: optional DataFrame to plot; defaults to the module-level ``df``
        (kept for backward compatibility with existing callers)
    :return: figure with time series
    """
    # Fall back to the global sample DataFrame when no frame is supplied.
    plot_df = df if data is None else data
    fig = px.line(plot_df, x=x_plot_field, y=y_plot_field)
    return fig
def fig_add_annotation(fig, time_indication, txt_annot):
    """
    Mark a single moment of interest on a time-series figure.

    Annotations from earlier calls are hidden first, so only the most
    recent marker is visible (the annotation appears to "move").

    :param
    fig: Plotly express line chart figure
    time_indication: time object -> datetime.datetime(2021, 11, 16, 9, 0, 0)
    txt_annot: Annotation text
    :return: figure with time series and annotation
    """
    # Hide any previously added annotation before placing the new one.
    fig.update_annotations(visible=False)
    annotation_kwargs = dict(
        x=time_indication,
        y=0.5,
        xref='x',
        yref='paper',    # y is relative to the plot area, not data coords
        xshift=0,
        text=txt_annot,
        showarrow=True,
        font=dict(family="Courier New, monospace", size=14, color="#ffffff"),
        align="center",
        bgcolor="blue",
    )
    fig.add_annotation(**annotation_kwargs)
    return fig
# sample dataframe with decibels (one reading per second)
_timestamps = ['2021-11-16 08:56:07', '2021-11-16 08:56:08',
               '2021-11-16 08:56:09', '2021-11-16 08:56:10',
               '2021-11-16 08:56:11', '2021-11-16 08:56:12']
df = pd.DataFrame({
    'time': pd.to_datetime(_timestamps),
    'dBA': [35.91, 36.99, 45.65, 34.01, 34.72, 34.98],
    'marker': [0, 0, 1, 1, 1, 0],
})
# first timestamp = start of the recording; elapsed seconds are added to it
begintime = df['time'].min()
# corresponding audio-file, embedded as a base64 data URI so the browser
# can play it without a separate static-file route.
# Use a context manager so the file handle is always closed.
with open('dBA_sound.wav', 'rb') as audio_file:
    encoded_sound = base64.b64encode(audio_file.read())

# load figure without annotation
fig_time_vs_db = create_fig_time_vs_db('time', 'dBA')

# define dashboard
app = dash.Dash(__name__)
app.layout = html.Div(children=[
    html.H1(children='Sample sound file with corresponding decibels'),
    html.Div(children='Annotation in graph should follow audio progression'),
    # number box simulating the elapsed playback time in whole seconds
    dcc.Input(id="txt_seconds", value=0, type='number'),
    # NOTE(review): the file is a WAV, so the data URI is declared as
    # audio/wav — the previous 'audio/mpeg' mislabeled the content.
    html.Audio(id='audioplayer',
               src='data:audio/wav;base64,{}'.format(encoded_sound.decode()),
               controls=True,
               autoPlay=False, style={"width": "100%"}),
    dcc.Graph(id='timeseries', figure=fig_time_vs_db)])
@app.callback(Output('timeseries', 'figure'),
              Input('txt_seconds', 'value'))
def update_figure(value):
    """
    Move the annotation to ``begintime`` plus the elapsed seconds taken
    from the number input.

    :param
    value: elapsed seconds from the dcc.Input; None while the box is
        empty (Dash sends None when a number input is cleared)
    :return: the figure with the annotation at the current moment
    """
    # Guard against None / empty input so clearing the box does not
    # raise TypeError in int().
    seconds = int(value) if value is not None else 0
    elapsed_timeobject = begintime + datetime.timedelta(seconds=seconds)
    return fig_add_annotation(fig_time_vs_db, elapsed_timeobject,
                              'current moment in audiofile')
# Start the Dash development server (debug=True enables hot reloading);
# not intended for production deployment.
if __name__ == '__main__':
    app.run_server(debug=True)