Why is the web browser falling behind the Dash program?


I’ve written an application with Dash and Plotly to visualize a live graph at 50 ms intervals (20 Hz), but the browser plots the graph much more slowly than the program itself runs. So the program is fast, but the plotting is slow. My goal is to update the graph at a 20 Hz rate in the browser. I’m serving it at localhost:8050.

My code is here:

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from future.builtins import *  # NOQA

import sys
import time
import warnings
from collections import deque
from itertools import chain
from operator import add

import numpy as np
import plotly
import plotly.graph_objs as go

import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Output, Event

from obspy import UTCDateTime, read
from obspy.signal.cross_correlation import templates_max_similarity
from obspy.signal.headers import clibsignal, head_stalta_t

from pyspark.sql import SparkSession

'''Usage: ./bin/spark-submit examples/src/main/python/streaming/viz1.py /home/zeinab/spark-2.3.1-bin-hadoop2.7/outFile.txt
viz3.py in Dash directory with Spark added'''
''' This program plots the STA/LTA based on the indices in the original timeseries'''

def classic_sta_lta_py(a, nsta=2, nlta=20):
    """Computes the standard STA/LTA from a given input array a. The length of
    the STA is given by nsta in samples, respectively is the length of the
    LTA given by nlta in samples. Written in Python.

    .. note::

        There exists a faster version of this trigger wrapped in C
        called :func:`~obspy.signal.trigger.classic_sta_lta` in this module!

    :type a: NumPy :class:`~numpy.ndarray`
    :param a: Seismic Trace
    :type nsta: int
    :param nsta: Length of short time average window in samples
    :type nlta: int
    :param nlta: Length of long time average window in samples
    :rtype: NumPy :class:`~numpy.ndarray`
    :return: Characteristic function of classic STA/LTA
    """
    # The cumulative sum can be exploited to calculate a moving average (the
    # cumsum function is quite efficient)
    sta = np.cumsum(a ** 2)

    # Convert to float (np.float was removed in NumPy >= 1.24; use np.float64)
    sta = np.require(sta, dtype=np.float64)

    # Copy for LTA
    lta = sta.copy()

    # Compute the STA and the LTA: difference of cumulative sums gives the
    # moving window sum; dividing by the window length gives the average.
    sta[nsta:] = sta[nsta:] - sta[:-nsta]
    sta /= nsta
    lta[nlta:] = lta[nlta:] - lta[:-nlta]
    lta /= nlta

    # Pad zeros: the first nlta - 1 samples have no complete LTA window yet.
    sta[:nlta - 1] = 0

    # Avoid division by zero by setting zero values to tiny float
    dtiny = np.finfo(0.0).tiny
    idx = lta < dtiny
    lta[idx] = dtiny

    return sta / lta

app = dash.Dash(__name__)
# Read data

# Sliding window of the most recent samples shown on the live graph.
max_length = 50
X = deque(maxlen=max_length)
Y = deque(maxlen=max_length)

# CREATE an instance of a SparkSession object
spark = SparkSession.builder.appName("myapp").getOrCreate()

# DEFINE your input path (first command-line argument)
input_path = sys.argv[1]

num_of_partitions = 1
rdd = (spark.sparkContext
       .textFile(input_path, num_of_partitions)
       .flatMap(lambda line: line.strip().split("\n"))
       .map(float))
# Process data and pass the output to the app: apply STA/LTA per partition
# and collect the characteristic function on the driver.
# NOTE: the original `.map(lambda r: r)` was a no-op and has been removed.
mapped = rdd.mapPartitions(lambda i: classic_sta_lta_py(np.array(list(i))))
a = mapped.collect()

app.layout = html.Div([
    dcc.Graph(id='live-graph', animate=True),
    # Fire the update callback every 50 ms (20 Hz). Without this Interval
    # component the Event('graph-update', 'interval') below never triggers.
    dcc.Interval(id='graph-update', interval=50),
])

@app.callback(Output('live-graph', 'figure'),
              events=[Event('graph-update', 'interval')])
def update_graph_scatter():
    """Consume one STA/LTA sample per interval tick and redraw the scatter.

    Reads the next value from the module-level list ``a`` (produced by the
    Spark job above) and appends it to the X/Y deques that back the plot.
    """
    # Advance the x-axis by one sample index per tick.
    X.append(X[-1] + 1 if X else 1)
    # Pop the next characteristic-function value off the precomputed list.
    Y.append(a[0])
    del a[0]

    data = go.Scatter(
        x=list(X),
        y=list(Y),
        name='STA/LTA',
        mode='lines+markers',
    )

    return {'data': [data],
            'layout': go.Layout(xaxis=dict(range=[min(X), max(X)]),
                                yaxis=dict(range=[min(Y), max(Y)]))}


if __name__ == "__main__":
    app.run_server(debug=True)

Any idea how to make the plotting faster?

Thank you,