I am building an image-processing app in Dash, where the user uploads an image, selects a smoothing filter, and then chooses between k-means clustering and thresholding to separate the foreground object from the background.
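For context, here is a trimmed-down sketch of the relevant part of my layout. The component ids are the ones used in the callback below; the filter options, slider ranges and the upload-image / detector-params ids are simplified placeholders rather than my exact code:

from dash import Dash, dcc, html

app = Dash(__name__)

app.layout = html.Div([
    dcc.Upload(id='upload-image', children=html.Button('Upload image')),
    dcc.Dropdown(
        id='filters-dropdown',
        options=[{'label': 'Gaussian blur', 'value': 'gaussian'},
                 {'label': 'Median blur', 'value': 'median'}],
        placeholder='Select a smoothing filter',
    ),
    dcc.Slider(id='kernel-height-slider', min=1, max=31, step=2, value=5),
    dcc.Slider(id='kernel-width-slider', min=1, max=31, step=2, value=5),
    dcc.RadioItems(
        id='detectors-radio',
        options=[{'label': 'Otsu thresholding', 'value': 'otsu'},
                 {'label': 'k-means clustering', 'value': 'kmeans'}],
    ),
    # Detector-specific inputs are rendered into this container
    # (see the sketch near the end of the post)
    html.Div(id='detector-params'),
    html.Img(id='smoothed-img'),
])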
As shown below, I have a callback with a filters-dropdown input (this part works fine, since both filters take the same type of input, in this case the kernel-height-slider and kernel-width-slider values).
import base64
import io

import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
from dash.dependencies import Input, Output

# filters, preprocessed_img, otsu_thresholding and kmeans_clustering
# are defined elsewhere in the app
@app.callback(
    Output('smoothed-img', 'src'),
    [
        Input('filters-dropdown', 'value'),
        Input('kernel-height-slider', 'value'),
        Input('kernel-width-slider', 'value'),
        Input('detectors-radio', 'value'),
        Input('otsu-min-threshold', 'value'),
        Input('otsu-max-threshold', 'value'),
        Input('kmeans-kvalue-input', 'value'),
        Input('kmeans-niter-input', 'value'),
        Input('kmeans-accuracy-input', 'value'),
    ]
)
def update_overlay_base64(selected_filter, kernel_height, kernel_width,
                          selected_detector, otsu_min_thr, otsu_max_thr,
                          kmeans_k, kmeans_niter, kmeans_accuracy):
    if not selected_filter:
        return None
    # Smooth the preprocessed image with the selected filter
    filtered_img = filters[selected_filter](preprocessed_img,
                                            kernel_height, kernel_width)
    # Run the selected foreground detector
    if (selected_detector == "otsu" and otsu_min_thr is not None
            and otsu_max_thr is not None):
        processed_img = otsu_thresholding(filtered_img, otsu_min_thr,
                                          otsu_max_thr)[1]
    elif selected_detector == "kmeans" and None not in (kmeans_k, kmeans_niter,
                                                        kmeans_accuracy):
        processed_img = kmeans_clustering(filtered_img, kmeans_k,
                                          kmeans_niter, kmeans_accuracy)
    # Detect edges and find the bounding box of the foreground
    edge_img = cv.Canny(processed_img, 10, 150)
    edge_img = np.array(edge_img, dtype='float64')
    edge_img[edge_img == 0] = np.nan
    rows, cols = np.where(edge_img == 255)
    min_row, max_row = rows.min(), rows.max()
    min_col, max_col = cols.min(), cols.max()
    # Plot with matplotlib
    fig, ax = plt.subplots()
    ax.imshow(processed_img, cmap='Spectral_r')
    ax.imshow(edge_img, cmap='autumn', alpha=0.5)
    ax.add_patch(
        plt.Rectangle((min_col - 1, min_row - 1),
                      max_col - min_col + 2,
                      max_row - min_row + 2,
                      edgecolor='cyan',
                      facecolor='none',
                      linewidth=1)
    )
    ax.axis('off')
    # Save plot to buffer
    buf = io.BytesIO()
    fig.savefig(buf, format='png', bbox_inches='tight', pad_inches=0)
    plt.close(fig)
    buf.seek(0)
    # Encode to base64
    encoded = base64.b64encode(buf.read()).decode()
    return f"data:image/png;base64,{encoded}"
However, when choosing a detector via detectors-radio, the inputs for the image-processing function will be either otsu-min-threshold and otsu-max-threshold (if Otsu is selected) or kmeans-kvalue-input, kmeans-niter-input and kmeans-accuracy-input (if k-means is selected).
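To make that concrete, the detector-specific controls only exist in the layout while their detector is selected. A simplified sketch of that rendering callback (using the placeholder detector-params container from the layout sketch above and made-up default values, not my exact code):

from dash import dcc
from dash.dependencies import Input, Output

# Only the controls for the currently selected detector are placed in the
# layout, so the other detector's component ids do not exist at that point.
@app.callback(Output('detector-params', 'children'),
              [Input('detectors-radio', 'value')])
def render_detector_params(selected_detector):
    if selected_detector == 'otsu':
        return [
            dcc.Input(id='otsu-min-threshold', type='number', value=0),
            dcc.Input(id='otsu-max-threshold', type='number', value=255),
        ]
    if selected_detector == 'kmeans':
        return [
            dcc.Input(id='kmeans-kvalue-input', type='number', value=2),
            dcc.Input(id='kmeans-niter-input', type='number', value=10),
            dcc.Input(id='kmeans-accuracy-input', type='number', value=1.0),
        ]
    return []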
The issue is that the callback needs all of the Inputs to work properly, and it inevitably throws errors because one set of inputs is not available while the other option is selected. Is there a way to work around this? Can I do conditional callbacks or something similar?
Thank you!