Hey @numam,
Here’s an updated version that should be more consistent. Instead of transforming back into 3D space via a neural network, we can simply take the indices of the detected edge points and use those indices to look up the corresponding points in 3D space.
Let me know how it goes for you! You’ll need the alphashape and scikit-learn packages installed (pip install alphashape scikit-learn). Here’s the code:
import plotly.graph_objs as go
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
import alphashape
def generate_contour_sample(x, y, z, alpha=0.0, n_components=2, plot=True):
    # accept pandas Series or arrays; positional indexing below needs plain arrays
    x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)
    X = np.array([x, y, z]).T
    # do PCA to flatten the (roughly planar) 3D data into 2D
    pca = PCA(n_components=n_components)
    pca.fit(X)
    X_transformed = pca.transform(X)
    # generate boundary points in PCA space
    alpha_shape = alphashape.alphashape(X_transformed, alpha)
    alpha_shape_points = alpha_shape.boundary.coords
    x_boundary = np.array(alpha_shape_points.xy[0])
    y_boundary = np.array(alpha_shape_points.xy[1])
    bound_mat = np.array([x_boundary, y_boundary]).T
    # map each boundary vertex back to the index of the original point it came from
    inds = []
    for coord in bound_mat:
        # rows of X_transformed whose values are close to this boundary vertex
        ind_match = np.where(np.all(np.isclose(coord, X_transformed, atol=1e-3), axis=1))[0]
        if ind_match.size > 0:
            inds.append(ind_match[0])
    inds = np.array(inds)
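    # (side note: if the isclose matching ever misses vertices with a looser tolerance,
    #  a nearest-neighbour lookup does the same job more robustly -- just an option,
    #  assuming scipy is available:
    #      from scipy.spatial import cKDTree
    #      inds = cKDTree(X_transformed).query(bound_mat)[1]
    #  )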
    if plot:
        # the constant z offsets (50, 75) just stack the 2D PCA and edge traces
        # above the original data so all layers are visible in one 3D figure
        fig = go.Figure(
            data=[
                go.Scatter3d(x=x, y=y, z=z, name='Input Data', mode='markers'),
                go.Scatter3d(x=X_transformed[:, 0], y=X_transformed[:, 1],
                             z=50 + np.zeros(len(X_transformed)),
                             name='Points in 2-component PCA'),
                go.Scatter3d(x=x_boundary, y=y_boundary,
                             z=75 + np.zeros(len(x_boundary)),
                             name='Edge in PCA space'),
                go.Scatter3d(x=x[inds], y=y[inds], z=z[inds],
                             name='Reconstructed Edge')
            ],
            layout=go.Layout(
                scene=dict(
                    aspectmode='data',
                ),
                template='plotly_dark'
            )
        )
        fig.show()
    return np.array([x[inds], y[inds], z[inds]]).T
if __name__ == '__main__':
    data = pd.read_csv('ROIvertices.csv', header=None)
    contour_pts = generate_contour_sample(data.iloc[:, 0], data.iloc[:, 1], data.iloc[:, 2],
                                          alpha=0.7, plot=True)
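If you want to sanity-check it without ROIvertices.csv, something like the snippet below should work. The disc data is made up purely for illustration, and the alpha value is just a guess you may need to tune to your real point spacing:

# synthetic sanity check: a noisy, tilted disc of points
rng = np.random.default_rng(0)
theta = rng.uniform(0, 2 * np.pi, 500)
r = 40 * np.sqrt(rng.uniform(0, 1, 500))
xs, ys = r * np.cos(theta), r * np.sin(theta)
zs = 0.3 * xs + rng.normal(scale=0.1, size=500)   # tilt the disc out of the xy-plane
edge_pts = generate_contour_sample(xs, ys, zs, alpha=0.05, plot=True)
print(edge_pts.shape)   # (number of edge points, 3)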