NEVER USE PYTHON IN RUST
Cargo.toml
@@ -12,5 +12,3 @@ show-image = "0.14.1"
 rand = "0.9.1"
 rayon = "1.10.0"
 hdbscan = "0.10.0"
-pyo3 = { version = "0.21.2", features = ["auto-initialize"] }
-numpy = "0.21.0"
src/main.rs
@@ -6,14 +6,9 @@ use std::thread;
 use std::time::{Duration, Instant};
 use hdbscan::{Hdbscan, HdbscanHyperParams};
 use image::{DynamicImage, ImageReader, Rgb, RgbImage};
-use numpy::PyArray2;
-use pyo3::prelude::PyModule;
-use pyo3::Python;
-use pyo3::types::PyList;
 use rand::{Rng};
 use rayon::prelude::*;
 use show_image::{create_window, ImageInfo, ImageView, PixelFormat};
-use show_image::event::VirtualKeyCode::V;

 fn create_extract_thread(image_rx: Receiver<Arc<Vec<u8>>>, width: u32, threshold: u8, downsample: f64) -> Receiver<Arc<Vec<Vec<f64>>>> {
     let (tx, rx) = mpsc::sync_channel::<Arc<Vec<Vec<f64>>>>(1);
@@ -106,131 +101,33 @@ fn create_transform_thread(pointcloud_rx : Receiver<Arc<Vec<Vec<f64>>>>) -> Rece
     let (projection_tx, projection_rx) = mpsc::sync_channel::<Arc<Vec<Vec<f64>>>>(1);

     thread::spawn(move || {
-        Python::with_gil(|py| {
-            let simulation_image_helper = PyModule::from_code(py,r#"
-import numpy as np
-
-
-class SimulationImageHelper:
-    """
-    This class can be used to project points on the road surface onto the image
-    and vice versa. It may be used for detecting lanes in simulation.
-
-    Attributes:
-        yHorizon: The y-coordinate of the horizon in the image
-
-    Methods:
-        image2road: project 2d image points to 3d road coords
-        road2image: project 3d road coords to 2d image points
-        distance2imagerow: return which y-coord in the image corresponds to a
-            given distance on the 3d road plane
-    """
-
-    def __init__(self,notsurewhatthisis):
-        """
-        The class constructor.
-        """
-        # from calib_image.py
-        self.H_I = np.matrix(
-            [
-                [-7.74914499e-03, 3.95733793e-18, 3.10353257e00],
-                [8.56519716e-18, 9.42313768e-05, -1.86052093e00],
-                [2.57498016e-18, -2.73825295e-03, 1.00000000e00],
-            ]
-        )
-
-        self.H_I = self.H_I.reshape(3, 3)
-        self.H = self.H_I.I
-
-        X_infinity = np.matrix([0, 10000, 1]).reshape(3, 1)
-        self.yHorizon = self.H * X_infinity
-        self.yHorizon = int((self.yHorizon[1] / self.yHorizon[2]).item())
-
-    def image2road(self, pts2d):
-        """
-        Transform a point on the road surface from the image to 3D coordinates
-        (in DIN70000, i.e. X to front, Y to left, origin in center of vehicle at
-        height 0).
-        """
-        pts2d = np.array(pts2d)
-        N, cols = pts2d.shape
-        if (cols == 1) and (N == 2):
-            # if just two coordinates are given, we make sure we have a (2x1) vector
-            pts2d = pts2d.T
-        assert (
-            cols == 2
-        ), "pts2d should be a Nx2 numpy array, but we obtained (%d, %d)" % (N, cols)
-
-        X = self.H_I * np.hstack((pts2d, np.ones((pts2d.shape[0], 1)))).T
-        X = X / X[2, :]
-        X = X.T
-
-        # the camera is "self.C[2]" in front of the vehicle's center
-        return np.hstack((X[:, 1], -X[:, 0]))
-
-    def road2image(self, ptsRd, imSize=None):
-        """
-        Transform a 3d point on the road surface (in DIN70000, i.e. X to front,
-        Y to left) to image coordinates.
-        """
-        ptsRd = np.array(ptsRd)
-        N, cols = ptsRd.shape
-        if (cols == 1) and (N == 2):
-            # if just two coordinates are given, we make sure we have a (2x1) vector
-            ptsRd = ptsRd.T
-            N = cols
-        assert (
-            cols == 2
-        ), "ptsRd should be a Nx2 numpy array, but we obtained (%d, %d)" % (N, cols)
-
-        # go back from DIN70000 to our image coordinate system
-        pts = np.hstack(
-            (
-                -ptsRd[:, 1].reshape((-1, 1)),
-                ptsRd[:, 0].reshape((-1, 1)),
-                np.ones((N, 1)),
-            )
-        )
-        x = self.H * pts.T
-        x = x / x[2, :]
-        x = x.T
-
-        if imSize is not None:
-            valid = np.logical_and((x[:, 0] >= 0), (x[:, 1] >= 0))
-            valid = np.logical_and(valid, (x[:, 0] < imSize[1]))
-            valid = np.logical_and(valid, (x[:, 1] < imSize[0]))
-            valid = np.nonzero(valid)[0].tolist()
-            x = x[valid, :]
-
-        return np.hstack((x[:, 0], x[:, 1]))
-
-    def distance2imagerow(self, distance):
-        """
-        Compute which row in the image corresponds to a distance (in DIN70000).
-        """
-        p = self.road2image(np.array([[distance, 0]]))
-        return p[0, 1]
-
-"#,
-            "simulation_image_helper.py",
-            "simImgHelper",
-            ).unwrap();
-
-            let image_helper_class = simulation_image_helper
-                .getattr("SimulationImageHelper")
-                .unwrap();
-            let house = image_helper_class.call1(("",)).unwrap();
-
-            for pointcloud in pointcloud_rx {
-                let pylist = PyList::new(py, pointcloud.iter().map(|inner| PyList::new(py, inner)));
-                let result : &PyArray2<f64> = house.call_method1("image2road", (pylist,)).unwrap().extract().unwrap();
-                let arr = result.readonly().as_array().outer_iter().map(|row| {
-                    let vec = row.to_vec();
-                    vec![-vec[1], vec[0]]
-                }).collect::<Vec<_>>();
-                projection_tx.send(Arc::new(arr)).unwrap();
-            }
-        });
+        let h_i = [
+            [-7.74914499e-03, 3.95733793e-18, 3.10353257e00],
+            [8.56519716e-18, 9.42313768e-05, -1.86052093e00],
+            [2.57498016e-18, -2.73825295e-03, 1.00000000e00],
+        ];
+
+        for pointcloud in pointcloud_rx {
+            let projection = pointcloud
+                .iter()
+                .map(|pt| {
+                    let x = pt[0];
+                    let y = pt[1];
+
+                    // Convert to homogeneous coordinates
+                    let u = h_i[0][0] * x + h_i[0][1] * y + h_i[0][2];
+                    let v = h_i[1][0] * x + h_i[1][1] * y + h_i[1][2];
+                    let w = h_i[2][0] * x + h_i[2][1] * y + h_i[2][2];
+
+                    let u_norm = u / w;
+                    let v_norm = v / w;
+
+                    vec![u_norm, v_norm]
+                })
+                .collect();
+
+            projection_tx.send(Arc::new(projection)).unwrap();
+        }
     });

     projection_rx
 }
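Note on the replacement above: the removed path pushed every pointcloud through the embedded Python SimulationImageHelper.image2road and then swapped the axes back on the Rust side; the new body multiplies each point by the hard-coded H_I and normalises by w, which yields the same image-to-road projection without the GIL round-trip. Below is a standalone sketch (not part of the commit; apply_homography and the small main driver are illustrative names) of that per-point transform, handy for checking the Rust port against the old helper:

// Standalone sketch (not from the commit): the same per-point homogeneous
// transform that the new create_transform_thread body performs, factored out
// so it can be checked in isolation. H_I is the hard-coded image-to-road
// matrix from the diff; apply_homography and main are illustrative only.
const H_I: [[f64; 3]; 3] = [
    [-7.74914499e-03, 3.95733793e-18, 3.10353257e00],
    [8.56519716e-18, 9.42313768e-05, -1.86052093e00],
    [2.57498016e-18, -2.73825295e-03, 1.00000000e00],
];

// Map an image point (x, y) through the homography and normalise by w.
fn apply_homography(h: &[[f64; 3]; 3], x: f64, y: f64) -> [f64; 2] {
    let u = h[0][0] * x + h[0][1] * y + h[0][2];
    let v = h[1][0] * x + h[1][1] * y + h[1][2];
    let w = h[2][0] * x + h[2][1] * y + h[2][2];
    [u / w, v / w]
}

fn main() {
    // Example: one pixel near the bottom of the frame; w must be non-zero
    // for the result to be meaningful (points on the horizon map to infinity).
    let road = apply_homography(&H_I, 400.0, 600.0);
    println!("road coordinates: {:?}", road);
}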
|
||||
@@ -240,7 +137,7 @@ fn create_filter_thread() {
 }

 fn dummy_image_spawner_thread() -> Receiver<Arc<Vec<u8>>> {
-    let img: DynamicImage = ImageReader::open("./images/lane_detection_loop_60.png").unwrap().decode().unwrap();
+    let img: DynamicImage = ImageReader::open("./images/lane_detection_loop_20.png").unwrap().decode().unwrap();
     let img_data: Vec<u8> = img.clone().into_bytes();

     let (img_tx, img_rx) = mpsc::sync_channel::<Arc<Vec<u8>>>(1);
@@ -257,7 +154,7 @@ fn dummy_image_spawner_thread() -> Receiver<Arc<Vec<u8>>> {
 #[show_image::main]
 fn main() {
     let image_rx = dummy_image_spawner_thread();
-    let pointcloud_rx = create_extract_thread(image_rx, 800, 196, 0.05);
+    let pointcloud_rx = create_extract_thread(image_rx, 800, 196, 0.025);
     let projection_rx = create_transform_thread(pointcloud_rx);
     let cluster_labels_rx = create_pointcloud_labels_thread(projection_rx);
     let clusters_rx = create_cluster_separator_thread(cluster_labels_rx);