diff --git a/.circleci/config.yml b/.circleci/config.yml
index 9f803ece4a..f6eff674de 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -6,7 +6,7 @@ orbs:
jobs:
build_doc:
docker:
- - image: cimg/python:3.12
+ - image: cimg/python:3.13
steps:
- checkout
- run:
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index b6ac52efa2..c0d9f7eab2 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,7 +1,7 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/MissionPlanning/StateMachine/state_machine.py b/MissionPlanning/StateMachine/state_machine.py
index de72f0f451..454759236e 100644
--- a/MissionPlanning/StateMachine/state_machine.py
+++ b/MissionPlanning/StateMachine/state_machine.py
@@ -3,7 +3,7 @@
author: Wang Zheng (@Aglargil)
-Ref:
+Reference:
- [State Machine]
(https://en.wikipedia.org/wiki/Finite-state_machine)
@@ -23,7 +23,7 @@ def deflate_and_encode(plantuml_text):
"""
zlib compress the plantuml text and encode it for the plantuml server.
- Ref: https://plantuml.com/en/text-encoding
+    Reference: https://plantuml.com/en/text-encoding
"""
plantuml_alphabet = (
string.digits + string.ascii_uppercase + string.ascii_lowercase + "-_"
diff --git a/PathPlanning/BatchInformedRRTStar/batch_informed_rrtstar.py b/PathPlanning/BatchInformedRRTStar/batch_informed_rrt_star.py
similarity index 100%
rename from PathPlanning/BatchInformedRRTStar/batch_informed_rrtstar.py
rename to PathPlanning/BatchInformedRRTStar/batch_informed_rrt_star.py
diff --git a/PathPlanning/DStarLite/d_star_lite.py b/PathPlanning/DStarLite/d_star_lite.py
index f099530173..1a44d84fa5 100644
--- a/PathPlanning/DStarLite/d_star_lite.py
+++ b/PathPlanning/DStarLite/d_star_lite.py
@@ -63,9 +63,7 @@ def __init__(self, ox: list, oy: list):
self.y_max = int(abs(max(oy) - self.y_min_world))
self.obstacles = [Node(x - self.x_min_world, y - self.y_min_world)
for x, y in zip(ox, oy)]
- self.obstacles_xy = np.array(
- [[obstacle.x, obstacle.y] for obstacle in self.obstacles]
- )
+ self.obstacles_xy = {(obstacle.x, obstacle.y) for obstacle in self.obstacles}
self.start = Node(0, 0)
self.goal = Node(0, 0)
self.U = list() # type: ignore
@@ -73,7 +71,7 @@ def __init__(self, ox: list, oy: list):
self.kold = 0.0
self.rhs = self.create_grid(float("inf"))
self.g = self.create_grid(float("inf"))
- self.detected_obstacles_xy = np.empty((0, 2))
+ self.detected_obstacles_xy: set[tuple[int, int]] = set()
self.xy = np.empty((0, 2))
if show_animation:
self.detected_obstacles_for_plotting_x = list() # type: ignore
@@ -84,18 +82,8 @@ def create_grid(self, val: float):
return np.full((self.x_max, self.y_max), val)
def is_obstacle(self, node: Node):
- x = np.array([node.x])
- y = np.array([node.y])
- obstacle_x_equal = self.obstacles_xy[:, 0] == x
- obstacle_y_equal = self.obstacles_xy[:, 1] == y
- is_in_obstacles = (obstacle_x_equal & obstacle_y_equal).any()
-
- is_in_detected_obstacles = False
- if self.detected_obstacles_xy.shape[0] > 0:
- is_x_equal = self.detected_obstacles_xy[:, 0] == x
- is_y_equal = self.detected_obstacles_xy[:, 1] == y
- is_in_detected_obstacles = (is_x_equal & is_y_equal).any()
-
+ is_in_obstacles = (node.x, node.y) in self.obstacles_xy
+ is_in_detected_obstacles = (node.x, node.y) in self.detected_obstacles_xy
return is_in_obstacles or is_in_detected_obstacles
def c(self, node1: Node, node2: Node):
@@ -157,7 +145,7 @@ def initialize(self, start: Node, goal: Node):
self.g = self.create_grid(math.inf)
self.rhs[self.goal.x][self.goal.y] = 0
self.U.append((self.goal, self.calculate_key(self.goal)))
- self.detected_obstacles_xy = np.empty((0, 2))
+ self.detected_obstacles_xy = set()
def update_vertex(self, u: Node):
if not compare_coordinates(u, self.goal):
@@ -215,12 +203,7 @@ def detect_changes(self):
compare_coordinates(spoofed_obstacle, self.goal):
continue
changed_vertices.append(spoofed_obstacle)
- self.detected_obstacles_xy = np.concatenate(
- (
- self.detected_obstacles_xy,
- [[spoofed_obstacle.x, spoofed_obstacle.y]]
- )
- )
+ self.detected_obstacles_xy.add((spoofed_obstacle.x, spoofed_obstacle.y))
if show_animation:
self.detected_obstacles_for_plotting_x.append(
spoofed_obstacle.x + self.x_min_world)
@@ -241,12 +224,7 @@ def detect_changes(self):
compare_coordinates(new_obs, self.goal):
return changed_vertices
changed_vertices.append(Node(x, y))
- self.detected_obstacles_xy = np.concatenate(
- (
- self.detected_obstacles_xy,
- [[x, y]]
- )
- )
+ self.detected_obstacles_xy.add((x, y))
if show_animation:
self.detected_obstacles_for_plotting_x.append(x +
self.x_min_world)
diff --git a/PathPlanning/Dijkstra/dijkstra.py b/PathPlanning/Dijkstra/dijkstra.py
index 8a585e4b18..f5a4703910 100644
--- a/PathPlanning/Dijkstra/dijkstra.py
+++ b/PathPlanning/Dijkstra/dijkstra.py
@@ -12,7 +12,7 @@
show_animation = True
-class Dijkstra:
+class DijkstraPlanner:
def __init__(self, ox, oy, resolution, robot_radius):
"""
@@ -246,7 +246,7 @@ def main():
plt.grid(True)
plt.axis("equal")
- dijkstra = Dijkstra(ox, oy, grid_size, robot_radius)
+ dijkstra = DijkstraPlanner(ox, oy, grid_size, robot_radius)
rx, ry = dijkstra.planning(sx, sy, gx, gy)
if show_animation: # pragma: no cover
diff --git a/PathPlanning/ElasticBands/elastic_bands.py b/PathPlanning/ElasticBands/elastic_bands.py
index 785f822d14..77d4e6e399 100644
--- a/PathPlanning/ElasticBands/elastic_bands.py
+++ b/PathPlanning/ElasticBands/elastic_bands.py
@@ -3,7 +3,7 @@
author: Wang Zheng (@Aglargil)
-Ref:
+Reference:
- [Elastic Bands: Connecting Path Planning and Control]
(http://www8.cs.umu.se/research/ifor/dl/Control/elastic%20bands.pdf)
diff --git a/PathPlanning/Eta3SplinePath/eta3_spline_path.py b/PathPlanning/Eta3SplinePath/eta3_spline_path.py
index dc07d3c84b..3f685e512f 100644
--- a/PathPlanning/Eta3SplinePath/eta3_spline_path.py
+++ b/PathPlanning/Eta3SplinePath/eta3_spline_path.py
@@ -5,7 +5,7 @@
author: Joe Dinius, Ph.D (https://jwdinius.github.io)
Atsushi Sakai (@Atsushi_twi)
-Ref:
+Reference:
- [eta^3-Splines for the Smooth Path Generation of Wheeled Mobile Robots]
(https://ieeexplore.ieee.org/document/4339545/)
diff --git a/PathPlanning/Eta3SplineTrajectory/eta3_spline_trajectory.py b/PathPlanning/Eta3SplineTrajectory/eta3_spline_trajectory.py
index a73797dacb..e72d33261e 100644
--- a/PathPlanning/Eta3SplineTrajectory/eta3_spline_trajectory.py
+++ b/PathPlanning/Eta3SplineTrajectory/eta3_spline_trajectory.py
@@ -29,7 +29,7 @@ def __init__(self, actual_vel, max_vel):
self.message = f'Actual velocity {actual_vel} does not equal desired max velocity {max_vel}!'
-class eta3_trajectory(Eta3Path):
+class Eta3SplineTrajectory(Eta3Path):
"""
eta3_trajectory
@@ -300,8 +300,8 @@ def test1(max_vel=0.5):
trajectory_segments.append(Eta3PathSegment(
start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa))
- traj = eta3_trajectory(trajectory_segments,
- max_vel=max_vel, max_accel=0.5)
+ traj = Eta3SplineTrajectory(trajectory_segments,
+ max_vel=max_vel, max_accel=0.5)
# interpolate at several points along the path
times = np.linspace(0, traj.total_time, 101)
@@ -334,8 +334,8 @@ def test2(max_vel=0.5):
trajectory_segments.append(Eta3PathSegment(
start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa))
- traj = eta3_trajectory(trajectory_segments,
- max_vel=max_vel, max_accel=0.5)
+ traj = Eta3SplineTrajectory(trajectory_segments,
+ max_vel=max_vel, max_accel=0.5)
# interpolate at several points along the path
times = np.linspace(0, traj.total_time, 101)
@@ -400,8 +400,8 @@ def test3(max_vel=2.0):
start_pose=start_pose, end_pose=end_pose, eta=eta, kappa=kappa))
# construct the whole path
- traj = eta3_trajectory(trajectory_segments,
- max_vel=max_vel, max_accel=0.5, max_jerk=1)
+ traj = Eta3SplineTrajectory(trajectory_segments,
+ max_vel=max_vel, max_accel=0.5, max_jerk=1)
# interpolate at several points along the path
times = np.linspace(0, traj.total_time, 1001)
diff --git a/PathPlanning/FrenetOptimalTrajectory/cartesian_frenet_converter.py b/PathPlanning/FrenetOptimalTrajectory/cartesian_frenet_converter.py
index 4cc8650c89..482712ceaf 100644
--- a/PathPlanning/FrenetOptimalTrajectory/cartesian_frenet_converter.py
+++ b/PathPlanning/FrenetOptimalTrajectory/cartesian_frenet_converter.py
@@ -4,7 +4,7 @@
author: Wang Zheng (@Aglargil)
-Ref:
+Reference:
- [Optimal Trajectory Generation for Dynamic Street Scenarios in a Frenet Frame]
(https://www.researchgate.net/profile/Moritz_Werling/publication/224156269_Optimal_Trajectory_Generation_for_Dynamic_Street_Scenarios_in_a_Frenet_Frame/links/54f749df0cf210398e9277af.pdf)
diff --git a/PathPlanning/FrenetOptimalTrajectory/frenet_optimal_trajectory.py b/PathPlanning/FrenetOptimalTrajectory/frenet_optimal_trajectory.py
index 248894c1c6..4b82fb70fd 100644
--- a/PathPlanning/FrenetOptimalTrajectory/frenet_optimal_trajectory.py
+++ b/PathPlanning/FrenetOptimalTrajectory/frenet_optimal_trajectory.py
@@ -4,7 +4,7 @@
author: Atsushi Sakai (@Atsushi_twi)
-Ref:
+Reference:
- [Optimal Trajectory Generation for Dynamic Street Scenarios in a Frenet Frame]
(https://www.researchgate.net/profile/Moritz_Werling/publication/224156269_Optimal_Trajectory_Generation_for_Dynamic_Street_Scenarios_in_a_Frenet_Frame/links/54f749df0cf210398e9277af.pdf)
diff --git a/PathPlanning/ParticleSwarmOptimization/particle_swarm_optimization.py b/PathPlanning/ParticleSwarmOptimization/particle_swarm_optimization.py
new file mode 100755
index 0000000000..4d1df5e197
--- /dev/null
+++ b/PathPlanning/ParticleSwarmOptimization/particle_swarm_optimization.py
@@ -0,0 +1,335 @@
+"""
+Particle Swarm Optimization (PSO) Path Planning
+
+author: Anish (@anishk85)
+
+See Wikipedia article (https://en.wikipedia.org/wiki/Particle_swarm_optimization)
+
+References:
+ - Kennedy, J.; Eberhart, R. (1995). "Particle Swarm Optimization"
+ - Shi, Y.; Eberhart, R. (1998). "A Modified Particle Swarm Optimizer"
+ - https://machinelearningmastery.com/a-gentle-introduction-to-particle-swarm-optimization/
+
+This implementation uses PSO to find collision-free paths by treating
+path planning as an optimization problem where particles explore the
+search space to minimize distance to target while avoiding obstacles.
+"""
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.animation as animation
+import matplotlib.patches as patches
+import signal
+import sys
+# Add show_animation flag for consistency with other planners
+show_animation = True
+
+def signal_handler(sig, frame):
+ print("\nExiting...")
+ plt.close("all")
+ sys.exit(0)
+
+signal.signal(signal.SIGINT, signal_handler)
+
+class Particle:
+ """Represents a single particle in the PSO swarm.
+ Each particle maintains its current position, velocity, and personal best
+ position discovered during the search. Particles explore the search space
+ by updating their velocity based on personal experience (cognitive component)
+ and swarm knowledge (social component).
+ Attributes:
+ search_bounds: List of tuples [(x_min, x_max), (y_min, y_max)] defining search space
+ max_velocity: Maximum velocity allowed in each dimension (5% of search space range)
+ position: Current 2D position [x, y] in search space
+ velocity: Current velocity vector [vx, vy]
+ personal_best_position: Personal best position found so far
+ personal_best_value: Fitness value at personal best position
+ path: List of all positions visited by this particle
+ """
+
+ def __init__(self, search_bounds, spawn_bounds):
+ self.search_bounds = search_bounds
+ self.max_velocity = np.array([(b[1] - b[0]) * 0.05 for b in search_bounds])
+ self.position = np.array([np.random.uniform(b[0], b[1]) for b in spawn_bounds])
+ self.velocity = np.random.randn(2) * 0.1
+ self.personal_best_position = self.position.copy()
+ self.personal_best_value = np.inf
+ self.path = [self.position.copy()]
+
+ def update_velocity(self, gbest_pos, w, c1, c2):
+ """Update particle velocity using PSO equation:
+ v = w*v + c1*r1*(personal_best - x) + c2*r2*(gbest - x)
+ """
+ r1 = np.random.rand(2)
+ r2 = np.random.rand(2)
+ cognitive = c1 * r1 * (self.personal_best_position - self.position)
+ social = c2 * r2 * (gbest_pos - self.position)
+ self.velocity = w * self.velocity + cognitive + social
+ self.velocity = np.clip(self.velocity, -self.max_velocity, self.max_velocity)
+
+ def update_position(self):
+ self.position = self.position + self.velocity
+ # Keep in bounds
+ for i in range(2):
+ self.position[i] = np.clip(
+ self.position[i], self.search_bounds[i][0], self.search_bounds[i][1]
+ )
+ self.path.append(self.position.copy())
+
+class PSOSwarm:
+
+ def __init__(
+ self, n_particles, max_iter, target, search_bounds, spawn_bounds, obstacles
+ ):
+ self.n_particles = n_particles
+ self.max_iter = max_iter
+ self.target = np.array(target)
+ self.obstacles = obstacles
+ self.search_bounds = search_bounds
+ # PSO parameters
+ self.w_start = 0.9 # Initial inertia weight
+ self.w_end = 0.4 # Final inertia weight
+ self.c1 = 1.5 # Cognitive coefficient
+ self.c2 = 1.5 # Social coefficient
+ # Initialize particles
+ self.particles = [
+ Particle(search_bounds, spawn_bounds) for _ in range(n_particles)
+ ]
+ self.gbest_position = None
+ self.gbest_value = np.inf
+ self.gbest_path = []
+ self.iteration = 0
+
+ def fitness(self, pos):
+ """Calculate fitness - distance to target + obstacle penalty"""
+ dist = np.linalg.norm(pos - self.target)
+ # Obstacle penalty
+ penalty = 0
+ for ox, oy, r in self.obstacles:
+ obs_dist = np.linalg.norm(pos - np.array([ox, oy]))
+ if obs_dist < r:
+ penalty += 1000 # Inside obstacle
+ elif obs_dist < r + 5:
+ penalty += 50 / (obs_dist - r + 0.1) # Too close
+ return dist + penalty
+
+ def check_collision(self, start, end, obstacle):
+ """Check if path from start to end hits obstacle using line-circle intersection
+ Args:
+ start: Starting position (numpy array)
+ end: Ending position (numpy array)
+ obstacle: Tuple (ox, oy, r) representing obstacle center and radius
+ Returns:
+ bool: True if collision detected, False otherwise
+ """
+ ox, oy, r = obstacle
+ center = np.array([ox, oy])
+ # Vector math for line-circle intersection
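+        # Substituting p(t) = start + t*d into |p(t) - center|^2 = r^2 gives the
+        # quadratic a*t^2 + b*t + c = 0 below; the segment hits the circle if a
+        # real root t lies in [0, 1]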
+ d = end - start
+ f = start - center
+ a = np.dot(d, d)
+ # Guard against zero-length steps to prevent ZeroDivisionError
+ if a < 1e-10: # Near-zero length step
+ # Check if start point is inside obstacle
+ return np.linalg.norm(f) <= r
+ b = 2 * np.dot(f, d)
+ c = np.dot(f, f) - r * r
+ discriminant = b * b - 4 * a * c
+ if discriminant < 0:
+ return False
+ # Check if intersection on segment
+ sqrt_discriminant = np.sqrt(discriminant)
+ t1 = (-b - sqrt_discriminant) / (2 * a)
+ t2 = (-b + sqrt_discriminant) / (2 * a)
+ return (0 <= t1 <= 1) or (0 <= t2 <= 1)
+
+ def step(self):
+ """Run one PSO iteration
+ Returns:
+ bool: True if algorithm should continue, False if completed
+ """
+ if self.iteration >= self.max_iter:
+ return False
+ # Update inertia weight (linear decay)
+ w = self.w_start - (self.w_start - self.w_end) * (
+ self.iteration / self.max_iter
+ )
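+        # e.g. with w_start=0.9 and w_end=0.4, w decays linearly from 0.9 at
+        # iteration 0 to ~0.65 at the halfway point, approaching 0.4 in the
+        # final iteration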
+ # Evaluate all particles
+ for particle in self.particles:
+ value = self.fitness(particle.position)
+ # Update personal best
+ if value < particle.personal_best_value:
+ particle.personal_best_value = value
+ particle.personal_best_position = particle.position.copy()
+ # Update global best
+ if value < self.gbest_value:
+ self.gbest_value = value
+ self.gbest_position = particle.position.copy()
+ if self.gbest_position is not None:
+ self.gbest_path.append(self.gbest_position.copy())
+ # Update particles
+ for particle in self.particles:
+ particle.update_velocity(self.gbest_position, w, self.c1, self.c2)
+ # Predict next position
+ next_pos = particle.position + particle.velocity
+ # Check collision
+ collision = False
+ for obs in self.obstacles:
+ if self.check_collision(particle.position, next_pos, obs):
+ collision = True
+ break
+ if collision:
+ # Reduce velocity if collision detected
+ particle.velocity *= 0.2
+ particle.update_position()
+ self.iteration += 1
+ if show_animation and self.iteration % 20 == 0:
+ print(
+ f"Iteration {self.iteration}/{self.max_iter}, Best: {self.gbest_value:.2f}"
+ )
+ return True
+
+def main():
+ """Run PSO path planning algorithm demonstration.
+ This function demonstrates PSO-based path planning with the following setup:
+ - 15 particles exploring a (-50,50) x (-50,50) search space
+ - Start zone: (-45,-45) to (-35,-35)
+ - Target: (40, 35)
+ - 4 circular obstacles with collision avoidance
+ - Real-time visualization showing particle convergence (if show_animation=True)
+ - Headless mode support for testing (if show_animation=False)
+ The algorithm runs for up to 150 iterations, displaying particle movement,
+ personal/global best positions, and the evolving optimal path.
+ """
+ print(__file__ + " start!!")
+ # Set matplotlib backend for headless environments
+ if not show_animation:
+ plt.switch_backend("Agg") # Use non-GUI backend for testing
+ # Setup parameters
+ N_PARTICLES = 15
+ MAX_ITER = 150
+ SEARCH_BOUNDS = [(-50, 50), (-50, 50)]
+ TARGET = [40, 35]
+ SPAWN_AREA = [(-45, -35), (-45, -35)]
+ OBSTACLES = [(10, 15, 8), (-20, 0, 12), (20, -25, 10), (-5, -30, 7)]
+ swarm = PSOSwarm(
+ n_particles=N_PARTICLES,
+ max_iter=MAX_ITER,
+ target=TARGET,
+ search_bounds=SEARCH_BOUNDS,
+ spawn_bounds=SPAWN_AREA,
+ obstacles=OBSTACLES,
+ )
+    if show_animation:  # pragma: no cover
+ # Visualization setup
+ signal.signal(signal.SIGINT, signal_handler)
+ fig, ax = plt.subplots(figsize=(10, 10))
+ ax.set_xlim(SEARCH_BOUNDS[0])
+ ax.set_ylim(SEARCH_BOUNDS[1])
+ ax.set_title("PSO Path Planning with Collision Avoidance", fontsize=14)
+ ax.grid(True, alpha=0.3)
+ # Draw obstacles
+ for ox, oy, r in OBSTACLES:
+ circle = patches.Circle((ox, oy), r, color="gray", alpha=0.7)
+ ax.add_patch(circle)
+ # Draw spawn area
+ spawn_rect = patches.Rectangle(
+ (SPAWN_AREA[0][0], SPAWN_AREA[1][0]),
+ SPAWN_AREA[0][1] - SPAWN_AREA[0][0],
+ SPAWN_AREA[1][1] - SPAWN_AREA[1][0],
+ linewidth=2,
+ edgecolor="green",
+ facecolor="green",
+ alpha=0.2,
+ label="Start Zone",
+ )
+ ax.add_patch(spawn_rect)
+ # Draw target
+ ax.plot(TARGET[0], TARGET[1], "r*", markersize=20, label="Target")
+ # Initialize plot elements
+ particles_scatter = ax.scatter(
+ [], [], c="blue", s=50, alpha=0.6, label="Particles"
+ )
+ gbest_scatter = ax.plot([], [], "yo", markersize=12, label="Best Position")[0]
+ particle_paths = [
+ ax.plot([], [], "b-", lw=0.5, alpha=0.2)[0] for _ in range(N_PARTICLES)
+ ]
+ gbest_path_line = ax.plot([], [], "y--", lw=2, alpha=0.8, label="Best Path")[0]
+ iteration_text = ax.text(
+ 0.02,
+ 0.95,
+ "",
+ transform=ax.transAxes,
+ fontsize=12,
+ verticalalignment="top",
+ bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.5),
+ )
+ ax.legend(loc="upper right")
+ def animate(frame):
+ """Animation function for matplotlib FuncAnimation"""
+ if not swarm.step():
+ return (
+ particles_scatter,
+ gbest_scatter,
+ gbest_path_line,
+ iteration_text,
+ *particle_paths,
+ )
+ # Update particle positions
+ positions = np.array([p.position for p in swarm.particles])
+ particles_scatter.set_offsets(positions)
+ # Update particle paths
+ for i, particle in enumerate(swarm.particles):
+ if len(particle.path) > 1:
+ path = np.array(particle.path)
+ particle_paths[i].set_data(path[:, 0], path[:, 1])
+ # Update global best
+ if swarm.gbest_position is not None:
+ gbest_scatter.set_data(
+ [swarm.gbest_position[0]], [swarm.gbest_position[1]]
+ )
+ if len(swarm.gbest_path) > 1:
+ gbest = np.array(swarm.gbest_path)
+ gbest_path_line.set_data(gbest[:, 0], gbest[:, 1])
+ # Update text
+ iteration_text.set_text(
+ f"Iteration: {swarm.iteration}/{MAX_ITER}\n"
+ f"Best Fitness: {swarm.gbest_value:.2f}"
+ )
+ return (
+ particles_scatter,
+ gbest_scatter,
+ gbest_path_line,
+ iteration_text,
+ *particle_paths,
+ )
+ # Create animation and store reference to prevent garbage collection
+ animation_ref = animation.FuncAnimation(
+ fig, animate, frames=MAX_ITER, interval=100, blit=True, repeat=False
+ )
+ plt.tight_layout()
+ plt.show()
+ # Keep reference to prevent garbage collection
+ return animation_ref
+ else:
+ # Run without animation for testing
+ print("Running PSO algorithm without animation...")
+ iteration_count = 0
+ while swarm.step():
+ iteration_count += 1
+ if iteration_count >= MAX_ITER:
+ break
+ print(f"PSO completed after {iteration_count} iterations")
+ print(f"Best fitness: {swarm.gbest_value:.2f}")
+ if swarm.gbest_position is not None:
+ print(
+ f"Best position: [{swarm.gbest_position[0]:.2f}, {swarm.gbest_position[1]:.2f}]"
+ )
+ return None
+if __name__ == "__main__":
+ try:
+ main()
+ except KeyboardInterrupt:
+ print("\nProgram interrupted by user")
+ plt.close("all")
+ sys.exit(0)
\ No newline at end of file
diff --git a/PathPlanning/PotentialFieldPlanning/potential_field_planning.py b/PathPlanning/PotentialFieldPlanning/potential_field_planning.py
index 8f136b5ee3..603a9d16cf 100644
--- a/PathPlanning/PotentialFieldPlanning/potential_field_planning.py
+++ b/PathPlanning/PotentialFieldPlanning/potential_field_planning.py
@@ -4,7 +4,7 @@
author: Atsushi Sakai (@Atsushi_twi)
-Ref:
+Reference:
https://www.cs.cmu.edu/~motionplanning/lecture/Chap4-Potential-Field_howie.pdf
"""
diff --git a/PathPlanning/QuinticPolynomialsPlanner/quintic_polynomials_planner.py b/PathPlanning/QuinticPolynomialsPlanner/quintic_polynomials_planner.py
index fdc181afab..86f9f662da 100644
--- a/PathPlanning/QuinticPolynomialsPlanner/quintic_polynomials_planner.py
+++ b/PathPlanning/QuinticPolynomialsPlanner/quintic_polynomials_planner.py
@@ -4,7 +4,7 @@
author: Atsushi Sakai (@Atsushi_twi)
-Ref:
+Reference:
- [Local Path planning And Motion Control For Agv In Positioning](https://ieeexplore.ieee.org/document/637936/)
diff --git a/PathPlanning/RRT/rrt_with_pathsmoothing.py b/PathPlanning/RRT/rrt_with_pathsmoothing.py
index 2ed6a366d1..ac68efe23f 100644
--- a/PathPlanning/RRT/rrt_with_pathsmoothing.py
+++ b/PathPlanning/RRT/rrt_with_pathsmoothing.py
@@ -51,30 +51,93 @@ def get_target_point(path, targetL):
return [x, y, ti]
-def line_collision_check(first, second, obstacleList):
- # Line Equation
-
- x1 = first[0]
- y1 = first[1]
- x2 = second[0]
- y2 = second[1]
-
- try:
- a = y2 - y1
- b = -(x2 - x1)
- c = y2 * (x2 - x1) - x2 * (y2 - y1)
- except ZeroDivisionError:
- return False
-
- for (ox, oy, size) in obstacleList:
- d = abs(a * ox + b * oy + c) / (math.hypot(a, b))
- if d <= size:
- return False
-
- return True # OK
-
-
-def path_smoothing(path, max_iter, obstacle_list):
+def is_point_collision(x, y, obstacle_list, robot_radius):
+ """
+ Check whether a single point collides with any obstacle.
+
+ This function calculates the Euclidean distance between the given point (x, y)
+ and each obstacle center. If the distance is less than or equal to the sum of
+ the obstacle's radius and the robot's radius, a collision is detected.
+
+ Args:
+ x (float): X-coordinate of the point to check.
+ y (float): Y-coordinate of the point to check.
+ obstacle_list (List[Tuple[float, float, float]]): List of obstacles defined as (ox, oy, radius).
+ robot_radius (float): Radius of the robot, used to inflate the obstacles.
+
+ Returns:
+ bool: True if the point is in collision with any obstacle, False otherwise.
+ """
+ for (ox, oy, obstacle_radius) in obstacle_list:
+ d = math.hypot(ox - x, oy - y)
+ if d <= obstacle_radius + robot_radius:
+ return True # Collided
+ return False
+
+
+def line_collision_check(first, second, obstacle_list, robot_radius=0.0, sample_step=0.2):
+ """
+ Check if the line segment between `first` and `second` collides with any obstacle.
+ Considers the robot_radius by inflating the obstacle size.
+
+ Args:
+ first (List[float]): Start point of the line [x, y]
+ second (List[float]): End point of the line [x, y]
+ obstacle_list (List[Tuple[float, float, float]]): Obstacles as (x, y, radius)
+ robot_radius (float): Radius of robot
+ sample_step (float): Distance between sampling points along the segment
+
+ Returns:
+ bool: True if collision-free, False otherwise
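+
+    Example (illustrative values):
+        >>> line_collision_check([0, 0], [10, 0], [(5, 2, 1)], robot_radius=0.5)
+        True
+        >>> line_collision_check([0, 0], [10, 0], [(5, 1, 1)], robot_radius=0.5)
+        False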
+ """
+ x1, y1 = first[0], first[1]
+ x2, y2 = second[0], second[1]
+
+ dx = x2 - x1
+ dy = y2 - y1
+ length = math.hypot(dx, dy)
+
+ if length == 0:
+ # Degenerate case: point collision check
+ return not is_point_collision(x1, y1, obstacle_list, robot_radius)
+
+ steps = int(length / sample_step) + 1 # Sampling every sample_step along the segment
+
+ for i in range(steps + 1):
+ t = i / steps
+ x = x1 + t * dx
+ y = y1 + t * dy
+
+ if is_point_collision(x, y, obstacle_list, robot_radius):
+ return False # Collision found
+
+ return True # Safe
+
+
+def path_smoothing(path, max_iter, obstacle_list, robot_radius=0.0):
+ """
+ Smooths a given path by iteratively replacing segments with shortcut connections,
+ while ensuring the new segments are collision-free.
+
+ The algorithm randomly picks two points along the original path and attempts to
+ connect them with a straight line. If the line does not collide with any obstacles
+ (considering the robot's radius), the intermediate path points between them are
+ replaced with the direct connection.
+
+ Args:
+ path (List[List[float]]): The original path as a list of [x, y] coordinates.
+ max_iter (int): Number of iterations for smoothing attempts.
+ obstacle_list (List[Tuple[float, float, float]]): List of obstacles represented as
+ (x, y, radius).
+ robot_radius (float, optional): Radius of the robot, used to inflate obstacle size
+ during collision checking. Defaults to 0.0.
+
+ Returns:
+ List[List[float]]: The smoothed path as a list of [x, y] coordinates.
+
+ Example:
+ >>> smoothed = path_smoothing(path, 1000, obstacle_list, robot_radius=0.5)
+ """
le = get_path_length(path)
for i in range(max_iter):
@@ -94,7 +157,7 @@ def path_smoothing(path, max_iter, obstacle_list):
continue
# collision check
- if not line_collision_check(first, second, obstacle_list):
+ if not line_collision_check(first, second, obstacle_list, robot_radius):
continue
# Create New path
@@ -119,14 +182,16 @@ def main():
(3, 10, 2),
(7, 5, 2),
(9, 5, 2)
- ] # [x,y,size]
+ ] # [x,y,radius]
rrt = RRT(start=[0, 0], goal=[6, 10],
- rand_area=[-2, 15], obstacle_list=obstacleList)
+ rand_area=[-2, 15], obstacle_list=obstacleList,
+ robot_radius=0.3)
path = rrt.planning(animation=show_animation)
# Path smoothing
maxIter = 1000
- smoothedPath = path_smoothing(path, maxIter, obstacleList)
+ smoothedPath = path_smoothing(path, maxIter, obstacleList,
+ robot_radius=rrt.robot_radius)
# Draw final path
if show_animation:
diff --git a/PathPlanning/StateLatticePlanner/state_lattice_planner.py b/PathPlanning/StateLatticePlanner/state_lattice_planner.py
index 7f8e725e0a..554cd0f3b7 100644
--- a/PathPlanning/StateLatticePlanner/state_lattice_planner.py
+++ b/PathPlanning/StateLatticePlanner/state_lattice_planner.py
@@ -8,7 +8,7 @@
https://github.com/AtsushiSakai/PythonRobotics/blob/master/PathPlanning
/ModelPredictiveTrajectoryGenerator/lookup_table_generator.py
-Ref:
+Reference:
- State Space Sampling of Feasible Motions for High-Performance Mobile Robot
Navigation in Complex Environments
@@ -22,7 +22,9 @@
import numpy as np
import math
import pathlib
+
sys.path.append(str(pathlib.Path(__file__).parent.parent))
+sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
import ModelPredictiveTrajectoryGenerator.trajectory_generator as planner
import ModelPredictiveTrajectoryGenerator.motion_model as motion_model
diff --git a/PathPlanning/ThetaStar/theta_star.py b/PathPlanning/ThetaStar/theta_star.py
new file mode 100644
index 0000000000..73a1448b28
--- /dev/null
+++ b/PathPlanning/ThetaStar/theta_star.py
@@ -0,0 +1,345 @@
+"""
+
+Theta* grid planning
+
+author: Musab Kasbati (@Musab1Blaser)
+
+Reference:
+- [Theta*: Any-Angle Path Planning on Grids]
+(https://cdn.aaai.org/AAAI/2007/AAAI07-187.pdf)
+
+"""
+
+import math
+
+import matplotlib.pyplot as plt
+
+show_animation = True
+use_theta_star = True
+
+
+class ThetaStarPlanner:
+
+ def __init__(self, ox, oy, resolution, rr):
+ """
+ Initialize grid map for theta star planning
+
+ ox: x position list of Obstacles [m]
+ oy: y position list of Obstacles [m]
+ resolution: grid resolution [m]
+ rr: robot radius[m]
+ """
+
+ self.resolution = resolution
+ self.rr = rr
+ self.min_x, self.min_y = 0, 0
+ self.max_x, self.max_y = 0, 0
+ self.obstacle_map = None
+ self.x_width, self.y_width = 0, 0
+ self.motion = self.get_motion_model()
+ self.calc_obstacle_map(ox, oy)
+
+ class Node:
+ def __init__(self, x, y, cost, parent_index):
+ self.x = x # index of grid
+ self.y = y # index of grid
+ self.cost = cost
+ self.parent_index = parent_index
+
+ def __str__(self):
+ return str(self.x) + "," + str(self.y) + "," + str(
+ self.cost) + "," + str(self.parent_index)
+
+ def planning(self, sx, sy, gx, gy):
+ """
+ Theta star path search
+
+ input:
+            sx: start x position [m]
+            sy: start y position [m]
+ gx: goal x position [m]
+ gy: goal y position [m]
+
+ output:
+ rx: x position list of the final path
+ ry: y position list of the final path
+ """
+
+ start_node = self.Node(self.calc_xy_index(sx, self.min_x),
+ self.calc_xy_index(sy, self.min_y), 0.0, -1)
+ goal_node = self.Node(self.calc_xy_index(gx, self.min_x),
+ self.calc_xy_index(gy, self.min_y), 0.0, -1)
+
+ open_set, closed_set = dict(), dict()
+ open_set[self.calc_grid_index(start_node)] = start_node
+
+ while True:
+ if len(open_set) == 0:
+ print("Open set is empty..")
+ break
+
+ c_id = min(
+ open_set,
+ key=lambda o: open_set[o].cost + self.calc_heuristic(goal_node,
+ open_set[
+ o]))
+ current = open_set[c_id]
+
+ # show graph
+ if show_animation: # pragma: no cover
+ x = self.calc_grid_position(current.x, self.min_x)
+ y = self.calc_grid_position(current.y, self.min_y)
+
+ # Draw an arrow toward the parent
+ if current.parent_index != -1 and current.parent_index in closed_set:
+ parent = closed_set[current.parent_index]
+ px = self.calc_grid_position(parent.x, self.min_x)
+ py = self.calc_grid_position(parent.y, self.min_y)
+
+ # Vector from current to parent
+ dx = px - x
+ dy = py - y
+
+ # scale down vector for visibility
+ norm = math.hypot(dx, dy)
+ dx /= norm
+ dy /= norm
+
+ # Draw a small arrow (scale it down for visibility)
+ plt.arrow(x, y, dx, dy,
+ head_width=0.5, head_length=0.5,
+ fc='c', ec='c', alpha=0.7)
+
+ # For stopping simulation with the esc key
+ plt.gcf().canvas.mpl_connect(
+ 'key_release_event',
+ lambda event: [exit(0) if event.key == 'escape' else None]
+ )
+
+ if len(closed_set.keys()) % 10 == 0:
+ plt.pause(0.001)
+
+ if current.x == goal_node.x and current.y == goal_node.y:
+ print("Find goal")
+ goal_node.parent_index = current.parent_index
+ goal_node.cost = current.cost
+ break
+
+ # Remove the item from the open set
+ del open_set[c_id]
+
+ # Add it to the closed set
+ closed_set[c_id] = current
+
+ # expand_grid search grid based on motion model
+ for i, _ in enumerate(self.motion):
+ node = self.Node(current.x + self.motion[i][0],
+ current.y + self.motion[i][1],
+ current.cost + self.motion[i][2], c_id) # cost may later be updated by theta star path compression
+ n_id = self.calc_grid_index(node)
+
+ if not self.verify_node(node):
+ continue
+
+ if n_id in closed_set:
+ continue
+
+ # Theta* modification:
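+                # If the parent of `current` has line of sight to this neighbor,
+                # connect the neighbor directly to that grandparent (Path 2 in
+                # the Theta* paper) instead of routing through `current` (Path 1);
+                # this is what produces any-angle paths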
+ if use_theta_star and current.parent_index != -1 and current.parent_index in closed_set:
+ grandparent = closed_set[current.parent_index]
+ if self.line_of_sight(grandparent, node):
+ # If parent(current) has line of sight to neighbor
+ node.cost = grandparent.cost + math.hypot(node.x - grandparent.x, node.y - grandparent.y)
+ node.parent_index = current.parent_index # compress path directly to grandparent
+
+ if n_id not in open_set:
+ open_set[n_id] = node
+ else:
+ if open_set[n_id].cost > node.cost:
+ # This path is the best until now. record it
+ open_set[n_id] = node
+
+
+ rx, ry = self.calc_final_path(goal_node, closed_set)
+
+ return rx, ry
+
+ def calc_final_path(self, goal_node, closed_set):
+ # generate final course
+ rx, ry = [self.calc_grid_position(goal_node.x, self.min_x)], [
+ self.calc_grid_position(goal_node.y, self.min_y)]
+ parent_index = goal_node.parent_index
+ while parent_index != -1:
+ n = closed_set[parent_index]
+ rx.append(self.calc_grid_position(n.x, self.min_x))
+ ry.append(self.calc_grid_position(n.y, self.min_y))
+ parent_index = n.parent_index
+
+ return rx, ry
+
+ @staticmethod
+ def calc_heuristic(n1, n2):
+ w = 1.0 # weight of heuristic
+ d = w * math.hypot(n1.x - n2.x, n1.y - n2.y)
+ return d
+
+ def calc_grid_position(self, index, min_position):
+ """
+ calc grid position
+
+ :param index:
+ :param min_position:
+ :return:
+ """
+ pos = index * self.resolution + min_position
+ return pos
+
+ def calc_xy_index(self, position, min_pos):
+ return round((position - min_pos) / self.resolution)
+
+ def calc_grid_index(self, node):
+ return (node.y - self.min_y) * self.x_width + (node.x - self.min_x)
+
+ def line_of_sight(self, node1, node2):
+ """
+ Check if there is a direct line of sight between two nodes.
+ Uses Bresenham’s line algorithm for grid traversal.
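+        For example, a line of sight check from (0, 0) to (3, 3) visits cells
+        (0, 0), (1, 1), (2, 2) and (3, 3), and fails if any of them is an
+        obstacle or outside the grid.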
+ """
+ x0 = node1.x
+ y0 = node1.y
+ x1 = node2.x
+ y1 = node2.y
+
+ dx = abs(x1 - x0)
+ dy = abs(y1 - y0)
+ sx = 1 if x0 < x1 else -1
+ sy = 1 if y0 < y1 else -1
+
+ err = dx - dy
+
+ while True:
+ if not self.verify_node(self.Node(x0, y0, 0, -1)):
+ return False
+ if x0 == x1 and y0 == y1:
+ break
+ e2 = 2 * err
+ if e2 > -dy:
+ err -= dy
+ x0 += sx
+ if e2 < dx:
+ err += dx
+ y0 += sy
+ return True
+
+
+ def verify_node(self, node):
+ px = self.calc_grid_position(node.x, self.min_x)
+ py = self.calc_grid_position(node.y, self.min_y)
+
+ if px < self.min_x:
+ return False
+ elif py < self.min_y:
+ return False
+ elif px >= self.max_x:
+ return False
+ elif py >= self.max_y:
+ return False
+
+ # collision check
+ if self.obstacle_map[node.x][node.y]:
+ return False
+
+ return True
+
+ def calc_obstacle_map(self, ox, oy):
+
+ self.min_x = round(min(ox))
+ self.min_y = round(min(oy))
+ self.max_x = round(max(ox))
+ self.max_y = round(max(oy))
+ print("min_x:", self.min_x)
+ print("min_y:", self.min_y)
+ print("max_x:", self.max_x)
+ print("max_y:", self.max_y)
+
+ self.x_width = round((self.max_x - self.min_x) / self.resolution)
+ self.y_width = round((self.max_y - self.min_y) / self.resolution)
+ print("x_width:", self.x_width)
+ print("y_width:", self.y_width)
+
+ # obstacle map generation
+ self.obstacle_map = [[False for _ in range(self.y_width)]
+ for _ in range(self.x_width)]
+ for ix in range(self.x_width):
+ x = self.calc_grid_position(ix, self.min_x)
+ for iy in range(self.y_width):
+ y = self.calc_grid_position(iy, self.min_y)
+ for iox, ioy in zip(ox, oy):
+ d = math.hypot(iox - x, ioy - y)
+ if d <= self.rr:
+ self.obstacle_map[ix][iy] = True
+ break
+
+ @staticmethod
+ def get_motion_model():
+ # dx, dy, cost
+ motion = [[1, 0, 1],
+ [0, 1, 1],
+ [-1, 0, 1],
+ [0, -1, 1],
+ [-1, -1, math.sqrt(2)],
+ [-1, 1, math.sqrt(2)],
+ [1, -1, math.sqrt(2)],
+ [1, 1, math.sqrt(2)]]
+
+ return motion
+
+
+def main():
+ print(__file__ + " start!!")
+
+ # start and goal position
+ sx = 10.0 # [m]
+ sy = 10.0 # [m]
+ gx = 50.0 # [m]
+ gy = 50.0 # [m]
+ grid_size = 2.0 # [m]
+ robot_radius = 1.0 # [m]
+
+ # set obstacle positions
+ ox, oy = [], []
+ for i in range(-10, 60):
+ ox.append(i)
+ oy.append(-10.0)
+ for i in range(-10, 60):
+ ox.append(60.0)
+ oy.append(i)
+ for i in range(-10, 61):
+ ox.append(i)
+ oy.append(60.0)
+ for i in range(-10, 61):
+ ox.append(-10.0)
+ oy.append(i)
+ for i in range(-10, 40):
+ ox.append(20.0)
+ oy.append(i)
+ for i in range(0, 40):
+ ox.append(40.0)
+ oy.append(60.0 - i)
+
+ if show_animation: # pragma: no cover
+ plt.plot(ox, oy, ".k")
+ plt.plot(sx, sy, "og")
+ plt.plot(gx, gy, "xb")
+ plt.grid(True)
+ plt.axis("equal")
+
+ theta_star = ThetaStarPlanner(ox, oy, grid_size, robot_radius)
+ rx, ry = theta_star.planning(sx, sy, gx, gy)
+
+ if show_animation: # pragma: no cover
+ plt.plot(rx, ry, "-r")
+ plt.pause(0.001)
+ plt.show()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/PathPlanning/TimeBasedPathPlanning/BaseClasses.py b/PathPlanning/TimeBasedPathPlanning/BaseClasses.py
new file mode 100644
index 0000000000..745cde45fb
--- /dev/null
+++ b/PathPlanning/TimeBasedPathPlanning/BaseClasses.py
@@ -0,0 +1,49 @@
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from PathPlanning.TimeBasedPathPlanning.GridWithDynamicObstacles import (
+ Grid,
+ Position,
+)
+from PathPlanning.TimeBasedPathPlanning.Node import NodePath
+import random
+import numpy.random as numpy_random
+
+# Seed randomness for reproducibility
+RANDOM_SEED = 50
+random.seed(RANDOM_SEED)
+numpy_random.seed(RANDOM_SEED)
+
+class SingleAgentPlanner(ABC):
+ """
+ Base class for single agent planners
+ """
+
+ @staticmethod
+ @abstractmethod
+ def plan(grid: Grid, start: Position, goal: Position, verbose: bool = False) -> NodePath:
+ pass
+
+@dataclass
+class StartAndGoal:
+ # Index of this agent
+ index: int
+ # Start position of the robot
+ start: Position
+ # Goal position of the robot
+ goal: Position
+
+    def distance_start_to_goal(self) -> float:
+        # Squared Euclidean distance; only used to rank agents, so taking the
+        # square root is unnecessary
+        return pow(self.goal.x - self.start.x, 2) + pow(self.goal.y - self.start.y, 2)
+
+class MultiAgentPlanner(ABC):
+ """
+ Base class for multi-agent planners
+ """
+
+ @staticmethod
+ @abstractmethod
+ def plan(grid: Grid, start_and_goal_positions: list[StartAndGoal], single_agent_planner_class: SingleAgentPlanner, verbose: bool = False) -> tuple[list[StartAndGoal], list[NodePath]]:
+ """
+ Plan for all agents. Returned paths are in order corresponding to the returned list of `StartAndGoal` objects
+ """
+ pass
\ No newline at end of file
diff --git a/PathPlanning/TimeBasedPathPlanning/GridWithDynamicObstacles.py b/PathPlanning/TimeBasedPathPlanning/GridWithDynamicObstacles.py
index 7b0190d023..ccc2989001 100644
--- a/PathPlanning/TimeBasedPathPlanning/GridWithDynamicObstacles.py
+++ b/PathPlanning/TimeBasedPathPlanning/GridWithDynamicObstacles.py
@@ -7,36 +7,29 @@
import matplotlib.pyplot as plt
from enum import Enum
from dataclasses import dataclass
+from PathPlanning.TimeBasedPathPlanning.Node import NodePath, Position
-@dataclass(order=True)
-class Position:
- x: int
- y: int
-
- def as_ndarray(self) -> np.ndarray:
- return np.array([self.x, self.y])
-
- def __add__(self, other):
- if isinstance(other, Position):
- return Position(self.x + other.x, self.y + other.y)
- raise NotImplementedError(
- f"Addition not supported for Position and {type(other)}"
- )
-
- def __sub__(self, other):
- if isinstance(other, Position):
- return Position(self.x - other.x, self.y - other.y)
- raise NotImplementedError(
- f"Subtraction not supported for Position and {type(other)}"
- )
-
+@dataclass
+class Interval:
+ start_time: int
+ end_time: int
class ObstacleArrangement(Enum):
# Random obstacle positions and movements
RANDOM = 0
# Obstacles start in a line in y at center of grid and move side-to-side in x
ARRANGEMENT1 = 1
+ # Static obstacle arrangement
+ NARROW_CORRIDOR = 2
+"""
+Generates a 2d numpy array with lists for elements.
+"""
+def empty_2d_array_of_lists(x: int, y: int) -> np.ndarray:
+ arr = np.empty((x, y), dtype=object)
+ # assign each element individually - np.full creates references to the same list
+ arr[:] = [[[] for _ in range(y)] for _ in range(x)]
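+    # e.g. empty_2d_array_of_lists(2, 2)[0, 0].append(1) then modifies only that
+    # one cell, since every cell received its own list above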
+ return arr
class Grid:
# Set in constructor
@@ -72,6 +65,8 @@ def __init__(
self.obstacle_paths = self.generate_dynamic_obstacles(num_obstacles)
elif obstacle_arrangement == ObstacleArrangement.ARRANGEMENT1:
self.obstacle_paths = self.obstacle_arrangement_1(num_obstacles)
+ elif obstacle_arrangement == ObstacleArrangement.NARROW_CORRIDOR:
+ self.obstacle_paths = self.generate_narrow_corridor_obstacles(num_obstacles)
for i, path in enumerate(self.obstacle_paths):
obs_idx = i + 1 # avoid using 0 - that indicates free space in the grid
@@ -86,7 +81,7 @@ def __init__(
"""
def generate_dynamic_obstacles(self, obs_count: int) -> list[list[Position]]:
obstacle_paths = []
- for _ in (0, obs_count):
+ for _ in range(0, obs_count):
# Sample until a free starting space is found
initial_position = self.sample_random_position()
while not self.valid_obstacle_position(initial_position, 0):
@@ -169,6 +164,26 @@ def obstacle_arrangement_1(self, obs_count: int) -> list[list[Position]]:
obstacle_paths.append(path)
return obstacle_paths
+
+ def generate_narrow_corridor_obstacles(self, obs_count: int) -> list[list[Position]]:
+ obstacle_paths = []
+
+ for y in range(0, self.grid_size[1]):
+ if y > obs_count:
+ break
+
+ if y == self.grid_size[1] // 2:
+ # Skip the middle row
+ continue
+
+ obstacle_path = []
+ x = self.grid_size[0] // 2 # middle of the grid
+ for t in range(0, self.time_limit - 1):
+ obstacle_path.append(Position(x, y))
+
+ obstacle_paths.append(obstacle_path)
+
+ return obstacle_paths
"""
Check if the given position is valid at time t
@@ -181,11 +196,11 @@ def obstacle_arrangement_1(self, obs_count: int) -> list[list[Position]]:
bool: True if position/time combination is valid, False otherwise
"""
def valid_position(self, position: Position, t: int) -> bool:
- # Check if new position is in grid
+ # Check if position is in grid
if not self.inside_grid_bounds(position):
return False
- # Check if new position is not occupied at time t
+ # Check if position is not occupied at time t
return self.reservation_matrix[position.x, position.y, t] == 0
"""
@@ -231,9 +246,91 @@ def get_obstacle_positions_at_time(self, t: int) -> tuple[list[int], list[int]]:
y_positions.append(obs_path[t].y)
return (x_positions, y_positions)
+ """
+ Returns safe intervals for each cell.
+ """
+ def get_safe_intervals(self) -> np.ndarray:
+ intervals = empty_2d_array_of_lists(self.grid_size[0], self.grid_size[1])
+ for x in range(intervals.shape[0]):
+ for y in range(intervals.shape[1]):
+ intervals[x, y] = self.get_safe_intervals_at_cell(Position(x, y))
+
+ return intervals
-show_animation = True
+ """
+ Generate the safe intervals for a given cell. The intervals will be in order of start time.
+ ex: Interval (2, 3) will be before Interval (4, 5)
+ """
+ def get_safe_intervals_at_cell(self, cell: Position) -> list[Interval]:
+ vals = self.reservation_matrix[cell.x, cell.y, :]
+ # Find where the array is zero
+ zero_mask = (vals == 0)
+
+ # Identify transitions between zero and nonzero elements
+ diff = np.diff(zero_mask.astype(int))
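+        # e.g. vals = [0, 0, 1, 0] -> zero_mask = [T, T, F, T] -> diff = [0, -1, 1],
+        # which (after the edge handling below) yields start_indices = [0, 3] and
+        # end_indices = [1, 3], i.e. candidate intervals (0, 1) and (3, 3)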
+
+ # Start indices: where zeros begin (1 after a nonzero)
+ start_indices = np.where(diff == 1)[0] + 1
+
+ # End indices: where zeros stop (just before a nonzero)
+ end_indices = np.where(diff == -1)[0]
+
+ # Handle edge cases if the array starts or ends with zeros
+ if zero_mask[0]: # If the first element is zero, add index 0 to start_indices
+ start_indices = np.insert(start_indices, 0, 0)
+ if zero_mask[-1]: # If the last element is zero, add the last index to end_indices
+ end_indices = np.append(end_indices, len(vals) - 1)
+
+ # Create pairs of (first zero, last zero)
+ intervals = [Interval(int(start), int(end)) for start, end in zip(start_indices, end_indices)]
+
+        # Remove intervals where a cell is only free for one time step. Those intervals do not provide
+        # enough time to move into and out of the cell: each move takes 1 time step, and the cell is
+        # considered occupied both during the time step when the agent enters it and the time step when
+        # it leaves.
+ intervals = [interval for interval in intervals if interval.start_time != interval.end_time]
+ return intervals
+
+ """
+ Reserve an agent's path in the grid. Raises an exception if the agent's index is 0, or if a position is
+ already reserved by a different agent.
+ """
+ def reserve_path(self, node_path: NodePath, agent_index: int):
+ if agent_index == 0:
+ raise Exception("Agent index cannot be 0")
+
+ for i, node in enumerate(node_path.path):
+ reservation_finish_time = node.time + 1
+ if i < len(node_path.path) - 1:
+ reservation_finish_time = node_path.path[i + 1].time
+
+ self.reserve_position(node.position, agent_index, Interval(node.time, reservation_finish_time))
+ """
+ Reserve a position for the provided agent during the provided time interval.
+ Raises an exception if the agent's index is 0, or if the position is already reserved by a different agent during the interval.
+ """
+ def reserve_position(self, position: Position, agent_index: int, interval: Interval):
+ if agent_index == 0:
+ raise Exception("Agent index cannot be 0")
+
+ for t in range(interval.start_time, interval.end_time + 1):
+ current_reserver = self.reservation_matrix[position.x, position.y, t]
+ if current_reserver not in [0, agent_index]:
+ raise Exception(
+ f"Agent {agent_index} tried to reserve a position already reserved by another agent: {position} at time {t}, reserved by {current_reserver}"
+ )
+ self.reservation_matrix[position.x, position.y, t] = agent_index
+
+ """
+    Clears the initial reservation for an agent by removing reservations made with its index at its
+    start position, from time 0 to the time limit.
+ """
+ def clear_initial_reservation(self, position: Position, agent_index: int):
+ for t in range(self.time_limit):
+ if self.reservation_matrix[position.x, position.y, t] == agent_index:
+ self.reservation_matrix[position.x, position.y, t] = 0
+
+show_animation = True
def main():
grid = Grid(
diff --git a/PathPlanning/TimeBasedPathPlanning/Node.py b/PathPlanning/TimeBasedPathPlanning/Node.py
new file mode 100644
index 0000000000..728eebb676
--- /dev/null
+++ b/PathPlanning/TimeBasedPathPlanning/Node.py
@@ -0,0 +1,99 @@
+from dataclasses import dataclass
+from functools import total_ordering
+import numpy as np
+from typing import Sequence
+
+@dataclass(order=True)
+class Position:
+ x: int
+ y: int
+
+ def as_ndarray(self) -> np.ndarray:
+ return np.array([self.x, self.y])
+
+ def __add__(self, other):
+ if isinstance(other, Position):
+ return Position(self.x + other.x, self.y + other.y)
+ raise NotImplementedError(
+ f"Addition not supported for Position and {type(other)}"
+ )
+
+ def __sub__(self, other):
+ if isinstance(other, Position):
+ return Position(self.x - other.x, self.y - other.y)
+ raise NotImplementedError(
+ f"Subtraction not supported for Position and {type(other)}"
+ )
+
+ def __hash__(self):
+ return hash((self.x, self.y))
+
+@dataclass()
+# Note: Total_ordering is used instead of adding `order=True` to the @dataclass decorator because
+# this class needs to override the __lt__ and __eq__ methods to ignore parent_index. Parent
+# index is just used to track the path found by the algorithm, and has no effect on the quality
+# of a node.
+@total_ordering
+class Node:
+ position: Position
+ time: int
+ heuristic: int
+ parent_index: int
+
+ """
+ This is what is used to drive node expansion. The node with the lowest value is expanded next.
+ This comparison prioritizes the node with the lowest cost-to-come (self.time) + cost-to-go (self.heuristic)
+ """
+ def __lt__(self, other: object):
+ if not isinstance(other, Node):
+ return NotImplementedError(f"Cannot compare Node with object of type: {type(other)}")
+ return (self.time + self.heuristic) < (other.time + other.heuristic)
+
+ """
+    Note: the heuristic and parent_index are not included in eq or hash. The heuristic is fully
+    determined by a node's position, so including it alongside (position, time) would be redundant.
+ """
+ def __eq__(self, other: object):
+ if not isinstance(other, Node):
+ return NotImplementedError(f"Cannot compare Node with object of type: {type(other)}")
+ return self.position == other.position and self.time == other.time
+
+ def __hash__(self):
+ return hash((self.position, self.time))
+
+class NodePath:
+ path: Sequence[Node]
+ positions_at_time: dict[int, Position]
+ # Number of nodes expanded while finding this path
+ expanded_node_count: int
+
+ def __init__(self, path: Sequence[Node], expanded_node_count: int):
+ self.path = path
+ self.expanded_node_count = expanded_node_count
+
+ self.positions_at_time = {}
+ for i, node in enumerate(path):
+ reservation_finish_time = node.time + 1
+ if i < len(path) - 1:
+ reservation_finish_time = path[i + 1].time
+
+ for t in range(node.time, reservation_finish_time):
+ self.positions_at_time[t] = node.position
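+            # e.g. a node at time 2 followed by a node at time 5 occupies its
+            # position for t = 2, 3, 4: the agent waits in place until it moves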
+
+ """
+ Get the position of the path at a given time
+ """
+ def get_position(self, time: int) -> Position | None:
+ return self.positions_at_time.get(time)
+
+ """
+ Time stamp of the last node in the path
+ """
+ def goal_reached_time(self) -> int:
+ return self.path[-1].time
+
+ def __repr__(self):
+ repr_string = ""
+ for i, node in enumerate(self.path):
+ repr_string += f"{i}: {node}\n"
+ return repr_string
\ No newline at end of file
diff --git a/PathPlanning/TimeBasedPathPlanning/Plotting.py b/PathPlanning/TimeBasedPathPlanning/Plotting.py
new file mode 100644
index 0000000000..7cd1f615d8
--- /dev/null
+++ b/PathPlanning/TimeBasedPathPlanning/Plotting.py
@@ -0,0 +1,135 @@
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib.backend_bases import KeyEvent
+from PathPlanning.TimeBasedPathPlanning.GridWithDynamicObstacles import (
+ Grid,
+ Position,
+)
+from PathPlanning.TimeBasedPathPlanning.BaseClasses import StartAndGoal
+from PathPlanning.TimeBasedPathPlanning.Node import NodePath
+
+'''
+Plot a single agent path.
+'''
+def PlotNodePath(grid: Grid, start: Position, goal: Position, path: NodePath):
+ fig = plt.figure(figsize=(10, 7))
+ ax = fig.add_subplot(
+ autoscale_on=False,
+ xlim=(0, grid.grid_size[0] - 1),
+ ylim=(0, grid.grid_size[1] - 1),
+ )
+ ax.set_aspect("equal")
+ ax.grid()
+ ax.set_xticks(np.arange(0, grid.grid_size[0], 1))
+ ax.set_yticks(np.arange(0, grid.grid_size[1], 1))
+
+ (start_and_goal,) = ax.plot([], [], "mD", ms=15, label="Start and Goal")
+ start_and_goal.set_data([start.x, goal.x], [start.y, goal.y])
+ (obs_points,) = ax.plot([], [], "ro", ms=15, label="Obstacles")
+ (path_points,) = ax.plot([], [], "bo", ms=10, label="Path Found")
+ ax.legend(bbox_to_anchor=(1.05, 1))
+
+ # for stopping simulation with the esc key.
+ plt.gcf().canvas.mpl_connect(
+ "key_release_event",
+ lambda event: [exit(0) if event.key == "escape" else None]
+ if isinstance(event, KeyEvent) else None
+ )
+
+    for i in range(0, path.goal_reached_time() + 1):
+ obs_positions = grid.get_obstacle_positions_at_time(i)
+ obs_points.set_data(obs_positions[0], obs_positions[1])
+ path_position = path.get_position(i)
+ if not path_position:
+ raise Exception(f"Path position not found for time {i}.")
+
+ path_points.set_data([path_position.x], [path_position.y])
+ plt.pause(0.2)
+ plt.show()
+
+'''
+Plot a series of agent paths.
+'''
+def PlotNodePaths(grid: Grid, start_and_goals: list[StartAndGoal], paths: list[NodePath]):
+ fig = plt.figure(figsize=(10, 7))
+
+ ax = fig.add_subplot(
+ autoscale_on=False,
+ xlim=(0, grid.grid_size[0] - 1),
+ ylim=(0, grid.grid_size[1] - 1),
+ )
+ ax.set_aspect("equal")
+ ax.grid()
+ ax.set_xticks(np.arange(0, grid.grid_size[0], 1))
+ ax.set_yticks(np.arange(0, grid.grid_size[1], 1))
+
+ # Plot start and goal positions for each agent
+ colors = [] # generated randomly in loop
+ markers = ['D', 's', '^', 'o', 'p'] # Different markers for visual distinction
+
+ # Create plots for start and goal positions
+ start_and_goal_plots = []
+ for i, path in enumerate(paths):
+ marker_idx = i % len(markers)
+ agent_id = start_and_goals[i].index
+ start = start_and_goals[i].start
+ goal = start_and_goals[i].goal
+
+ color = np.random.rand(3,)
+ colors.append(color)
+ sg_plot, = ax.plot([], [], markers[marker_idx], c=color, ms=15,
+ label=f"Agent {agent_id} Start/Goal")
+ sg_plot.set_data([start.x, goal.x], [start.y, goal.y])
+ start_and_goal_plots.append(sg_plot)
+
+ # Plot for obstacles
+ (obs_points,) = ax.plot([], [], "ro", ms=15, label="Obstacles")
+
+ # Create plots for each agent's path
+ path_plots = []
+ for i, path in enumerate(paths):
+ agent_id = start_and_goals[i].index
+ path_plot, = ax.plot([], [], "o", c=colors[i], ms=10,
+ label=f"Agent {agent_id} Path")
+ path_plots.append(path_plot)
+
+ ax.legend(bbox_to_anchor=(1.05, 1))
+
+ # For stopping simulation with the esc key
+ plt.gcf().canvas.mpl_connect(
+ "key_release_event",
+ lambda event: [exit(0) if event.key == "escape" else None]
+ if isinstance(event, KeyEvent) else None
+ )
+
+ # Find the maximum time across all paths
+ max_time = max(path.goal_reached_time() for path in paths)
+
+ # Animation loop
+ for i in range(0, max_time + 1):
+ # Update obstacle positions
+ obs_positions = grid.get_obstacle_positions_at_time(i)
+ obs_points.set_data(obs_positions[0], obs_positions[1])
+
+        # Update each agent's position
+        path_positions = []
+        for (j, path) in enumerate(paths):
+            if i <= path.goal_reached_time():
+                path_position = path.get_position(i)
+                if not path_position:
+                    raise Exception(f"Path position not found for time {i}.")
+
+                # Verify the position is valid: not on an obstacle, and not shared
+                # with another agent at this time step
+                assert (path_position.x, path_position.y) not in zip(obs_positions[0], obs_positions[1])
+                assert path_position not in path_positions
+                path_positions.append(path_position)
+
+                path_plots[j].set_data([path_position.x], [path_position.y])
+
+ plt.pause(0.2)
+
+ plt.show()
\ No newline at end of file
diff --git a/PathPlanning/TimeBasedPathPlanning/PriorityBasedPlanner.py b/PathPlanning/TimeBasedPathPlanning/PriorityBasedPlanner.py
new file mode 100644
index 0000000000..2573965cf6
--- /dev/null
+++ b/PathPlanning/TimeBasedPathPlanning/PriorityBasedPlanner.py
@@ -0,0 +1,95 @@
+"""
+Priority Based Planner for multi agent path planning.
+The planner generates an order to plan in, and generates plans for the robots in that order. Each planned
+path is reserved in the grid, and all future plans must avoid that path.
+
+Algorithm outlined in section III of this paper: https://pure.tudelft.nl/ws/portalfiles/portal/67074672/07138650.pdf
+"""
+
+import numpy as np
+from PathPlanning.TimeBasedPathPlanning.GridWithDynamicObstacles import (
+ Grid,
+ Interval,
+ ObstacleArrangement,
+ Position,
+)
+from PathPlanning.TimeBasedPathPlanning.BaseClasses import MultiAgentPlanner, StartAndGoal
+from PathPlanning.TimeBasedPathPlanning.Node import NodePath
+from PathPlanning.TimeBasedPathPlanning.BaseClasses import SingleAgentPlanner
+from PathPlanning.TimeBasedPathPlanning.SafeInterval import SafeIntervalPathPlanner
+from PathPlanning.TimeBasedPathPlanning.Plotting import PlotNodePaths
+import time
+
+class PriorityBasedPlanner(MultiAgentPlanner):
+
+ @staticmethod
+ def plan(grid: Grid, start_and_goals: list[StartAndGoal], single_agent_planner_class: SingleAgentPlanner, verbose: bool = False) -> tuple[list[StartAndGoal], list[NodePath]]:
+ """
+ Generate a path from the start to the goal for each agent in the `start_and_goals` list.
+        Returns the re-ordered StartAndGoal combinations and a list of path plans, where the i-th plan
+        corresponds to the i-th entry of the returned (re-ordered) list of StartAndGoal objects.
+ """
+ print(f"Using single-agent planner: {single_agent_planner_class}")
+
+ # Reserve initial positions
+ for start_and_goal in start_and_goals:
+ grid.reserve_position(start_and_goal.start, start_and_goal.index, Interval(0, 10))
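+            # Note: the 10-step reservation horizon is an arbitrary choice; each
+            # initial reservation is cleared again just before its agent plans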
+
+ # Plan in descending order of distance from start to goal
+ start_and_goals = sorted(start_and_goals,
+ key=lambda item: item.distance_start_to_goal(),
+ reverse=True)
+
+ paths = []
+ for start_and_goal in start_and_goals:
+ if verbose:
+ print(f"\nPlanning for agent: {start_and_goal}" )
+
+ grid.clear_initial_reservation(start_and_goal.start, start_and_goal.index)
+ path = single_agent_planner_class.plan(grid, start_and_goal.start, start_and_goal.goal, verbose)
+
+ if path is None:
+ print(f"Failed to find path for {start_and_goal}")
+            return ([], [])
+
+ agent_index = start_and_goal.index
+ grid.reserve_path(path, agent_index)
+ paths.append(path)
+
+ return (start_and_goals, paths)
+
+verbose = False
+show_animation = True
+def main():
+ grid_side_length = 21
+
+ start_and_goals = [StartAndGoal(i, Position(1, i), Position(19, 19-i)) for i in range(1, 16)]
+ obstacle_avoid_points = [pos for item in start_and_goals for pos in (item.start, item.goal)]
+
+ grid = Grid(
+ np.array([grid_side_length, grid_side_length]),
+ num_obstacles=250,
+ obstacle_avoid_points=obstacle_avoid_points,
+ # obstacle_arrangement=ObstacleArrangement.NARROW_CORRIDOR,
+ obstacle_arrangement=ObstacleArrangement.ARRANGEMENT1,
+ # obstacle_arrangement=ObstacleArrangement.RANDOM,
+ )
+
+ start_time = time.time()
+ start_and_goals, paths = PriorityBasedPlanner.plan(grid, start_and_goals, SafeIntervalPathPlanner, verbose)
+
+ runtime = time.time() - start_time
+ print(f"\nPlanning took: {runtime:.5f} seconds")
+
+ if verbose:
+ print(f"Paths:")
+ for path in paths:
+ print(f"{path}\n")
+
+ if not show_animation:
+ return
+
+ PlotNodePaths(grid, start_and_goals, paths)
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/PathPlanning/TimeBasedPathPlanning/SafeInterval.py b/PathPlanning/TimeBasedPathPlanning/SafeInterval.py
new file mode 100644
index 0000000000..446847ac6d
--- /dev/null
+++ b/PathPlanning/TimeBasedPathPlanning/SafeInterval.py
@@ -0,0 +1,212 @@
+"""
+Safe interval path planner
+    This script implements a safe-interval path planner for a 2D grid with dynamic obstacles. It is faster
+    than Space-time A* because it reduces the number of redundant node expansions by pre-computing contiguous
+    runs of time steps that are safe ("safe intervals") at each position. This lets the algorithm skip
+    expanding nodes that fall inside intervals that have already been visited at an earlier entry time.
+
+ Reference: https://www.cs.cmu.edu/~maxim/files/sipp_icra11.pdf
+"""
+
+import numpy as np
+from PathPlanning.TimeBasedPathPlanning.GridWithDynamicObstacles import (
+ Grid,
+ Interval,
+ ObstacleArrangement,
+ Position,
+ empty_2d_array_of_lists,
+)
+from PathPlanning.TimeBasedPathPlanning.BaseClasses import SingleAgentPlanner
+from PathPlanning.TimeBasedPathPlanning.Node import Node, NodePath
+from PathPlanning.TimeBasedPathPlanning.Plotting import PlotNodePath
+
+import heapq
+from dataclasses import dataclass
+from functools import total_ordering
+import time
+
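+# Illustrative sketch (not used by the planner): how the safe intervals for one
+# cell can be derived from the sorted integer time steps at which that cell is
+# occupied. The planner itself gets its intervals from Grid.get_safe_intervals();
+# this helper only demonstrates the idea, assuming an inclusive time horizon.
+def _example_safe_intervals(occupied_times: list[int], horizon: int) -> list[tuple[int, int]]:
+    intervals = []
+    start = 0
+    for t in sorted(occupied_times):
+        if t > start:
+            intervals.append((start, t - 1))  # free run before this occupation
+        start = t + 1
+    if start <= horizon:
+        intervals.append((start, horizon))  # trailing free run up to the horizon
+    return intervals
+
+# e.g. _example_safe_intervals([3, 7], 10) -> [(0, 2), (4, 6), (8, 10)]
+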
+# Note: @total_ordering is used instead of adding `order=True` to the @dataclass decorator because
+# this class needs to override the __lt__ and __eq__ methods to ignore parent_index. The parent_index
+# and interval member variables are only used to track the path found by the algorithm, and have no
+# effect on the quality of a node.
+@dataclass()
+@total_ordering
+class SIPPNode(Node):
+ interval: Interval
+
+@dataclass
+class EntryTimeAndInterval:
+ entry_time: int
+ interval: Interval
+
+class SafeIntervalPathPlanner(SingleAgentPlanner):
+
+    """
+    Generate a plan for the given problem. Raises an exception if no path is found.
+    Arguments:
+        grid (Grid): grid with dynamic obstacles to plan on
+        start, goal (Position): start and goal positions
+        verbose (bool): set to True to print debug information
+    """
+ @staticmethod
+ def plan(grid: Grid, start: Position, goal: Position, verbose: bool = False) -> NodePath:
+
+ safe_intervals = grid.get_safe_intervals()
+
+ open_set: list[SIPPNode] = []
+ first_node_interval = safe_intervals[start.x, start.y][0]
+ heapq.heappush(
+ open_set, SIPPNode(start, 0, SafeIntervalPathPlanner.calculate_heuristic(start, goal), -1, first_node_interval)
+ )
+
+ expanded_list: list[SIPPNode] = []
+ visited_intervals = empty_2d_array_of_lists(grid.grid_size[0], grid.grid_size[1])
+ while open_set:
+ expanded_node: SIPPNode = heapq.heappop(open_set)
+ if verbose:
+ print("Expanded node:", expanded_node)
+
+ if expanded_node.time + 1 >= grid.time_limit:
+ if verbose:
+ print(f"\tSkipping node that is past time limit: {expanded_node}")
+ continue
+
+ if expanded_node.position == goal:
+ if verbose:
+ print(f"Found path to goal after {len(expanded_list)} expansions")
+
+ path = []
+ path_walker: SIPPNode = expanded_node
+ while True:
+ path.append(path_walker)
+ if path_walker.parent_index == -1:
+ break
+ path_walker = expanded_list[path_walker.parent_index]
+
+ # reverse path so it goes start -> goal
+ path.reverse()
+ return NodePath(path, len(expanded_list))
+
+ expanded_idx = len(expanded_list)
+ expanded_list.append(expanded_node)
+            entry_time_and_interval = EntryTimeAndInterval(expanded_node.time, expanded_node.interval)
+            add_entry_to_visited_intervals_array(entry_time_and_interval, visited_intervals, expanded_node)
+
+ for child in SafeIntervalPathPlanner.generate_successors(grid, goal, expanded_node, expanded_idx, safe_intervals, visited_intervals):
+ heapq.heappush(open_set, child)
+
+ raise Exception("No path found")
+
+ """
+    Generate a list of possible successors of the provided `parent_node` that are worth expanding
+ """
+ @staticmethod
+ def generate_successors(
+ grid: Grid, goal: Position, parent_node: SIPPNode, parent_node_idx: int, intervals: np.ndarray, visited_intervals: np.ndarray
+ ) -> list[SIPPNode]:
+ new_nodes = []
+ diffs = [
+ Position(0, 0),
+ Position(1, 0),
+ Position(-1, 0),
+ Position(0, 1),
+ Position(0, -1),
+ ]
+ for diff in diffs:
+ new_pos = parent_node.position + diff
+ if not grid.inside_grid_bounds(new_pos):
+ continue
+
+ current_interval = parent_node.interval
+
+ new_cell_intervals: list[Interval] = intervals[new_pos.x, new_pos.y]
+ for interval in new_cell_intervals:
+ # if interval starts after current ends, break
+ # assumption: intervals are sorted by start time, so all future intervals will hit this condition as well
+ if interval.start_time > current_interval.end_time:
+ break
+
+ # if interval ends before current starts, skip
+ if interval.end_time < current_interval.start_time:
+ continue
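+                # Example (illustrative): with current_interval = (4, 9), a neighbor
+                # interval (12, 15) hits the break above, (0, 2) is skipped here, and
+                # (6, 14) overlaps, so it goes on to the visited-interval check below.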
+
+ # if we have already expanded a node in this interval with a <= starting time, skip
+ better_node_expanded = False
+ for visited in visited_intervals[new_pos.x, new_pos.y]:
+ if interval == visited.interval and visited.entry_time <= parent_node.time + 1:
+ better_node_expanded = True
+ break
+ if better_node_expanded:
+ continue
+
+ # We know there is a node worth expanding. Generate successor at the earliest possible time the
+ # new interval can be entered
+                for possible_t in range(max(parent_node.time + 1, interval.start_time), min(current_interval.end_time, interval.end_time)):
+                    if grid.valid_position(new_pos, possible_t):
+                        new_nodes.append(SIPPNode(
+                            new_pos,
+                            # enter at the earliest time that is valid and inside both intervals
+                            possible_t,
+                            SafeIntervalPathPlanner.calculate_heuristic(new_pos, goal),
+                            parent_node_idx,
+                            interval,
+                        ))
+                        # break: any later t yields a node in the same interval with a higher cost and the same heuristic
+                        break
+
+ return new_nodes
+
+ """
+ Calculate the heuristic for a given position - Manhattan distance to the goal
+ """
+ @staticmethod
+ def calculate_heuristic(position: Position, goal: Position) -> int:
+ diff = goal - position
+ return abs(diff.x) + abs(diff.y)
+
+
+"""
+Adds a new entry to the visited intervals array. If the entry is already present, the entry time is updated if the new
+entry time is better. Otherwise, the entry is added to `visited_intervals` at the position of `expanded_node`.
+"""
+def add_entry_to_visited_intervals_array(entry_time_and_interval: EntryTimeAndInterval, visited_intervals: np.ndarray, expanded_node: SIPPNode):
+    # if an entry for this interval is already present, update its entry time if the new one is better
+    for existing_entry_and_interval in visited_intervals[expanded_node.position.x, expanded_node.position.y]:
+        if existing_entry_and_interval.interval == entry_time_and_interval.interval:
+            existing_entry_and_interval.entry_time = min(existing_entry_and_interval.entry_time, entry_time_and_interval.entry_time)
+            return
+
+    # otherwise, append a new entry
+    visited_intervals[expanded_node.position.x, expanded_node.position.y].append(entry_time_and_interval)
+
+
+show_animation = True
+verbose = False
+
+def main():
+ start = Position(1, 18)
+ goal = Position(19, 19)
+ grid_side_length = 21
+
+ start_time = time.time()
+
+ grid = Grid(
+ np.array([grid_side_length, grid_side_length]),
+ num_obstacles=250,
+ obstacle_avoid_points=[start, goal],
+ obstacle_arrangement=ObstacleArrangement.ARRANGEMENT1,
+ # obstacle_arrangement=ObstacleArrangement.RANDOM,
+ )
+
+ path = SafeIntervalPathPlanner.plan(grid, start, goal, verbose)
+ runtime = time.time() - start_time
+ print(f"Planning took: {runtime:.5f} seconds")
+
+ if verbose:
+ print(f"Path: {path}")
+
+ if not show_animation:
+ return
+
+ PlotNodePath(grid, start, goal, path)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/PathPlanning/TimeBasedPathPlanning/SpaceTimeAStar.py b/PathPlanning/TimeBasedPathPlanning/SpaceTimeAStar.py
index 3b3613d695..b85569f5dc 100644
--- a/PathPlanning/TimeBasedPathPlanning/SpaceTimeAStar.py
+++ b/PathPlanning/TimeBasedPathPlanning/SpaceTimeAStar.py
@@ -3,130 +3,66 @@
This script demonstrates the Space-time A* algorithm for path planning in a grid world with moving obstacles.
This algorithm is different from normal 2D A* in one key way - the cost (often notated as g(n)) is
the number of time steps it took to get to a given node, instead of the number of cells it has
- traversed. This ensures the path is time-optimal, while respescting any dynamic obstacles in the environment.
+ traversed. This ensures the path is time-optimal, while respecting any dynamic obstacles in the environment.
Reference: https://www.davidsilver.uk/wp-content/uploads/2020/03/coop-path-AIWisdom.pdf
"""
import numpy as np
-import matplotlib.pyplot as plt
from PathPlanning.TimeBasedPathPlanning.GridWithDynamicObstacles import (
Grid,
ObstacleArrangement,
Position,
)
+from PathPlanning.TimeBasedPathPlanning.Node import Node, NodePath
import heapq
from collections.abc import Generator
-import random
-from dataclasses import dataclass
-from functools import total_ordering
-
-
-# Seed randomness for reproducibility
-RANDOM_SEED = 50
-random.seed(RANDOM_SEED)
-np.random.seed(RANDOM_SEED)
-
-@dataclass()
-# Note: Total_ordering is used instead of adding `order=True` to the @dataclass decorator because
-# this class needs to override the __lt__ and __eq__ methods to ignore parent_index. Parent
-# index is just used to track the path found by the algorithm, and has no effect on the quality
-# of a node.
-@total_ordering
-class Node:
- position: Position
- time: int
- heuristic: int
- parent_index: int
-
- """
- This is what is used to drive node expansion. The node with the lowest value is expanded next.
- This comparison prioritizes the node with the lowest cost-to-come (self.time) + cost-to-go (self.heuristic)
- """
- def __lt__(self, other: object):
- if not isinstance(other, Node):
- return NotImplementedError(f"Cannot compare Node with object of type: {type(other)}")
- return (self.time + self.heuristic) < (other.time + other.heuristic)
-
- def __eq__(self, other: object):
- if not isinstance(other, Node):
- return NotImplementedError(f"Cannot compare Node with object of type: {type(other)}")
- return self.position == other.position and self.time == other.time
-
-
-class NodePath:
- path: list[Node]
- positions_at_time: dict[int, Position] = {}
-
- def __init__(self, path: list[Node]):
- self.path = path
- for node in path:
- self.positions_at_time[node.time] = node.position
-
- """
- Get the position of the path at a given time
- """
- def get_position(self, time: int) -> Position | None:
- return self.positions_at_time.get(time)
-
- """
- Time stamp of the last node in the path
- """
- def goal_reached_time(self) -> int:
- return self.path[-1].time
-
- def __repr__(self):
- repr_string = ""
- for i, node in enumerate(self.path):
- repr_string += f"{i}: {node}\n"
- return repr_string
+import time
+from PathPlanning.TimeBasedPathPlanning.BaseClasses import SingleAgentPlanner
+from PathPlanning.TimeBasedPathPlanning.Plotting import PlotNodePath
-class SpaceTimeAStar:
- grid: Grid
- start: Position
- goal: Position
-
- def __init__(self, grid: Grid, start: Position, goal: Position):
- self.grid = grid
- self.start = start
- self.goal = goal
-
- def plan(self, verbose: bool = False) -> NodePath:
+class SpaceTimeAStar(SingleAgentPlanner):
+    @staticmethod
+ def plan(grid: Grid, start: Position, goal: Position, verbose: bool = False) -> NodePath:
open_set: list[Node] = []
heapq.heappush(
- open_set, Node(self.start, 0, self.calculate_heuristic(self.start), -1)
+ open_set, Node(start, 0, SpaceTimeAStar.calculate_heuristic(start, goal), -1)
)
- expanded_set: list[Node] = []
+ expanded_list: list[Node] = []
+ expanded_set: set[Node] = set()
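+        # expanded_list preserves insertion order so parent_index lookups can rebuild
+        # the path; expanded_set gives O(1) duplicate checks for generated successors.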
while open_set:
expanded_node: Node = heapq.heappop(open_set)
if verbose:
print("Expanded node:", expanded_node)
- if expanded_node.time + 1 >= self.grid.time_limit:
+ if expanded_node.time + 1 >= grid.time_limit:
if verbose:
print(f"\tSkipping node that is past time limit: {expanded_node}")
continue
- if expanded_node.position == self.goal:
- print(f"Found path to goal after {len(expanded_set)} expansions")
+ if expanded_node.position == goal:
+ if verbose:
+ print(f"Found path to goal after {len(expanded_list)} expansions")
+
path = []
path_walker: Node = expanded_node
while True:
path.append(path_walker)
if path_walker.parent_index == -1:
break
- path_walker = expanded_set[path_walker.parent_index]
+ path_walker = expanded_list[path_walker.parent_index]
# reverse path so it goes start -> goal
path.reverse()
- return NodePath(path)
+ return NodePath(path, len(expanded_set))
- expanded_idx = len(expanded_set)
- expanded_set.append(expanded_node)
+ expanded_idx = len(expanded_list)
+ expanded_list.append(expanded_node)
+ expanded_set.add(expanded_node)
- for child in self.generate_successors(expanded_node, expanded_idx, verbose):
+ for child in SpaceTimeAStar.generate_successors(grid, goal, expanded_node, expanded_idx, verbose, expanded_set):
heapq.heappush(open_set, child)
raise Exception("No path found")
@@ -134,8 +70,9 @@ def plan(self, verbose: bool = False) -> NodePath:
"""
Generate possible successors of the provided `parent_node`
"""
+ @staticmethod
def generate_successors(
- self, parent_node: Node, parent_node_idx: int, verbose: bool
+ grid: Grid, goal: Position, parent_node: Node, parent_node_idx: int, verbose: bool, expanded_set: set[Node]
) -> Generator[Node, None, None]:
diffs = [
Position(0, 0),
@@ -146,19 +83,25 @@ def generate_successors(
]
for diff in diffs:
new_pos = parent_node.position + diff
- if self.grid.valid_position(new_pos, parent_node.time + 1):
- new_node = Node(
- new_pos,
- parent_node.time + 1,
- self.calculate_heuristic(new_pos),
- parent_node_idx,
- )
+ new_node = Node(
+ new_pos,
+ parent_node.time + 1,
+ SpaceTimeAStar.calculate_heuristic(new_pos, goal),
+ parent_node_idx,
+ )
+
+ if new_node in expanded_set:
+ continue
+
+            # Check that the new position is valid for the next 2 time steps - one step to enter, and another to leave
+            if all(grid.valid_position(new_pos, parent_node.time + dt) for dt in (1, 2)):
if verbose:
print("\tNew successor node: ", new_node)
yield new_node
- def calculate_heuristic(self, position) -> int:
- diff = self.goal - position
+ @staticmethod
+ def calculate_heuristic(position: Position, goal: Position) -> int:
+ diff = goal - position
return abs(diff.x) + abs(diff.y)
@@ -166,9 +109,12 @@ def calculate_heuristic(self, position) -> int:
verbose = False
def main():
- start = Position(1, 11)
+ start = Position(1, 5)
goal = Position(19, 19)
grid_side_length = 21
+
+ start_time = time.time()
+
grid = Grid(
np.array([grid_side_length, grid_side_length]),
num_obstacles=40,
@@ -176,8 +122,10 @@ def main():
obstacle_arrangement=ObstacleArrangement.ARRANGEMENT1,
)
- planner = SpaceTimeAStar(grid, start, goal)
- path = planner.plan(verbose)
+ path = SpaceTimeAStar.plan(grid, start, goal, verbose)
+
+ runtime = time.time() - start_time
+ print(f"Planning took: {runtime:.5f} seconds")
if verbose:
print(f"Path: {path}")
@@ -185,36 +133,7 @@ def main():
if not show_animation:
return
- fig = plt.figure(figsize=(10, 7))
- ax = fig.add_subplot(
- autoscale_on=False,
- xlim=(0, grid.grid_size[0] - 1),
- ylim=(0, grid.grid_size[1] - 1),
- )
- ax.set_aspect("equal")
- ax.grid()
- ax.set_xticks(np.arange(0, grid_side_length, 1))
- ax.set_yticks(np.arange(0, grid_side_length, 1))
-
- (start_and_goal,) = ax.plot([], [], "mD", ms=15, label="Start and Goal")
- start_and_goal.set_data([start.x, goal.x], [start.y, goal.y])
- (obs_points,) = ax.plot([], [], "ro", ms=15, label="Obstacles")
- (path_points,) = ax.plot([], [], "bo", ms=10, label="Path Found")
- ax.legend(bbox_to_anchor=(1.05, 1))
-
- # for stopping simulation with the esc key.
- plt.gcf().canvas.mpl_connect(
- "key_release_event", lambda event: [exit(0) if event.key == "escape" else None]
- )
-
- for i in range(0, path.goal_reached_time()):
- obs_positions = grid.get_obstacle_positions_at_time(i)
- obs_points.set_data(obs_positions[0], obs_positions[1])
- path_position = path.get_position(i)
- path_points.set_data([path_position.x], [path_position.y])
- plt.pause(0.2)
- plt.show()
-
+ PlotNodePath(grid, start, goal, path)
if __name__ == "__main__":
main()
diff --git a/PathTracking/cgmres_nmpc/cgmres_nmpc.py b/PathTracking/cgmres_nmpc/cgmres_nmpc.py
index a582c9da81..ee40e73504 100644
--- a/PathTracking/cgmres_nmpc/cgmres_nmpc.py
+++ b/PathTracking/cgmres_nmpc/cgmres_nmpc.py
@@ -4,7 +4,7 @@
author Atsushi Sakai (@Atsushi_twi)
-Ref:
+Reference:
Shunichi09/nonlinear_control: Implementing the nonlinear model predictive
control, sliding mode control https://github.com/Shunichi09/PythonLinearNonlinearControl
diff --git a/PathTracking/lqr_speed_steer_control/lqr_speed_steer_control.py b/PathTracking/lqr_speed_steer_control/lqr_speed_steer_control.py
index d85fa98a84..5831d02d30 100644
--- a/PathTracking/lqr_speed_steer_control/lqr_speed_steer_control.py
+++ b/PathTracking/lqr_speed_steer_control/lqr_speed_steer_control.py
@@ -11,9 +11,9 @@
import numpy as np
import scipy.linalg as la
import pathlib
-from utils.angle import angle_mod
-sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
+sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
+from utils.angle import angle_mod
from PathPlanning.CubicSpline import cubic_spline_planner
# === Parameters =====
diff --git a/PathTracking/lqr_steer_control/lqr_steer_control.py b/PathTracking/lqr_steer_control/lqr_steer_control.py
index 461d6e3722..3c066917ff 100644
--- a/PathTracking/lqr_steer_control/lqr_steer_control.py
+++ b/PathTracking/lqr_steer_control/lqr_steer_control.py
@@ -11,9 +11,9 @@
import numpy as np
import sys
import pathlib
-from utils.angle import angle_mod
-sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
+sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
+from utils.angle import angle_mod
from PathPlanning.CubicSpline import cubic_spline_planner
Kp = 1.0 # speed proportional gain
diff --git a/PathTracking/model_predictive_speed_and_steer_control/model_predictive_speed_and_steer_control.py b/PathTracking/model_predictive_speed_and_steer_control/model_predictive_speed_and_steer_control.py
index eb2d7b6a73..205f37e62f 100644
--- a/PathTracking/model_predictive_speed_and_steer_control/model_predictive_speed_and_steer_control.py
+++ b/PathTracking/model_predictive_speed_and_steer_control/model_predictive_speed_and_steer_control.py
@@ -318,7 +318,7 @@ def calc_ref_trajectory(state, cx, cy, cyaw, ck, sp, dl, pind):
travel = 0.0
- for i in range(T + 1):
+ for i in range(1, T + 1):
travel += abs(state.v) * DT
dind = int(round(travel / dl))
diff --git a/PathTracking/move_to_pose/move_to_pose.py b/PathTracking/move_to_pose/move_to_pose.py
index 34736a2e21..faf1264953 100644
--- a/PathTracking/move_to_pose/move_to_pose.py
+++ b/PathTracking/move_to_pose/move_to_pose.py
@@ -76,7 +76,7 @@ def calc_control_command(self, x_diff, y_diff, theta, theta_goal):
# [-pi, pi] to prevent unstable behavior e.g. difference going
# from 0 rad to 2*pi rad with slight turn
- # Ref: The velocity v always has a constant sign which depends on the initial value of α.
+ # The velocity v always has a constant sign which depends on the initial value of α.
rho = np.hypot(x_diff, y_diff)
v = self.Kp_rho * rho
diff --git a/PathTracking/pure_pursuit/pure_pursuit.py b/PathTracking/pure_pursuit/pure_pursuit.py
index ff995033a9..48453927ab 100644
--- a/PathTracking/pure_pursuit/pure_pursuit.py
+++ b/PathTracking/pure_pursuit/pure_pursuit.py
@@ -10,6 +10,11 @@
import math
import matplotlib.pyplot as plt
+import sys
+import pathlib
+sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
+from utils.angle import angle_mod
+
# Parameters
k = 0.1 # look forward gain
Lfc = 2.0 # [m] look-ahead distance
@@ -17,26 +22,37 @@
dt = 0.1 # [s] time tick
WB = 2.9 # [m] wheel base of vehicle
-show_animation = True
+# Vehicle parameters
+LENGTH = WB + 1.0 # Vehicle length
+WIDTH = 2.0 # Vehicle width
+WHEEL_LEN = 0.6 # Wheel length
+WHEEL_WIDTH = 0.2 # Wheel width
+MAX_STEER = math.pi / 4 # Maximum steering angle [rad]
-class State:
- def __init__(self, x=0.0, y=0.0, yaw=0.0, v=0.0):
+show_animation = True
+pause_simulation = False # Flag for pause simulation
+is_reverse_mode = False # Flag for reverse driving mode
+
+class State:
+ def __init__(self, x=0.0, y=0.0, yaw=0.0, v=0.0, is_reverse=False):
self.x = x
self.y = y
self.yaw = yaw
self.v = v
- self.rear_x = self.x - ((WB / 2) * math.cos(self.yaw))
- self.rear_y = self.y - ((WB / 2) * math.sin(self.yaw))
+ self.direction = -1 if is_reverse else 1 # Direction based on reverse flag
+ self.rear_x = self.x - self.direction * ((WB / 2) * math.cos(self.yaw))
+ self.rear_y = self.y - self.direction * ((WB / 2) * math.sin(self.yaw))
def update(self, a, delta):
self.x += self.v * math.cos(self.yaw) * dt
self.y += self.v * math.sin(self.yaw) * dt
- self.yaw += self.v / WB * math.tan(delta) * dt
+ self.yaw += self.direction * self.v / WB * math.tan(delta) * dt
+ self.yaw = angle_mod(self.yaw)
self.v += a * dt
- self.rear_x = self.x - ((WB / 2) * math.cos(self.yaw))
- self.rear_y = self.y - ((WB / 2) * math.sin(self.yaw))
+ self.rear_x = self.x - self.direction * ((WB / 2) * math.cos(self.yaw))
+ self.rear_y = self.y - self.direction * ((WB / 2) * math.sin(self.yaw))
def calc_distance(self, point_x, point_y):
dx = self.rear_x - point_x
@@ -51,6 +67,7 @@ def __init__(self):
self.y = []
self.yaw = []
self.v = []
+ self.direction = []
self.t = []
def append(self, t, state):
@@ -58,6 +75,7 @@ def append(self, t, state):
self.y.append(state.y)
self.yaw.append(state.yaw)
self.v.append(state.v)
+ self.direction.append(state.direction)
self.t.append(t)
@@ -117,14 +135,18 @@ def pure_pursuit_steer_control(state, trajectory, pind):
if ind < len(trajectory.cx):
tx = trajectory.cx[ind]
ty = trajectory.cy[ind]
- else: # toward goal
+ else:
tx = trajectory.cx[-1]
ty = trajectory.cy[-1]
ind = len(trajectory.cx) - 1
alpha = math.atan2(ty - state.rear_y, tx - state.rear_x) - state.yaw
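+    # Pure pursuit steering law: delta = atan(2 * WB * sin(alpha) / Lf), where Lf is
+    # the look-ahead distance; atan2(x, 1.0) is equivalent to atan(x) here.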
- delta = math.atan2(2.0 * WB * math.sin(alpha) / Lf, 1.0)
+ # Reverse steering angle when reversing
+ delta = state.direction * math.atan2(2.0 * WB * math.sin(alpha) / Lf, 1.0)
+
+ # Limit steering angle to max value
+ delta = np.clip(delta, -MAX_STEER, MAX_STEER)
return delta, ind
@@ -142,10 +164,111 @@ def plot_arrow(x, y, yaw, length=1.0, width=0.5, fc="r", ec="k"):
fc=fc, ec=ec, head_width=width, head_length=width)
plt.plot(x, y)
+def plot_vehicle(x, y, yaw, steer=0.0, color='blue', is_reverse=False):
+ """
+ Plot vehicle model with four wheels
+ Args:
+ x, y: Vehicle center position
+ yaw: Vehicle heading angle
+ steer: Steering angle
+ color: Vehicle color
+ is_reverse: Flag for reverse mode
+ """
+ # Adjust heading angle in reverse mode
+ if is_reverse:
+ yaw = angle_mod(yaw + math.pi) # Rotate heading by 180 degrees
+ steer = -steer # Reverse steering direction
+
+ def plot_wheel(x, y, yaw, steer=0.0, color=color):
+ """Plot single wheel"""
+ wheel = np.array([
+ [-WHEEL_LEN/2, WHEEL_WIDTH/2],
+ [WHEEL_LEN/2, WHEEL_WIDTH/2],
+ [WHEEL_LEN/2, -WHEEL_WIDTH/2],
+ [-WHEEL_LEN/2, -WHEEL_WIDTH/2],
+ [-WHEEL_LEN/2, WHEEL_WIDTH/2]
+ ])
+
+ # Rotate wheel if steering
+ if steer != 0:
+ c, s = np.cos(steer), np.sin(steer)
+ rot_steer = np.array([[c, -s], [s, c]])
+ wheel = wheel @ rot_steer.T
+
+ # Apply vehicle heading rotation
+ c, s = np.cos(yaw), np.sin(yaw)
+ rot_yaw = np.array([[c, -s], [s, c]])
+ wheel = wheel @ rot_yaw.T
+
+ # Translate to position
+ wheel[:, 0] += x
+ wheel[:, 1] += y
+
+ # Plot wheel with color
+ plt.plot(wheel[:, 0], wheel[:, 1], color=color)
+
+ # Calculate vehicle body corners
+ corners = np.array([
+ [-LENGTH/2, WIDTH/2],
+ [LENGTH/2, WIDTH/2],
+ [LENGTH/2, -WIDTH/2],
+ [-LENGTH/2, -WIDTH/2],
+ [-LENGTH/2, WIDTH/2]
+ ])
+
+ # Rotation matrix
+ c, s = np.cos(yaw), np.sin(yaw)
+ Rot = np.array([[c, -s], [s, c]])
+
+ # Rotate and translate vehicle body
+ rotated = corners @ Rot.T
+ rotated[:, 0] += x
+ rotated[:, 1] += y
+
+ # Plot vehicle body
+ plt.plot(rotated[:, 0], rotated[:, 1], color=color)
+
+ # Plot wheels (darker color for front wheels)
+ front_color = 'darkblue'
+ rear_color = color
+
+ # Plot four wheels
+ # Front left
+ plot_wheel(x + LENGTH/4 * c - WIDTH/2 * s,
+ y + LENGTH/4 * s + WIDTH/2 * c,
+ yaw, steer, front_color)
+ # Front right
+ plot_wheel(x + LENGTH/4 * c + WIDTH/2 * s,
+ y + LENGTH/4 * s - WIDTH/2 * c,
+ yaw, steer, front_color)
+ # Rear left
+ plot_wheel(x - LENGTH/4 * c - WIDTH/2 * s,
+ y - LENGTH/4 * s + WIDTH/2 * c,
+ yaw, color=rear_color)
+ # Rear right
+ plot_wheel(x - LENGTH/4 * c + WIDTH/2 * s,
+ y - LENGTH/4 * s - WIDTH/2 * c,
+ yaw, color=rear_color)
+
+ # Add direction arrow
+ arrow_length = LENGTH/3
+ plt.arrow(x, y,
+ -arrow_length * math.cos(yaw) if is_reverse else arrow_length * math.cos(yaw),
+ -arrow_length * math.sin(yaw) if is_reverse else arrow_length * math.sin(yaw),
+ head_width=WIDTH/4, head_length=WIDTH/4,
+ fc='r', ec='r', alpha=0.5)
+
+# Keyboard event handler
+def on_key(event):
+ global pause_simulation
+ if event.key == ' ': # Space key
+ pause_simulation = not pause_simulation
+ elif event.key == 'escape':
+ exit(0)
def main():
# target course
- cx = np.arange(0, 50, 0.5)
+ cx = -1 * np.arange(0, 50, 0.5) if is_reverse_mode else np.arange(0, 50, 0.5)
cy = [math.sin(ix / 5.0) * ix / 2.0 for ix in cx]
target_speed = 10.0 / 3.6 # [m/s]
@@ -153,7 +276,7 @@ def main():
T = 100.0 # max simulation time
# initial state
- state = State(x=-0.0, y=-3.0, yaw=0.0, v=0.0)
+ state = State(x=-0.0, y=-3.0, yaw=math.pi if is_reverse_mode else 0.0, v=0.0, is_reverse=is_reverse_mode)
lastIndex = len(cx) - 1
time = 0.0
@@ -173,22 +296,33 @@ def main():
time += dt
states.append(time, state)
-
if show_animation: # pragma: no cover
plt.cla()
# for stopping simulation with the esc key.
- plt.gcf().canvas.mpl_connect(
- 'key_release_event',
- lambda event: [exit(0) if event.key == 'escape' else None])
- plot_arrow(state.x, state.y, state.yaw)
+ plt.gcf().canvas.mpl_connect('key_release_event', on_key)
+ # Pass is_reverse parameter
+ plot_vehicle(state.x, state.y, state.yaw, di, is_reverse=is_reverse_mode)
plt.plot(cx, cy, "-r", label="course")
plt.plot(states.x, states.y, "-b", label="trajectory")
plt.plot(cx[target_ind], cy[target_ind], "xg", label="target")
plt.axis("equal")
plt.grid(True)
plt.title("Speed[km/h]:" + str(state.v * 3.6)[:4])
+ plt.legend() # Add legend display
+
+ # Add pause state display
+ if pause_simulation:
+ plt.text(0.02, 0.95, 'PAUSED', transform=plt.gca().transAxes,
+ bbox=dict(facecolor='red', alpha=0.5))
+
plt.pause(0.001)
+ # Handle pause state
+ while pause_simulation:
+ plt.pause(0.1) # Reduce CPU usage
+ if not plt.get_fignums(): # Check if window is closed
+ exit(0)
+
# Test
assert lastIndex >= target_ind, "Cannot goal"
diff --git a/PathTracking/rear_wheel_feedback/rear_wheel_feedback.py b/PathTracking/rear_wheel_feedback_control/rear_wheel_feedback_control.py
similarity index 98%
rename from PathTracking/rear_wheel_feedback/rear_wheel_feedback.py
rename to PathTracking/rear_wheel_feedback_control/rear_wheel_feedback_control.py
index 1702bd47dd..fd04fb6d17 100644
--- a/PathTracking/rear_wheel_feedback/rear_wheel_feedback.py
+++ b/PathTracking/rear_wheel_feedback_control/rear_wheel_feedback_control.py
@@ -8,11 +8,14 @@
import matplotlib.pyplot as plt
import math
import numpy as np
-from utils.angle import angle_mod
-
+import sys
+import pathlib
from scipy import interpolate
from scipy import optimize
+sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
+from utils.angle import angle_mod
+
Kp = 1.0 # speed proportional gain
# steering control parameter
KTH = 1.0
diff --git a/PathTracking/stanley_controller/stanley_controller.py b/PathTracking/stanley_control/stanley_control.py
similarity index 99%
rename from PathTracking/stanley_controller/stanley_controller.py
rename to PathTracking/stanley_control/stanley_control.py
index 08aa049395..01c2ec0229 100644
--- a/PathTracking/stanley_controller/stanley_controller.py
+++ b/PathTracking/stanley_control/stanley_control.py
@@ -4,7 +4,7 @@
author: Atsushi Sakai (@Atsushi_twi)
-Ref:
+Reference:
- [Stanley: The robot that won the DARPA grand challenge](http://isl.ecst.csuchico.edu/DOCS/darpa2005/DARPA%202005%20Stanley.pdf)
- [Autonomous Automobile Path Tracking](https://www.ri.cmu.edu/pub_files/2009/2/Automatic_Steering_Methods_for_Autonomous_Automobile_Path_Tracking.pdf)
@@ -13,9 +13,9 @@
import matplotlib.pyplot as plt
import sys
import pathlib
-from utils.angle import angle_mod
-sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
+sys.path.append(str(pathlib.Path(__file__).parent.parent.parent))
+from utils.angle import angle_mod
from PathPlanning.CubicSpline import cubic_spline_planner
k = 0.5 # control gain
diff --git a/README.md b/README.md
index 9e605435ce..65445fa4ce 100644
--- a/README.md
+++ b/README.md
@@ -36,6 +36,7 @@ Python codes and [textbook](https://atsushisakai.github.io/PythonRobotics/index.
* [D* Lite algorithm](#d-lite-algorithm)
* [Potential Field algorithm](#potential-field-algorithm)
* [Grid based coverage path planning](#grid-based-coverage-path-planning)
+ * [Particle Swarm Optimization (PSO)](#particle-swarm-optimization-pso)
* [State Lattice Planning](#state-lattice-planning)
* [Biased polar sampling](#biased-polar-sampling)
* [Lane sampling](#lane-sampling)
@@ -87,7 +88,7 @@ Features:
See this documentation
-- [Getting Started — PythonRobotics documentation](https://atsushisakai.github.io/PythonRobotics/getting_started.html#what-is-pythonrobotics)
+- [Getting Started — PythonRobotics documentation](https://atsushisakai.github.io/PythonRobotics/modules/0_getting_started/1_what_is_python_robotics.html)
or this Youtube video:
@@ -102,7 +103,7 @@ or this paper for more details:
For running each sample code:
-- [Python 3.12.x](https://www.python.org/)
+- [Python 3.13.x](https://www.python.org/)
- [NumPy](https://numpy.org/)
@@ -168,9 +169,9 @@ All animation gifs are stored here: [AtsushiSakai/PythonRoboticsGifs: Animation
-Ref:
+Reference
-- [documentation](https://atsushisakai.github.io/PythonRobotics/modules/localization/extended_kalman_filter_localization_files/extended_kalman_filter_localization.html)
+- [documentation](https://atsushisakai.github.io/PythonRobotics/modules/2_localization/extended_kalman_filter_localization_files/extended_kalman_filter_localization.html)
## Particle filter localization
@@ -186,7 +187,7 @@ It is assumed that the robot can measure a distance from landmarks (RFID).
These measurements are used for PF localization.
-Ref:
+Reference
- [PROBABILISTIC ROBOTICS](http://www.probabilistic-robotics.org/)
@@ -207,7 +208,7 @@ The filter integrates speed input and range observations from RFID for localizat
Initial position is not needed.
-Ref:
+Reference
- [PROBABILISTIC ROBOTICS](http://www.probabilistic-robotics.org/)
@@ -256,7 +257,7 @@ It can calculate a rotation matrix, and a translation vector between points and

-Ref:
+Reference
- [Introduction to Mobile Robotics: Iterative Closest Point Algorithm](https://cs.gmu.edu/~kosecka/cs685/cs685-icp.pdf)
@@ -275,7 +276,7 @@ Black points are landmarks, blue crosses are estimated landmark positions by Fas

-Ref:
+Reference
- [PROBABILISTIC ROBOTICS](http://www.probabilistic-robotics.org/)
@@ -321,7 +322,7 @@ This is a 2D grid based the shortest path planning with D star algorithm.
The animation shows a robot finding its path avoiding an obstacle using the D* search algorithm.
-Ref:
+Reference
- [D* Algorithm Wikipedia](https://en.wikipedia.org/wiki/D*)
@@ -346,7 +347,7 @@ This is a 2D grid based path planning with Potential Field algorithm.
In the animation, the blue heat map shows potential value on each grid.
-Ref:
+Reference
- [Robotic Motion Planning:Potential Functions](https://www.cs.cmu.edu/~motionplanning/lecture/Chap4-Potential-Field_howie.pdf)
@@ -356,17 +357,35 @@ This is a 2D grid based coverage path planning simulation.

+### Particle Swarm Optimization (PSO)
+
+This is a 2D path planning simulation using the Particle Swarm Optimization algorithm.
+
+
+
+PSO is a metaheuristic optimization algorithm inspired by bird flocking behavior. In path planning, particles explore the search space and converge on collision-free paths around the obstacles.
+
+The animation shows particles (blue dots) converging towards the optimal path (yellow line) from start (green area) to goal (red star).
+
+References
+
+- [Particle swarm optimization - Wikipedia](https://en.wikipedia.org/wiki/Particle_swarm_optimization)
+
+- [Kennedy, J.; Eberhart, R. (1995). "Particle Swarm Optimization"](https://ieeexplore.ieee.org/document/488968)
+
+
+
## State Lattice Planning
This script is a path planning code with state lattice planning.
This code uses the model predictive trajectory generator to solve boundary problem.
-Ref:
+Reference
- [Optimal rough terrain trajectory generation for wheeled mobile robots](https://journals.sagepub.com/doi/pdf/10.1177/0278364906075328)
-- [State Space Sampling of Feasible Motions for High-Performance Mobile Robot Navigation in Complex Environments](https://www.frc.ri.cmu.edu/~alonzo/pubs/papers/JFR_08_SS_Sampling.pdf)
+- [State Space Sampling of Feasible Motions for High-Performance Mobile Robot Navigation in Complex Environments](https://www.cs.cmu.edu/~alonzo/pubs/papers/JFR_08_SS_Sampling.pdf)
### Biased polar sampling
@@ -390,7 +409,7 @@ Cyan crosses means searched points with Dijkstra method,
The red line is the final path of PRM.
-Ref:
+Reference
- [Probabilistic roadmap \- Wikipedia](https://en.wikipedia.org/wiki/Probabilistic_roadmap)
@@ -406,7 +425,7 @@ This is a path planning code with RRT\*
Black circles are obstacles, green line is a searched tree, red crosses are start and goal positions.
-Ref:
+Reference
- [Incremental Sampling-based Algorithms for Optimal Motion Planning](https://arxiv.org/abs/1005.0416)
@@ -426,7 +445,7 @@ A double integrator motion model is used for LQR local planner.

-Ref:
+Reference
- [LQR\-RRT\*: Optimal Sampling\-Based Motion Planning with Automatically Derived Extension Heuristics](https://lis.csail.mit.edu/pubs/perez-icra12.pdf)
@@ -441,7 +460,7 @@ Motion planning with quintic polynomials.
It can calculate a 2D path, velocity, and acceleration profile based on quintic polynomials.
-Ref:
+Reference
- [Local Path Planning And Motion Control For Agv In Positioning](https://ieeexplore.ieee.org/document/637936/)
@@ -451,7 +470,7 @@ A sample code with Reeds Shepp path planning.

-Ref:
+Reference
- [15.3.2 Reeds\-Shepp Curves](http://planning.cs.uiuc.edu/node822.html)
@@ -477,7 +496,7 @@ The cyan line is the target course and black crosses are obstacles.
The red line is the predicted path.
-Ref:
+Reference
- [Optimal Trajectory Generation for Dynamic Street Scenarios in a Frenet Frame](https://www.researchgate.net/profile/Moritz_Werling/publication/224156269_Optimal_Trajectory_Generation_for_Dynamic_Street_Scenarios_in_a_Frenet_Frame/links/54f749df0cf210398e9277af.pdf)
@@ -490,9 +509,9 @@ Ref:
This is a simulation of moving to a pose control
-
+
-Ref:
+Reference
- [P. I. Corke, "Robotics, Vision and Control" \| SpringerLink p102](https://link.springer.com/book/10.1007/978-3-642-20144-8)
@@ -503,7 +522,7 @@ Path tracking simulation with Stanley steering control and PID speed control.

-Ref:
+Reference
- [Stanley: The robot that won the DARPA grand challenge](http://robots.stanford.edu/papers/thrun.stanley05.pdf)
@@ -517,7 +536,7 @@ Path tracking simulation with rear wheel feedback steering control and PID speed

-Ref:
+Reference
- [A Survey of Motion Planning and Control Techniques for Self-driving Urban Vehicles](https://arxiv.org/abs/1604.07446)
@@ -528,7 +547,7 @@ Path tracking simulation with LQR speed and steering control.

-Ref:
+Reference
- [Towards fully autonomous driving: Systems and algorithms \- IEEE Conference Publication](https://ieeexplore.ieee.org/document/5940562/)
@@ -539,9 +558,9 @@ Path tracking simulation with iterative linear model predictive speed and steeri
-Ref:
+Reference
-- [documentation](https://atsushisakai.github.io/PythonRobotics/modules/path_tracking/model_predictive_speed_and_steering_control/model_predictive_speed_and_steering_control.html)
+- [documentation](https://atsushisakai.github.io/PythonRobotics/modules/6_path_tracking/model_predictive_speed_and_steering_control/model_predictive_speed_and_steering_control.html)
- [Real\-time Model Predictive Control \(MPC\), ACADO, Python \| Work\-is\-Playing](http://grauonline.de/wordpress/?page_id=3244)
@@ -551,9 +570,9 @@ A motion planning and path tracking simulation with NMPC of C-GMRES

-Ref:
+Reference
-- [documentation](https://atsushisakai.github.io/PythonRobotics/modules/path_tracking/cgmres_nmpc/cgmres_nmpc.html)
+- [documentation](https://atsushisakai.github.io/PythonRobotics/modules/6_path_tracking/cgmres_nmpc/cgmres_nmpc.html)
# Arm Navigation
@@ -591,9 +610,9 @@ This is a 3d trajectory generation simulation for a rocket powered landing.

-Ref:
+Reference
-- [documentation](https://atsushisakai.github.io/PythonRobotics/modules/aerial_navigation/rocket_powered_landing/rocket_powered_landing.html)
+- [documentation](https://atsushisakai.github.io/PythonRobotics/modules/8_aerial_navigation/rocket_powered_landing/rocket_powered_landing.html)
# Bipedal
diff --git a/SLAM/iterative_closest_point/iterative_closest_point.py b/SLAM/ICPMatching/icp_matching.py
similarity index 100%
rename from SLAM/iterative_closest_point/iterative_closest_point.py
rename to SLAM/ICPMatching/icp_matching.py
diff --git a/appveyor.yml b/appveyor.yml
index 05ad8a2820..72d89acf11 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -8,7 +8,7 @@ environment:
CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\appveyor\\run_with_env.cmd"
matrix:
- - PYTHON_DIR: C:\Python310-x64
+ - PYTHON_DIR: C:\Python313-x64
branches:
only:
diff --git a/docs/_static/img/source_link_1.png b/docs/_static/img/source_link_1.png
new file mode 100644
index 0000000000..53febda7cb
Binary files /dev/null and b/docs/_static/img/source_link_1.png differ
diff --git a/docs/_static/img/source_link_2.png b/docs/_static/img/source_link_2.png
new file mode 100644
index 0000000000..d5647cac60
Binary files /dev/null and b/docs/_static/img/source_link_2.png differ
diff --git a/docs/conf.py b/docs/conf.py
index 919fa9ac76..eeabab11b1 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -13,6 +13,8 @@
#
import os
import sys
+from pathlib import Path
+
sys.path.insert(0, os.path.abspath('../'))
@@ -41,7 +43,7 @@
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
- 'sphinx.ext.viewcode',
+ 'sphinx.ext.linkcode',
'sphinx.ext.napoleon',
'sphinx.ext.imgconverter',
'IPython.sphinxext.ipython_console_highlighting',
@@ -184,4 +186,45 @@
]
-# -- Extension configuration -------------------------------------------------
+# -- linkcode setting -------------------------------------------------
+
+import inspect
+import functools
+
+GITHUB_REPO = "https://github.com/AtsushiSakai/PythonRobotics"
+GITHUB_BRANCH = "master"
+
+
+def linkcode_resolve(domain, info):
+ if domain != "py":
+ return None
+
+ modname = info["module"]
+ fullname = info["fullname"]
+
+ try:
+ module = __import__(modname, fromlist=[fullname])
+ obj = functools.reduce(getattr, fullname.split("."), module)
+ except (ImportError, AttributeError):
+ return None
+
+ try:
+ srcfile = inspect.getsourcefile(obj)
+ srcfile = get_relative_path_from_parent(srcfile, "PythonRobotics")
+ lineno = inspect.getsourcelines(obj)[1]
+ except Exception:
+ return None
+
+ return f"{GITHUB_REPO}/blob/{GITHUB_BRANCH}/{srcfile}#L{lineno}"
+
+
+def get_relative_path_from_parent(file_path: str, parent_dir: str):
+ path = Path(file_path).resolve()
+
+ try:
+ parent_path = next(p for p in path.parents if p.name == parent_dir)
+ return str(path.relative_to(parent_path))
+ except StopIteration:
+ raise ValueError(f"Parent directory '{parent_dir}' not found in {file_path}")
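+
+# Example (illustrative): for domain "py" and
+# info = {"module": "MissionPlanning.StateMachine.state_machine", "fullname": "StateMachine"},
+# linkcode_resolve returns a URL of the form
+# https://github.com/AtsushiSakai/PythonRobotics/blob/master/MissionPlanning/StateMachine/state_machine.py#L<lineno>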
diff --git a/docs/modules/0_getting_started/2_how_to_run_sample_codes_main.rst b/docs/modules/0_getting_started/2_how_to_run_sample_codes_main.rst
index b92fc9bde0..33440bb137 100644
--- a/docs/modules/0_getting_started/2_how_to_run_sample_codes_main.rst
+++ b/docs/modules/0_getting_started/2_how_to_run_sample_codes_main.rst
@@ -46,7 +46,7 @@ using pip :
5. Execute python script in each directory.
-For example, to run the sample code of `Extented Kalman Filter` in the
+For example, to run the sample code of `Extended Kalman Filter` in the
`localization` directory, execute the following command:
.. code-block::
diff --git a/docs/modules/0_getting_started/3_how_to_contribute_main.rst b/docs/modules/0_getting_started/3_how_to_contribute_main.rst
index 1e61760649..f975127107 100644
--- a/docs/modules/0_getting_started/3_how_to_contribute_main.rst
+++ b/docs/modules/0_getting_started/3_how_to_contribute_main.rst
@@ -26,7 +26,7 @@ to understand the philosophies of this project.
Check your Python version.
---------------------------
-We only accept a PR for Python 3.12.x or higher.
+We only accept a PR for Python 3.13.x or higher.
We will not accept a PR for Python 2.x.
@@ -107,6 +107,18 @@ mathematics and the algorithm of the example.
Documentations related codes should be in the python script as the header
comments of the script or docstrings of each function.
+Also, each document should have a link to the code on GitHub.
+You can easily add the link by using the `.. autoclass::`, `.. autofunction::`, and `.. automodule::` directives of Sphinx's `autodoc`_ module.
+
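+For example, a code link section can look like this (the same pattern used for
+the inverted pendulum docs):
+
+.. code-block:: rst
+
+    Code Link
+    ^^^^^^^^^^^
+
+    .. autofunction:: InvertedPendulum.inverted_pendulum_lqr_control.main
+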
+Using this `autodoc`_ module, the generated documentation has a link to the code on GitHub like:
+
+.. image:: /_static/img/source_link_1.png
+
+When you click the link, you will jump to the source code on GitHub like:
+
+.. image:: /_static/img/source_link_2.png
+
+
.. _`submit a pull request`:
@@ -120,7 +132,7 @@ At first, please fix all CI errors before code review.
You can check your PR doc from the CI panel.
After the "ci/circleci: build_doc" CI is succeeded,
-you can access you PR doc with clicking the [Details] of the "ci/circleci: build_doc artifact" CI.
+you can access your PR doc by clicking the [Details] of the "ci/circleci: build_doc artifact" CI.
.. image:: /_static/img/doc_ci.png
@@ -164,7 +176,7 @@ Adding the missed documentations for existing examples is also great contributio
If you check the `Python Robotics Docs`_, you can notice that some of the examples
only have a simulation gif or short overview descriptions or just TBD.,
but no detailed algorithm or mathematical description.
-These documents needs to be improved.
+These documents need to be improved.
This doc `how to write doc`_ can be helpful to write documents.
@@ -210,5 +222,6 @@ Current Major Sponsors:
.. _`1Password`: https://github.com/1Password/for-open-source
.. _`matplotrecorder`: https://github.com/AtsushiSakai/matplotrecorder
.. _`PythonRoboticsGifs`: https://github.com/AtsushiSakai/PythonRoboticsGifs
+.. _`autodoc`: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
diff --git a/docs/modules/10_inverted_pendulum/inverted_pendulum_main.rst b/docs/modules/10_inverted_pendulum/inverted_pendulum_main.rst
index 048cbea9ac..47ae034195 100644
--- a/docs/modules/10_inverted_pendulum/inverted_pendulum_main.rst
+++ b/docs/modules/10_inverted_pendulum/inverted_pendulum_main.rst
@@ -4,7 +4,7 @@ Inverted Pendulum
------------------
An inverted pendulum on a cart consists of a mass :math:`m` at the top of a pole of length :math:`l` pivoted on a
-horizontally moving base as shown in the adjacent.
+horizontally moving base as shown in the adjacent figure.
The objective of the control system is to balance the inverted pendulum by applying a force to the cart that the pendulum is attached to.
@@ -89,9 +89,15 @@ and :math:`P` is the unique positive definite solution to the discrete time
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/Control/InvertedPendulumCart/animation_lqr.gif
+Code Link
+^^^^^^^^^^^
+
+.. autofunction:: InvertedPendulum.inverted_pendulum_lqr_control.main
+
+
MPC control
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The MPC controller minimize this cost function defined as:
+The MPC controller minimizes this cost function defined as:
.. math:: J = x^T Q x + u^T R u
@@ -101,3 +107,9 @@ subject to:
- Initial state
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/Control/InvertedPendulumCart/animation.gif
+
+Code Link
+^^^^^^^^^^^
+
+.. autofunction:: InvertedPendulum.inverted_pendulum_mpc_control.main
+
diff --git a/docs/modules/12_appendix/Kalmanfilter_basics_2_main.rst b/docs/modules/12_appendix/Kalmanfilter_basics_2_main.rst
index 9ae6fc5bcb..b7ff84e6f6 100644
--- a/docs/modules/12_appendix/Kalmanfilter_basics_2_main.rst
+++ b/docs/modules/12_appendix/Kalmanfilter_basics_2_main.rst
@@ -331,7 +331,7 @@ are vectors and matrices, but the concepts are exactly the same:
- Use the process model to predict the next state (the prior)
- Form an estimate part way between the measurement and the prior
-References:
+Reference
~~~~~~~~~~~
1. Roger Labbe’s
diff --git a/docs/modules/12_appendix/Kalmanfilter_basics_main.rst b/docs/modules/12_appendix/Kalmanfilter_basics_main.rst
index 6548377e07..1ed5790d52 100644
--- a/docs/modules/12_appendix/Kalmanfilter_basics_main.rst
+++ b/docs/modules/12_appendix/Kalmanfilter_basics_main.rst
@@ -55,7 +55,7 @@ Variance, Covariance and Correlation
Variance
^^^^^^^^
-Variance is the spread of the data. The mean does’nt tell much **about**
+Variance is the spread of the data. The mean doesn’t tell much **about**
the data. Therefore the variance tells us about the **story** about the
data meaning the spread of the data.
@@ -552,7 +552,7 @@ a given (X,Y) value.
.. image:: Kalmanfilter_basics_files/Kalmanfilter_basics_28_1.png
-References:
+Reference
~~~~~~~~~~~
1. Roger Labbe’s
diff --git a/docs/modules/12_appendix/external_sensors_main.rst b/docs/modules/12_appendix/external_sensors_main.rst
index 3597418150..b7caaf2c3a 100644
--- a/docs/modules/12_appendix/external_sensors_main.rst
+++ b/docs/modules/12_appendix/external_sensors_main.rst
@@ -10,9 +10,7 @@ Therefore, we will provide an overview.
Introduction
------------
-In recent years, the application of robotic technology has advanced,
-particularly in areas such as autonomous vehicles and disaster response robots.
-A crucial element in these technologies is external recognition—the robot's ability to understand its surrounding environment, identify safe zones, and detect moving objects using onboard sensors. Achieving effective external recognition involves various techniques, but equally important is the selection of appropriate sensors. Robots, like the sensors they employ, come in many forms, but external recognition sensors can be broadly categorized into three types. Developing an advanced external recognition system requires a thorough understanding of each sensor's principles and characteristics to determine their optimal application. This article summarizes the principles and features of these sensors for personal study purposes.
+In recent years, the application of robotic technology has advanced, particularly in areas such as autonomous vehicles and disaster response robots. A crucial element in these technologies is external recognition—the robot's ability to understand its surrounding environment, identify safe zones, and detect moving objects using onboard sensors. Achieving effective external recognition involves various techniques, but equally important is the selection of appropriate sensors. Robots, like the sensors they employ, come in many forms, but external recognition sensors can be broadly categorized into three types. Developing an advanced external recognition system requires a thorough understanding of each sensor's principles and characteristics to determine their optimal application. This article summarizes the principles and features of these sensors for personal study purposes.
Laser Sensors
-------------
@@ -22,7 +20,7 @@ Laser sensors measure distances by utilizing light, commonly referred to as Ligh
Radar Sensors
-------------
-TBD
+Radar measures distances using radio waves, commonly referred to as Radio Detection and Ranging (RADAR). It transmits radio signals towards an object and calculates the distance from the time the reflected waves take to return, using the known propagation speed of radio waves.
Monocular Cameras
@@ -59,4 +57,9 @@ Ultrasonic sensors are commonly used in indoor robots and some automotive autono
References
----------
-TBD
+- Wikipedia articles:
+
+ - `Lidar Sensors `_
+ - `Radar Sensors `_
+ - `Stereo Cameras `_
+ - `Ultrasonic Sensors `_
\ No newline at end of file
diff --git a/docs/modules/12_appendix/internal_sensors_main.rst b/docs/modules/12_appendix/internal_sensors_main.rst
index 18f209098e..fa2594a2bf 100644
--- a/docs/modules/12_appendix/internal_sensors_main.rst
+++ b/docs/modules/12_appendix/internal_sensors_main.rst
@@ -3,32 +3,59 @@
Internal Sensors for Robots
============================
-This project, `PythonRobotics`, focuses on algorithms, so hardware is not included.
-However, having basic knowledge of hardware in robotics is also important for understanding algorithms.
-Therefore, we will provide an overview.
+This project, `PythonRobotics`, focuses on algorithms, so hardware is not included. However, having basic knowledge of hardware in robotics is also important for understanding algorithms. Therefore, we will provide an overview.
Introduction
-------------
+-------------
+
+In robotic systems, internal sensors play a crucial role in monitoring the robot’s internal state, such as orientation, acceleration, angular velocity, altitude, and temperature. These sensors provide essential feedback that supports control, localization, and safety mechanisms. While external sensors perceive the environment, internal sensors give the robot self-awareness of its own motion and condition. Understanding the principles and characteristics of these sensors is vital to fully utilize their information within algorithms and decision-making systems. This section outlines the main internal sensors used in robotics.
Global Navigation Satellite System (GNSS)
--------------------------------------------
+-----------------------------------------
+
+GNSS is a satellite-based navigation system that provides global positioning and time information by analyzing signals from multiple satellites. It is commonly used in outdoor environments for absolute positioning. Although GNSS offers global coverage without infrastructure dependency, its performance is limited indoors or in obstructed areas, and it suffers from low update rates and susceptibility to signal noise. It is widely used in outdoor navigation for drones, vehicles, and delivery robots.
Gyroscope
----------
+A gyroscope measures angular velocity around the robot’s axes, enabling orientation and attitude estimation through detection of the Coriolis effect. Gyroscopes are compact, cost-effective, and provide high update rates, but they are prone to drift and require calibration or sensor fusion for long-term accuracy. These sensors are essential in drones, balancing robots, and IMU-based systems for motion tracking.
+
Accelerometer
---------------
+---------------
+
+An accelerometer measures linear acceleration along one or more axes, typically by detecting mass displacement due to motion. It is small, affordable, and useful for detecting movement, tilt, or vibration. However, accelerometers are limited by noisy output and cannot independently determine position without integration and fusion. They are commonly applied in wearable robotics, step counters, and vibration sensing.
Magnetometer
--------------
+A magnetometer measures the direction and intensity of magnetic fields, enabling heading estimation based on Earth’s magnetic field. It is lightweight and useful for orientation, especially in GPS-denied environments, though it is sensitive to interference from electronics and requires calibration. Magnetometers are often used in conjunction with IMUs for navigation and directional awareness.
+
Inertial Measurement Unit (IMU)
--------------------------------
+An IMU integrates a gyroscope, accelerometer, and sometimes a magnetometer to estimate a robot's motion and orientation through sensor fusion techniques such as Kalman filters. IMUs are compact and provide high-frequency data, which is essential for localization and navigation in GPS-denied areas. Nonetheless, they accumulate drift over time and require complex filtering to maintain accuracy. IMUs are found in drones, mobile robots, and motion tracking systems.
+
Pressure Sensor
------------------
+----------------
+
+Pressure sensors detect atmospheric or fluid pressure by measuring the force exerted on a diaphragm. They are effective for estimating altitude and monitoring environmental conditions, especially in drones and underwater robots. Although cost-effective and efficient, their accuracy may degrade due to temperature variation and limitations in low-altitude resolution.
Temperature Sensor
--------------------
+Temperature sensors monitor environmental or internal component temperatures, using changes in resistance or voltage depending on sensor type (e.g., RTD or thermocouple). They are simple and reliable for safety and thermal regulation, though they may respond slowly and be affected by nearby electronics. Common applications include battery and motor monitoring, safety systems, and ambient sensing.
+
+References
+----------
+- *Introduction to Autonomous Mobile Robots*, Roland Siegwart, Illah Nourbakhsh, Davide Scaramuzza
+- *Probabilistic Robotics*, Sebastian Thrun, Wolfram Burgard, Dieter Fox
+- Wikipedia articles:
+
+ - `Inertial Measurement Unit (IMU) `_
+ - `Accelerometer `_
+ - `Gyroscope `_
+ - `Magnetometer `_
+ - `Pressure sensor `_
+ - `Temperature sensor `_
+- `Adafruit Sensor Guides `_
\ No newline at end of file
diff --git a/docs/modules/12_appendix/steering_motion_model_main.rst b/docs/modules/12_appendix/steering_motion_model_main.rst
index 6e444b7909..c697123fa2 100644
--- a/docs/modules/12_appendix/steering_motion_model_main.rst
+++ b/docs/modules/12_appendix/steering_motion_model_main.rst
@@ -91,7 +91,7 @@ the target minimum velocity :math:`v_{min}` can be calculated as follows:
:math:`v_{min} = \frac{d_{t+1}+d_{t}}{\Delta t} = \frac{d_{t+1}+d_{t}}{(\kappa_{t+1}-\kappa_{t})}\frac{tan\dot{\delta}_{max}}{WB}`
-References:
+Reference
~~~~~~~~~~~
- `Vehicle Dynamics and Control `_
diff --git a/docs/modules/13_mission_planning/behavior_tree/behavior_tree_main.rst b/docs/modules/13_mission_planning/behavior_tree/behavior_tree_main.rst
new file mode 100644
index 0000000000..4b0ef82c12
--- /dev/null
+++ b/docs/modules/13_mission_planning/behavior_tree/behavior_tree_main.rst
@@ -0,0 +1,104 @@
+Behavior Tree
+-------------
+
+Behavior Tree is a modular, hierarchical decision model that is widely used in robot control and game development.
+It has some similarities to hierarchical state machines, with the key difference that the main building block of a behavior tree is a task rather than a state.
+Behavior Trees have been shown to generalize several other control architectures (https://ieeexplore.ieee.org/document/7790863).
+
+Code Link
+~~~~~~~~~~~~~
+
+Control Node
+++++++++++++
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.ControlNode
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.SequenceNode
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.SelectorNode
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.WhileDoElseNode
+
+Action Node
+++++++++++++
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.ActionNode
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.EchoNode
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.SleepNode
+
+Decorator Node
+++++++++++++++
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.DecoratorNode
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.InverterNode
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.TimeoutNode
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.DelayNode
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.ForceSuccessNode
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.ForceFailureNode
+
+Behavior Tree Factory
++++++++++++++++++++++
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.BehaviorTreeFactory
+ :members:
+
+Behavior Tree
++++++++++++++
+
+.. autoclass:: MissionPlanning.BehaviorTree.behavior_tree.BehaviorTree
+ :members:
+
+Example
+~~~~~~~
+
+Visualize the behavior tree by `xml-tree-visual `_.
+
+.. image:: ./robot_behavior_case.svg
+
+Print the behavior tree
+
+.. code-block:: text
+
+    Behavior Tree
+    [Robot Main Controller]
+        [Battery Management]
+            (Low Battery Detection)
+        [Patrol Task]
+            [Move to Position A]
+                [Obstacle Handling A]
+                    [Obstacle Present]
+            [Move to Position B]
+                (Short Wait)
+                (Limited Time Obstacle Handling)
+                    [Obstacle Present]
+            [Conditional Move to C]
+                [Perform Position C Task]
+                    (Ensure Completion)
+    Behavior Tree
diff --git a/docs/modules/13_mission_planning/behavior_tree/robot_behavior_case.svg b/docs/modules/13_mission_planning/behavior_tree/robot_behavior_case.svg
new file mode 100644
index 0000000000..a3d43aed52
--- /dev/null
+++ b/docs/modules/13_mission_planning/behavior_tree/robot_behavior_case.svg
@@ -0,0 +1,22 @@
+
\ No newline at end of file
diff --git a/docs/modules/13_mission_planning/mission_planning_main.rst b/docs/modules/13_mission_planning/mission_planning_main.rst
index 385e62f68e..c35eacd8d5 100644
--- a/docs/modules/13_mission_planning/mission_planning_main.rst
+++ b/docs/modules/13_mission_planning/mission_planning_main.rst
@@ -10,3 +10,4 @@ Mission planning includes tools such as finite state machines and behavior trees
:caption: Contents
state_machine/state_machine
+ behavior_tree/behavior_tree
diff --git a/docs/modules/13_mission_planning/state_machine/state_machine_main.rst b/docs/modules/13_mission_planning/state_machine/state_machine_main.rst
index abaece1b11..3f516d46a9 100644
--- a/docs/modules/13_mission_planning/state_machine/state_machine_main.rst
+++ b/docs/modules/13_mission_planning/state_machine/state_machine_main.rst
@@ -12,8 +12,8 @@ Core Concepts
- **Action**: An operation executed during transition (before entering new state)
- **Guard**: A precondition that must be satisfied to allow transition
-API
-~~~
+Code Link
+~~~~~~~~~~~
.. autoclass:: MissionPlanning.StateMachine.state_machine.StateMachine
:members: add_transition, process, register_state
diff --git a/docs/modules/1_introduction/2_python_for_robotics/python_for_robotics_main.rst b/docs/modules/1_introduction/2_python_for_robotics/python_for_robotics_main.rst
index c47c122853..e3f38a55fd 100644
--- a/docs/modules/1_introduction/2_python_for_robotics/python_for_robotics_main.rst
+++ b/docs/modules/1_introduction/2_python_for_robotics/python_for_robotics_main.rst
@@ -1,7 +1,7 @@
Python for Robotics
----------------------
-A programing language, Python is used for this `PythonRobotics` project
+The Python programming language is used for this `PythonRobotics` project
to achieve the purposes of this project described in the :ref:`What is PythonRobotics?`.
This section explains the Python itself and features for science computing and robotics.
diff --git a/docs/modules/1_introduction/3_technologies_for_robotics/technologies_for_robotics_main.rst b/docs/modules/1_introduction/3_technologies_for_robotics/technologies_for_robotics_main.rst
index 0ed51e961b..dd3cd1d86c 100644
--- a/docs/modules/1_introduction/3_technologies_for_robotics/technologies_for_robotics_main.rst
+++ b/docs/modules/1_introduction/3_technologies_for_robotics/technologies_for_robotics_main.rst
@@ -3,7 +3,7 @@ Technologies for Robotics
The field of robotics needs wide areas of technologies such as mechanical engineering,
electrical engineering, computer science, and artificial intelligence (AI).
-This project, `PythonRobotics`, only focus on computer science and artificial intelligence.
+This project, `PythonRobotics`, only focuses on computer science and artificial intelligence.
The technologies for robotics are categorized as following 3 categories:
diff --git a/docs/modules/2_localization/ensamble_kalman_filter_localization_files/ensamble_kalman_filter_localization_main.rst b/docs/modules/2_localization/ensamble_kalman_filter_localization_files/ensamble_kalman_filter_localization_main.rst
index 2543d1186a..6274859002 100644
--- a/docs/modules/2_localization/ensamble_kalman_filter_localization_files/ensamble_kalman_filter_localization_main.rst
+++ b/docs/modules/2_localization/ensamble_kalman_filter_localization_files/ensamble_kalman_filter_localization_main.rst
@@ -1,7 +1,204 @@
-Ensamble Kalman Filter Localization
+Ensemble Kalman Filter Localization
-----------------------------------
.. figure:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/Localization/ensamble_kalman_filter/animation.gif
-This is a sensor fusion localization with Ensamble Kalman Filter(EnKF).
+This is a sensor fusion localization with Ensemble Kalman Filter (EnKF).
+
+The blue line is true trajectory, the black line is dead reckoning trajectory,
+and the red line is estimated trajectory with EnKF.
+
+The red ellipse is estimated covariance ellipse with EnKF.
+
+It is assumed that the robot can measure distance and bearing angle from landmarks (RFID).
+
+These measurements are used for EnKF localization.
+
+Code Link
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autofunction:: Localization.ensemble_kalman_filter.ensemble_kalman_filter.enkf_localization
+
+
+Ensemble Kalman Filter Algorithm
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Ensemble Kalman Filter (EnKF) is a Monte Carlo approach to the Kalman filter that
+uses an ensemble of state estimates to represent the probability distribution. Unlike
+the traditional Kalman filter that propagates the mean and covariance analytically,
+EnKF uses a collection of samples (ensemble members) to estimate the state and its uncertainty.
+
+The EnKF algorithm consists of two main steps:
+
+=== Predict ===
+
+For each ensemble member :math:`i = 1, ..., N`:
+
+1. Add random noise to the control input:
+
+ :math:`\mathbf{u}^i = \mathbf{u} + \epsilon_u`, where :math:`\epsilon_u \sim \mathcal{N}(0, \mathbf{R})`
+
+2. Predict the state:
+
+ :math:`\mathbf{x}^i_{pred} = f(\mathbf{x}^i_t, \mathbf{u}^i)`
+
+3. Predict the observation:
+
+ :math:`\mathbf{z}^i_{pred} = h(\mathbf{x}^i_{pred}) + \epsilon_z`, where :math:`\epsilon_z \sim \mathcal{N}(0, \mathbf{Q})`
+
+=== Update ===
+
+1. Calculate ensemble mean for states:
+
+ :math:`\bar{\mathbf{x}} = \frac{1}{N}\sum_{i=1}^N \mathbf{x}^i_{pred}`
+
+2. Calculate ensemble mean for observations:
+
+ :math:`\bar{\mathbf{z}} = \frac{1}{N}\sum_{i=1}^N \mathbf{z}^i_{pred}`
+
+3. Calculate state deviation:
+
+ :math:`\mathbf{X}' = \mathbf{x}^i_{pred} - \bar{\mathbf{x}}`
+
+4. Calculate observation deviation:
+
+ :math:`\mathbf{Z}' = \mathbf{z}^i_{pred} - \bar{\mathbf{z}}`
+
+5. Calculate covariance matrices:
+
+ :math:`\mathbf{U} = \frac{1}{N-1}\mathbf{X}'\mathbf{Z}'^T`
+
+ :math:`\mathbf{V} = \frac{1}{N-1}\mathbf{Z}'\mathbf{Z}'^T`
+
+6. Calculate Kalman gain:
+
+ :math:`\mathbf{K} = \mathbf{U}\mathbf{V}^{-1}`
+
+7. Update each ensemble member:
+
+ :math:`\mathbf{x}^i_{t+1} = \mathbf{x}^i_{pred} + \mathbf{K}(\mathbf{z}_{obs} - \mathbf{z}^i_{pred})`
+
+8. Calculate final state estimate:
+
+ :math:`\mathbf{x}_{est} = \frac{1}{N}\sum_{i=1}^N \mathbf{x}^i_{t+1}`
+
+9. Calculate covariance estimate:
+
+ :math:`\mathbf{P}_{est} = \frac{1}{N}\sum_{i=1}^N (\mathbf{x}^i_{t+1} - \mathbf{x}_{est})(\mathbf{x}^i_{t+1} - \mathbf{x}_{est})^T`
+
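+As a concrete illustration, the predict and update steps above can be written
+as a short NumPy sketch. Here ``motion_fn`` and ``observe_fn`` are hypothetical,
+vectorized stand-ins for the simulation's motion and observation models, and,
+following the conventions above, :math:`\mathbf{R}` is the input noise and
+:math:`\mathbf{Q}` the observation noise:
+
+.. code-block:: python
+
+    import numpy as np
+
+    def enkf_step(ensemble, u, z_obs, motion_fn, observe_fn, R, Q):
+        """One EnKF predict/update cycle. ensemble: (n_state, N) array."""
+        N = ensemble.shape[1]
+        # Predict: propagate every member with a noisy copy of the input
+        u_noisy = u[:, None] + np.linalg.cholesky(R) @ np.random.randn(len(u), N)
+        x_pred = motion_fn(ensemble, u_noisy)
+        z_pred = observe_fn(x_pred) + np.linalg.cholesky(Q) @ np.random.randn(len(z_obs), N)
+        # Update: ensemble statistics and Kalman gain
+        X = x_pred - x_pred.mean(axis=1, keepdims=True)   # state deviations
+        Z = z_pred - z_pred.mean(axis=1, keepdims=True)   # observation deviations
+        U = X @ Z.T / (N - 1)
+        V = Z @ Z.T / (N - 1)
+        K = U @ np.linalg.inv(V)                          # Kalman gain
+        ensemble = x_pred + K @ (z_obs[:, None] - z_pred)
+        x_est = ensemble.mean(axis=1)                     # final state estimate
+        P_est = np.cov(ensemble, bias=True)               # covariance (1/N convention)
+        return ensemble, x_est, P_est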
+
+Filter Design
+~~~~~~~~~~~~~
+
+In this simulation, the robot has a state vector with 4 states at time :math:`t`:
+
+.. math:: \mathbf{x}_t = [x_t, y_t, \phi_t, v_t]^T
+
+where:
+
+- :math:`x, y` are 2D position coordinates
+- :math:`\phi` is orientation (yaw angle)
+- :math:`v` is velocity
+
+The filter uses an ensemble of :math:`N=20` particles to represent the state distribution.
+
+Input Vector
+^^^^^^^^^^^^
+
+The robot has a velocity sensor and a gyro sensor, so the control input vector at each time step is:
+
+.. math:: \mathbf{u}_t = [v_t, \omega_t]^T
+
+where:
+
+- :math:`v_t` is linear velocity
+- :math:`\omega_t` is angular velocity (yaw rate)
+
+The input vector includes sensor noise modeled by covariance matrix :math:`\mathbf{R}`:
+
+.. math:: \mathbf{R} = \begin{bmatrix} \sigma_v^2 & 0 \\ 0 & \sigma_\omega^2 \end{bmatrix}
+
+
+Observation Vector
+^^^^^^^^^^^^^^^^^^
+
+The robot can observe landmarks (RFID tags) with distance and bearing measurements:
+
+.. math:: \mathbf{z}_t = [d_t, \theta_t, x_{lm}, y_{lm}]
+
+where:
+
+- :math:`d_t` is the distance to the landmark
+- :math:`\theta_t` is the bearing angle to the landmark
+- :math:`x_{lm}, y_{lm}` are the known landmark positions
+
+The observation noise is modeled by covariance matrix :math:`\mathbf{Q}`:
+
+.. math:: \mathbf{Q} = \begin{bmatrix} \sigma_d^2 & 0 \\ 0 & \sigma_\theta^2 \end{bmatrix}
+
+
+Motion Model
+~~~~~~~~~~~~
+
+The robot motion model is:
+
+.. math:: \dot{x} = v \cos(\phi)
+
+.. math:: \dot{y} = v \sin(\phi)
+
+.. math:: \dot{\phi} = \omega
+
+.. math:: \dot{v} = 0
+
+Discretized with time step :math:`\Delta t`, the motion model becomes:
+
+.. math:: \mathbf{x}_{t+1} = f(\mathbf{x}_t, \mathbf{u}_t) = \mathbf{F}\mathbf{x}_t + \mathbf{B}\mathbf{u}_t
+
+where:
+
+:math:`\begin{equation*} \mathbf{F} = \begin{bmatrix} 1 & 0 & 0 & 0\\ 0 & 1 & 0 & 0\\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 0 \\ \end{bmatrix} \end{equation*}`
+
+:math:`\begin{equation*} \mathbf{B} = \begin{bmatrix} \cos(\phi) \Delta t & 0\\ \sin(\phi) \Delta t & 0\\ 0 & \Delta t\\ 1 & 0\\ \end{bmatrix} \end{equation*}`
+
+The motion function expands to:
+
+:math:`\begin{equation*} \begin{bmatrix} x_{t+1} \\ y_{t+1} \\ \phi_{t+1} \\ v_{t+1} \end{bmatrix} = \begin{bmatrix} x_t + v_t\cos(\phi_t)\Delta t \\ y_t + v_t\sin(\phi_t)\Delta t \\ \phi_t + \omega_t \Delta t \\ v_t \end{bmatrix} \end{equation*}`
+
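+As a minimal sketch consistent with the matrices :math:`\mathbf{F}` and
+:math:`\mathbf{B}` above (``DT`` stands for :math:`\Delta t`):
+
+.. code-block:: python
+
+    import math
+    import numpy as np
+
+    DT = 0.1  # time step [s]
+
+    def motion_model(x, u):
+        """x = [x, y, phi, v]^T as a 4x1 column, u = [v, omega]^T as 2x1."""
+        F = np.array([[1.0, 0.0, 0.0, 0.0],
+                      [0.0, 1.0, 0.0, 0.0],
+                      [0.0, 0.0, 1.0, 0.0],
+                      [0.0, 0.0, 0.0, 0.0]])
+        B = np.array([[DT * math.cos(x[2, 0]), 0.0],
+                      [DT * math.sin(x[2, 0]), 0.0],
+                      [0.0, DT],
+                      [1.0, 0.0]])
+        return F @ x + B @ u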
+
+Observation Model
+~~~~~~~~~~~~~~~~~
+
+For each landmark at position :math:`(x_{lm}, y_{lm})`, the observation model calculates
+the expected landmark position in the global frame:
+
+.. math::
+ \begin{bmatrix}
+ x_{lm,obs} \\
+ y_{lm,obs}
+ \end{bmatrix} =
+ \begin{bmatrix}
+ x + d \cos(\phi + \theta) \\
+ y + d \sin(\phi + \theta)
+ \end{bmatrix}
+
+where :math:`d` and :math:`\theta` are the observed distance and bearing to the landmark.
+
+The observation function projects the robot state to expected landmark positions,
+which are then compared with actual landmark positions for the update step.
+
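+An illustrative sketch of this projection (the row layout
+``[d, theta, x_lm, y_lm]`` follows the observation vector defined above):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def project_observations(x, observations):
+        """Expected landmark positions given robot pose x = [x, y, phi, v]."""
+        projected = []
+        for d, theta, x_lm, y_lm in observations:
+            # (x_lm, y_lm) is the known landmark position, used later for comparison
+            projected.append((x[0] + d * np.cos(x[2] + theta),
+                              x[1] + d * np.sin(x[2] + theta)))
+        return np.array(projected)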
+
+Advantages of EnKF
+~~~~~~~~~~~~~~~~~~
+
+- **No Jacobian required**: Unlike EKF, EnKF does not need to compute Jacobian matrices, making it easier to implement for nonlinear systems
+- **Handles non-Gaussian distributions**: The ensemble representation can capture non-Gaussian features of the state distribution
+- **Computationally efficient**: For high-dimensional systems, EnKF can be more efficient than maintaining full covariance matrices
+- **Easy to parallelize**: Each ensemble member can be propagated independently
+
+
+Reference
+~~~~~~~~~
+
+- `Ensemble Kalman filtering `_
+- `PROBABILISTIC ROBOTICS `_
diff --git a/docs/modules/2_localization/extended_kalman_filter_localization_files/extended_kalman_filter_localization_main.rst b/docs/modules/2_localization/extended_kalman_filter_localization_files/extended_kalman_filter_localization_main.rst
index da214b6de5..adb41e5e40 100644
--- a/docs/modules/2_localization/extended_kalman_filter_localization_files/extended_kalman_filter_localization_main.rst
+++ b/docs/modules/2_localization/extended_kalman_filter_localization_files/extended_kalman_filter_localization_main.rst
@@ -23,28 +23,40 @@ is estimated trajectory with EKF.
The red ellipse is estimated covariance ellipse with EKF.
-Code: `PythonRobotics/extended_kalman_filter.py at master ·
-AtsushiSakai/PythonRobotics `__
+Code Link
+~~~~~~~~~~~~~
-Kalman Filter with Speed Scale Factor Correction
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. autofunction:: Localization.extended_kalman_filter.extended_kalman_filter.ekf_estimation
+Extended Kalman Filter algorithm
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. image:: ekf_with_velocity_correction_1_0.png
- :width: 600px
+The localization process using the Extended Kalman Filter (EKF) is as follows:
-This is a Extended kalman filter (EKF) localization with velocity correction.
+=== Predict ===
-This is for correcting the vehicle speed measured with scale factor errors due to factors such as wheel wear.
+:math:`x_{Pred} = Fx_t+Bu_t`
-Code: `PythonRobotics/extended_kalman_ekf_with_velocity_correctionfilter.py
-AtsushiSakai/PythonRobotics `__
+:math:`P_{Pred} = J_f P_t J_f^T + Q`
-Filter design
-~~~~~~~~~~~~~
+=== Update ===
-Position Estimation Kalman Filter
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+:math:`z_{Pred} = Hx_{Pred}`
+
+:math:`y = z - z_{Pred}`
+
+:math:`S = J_g P_{Pred} J_g^T + R`
+
+:math:`K = P_{Pred} J_g^T S^{-1}`
+
+:math:`x_{t+1} = x_{Pred} + Ky`
+
+:math:`P_{t+1} = ( I - K J_g) P_{Pred}`
+
+
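+These equations translate almost line by line into code. The following is an
+illustrative sketch, where ``motion_model`` and ``jacob_f`` are hypothetical
+helpers for :math:`f` and :math:`J_f`, and the observation model is linear, so
+:math:`J_g = H`:
+
+.. code-block:: python
+
+    import numpy as np
+
+    def ekf_step(x, P, u, z, motion_model, jacob_f, H, Q, R):
+        # === Predict ===
+        x_pred = motion_model(x, u)           # x_pred = F x + B u
+        Jf = jacob_f(x, u)
+        P_pred = Jf @ P @ Jf.T + Q
+        # === Update ===
+        y = z - H @ x_pred                    # innovation
+        S = H @ P_pred @ H.T + R
+        K = P_pred @ H.T @ np.linalg.inv(S)   # Kalman gain
+        x_new = x_pred + K @ y
+        P_new = (np.eye(len(x)) - K @ H) @ P_pred
+        return x_new, P_new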
+
+Filter design
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In this simulation, the robot has a state vector includes 4 states at
time :math:`t`.
@@ -82,27 +94,9 @@ In the code, “observation” function generates the input and observation
vector with noise
`code `__
-Kalman Filter with Speed Scale Factor Correction
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-In this simulation, the robot has a state vector includes 5 states at
-time :math:`t`.
-
-.. math:: \textbf{x}_t=[x_t, y_t, \phi_t, v_t, s_t]
-
-x, y are a 2D x-y position, :math:`\phi` is orientation, v is
-velocity, and s is a scale factor of velocity.
-
-In the code, “xEst” means the state vector.
-`code `__
-
-The rest is the same as the Position Estimation Kalman Filter.
Motion Model
-~~~~~~~~~~~~
-
-Position Estimation Kalman Filter
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~
The robot model is
@@ -137,9 +131,61 @@ Its Jacobian matrix is
:math:`\begin{equation*} = \begin{bmatrix} 1& 0 & -v \sin(\phi) \Delta t & \cos(\phi) \Delta t\\ 0 & 1 & v \cos(\phi) \Delta t & \sin(\phi) \Delta t\\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} \end{equation*}`
+Observation Model
+~~~~~~~~~~~~~~~~~
+
+The robot can get x-y position information from GPS.
+
+So GPS Observation model is
+
+.. math:: \textbf{z}_{t} = g(\textbf{x}_t) = H \textbf{x}_t
+
+where
+
+:math:`\begin{equation*} H = \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ \end{bmatrix} \end{equation*}`
+
+The observation function states that
+
+:math:`\begin{equation*} \begin{bmatrix} x' \\ y' \end{bmatrix} = g(\textbf{x}) = \begin{bmatrix} x \\ y \end{bmatrix} \end{equation*}`
+
+Its Jacobian matrix is
+
+:math:`\begin{equation*} J_g = \begin{bmatrix} \frac{\partial x'}{\partial x} & \frac{\partial x'}{\partial y} & \frac{\partial x'}{\partial \phi} & \frac{\partial x'}{\partial v}\\ \frac{\partial y'}{\partial x}& \frac{\partial y'}{\partial y} & \frac{\partial y'}{\partial \phi} & \frac{\partial y'}{ \partial v}\\ \end{bmatrix} \end{equation*}`
+
+:math:`\begin{equation*} = \begin{bmatrix} 1& 0 & 0 & 0\\ 0 & 1 & 0 & 0\\ \end{bmatrix} \end{equation*}`
+
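+In code, this linear observation model is a single matrix product (a minimal
+sketch):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def observation_model(x):
+        """Extract the GPS-observable part (x, y) of the state."""
+        H = np.array([[1.0, 0.0, 0.0, 0.0],
+                      [0.0, 1.0, 0.0, 0.0]])
+        return H @ x
+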
Kalman Filter with Speed Scale Factor Correction
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+This is an Extended Kalman Filter (EKF) localization with velocity correction.
+
+This corrects vehicle speed measurements that contain scale factor errors caused by factors such as wheel wear.
+
+
+In this simulation, the robot has a state vector that includes 5 states
+at time :math:`t`.
+
+.. math:: \textbf{x}_t=[x_t, y_t, \phi_t, v_t, s_t]
+
+x and y are the 2D position, :math:`\phi` is orientation, v is
+velocity, and s is a scale factor of velocity.
+
+In the code, “xEst” means the state vector.
+`code `__
+
+The rest is the same as the Position Estimation Kalman Filter.
+
+.. image:: ekf_with_velocity_correction_1_0.png
+ :width: 600px
+
+Code Link
+~~~~~~~~~~~~~
+
+.. autofunction:: Localization.extended_kalman_filter.ekf_with_velocity_correction.ekf_estimation
+
+
+Motion Model
+~~~~~~~~~~~~
The robot model is
@@ -178,33 +224,7 @@ Its Jacobian matrix is
Observation Model
~~~~~~~~~~~~~~~~~
-Position Estimation Kalman Filter
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The robot can get x-y position infomation from GPS.
-
-So GPS Observation model is
-
-.. math:: \textbf{z}_{t} = g(\textbf{x}_t) = H \textbf{x}_t
-
-where
-
-:math:`\begin{equation*} H = \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ \end{bmatrix} \end{equation*}`
-
-The observation function states that
-
-:math:`\begin{equation*} \begin{bmatrix} x' \\ y' \end{bmatrix} = g(\textbf{x}) = \begin{bmatrix} x \\ y \end{bmatrix} \end{equation*}`
-
-Its Jacobian matrix is
-
-:math:`\begin{equation*} J_g = \begin{bmatrix} \frac{\partial x'}{\partial x} & \frac{\partial x'}{\partial y} & \frac{\partial x'}{\partial \phi} & \frac{\partial x'}{\partial v}\\ \frac{\partial y'}{\partial x}& \frac{\partial y'}{\partial y} & \frac{\partial y'}{\partial \phi} & \frac{\partial y'}{ \partial v}\\ \end{bmatrix} \end{equation*}`
-
-:math:`\begin{equation*} = \begin{bmatrix} 1& 0 & 0 & 0\\ 0 & 1 & 0 & 0\\ \end{bmatrix} \end{equation*}`
-
-Kalman Filter with Speed Scale Factor Correction
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The robot can get x-y position infomation from GPS.
+The robot can get x-y position information from GPS.
So GPS Observation model is
@@ -225,32 +245,8 @@ Its Jacobian matrix is
:math:`\begin{equation*} = \begin{bmatrix} 1& 0 & 0 & 0 & 0\\ 0 & 1 & 0 & 0 & 0\\ \end{bmatrix} \end{equation*}`
-Extended Kalman Filter
-~~~~~~~~~~~~~~~~~~~~~~
-
-Localization process using Extended Kalman Filter:EKF is
-
-=== Predict ===
-
-:math:`x_{Pred} = Fx_t+Bu_t`
-
-:math:`P_{Pred} = J_f P_t J_f^T + Q`
-
-=== Update ===
-
-:math:`z_{Pred} = Hx_{Pred}`
-
-:math:`y = z - z_{Pred}`
-
-:math:`S = J_g P_{Pred}.J_g^T + R`
-
-:math:`K = P_{Pred}.J_g^T S^{-1}`
-
-:math:`x_{t+1} = x_{Pred} + Ky`
-
-:math:`P_{t+1} = ( I - K J_g) P_{Pred}`
-Ref:
-~~~~
+Reference
+^^^^^^^^^^^
- `PROBABILISTIC-ROBOTICS.ORG `__
diff --git a/docs/modules/2_localization/histogram_filter_localization/histogram_filter_localization_main.rst b/docs/modules/2_localization/histogram_filter_localization/histogram_filter_localization_main.rst
index fafd578333..b5697d2dd9 100644
--- a/docs/modules/2_localization/histogram_filter_localization/histogram_filter_localization_main.rst
+++ b/docs/modules/2_localization/histogram_filter_localization/histogram_filter_localization_main.rst
@@ -16,6 +16,11 @@ The filter uses speed input and range observations from RFID for localization.
Initial position information is not needed.
+Code Link
+~~~~~~~~~~~~~
+
+.. autofunction:: Localization.histogram_filter.histogram_filter.histogram_filter_localization
+
Filtering algorithm
~~~~~~~~~~~~~~~~~~~~
@@ -63,7 +68,7 @@ But, the probability is getting uncertain without observations:
The `gaussian filter `_
-is used in the simulation for adding noize.
+is used in the simulation for adding noise.
Step3: Update probability by observation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -107,7 +112,7 @@ There are two ways to calculate the final positions:
-References:
+Reference
~~~~~~~~~~~
- `_PROBABILISTIC ROBOTICS: `_
diff --git a/docs/modules/2_localization/particle_filter_localization/particle_filter_localization_main.rst b/docs/modules/2_localization/particle_filter_localization/particle_filter_localization_main.rst
index 20a9eb58fc..8bf22a246b 100644
--- a/docs/modules/2_localization/particle_filter_localization/particle_filter_localization_main.rst
+++ b/docs/modules/2_localization/particle_filter_localization/particle_filter_localization_main.rst
@@ -13,7 +13,13 @@ and the red line is estimated trajectory with PF.
It is assumed that the robot can measure a distance from landmarks
(RFID).
-This measurements are used for PF localization.
+These measurements are used for PF localization.
+
+Code Link
+~~~~~~~~~~~~~
+
+.. autofunction:: Localization.particle_filter.particle_filter.pf_localization
+
How to calculate covariance matrix from particles
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -30,7 +36,7 @@ The covariance matrix :math:`\Xi` from particle information is calculated by the
- :math:`\mu_j` is the :math:`j` th mean state of particles.
-References:
+Reference
~~~~~~~~~~~
- `_PROBABILISTIC ROBOTICS: `_
diff --git a/docs/modules/2_localization/unscented_kalman_filter_localization/unscented_kalman_filter_localization_main.rst b/docs/modules/2_localization/unscented_kalman_filter_localization/unscented_kalman_filter_localization_main.rst
index bb6b5b664b..01d161b17e 100644
--- a/docs/modules/2_localization/unscented_kalman_filter_localization/unscented_kalman_filter_localization_main.rst
+++ b/docs/modules/2_localization/unscented_kalman_filter_localization/unscented_kalman_filter_localization_main.rst
@@ -5,9 +5,253 @@ Unscented Kalman Filter localization
This is a sensor fusion localization with Unscented Kalman Filter(UKF).
-The lines and points are same meaning of the EKF simulation.
+The blue line is true trajectory, the black line is dead reckoning trajectory,
+the green points are GPS observations, and the red line is estimated trajectory with UKF.
-References:
+The red ellipse is estimated covariance ellipse with UKF.
+
+Code Link
+~~~~~~~~~~~~~
+
+.. autofunction:: Localization.unscented_kalman_filter.unscented_kalman_filter.ukf_estimation
+
+
+Unscented Kalman Filter Algorithm
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Unscented Kalman Filter (UKF) is a nonlinear state estimation technique that uses
+the unscented transform to handle nonlinearities. Unlike the Extended Kalman Filter (EKF)
+that linearizes the nonlinear functions using Jacobians, UKF uses a deterministic sampling
+approach called sigma points to capture the mean and covariance of the state distribution.
+
+The UKF algorithm consists of two main steps:
+
+=== Predict ===
+
+1. Generate sigma points around the current state estimate:
+
+ :math:`\chi_0 = \mathbf{x}_{t}`
+
+ :math:`\chi_i = \mathbf{x}_{t} + \gamma\sqrt{\mathbf{P}_t}_i` for :math:`i = 1, ..., n`
+
+ :math:`\chi_i = \mathbf{x}_{t} - \gamma\sqrt{\mathbf{P}_t}_{i-n}` for :math:`i = n+1, ..., 2n`
+
+ where :math:`\gamma = \sqrt{n + \lambda}` and :math:`\lambda = \alpha^2(n + \kappa) - n`
+
+2. Propagate sigma points through the motion model:
+
+ :math:`\chi^-_i = f(\chi_i, \mathbf{u}_t)`
+
+3. Calculate predicted state mean:
+
+ :math:`\mathbf{x}^-_{t+1} = \sum_{i=0}^{2n} w^m_i \chi^-_i`
+
+4. Calculate predicted state covariance:
+
+ :math:`\mathbf{P}^-_{t+1} = \sum_{i=0}^{2n} w^c_i (\chi^-_i - \mathbf{x}^-_{t+1})(\chi^-_i - \mathbf{x}^-_{t+1})^T + \mathbf{Q}`
+
+=== Update ===
+
+1. Generate sigma points around the predicted state:
+
+ :math:`\chi_i = \text{generate\_sigma\_points}(\mathbf{x}^-_{t+1}, \mathbf{P}^-_{t+1})`
+
+2. Propagate sigma points through the observation model:
+
+ :math:`\mathcal{Z}_i = h(\chi_i)`
+
+3. Calculate predicted observation mean:
+
+ :math:`\mathbf{z}^-_{t+1} = \sum_{i=0}^{2n} w^m_i \mathcal{Z}_i`
+
+4. Calculate innovation covariance:
+
+ :math:`\mathbf{S}_t = \sum_{i=0}^{2n} w^c_i (\mathcal{Z}_i - \mathbf{z}^-_{t+1})(\mathcal{Z}_i - \mathbf{z}^-_{t+1})^T + \mathbf{R}`
+
+5. Calculate cross-correlation matrix:
+
+ :math:`\mathbf{P}_{xz} = \sum_{i=0}^{2n} w^c_i (\chi_i - \mathbf{x}^-_{t+1})(\mathcal{Z}_i - \mathbf{z}^-_{t+1})^T`
+
+6. Calculate Kalman gain:
+
+ :math:`\mathbf{K} = \mathbf{P}_{xz}\mathbf{S}_t^{-1}`
+
+7. Update state estimate:
+
+ :math:`\mathbf{x}_{t+1} = \mathbf{x}^-_{t+1} + \mathbf{K}(\mathbf{z}_t - \mathbf{z}^-_{t+1})`
+
+8. Update covariance estimate:
+
+ :math:`\mathbf{P}_{t+1} = \mathbf{P}^-_{t+1} - \mathbf{K}\mathbf{S}_t\mathbf{K}^T`
+
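+The core of both steps is the unscented transform: the weighted mean and
+covariance of the transformed sigma points plus additive noise. A minimal
+sketch:
+
+.. code-block:: python
+
+    import numpy as np
+
+    def unscented_transform(sigma, wm, wc, noise_cov):
+        """Weighted mean/covariance of sigma points. sigma: (n, 2n+1) array."""
+        mean = sigma @ wm
+        dev = sigma - mean[:, None]
+        cov = dev @ np.diag(wc) @ dev.T + noise_cov
+        return mean, cov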
+
+Sigma Points and Weights
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The UKF uses deterministic sigma points to represent the state distribution. The weights
+for combining sigma points are calculated as:
+
+**Mean weights:**
+
+:math:`w^m_0 = \frac{\lambda}{n + \lambda}`
+
+:math:`w^m_i = \frac{1}{2(n + \lambda)}` for :math:`i = 1, ..., 2n`
+
+**Covariance weights:**
+
+:math:`w^c_0 = \frac{\lambda}{n + \lambda} + (1 - \alpha^2 + \beta)`
+
+:math:`w^c_i = \frac{1}{2(n + \lambda)}` for :math:`i = 1, ..., 2n`
+
+where:
+
+- :math:`\alpha` controls the spread of sigma points around the mean (typically :math:`0.001 \leq \alpha \leq 1`)
+- :math:`\beta` incorporates prior knowledge of the distribution (:math:`\beta = 2` is optimal for Gaussian distributions)
+- :math:`\kappa` is a secondary scaling parameter (typically :math:`\kappa = 0`)
+- :math:`n` is the dimension of the state vector
+
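+A minimal sketch of the weight setup and sigma point generation above, using
+``scipy.linalg.sqrtm`` for the matrix square root (assuming :math:`\mathbf{P}`
+is positive definite):
+
+.. code-block:: python
+
+    import numpy as np
+    import scipy.linalg
+
+    def setup_weights(n, alpha=0.001, beta=2.0, kappa=0.0):
+        """Sigma point weights for the mean (wm) and covariance (wc)."""
+        lamb = alpha ** 2 * (n + kappa) - n
+        wm = [lamb / (lamb + n)] + [1.0 / (2 * (lamb + n))] * (2 * n)
+        wc = ([lamb / (lamb + n) + (1.0 - alpha ** 2 + beta)]
+              + [1.0 / (2 * (lamb + n))] * (2 * n))
+        gamma = np.sqrt(n + lamb)
+        return np.array(wm), np.array(wc), gamma
+
+    def generate_sigma_points(x, P, gamma):
+        """Return a (n, 2n+1) matrix of sigma points around the mean x."""
+        sigma = [x]
+        Psqrt = scipy.linalg.sqrtm(P)  # matrix square root of the covariance
+        for i in range(len(x)):
+            sigma.append(x + gamma * Psqrt[:, i])
+        for i in range(len(x)):
+            sigma.append(x - gamma * Psqrt[:, i])
+        return np.array(sigma).T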
+
+Filter Design
+~~~~~~~~~~~~~
+
+In this simulation, the robot has a state vector with 4 states at time :math:`t`:
+
+.. math:: \mathbf{x}_t = [x_t, y_t, \phi_t, v_t]^T
+
+where:
+
+- :math:`x, y` are 2D position coordinates
+- :math:`\phi` is orientation (yaw angle)
+- :math:`v` is velocity
+
+In the code, "xEst" means the state vector.
+
+The covariance matrices are:
+
+- :math:`\mathbf{P}_t` is the state covariance matrix
+- :math:`\mathbf{Q}` is the process noise covariance matrix
+- :math:`\mathbf{R}` is the observation noise covariance matrix
+
+**Process Noise Covariance** :math:`\mathbf{Q}`:
+
+.. math::
+ \mathbf{Q} = \begin{bmatrix}
+ 0.1^2 & 0 & 0 & 0 \\
+ 0 & 0.1^2 & 0 & 0 \\
+ 0 & 0 & (0.017)^2 & 0 \\
+ 0 & 0 & 0 & 1.0^2
+ \end{bmatrix}
+
+**Observation Noise Covariance** :math:`\mathbf{R}`:
+
+.. math::
+ \mathbf{R} = \begin{bmatrix}
+ 1.0^2 & 0 \\
+ 0 & 1.0^2
+ \end{bmatrix}
+
+
+Input Vector
+^^^^^^^^^^^^
+
+The robot has a velocity sensor and a gyro sensor, so the control input vector at each time step is:
+
+.. math:: \mathbf{u}_t = [v_t, \omega_t]^T
+
+where:
+
+- :math:`v_t` is linear velocity [m/s]
+- :math:`\omega_t` is angular velocity (yaw rate) [rad/s]
+
+The input vector includes sensor noise.
+
+
+Observation Vector
+^^^^^^^^^^^^^^^^^^
+
+The robot has a GNSS (GPS) sensor that can measure x-y position:
+
+.. math:: \mathbf{z}_t = [x_t, y_t]^T
+
+The observation includes GPS noise with covariance :math:`\mathbf{R}`.
+
+
+Motion Model
+~~~~~~~~~~~~
+
+The robot motion model is:
+
+.. math:: \dot{x} = v \cos(\phi)
+
+.. math:: \dot{y} = v \sin(\phi)
+
+.. math:: \dot{\phi} = \omega
+
+.. math:: \dot{v} = 0
+
+Discretized with time step :math:`\Delta t`, the motion model becomes:
+
+.. math:: \mathbf{x}_{t+1} = f(\mathbf{x}_t, \mathbf{u}_t) = \mathbf{F}\mathbf{x}_t + \mathbf{B}\mathbf{u}_t
+
+where:
+
+:math:`\begin{equation*} \mathbf{F} = \begin{bmatrix} 1 & 0 & 0 & 0\\ 0 & 1 & 0 & 0\\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 0 \\ \end{bmatrix} \end{equation*}`
+
+:math:`\begin{equation*} \mathbf{B} = \begin{bmatrix} \cos(\phi) \Delta t & 0\\ \sin(\phi) \Delta t & 0\\ 0 & \Delta t\\ 1 & 0\\ \end{bmatrix} \end{equation*}`
+
+The motion function expands to:
+
+:math:`\begin{equation*} \begin{bmatrix} x_{t+1} \\ y_{t+1} \\ \phi_{t+1} \\ v_{t+1} \end{bmatrix} = \begin{bmatrix} x_t + v_t\cos(\phi_t)\Delta t \\ y_t + v_t\sin(\phi_t)\Delta t \\ \phi_t + \omega_t \Delta t \\ v_t \end{bmatrix} \end{equation*}`
+
+:math:`\Delta t = 0.1` [s] is the time interval.
+
+
+Observation Model
+~~~~~~~~~~~~~~~~~
+
+The robot can get x-y position information from GPS.
+
+The GPS observation model is:
+
+.. math:: \mathbf{z}_{t} = h(\mathbf{x}_t) = \mathbf{H} \mathbf{x}_t
+
+where:
+
+:math:`\begin{equation*} \mathbf{H} = \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ \end{bmatrix} \end{equation*}`
+
+The observation function states that:
+
+:math:`\begin{equation*} \begin{bmatrix} x_{obs} \\ y_{obs} \end{bmatrix} = h(\mathbf{x}) = \begin{bmatrix} x \\ y \end{bmatrix} \end{equation*}`
+
+
+UKF Parameters
+~~~~~~~~~~~~~~
+
+The UKF uses three tuning parameters:
+
+- **ALPHA = 0.001**: Controls the spread of sigma points around the mean. Smaller values result in sigma points closer to the mean.
+- **BETA = 2**: Incorporates prior knowledge about the distribution. For Gaussian distributions, the optimal value is 2.
+- **KAPPA = 0**: Secondary scaling parameter. Usually set to 0 or :math:`3-n`, where :math:`n` is the state dimension.
+
+These parameters affect the calculation of :math:`\lambda = \alpha^2(n + \kappa) - n`, which determines
+the sigma point spread and weights.
+
+
+Advantages of UKF over EKF
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Unscented Kalman Filter offers several advantages over the Extended Kalman Filter:
+
+- **No Jacobian required**: UKF does not need to compute Jacobian matrices, which can be error-prone and computationally expensive for complex nonlinear systems
+- **Higher accuracy**: UKF captures the mean and covariance to the second order (third order for Gaussian distributions) while EKF only achieves first-order accuracy
+- **Better handling of nonlinearities**: The unscented transform provides a better approximation of the probability distribution after nonlinear transformation
+- **Easier implementation**: For highly nonlinear systems, UKF is often easier to implement since it doesn't require analytical derivatives
+- **Numerical stability**: UKF can be more numerically stable than EKF for strongly nonlinear systems
+
+
+Reference
~~~~~~~~~~~
- `Discriminatively Trained Unscented Kalman Filter for Mobile Robot Localization `_
+- `The Unscented Kalman Filter for Nonlinear Estimation `_
+- `PROBABILISTIC ROBOTICS `_
diff --git a/docs/modules/3_mapping/circle_fitting/circle_fitting_main.rst b/docs/modules/3_mapping/circle_fitting/circle_fitting_main.rst
index 1892d1f8f7..e243529a9c 100644
--- a/docs/modules/3_mapping/circle_fitting/circle_fitting_main.rst
+++ b/docs/modules/3_mapping/circle_fitting/circle_fitting_main.rst
@@ -11,3 +11,7 @@ The red crosses are observations from a ranging sensor.
The red circle is the estimated object shape using circle fitting.
+Code Link
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autofunction:: Mapping.circle_fitting.circle_fitting.circle_fitting
diff --git a/docs/modules/3_mapping/distance_map/distance_map_main.rst b/docs/modules/3_mapping/distance_map/distance_map_main.rst
index 0ef9e3022f..ec60e752c9 100644
--- a/docs/modules/3_mapping/distance_map/distance_map_main.rst
+++ b/docs/modules/3_mapping/distance_map/distance_map_main.rst
@@ -14,8 +14,8 @@ The algorithm is demonstrated on a simple 2D grid with obstacles:
.. image:: distance_map.png
-API
-~~~
+Code Link
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: Mapping.DistanceMap.distance_map.compute_sdf
diff --git a/docs/modules/3_mapping/gaussian_grid_map/gaussian_grid_map_main.rst b/docs/modules/3_mapping/gaussian_grid_map/gaussian_grid_map_main.rst
index b0f112a871..50033d2a20 100644
--- a/docs/modules/3_mapping/gaussian_grid_map/gaussian_grid_map_main.rst
+++ b/docs/modules/3_mapping/gaussian_grid_map/gaussian_grid_map_main.rst
@@ -6,3 +6,9 @@ Gaussian grid map
This is a 2D Gaussian grid mapping example.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/Mapping/gaussian_grid_map/animation.gif
+
+Code Link
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autofunction:: Mapping.gaussian_grid_map.gaussian_grid_map.generate_gaussian_grid_map
+
diff --git a/docs/modules/3_mapping/k_means_object_clustering/k_means_object_clustering_main.rst b/docs/modules/3_mapping/k_means_object_clustering/k_means_object_clustering_main.rst
index e098ca5409..0ece604009 100644
--- a/docs/modules/3_mapping/k_means_object_clustering/k_means_object_clustering_main.rst
+++ b/docs/modules/3_mapping/k_means_object_clustering/k_means_object_clustering_main.rst
@@ -4,3 +4,9 @@ k-means object clustering
This is a 2D object clustering with k-means algorithm.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/Mapping/kmeans_clustering/animation.gif
+
+Code Link
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autofunction:: Mapping.kmeans_clustering.kmeans_clustering.kmeans_clustering
+
diff --git a/docs/modules/3_mapping/lidar_to_grid_map_tutorial/lidar_to_grid_map_tutorial_main.rst b/docs/modules/3_mapping/lidar_to_grid_map_tutorial/lidar_to_grid_map_tutorial_main.rst
index 1f62179efd..c97d204a82 100644
--- a/docs/modules/3_mapping/lidar_to_grid_map_tutorial/lidar_to_grid_map_tutorial_main.rst
+++ b/docs/modules/3_mapping/lidar_to_grid_map_tutorial/lidar_to_grid_map_tutorial_main.rst
@@ -19,7 +19,7 @@ unknown (unobserved) areas, which are close to 0.5.
.. figure:: grid_map_example.png
In order to construct the grid map from the measurement we need to
-discretise the values. But, first let’s need to ``import`` some
+discretise the values. But, first we need to ``import`` some
necessary packages.
.. code:: ipython3
@@ -196,3 +196,9 @@ Let’s use this flood fill on real data:
.. image:: lidar_to_grid_map_tutorial_14_1.png
+Code Link
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autofunction:: Mapping.lidar_to_grid_map.lidar_to_grid_map.main
+
+
diff --git a/docs/modules/3_mapping/normal_vector_estimation/normal_vector_estimation_main.rst b/docs/modules/3_mapping/normal_vector_estimation/normal_vector_estimation_main.rst
index a4d1bf0df2..68a19e1534 100644
--- a/docs/modules/3_mapping/normal_vector_estimation/normal_vector_estimation_main.rst
+++ b/docs/modules/3_mapping/normal_vector_estimation/normal_vector_estimation_main.rst
@@ -25,8 +25,8 @@ This is an example of normal vector calculation:
.. figure:: normal_vector_calc.png
-API
-=====
+Code Link
+==========
.. autofunction:: Mapping.normal_vector_estimation.normal_vector_estimation.calc_normal_vector
@@ -67,8 +67,8 @@ This is an example of RANSAC based normal vector estimation:
.. figure:: ransac_normal_vector_estimation.png
-API
-=====
+Code Link
+==========
.. autofunction:: Mapping.normal_vector_estimation.normal_vector_estimation.ransac_normal_vector_estimation
diff --git a/docs/modules/3_mapping/point_cloud_sampling/point_cloud_sampling_main.rst b/docs/modules/3_mapping/point_cloud_sampling/point_cloud_sampling_main.rst
index cbb5652f56..8cb08d4b78 100644
--- a/docs/modules/3_mapping/point_cloud_sampling/point_cloud_sampling_main.rst
+++ b/docs/modules/3_mapping/point_cloud_sampling/point_cloud_sampling_main.rst
@@ -27,8 +27,8 @@ This method determines which grid each point is in, and replaces the point
clouds that are in the same Voxel with their average to reduce the number of
points.
-API
-=====
+Code Link
+==========
.. autofunction:: Mapping.point_cloud_sampling.point_cloud_sampling.voxel_point_sampling
@@ -61,8 +61,8 @@ Although this method does not have good performance comparing the Farthest
distance sample where each point is distributed farther from each other,
this is suitable for real-time processing because of its fast computation time.
-API
-=====
+Code Link
+==========
.. autofunction:: Mapping.point_cloud_sampling.point_cloud_sampling.poisson_disk_sampling
diff --git a/docs/modules/3_mapping/ray_casting_grid_map/ray_casting_grid_map_main.rst b/docs/modules/3_mapping/ray_casting_grid_map/ray_casting_grid_map_main.rst
index cc5a1a1c5b..bee2f64193 100644
--- a/docs/modules/3_mapping/ray_casting_grid_map/ray_casting_grid_map_main.rst
+++ b/docs/modules/3_mapping/ray_casting_grid_map/ray_casting_grid_map_main.rst
@@ -3,4 +3,9 @@ Ray casting grid map
This is a 2D ray casting grid mapping example.
-.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/Mapping/raycasting_grid_map/animation.gif
\ No newline at end of file
+.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/Mapping/raycasting_grid_map/animation.gif
+
+Code Link
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autofunction:: Mapping.ray_casting_grid_map.ray_casting_grid_map.generate_ray_casting_grid_map
diff --git a/docs/modules/3_mapping/rectangle_fitting/rectangle_fitting_main.rst b/docs/modules/3_mapping/rectangle_fitting/rectangle_fitting_main.rst
index b6ced1dc1d..06d85efe61 100644
--- a/docs/modules/3_mapping/rectangle_fitting/rectangle_fitting_main.rst
+++ b/docs/modules/3_mapping/rectangle_fitting/rectangle_fitting_main.rst
@@ -57,8 +57,8 @@ This evaluation function uses the squared distances between the edges of the rec
Calculating the squared error is the same as calculating the variance.
The smaller this variance, the more it signifies that the points fit within the rectangle.
-API
-~~~~~~
+Code Link
+~~~~~~~~~~~
.. autoclass:: Mapping.rectangle_fitting.rectangle_fitting.LShapeFitting
:members:
diff --git a/docs/modules/4_slam/FastSLAM1/FastSLAM1_main.rst b/docs/modules/4_slam/FastSLAM1/FastSLAM1_main.rst
index a2aa521216..296f1766de 100644
--- a/docs/modules/4_slam/FastSLAM1/FastSLAM1_main.rst
+++ b/docs/modules/4_slam/FastSLAM1/FastSLAM1_main.rst
@@ -22,6 +22,11 @@ The red points are particles of FastSLAM.
Black points are landmarks, blue crosses are estimated landmark
positions by FastSLAM.
+Code Link
+~~~~~~~~~~~~
+
+.. autofunction:: SLAM.FastSLAM1.fast_slam1.fast_slam1
+
Introduction
~~~~~~~~~~~~
@@ -107,7 +112,7 @@ will converge to the correct estimate.
MAX_RANGE = 20.0 # maximum observation range
M_DIST_TH = 2.0 # Threshold of Mahalanobis distance for data association.
STATE_SIZE = 3 # State size [x,y,yaw]
- LM_SIZE = 2 # LM srate size [x,y]
+ LM_SIZE = 2 # LM state size [x,y]
N_PARTICLE = 100 # number of particle
NTH = N_PARTICLE / 1.5 # Number of particle for re-sampling
@@ -404,7 +409,7 @@ probably will die out.
3- Resampling
~~~~~~~~~~~~~
-In the reseampling steps a new set of particles are chosen from the old
+In the resampling steps a new set of particles are chosen from the old
set. This is done according to the weight of each particle.
The figure shows 100 particles distributed uniformly between [-0.5, 0.5]
diff --git a/docs/modules/4_slam/FastSLAM2/FastSLAM2_main.rst b/docs/modules/4_slam/FastSLAM2/FastSLAM2_main.rst
index 9e79b496a3..59ed3b9f75 100644
--- a/docs/modules/4_slam/FastSLAM2/FastSLAM2_main.rst
+++ b/docs/modules/4_slam/FastSLAM2/FastSLAM2_main.rst
@@ -7,6 +7,12 @@ The animation has the same meanings as one of FastSLAM 1.0.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/SLAM/FastSLAM2/animation.gif
+Code Link
+~~~~~~~~~~~
+
+.. autofunction:: SLAM.FastSLAM2.fast_slam2.fast_slam2
+
+
References
~~~~~~~~~~
diff --git a/docs/modules/4_slam/ekf_slam/ekf_slam_main.rst b/docs/modules/4_slam/ekf_slam/ekf_slam_main.rst
index b27971225e..05bd636ef5 100644
--- a/docs/modules/4_slam/ekf_slam/ekf_slam_main.rst
+++ b/docs/modules/4_slam/ekf_slam/ekf_slam_main.rst
@@ -21,12 +21,18 @@ This is a simulation of EKF SLAM.
- Blue line: ground truth
- Red line: EKF SLAM position estimation
+Code Link
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: SLAM.EKFSLAM.ekf_slam.ekf_slam
+
+
Introduction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
EKF SLAM models the SLAM problem in a single EKF where the modeled state
is both the pose :math:`(x, y, \theta)` and an array of landmarks
-:math:`[(x_1, y_1), (x_2, x_y), ... , (x_n, y_n)]` for :math:`n`
+:math:`[(x_1, y_1), (x_2, y_2), ... , (x_n, y_n)]` for :math:`n`
landmarks. The covariance between each of the positions and landmarks
are also tracked.
@@ -578,7 +584,7 @@ reckoning and control functions are passed along here as well.
.. image:: ekf_slam_1_0.png
-References:
+Reference
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- `PROBABILISTIC ROBOTICS `_
diff --git a/docs/modules/4_slam/graph_slam/graphSLAM_doc.rst b/docs/modules/4_slam/graph_slam/graphSLAM_doc.rst
index 5297604809..3d329ae131 100644
--- a/docs/modules/4_slam/graph_slam/graphSLAM_doc.rst
+++ b/docs/modules/4_slam/graph_slam/graphSLAM_doc.rst
@@ -28,7 +28,7 @@ available that uses graph-based approaches to perform online estimation
or to solve for a subset of the poses.
GraphSLAM uses the motion information as well as the observations of the
-environment to create least square problem that can be solved using
+environment to create a least squares problem that can be solved using
standard optimization techniques.
Minimal Example
@@ -331,7 +331,7 @@ between node 0 and 1 will be created. The equations for the error is as follows:
:math:`e_{ij}^y = y_j + d_j sin(\psi_j + \theta_j) - y_i - d_i sin(\psi_i + \theta_i)`
-:math:`e_{ij}^x = \psi_j + \theta_j - \psi_i - \theta_i`
+:math:`e_{ij}^{\psi} = \psi_j + \theta_j - \psi_i - \theta_i`
Where :math:`[x_i, y_i, \psi_i]` is the pose for node :math:`i` and
similarly for node :math:`j`, :math:`d` is the measured distance at
diff --git a/docs/modules/4_slam/graph_slam/graph_slam_main.rst b/docs/modules/4_slam/graph_slam/graph_slam_main.rst
index 987eed385c..2ef17e4179 100644
--- a/docs/modules/4_slam/graph_slam/graph_slam_main.rst
+++ b/docs/modules/4_slam/graph_slam/graph_slam_main.rst
@@ -13,11 +13,17 @@ The black stars are landmarks for graph edge generation.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/SLAM/GraphBasedSLAM/animation.gif
+Code Link
+~~~~~~~~~~~~
+
+.. autofunction:: SLAM.GraphBasedSLAM.graph_based_slam.graph_based_slam
+
+
.. include:: graphSLAM_doc.rst
.. include:: graphSLAM_formulation.rst
.. include:: graphSLAM_SE2_example.rst
-References:
+Reference
~~~~~~~~~~~
- `A Tutorial on Graph-Based SLAM `_
diff --git a/docs/modules/4_slam/iterative_closest_point_matching/iterative_closest_point_matching_main.rst b/docs/modules/4_slam/iterative_closest_point_matching/iterative_closest_point_matching_main.rst
index a30b1fc99b..772fe62889 100644
--- a/docs/modules/4_slam/iterative_closest_point_matching/iterative_closest_point_matching_main.rst
+++ b/docs/modules/4_slam/iterative_closest_point_matching/iterative_closest_point_matching_main.rst
@@ -10,7 +10,13 @@ points to points.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/SLAM/iterative_closest_point/animation.gif
+Code Link
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autofunction:: SLAM.ICPMatching.icp_matching.icp_matching
+
+
References
-~~~~~~~~~~
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- `Introduction to Mobile Robotics: Iterative Closest Point Algorithm `_
diff --git a/docs/modules/5_path_planning/bezier_path/bezier_path_main.rst b/docs/modules/5_path_planning/bezier_path/bezier_path_main.rst
index d306a85352..19fb89a1b1 100644
--- a/docs/modules/5_path_planning/bezier_path/bezier_path_main.rst
+++ b/docs/modules/5_path_planning/bezier_path/bezier_path_main.rst
@@ -13,8 +13,15 @@ You can get different Bezier course:
.. image:: Figure_2.png
-Ref:
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: PathPlanning.BezierPath.bezier_path.calc_4points_bezier_path
+
+
+Reference
+~~~~~~~~~~~~~~~
- `Continuous Curvature Path Generation Based on Bezier Curves for
Autonomous
- Vehicles `
+ Vehicles `__
diff --git a/docs/modules/5_path_planning/bspline_path/bspline_path_main.rst b/docs/modules/5_path_planning/bspline_path/bspline_path_main.rst
index e734352e34..00e5ef2fdb 100644
--- a/docs/modules/5_path_planning/bspline_path/bspline_path_main.rst
+++ b/docs/modules/5_path_planning/bspline_path/bspline_path_main.rst
@@ -105,8 +105,8 @@ The default spline degree is 3, so curvature changes smoothly.
.. image:: interp_and_curvature.png
-API
-++++
+Code Link
+++++++++++
.. autofunction:: PathPlanning.BSplinePath.bspline_path.interpolate_b_spline_path
@@ -133,8 +133,8 @@ The default spline degree is 3, so curvature changes smoothly.
.. image:: approx_and_curvature.png
-API
-++++
+Code Link
+++++++++++
.. autofunction:: PathPlanning.BSplinePath.bspline_path.approximate_b_spline_path
diff --git a/docs/modules/5_path_planning/bugplanner/bugplanner_main.rst b/docs/modules/5_path_planning/bugplanner/bugplanner_main.rst
index 81c91c0313..e1cd2fe353 100644
--- a/docs/modules/5_path_planning/bugplanner/bugplanner_main.rst
+++ b/docs/modules/5_path_planning/bugplanner/bugplanner_main.rst
@@ -5,4 +5,12 @@ This is a 2D planning with Bug algorithm.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/BugPlanner/animation.gif
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: PathPlanning.BugPlanning.bug.main
+
+Reference
+~~~~~~~~~~~~
+
- `ECE452 Bug Algorithms `_
diff --git a/docs/modules/5_path_planning/catmull_rom_spline/catmull_rom_spline_main.rst b/docs/modules/5_path_planning/catmull_rom_spline/catmull_rom_spline_main.rst
index 72e558c486..fa2a2ff72b 100644
--- a/docs/modules/5_path_planning/catmull_rom_spline/catmull_rom_spline_main.rst
+++ b/docs/modules/5_path_planning/catmull_rom_spline/catmull_rom_spline_main.rst
@@ -88,8 +88,8 @@ Catmull-Rom Spline API
This section provides an overview of the functions used for Catmull-Rom spline path planning.
-API
-++++
+Code Link
+++++++++++
.. autofunction:: PathPlanning.Catmull_RomSplinePath.catmull_rom_spline_path.catmull_rom_point
diff --git a/docs/modules/5_path_planning/clothoid_path/clothoid_path_main.rst b/docs/modules/5_path_planning/clothoid_path/clothoid_path_main.rst
index 1e8cee694a..16d0ec03c1 100644
--- a/docs/modules/5_path_planning/clothoid_path/clothoid_path_main.rst
+++ b/docs/modules/5_path_planning/clothoid_path/clothoid_path_main.rst
@@ -73,6 +73,11 @@ The final clothoid path can be calculated with the path parameters and Fresnel i
&y(s)=y_{0}+\int_{0}^{s} \sin \left(\frac{1}{2} \kappa^{\prime} \tau^{2}+\kappa \tau+\vartheta_{0}\right) \mathrm{d} \tau
\end{aligned}
+Code Link
+~~~~~~~~~~~~~
+
+.. autofunction:: PathPlanning.ClothoidPath.clothoid_path_planner.generate_clothoid_path
+
References
~~~~~~~~~~
diff --git a/docs/modules/5_path_planning/coverage_path/coverage_path_main.rst b/docs/modules/5_path_planning/coverage_path/coverage_path_main.rst
index 0a688b5ed2..eaa876c80b 100644
--- a/docs/modules/5_path_planning/coverage_path/coverage_path_main.rst
+++ b/docs/modules/5_path_planning/coverage_path/coverage_path_main.rst
@@ -8,6 +8,11 @@ This is a 2D grid based sweep coverage path planner simulation:
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/GridBasedSweepCPP/animation.gif
+Code Link
++++++++++++++
+
+.. autofunction:: PathPlanning.GridBasedSweepCPP.grid_based_sweep_coverage_path_planner.planning
+
Spiral Spanning Tree
~~~~~~~~~~~~~~~~~~~~
@@ -17,6 +22,14 @@ This is a 2D grid based spiral spanning tree coverage path planner simulation:
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/SpiralSpanningTreeCPP/animation2.gif
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/SpiralSpanningTreeCPP/animation3.gif
+Code Link
++++++++++++++
+
+.. autofunction:: PathPlanning.SpiralSpanningTreeCPP.spiral_spanning_tree_coverage_path_planner.main
+
+Reference
++++++++++++++
+
- `Spiral-STC: An On-Line Coverage Algorithm of Grid Environments by a Mobile Robot `_
@@ -29,6 +42,14 @@ This is a 2D grid based wavefront coverage path planner simulation:
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/WavefrontCPP/animation2.gif
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/WavefrontCPP/animation3.gif
+Code Link
++++++++++++++
+
+.. autofunction:: PathPlanning.WavefrontCPP.wavefront_coverage_path_planner.wavefront
+
+Reference
++++++++++++++
+
- `Planning paths of complete coverage of an unstructured environment by a mobile robot `_
diff --git a/docs/modules/5_path_planning/cubic_spline/cubic_spline_main.rst b/docs/modules/5_path_planning/cubic_spline/cubic_spline_main.rst
index 33471f7c85..a110217a2e 100644
--- a/docs/modules/5_path_planning/cubic_spline/cubic_spline_main.rst
+++ b/docs/modules/5_path_planning/cubic_spline/cubic_spline_main.rst
@@ -171,8 +171,8 @@ the second derivative by:
These equations can be calculated by differentiating the cubic polynomial.
-API
-===
+Code Link
+==========
This is the 1D cubic spline class API:
@@ -199,8 +199,8 @@ Curvature of each point can be also calculated analytically by:
:math:`\kappa=\frac{y^{\prime \prime} x^{\prime}-x^{\prime \prime} y^{\prime}}{\left(x^{\prime2}+y^{\prime2}\right)^{\frac{2}{3}}}`
-API
-===
+Code Link
+==========
.. autoclass:: PathPlanning.CubicSpline.cubic_spline_planner.CubicSpline2D
:members:
diff --git a/docs/modules/5_path_planning/dubins_path/dubins_path_main.rst b/docs/modules/5_path_planning/dubins_path/dubins_path_main.rst
index 12172fb51e..74cb757886 100644
--- a/docs/modules/5_path_planning/dubins_path/dubins_path_main.rst
+++ b/docs/modules/5_path_planning/dubins_path/dubins_path_main.rst
@@ -13,7 +13,7 @@ It can generate a shortest path between two 2D poses (x, y, yaw) with maximum c
Generated paths consist of 3 segments of maximum curvature curves or a straight line segment.
-Each segment type can is categorized by 3 type: 'Right turn (R)' , 'Left turn (L)', and 'Straight (S).'
+Each segment type can be categorized by 3 types: 'Right turn (R)' , 'Left turn (L)', and 'Straight (S).'
Possible path will be at least one of these six types: RSR, RSL, LSR, LSL, RLR, LRL.
@@ -62,7 +62,7 @@ You can generate a path from these information and the maximum curvature informa
A path type which has minimum course length among 6 types is selected,
and then a path is constructed based on the selected type and its distances.
-API
+Code Link
~~~~~~~~~~~~~~~~~~~~
.. autofunction:: PathPlanning.DubinsPath.dubins_path_planner.plan_dubins_path
diff --git a/docs/modules/5_path_planning/dynamic_window_approach/dynamic_window_approach_main.rst b/docs/modules/5_path_planning/dynamic_window_approach/dynamic_window_approach_main.rst
index d645838597..ac5322df94 100644
--- a/docs/modules/5_path_planning/dynamic_window_approach/dynamic_window_approach_main.rst
+++ b/docs/modules/5_path_planning/dynamic_window_approach/dynamic_window_approach_main.rst
@@ -5,7 +5,17 @@ Dynamic Window Approach
This is a 2D navigation sample code with Dynamic Window Approach.
+.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/DynamicWindowApproach/animation.gif
+
+Code Link
++++++++++++++
+
+.. autofunction:: PathPlanning.DynamicWindowApproach.dynamic_window_approach.dwa_control
+
+
+Reference
+~~~~~~~~~~~~
+
- `The Dynamic Window Approach to Collision
Avoidance `__
-.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/DynamicWindowApproach/animation.gif
diff --git a/docs/modules/5_path_planning/elastic_bands/elastic_bands_main.rst b/docs/modules/5_path_planning/elastic_bands/elastic_bands_main.rst
index 8a3e517105..e37cbd4e77 100644
--- a/docs/modules/5_path_planning/elastic_bands/elastic_bands_main.rst
+++ b/docs/modules/5_path_planning/elastic_bands/elastic_bands_main.rst
@@ -5,6 +5,11 @@ This is a path planning with Elastic Bands.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/ElasticBands/animation.gif
+Code Link
++++++++++++++
+
+.. autoclass:: PathPlanning.ElasticBands.elastic_bands.ElasticBands
+
Core Concept
~~~~~~~~~~~~
@@ -62,13 +67,13 @@ Dynamic Path Maintenance
.. math::
b_i^{\text{new}} = b_i^{\text{old}} + \alpha (f_c + f_r),
- where :math:`\alpha` is a step-size parameter, which often proportional to :math:`\rho(b_i^{\text{old}})`
+ where :math:`\alpha` is a step-size parameter, which is often proportional to :math:`\rho(b_i^{\text{old}})`
2. **Overlap Enforcement**:
- Insert new nodes if adjacent nodes are too far apart
- Remove redundant nodes if adjacent nodes are too close
References
-~~~~~~~~~~~~~~~~~~~~~~~
++++++++++++++
- `Elastic Bands: Connecting Path Planning and Control `__
diff --git a/docs/modules/5_path_planning/eta3_spline/eta3_spline_main.rst b/docs/modules/5_path_planning/eta3_spline/eta3_spline_main.rst
index ffc3cc6451..9ee343e8a7 100644
--- a/docs/modules/5_path_planning/eta3_spline/eta3_spline_main.rst
+++ b/docs/modules/5_path_planning/eta3_spline/eta3_spline_main.rst
@@ -7,7 +7,14 @@ Eta^3 Spline path planning
This is a path planning with Eta^3 spline.
-Ref:
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autoclass:: PathPlanning.Eta3SplineTrajectory.eta3_spline_trajectory.Eta3SplineTrajectory
+
+
+Reference
+~~~~~~~~~~~~~~~
- `\\eta^3-Splines for the Smooth Path Generation of Wheeled Mobile
Robots `__
diff --git a/docs/modules/5_path_planning/frenet_frame_path/frenet_frame_path_main.rst b/docs/modules/5_path_planning/frenet_frame_path/frenet_frame_path_main.rst
index 38efaf2b53..0f84d381ea 100644
--- a/docs/modules/5_path_planning/frenet_frame_path/frenet_frame_path_main.rst
+++ b/docs/modules/5_path_planning/frenet_frame_path/frenet_frame_path_main.rst
@@ -7,6 +7,12 @@ The cyan line is the target course and black crosses are obstacles.
The red line is predicted path.
+Code Link
+~~~~~~~~~~~~~~
+
+.. autofunction:: PathPlanning.FrenetOptimalTrajectory.frenet_optimal_trajectory.main
+
+
High Speed and Velocity Keeping Scenario
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -35,7 +41,7 @@ Low Speed and Merging and Stopping Scenario
This scenario illustrates the trajectory planning at low speeds with merging and stopping behaviors.
-Ref:
+Reference
- `Optimal Trajectory Generation for Dynamic Street Scenarios in a
Frenet
diff --git a/docs/modules/5_path_planning/grid_base_search/grid_base_search_main.rst b/docs/modules/5_path_planning/grid_base_search/grid_base_search_main.rst
index 1644ed00cc..b7450107b6 100644
--- a/docs/modules/5_path_planning/grid_base_search/grid_base_search_main.rst
+++ b/docs/modules/5_path_planning/grid_base_search/grid_base_search_main.rst
@@ -10,6 +10,12 @@ This is a 2D grid based path planning with Breadth first search algorithm.
In the animation, cyan points are searched nodes.
+Code Link
++++++++++++++
+
+.. autofunction:: PathPlanning.BreadthFirstSearch.breadth_first_search.BreadthFirstSearchPlanner
+
+
Depth First Search
~~~~~~~~~~~~~~~~~~~~
@@ -19,6 +25,12 @@ This is a 2D grid based path planning with Depth first search algorithm.
In the animation, cyan points are searched nodes.
+Code Link
++++++++++++++
+
+.. autofunction:: PathPlanning.DepthFirstSearch.depth_first_search.DepthFirstSearchPlanner
+
+
.. _dijkstra:
Dijkstra algorithm
@@ -30,6 +42,12 @@ This is a 2D grid based shortest path planning with Dijkstra's algorithm.
In the animation, cyan points are searched nodes.
+Code Link
++++++++++++++
+
+.. autofunction:: PathPlanning.Dijkstra.dijkstra.DijkstraPlanner
+
+
.. _a*-algorithm:
A\* algorithm
@@ -43,6 +61,12 @@ In the animation, cyan points are searched nodes.
Its heuristic is 2D Euclid distance.
+Code Link
++++++++++++++
+
+.. autofunction:: PathPlanning.AStar.a_star.AStarPlanner
+
+
Bidirectional A\* algorithm
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -52,6 +76,41 @@ This is a 2D grid based shortest path planning with bidirectional A star algorit
In the animation, cyan points are searched nodes.
+Code Link
++++++++++++++
+
+.. autofunction:: PathPlanning.BidirectionalAStar.bidirectional_a_star.BidirectionalAStarPlanner
+
+
+.. _Theta*-algorithm:
+
+Theta\* algorithm
+~~~~~~~~~~~~~~~~~
+
+This is a 2D grid based shortest path planning with Theta star algorithm.
+
+It offers an optimization over the A star algorithm to generate any-angle paths. Unlike A star, which always assigns the current node as the parent of the successor, Theta star checks for a line-of-sight (unblocked path) from an earlier node (typically the parent of the current node) to the successor, and directly assigns it as a parent if visible. This reduces cost by replacing staggered segments with straight lines.
+
+Checking for line of sight involves verifying that no obstacles block the straight line between two nodes. On a grid, this means identifying all the discrete cells that the line passes through to determine if they are empty. Bresenham’s line algorithm is commonly used for this purpose. Starting from one endpoint, it incrementally steps along one axis, while considering the gradient to determine the position on the other axis. Because it relies only on integer addition and subtraction, it is both efficient and precise for grid-based visibility checks in Theta star.
+
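+As an illustrative sketch (not necessarily the planner's exact implementation),
+a Bresenham-based line-of-sight check on an occupancy grid might look like this:
+
+.. code-block:: python
+
+    def bresenham(x0, y0, x1, y1):
+        """Integer grid cells visited by the line from (x0, y0) to (x1, y1)."""
+        cells = []
+        dx, dy = abs(x1 - x0), abs(y1 - y0)
+        sx = 1 if x0 < x1 else -1
+        sy = 1 if y0 < y1 else -1
+        err = dx - dy
+        while True:
+            cells.append((x0, y0))
+            if (x0, y0) == (x1, y1):
+                break
+            e2 = 2 * err
+            if e2 > -dy:
+                err -= dy
+                x0 += sx
+            if e2 < dx:
+                err += dx
+                y0 += sy
+        return cells
+
+    def line_of_sight(grid, a, b):
+        """True if no traversed cell is an obstacle (1 marks an obstacle here)."""
+        return all(grid[x][y] == 0 for x, y in bresenham(*a, *b))
+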
+As a result, Theta star produces shorter, smoother paths than A star, ideal for ground or aerial robots operating in continuous environments where smoother motion enables higher acceleration and reduced travel time.
+
+.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/ThetaStar/animation.gif
+
+In the animation, each cyan arrow represents a node pointing to its parent.
+
+Reference
+++++++++++++
+
+- `Theta*: Any-Angle Path Planning on Grids `__
+- `Bresenham's line algorithm `__
+
+Code Link
++++++++++++++
+
+.. autofunction:: PathPlanning.ThetaStar.theta_star.ThetaStarPlanner
+
+
.. _D*-algorithm:
D\* algorithm
@@ -63,7 +122,14 @@ This is a 2D grid based shortest path planning with D star algorithm.
The animation shows a robot finding its path avoiding an obstacle using the D* search algorithm.
-Ref:
+Code Link
++++++++++++++
+
+.. autoclass:: PathPlanning.DStar.dstar.Dstar
+
+
+Reference
+++++++++++++
- `D* search Wikipedia `__
@@ -74,7 +140,13 @@ This is a 2D grid based path planning and replanning with D star lite algorithm.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/DStarLite/animation.gif
-Ref:
+Code Link
++++++++++++++
+
+.. autoclass:: PathPlanning.DStarLite.d_star_lite.DStarLite
+
+Reference
+++++++++++++
- `Improved Fast Replanning for Robot Navigation in Unknown Terrain `_
@@ -88,7 +160,14 @@ This is a 2D grid based path planning with Potential Field algorithm.
In the animation, the blue heat map shows potential value on each grid.
-Ref:
+Code Link
++++++++++++++
+
+.. autofunction:: PathPlanning.PotentialFieldPlanning.potential_field_planning.potential_field_planning
+
+
+Reference
+++++++++++++
- `Robotic Motion Planning:Potential
Functions `__
diff --git a/docs/modules/5_path_planning/hybridastar/hybridastar_main.rst b/docs/modules/5_path_planning/hybridastar/hybridastar_main.rst
index a9876fa3d4..36f340e0c2 100644
--- a/docs/modules/5_path_planning/hybridastar/hybridastar_main.rst
+++ b/docs/modules/5_path_planning/hybridastar/hybridastar_main.rst
@@ -4,3 +4,8 @@ Hybrid a star
This is a simple vehicle model based hybrid A\* path planner.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/HybridAStar/animation.gif
+
+Code Link
++++++++++++++
+
+.. autofunction:: PathPlanning.HybridAStar.hybrid_a_star.hybrid_a_star_planning
diff --git a/docs/modules/5_path_planning/lqr_path/lqr_path_main.rst b/docs/modules/5_path_planning/lqr_path/lqr_path_main.rst
index 788442ff63..1eb1e4d840 100644
--- a/docs/modules/5_path_planning/lqr_path/lqr_path_main.rst
+++ b/docs/modules/5_path_planning/lqr_path/lqr_path_main.rst
@@ -4,3 +4,8 @@ LQR based path planning
A sample code using LQR based path planning for double integrator model.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/LQRPlanner/animation.gif?raw=true
+
+Code Link
++++++++++++++
+
+.. autoclass:: PathPlanning.LQRPlanner.lqr_planner.LQRPlanner
diff --git a/docs/modules/5_path_planning/model_predictive_trajectory_generator/model_predictive_trajectory_generator_main.rst b/docs/modules/5_path_planning/model_predictive_trajectory_generator/model_predictive_trajectory_generator_main.rst
index 463363ddcf..76472a6792 100644
--- a/docs/modules/5_path_planning/model_predictive_trajectory_generator/model_predictive_trajectory_generator_main.rst
+++ b/docs/modules/5_path_planning/model_predictive_trajectory_generator/model_predictive_trajectory_generator_main.rst
@@ -6,6 +6,12 @@ generator.
This algorithm is used for state lattice planner.
+Code Link
+~~~~~~~~~~~~~
+
+.. autofunction:: PathPlanning.ModelPredictiveTrajectoryGenerator.trajectory_generator.optimize_trajectory
+
+
Path optimization sample
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -16,7 +22,8 @@ Lookup table generation sample
.. image:: lookup_table.png
-Ref:
+Reference
+~~~~~~~~~~~~
- `Optimal rough terrain trajectory generation for wheeled mobile
robots `__
diff --git a/docs/modules/5_path_planning/particleSwarmOptimization/particleSwarmOptimization_main.rst b/docs/modules/5_path_planning/particleSwarmOptimization/particleSwarmOptimization_main.rst
new file mode 100644
index 0000000000..b430faec1e
--- /dev/null
+++ b/docs/modules/5_path_planning/particleSwarmOptimization/particleSwarmOptimization_main.rst
@@ -0,0 +1,153 @@
+.. _particle_swarm_optimization:
+
+Particle Swarm Optimization Path Planning
+------------------------------------------
+
+This is a 2D path planning simulation using the Particle Swarm Optimization algorithm.
+
+PSO is a metaheuristic optimization algorithm inspired by the social behavior of bird flocking or fish schooling. In path planning, particles represent potential solutions that explore the search space to find collision-free paths from start to goal.
+
+.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/ParticleSwarmOptimization/animation.gif
+
+Algorithm Overview
+++++++++++++++++++
+
+The PSO algorithm maintains a swarm of particles that move through the search space according to simple mathematical rules:
+
+1. **Initialization**: Particles are randomly distributed near the start position
+2. **Evaluation**: Each particle's fitness is calculated based on distance to goal and obstacle penalties
+3. **Update**: Particles adjust their velocities based on:
+
+   - Personal best position (cognitive component)
+   - Global best position (social component)
+   - Current velocity (inertia component)
+
+4. **Movement**: Particles move to new positions and check for collisions
+5. **Convergence**: Process repeats until maximum iterations or goal is reached
+
+Mathematical Foundation
++++++++++++++++++++++++
+
+The core PSO velocity update equation is:
+
+.. math::
+
+ v_{i}(t+1) = w \cdot v_{i}(t) + c_1 \cdot r_1 \cdot (p_{i} - x_{i}(t)) + c_2 \cdot r_2 \cdot (g - x_{i}(t))
+
+Where:
+
+- :math:`v_{i}(t)` = velocity of particle i at time t
+- :math:`x_{i}(t)` = position of particle i at time t
+- :math:`w` = inertia weight (controls exploration vs exploitation)
+- :math:`c_1` = cognitive coefficient (attraction to personal best)
+- :math:`c_2` = social coefficient (attraction to global best)
+- :math:`r_1, r_2` = random numbers in [0,1]
+- :math:`p_{i}` = personal best position of particle i
+- :math:`g` = global best position
+
+Position update:
+
+.. math::
+
+ x_{i}(t+1) = x_{i}(t) + v_{i}(t+1)
+
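+As a rough illustration, both updates map directly onto a few lines of NumPy (a sketch only; the array shapes and parameter defaults are assumptions, not this module's interface):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def pso_step(x, v, p_best, g_best, w=0.7, c1=1.5, c2=1.5, rng=None):
+        """One PSO iteration; x, v, p_best are (n_particles, dim) arrays."""
+        rng = rng or np.random.default_rng()
+        r1 = rng.random(x.shape)   # cognitive randomness
+        r2 = rng.random(x.shape)   # social randomness
+        v = w * v + c1 * r1 * (p_best - x) + c2 * r2 * (g_best - x)
+        return x + v, v            # new positions and velocities
+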
+Fitness Function
+++++++++++++++++
+
+The fitness function combines distance to target with obstacle penalties:
+
+.. math::
+
+ f(x) = ||x - x_{goal}|| + \sum_{j} P_{obs}(x, O_j)
+
+Where:
+
+- :math:`||x - x_{goal}||` = Euclidean distance to goal
+- :math:`P_{obs}(x, O_j)` = penalty for obstacle j
+- :math:`O_j` = obstacle j with position and radius
+
+The obstacle penalty function is defined as:
+
+.. math::
+
+ P_{obs}(x, O_j) = \begin{cases}
+ 1000 & \text{if } ||x - O_j|| < r_j \text{ (inside obstacle)} \\
+ \frac{50}{||x - O_j|| - r_j + 0.1} & \text{if } r_j \leq ||x - O_j|| < r_j + R_{influence} \text{ (near obstacle)} \\
+ 0 & \text{if } ||x - O_j|| \geq r_j + R_{influence} \text{ (safe distance)}
+ \end{cases}
+
+Where:
+
+- :math:`r_j` = radius of obstacle j
+- :math:`R_{influence}` = influence radius (typically 5 units)
+
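+A direct transcription of this fitness function might look as follows (the constants 1000, 50, and 0.1 mirror the equations above; the obstacle container is an assumption):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def fitness(x, goal, obstacles, r_influence=5.0):
+        """Distance to goal plus the piecewise obstacle penalty defined above."""
+        cost = float(np.linalg.norm(x - goal))
+        for center, radius in obstacles:    # obstacles: [(center, radius), ...]
+            d = float(np.linalg.norm(x - center))
+            if d < radius:                  # inside the obstacle
+                cost += 1000.0
+            elif d < radius + r_influence:  # near the obstacle
+                cost += 50.0 / (d - radius + 0.1)
+        return cost
+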
+Collision Detection
++++++++++++++++++++
+
+Line-circle intersection is used to detect collisions between particle paths and circular obstacles:
+
+.. math::
+
+ ||P_0 + t \cdot \vec{d} - C|| = r
+
+Where:
+
+- :math:`P_0` = start point of path segment
+- :math:`\vec{d}` = direction vector of path
+- :math:`C` = obstacle center
+- :math:`r` = obstacle radius
+- :math:`t \in [0,1]` = parameter along line segment
+
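+Substituting the parametric point into the circle equation yields a quadratic in :math:`t`; below is a sketch of the resulting check (illustrative names, NumPy vectors):
+
+.. code-block:: python
+
+    import numpy as np
+
+    def segment_hits_circle(p0, p1, center, radius):
+        """True if the segment p0 -> p1 intersects or starts inside the circle."""
+        d = np.asarray(p1) - np.asarray(p0)      # direction vector
+        f = np.asarray(p0) - np.asarray(center)
+        a = float(d @ d)
+        b = 2.0 * float(f @ d)
+        c = float(f @ f) - radius ** 2
+        if a == 0.0:                             # degenerate zero-length segment
+            return c <= 0.0
+        disc = b * b - 4.0 * a * c
+        if disc < 0.0:                           # the infinite line misses the circle
+            return False
+        t1 = (-b - disc ** 0.5) / (2.0 * a)
+        t2 = (-b + disc ** 0.5) / (2.0 * a)
+        # hit if a root lies in [0, 1], or the segment starts between the roots
+        return 0.0 <= t1 <= 1.0 or 0.0 <= t2 <= 1.0 or t1 < 0.0 < t2
+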
+Algorithm Parameters
+++++++++++++++++++++
+
+Key parameters affecting performance:
+
+- **Number of particles** (n_particles): More particles = better exploration but slower
+- **Maximum iterations** (max_iter): More iterations = better convergence but slower
+- **Inertia weight** (w): High = exploration, Low = exploitation
+- **Cognitive coefficient** (c1): Attraction to personal best
+- **Social coefficient** (c2): Attraction to global best
+
+Typical values:
+
+- n_particles: 20-50
+- max_iter: 100-300
+- w: 0.9 → 0.4 (linearly decreasing)
+- c1, c2: 1.5-2.0
+
+Advantages
+++++++++++
+
+- **Global optimization**: Can escape local minima unlike gradient-based methods
+- **No derivatives needed**: Works with non-differentiable fitness landscapes
+- **Parallel exploration**: Multiple particles search simultaneously
+- **Simple implementation**: Few parameters and straightforward logic
+- **Flexible**: Easily adaptable to different environments and constraints
+
+Disadvantages
++++++++++++++
+
+- **Stochastic**: Results may vary between runs
+- **Parameter sensitive**: Performance heavily depends on parameter tuning
+- **No optimality guarantee**: Metaheuristic without convergence proof
+- **Computational cost**: Requires many fitness evaluations
+- **Prone to stagnation**: the swarm can converge prematurely and become trapped in a local minimum when exploration is insufficient
+
+Code Link
++++++++++
+
+.. autofunction:: PathPlanning.ParticleSwarmOptimization.particle_swarm_optimization.main
+
+Usage Example
++++++++++++++
+
+.. code-block:: python
+
+ import matplotlib.pyplot as plt
+ from PathPlanning.ParticleSwarmOptimization.particle_swarm_optimization import main
+
+ # Run PSO path planning with visualization
+ main()
+
+References
+++++++++++
+
+- `Particle swarm optimization - Wikipedia `__
+- Kennedy, J.; Eberhart, R. (1995). "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942–1948.
+- Shi, Y.; Eberhart, R. (1998). "A Modified Particle Swarm Optimizer". IEEE International Conference on Evolutionary Computation.
+- `A Gentle Introduction to Particle Swarm Optimization `__
+- Clerc, M.; Kennedy, J. (2002). "The particle swarm - explosion, stability, and convergence in a multidimensional complex space". IEEE Transactions on Evolutionary Computation. 6 (1): 58–73.
\ No newline at end of file
diff --git a/docs/modules/5_path_planning/path_planning_main.rst b/docs/modules/5_path_planning/path_planning_main.rst
index 0c84a19c22..5132760dc5 100644
--- a/docs/modules/5_path_planning/path_planning_main.rst
+++ b/docs/modules/5_path_planning/path_planning_main.rst
@@ -19,6 +19,7 @@ Path planning is the ability of a robot to search feasible and efficient path to
visibility_road_map_planner/visibility_road_map_planner
vrm_planner/vrm_planner
rrt/rrt
+ particleSwarmOptimization/particleSwarmOptimization
cubic_spline/cubic_spline
bspline_path/bspline_path
catmull_rom_spline/catmull_rom_spline
diff --git a/docs/modules/5_path_planning/prm_planner/prm_planner_main.rst b/docs/modules/5_path_planning/prm_planner/prm_planner_main.rst
index 0628719176..d58d1e2633 100644
--- a/docs/modules/5_path_planning/prm_planner/prm_planner_main.rst
+++ b/docs/modules/5_path_planning/prm_planner/prm_planner_main.rst
@@ -13,7 +13,14 @@ Cyan crosses means searched points with Dijkstra method,
The red line is the final path of PRM.
-Ref:
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: PathPlanning.ProbabilisticRoadMap.probabilistic_road_map.prm_planning
+
+
+Reference
+~~~~~~~~~~~
- `Probabilistic roadmap -
Wikipedia `__
diff --git a/docs/modules/5_path_planning/quintic_polynomials_planner/quintic_polynomials_planner_main.rst b/docs/modules/5_path_planning/quintic_polynomials_planner/quintic_polynomials_planner_main.rst
index 0412b3c9b3..c7bc3fb55c 100644
--- a/docs/modules/5_path_planning/quintic_polynomials_planner/quintic_polynomials_planner_main.rst
+++ b/docs/modules/5_path_planning/quintic_polynomials_planner/quintic_polynomials_planner_main.rst
@@ -9,6 +9,11 @@ Motion planning with quintic polynomials.
It can calculate 2D path, velocity, and acceleration profile based on
quintic polynomials.
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: PathPlanning.QuinticPolynomialsPlanner.quintic_polynomials_planner.quintic_polynomials_planner
+
Quintic polynomials for one dimensional robot motion
@@ -97,7 +102,7 @@ Each velocity and acceleration boundary condition can be calculated with each or
:math:`v_{xe}=v_ecos(\theta_e), v_{ye}=v_esin(\theta_e)`
-References:
+Reference
~~~~~~~~~~~
- `Local Path Planning And Motion Control For Agv In
diff --git a/docs/modules/5_path_planning/reeds_shepp_path/reeds_shepp_path_main.rst b/docs/modules/5_path_planning/reeds_shepp_path/reeds_shepp_path_main.rst
index ff377eb91b..bedcfc33d8 100644
--- a/docs/modules/5_path_planning/reeds_shepp_path/reeds_shepp_path_main.rst
+++ b/docs/modules/5_path_planning/reeds_shepp_path/reeds_shepp_path_main.rst
@@ -5,13 +5,19 @@ A sample code with Reeds Shepp path planning.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/ReedsSheppPath/animation.gif?raw=true
+Code Link
+==============
+
+.. autofunction:: PathPlanning.ReedsSheppPath.reeds_shepp_path_planning.reeds_shepp_path_planning
+
+
Mathematical Description of Individual Path Types
=================================================
Here is an overview of mathematical derivations of formulae for individual path types.
In all the derivations below, radius of curvature of the vehicle is assumed to be of unit length and start pose is considered to be at origin. (*In code we are removing the offset due to start position and normalising the lengths before passing the values to these functions.*)
-Also, (t, u, v) respresent the measure of each motion requried. Thus, in case of a turning maneuver, they represent the angle inscribed at the centre of turning circle and in case of straight maneuver, they represent the distance to be travelled.
+Also, (t, u, v) represent the measure of each motion required. Thus, in the case of a turning maneuver they represent the angle inscribed at the centre of the turning circle, and in the case of a straight maneuver they represent the distance to be travelled.
1. **Left-Straight-Left**
@@ -380,7 +386,8 @@ Hence, we have:
- :math:`v = (t - φ)`
-Ref:
+Reference
+=============
- `15.3.2 Reeds-Shepp
Curves `__
diff --git a/docs/modules/5_path_planning/closed_loop_rrt_star_car/Figure_1.png b/docs/modules/5_path_planning/rrt/closed_loop_rrt_star_car/Figure_1.png
similarity index 100%
rename from docs/modules/5_path_planning/closed_loop_rrt_star_car/Figure_1.png
rename to docs/modules/5_path_planning/rrt/closed_loop_rrt_star_car/Figure_1.png
diff --git a/docs/modules/5_path_planning/closed_loop_rrt_star_car/Figure_3.png b/docs/modules/5_path_planning/rrt/closed_loop_rrt_star_car/Figure_3.png
similarity index 100%
rename from docs/modules/5_path_planning/closed_loop_rrt_star_car/Figure_3.png
rename to docs/modules/5_path_planning/rrt/closed_loop_rrt_star_car/Figure_3.png
diff --git a/docs/modules/5_path_planning/closed_loop_rrt_star_car/Figure_4.png b/docs/modules/5_path_planning/rrt/closed_loop_rrt_star_car/Figure_4.png
similarity index 100%
rename from docs/modules/5_path_planning/closed_loop_rrt_star_car/Figure_4.png
rename to docs/modules/5_path_planning/rrt/closed_loop_rrt_star_car/Figure_4.png
diff --git a/docs/modules/5_path_planning/closed_loop_rrt_star_car/Figure_5.png b/docs/modules/5_path_planning/rrt/closed_loop_rrt_star_car/Figure_5.png
similarity index 100%
rename from docs/modules/5_path_planning/closed_loop_rrt_star_car/Figure_5.png
rename to docs/modules/5_path_planning/rrt/closed_loop_rrt_star_car/Figure_5.png
diff --git a/docs/modules/5_path_planning/rrt/rrt_main.rst b/docs/modules/5_path_planning/rrt/rrt_main.rst
index e5f351a7ba..1bd5497f4c 100644
--- a/docs/modules/5_path_planning/rrt/rrt_main.rst
+++ b/docs/modules/5_path_planning/rrt/rrt_main.rst
@@ -14,6 +14,12 @@ This is a simple path planning code with Rapidly-Exploring Random Trees
Black circles are obstacles, green line is a searched tree, red crosses
are start and goal positions.
+Code Link
+^^^^^^^^^^
+
+.. autoclass:: PathPlanning.RRT.rrt.RRT
+
+
.. include:: rrt_star.rst
@@ -24,6 +30,12 @@ RRT with dubins path
Path planning for a car robot with RRT and dubins path planner.
+Code Link
+^^^^^^^^^^
+
+.. autoclass:: PathPlanning.RRTDubins.rrt_dubins.RRTDubins
+
+
.. _rrt*-with-dubins-path:
RRT\* with dubins path
@@ -33,6 +45,12 @@ RRT\* with dubins path
Path planning for a car robot with RRT\* and dubins path planner.
+Code Link
+^^^^^^^^^^
+
+.. autoclass:: PathPlanning.RRTStarDubins.rrt_star_dubins.RRTStarDubins
+
+
.. _rrt*-with-reeds-sheep-path:
RRT\* with reeds-sheep path
@@ -42,6 +60,12 @@ RRT\* with reeds-sheep path
Path planning for a car robot with RRT\* and reeds sheep path planner.
+Code Link
+^^^^^^^^^^
+
+.. autoclass:: PathPlanning.RRTStarReedsShepp.rrt_star_reeds_shepp.RRTStarReedsShepp
+
+
.. _informed-rrt*:
Informed RRT\*
@@ -53,7 +77,14 @@ This is a path planning code with Informed RRT*.
The cyan ellipse is the heuristic sampling domain of Informed RRT*.
-Ref:
+Code Link
+^^^^^^^^^^
+
+.. autoclass:: PathPlanning.InformedRRTStar.informed_rrt_star.InformedRRTStar
+
+
+Reference
+^^^^^^^^^^
- `Informed RRT\*: Optimal Sampling-based Path Planning Focused via
Direct Sampling of an Admissible Ellipsoidal
@@ -68,12 +99,20 @@ Batch Informed RRT\*
This is a path planning code with Batch Informed RRT*.
-Ref:
+Code Link
+^^^^^^^^^^
+
+.. autoclass:: PathPlanning.BatchInformedRRTStar.batch_informed_rrt_star.BITStar
+
+
+Reference
+^^^^^^^^^^^
- `Batch Informed Trees (BIT*): Sampling-based Optimal Planning via the
Heuristically Guided Search of Implicit Random Geometric
Graphs `__
+
.. _closed-loop-rrt*:
Closed Loop RRT\*
@@ -87,7 +126,14 @@ In this code, pure-pursuit algorithm is used for steering control,
PID is used for speed control.
-Ref:
+Code Link
+^^^^^^^^^^
+
+.. autoclass:: PathPlanning.ClosedLoopRRTStar.closed_loop_rrt_star_car.ClosedLoopRRTStar
+
+
+Reference
+^^^^^^^^^^^^
- `Motion Planning in Complex Environments using Closed-loop
Prediction `__
@@ -98,6 +144,7 @@ Ref:
- `[1601.06326] Sampling-based Algorithms for Optimal Motion Planning
Using Closed-loop Prediction `__
+
.. _lqr-rrt*:
LQR-RRT\*
@@ -109,7 +156,14 @@ A double integrator motion model is used for LQR local planner.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/LQRRRTStar/animation.gif
-Ref:
+Code Link
+^^^^^^^^^^
+
+.. autoclass:: PathPlanning.LQRRRTStar.lqr_rrt_star.LQRRRTStar
+
+
+Reference
+^^^^^^^^^^^^^
- `LQR-RRT\*: Optimal Sampling-Based Motion Planning with Automatically
Derived Extension
diff --git a/docs/modules/5_path_planning/rrt/rrt_star.rst b/docs/modules/5_path_planning/rrt/rrt_star.rst
index 6deb6b9172..960b9976d5 100644
--- a/docs/modules/5_path_planning/rrt/rrt_star.rst
+++ b/docs/modules/5_path_planning/rrt/rrt_star.rst
@@ -7,6 +7,12 @@ This is a path planning code with RRT\*
Black circles are obstacles, green line is a searched tree, red crosses are start and goal positions.
+Code Link
+^^^^^^^^^^
+
+.. autoclass:: PathPlanning.RRTStar.rrt_star.RRTStar
+
+
Simulation
^^^^^^^^^^
diff --git a/docs/modules/5_path_planning/state_lattice_planner/state_lattice_planner_main.rst b/docs/modules/5_path_planning/state_lattice_planner/state_lattice_planner_main.rst
index d5e7ed17d1..9f8cc0c50f 100644
--- a/docs/modules/5_path_planning/state_lattice_planner/state_lattice_planner_main.rst
+++ b/docs/modules/5_path_planning/state_lattice_planner/state_lattice_planner_main.rst
@@ -12,17 +12,34 @@ Uniform polar sampling
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/StateLatticePlanner/UniformPolarSampling.gif
+Code Link
+^^^^^^^^^^^^^
+
+.. autofunction:: PathPlanning.StateLatticePlanner.state_lattice_planner.calc_uniform_polar_states
+
+
Biased polar sampling
~~~~~~~~~~~~~~~~~~~~~
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/StateLatticePlanner/BiasedPolarSampling.gif
+Code Link
+^^^^^^^^^^^^^
+.. autofunction:: PathPlanning.StateLatticePlanner.state_lattice_planner.calc_biased_polar_states
+
+
Lane sampling
~~~~~~~~~~~~~
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathPlanning/StateLatticePlanner/LaneSampling.gif
-Ref:
+Code Link
+^^^^^^^^^^^^^
+.. autofunction:: PathPlanning.StateLatticePlanner.state_lattice_planner.calc_lane_states
+
+
+Reference
+~~~~~~~~~~~~~~~
- `Optimal rough terrain trajectory generation for wheeled mobile
robots `__
diff --git a/docs/modules/5_path_planning/time_based_grid_search/time_based_grid_search_main.rst b/docs/modules/5_path_planning/time_based_grid_search/time_based_grid_search_main.rst
index 0c26badec7..a44dd20a97 100644
--- a/docs/modules/5_path_planning/time_based_grid_search/time_based_grid_search_main.rst
+++ b/docs/modules/5_path_planning/time_based_grid_search/time_based_grid_search_main.rst
@@ -16,7 +16,93 @@ Using a time-based cost and heuristic ensures the path found is optimal in terms
The cost is the amount of time it takes to reach a given node, and the heuristic is the minimum amount of time it could take to reach the goal from that node, disregarding all obstacles.
For a simple scenario where the robot can move 1 cell per time step and stop and go as it pleases, the heuristic for time is equivalent to the heuristic for distance.
-References:
+One optimization that was added in `this PR `__ was to add an expanded set to the algorithm. The algorithm will not expand nodes that are already in that set. This greatly reduces the number of node expansions needed to find a path, since no duplicates are expanded. It also helps to reduce the amount of memory the algorithm uses.
+
+Before::
+
+ Found path to goal after 204490 expansions
+ Planning took: 1.72464 seconds
+ Memory usage (RSS): 68.19 MB
+
+
+After::
+
+ Found path to goal after 2348 expansions
+ Planning took: 0.01550 seconds
+ Memory usage (RSS): 64.85 MB
+
+These measurements are for a start at (1, 11) in the structured obstacle arrangement (the second of the two gifs above).
+
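+The guard itself is a few lines in a generic best-first loop (a sketch, not the planner's exact code; node keys are assumed to hash on position and time):
+
+.. code-block:: python
+
+    import heapq
+
+    def search(start_key, successors, is_goal):
+        """Best-first search with an expanded (closed) set over (x, y, t) keys.
+
+        ``successors(key)`` yields ``(step_cost, next_key)`` pairs; both
+        callables are placeholders for the planner's real interfaces.
+        """
+        open_queue = [(0.0, start_key)]
+        expanded = set()
+        while open_queue:
+            cost, key = heapq.heappop(open_queue)
+            if key in expanded:    # duplicate entry: already expanded, skip it
+                continue
+            expanded.add(key)
+            if is_goal(key):
+                return cost
+            for step_cost, next_key in successors(key):
+                if next_key not in expanded:
+                    heapq.heappush(open_queue, (cost + step_cost, next_key))
+        return None
+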
+Code Link
+^^^^^^^^^^^^^
+.. autoclass:: PathPlanning.TimeBasedPathPlanning.SpaceTimeAStar.SpaceTimeAStar
+
+
+Safe Interval Path Planning
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The safe interval path planning algorithm is described in this paper:
+
+`SIPP: Safe Interval Path Planning for Dynamic Environments `__
+
+It is faster than space-time A* because it pre-computes the intervals of time during which each cell is unoccupied. This reduces the number of successor nodes it generates, because it avoids generating multiple nodes that fall within the same safe interval.
+
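+The pre-computation can be pictured as follows: the sorted time steps at which a cell is occupied partition time into gaps, and those gaps are the cell's safe intervals (a sketch of the idea, not the planner's data structures):
+
+.. code-block:: python
+
+    def safe_intervals(occupied_times, horizon):
+        """Maximal unoccupied (start, end) intervals of one cell up to horizon."""
+        intervals, start = [], 0
+        for t in sorted(occupied_times):
+            if start < t:             # a gap before this occupancy
+                intervals.append((start, t - 1))
+            start = t + 1
+        if start <= horizon:          # trailing gap after the last occupancy
+            intervals.append((start, horizon))
+        return intervals
+
+    # e.g. safe_intervals([3, 4], 10) -> [(0, 2), (5, 10)]
+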
+**Comparison with SpaceTime A*:**
+
+Arrangement 1 starting at (1, 18)
+
+SafeInterval planner::
+
+ Found path to goal after 322 expansions
+ Planning took: 0.00730 seconds
+
+SpaceTime A*::
+
+ Found path to goal after 2717154 expansions
+ Planning took: 20.51330 seconds
+
+**Benchmarking the Safe Interval Path Planner:**
+
+250 random obstacles::
+
+ Found path to goal after 764 expansions
+ Planning took: 0.60596 seconds
+
+.. image:: https://raw.githubusercontent.com/AtsushiSakai/PythonRoboticsGifs/refs/heads/master/PathPlanning/TimeBasedPathPlanning/SafeIntervalPathPlanner/path_animation.gif
+
+Arrangement 1 starting at (1, 18)::
+
+ Found path to goal after 322 expansions
+ Planning took: 0.00730 seconds
+
+.. image:: https://raw.githubusercontent.com/AtsushiSakai/PythonRoboticsGifs/refs/heads/master/PathPlanning/TimeBasedPathPlanning/SafeIntervalPathPlanner/path_animation2.gif
+
+Code Link
+^^^^^^^^^^^^^
+.. autoclass:: PathPlanning.TimeBasedPathPlanning.SafeInterval.SafeIntervalPathPlanner
+
+Multi-Agent Path Planning
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Priority Based Planning
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The planner first decides an order in which to plan, then plans paths for the robots in that order. Each planned path is reserved in the grid, and all later plans must avoid it. Robots are planned in descending order of start-to-goal distance.
+
+This algorithm is sub-optimal because no replanning happens once paths are found. It guarantees neither the shortest path for any particular robot, nor that the final set of paths minimizes total time or movement.
+
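+In outline, the priority scheme looks like this (``distance``, ``plan_single``, and ``reserve`` are illustrative stand-ins for the module's real interfaces):
+
+.. code-block:: python
+
+    def plan_with_priorities(tasks, distance, plan_single, reserve):
+        """Plan robots in descending start-to-goal distance, reserving each path."""
+        ordered = sorted(tasks, key=lambda t: distance(t.start, t.goal),
+                         reverse=True)
+        reservations, paths = set(), []
+        for task in ordered:
+            path = plan_single(task, reservations)  # must avoid reserved cells
+            reservations |= reserve(path)           # (x, y, t) cells of this path
+            paths.append(path)
+        return ordered, paths
+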
+Static Obstacles:
+
+.. image:: https://raw.githubusercontent.com/AtsushiSakai/PythonRoboticsGifs/refs/heads/master/PathPlanning/TimeBasedPathPlanning/PriorityBasedPlanner/priority_planner2.gif
+
+Dynamic Obstacles:
+
+.. image:: https://raw.githubusercontent.com/AtsushiSakai/PythonRoboticsGifs/refs/heads/master/PathPlanning/TimeBasedPathPlanning/PriorityBasedPlanner/priority_planner.gif
+
+
+References
~~~~~~~~~~~
- `Cooperative Pathfinding `__
+- `SIPP: Safe Interval Path Planning for Dynamic Environments `__
+- `Prioritized Planning Algorithms for Trajectory Coordination of Multiple Mobile Robots `__
\ No newline at end of file
diff --git a/docs/modules/5_path_planning/visibility_road_map_planner/visibility_road_map_planner_main.rst b/docs/modules/5_path_planning/visibility_road_map_planner/visibility_road_map_planner_main.rst
index 3c9b7c008c..aac96d6e19 100644
--- a/docs/modules/5_path_planning/visibility_road_map_planner/visibility_road_map_planner_main.rst
+++ b/docs/modules/5_path_planning/visibility_road_map_planner/visibility_road_map_planner_main.rst
@@ -13,6 +13,11 @@ red crosses are visibility nodes, and blue lines area collision free visibility
The red line is the final path searched by the dijkstra algorithm from the visibility graphs.
+Code Link
+~~~~~~~~~~~~
+.. autoclass:: PathPlanning.VisibilityRoadMap.visibility_road_map.VisibilityRoadMap
+
+
Algorithms
~~~~~~~~~~
@@ -64,7 +69,7 @@ The red line is searched path in the figure:
You can find the details of Dijkstra algorithm in :ref:`dijkstra`.
References
-^^^^^^^^^^
+~~~~~~~~~~~~
- `Visibility graph - Wikipedia `_
diff --git a/docs/modules/5_path_planning/vrm_planner/vrm_planner_main.rst b/docs/modules/5_path_planning/vrm_planner/vrm_planner_main.rst
index 92e729ab29..46b86123f3 100644
--- a/docs/modules/5_path_planning/vrm_planner/vrm_planner_main.rst
+++ b/docs/modules/5_path_planning/vrm_planner/vrm_planner_main.rst
@@ -9,9 +9,15 @@ In the animation, blue points are Voronoi points,
Cyan crosses mean searched points with Dijkstra method,
-The red line is the final path of Vornoi Road-Map.
+The red line is the final path of Voronoi Road-Map.
-Ref:
+Code Link
+~~~~~~~~~~~~~~~
+.. autoclass:: PathPlanning.VoronoiRoadMap.voronoi_road_map.VoronoiRoadMapPlanner
+
+
+Reference
+~~~~~~~~~~~~
- `Robotic Motion Planning `__
diff --git a/docs/modules/6_path_tracking/cgmres_nmpc/cgmres_nmpc_main.rst b/docs/modules/6_path_tracking/cgmres_nmpc/cgmres_nmpc_main.rst
index 23f1218a94..914a4555ff 100644
--- a/docs/modules/6_path_tracking/cgmres_nmpc/cgmres_nmpc_main.rst
+++ b/docs/modules/6_path_tracking/cgmres_nmpc/cgmres_nmpc_main.rst
@@ -17,6 +17,11 @@ Nonlinear Model Predictive Control with C-GMRES
.. figure:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathTracking/cgmres_nmpc/animation.gif
:alt: gif
+Code Link
+~~~~~~~~~~~~
+
+.. autoclass:: PathTracking.cgmres_nmpc.cgmres_nmpc.NMPCControllerCGMRES
+
Mathematical Formulation
~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/docs/modules/6_path_tracking/lqr_speed_and_steering_control/lqr_speed_and_steering_control_main.rst b/docs/modules/6_path_tracking/lqr_speed_and_steering_control/lqr_speed_and_steering_control_main.rst
index ded187e972..b0ba9e0d45 100644
--- a/docs/modules/6_path_tracking/lqr_speed_and_steering_control/lqr_speed_and_steering_control_main.rst
+++ b/docs/modules/6_path_tracking/lqr_speed_and_steering_control/lqr_speed_and_steering_control_main.rst
@@ -7,7 +7,12 @@ Path tracking simulation with LQR speed and steering control.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathTracking/lqr_speed_steer_control/animation.gif
-`[Code Link] `_
+
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: PathTracking.lqr_speed_steer_control.lqr_speed_steer_control.lqr_speed_steering_control
+
Overview
~~~~~~~~
@@ -134,7 +139,7 @@ Simulation results
-References:
+Reference
~~~~~~~~~~~
- `Towards fully autonomous driving: Systems and algorithms `__
diff --git a/docs/modules/6_path_tracking/lqr_steering_control/lqr_steering_control_main.rst b/docs/modules/6_path_tracking/lqr_steering_control/lqr_steering_control_main.rst
index baca7a33fc..fc8f2a33aa 100644
--- a/docs/modules/6_path_tracking/lqr_steering_control/lqr_steering_control_main.rst
+++ b/docs/modules/6_path_tracking/lqr_steering_control/lqr_steering_control_main.rst
@@ -8,7 +8,11 @@ control.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathTracking/lqr_steer_control/animation.gif
-`[Code Link] `_
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: PathTracking.lqr_steer_control.lqr_steer_control.lqr_steering_control
+
Overview
~~~~~~~~
@@ -113,7 +117,7 @@ The optimal control input `u` can be calculated as:
where `K` is the feedback gain matrix obtained by solving the Riccati equation.
-References:
+Reference
~~~~~~~~~~~
- `ApolloAuto/apollo: An open autonomous driving platform `_
diff --git a/docs/modules/6_path_tracking/model_predictive_speed_and_steering_control/model_predictive_speed_and_steering_control_main.rst b/docs/modules/6_path_tracking/model_predictive_speed_and_steering_control/model_predictive_speed_and_steering_control_main.rst
index de55b545ba..cc5a6812ca 100644
--- a/docs/modules/6_path_tracking/model_predictive_speed_and_steering_control/model_predictive_speed_and_steering_control_main.rst
+++ b/docs/modules/6_path_tracking/model_predictive_speed_and_steering_control/model_predictive_speed_and_steering_control_main.rst
@@ -5,13 +5,6 @@ Model predictive speed and steering control
.. figure:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathTracking/model_predictive_speed_and_steer_control/animation.gif?raw=true
:alt: Model predictive speed and steering control
- Model predictive speed and steering control
-
-code:
-
-`PythonRobotics/model_predictive_speed_and_steer_control.py at master ·
-AtsushiSakai/PythonRobotics `__
-
This is a path tracking simulation using model predictive control (MPC).
The MPC controller controls vehicle speed and steering base on
@@ -22,6 +15,12 @@ This code uses cvxpy as an optimization modeling tool.
- `Welcome to CVXPY 1.0 — CVXPY 1.0.6
documentation `__
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: PathTracking.model_predictive_speed_and_steer_control.model_predictive_speed_and_steer_control.iterative_linear_mpc_control
+
+
MPC modeling
~~~~~~~~~~~~
@@ -35,17 +34,17 @@ Input vector is:
.. math:: u = [a, \delta]
-a: accellation, δ: steering angle
+a: acceleration, δ: steering angle
-The MPC cotroller minimize this cost function for path tracking:
+The MPC controller minimizes this cost function for path tracking:
.. math:: min\ Q_f(z_{T,ref}-z_{T})^2+Q\Sigma({z_{t,ref}-z_{t}})^2+R\Sigma{u_t}^2+R_d\Sigma({u_{t+1}-u_{t}})^2
-z_ref come from target path and speed.
+z_ref comes from target path and speed.
subject to:
-- Linearlied vehicle model
+- Linearized vehicle model
.. math:: z_{t+1}=Az_t+Bu+C
diff --git a/docs/modules/6_path_tracking/move_to_a_pose_control/move_to_a_pose_control_main.rst b/docs/modules/6_path_tracking/move_to_a_pose/move_to_a_pose_main.rst
similarity index 96%
rename from docs/modules/6_path_tracking/move_to_a_pose_control/move_to_a_pose_control_main.rst
rename to docs/modules/6_path_tracking/move_to_a_pose/move_to_a_pose_main.rst
index 77ec682a30..b3d23ab2be 100644
--- a/docs/modules/6_path_tracking/move_to_a_pose_control/move_to_a_pose_control_main.rst
+++ b/docs/modules/6_path_tracking/move_to_a_pose/move_to_a_pose_main.rst
@@ -3,7 +3,13 @@ Move to a Pose Control
In this section, we present the logic of PathFinderController that drives a car from a start pose (x, y, theta) to a goal pose. A simulation of moving to a pose control is presented below.
-.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathTracking/move_to_pose/animation.gif
+.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/Control/move_to_pose/animation.gif
+
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: PathTracking.move_to_pose.move_to_pose.move_to_pose
+
Position Control of non-Holonomic Systems
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -26,7 +32,7 @@ Constructor
PathFinderController(Kp_rho, Kp_alpha, Kp_beta)
-Constructs an instantiate of the PathFinderController for navigating a 3-DOF wheeled robot on a 2D plane.
+Constructs an instance of the PathFinderController for navigating a 3-DOF wheeled robot on a 2D plane.
Parameters:
diff --git a/docs/modules/6_path_tracking/path_tracking_main.rst b/docs/modules/6_path_tracking/path_tracking_main.rst
index d98e324583..742cc807e6 100644
--- a/docs/modules/6_path_tracking/path_tracking_main.rst
+++ b/docs/modules/6_path_tracking/path_tracking_main.rst
@@ -16,4 +16,4 @@ Path tracking is the ability of a robot to follow the reference path generated b
lqr_speed_and_steering_control/lqr_speed_and_steering_control
model_predictive_speed_and_steering_control/model_predictive_speed_and_steering_control
cgmres_nmpc/cgmres_nmpc
- move_to_a_pose_control/move_to_a_pose_control
+ move_to_a_pose/move_to_a_pose
diff --git a/docs/modules/6_path_tracking/pure_pursuit_tracking/pure_pursuit_tracking_main.rst b/docs/modules/6_path_tracking/pure_pursuit_tracking/pure_pursuit_tracking_main.rst
index 5c7bcef85f..ff6749bbbe 100644
--- a/docs/modules/6_path_tracking/pure_pursuit_tracking/pure_pursuit_tracking_main.rst
+++ b/docs/modules/6_path_tracking/pure_pursuit_tracking/pure_pursuit_tracking_main.rst
@@ -9,7 +9,13 @@ speed control.
The red line is a target course, the green cross means the target point
for pure pursuit control, the blue line is the tracking.
-References:
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: PathTracking.pure_pursuit.pure_pursuit.pure_pursuit_steer_control
+
+
+Reference
~~~~~~~~~~~
- `A Survey of Motion Planning and Control Techniques for Self-driving
diff --git a/docs/modules/6_path_tracking/rear_wheel_feedback_control/rear_wheel_feedback_control_main.rst b/docs/modules/6_path_tracking/rear_wheel_feedback_control/rear_wheel_feedback_control_main.rst
index 70875fdc6c..56d0db63ad 100644
--- a/docs/modules/6_path_tracking/rear_wheel_feedback_control/rear_wheel_feedback_control_main.rst
+++ b/docs/modules/6_path_tracking/rear_wheel_feedback_control/rear_wheel_feedback_control_main.rst
@@ -6,7 +6,13 @@ PID speed control.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathTracking/rear_wheel_feedback/animation.gif
-References:
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: PathTracking.rear_wheel_feedback_control.rear_wheel_feedback_control.rear_wheel_feedback_control
+
+
+Reference
~~~~~~~~~~~
- `A Survey of Motion Planning and Control Techniques for Self-driving
Urban Vehicles `__
diff --git a/docs/modules/6_path_tracking/stanley_control/stanley_control_main.rst b/docs/modules/6_path_tracking/stanley_control/stanley_control_main.rst
index fe325b0102..3c491804f6 100644
--- a/docs/modules/6_path_tracking/stanley_control/stanley_control_main.rst
+++ b/docs/modules/6_path_tracking/stanley_control/stanley_control_main.rst
@@ -6,7 +6,13 @@ control.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/PathTracking/stanley_controller/animation.gif
-References:
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: PathTracking.stanley_control.stanley_control.stanley_control
+
+
+Reference
~~~~~~~~~~~
- `Stanley: The robot that won the DARPA grand
diff --git a/docs/modules/7_arm_navigation/n_joint_arm_to_point_control_main.rst b/docs/modules/7_arm_navigation/n_joint_arm_to_point_control_main.rst
index cc6279f681..6c1db7d2a5 100644
--- a/docs/modules/7_arm_navigation/n_joint_arm_to_point_control_main.rst
+++ b/docs/modules/7_arm_navigation/n_joint_arm_to_point_control_main.rst
@@ -3,7 +3,7 @@ N joint arm to point control
N joint arm to a point control simulation.
-This is a interactive simulation.
+This is an interactive simulation.
You can set the goal position of the end effector with left-click on the
plotting area.
@@ -11,3 +11,10 @@ plotting area.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/ArmNavigation/n_joint_arm_to_point_control/animation.gif
In this simulation N = 10, however, you can change it.
+
+
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: ArmNavigation.n_joint_arm_to_point_control.n_joint_arm_to_point_control.main
+
diff --git a/docs/modules/7_arm_navigation/obstacle_avoidance_arm_navigation_main.rst b/docs/modules/7_arm_navigation/obstacle_avoidance_arm_navigation_main.rst
index 899a64a5cd..4433ab531b 100644
--- a/docs/modules/7_arm_navigation/obstacle_avoidance_arm_navigation_main.rst
+++ b/docs/modules/7_arm_navigation/obstacle_avoidance_arm_navigation_main.rst
@@ -3,4 +3,9 @@ Arm navigation with obstacle avoidance
Arm navigation with obstacle avoidance simulation.
-.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/ArmNavigation/arm_obstacle_navigation/animation.gif
\ No newline at end of file
+.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/ArmNavigation/arm_obstacle_navigation/animation.gif
+
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: ArmNavigation.arm_obstacle_navigation.arm_obstacle_navigation.main
diff --git a/docs/modules/7_arm_navigation/planar_two_link_ik_main.rst b/docs/modules/7_arm_navigation/planar_two_link_ik_main.rst
index 2da2b0dc30..8d3b709e47 100644
--- a/docs/modules/7_arm_navigation/planar_two_link_ik_main.rst
+++ b/docs/modules/7_arm_navigation/planar_two_link_ik_main.rst
@@ -7,10 +7,16 @@ Two joint arm to point control
This is two joint arm to a point control simulation.
-This is a interactive simulation.
+This is an interactive simulation.
You can set the goal position of the end effector with left-click on the plotting area.
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: ArmNavigation.two_joint_arm_to_point_control.two_joint_arm_to_point_control.main
+
+
Inverse Kinematics for a Planar Two-Link Robotic Arm
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/docs/modules/8_aerial_navigation/drone_3d_trajectory_following/drone_3d_trajectory_following_main.rst b/docs/modules/8_aerial_navigation/drone_3d_trajectory_following/drone_3d_trajectory_following_main.rst
index c3bdc33cdc..1805bb3f6d 100644
--- a/docs/modules/8_aerial_navigation/drone_3d_trajectory_following/drone_3d_trajectory_following_main.rst
+++ b/docs/modules/8_aerial_navigation/drone_3d_trajectory_following/drone_3d_trajectory_following_main.rst
@@ -4,3 +4,8 @@ Drone 3d trajectory following
This is a 3d trajectory following simulation for a quadrotor.
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/AerialNavigation/drone_3d_trajectory_following/animation.gif
+
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: AerialNavigation.drone_3d_trajectory_following.drone_3d_trajectory_following.quad_sim
diff --git a/docs/modules/8_aerial_navigation/rocket_powered_landing/rocket_powered_landing_main.rst b/docs/modules/8_aerial_navigation/rocket_powered_landing/rocket_powered_landing_main.rst
index 6c90d2b44e..4bf5117500 100644
--- a/docs/modules/8_aerial_navigation/rocket_powered_landing/rocket_powered_landing_main.rst
+++ b/docs/modules/8_aerial_navigation/rocket_powered_landing/rocket_powered_landing_main.rst
@@ -1,6 +1,14 @@
Rocket powered landing
-----------------------------
+.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/AerialNavigation/rocket_powered_landing/animation.gif
+
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autofunction:: AerialNavigation.rocket_powered_landing.rocket_powered_landing.main
+
+
Simulation
~~~~~~~~~~
diff --git a/docs/modules/9_bipedal/bipedal_planner/bipedal_planner_main.rst b/docs/modules/9_bipedal/bipedal_planner/bipedal_planner_main.rst
index 2ee5971e7a..6253715cc1 100644
--- a/docs/modules/9_bipedal/bipedal_planner/bipedal_planner_main.rst
+++ b/docs/modules/9_bipedal/bipedal_planner/bipedal_planner_main.rst
@@ -4,3 +4,8 @@ Bipedal Planner
Bipedal Walking with modifying designated footsteps
.. image:: https://github.com/AtsushiSakai/PythonRoboticsGifs/raw/master/Bipedal/bipedal_planner/animation.gif
+
+Code Link
+~~~~~~~~~~~~~~~
+
+.. autoclass:: Bipedal.bipedal_planner.bipedal_planner.BipedalPlanner
diff --git a/requirements/environment.yml b/requirements/environment.yml
index afbb3fb8ce..023a3d75bf 100644
--- a/requirements/environment.yml
+++ b/requirements/environment.yml
@@ -2,7 +2,7 @@ name: python_robotics
channels:
- conda-forge
dependencies:
- - python=3.12
+ - python=3.13
- pip
- scipy
- numpy
diff --git a/requirements/requirements.txt b/requirements/requirements.txt
index b439ea4266..eecc683983 100644
--- a/requirements/requirements.txt
+++ b/requirements/requirements.txt
@@ -1,8 +1,9 @@
-numpy == 2.2.3
-scipy == 1.15.2
-matplotlib == 3.10.0
-cvxpy == 1.5.3
-pytest == 8.3.4 # For unit test
-pytest-xdist == 3.6.1 # For unit test
-mypy == 1.15.0 # For unit test
-ruff == 0.9.7 # For unit test
+numpy == 2.3.5
+scipy == 1.16.3
+matplotlib == 3.10.7
+cvxpy == 1.7.5
+ecos == 2.0.14
+pytest == 9.0.2 # For unit test
+pytest-xdist == 3.8.0 # For unit test
+mypy == 1.19.1 # For unit test
+ruff == 0.14.9 # For unit test
diff --git a/ruff.toml b/ruff.toml
index 5823ca3db7..d76b715a06 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -5,8 +5,8 @@ ignore = ["E501", "E741", "E402"]
exclude = [
]
-# Assume Python 3.11
-target-version = "py312"
+# Assume Python 3.13
+target-version = "py313"
[per-file-ignores]
diff --git a/runtests.sh b/runtests.sh
index 12d1b80453..c4460c8aa7 100755
--- a/runtests.sh
+++ b/runtests.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+cd "$(dirname "$0")" || exit 1
echo "Run test suites! "
# === pytest based test runner ===
diff --git a/tests/conftest.py b/tests/conftest.py
index 3485fe8150..b707b22d00 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,4 +10,4 @@
def run_this_test(file):
- pytest.main([os.path.abspath(file)])
+    pytest.main(args=["-W", "error", os.path.abspath(file)])
diff --git a/tests/test_batch_informed_rrt_star.py b/tests/test_batch_informed_rrt_star.py
index 3ad78bdb23..cf0a9827a8 100644
--- a/tests/test_batch_informed_rrt_star.py
+++ b/tests/test_batch_informed_rrt_star.py
@@ -1,7 +1,7 @@
import random
import conftest
-from PathPlanning.BatchInformedRRTStar import batch_informed_rrtstar as m
+from PathPlanning.BatchInformedRRTStar import batch_informed_rrt_star as m
def test_1():
diff --git a/tests/test_behavior_tree.py b/tests/test_behavior_tree.py
new file mode 100644
index 0000000000..0898690448
--- /dev/null
+++ b/tests/test_behavior_tree.py
@@ -0,0 +1,207 @@
+import pytest
+import conftest
+
+from MissionPlanning.BehaviorTree.behavior_tree import (
+ BehaviorTreeFactory,
+ Status,
+ ActionNode,
+)
+
+
+def test_sequence_node1():
+ xml_string = """
+
+
+
+
+
+
+
+ """
+ bt_factory = BehaviorTreeFactory()
+ bt = bt_factory.build_tree(xml_string)
+ bt.tick()
+ assert bt.root.status == Status.RUNNING
+ assert bt.root.children[0].status == Status.SUCCESS
+ assert bt.root.children[1].status is None
+ assert bt.root.children[2].status is None
+ bt.tick()
+ bt.tick()
+ assert bt.root.status == Status.FAILURE
+ assert bt.root.children[0].status is None
+ assert bt.root.children[1].status is None
+ assert bt.root.children[2].status is None
+
+
+def test_sequence_node2():
+ xml_string = """
+
+
+
+
+
+
+
+ """
+ bt_factory = BehaviorTreeFactory()
+ bt = bt_factory.build_tree(xml_string)
+ bt.tick_while_running()
+ assert bt.root.status == Status.SUCCESS
+ assert bt.root.children[0].status is None
+ assert bt.root.children[1].status is None
+ assert bt.root.children[2].status is None
+
+
+def test_selector_node1():
+ xml_string = """
+
+
+
+
+
+
+
+ """
+ bt_factory = BehaviorTreeFactory()
+ bt = bt_factory.build_tree(xml_string)
+ bt.tick()
+ assert bt.root.status == Status.RUNNING
+ assert bt.root.children[0].status == Status.FAILURE
+ assert bt.root.children[1].status is None
+ assert bt.root.children[2].status is None
+ bt.tick()
+ assert bt.root.status == Status.SUCCESS
+ assert bt.root.children[0].status is None
+ assert bt.root.children[1].status is None
+ assert bt.root.children[2].status is None
+
+
+def test_selector_node2():
+ xml_string = """
+
+
+
+
+
+
+
+
+ """
+ bt_factory = BehaviorTreeFactory()
+ bt = bt_factory.build_tree(xml_string)
+ bt.tick_while_running()
+ assert bt.root.status == Status.FAILURE
+ assert bt.root.children[0].status is None
+ assert bt.root.children[1].status is None
+
+
+def test_while_do_else_node():
+ xml_string = """
+
+
+
+
+
+ """
+
+ class CountNode(ActionNode):
+ def __init__(self, name, count_threshold):
+ super().__init__(name)
+ self.count = 0
+ self.count_threshold = count_threshold
+
+ def tick(self):
+ self.count += 1
+ if self.count >= self.count_threshold:
+ return Status.FAILURE
+ else:
+ return Status.SUCCESS
+
+ bt_factory = BehaviorTreeFactory()
+ bt_factory.register_node_builder(
+ "Count",
+ lambda node: CountNode(
+ node.attrib.get("name", CountNode.__name__),
+ int(node.attrib["count_threshold"]),
+ ),
+ )
+ bt = bt_factory.build_tree(xml_string)
+ bt.tick()
+ assert bt.root.status == Status.RUNNING
+ assert bt.root.children[0].status == Status.SUCCESS
+ assert bt.root.children[1].status is Status.SUCCESS
+ assert bt.root.children[2].status is None
+ bt.tick()
+ assert bt.root.status == Status.RUNNING
+ assert bt.root.children[0].status == Status.SUCCESS
+ assert bt.root.children[1].status is Status.SUCCESS
+ assert bt.root.children[2].status is None
+ bt.tick()
+ assert bt.root.status == Status.SUCCESS
+ assert bt.root.children[0].status is None
+ assert bt.root.children[1].status is None
+ assert bt.root.children[2].status is None
+
+
+def test_node_children():
+ # ControlNode Must have children
+ xml_string = """
+
+
+ """
+ bt_factory = BehaviorTreeFactory()
+ with pytest.raises(ValueError):
+ bt_factory.build_tree(xml_string)
+
+ # DecoratorNode Must have child
+ xml_string = """
+
+
+ """
+ with pytest.raises(ValueError):
+ bt_factory.build_tree(xml_string)
+
+ # DecoratorNode Must have only one child
+ xml_string = """
+
+
+
+
+ """
+ with pytest.raises(ValueError):
+ bt_factory.build_tree(xml_string)
+
+ # ActionNode Must have no children
+ xml_string = """
+
+
+
+ """
+ with pytest.raises(ValueError):
+ bt_factory.build_tree(xml_string)
+
+ # WhileDoElse Must have exactly 2 or 3 children
+ xml_string = """
+
+
+
+ """
+ with pytest.raises(ValueError):
+ bt = bt_factory.build_tree(xml_string)
+ bt.tick()
+
+ xml_string = """
+
+
+
+
+
+
+ """
+ with pytest.raises(ValueError):
+ bt = bt_factory.build_tree(xml_string)
+ bt.tick()
+
+
+if __name__ == "__main__":
+ conftest.run_this_test(__file__)
diff --git a/tests/test_iterative_closest_point.py b/tests/test_iterative_closest_point.py
index 3e726b5649..cdfa89cc55 100644
--- a/tests/test_iterative_closest_point.py
+++ b/tests/test_iterative_closest_point.py
@@ -1,5 +1,5 @@
import conftest
-from SLAM.iterative_closest_point import iterative_closest_point as m
+from SLAM.ICPMatching import icp_matching as m
def test_1():
diff --git a/tests/test_particle_swarm_optimization.py b/tests/test_particle_swarm_optimization.py
new file mode 100755
index 0000000000..95720022f6
--- /dev/null
+++ b/tests/test_particle_swarm_optimization.py
@@ -0,0 +1,11 @@
+import conftest # Add root path to sys.path
+from PathPlanning.ParticleSwarmOptimization import particle_swarm_optimization as m
+
+
+def test_1():
+ m.show_animation = False
+ m.main()
+
+
+if __name__ == '__main__':
+ conftest.run_this_test(__file__)
\ No newline at end of file
diff --git a/tests/test_priority_based_planner.py b/tests/test_priority_based_planner.py
new file mode 100644
index 0000000000..e2ff602d88
--- /dev/null
+++ b/tests/test_priority_based_planner.py
@@ -0,0 +1,39 @@
+from PathPlanning.TimeBasedPathPlanning.GridWithDynamicObstacles import (
+ Grid,
+ NodePath,
+ ObstacleArrangement,
+ Position,
+)
+from PathPlanning.TimeBasedPathPlanning.BaseClasses import StartAndGoal
+from PathPlanning.TimeBasedPathPlanning import PriorityBasedPlanner as m
+from PathPlanning.TimeBasedPathPlanning.SafeInterval import SafeIntervalPathPlanner
+import numpy as np
+import conftest
+
+
+def test_1():
+ grid_side_length = 21
+
+ start_and_goals = [StartAndGoal(i, Position(1, i), Position(19, 19-i)) for i in range(1, 16)]
+ obstacle_avoid_points = [pos for item in start_and_goals for pos in (item.start, item.goal)]
+
+ grid = Grid(
+ np.array([grid_side_length, grid_side_length]),
+ num_obstacles=250,
+ obstacle_avoid_points=obstacle_avoid_points,
+ obstacle_arrangement=ObstacleArrangement.ARRANGEMENT1,
+ )
+
+ m.show_animation = False
+
+ start_and_goals: list[StartAndGoal]
+ paths: list[NodePath]
+ start_and_goals, paths = m.PriorityBasedPlanner.plan(grid, start_and_goals, SafeIntervalPathPlanner, False)
+
+ # All paths should start at the specified position and reach the goal
+ for i, start_and_goal in enumerate(start_and_goals):
+ assert paths[i].path[0].position == start_and_goal.start
+ assert paths[i].path[-1].position == start_and_goal.goal
+
+if __name__ == "__main__":
+ conftest.run_this_test(__file__)
diff --git a/tests/test_pure_pursuit.py b/tests/test_pure_pursuit.py
index 0e0b83bf6c..89c1829514 100644
--- a/tests/test_pure_pursuit.py
+++ b/tests/test_pure_pursuit.py
@@ -6,6 +6,10 @@ def test1():
m.show_animation = False
m.main()
+def test_backward():
+ m.show_animation = False
+ m.is_reverse_mode = True
+ m.main()
if __name__ == '__main__':
conftest.run_this_test(__file__)
diff --git a/tests/test_raycasting_grid_map.py b/tests/test_ray_casting_grid_map.py
similarity index 71%
rename from tests/test_raycasting_grid_map.py
rename to tests/test_ray_casting_grid_map.py
index f08ae9277e..2d192c9310 100644
--- a/tests/test_raycasting_grid_map.py
+++ b/tests/test_ray_casting_grid_map.py
@@ -1,5 +1,5 @@
import conftest # Add root path to sys.path
-from Mapping.raycasting_grid_map import raycasting_grid_map as m
+from Mapping.ray_casting_grid_map import ray_casting_grid_map as m
def test1():
diff --git a/tests/test_rear_wheel_feedback.py b/tests/test_rear_wheel_feedback.py
index 895eb188b3..eccde52358 100644
--- a/tests/test_rear_wheel_feedback.py
+++ b/tests/test_rear_wheel_feedback.py
@@ -1,5 +1,5 @@
import conftest # Add root path to sys.path
-from PathTracking.rear_wheel_feedback import rear_wheel_feedback as m
+from PathTracking.rear_wheel_feedback_control import rear_wheel_feedback_control as m
def test1():
diff --git a/tests/test_rrt_with_pathsmoothing_radius.py b/tests/test_rrt_with_pathsmoothing_radius.py
new file mode 100644
index 0000000000..a1159255b5
--- /dev/null
+++ b/tests/test_rrt_with_pathsmoothing_radius.py
@@ -0,0 +1,48 @@
+import conftest
+import math
+
+from PathPlanning.RRT import rrt_with_pathsmoothing as rrt_module
+
+def test_smoothed_path_safety():
+ # Define test environment
+ obstacle_list = [
+ (5, 5, 1.0),
+ (3, 6, 2.0),
+ (3, 8, 2.0),
+ (3, 10, 2.0),
+ (7, 5, 2.0),
+ (9, 5, 2.0)
+ ]
+ robot_radius = 0.5
+
+ # Disable animation for testing
+ rrt_module.show_animation = False
+
+ # Create RRT planner
+ rrt = rrt_module.RRT(
+ start=[0, 0],
+ goal=[6, 10],
+ rand_area=[-2, 15],
+ obstacle_list=obstacle_list,
+ robot_radius=robot_radius
+ )
+
+ # Run RRT
+ path = rrt.planning(animation=False)
+
+ # Smooth the path
+ smoothed = rrt_module.path_smoothing(path, max_iter=1000,
+ obstacle_list=obstacle_list,
+ robot_radius=robot_radius)
+
+ # Check if all points on the smoothed path are safely distant from obstacles
+ for x, y in smoothed:
+ for ox, oy, obs_radius in obstacle_list:
+ d = math.hypot(x - ox, y - oy)
+ min_safe_dist = obs_radius + robot_radius
+ assert d > min_safe_dist, \
+ f"Point ({x:.2f}, {y:.2f}) too close to obstacle at ({ox}, {oy})"
+
+
+if __name__ == '__main__':
+ conftest.run_this_test(__file__)
diff --git a/tests/test_safe_interval_path_planner.py b/tests/test_safe_interval_path_planner.py
new file mode 100644
index 0000000000..bbcd4e427c
--- /dev/null
+++ b/tests/test_safe_interval_path_planner.py
@@ -0,0 +1,31 @@
+from PathPlanning.TimeBasedPathPlanning.GridWithDynamicObstacles import (
+ Grid,
+ ObstacleArrangement,
+ Position,
+)
+from PathPlanning.TimeBasedPathPlanning import SafeInterval as m
+import numpy as np
+import conftest
+
+
+def test_1():
+ start = Position(1, 11)
+ goal = Position(19, 19)
+ grid_side_length = 21
+ grid = Grid(
+ np.array([grid_side_length, grid_side_length]),
+ obstacle_arrangement=ObstacleArrangement.ARRANGEMENT1,
+ )
+
+ m.show_animation = False
+ path = m.SafeIntervalPathPlanner.plan(grid, start, goal)
+
+ # path should have 31 entries
+ assert len(path.path) == 31
+
+ # path should end at the goal
+ assert path.path[-1].position == goal
+
+
+if __name__ == "__main__":
+ conftest.run_this_test(__file__)
diff --git a/tests/test_space_time_astar.py b/tests/test_space_time_astar.py
index 5290738eb4..1194c61d2e 100644
--- a/tests/test_space_time_astar.py
+++ b/tests/test_space_time_astar.py
@@ -18,9 +18,7 @@ def test_1():
)
m.show_animation = False
- planner = m.SpaceTimeAStar(grid, start, goal)
-
- path = planner.plan(False)
+ path = m.SpaceTimeAStar.plan(grid, start, goal)
# path should have 28 entries
assert len(path.path) == 31
@@ -28,6 +26,7 @@ def test_1():
# path should end at the goal
assert path.path[-1].position == goal
+ assert path.expanded_node_count < 1000
if __name__ == "__main__":
conftest.run_this_test(__file__)
diff --git a/tests/test_stanley_controller.py b/tests/test_stanley_controller.py
index a1d8073764..bd590cb51a 100644
--- a/tests/test_stanley_controller.py
+++ b/tests/test_stanley_controller.py
@@ -1,5 +1,5 @@
import conftest # Add root path to sys.path
-from PathTracking.stanley_controller import stanley_controller as m
+from PathTracking.stanley_control import stanley_control as m
def test1():
diff --git a/tests/test_theta_star.py b/tests/test_theta_star.py
new file mode 100644
index 0000000000..10ceae44e3
--- /dev/null
+++ b/tests/test_theta_star.py
@@ -0,0 +1,11 @@
+import conftest
+from PathPlanning.ThetaStar import theta_star as m
+
+
+def test_1():
+ m.show_animation = False
+ m.main()
+
+
+if __name__ == '__main__':
+ conftest.run_this_test(__file__)
\ No newline at end of file
diff --git a/users_comments.md b/users_comments.md
index a2f798eac4..c814e74c4b 100644
--- a/users_comments.md
+++ b/users_comments.md
@@ -6,11 +6,11 @@ This is an electric wheelchair control demo by [Katsushun89](https://github.com/
[WHILL Model CR](https://whill.jp/model-cr) is the control target, [M5Stack](https://m5stack.com/) is used for the controller, and [toio](https://toio.io/) is used for the control input device.
-[Move to a Pose Control — PythonRobotics documentation](https://atsushisakai.github.io/PythonRobotics/modules/control/move_to_a_pose_control/move_to_a_pose_control.html) is used for its control algorithm ([code link](https://github.com/AtsushiSakai/PythonRobotics/blob/master/PathTracking/move_to_pose/move_to_pose.py)).
+[Move to a Pose Control — PythonRobotics documentation](https://atsushisakai.github.io/PythonRobotics/modules/6_path_tracking/move_to_a_pose/move_to_a_pose.html) is used for its control algorithm ([code link](https://github.com/AtsushiSakai/PythonRobotics/blob/master/PathTracking/move_to_pose/move_to_pose.py)).

-Ref:
+Reference:
- [toioと同じように動くWHILL Model CR (in Japanese)](https://qiita.com/KatsuShun89/items/68fde7544ecfe36096d2)
@@ -101,7 +101,7 @@ Thank you!
---
-Dear AtsushiSakai, Thanks a lot for the wonderful collection of projects.It was really useful. I really appreciate your time in maintaing and building this repository.It will be really great if I get a chance to meet you in person sometime.I got really inspired looking at your work. All the very best for all your future endeavors! Thanks once again,
+Dear AtsushiSakai, Thanks a lot for the wonderful collection of projects.It was really useful. I really appreciate your time in maintaining and building this repository.It will be really great if I get a chance to meet you in person sometime.I got really inspired looking at your work. All the very best for all your future endeavors! Thanks once again,
---Ezhil Bharathi
@@ -163,7 +163,7 @@ so kind of you can you make videos of it.
---
-Dear AtshshiSakai, You are very wise and smart that you created this library for students wanting to learn Probabilistic Robotics and actually perform robot control. Hats off to you and your work. I am also reading your book titled : Python Robotics
+Dear AtsushiSakai, You are very wise and smart that you created this library for students wanting to learn Probabilistic Robotics and actually perform robot control. Hats off to you and your work. I am also reading your book titled : Python Robotics
--Himanshu
@@ -263,7 +263,7 @@ Hey Atsushi We are working on a multiagent simulation game and this project
---
-Thanks a lot for this amazing set of very useful librarires!
+Thanks a lot for this amazing set of very useful libraries!
--Razvan Viorel Mihai
@@ -275,7 +275,7 @@ Dear Atsushi Sakai,
This is probably one of the best open-source roboti
---
-hanks frnd, for ur contibusion
+Thanks friend, for your contribution
—--