# Visual odometry demo: SIFT keypoint extraction, FLANN-based matching,
# ratio-test filtering, and match visualization between two images.
from typing import Optional, Sequence
|
|
|
|
import cv2
|
|
import numpy as np
|
|
from matplotlib import pyplot as plt
|
|
|
|
|
|
class VisualOdometry:
    """Feature-based visual odometry helper.

    Extracts SIFT keypoints, matches them with a FLANN-based matcher,
    filters matches with Lowe's ratio test, estimates relative camera
    motion from the essential matrix, and visualizes matches.
    """

    def __init__(self,
                 K: np.ndarray,
                 index_params: Optional[dict[str, int]] = None,
                 search_params: Optional[dict[str, int]] = None):
        """ Constructor

        Args:
            K (np.ndarray): Camera Intrinsics Model (3x3 matrix)
            index_params (dict[str, int], optional): Index parameters for FLANN. Defaults to {"algorithm": 1, "trees": 5}.
            search_params (dict[str, int], optional): Search parameters for FLANN. Defaults to {"checks": 50}.
        """
        # None-sentinel defaults replace mutable dict defaults, which would be
        # shared (and mutable) across every call/instance.
        if index_params is None:
            index_params = {"algorithm": 1, "trees": 5}
        if search_params is None:
            search_params = {"checks": 50}

        self.K = K
        # pyright: ignore[reportAttributeAccessIssue]
        self.sift = cv2.SIFT_create()
        self.flann = cv2.FlannBasedMatcher(
            indexParams=index_params, searchParams=search_params)  # pyright: ignore[reportArgumentType]

    def extract_keypoints(self, img: cv2.typing.MatLike) -> tuple[list[cv2.KeyPoint], np.ndarray]:
        """ Detects keypoints in an image

        Args:
            img (cv2.typing.MatLike): input image (grayscale or BGR)

        Returns:
            kp (list[cv2.KeyPoint]): list of keypoints
            desc (np.ndarray): descriptor of the keypoints
        """
        return self.sift.detectAndCompute(img, None)

    def match_keypoints(self,
                        desc1: np.ndarray,
                        desc2: np.ndarray,
                        k: int = 2) -> Sequence[Sequence[cv2.DMatch]]:
        """ Matches keypoints

        Args:
            desc1 (np.ndarray): image 1 keypoint description
            desc2 (np.ndarray): image 2 keypoint description
            k (int, optional): number of nearest neighbors per query descriptor. Defaults to 2.

        Returns:
            Sequence[Sequence[cv2.DMatch]]: sequence of matches
        """
        return self.flann.knnMatch(desc1, desc2, k=k)

    def filter_matches(self, matches: Sequence[Sequence[cv2.DMatch]], distance_threshold: float = 0.7) -> list[cv2.DMatch]:
        """ Filters out good keypoint matches using Lowe's ratio test

        Args:
            matches (Sequence[Sequence[cv2.DMatch]]): list of keypoint matches
            distance_threshold (float, optional): distance percent threshold for filtering. Defaults to 0.7.

        Returns:
            list[cv2.DMatch]: list of good matches
        """
        # knnMatch can return fewer than 2 neighbors for a query descriptor;
        # guard the unpack so short pairs are skipped instead of raising.
        return [pair[0] for pair in matches
                if len(pair) == 2 and pair[0].distance < distance_threshold * pair[1].distance]

    def estimate_motion(self, kp1: list[cv2.KeyPoint], kp2: list[cv2.KeyPoint], matches: list[cv2.DMatch]) -> tuple[np.ndarray, np.ndarray]:
        """ Estimates the motion between two images

        Args:
            kp1 (list[cv2.KeyPoint]): first image keypoints
            kp2 (list[cv2.KeyPoint]): second image keypoints
            matches (list[cv2.DMatch]): list of keypoint matches

        Returns:
            R (np.ndarray): 3x3 rotation matrix from frame 1 to frame 2
            t (np.ndarray): 3x1 unit translation vector (scale is unobservable
                from a monocular pair)

        Raises:
            ValueError: if fewer than 5 matches are supplied (minimum for the
                five-point essential-matrix solver).
        """
        if len(matches) < 5:
            raise ValueError(
                f"Need at least 5 matches to estimate motion, got {len(matches)}")

        # Gather matched pixel coordinates from both images.
        pts1 = np.float64([kp1[m.queryIdx].pt for m in matches])
        pts2 = np.float64([kp2[m.trainIdx].pt for m in matches])

        # Essential matrix with RANSAC to reject outlier correspondences.
        E, mask = cv2.findEssentialMat(
            pts1, pts2, self.K, method=cv2.RANSAC, prob=0.999, threshold=1.0)

        # Decompose E and pick the (R, t) with positive-depth (cheirality) support.
        _, R, t, _ = cv2.recoverPose(E, pts1, pts2, self.K, mask=mask)
        return R, t

    def draw_keypoint_matches(self,
                              img1: cv2.typing.MatLike,
                              kp1: list[cv2.KeyPoint],
                              img2: cv2.typing.MatLike,
                              kp2: list[cv2.KeyPoint],
                              matches: list[cv2.DMatch],
                              output_image: Optional[cv2.typing.MatLike] = None) -> cv2.typing.MatLike:
        """ Generates an image drawing the keypoint matches between two images in the image

        Args:
            img1 (cv2.typing.MatLike): first image
            kp1 (list[cv2.KeyPoint]): first image keypoints
            img2 (cv2.typing.MatLike): second image
            kp2 (list[cv2.KeyPoint]): second image keypoints
            matches (list[cv2.DMatch]): list of matches accepted
            output_image (Optional[cv2.typing.MatLike], optional): output image buffer. If None or omitted, a new one will be created.

        Returns:
            cv2.typing.MatLike: side-by-side image with match lines drawn
        """
        # Draw matches only (skip unmatched keypoints for a cleaner image).
        # pyright: ignore[reportArgumentType, reportCallIssue]
        return cv2.drawMatches(img1, kp1, img2, kp2, matches, output_image, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

    @staticmethod
    def show_keypoint_matches(match_image: cv2.typing.MatLike) -> None:
        """ Show image matches

        Args:
            match_image (cv2.typing.MatLike): match image
        """
        plt.figure(figsize=(15, 10))
        plt.imshow(match_image, cmap='gray')
        plt.title('Matched Keypoints Between Two Images')
        plt.axis('off')
        plt.show()
|
|
|
|
|
|
def main(img1_path: str = ".\\train1\\3d20ae25-5b29-320d-8bae-f03e9dc177b9\\ring_front_center\\ring_front_center_315975023006264672.jpg",
         img2_path: str = ".\\train1\\3d20ae25-5b29-320d-8bae-f03e9dc177b9\\ring_front_center\\ring_front_center_315975023039564872.jpg") -> None:
    """ Demo pipeline: load two frames, match SIFT keypoints, show the matches.

    Args:
        img1_path (str, optional): path to the first image. Defaults to a
            sample Argoverse ring_front_center frame.
        img2_path (str, optional): path to the second image. Defaults to the
            next sample frame of the same sequence.

    Raises:
        RuntimeError: if either image cannot be read.
    """
    # Set Camera Intrinsics (fx = fy, principal point near image center).
    K = np.array(
        [[1389.2414846481593, 0, 962.3421649150145],
         [0, 1389.2414846481593, 605.814069325842],
         [0, 0, 1]],
        dtype=np.float64)

    # Load images; cv2.imread returns None instead of raising on failure.
    img1 = cv2.imread(img1_path)
    img2 = cv2.imread(img2_path)

    if img1 is None:
        raise RuntimeError(f"Could not open or find the image {img1_path}")

    if img2 is None:
        raise RuntimeError(f"Could not open or find the image {img2_path}")

    # Create an instance of the VisualOdometry class
    vo = VisualOdometry(K=K)

    # Extract Keypoints
    kp1, desc1 = vo.extract_keypoints(img1)
    kp2, desc2 = vo.extract_keypoints(img2)

    # Match Keypoints
    matches = vo.match_keypoints(desc1, desc2)

    # Filter Keypoints
    good_matches = vo.filter_matches(matches)

    # Draw matches
    img_matches = vo.draw_keypoint_matches(img1, kp1, img2, kp2, good_matches)

    # Show Matches
    VisualOdometry.show_keypoint_matches(img_matches)
|
|
|
|
|
|
# Run the demo only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
|