Python: How to stitch images with overlapping areas?























I'm trying to stitch together images which have overlapping areas.
The images are sorted; each image has an overlapping area with the previous image. For example:



https://imgur.com/a/t9zzeHD



I've tried the code at https://www.pyimagesearch.com/2016/01/11/opencv-panorama-stitching/, which I slightly modified and ran on tilted images, but the end result (https://imgur.com/a/B2d2VBL) is not as expected.



Does the issue arise from the 5th image, which has a black strip on its right side? I'm not sure why the black was added or how to avoid it.



Does anybody know how I can fix the code so it does not distort the images as I add more and more of them? Better code examples for me to use are welcome.



~~~~~~~~ EDIT ~~~~~~~~
As pointed out by Dan in the comments, I'm using the wrong tool (warpPerspective) for the job. What I'm really looking for is a way to find the matching key points in both images and translate them to the correct Y coordinate in each image, so I can cut the images and then stitch them together accordingly.



So the question now is simpler: how do I get matching key points and translate them into Y coordinates (where to cut)?
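A minimal sketch of one way this could look (illustrative only, and not the code I started from): match keypoints between two consecutive images, take the median vertical displacement of the matched points as the overlap offset, then cut and stack. It assumes the images have the same width and differ only by a vertical shift; the helper names vertical_offset and stitch_vertically are made up for this sketch.


import cv2
import numpy as np

def vertical_offset(imageA, imageB, max_matches=50):
    """Estimate how far down (in pixels) imageB's top row sits inside imageA."""
    # ORB is used here only because it ships with the default OpenCV build;
    # SIFT keypoints would work the same way.
    orb = cv2.ORB_create(2000)
    grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
    kpsA, descA = orb.detectAndCompute(grayA, None)
    kpsB, descB = orb.detectAndCompute(grayB, None)

    # brute-force matching with cross-check, keep only the strongest matches
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(matcher.match(descA, descB), key=lambda m: m.distance)
    matches = matches[:max_matches]

    # for a pure vertical scroll the x offset is ~0, so the median of the
    # y differences is a robust estimate of where imageB starts in imageA
    dys = [kpsA[m.queryIdx].pt[1] - kpsB[m.trainIdx].pt[1] for m in matches]
    return int(round(np.median(dys)))

def stitch_vertically(imageA, imageB):
    """Cut off the part of imageB already contained in imageA and stack them."""
    dy = vertical_offset(imageA, imageB)
    overlap = imageA.shape[0] - dy  # rows of imageB that repeat the bottom of imageA
    return np.vstack([imageA, imageB[overlap:]])
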



PLEASE IGNORE THE ORIGINAL CODE BELOW, AS IT IS ONLY AN EXAMPLE OF WHERE I STARTED AND IS ONLY MISLEADING AT THIS POINT.



The code example below takes as input a path to a directory which contains the images ["0.png", "1.png", "2.png", "3.png"]:



from PIL import Image
import numpy as np
import imutils
import cv2
import sys


class Stitcher:
    def __init__(self):
        # determine if we are using OpenCV v3.X
        self.isv3 = imutils.is_cv3()

    def stitch(self, images, ratio=0.75, reprojThresh=4.0,
               showMatches=False):
        # unpack the images, then detect keypoints and extract
        # local invariant descriptors from them
        (imageB, imageA) = images
        (kpsA, featuresA) = self.detectAndDescribe(imageA)
        (kpsB, featuresB) = self.detectAndDescribe(imageB)

        # match features between the two images
        M = self.matchKeypoints(kpsA, kpsB,
                                featuresA, featuresB, ratio, reprojThresh)

        # if the match is None, then there aren't enough matched
        # keypoints to create a panorama
        if M is None:
            return None

        # otherwise, apply a perspective warp to stitch the images
        # together
        (matches, H, status) = M
        result = cv2.warpPerspective(imageA, H,
                                     (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
        result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB

        # check to see if the keypoint matches should be visualized
        if showMatches:
            vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches,
                                   status)

            # return a tuple of the stitched image and the
            # visualization
            return (result, vis)

        # return the stitched image
        return result

    def detectAndDescribe(self, image):
        # convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # check to see if we are using OpenCV 3.X
        if self.isv3:
            # detect and extract features from the image
            descriptor = cv2.xfeatures2d.SIFT_create()
            (kps, features) = descriptor.detectAndCompute(image, None)

        # otherwise, we are using OpenCV 2.4.X
        else:
            # detect keypoints in the image
            detector = cv2.FeatureDetector_create("SIFT")
            kps = detector.detect(gray)

            # extract features from the image
            extractor = cv2.DescriptorExtractor_create("SIFT")
            (kps, features) = extractor.compute(gray, kps)

        # convert the keypoints from KeyPoint objects to NumPy
        # arrays
        kps = np.float32([kp.pt for kp in kps])

        # return a tuple of keypoints and features
        return (kps, features)

    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
                       ratio, reprojThresh):
        # compute the raw matches and initialize the list of actual
        # matches
        matcher = cv2.DescriptorMatcher_create("BruteForce")
        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
        matches = []

        # loop over the raw matches
        for m in rawMatches:
            # ensure the distance is within a certain ratio of each
            # other (i.e. Lowe's ratio test)
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].trainIdx, m[0].queryIdx))

        # computing a homography requires at least 4 matches
        if len(matches) > 4:
            # construct the two sets of points
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            # compute the homography between the two sets of points
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                             reprojThresh)

            # return the matches along with the homography matrix
            # and status of each matched point
            return (matches, H, status)

        # otherwise, no homography could be computed
        return None

    def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
        # initialize the output visualization image
        (hA, wA) = imageA.shape[:2]
        (hB, wB) = imageB.shape[:2]
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB

        # loop over the matches
        for ((trainIdx, queryIdx), s) in zip(matches, status):
            # only process the match if the keypoint was successfully
            # matched
            if s == 1:
                # draw the match
                ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
                ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)

        # return the visualization
        return vis


if __name__ == '__main__':
    images_folder = sys.argv[1]
    images = ["0.png", "1.png", "2.png", "3.png"]

    imageA = cv2.imread(images_folder + images[0])
    imageB = cv2.imread(images_folder + images[1])

    # stitch the first two images together to create a panorama
    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

    count = 0
    imgRGB = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(imgRGB)
    current_stitched_image = images_folder + "lol10{}.png".format(count)
    img.save(current_stitched_image)

    # repeatedly stitch the running result with the next image
    for image in images[2:]:
        count += 1
        print("image: {}".format(image))
        print("count: {}".format(count))
        print("current_stitched_image: {}".format(current_stitched_image))
        imageA1 = cv2.imread(current_stitched_image)
        imageB1 = cv2.imread(images_folder + image)
        (result, vis) = stitcher.stitch([imageA1, imageB1], showMatches=True)
        imgRGB = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(imgRGB)
        current_stitched_image = images_folder + "lol10{}.png".format(count)
        print("new current_stitched_image: {}".format(current_stitched_image))
        img.save(current_stitched_image)









python opencv














edited Nov 10 at 7:59

























asked Nov 9 at 4:22









Captain_Meow_Meow













  • The question title seems a bit odd too -- isn't the overlap a prerequisite for the stitching to work? | With an input like this, warpPerspective seems a bit counterproductive.
    – Dan Mašek
    Nov 9 at 12:30










  • @DanMašek Yes, I agree. As I'm unfamiliar with working with cv2, I used an example I found; after digging more into it today I did find that warpPerspective is definitely not the right approach, as it's more relevant to stitching images in cases such as panoramas. I guess what I really need is just to find the area with the maximum number of matching points and apply a cut & stitch. But as I'm new, I'm not sure how to find the best matching key points, and how to translate that output into Y coordinates (where to cut).
    – Captain_Meow_Meow
    Nov 10 at 7:55


















1 Answer


The problem of the black strip is that you place two overlapping images inside an output image as wide as both of them together, so the black strip is as wide as the overlapping area. To remove it you can calculate the width actually needed: for example, find where the top-right and bottom-right points of the warped image will be mapped to, using the perspective-transform formula given in the OpenCV documentation. Something like this:



def determineMaXSize(self, w, h, M):
    x0 = (M[0,0]*(w-1) + M[0,2]) / (M[2,0]*(w-1) + M[2,2])  # for the top-right point
    x1 = (M[0,0]*(w-1) + M[0,1]*(h-1) + M[0,2]) / (M[2,0]*(w-1) + M[2,1]*(h-1) + M[2,2])  # for the bottom-right point
    print("x0", x0)
    print("x1", x1)
    return int(min(x0, x1))


And then:



result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
maxWidth = self.determineMaXSize(imageA.shape[1], imageA.shape[0], H)
result = result[:, 0:maxWidth]


I take the minimum width of the two mapped points to remove all of the black part on the right, even if the image is distorted (it will cut off a little of the image in that case).



Your second problem is that the matches between the two images of the guy's chest are not good enough, so the images end up slightly shifted, and that in turn produces quite a bit of distortion. You should try to obtain images with more overlap, or try other matching methods or other parameters for the SIFT feature detection. You could also look into a method that only estimates a translation from the matched features/points.
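
As an illustration of that last suggestion (my own sketch, not code from this answer): the ptsA/ptsB arrays already built inside matchKeypoints() can be fed into a translation/similarity estimator instead of findHomography. The helper names are made up, and estimateAffinePartial2D needs OpenCV 3.2 or newer.


import numpy as np
import cv2

def estimate_shift(ptsA, ptsB):
    """Estimate the (dx, dy) shift mapping imageA points onto imageB points."""
    # estimateAffinePartial2D fits a similarity transform (translation,
    # rotation, uniform scale) using RANSAC by default, so a few bad
    # matches are tolerated; the translation is the last column of M.
    M, inliers = cv2.estimateAffinePartial2D(ptsA, ptsB)
    return M[0, 2], M[1, 2]

def median_shift(ptsA, ptsB):
    """Even simpler: the median displacement of the matched point pairs."""
    d = np.median(ptsB - ptsA, axis=0)
    return float(d[0]), float(d[1])
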






        answered Nov 9 at 13:04









        api55
