How to stitch images that have a small overlap area and were captured with a wide-angle lens?


I have a system with a fixed wide-angle camera and a moving object. I captured 2064x40 px images at 10 mm intervals while the object was moving at constant velocity. I also captured 2048x40 px images without constant velocity. I would like to stitch these captured images.

First of all, I tried the OpenCV stitching method by referring to link. However, I got error code 1 and learned that the two images do not have enough overlap area to be stitched.
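For reference, here is a minimal sketch of that attempt (the file names and the choice of SCAN mode are placeholders/assumptions on my part):

import cv2

# Minimal sketch of the stitching attempt; file names are placeholders.
images = [cv2.imread(p) for p in ["0.bmp", "1.bmp", "2.bmp"]]
stitcher = cv2.Stitcher_create(cv2.Stitcher_SCAN)  # SCAN mode suits flat, translated captures
status, pano = stitcher.stitch(images)
if status != cv2.Stitcher_OK:
    # status 1 is cv2.Stitcher_ERR_NEED_MORE_IMGS: not enough overlap between frames
    print("Stitching failed with error code", status)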

After that, I thought I could concatenate the images from the constant-velocity captures. I used the code below with 13 px as the shifting parameter.

Code that I tried:

import numpy as np
import cv2
import os

from Stitching.Blending import UVSSBlendingConcate
from Stitching.DistortionCorrection import load_coefficients


def load_images_from_folder(folder):
    print("\nImages are being read from folder: " + folder)
    images = []
    for filename in os.listdir(folder):
        img = cv2.imread(folder + "/" + filename)
        if img is not None:
            images.append(img)
    return images


def unDistortImages(images):
    mtx, dist = load_coefficients('calibration_chessboard.yml')
    for i in range(len(images)):
        images[i] = cv2.undistort(images[i], mtx, dist, None, None)
    return images


def LineTriggerConcate(dx, images, blending, IsFlip, IsUnDistorted):
    print("\nImage LineTrigger Concate Start")

    if IsUnDistorted:
        images = unDistortImages(images)

    cropped_images = []
    for i in range(len(images) - 1):
        if IsFlip:
            cropped_images.append(cv2.flip(images[i][2:2 + dx, 0:2064], 0))
        else:
            cropped_images.append(images[i][2:2 + dx, 0:2064])

    if not blending:
        result = cv2.vconcat(cropped_images)
        return result
    else:
        # fold the strips together pairwise; no global needed
        blendingResult = cropped_images[0]
        for i in range(len(cropped_images) - 1):
            blendingResult = UVSSBlendingConcate(blendingResult, cropped_images[i + 1], dx / 2)

        print("\nImage LineTrigger Concate Finish")
        return blendingResult


def concateImages(image_list):
    image_h = cv2.vconcat(image_list)
    return image_h


def main():
    images_path = "10mm"
    image_list = load_images_from_folder(images_path)

    # LineTriggerConcate Parameters
    shiftParameter = 13
    IsBlending = False
    IsFlipped = True
    IsUnDistorted = False
    result = LineTriggerConcate(shiftParameter, image_list, IsBlending, IsFlipped, IsUnDistorted)

    cv2.imwrite(images_path + "/" + str(shiftParameter) + "_Shift_" + str(IsBlending) + "_Blending_Result.bmp", result)
    print('Successfully saved to %s' % images_path)


if __name__ == '__main__':
    main()

Output image:

[Image: result for the 10mm dataset]

[Image: a closer look at the problem]

In the above result the transition is not smooth, and I tried to fix it using blending and undistortion methods, but I was not successful.
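UVSSBlendingConcate is my own helper and is not shown here; a minimal sketch of the kind of vertical linear seam blend I am attempting (an illustrative stand-in, not the actual implementation) would be:

import numpy as np

# Illustrative stand-in for a vertical linear seam blend between two strips.
def linear_seam_blend(top, bottom, overlap):
    overlap = int(overlap)  # dx / 2 may arrive as a float
    alpha = np.linspace(1.0, 0.0, overlap)[:, None, None]  # per-row weights: 1 -> 0
    seam = (alpha * top[-overlap:].astype(np.float32)
            + (1.0 - alpha) * bottom[:overlap].astype(np.float32))
    return np.vstack([top[:-overlap], seam.astype(top.dtype), bottom[overlap:]])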

On the other hand, I assume the velocity of the object is constant, but unfortunately it is not in the real case. When the object accelerates, some parts of the image may be elongated or shortened.
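One direction I am considering (an untested sketch on my part, using OpenCV's phaseCorrelate) is to measure the actual shift between consecutive frames instead of assuming a fixed 13 px step:

import cv2
import numpy as np

# Sketch: estimate the per-pair vertical shift instead of assuming a fixed step.
def estimate_vertical_shift(img_a, img_b):
    a = cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY).astype(np.float32)
    b = cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY).astype(np.float32)
    (shift_x, shift_y), response = cv2.phaseCorrelate(a, b)  # sub-pixel translation estimate
    return shift_y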

Could someone please suggest a methodology or relevant research?
I am also sharing a part of the 10mm interval dataset.

CodePudding user response:

Here is a slightly more "morphologically rich" solution:

-convert each image to edges

-dilate the edges (to improve gradient descent flexibility)

-find the best match offset for each image against the bottom of the accumulating image

-stash the offset and append to the accumulating image

-rebuild the full image using the RGB images and the stored offsets

#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <Windows.h>
#include <string>

using namespace cv;

double imDiff(Mat mat1, Mat mat2)
{
    // sum of squared per-channel differences between two equally sized patches
    Mat channels1[3], channels2[3];
    cv::split(mat1, channels1);
    cv::split(mat2, channels2);
    double sumSquares = 0;
    for (int c = 0; c < 3; c++)
    {
        Mat dif;
        cv::absdiff(channels1[c], channels2[c], dif); // absdiff avoids uchar saturation
        dif.convertTo(dif, CV_32F);                   // widen before squaring
        dif = dif.mul(dif);                           // mul() returns the result; assign it
        sumSquares += cv::sum(dif)[0];
    }
    return sumSquares;
}

Mat autoCanny(Mat image)
{
    // edge map: Canny followed by dilation to thicken the edges
    Mat edged;
    cv::Canny(image, edged, 75, 125);
    Mat blurred;

    int dilation_size = 1;
    Mat element = cv::getStructuringElement(MORPH_CROSS,
        Size(2 * dilation_size + 1, 2 * dilation_size + 1),
        Point(dilation_size, dilation_size));

    cv::dilate(edged, blurred, element, cv::Point(-1, -1), 2);
    //cv::GaussianBlur(edged, blurred, cv::Size(5, 5), 0);
    return blurred;
}

int main(int argc, char** argv)
{
    int compareHeight = 25;
    int compareWidth = 350;

    std::vector<int> offsets = std::vector<int>();

    Mat image;
    bool firstLoop = true;
    for (int i = 140; i >=53; i--) //140
    {
        std::string fileName = "C:/Users/thoma/Downloads/stitching-main/stitching-main/dataset_10mm/" + std::to_string(i) + ".bmp";
        Mat tempImage = imread(fileName, 1);
        tempImage = autoCanny(tempImage);
        //std::cout << "imsize: " << tempImage.rows << std::endl;
        if (firstLoop) { image = tempImage; firstLoop = false; }
        else 
        {
            double lowestNorm = -1;
            int index = -1;
            Mat refSlice = image(Rect(image.cols/2-compareWidth, image.rows- compareHeight, 2*compareWidth, compareHeight));
            for (int ii = 0; ii < tempImage.rows - compareHeight; ii++)
            {
                Mat testSlice = tempImage(Rect(tempImage.cols/2-compareWidth, ii, 2*compareWidth, compareHeight));
                //double tempNorm = cv::norm(refSlice, testSlice);
                double tempNorm = imDiff(refSlice, testSlice);
                //std::cout << "norm: " << tempNorm << std::endl;
                //std::cout << "cust: " << imDiff(refSlice, testSlice) << std::endl;
                if (lowestNorm == -1)
                {
                    lowestNorm = tempNorm;
                    index = ii;
                }
                else if (tempNorm < lowestNorm)
                {
                    lowestNorm = tempNorm;
                    index = ii;
                }
            }
            index += compareHeight;
            std::cout << tempImage.rows - index << std::endl;
            if (tempImage.rows - index < 1) { std::cout << "skipped" << std::endl; continue; }

            //index = 32;
            offsets.push_back(index);
            
            Mat crop_img = tempImage(Rect(0, index, tempImage.cols, tempImage.rows-index));
            vconcat(image, crop_img, image);
        }
    }

    namedWindow("Display Image", WINDOW_AUTOSIZE);
    imshow("Display Image", image);

    waitKey(0);



    firstLoop = true;
    int offsetIndex = 0;
    for (int i = 140; i >= 53; i--) //140
    {
        std::string fileName = "C:/Users/thoma/Downloads/stitching-main/stitching-main/dataset_10mm/" + std::to_string(i) + ".bmp";
        Mat tempImage = imread(fileName, 1);

        if (firstLoop) { image = tempImage; firstLoop = false; }
        else
        {
            Mat crop_img = tempImage(Rect(0, offsets[offsetIndex], tempImage.cols, tempImage.rows - offsets[offsetIndex]));
            vconcat(image, crop_img, image);
            offsetIndex++;
        }
    }


    namedWindow("Display Image", WINDOW_AUTOSIZE);
    imshow("Display Image", image);

    waitKey(0);
    system("pause");
    return 0;
}

Result Picture: https://imgur.com/9dEXonn

Notes: This method uses a center stripe of the image. There still seems to be non-trivial distortion that increases towards the edges, so this method attempts to ignore those regions. It is vulnerable to a lack of horizontal edges (pretty sure most stitching methods will be). As written, this strategy has a lot of "magic variables", i.e., knobs that you would want to dial in and/or automate if you are planning on deploying this code or running it in an automated capacity.

CodePudding user response:

Your current solution seems pretty darn close to me. Have you tried using a minimum-difference search to clean up the last little bit of alignment prior to combining? Basically, create some form of value function (it could be a summation of pixel-by-pixel color distances). You could apply this value function to a single line in the image, several lines, or a random scatter of points, and then step through candidate offsets one pixel at a time while comparing against the secondary image.
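In Python terms, such a value function could be as simple as the sketch below (names are illustrative; the C++ code that follows uses cv::norm for the same purpose):

import numpy as np

# Sketch of the value function: sum of squared per-pixel color differences.
def strip_cost(ref_strip, test_strip):
    diff = ref_strip.astype(np.float32) - test_strip.astype(np.float32)
    return float(np.sum(diff * diff))  # lower cost = better alignment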

Here is some code (sorry, in C++) that demonstrates a simple implementation of this (note that it only handles alignment under variable velocity and does nothing to correct the hard seams, which still need to be blurred):

#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <Windows.h>
#include <string>

using namespace cv;

int main(int argc, char** argv)
{
    int compareHeight = 2;

    Mat image;
    bool firstLoop = true;
    for (int i = 140; i >=53; i--) //140
    {
        std::string fileName = "C:/Users/thoma/Downloads/stitching-main/stitching-main/dataset_10mm/" + std::to_string(i) + ".bmp";
        Mat tempImage;
        tempImage = imread(fileName, 1);
        std::cout << "imsize: " << tempImage.rows << std::endl;
        if (firstLoop) { image = tempImage; firstLoop = false; }
        else 
        {
            double lowestNorm = -1;
            int index = -1;
            Mat refSlice = image(Rect(0, image.rows- compareHeight, image.cols, compareHeight));
            for (int ii = 0; ii < tempImage.rows - compareHeight; ii++)
            {
                Mat testSlice = tempImage(Rect(0, ii, tempImage.cols, compareHeight));
                double tempNorm = cv::norm(refSlice, testSlice);
                if (lowestNorm == -1)
                {
                    lowestNorm = tempNorm;
                    index = ii;
                }
                else if (tempNorm < lowestNorm)
                {
                    lowestNorm = tempNorm;
                    index = ii;
                }
            }
            std::cout << index << " , "<<lowestNorm<< std::endl;
            index += compareHeight;
            if (tempImage.rows - index < 1) { std::cout << "skipped" << std::endl; continue; }

            
            Mat crop_img = tempImage(Rect(0, index, tempImage.cols, tempImage.rows-index));
            vconcat(image, crop_img, image);
        }
    }

    if (!image.data)
    {
        printf("No image data \n"); system("pause");
        return -1;
    }
    namedWindow("Display Image", WINDOW_AUTOSIZE);
    imshow("Display Image", image);

    waitKey(0);
    system("pause");
    return 0;
}

Running above code on your example set: https://imgur.com/EYvJT8I

Some additional notes about your data set: I noticed that there is still some warping, so by performing this fit on a particular strip of the image you can favor some features over others. Also, I am using 2 rows per comparison to try to prevent the alignment from walking; you could use more or fewer. Finally, clipping the top few rows off each image may help, as there seems to be some variable lighting at the edges of the images. I may spend some more time on this later, but this is where I have it right now with your current data set.
