How to remove all the detected lines from the original image using Python?

  • Last Update :
  • Techknowledgy :

After converting to grayscale, we apply Otsu's threshold to get a binary image

# Load the input image, convert it to grayscale, and apply Otsu's
# threshold. THRESH_BINARY_INV makes the lines/text white on a black
# background, which is what the morphological steps below expect.
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

From here we construct a special horizontal kernel to detect horizontal lines. Once the lines are detected, we fill in the lines to effectively remove the line

# Remove horizontal lines: morphological opening with a wide, flat
# kernel keeps only pixel runs at least 10 px wide and 1 px tall,
# i.e. horizontal lines.
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 1))
detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# cv2.findContours returns 2 values in OpenCV 4.x and 3 in OpenCV 3.x;
# this picks the contour list in either case.
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    # paint each detected line white (background) to erase it
    cv2.drawContours(image, [c], -1, (255, 255, 255), 2)

Similarly, to remove vertical lines, we construct a special vertical kernel

# Remove vertical lines: same idea as the horizontal pass, but the
# kernel is 1 px wide and 10 px tall, so only vertical runs survive
# the opening.
vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 10))
detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# cv2.findContours returns 2 values in OpenCV 4.x and 3 in OpenCV 3.x;
# this picks the contour list in either case.
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    # paint each detected line white (background) to erase it
    cv2.drawContours(image, [c], -1, (255, 255, 255), 2)

Suggestion : 2

February 7, 2019 at 6:50 am,February 7, 2019 at 5:36 am,February 9, 2016 at 3:59 pm,February 8, 2016 at 11:49 pm

Anyway, let’s go ahead and get this example started. Open up a new file, name it remove_contours.py , and let’s get coding:

# import the necessary packages
import numpy as np
import imutils
import cv2

def is_contour_bad(c):
    """Return True when contour ``c`` is 'bad', i.e. not a rectangle.

    The contour is approximated with approxPolyDP; a rectangle
    approximates to exactly 4 vertices.
    """
    # approximate the contour
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)

    # the contour is 'bad' if it is not a rectangle (not 4 vertices)
    return not len(approx) == 4

Let’s finish implementing the other steps to solve this problem:

# load the shapes image, convert it to grayscale, and detect edges
# in the image
image = cv2.imread("shapes.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edged = cv2.Canny(gray, 50, 100)
cv2.imshow("Original", image)

# find contours in the image and initialize the mask that will be
# used to remove the bad contours
cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# all-white mask; bad contours are drawn onto it in black below
mask = np.ones(image.shape[:2], dtype="uint8") * 255

# loop over the contours
for c in cnts:
    # if the contour is bad, draw it (filled, in black) on the mask
    if is_contour_bad(c):
        cv2.drawContours(mask, [c], -1, 0, -1)

# remove the contours from the image and show the resulting images:
# bitwise_and keeps only the pixels where the mask is non-zero
image = cv2.bitwise_and(image, image, mask=mask)
cv2.imshow("Mask", mask)
cv2.imshow("After", image)
cv2.waitKey(0)

To execute our script, just issue the following command:

$ python remove_contours.py

Suggestion : 3

Last Updated : 16 Sep, 2021,GATE CS 2021 Syllabus

1._
pip install opencv-python
pip install pytesseract

Suggestion : 4

First, take a look at the code that will demonstrate edge detection. Each line of code will be discussed in detail  so that you understand it fully.,The figure below shows the Sobel image for the gradient in both directions, which distills the original image into an edge structure representation, such that its structural integrity remains intact.,Before going into each algorithm in detail, let’s complete some preliminary steps needed for edge detection. Start by importing the OpenCV library, as shown in the code below. ,When these kernels are convolved with the original image, you get a ‘Sobel edge image’. 

import cv2

# Read the original image
img = cv2.imread('test.jpg')
# Display original image
cv2.imshow('Original', img)
cv2.waitKey(0)

# Convert to grayscale
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Blur the image for better edge detection (suppresses pixel noise
# that would otherwise produce spurious edges)
img_blur = cv2.GaussianBlur(img_gray, (3, 3), 0)

# Sobel Edge Detection
sobelx = cv2.Sobel(src=img_blur, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=5)  # Sobel Edge Detection on the X axis
sobely = cv2.Sobel(src=img_blur, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=5)  # Sobel Edge Detection on the Y axis
sobelxy = cv2.Sobel(src=img_blur, ddepth=cv2.CV_64F, dx=1, dy=1, ksize=5)  # Combined X and Y Sobel Edge Detection
# Display Sobel Edge Detection Images
cv2.imshow('Sobel X', sobelx)
cv2.waitKey(0)
cv2.imshow('Sobel Y', sobely)
cv2.waitKey(0)
cv2.imshow('Sobel X Y using Sobel() function', sobelxy)
cv2.waitKey(0)

# Canny Edge Detection
edges = cv2.Canny(image=img_blur, threshold1=100, threshold2=200)  # Canny Edge Detection
# Display Canny Edge Detection Image
cv2.imshow('Canny Edge Detection', edges)
cv2.waitKey(0)

cv2.destroyAllWindows()
#include <opencv2/opencv.hpp>
#include <iostream>
// using namespaces to nullify use of cv::function(); syntax and std::function();
using namespace std;
using namespace cv;

int main()
{
    // Reading image
    Mat img = imread("test.jpg");
    // Display original image
    imshow("original Image", img);
    waitKey(0);

    // Convert to grayscale
    Mat img_gray;
    cvtColor(img, img_gray, COLOR_BGR2GRAY);
    // Blur the image for better edge detection
    Mat img_blur;
    GaussianBlur(img_gray, img_blur, Size(3,3), 0);
    
    // Sobel edge detection (CV_64F output; dx/dy select gradient axis)
    Mat sobelx, sobely, sobelxy;
    Sobel(img_blur, sobelx, CV_64F, 1, 0, 5);
    Sobel(img_blur, sobely, CV_64F, 0, 1, 5);
    Sobel(img_blur, sobelxy, CV_64F, 1, 1, 5);
    // Display Sobel edge detection images
    imshow("Sobel X", sobelx);
    waitKey(0);
    imshow("Sobel Y", sobely);
    waitKey(0);
    imshow("Sobel XY using Sobel() function", sobelxy);
    waitKey(0);

    // Canny edge detection (hysteresis thresholds 100/200)
    Mat edges;
    Canny(img_blur, edges, 100, 200, 3, false);
    // Display canny edge detected image
    imshow("Canny edge detection", edges);
    waitKey(0);
    
    destroyAllWindows();
    return 0;
}
import cv2
#include <opencv2/opencv.hpp>
#include <iostream>

// Namespace nullifies the use of cv::function();
using namespace std;
using namespace cv;
# Read the original image directly as grayscale (flags=0 is
# equivalent to cv2.IMREAD_GRAYSCALE)
img = cv2.imread('test.jpg', flags=0)
# Blur the image for better edge detection; the Python API spells
# the keyword arguments sigmaX/sigmaY (lowercase 's')
img_blur = cv2.GaussianBlur(img, (3, 3), sigmaX=0, sigmaY=0)
// Reading image directly as grayscale (flag 0 = IMREAD_GRAYSCALE)
Mat img = imread("test.jpg", 0);
// Blur the image for better edge detection; C++ has no keyword
// arguments, so sigmaX and sigmaY are passed positionally
Mat img_blur;
GaussianBlur(img, img_blur, Size(3, 3), 0, 0);