No description

xword.py 9.2KB
  1. import math
  2. import cv2
  3. import numpy as np
  4. import peakutils
  5. import copy
  6. import argparse
  7. def preprocess_image(original, gaussian_blur_size, adaptive_threshold_block_size, adaptive_threshold_mean_adjustment, num_dilations):
  8. img = cv2.GaussianBlur(original, (gaussian_blur_size, gaussian_blur_size), 0)
  9. img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, adaptive_threshold_block_size, adaptive_threshold_mean_adjustment)
  10. kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], np.uint8)
  11. for i in range(num_dilations):
  12. img = cv2.dilate(img, kernel)
  13. return img
  14. def find_biggest_contour(img):
  15. contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
  16. biggest = None
  17. max_area = 0
  18. for contour in contours:
  19. area = cv2.contourArea(contour)
  20. if area > max_area:
  21. biggest = contour
  22. max_area = area
  23. return biggest
  24. def erode_contour(img_shape, contour, kernel_size, iterations):
  25. contour_img = np.zeros(img_shape, dtype=np.uint8)
  26. cv2.drawContours(contour_img, [contour], 0, 255, -1)
  27. kernel = np.ones((kernel_size, kernel_size), dtype=np.uint8)
  28. contour_img = cv2.erode(contour_img, kernel, iterations=iterations)
  29. contour_img = cv2.dilate(contour_img, kernel, iterations=iterations)
  30. return find_biggest_contour(contour_img)
  31. def get_contour_corners(img, contour):
  32. height, width = img.shape
  33. top_left = [width, height]
  34. top_right = [-1, height]
  35. bottom_left = [width, -1]
  36. bottom_right = [-1, -1]
  37. for vertex in contour:
  38. point = vertex[0]
  39. sum = point[0] + point[1]
  40. diff = point[0] - point[1]
  41. if sum < top_left[0] + top_left[1]:
  42. top_left = point
  43. if sum > bottom_right[0] + bottom_right[1]:
  44. bottom_right = point
  45. if diff < bottom_left[0] - bottom_left[1]:
  46. bottom_left = point
  47. if diff > top_right[0] - top_right[1]:
  48. top_right = point
  49. return top_left, top_right, bottom_right, bottom_left
  50. def segment_length(p1, p2):
  51. dx = p1[0] - p2[0]
  52. dy = p1[1] - p2[1]
  53. return math.sqrt(dx ** 2 + dy ** 2)
  54. def get_longest_side(poly):
  55. previous = poly[-1]
  56. max = 0
  57. for current in poly:
  58. len = segment_length(previous, current)
  59. if len > max:
  60. max = len
  61. previous = current
  62. return max
  63. def extract_square(img, top_left, top_right, bottom_right, bottom_left):
  64. src = [top_left, top_right, bottom_right, bottom_left]
  65. longest = get_longest_side(src)
  66. dst = [[0, 0], [longest - 1, 0], [longest - 1, longest - 1], [0, longest - 1]]
  67. m = cv2.getPerspectiveTransform(np.array(src, dtype=np.float32), np.array(dst, dtype=np.float32))
  68. return cv2.warpPerspective(img, m, (int(longest), int(longest)))
def get_fundamental_frequency(ffts):
    """Estimate a fundamental frequency (in bins) shared by the given FFTs.

    Each FFT is the transform of a grid-line intensity profile; its
    fundamental frequency corresponds to the number of grid rows/columns.
    Peaks are detected in the magnitude spectrum, an initial estimate is
    taken from the FFT with the most peaks, and that estimate is refined by
    minimising squared error against all detected peak positions.

    Returns:
        int frequency estimate, or None when any FFT has no peaks or the
        initial estimate is degenerate (< 2).
    """
    all_peak_indexes = []
    max_peak_count = None
    f_est = None
    for fft in ffts:
        # Use the upper half of the fft array, since this seems to always exclude the DC component.
        peak_indexes = peakutils.indexes(np.flip(abs(fft[len(fft) // 2:])), thres=0.3)
        peak_count = len(peak_indexes)
        if peak_count < 1:
            # No peaks in this spectrum: frequency detection fails entirely.
            return None
        if (max_peak_count is None) or (peak_count > max_peak_count):
            max_peak_count = peak_count
            # Rough estimate: peaks are expected at multiples of f, so the
            # last peak's position divided by the peak count approximates f.
            f_est = round(peak_indexes[peak_count - 1] / peak_count)
        all_peak_indexes.append(peak_indexes)
    if f_est < 2:
        return None
    # Refine: try f_est +/- 2 and keep the candidate whose multiples best
    # match every peak list (smallest total squared deviation from i * f).
    min_err = None
    f = None
    for delta in range(-2, 3):
        err = 0
        f_current = f_est + delta
        for peak_indexes in all_peak_indexes:
            for i, peak_index in enumerate(peak_indexes):
                err += (peak_index - f_current * (i + 1)) ** 2
        if (min_err is None) or (err < min_err):
            min_err = err
            f = f_current
    return int(f)
  97. def get_threshold_from_quantile(img, quantile):
  98. height, width = img.shape
  99. num_pixels = height * width
  100. pixels = np.sort(np.reshape(img, num_pixels))
  101. return pixels[int(num_pixels * quantile)]
  102. def extract_grid_colours(img, num_rows, num_cols, sampling_block_size_ratio):
  103. height, width = img.shape
  104. row_delta = int(height * sampling_block_size_ratio / num_rows / 2)
  105. col_delta = int(width * sampling_block_size_ratio / num_cols / 2)
  106. sampling_block_area = (2 * row_delta + 1) * (2 * col_delta + 1)
  107. grid = []
  108. for row in range(num_rows):
  109. line = []
  110. y = int(((row + 0.5) / num_rows) * height)
  111. for col in range(num_cols):
  112. sum = 0
  113. x = int(((col + 0.5) / num_cols) * width)
  114. for dy in range(-row_delta, row_delta + 1):
  115. for dx in range(-col_delta, col_delta + 1):
  116. sum += img[y + dy, x + dx]
  117. line.append(sum / sampling_block_area)
  118. grid.append(line)
  119. return grid
  120. def grid_colours_to_blocks(grid_colours, num_rows, num_cols, sampling_threshold):
  121. grid = copy.deepcopy(grid_colours)
  122. warning = False
  123. for row in range(round(num_rows / 2)):
  124. for col in range(num_cols):
  125. row2 = num_rows - row - 1
  126. col2 = num_cols - col - 1
  127. delta1 = grid_colours[row][col] - sampling_threshold
  128. delta2 = grid_colours[row2][col2] - sampling_threshold
  129. if (delta1 > 0) and (delta2 > 0):
  130. block = 0
  131. elif (delta1 < 0) and (delta2 < 0):
  132. block = 1
  133. else:
  134. warning = True
  135. if abs(delta1) > abs(delta2):
  136. block = 1 if delta1 < 0 else 0
  137. else:
  138. block = 1 if delta2 < 0 else 0
  139. grid[row][col] = grid[row2][col2] = block
  140. return warning, grid
  141. def draw_point(image, point, colour):
  142. height, width, _ = image.shape
  143. for dx in range(-10, 11):
  144. for dy in range(-10, 11):
  145. x = point[0] + dx
  146. y = point[1] + dy
  147. if (x >= 0) and (y >= 0) and (x < width) and (y < height):
  148. image[y, x] = colour
  149. def show_image(image):
  150. cv2.namedWindow('xword', cv2.WINDOW_NORMAL)
  151. cv2.imshow('xword', image)
  152. while cv2.waitKey() & 0xFF != ord('q'):
  153. pass
  154. cv2.destroyAllWindows()
  155. def extract_crossword(
  156. file_name,
  157. gaussian_blur_size=11,
  158. adaptive_threshold_block_size=11,
  159. adaptive_threshold_mean_adjustment=2,
  160. not_square=False,
  161. num_dilations=1,
  162. contour_erosion_kernel_size=5,
  163. contour_erosion_iterations=5,
  164. line_detector_element_size=51,
  165. sampling_block_size_ratio=0.25,
  166. sampling_threshold_quantile=0.3,
  167. sampling_threshold=None,
  168. grid_line_thickness=4,
  169. grid_square_size=64,
  170. grid_border_size=20,
  171. ):
  172. original = cv2.imread(file_name, cv2.IMREAD_GRAYSCALE)
  173. if original is None:
  174. raise RuntimeError("Failed to load image")
  175. img = preprocess_image(original, gaussian_blur_size, adaptive_threshold_block_size, adaptive_threshold_mean_adjustment, num_dilations)
  176. biggest = find_biggest_contour(img)
  177. biggest = erode_contour(img.shape, biggest, contour_erosion_kernel_size, contour_erosion_iterations)
  178. top_left, top_right, bottom_right, bottom_left = get_contour_corners(img, biggest)
  179. img = extract_square(img, top_left, top_right, bottom_right, bottom_left)
  180. horiz_elem = cv2.getStructuringElement(cv2.MORPH_RECT, (line_detector_element_size, 1))
  181. horiz_lines = cv2.erode(img, horiz_elem)
  182. horiz_lines = cv2.dilate(horiz_lines, horiz_elem)
  183. vert_elem = cv2.getStructuringElement(cv2.MORPH_RECT, (1, line_detector_element_size))
  184. vert_lines = cv2.erode(img, vert_elem)
  185. vert_lines = cv2.dilate(vert_lines, vert_elem)
  186. row_fft = np.fft.fft(np.sum(horiz_lines, axis=1))
  187. col_fft = np.fft.fft(np.sum(vert_lines, axis=0))
  188. if not_square:
  189. num_rows = get_fundamental_frequency([row_fft])
  190. num_cols = get_fundamental_frequency([col_fft])
  191. else:
  192. num_rows = num_cols = get_fundamental_frequency([row_fft, col_fft])
  193. block_img = extract_square(original, top_left, top_right, bottom_right, bottom_left)
  194. if sampling_threshold is None:
  195. sampling_threshold = get_threshold_from_quantile(block_img, sampling_threshold_quantile)
  196. else:
  197. sampling_threshold = sampling_threshold
  198. grid_colours = extract_grid_colours(block_img, num_rows, num_cols, sampling_block_size_ratio)
  199. warning, grid = grid_colours_to_blocks(grid_colours, num_rows, num_cols, sampling_threshold)
  200. step = grid_square_size + grid_line_thickness
  201. grid_height = num_rows * step + grid_line_thickness
  202. grid_width = num_cols * step + grid_line_thickness
  203. output = np.full([2 * grid_border_size + grid_height, 2 * grid_border_size + grid_width], 255, dtype=np.uint8)
  204. cv2.rectangle(output, (grid_border_size, grid_border_size), (grid_border_size + grid_width - 1, grid_border_size + grid_height - 1), 0, -1)
  205. for row in range(num_rows):
  206. y = row * step + grid_line_thickness + grid_border_size
  207. for col in range(num_cols):
  208. if grid[row][col] == 0:
  209. x = col * step + grid_line_thickness + grid_border_size
  210. cv2.rectangle(output, (x, y), (x + grid_square_size - 1, y + grid_square_size - 1), 255, -1)
  211. _, png = cv2.imencode('.png', output)
  212. return png.tobytes(), warning