# xword.py — locate a crossword grid in a photograph and redraw it as a clean PNG.
  1. import math
  2. import cv2
  3. import numpy as np
  4. import copy
  5. import argparse
  6. def preprocess_image(original, gaussian_blur_size, adaptive_threshold_block_size, adaptive_threshold_mean_adjustment, num_dilations):
  7. img = cv2.GaussianBlur(original, (gaussian_blur_size, gaussian_blur_size), 0)
  8. img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, adaptive_threshold_block_size, adaptive_threshold_mean_adjustment)
  9. kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], np.uint8)
  10. for i in range(num_dilations):
  11. img = cv2.dilate(img, kernel)
  12. return img
  13. def morph_open_image(img, kernel_size, iterations=1):
  14. kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
  15. return cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=iterations)
  16. def get_fundamental_frequency(fft):
  17. mag = abs(fft[0:len(fft) // 2])
  18. mag[0] = 0
  19. return int(np.argmax(mag))
  20. def get_line_fft(img, line_detector_element_size, axis):
  21. lines = morph_open_image(img, (line_detector_element_size, 1) if axis == 1 else (1, line_detector_element_size))
  22. return np.fft.fft(np.sum(lines, axis=axis))
  23. def get_line_frequency(img, line_detector_element_size, axis):
  24. return get_fundamental_frequency(get_line_fft(img, line_detector_element_size, axis))
  25. def find_biggest_contour(img):
  26. contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
  27. biggest = None
  28. max_area = 0
  29. for contour in contours:
  30. area = cv2.contourArea(contour)
  31. if area > max_area:
  32. biggest = contour
  33. max_area = area
  34. return biggest
  35. def erode_contour(img_shape, contour, erosion_kernel_size, iterations):
  36. contour_img = np.zeros(img_shape, dtype=np.uint8)
  37. cv2.drawContours(contour_img, [contour], 0, 255, -1)
  38. contour_img = morph_open_image(contour_img, (erosion_kernel_size, erosion_kernel_size), iterations)
  39. return find_biggest_contour(contour_img)
  40. def get_contour_corners(img, contour):
  41. height, width = img.shape
  42. top_left = [width, height]
  43. top_right = [-1, height]
  44. bottom_left = [width, -1]
  45. bottom_right = [-1, -1]
  46. for vertex in contour:
  47. point = vertex[0]
  48. sum = point[0] + point[1]
  49. diff = point[0] - point[1]
  50. if sum < top_left[0] + top_left[1]:
  51. top_left = point
  52. if sum > bottom_right[0] + bottom_right[1]:
  53. bottom_right = point
  54. if diff < bottom_left[0] - bottom_left[1]:
  55. bottom_left = point
  56. if diff > top_right[0] - top_right[1]:
  57. top_right = point
  58. return top_left, top_right, bottom_right, bottom_left
  59. def segment_length(p1, p2):
  60. dx = p1[0] - p2[0]
  61. dy = p1[1] - p2[1]
  62. return math.sqrt(dx ** 2 + dy ** 2)
  63. def get_longest_side(poly):
  64. previous = poly[-1]
  65. max = 0
  66. for current in poly:
  67. len = segment_length(previous, current)
  68. if len > max:
  69. max = len
  70. previous = current
  71. return max
  72. def extract_square(img, top_left, top_right, bottom_right, bottom_left):
  73. src = [top_left, top_right, bottom_right, bottom_left]
  74. longest = get_longest_side(src)
  75. dst = [[0, 0], [longest - 1, 0], [longest - 1, longest - 1], [0, longest - 1]]
  76. m = cv2.getPerspectiveTransform(np.array(src, dtype=np.float32), np.array(dst, dtype=np.float32))
  77. return cv2.warpPerspective(img, m, (int(longest), int(longest)))
  78. def get_threshold_from_quantile(img, quantile):
  79. height, width = img.shape
  80. num_pixels = height * width
  81. pixels = np.sort(np.reshape(img, num_pixels))
  82. return pixels[int(num_pixels * quantile)]
  83. def extract_grid_colours(img, num_rows, num_cols, sampling_block_size_ratio):
  84. height, width = img.shape
  85. row_delta = int(height * sampling_block_size_ratio / num_rows / 2)
  86. col_delta = int(width * sampling_block_size_ratio / num_cols / 2)
  87. sampling_block_area = (2 * row_delta + 1) * (2 * col_delta + 1)
  88. grid = []
  89. for row in range(num_rows):
  90. line = []
  91. y = int(((row + 0.5) / num_rows) * height)
  92. for col in range(num_cols):
  93. sum = 0
  94. x = int(((col + 0.5) / num_cols) * width)
  95. for dy in range(-row_delta, row_delta + 1):
  96. for dx in range(-col_delta, col_delta + 1):
  97. sum += img[y + dy, x + dx]
  98. line.append(sum / sampling_block_area)
  99. grid.append(line)
  100. return grid
  101. def grid_colours_to_blocks(grid_colours, num_rows, num_cols, sampling_threshold):
  102. grid = copy.deepcopy(grid_colours)
  103. warning = False
  104. for row in range(round(num_rows / 2)):
  105. for col in range(num_cols):
  106. row2 = num_rows - row - 1
  107. col2 = num_cols - col - 1
  108. delta1 = grid_colours[row][col] - sampling_threshold
  109. delta2 = grid_colours[row2][col2] - sampling_threshold
  110. if (delta1 > 0) and (delta2 > 0):
  111. block = 0
  112. elif (delta1 < 0) and (delta2 < 0):
  113. block = 1
  114. else:
  115. warning = True
  116. if abs(delta1) > abs(delta2):
  117. block = 1 if delta1 < 0 else 0
  118. else:
  119. block = 1 if delta2 < 0 else 0
  120. grid[row][col] = grid[row2][col2] = block
  121. return warning, grid
  122. def draw_point(image, point, colour):
  123. height, width, _ = image.shape
  124. for dx in range(-10, 11):
  125. for dy in range(-10, 11):
  126. x = point[0] + dx
  127. y = point[1] + dy
  128. if (x >= 0) and (y >= 0) and (x < width) and (y < height):
  129. image[y, x] = colour
  130. def show_image(image):
  131. cv2.namedWindow('xword', cv2.WINDOW_NORMAL)
  132. cv2.imshow('xword', image)
  133. while cv2.waitKey() & 0xFF != ord('q'):
  134. pass
  135. cv2.destroyAllWindows()
  136. def extract_crossword(
  137. file_name,
  138. gaussian_blur_size=11,
  139. adaptive_threshold_block_size=11,
  140. adaptive_threshold_mean_adjustment=2,
  141. square=True,
  142. num_dilations=1,
  143. contour_erosion_kernel_size=5,
  144. contour_erosion_iterations=6,
  145. line_detector_element_size=51,
  146. sampling_block_size_ratio=0.25,
  147. sampling_threshold_quantile=0.3,
  148. sampling_threshold=None,
  149. grid_line_thickness=4,
  150. grid_square_size=64,
  151. grid_border_size=20,
  152. ):
  153. warnings = []
  154. original = cv2.imread(file_name, cv2.IMREAD_GRAYSCALE)
  155. if original is None:
  156. raise RuntimeError("Failed to load image")
  157. img = preprocess_image(original, gaussian_blur_size, adaptive_threshold_block_size, adaptive_threshold_mean_adjustment, num_dilations)
  158. biggest = find_biggest_contour(img)
  159. biggest = erode_contour(img.shape, biggest, contour_erosion_kernel_size, contour_erosion_iterations)
  160. top_left, top_right, bottom_right, bottom_left = get_contour_corners(img, biggest)
  161. img = extract_square(img, top_left, top_right, bottom_right, bottom_left)
  162. num_rows = get_line_frequency(img, line_detector_element_size, 1)
  163. num_cols = get_line_frequency(img, line_detector_element_size, 0)
  164. if square and (num_rows != num_cols):
  165. warnings.append("Crossword is not square")
  166. block_img = extract_square(original, top_left, top_right, bottom_right, bottom_left)
  167. if sampling_threshold is None:
  168. sampling_threshold = get_threshold_from_quantile(block_img, sampling_threshold_quantile)
  169. else:
  170. sampling_threshold = sampling_threshold
  171. grid_colours = extract_grid_colours(block_img, num_rows, num_cols, sampling_block_size_ratio)
  172. warning, grid = grid_colours_to_blocks(grid_colours, num_rows, num_cols, sampling_threshold)
  173. if warning:
  174. warnings.append("Some blocks may be the wrong colour")
  175. step = grid_square_size + grid_line_thickness
  176. grid_height = num_rows * step + grid_line_thickness
  177. grid_width = num_cols * step + grid_line_thickness
  178. output = np.full([2 * grid_border_size + grid_height, 2 * grid_border_size + grid_width], 255, dtype=np.uint8)
  179. cv2.rectangle(output, (grid_border_size, grid_border_size), (grid_border_size + grid_width - 1, grid_border_size + grid_height - 1), 0, -1)
  180. for row in range(num_rows):
  181. y = row * step + grid_line_thickness + grid_border_size
  182. for col in range(num_cols):
  183. if grid[row][col] == 0:
  184. x = col * step + grid_line_thickness + grid_border_size
  185. cv2.rectangle(output, (x, y), (x + grid_square_size - 1, y + grid_square_size - 1), 255, -1)
  186. _, png = cv2.imencode('.png', output)
  187. return png.tobytes(), warnings