Skip to content
Snippets Groups Projects
Commit e1f39b53 authored by dakotagoldberg's avatar dakotagoldberg
Browse files

Improve filtering and app state machine

parent 8c34681a
Branches
No related tags found
No related merge requests found
......@@ -28,12 +28,12 @@ mask_bool = mask[:, :, 0] == 0
image[~mask_bool] = 255 # Set the face region to white
# # Create a blurred version of the original image
# blurred_image = cv2.GaussianBlur(image, (25, 25), 0)
# Create a blurred version of the original image
blurred_image = cv2.GaussianBlur(image, (25, 25), 0)
# # Copy the clear face region from the original image
# final_image = image.copy()
# final_image[~mask_bool] = blurred_image[~mask_bool]
# Copy the clear face region from the original image
final_image = image.copy()
final_image[~mask_bool] = blurred_image[~mask_bool]
# Convert the final image to PIL format for saving
final_image_pil = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
......
......@@ -11,22 +11,36 @@ import random
def bf(img):
    """Convert a BGR image to a dilated binary edge mask.

    Pipeline: grayscale -> histogram equalization -> light
    edge-preserving bilateral smoothing -> adaptive mean threshold
    -> inversion (so edges are the "on" pixels) -> elliptical
    dilation to thicken strokes for tracing.

    Parameters:
        img: BGR image as a numpy uint8 array of shape (H, W, 3).

    Returns:
        Single-channel uint8 mask with the same height/width as ``img``.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Equalize to normalize contrast before filtering/thresholding.
    gray = cv2.equalizeHist(gray)
    # Small diameter / low sigmas: gentle smoothing that keeps edges sharp.
    filtered = cv2.bilateralFilter(gray, d=3, sigmaColor=10, sigmaSpace=10)
    # NOTE(review): maxValue=250 (not 255) leaves the background near-white
    # rather than pure white — confirm this is intentional.
    edges = cv2.adaptiveThreshold(
        filtered, 250, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
        blockSize=11, C=4)
    # Invert so detected edges become foreground, then thicken them.
    edges = ~edges
    edges = cv2.dilate(edges, cv2.getStructuringElement(
        cv2.MORPH_ELLIPSE, (5, 5)))
    return edges
def get_polylines(img, min_length=5.0, n_max=200):
face_cascade = cv2.CascadeClassifier(
cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
faces = face_cascade.detectMultiScale(
img, scaleFactor=1.8, minNeighbors=5, minSize=(50, 50))
# Create a mask where the faces are 0 and the rest is 1
height, width, _ = img.shape
mask = np.zeros((height, width), dtype=bool) # White mask
for (x, y, w, h) in faces:
mask[y:y+h, x:x+w] = True # Set the face region to black
img2 = bf(img)
img_float = img2.astype(np.float32) / 255.0
img_binary = img_float > 0.5
......@@ -37,7 +51,8 @@ def get_polylines(img, min_length=5.0, n_max=200):
polys = trace_skeleton.from_numpy(im)
else:
rects = []
polys = trace_skeleton_old.traceSkeleton(im, 0, 0, im.shape[1], im.shape[0], 10, 999, rects)
polys = trace_skeleton_old.traceSkeleton(
im, 0, 0, im.shape[1], im.shape[0], 10, 999, rects)
polys = [np.array(x, dtype=np.float32) for x in polys]
......@@ -45,7 +60,15 @@ def get_polylines(img, min_length=5.0, n_max=200):
for line in polys:
deltas = line[1:] - line[:-1]
length = np.sum(np.linalg.norm(deltas, axis=1))
if length >= min_length:
all_x = line[:, 0].astype(np.int32)
all_y = line[:, 1].astype(np.int32)
all_x = np.clip(all_x, 0, width-1)
all_y = np.clip(all_y, 0, height-1)
if np.any(mask[all_y, all_x]):
polys_length_keep.append([line, 5*length])
else:
polys_length_keep.append([line, length])
polys_length_keep = sorted(polys_length_keep, key=lambda x: x[1])[::-1]
......@@ -58,7 +81,8 @@ def get_polylines(img, min_length=5.0, n_max=200):
def draw_polylines(img, polys):
    """Draw every polyline in *polys* onto *img* as 2px black segments.

    Parameters:
        img: BGR image (modified in place).
        polys: iterable of point sequences; each point is indexable as
            (x, y) and may be float — coordinates are cast to int.
    """
    for poly in polys:
        # Connect each consecutive pair of points with a line segment.
        for i in range(len(poly) - 1):
            p0 = (int(poly[i][0]), int(poly[i][1]))
            p1 = (int(poly[i + 1][0]), int(poly[i + 1][1]))
            cv2.line(img, p0, p1, (0, 0, 0), 2)
def scale_offset_polylines(polys, img_w, img_h, mm_w, offset):
......
......@@ -19,11 +19,13 @@ STATE_DONE = 6
class MainApplication:
def __init__(self, fullscreen=False, rotate=False):
self.state = STATE_INIT
self.window = cv2.namedWindow("main", cv2.WINDOW_FULLSCREEN if fullscreen else cv2.WINDOW_KEEPRATIO)
self.window = cv2.namedWindow(
"main", cv2.WINDOW_FULLSCREEN if fullscreen else cv2.WINDOW_KEEPRATIO)
self.video = cv2.VideoCapture(0)
self.width = 480
self.height = 800
self.img_captured = np.zeros((self.height, self.width, 3), dtype=np.uint8)
self.img_captured = np.zeros(
(self.height, self.width, 3), dtype=np.uint8)
self.img = np.zeros((self.height, self.width, 3), dtype=np.uint8)
self.click = None
self.running = True
......@@ -49,6 +51,7 @@ class MainApplication:
def on_click(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
# print(x, y, event)
if self.rotate:
self.click = (y, x)
else:
......@@ -78,17 +81,18 @@ class MainApplication:
elif self.state == STATE_COUNTDOWN:
self.capture()
self.img = self.img_captured
if self.dt_state <= 1.0:
if self.dt_state <= 1:
overlay = "3"
elif self.dt_state <= 2.0:
elif self.dt_state <= 2:
overlay = "2"
elif self.dt_state <= 3.0:
elif self.dt_state <= 3:
overlay = "1"
else:
self.state = STATE_SHOWING
elif self.state == STATE_SHOWING:
if self.dt_state < 0.1:
self.img = 255*np.ones((self.height, self.width), dtype=np.uint8)
self.img = 255 * \
np.ones((self.height, self.width), dtype=np.uint8)
else:
self.img = np.copy(self.img_captured)
overlay = "yes_no"
......@@ -101,12 +105,14 @@ class MainApplication:
self.state = STATE_STREAM
elif self.state == STATE_VECTORIZED:
if self.dt_state < 0.5:
self.img = np.zeros((self.height, self.width, 3), dtype=np.uint8)
self.img = np.zeros(
(self.height, self.width, 3), dtype=np.uint8)
overlay = "processing"
else:
if self.strokes is None:
polys = filtering.get_polylines(self.img_captured)
img_draw = 255 * np.ones((self.height, self.width, 3), dtype=np.uint8)
img_draw = 255 * \
np.ones((self.height, self.width, 3), dtype=np.uint8)
filtering.draw_polylines(img_draw, polys)
self.strokes = filtering.scale_offset_polylines(polys,
self.width,
......@@ -121,6 +127,7 @@ class MainApplication:
if x < self.width // 2:
self.state = STATE_DRAWING
else:
self.strokes = None
self.state = STATE_STREAM
elif self.state == STATE_DRAWING:
if self.dt_state < 0.5:
......@@ -146,7 +153,8 @@ class MainApplication:
self.t_state = datetime.datetime.now()
self.dt_state = 0
else:
self.dt_state = (datetime.datetime.now() - self.t_state).total_seconds()
self.dt_state = (datetime.datetime.now() -
self.t_state).total_seconds()
def show(self, overlay="none"):
img_show = self.img
......@@ -155,7 +163,8 @@ class MainApplication:
img_over_col = img_over[:, :, :3]
alpha = img_over[:, :, 3:4]
img_float = img_show.astype(np.float32) / 255.0
img_show = np.clip(img_over_col * alpha + img_float * (1-alpha), 0, 1)
img_show = np.clip(img_over_col * alpha +
img_float * (1-alpha), 0, 1)
if self.rotate:
cv2.imshow("main", cv2.rotate(img_show, cv2.ROTATE_90_CLOCKWISE))
else:
......@@ -170,7 +179,8 @@ def main():
with open("config.json", "r") as f:
config = json.load(f)
app = MainApplication(fullscreen=config["fullscreen"], rotate=config["rotate"])
app = MainApplication(
fullscreen=config["fullscreen"], rotate=config["rotate"])
if config["use_machine"]:
m = machine.Machine(config["ports"])
m.pen_action(2, False)
......
pypotrace @ e28c4a25
Subproject commit e28c4a25553fd40d41b9aa9ae94b1ea96dc9bdce
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment