diff --git a/Face_Detection/align_warp_back_multiple_dlib.py b/Face_Detection/align_warp_back_multiple_dlib.py
index 4b82139e..21ddce2c 100644
--- a/Face_Detection/align_warp_back_multiple_dlib.py
+++ b/Face_Detection/align_warp_back_multiple_dlib.py
@@ -24,6 +24,7 @@
 
 
 def calculate_cdf(histogram):
+    print("Calculate cdf")
     """
     This method calculates the cumulative distribution function
     :param array histogram: The values of the histogram
@@ -40,6 +41,7 @@ def calculate_cdf(histogram):
 
 
 def calculate_lookup(src_cdf, ref_cdf):
+    print("Calculate lookup")
     """
     This method creates the lookup table
     :param array src_cdf: The cdf for the source image
@@ -60,6 +62,7 @@ def calculate_lookup(src_cdf, ref_cdf):
 
 
 def match_histograms(src_image, ref_image):
+    print("Match histograms")
     """
     This method matches the source image histogram to the
     reference signal
@@ -125,7 +128,7 @@ def _origin_face_pts():
 
 
 def compute_transformation_matrix(img, landmark, normalize, target_face_scale=1.0):
-
+    print("Compute transformation matrix")
     std_pts = _standard_face_pts()  # [-1,1]
     target_pts = (std_pts * target_face_scale + 1) / 2 * 256.0
 
@@ -146,18 +149,19 @@ def compute_transformation_matrix(img, landmark, normalize, target_face_scale=1.
 
 
 def compute_inverse_transformation_matrix(img, landmark, normalize, target_face_scale=1.0):
+    print("Compute inverse transformation matrix")
     std_pts = _standard_face_pts()  # [-1,1]
     target_pts = (std_pts * target_face_scale + 1) / 2 * 256.0
 
-    # print(target_pts)
+    print(target_pts)
 
     h, w, c = img.shape
     if normalize == True:
         landmark[:, 0] = landmark[:, 0] / h * 2 - 1.0
         landmark[:, 1] = landmark[:, 1] / w * 2 - 1.0
 
-    # print(landmark)
+    print(landmark)
 
     affine = SimilarityTransform()
 
@@ -167,6 +171,7 @@ def compute_inverse_transformation_matrix(img, landmark, normalize, target_face_
 
 
 def show_detection(image, box, landmark):
+    print("Show detection")
     plt.imshow(image)
     print(box[2] - box[0])
     plt.gca().add_patch(
@@ -183,6 +188,7 @@ def show_detection(image, box, landmark):
 
 
 def affine2theta(affine, input_w, input_h, target_w, target_h):
+    print("Affine2theta")
     # param = np.linalg.inv(affine)
     param = affine
     theta = np.zeros([2, 3])
@@ -196,7 +202,7 @@ def affine2theta(affine, input_w, input_h, target_w, target_h):
 
 
 def blur_blending(im1, im2, mask):
-
+    print("Blur blending")
     mask *= 255.0
 
     kernel = np.ones((10, 10), np.uint8)
@@ -215,6 +221,7 @@ def blur_blending(im1, im2, mask):
 
 
 def blur_blending_cv2(im1, im2, mask):
+    print("Blur blending cv2")
 
     mask *= 255.0
 
@@ -237,6 +244,7 @@ def blur_blending_cv2(im1, im2, mask):
 
 # Image.composite(
 def Poisson_blending(im1, im2, mask):
+    print("Poisson blending")
 
     # mask=1-mask
     mask *= 255
@@ -257,6 +265,7 @@ def Poisson_blending(im1, im2, mask):
 
 
 def Poisson_B(im1, im2, mask, center):
+    print("Poisson B")
 
     mask *= 255
 
@@ -268,6 +277,7 @@ def Poisson_B(im1, im2, mask, center):
 
 
 def seamless_clone(old_face, new_face, raw_mask):
+    print("Seamless clone")
 
     height, width, _ = old_face.shape
     height = height // 2
@@ -310,11 +320,13 @@ def get_landmark(face_landmarks, id):
     part = face_landmarks.part(id)
     x = part.x
     y = part.y
-
+    print("Get landmark")
+    print(x, y)
     return (x, y)
 
 
 def search(face_landmarks):
+    print("Search function")
     x1, y1 = get_landmark(face_landmarks, 36)
     x2, y2 = get_landmark(face_landmarks, 39)
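Note: every print added above follows the same pattern, announcing a helper on entry. A throwaway decorator gives the same trace without editing each function body; this is a minimal sketch in plain Python, not code from this repo:

    import functools

    def announce(fn):
        # Print the wrapped function's name on every call (debug trace only).
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            print(fn.__name__)
            return fn(*args, **kwargs)
        return wrapper

    # Usage: decorate the helper instead of editing it, e.g.
    # @announce
    # def blur_blending(im1, im2, mask): ...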
diff --git a/Face_Detection/detect_all_dlib.py b/Face_Detection/detect_all_dlib.py
index 081b4c18..f81d4db2 100644
--- a/Face_Detection/detect_all_dlib.py
+++ b/Face_Detection/detect_all_dlib.py
@@ -82,14 +82,14 @@ def compute_transformation_matrix(img, landmark, normalize, target_face_scale=1.
     std_pts = _standard_face_pts()  # [-1,1]
     target_pts = (std_pts * target_face_scale + 1) / 2 * 256.0
 
-    # print(target_pts)
+    print(target_pts)
 
     h, w, c = img.shape
     if normalize == True:
         landmark[:, 0] = landmark[:, 0] / h * 2 - 1.0
         landmark[:, 1] = landmark[:, 1] / w * 2 - 1.0
 
-    # print(landmark)
+    print(landmark)
 
     affine = SimilarityTransform()
 
@@ -171,9 +171,16 @@ def affine2theta(affine, input_w, input_h, target_w, target_h):
         current_face = faces[face_id]
         face_landmarks = landmark_locator(image, current_face)
         current_fl = search(face_landmarks)
+        print("current face")
+        print(current_face)
+        # current_face is a dlib rectangle, not an image; show the detected crop
+        plt.imshow(image[current_face.top() : current_face.bottom(), current_face.left() : current_face.right()])
+        plt.show()
 
         affine = compute_transformation_matrix(image, current_fl, False, target_face_scale=1.3)
         aligned_face = warp(image, affine, output_shape=(256, 256, 3))
+        plt.imshow(aligned_face)
+        plt.show()
         img_name = x[:-4] + "_" + str(face_id + 1)
         io.imsave(os.path.join(save_url, img_name + ".png"), img_as_ubyte(aligned_face))
 
diff --git a/Global/train_domain_A.py b/Global/train_domain_A.py
index 45004938..77b28aa1 100644
--- a/Global/train_domain_A.py
+++ b/Global/train_domain_A.py
@@ -44,8 +44,8 @@
 # opt.which_epoch=start_epoch-1
 model = create_da_model(opt)
 fd = open(path, 'w')
-fd.write(str(model.module.netG))
-fd.write(str(model.module.netD))
+fd.write(str(model.netG))
+fd.write(str(model.netD))
 fd.close()
 
 total_steps = (start_epoch - 1) * dataset_size + epoch_iter
@@ -72,7 +72,7 @@
 
         # sum per device losses
         losses = [torch.mean(x) if not isinstance(x, int) else x for x in losses]
-        loss_dict = dict(zip(model.module.loss_names, losses))
+        loss_dict = dict(zip(model.loss_names, losses))
 
         # calculate final loss scalar
         loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
@@ -81,18 +81,18 @@
 
         ############### Backward Pass ####################
         # update generator weights
-        model.module.optimizer_G.zero_grad()
+        model.optimizer_G.zero_grad()
         loss_G.backward()
-        model.module.optimizer_G.step()
+        model.optimizer_G.step()
 
         # update discriminator weights
-        model.module.optimizer_D.zero_grad()
+        model.optimizer_D.zero_grad()
         loss_D.backward()
-        model.module.optimizer_D.step()
+        model.optimizer_D.step()
 
-        model.module.optimizer_featD.zero_grad()
+        model.optimizer_featD.zero_grad()
         loss_featD.backward()
-        model.module.optimizer_featD.step()
+        model.optimizer_featD.step()
 
         # call(["nvidia-smi", "--format=csv", "--query-gpu=memory.used,memory.free"])
 
@@ -101,7 +101,7 @@
         if total_steps % opt.print_freq == print_delta:
             errors = {k: v.data if not isinstance(v, int) else v for k, v in loss_dict.items()}
             t = (time.time() - iter_start_time) / opt.batchSize
-            visualizer.print_current_errors(epoch, epoch_iter, errors, t, model.module.old_lr)
+            visualizer.print_current_errors(epoch, epoch_iter, errors, t, model.old_lr)
             visualizer.plot_current_errors(errors, total_steps)
 
         ### display output images
@@ -133,15 +133,15 @@
         ### save model for this epoch
         if epoch % opt.save_epoch_freq == 0:
             print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
-            model.module.save('latest')
-            model.module.save(epoch)
+            model.save('latest')
+            model.save(epoch)
             np.savetxt(iter_path, (epoch + 1, 0), delimiter=',', fmt='%d')
 
         ### instead of only training the local enhancer, train the entire network after certain iterations
         if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
-            model.module.update_fixed_params()
+            model.update_fixed_params()
 
         ### linearly decay learning rate after certain iterations
         if epoch > opt.niter:
-            model.module.update_learning_rate()
+            model.update_learning_rate()
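Note: the Global/train_domain_A.py hunks drop the .module indirection, which only exists while the model is wrapped in torch.nn.DataParallel (the wrapper stores the real network under .module). A minimal sketch, assuming stock PyTorch and nothing repo-specific, that works in both the wrapped and unwrapped case:

    import torch.nn as nn

    def unwrap(model):
        # (Distributed)DataParallel keeps the real network under .module;
        # plain modules are returned unchanged.
        if isinstance(model, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
            return model.module
        return model

    # e.g. net = unwrap(model); net.optimizer_G.zero_grad()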