안녕하세요. 7기 랭체인 스터디에서 활동했던, 그리고 8기를 신청해둔 허세임입니다. 오늘은 친구의 10년전 사진을 가지고 사진어플없이 chatgpt와의 대화로 사진을 복원해보는 작업을 해보겠습니다.
시작하기
먼저 오늘의 준비물 사진입니다.
대한민국 남자아이들이라면 모두 하나씩 가지고 있는 태권도 사진입니다. 색이 많이 바래 있네요. 이래서 사진은 디지털로 소장해야 하나 봅니다.
이번 작업은 GPT-4 ADA(Advanced Data Analysis)로 진행하였습니다.
먼저 이 사진의 디테일에 대해 설명하여 GPT가 이미지를 이해하는 데 도움을 줍니다.
from PIL import Image
import IPython.display as display

# --- Load and show the source photo --------------------------------------
image_path = '/mnt/data/IMG_7115.jpeg'
original_image = Image.open(image_path)
display.display(original_image)

import cv2
import numpy as np

# PIL hands us RGB, but OpenCV works in BGR — swap channel order up front.
np_image = cv2.cvtColor(np.array(original_image), cv2.COLOR_RGB2BGR)

# Step 1 of the restoration: boost contrast with histogram equalization.
# Working in LAB lets us equalize only the lightness (L) channel while
# leaving the color channels (a, b) untouched.
lab = cv2.cvtColor(np_image, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
lab_eq = cv2.merge((cv2.equalizeHist(l), a, b))
np_image_eq = cv2.cvtColor(lab_eq, cv2.COLOR_LAB2BGR)

# Back to RGB so PIL can display and save it.
np_image_eq = cv2.cvtColor(np_image_eq, cv2.COLOR_BGR2RGB)
restored_image = Image.fromarray(np_image_eq)
display.display(restored_image)

# Persist the intermediate result to disk and report its path.
intermediate_result_path = '/mnt/data/IMG_7115_restored_intermediate.jpeg'
restored_image.save(intermediate_result_path)
intermediate_result_path
전후 비교 사진입니다. 이미지가 좀 선명해진 것 같긴 한데, 전체적으로 푸르스름한 보랏빛 제거가 필요해 보입니다.
# Helper that tones down a blue/purple color cast.
def reduce_blue_purple_tones(image, strength=1.0):
    """
    Reduce the blue and purple tones in the image by attenuating the
    blue channel.

    :param image: PIL Image to be processed
    :param strength: factor by which the blue tones are reduced
        (1.0 suppresses the blue channel entirely)
    :return: PIL Image with reduced blue and purple tones
    """
    # Work in RGB so the channels can be split individually.
    rgb = image if image.mode == 'RGB' else image.convert('RGB')
    red, green, blue = rgb.split()
    # Scale every blue value down via a per-pixel lookup.
    scale = 1 - strength
    blue = blue.point(lambda v: v * scale)
    # Reassemble the channels into the result image.
    return Image.merge('RGB', (red, green, blue))
# Run the blue/purple reduction on the equalized photo.
strength_of_reduction = 0.5  # Adjust the strength as needed
final_restored_image = reduce_blue_purple_tones(restored_image, strength=strength_of_reduction)
display.display(final_restored_image)

# Write the result next to the other outputs and report its path.
final_result_path = '/mnt/data/IMG_7115_restored_final.jpeg'
final_restored_image.save(final_result_path)
final_result_path
오줌사진이 되어버렸습니다. 중간이란 없는걸까요?
def correct_white_balance(image):
    """
    Enhance local contrast of the lightness channel using CLAHE.

    NOTE(review): despite the name, this is not a true white-balance
    correction — it applies Contrast-Limited Adaptive Histogram
    Equalization to the L channel of the LAB representation, which
    brightens/flattens the lighting but leaves chromaticity (a, b)
    untouched. The name is kept for backward compatibility with callers.

    :param image: PIL Image to be processed (any mode; converted to RGB)
    :return: PIL Image with contrast-enhanced lightness
    """
    # Guard against non-RGB inputs (e.g. grayscale or RGBA scans):
    # cv2.COLOR_RGB2LAB requires a 3-channel image.
    if image.mode != 'RGB':
        image = image.convert('RGB')
    np_img = np.array(image)
    # LAB separates lightness (L) from color (a, b).
    lab = cv2.cvtColor(np_img, cv2.COLOR_RGB2LAB)
    l, a, b = cv2.split(lab)
    # CLAHE equalizes contrast per 8x8 tile; the clip limit keeps it from
    # amplifying noise in near-uniform regions.
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    cl = clahe.apply(l)
    # Recombine the enhanced L channel with the untouched color channels.
    limg = cv2.merge((cl, a, b))
    np_img = cv2.cvtColor(limg, cv2.COLOR_LAB2RGB)
    corrected_image = Image.fromarray(np_img)
    return corrected_image
# Apply the contrast/"white balance" correction to the equalized photo.
white_balanced_image = correct_white_balance(restored_image)
display.display(white_balanced_image)

# Save the corrected image and report its path.
white_balanced_result_path = '/mnt/data/IMG_7115_white_balanced.jpeg'
white_balanced_image.save(white_balanced_result_path)
white_balanced_result_path
휴 노란색 걷어냈습니다.
def adjust_skin_tone(image):
    """
    Nudge colors toward warmer, less yellow skin tones.

    Adds a small amount of red (LAB 'a' channel, Green-Red axis) and
    removes a little yellow (LAB 'b' channel, Blue-Yellow axis).

    :param image: PIL Image to be processed (RGB)
    :return: PIL Image with adjusted skin tone
    """
    np_img = np.array(image)
    # Convert to LAB so color can be shifted independently of lightness.
    lab = cv2.cvtColor(np_img, cv2.COLOR_RGB2LAB)
    l_channel, a_channel, b_channel = cv2.split(lab)
    # BUG FIX: the channels are uint8, so in-place `a_channel += 3`
    # wrapped values >= 253 around to ~0, and `b_channel -= 5` wrapped
    # values < 5 up to ~255 — and the subsequent np.clip calls were
    # no-ops because the wraparound had already happened. Do the shifts
    # in a signed dtype, clip, then cast back.
    a_channel = np.clip(a_channel.astype(np.int16) + 3, 0, 255).astype(np.uint8)
    b_channel = np.clip(b_channel.astype(np.int16) - 5, 0, 255).astype(np.uint8)
    # Recombine the shifted color channels with the original lightness.
    lab = cv2.merge((l_channel, a_channel, b_channel))
    np_img = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)
    adjusted_image = Image.fromarray(np_img)
    return adjusted_image
# Run the skin-tone adjustment on the white-balanced photo.
skin_tone_adjusted_image = adjust_skin_tone(white_balanced_image)
display.display(skin_tone_adjusted_image)

# Save the adjusted image and report its path.
skin_tone_adjusted_result_path = '/mnt/data/IMG_7115_skin_tone_adjusted.jpeg'
skin_tone_adjusted_image.save(skin_tone_adjusted_result_path)
skin_tone_adjusted_result_path
나아지고 있는 게 맞는지 의구심이 들기 시작합니다.
def darken_purple_background(image):
    """
    Darken the purple tones in the background while trying to preserve
    the white color of the outfit.

    :param image: PIL Image to be processed
    :return: PIL Image with darkened background
    """
    np_img = np.array(image)
    # HSV makes it easy to select a hue band regardless of shade.
    hsv = cv2.cvtColor(np_img, cv2.COLOR_RGB2HSV)
    # Purple band on OpenCV's 0-179 hue scale; the low saturation/value
    # floors exclude near-grey pixels so the white outfit is left alone.
    lower_purple = np.array([120, 10, 10])
    upper_purple = np.array([160, 255, 255])
    purple_mask = cv2.inRange(hsv, lower_purple, upper_purple)
    # Halve the brightness (V channel) of the masked pixels only.
    hsv[..., 2] = np.where(purple_mask == 255, hsv[..., 2] * 0.5, hsv[..., 2])
    # Convert back to RGB and wrap as a PIL image.
    np_img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    darkened_image = Image.fromarray(np_img)
    return darkened_image
# Darken the purple background of the skin-tone-adjusted photo.
darkened_background_image = darken_purple_background(skin_tone_adjusted_image)
display.display(darkened_background_image)

# Save the result and report its path.
darkened_background_result_path = '/mnt/data/IMG_7115_darkened_background.jpeg'
darkened_background_image.save(darkened_background_result_path)
darkened_background_result_path
으악. 이미지가 너무나도 손상되어버렸습니다.
지피티에게 불만을 제기해보았지만, 쿨한 답변만이 돌아올 뿐 입니다.
처음과 마지막 결과의 비교사진입니다. 처참하군요.
지피티가 작성한 코드들을 보니, 제가 내린 명령을 과하게 수행한 감이 있습니다. 제가 사진 색조 조정에 대해 배경지식이 있다면 그 정도를 조절하는 명령을 내릴 수 있겠다 싶네요. 하지만 그것까진 모르니 전 여기까지 하겠습니다.
레퍼런스 이미지 활용하기
는 아니고, 이번엔 참고사진을 주고 비슷하게 해보라고 하겠습니다.
구글에서 “태권도 액자” 라고 검색해서 얻은 사진입니다. 여자아이라서 배경이 붉은 빛이긴 한데, 그래도 피부색이나 도복 색감은 좋은 것 같아 참고 사진으로 활용하기로 합니다.
# Load the reference image whose color profile we want to imitate.
reference_image_path = '/mnt/data/참고사진.jpeg'
reference_image = Image.open(reference_image_path)

# The "color profile" we transfer is simply the mean of each LAB channel
# of the reference picture.
lab_reference = cv2.cvtColor(np.array(reference_image), cv2.COLOR_RGB2LAB)
l_channel_ref, a_channel_ref, b_channel_ref = cv2.split(lab_reference)
mean_l_ref = np.mean(l_channel_ref)
mean_a_ref = np.mean(a_channel_ref)
mean_b_ref = np.mean(b_channel_ref)

# Reload the untouched original photo and show it for comparison.
original_photo_path = '/mnt/data/IMG_7115.jpeg'
original_photo = Image.open(original_photo_path)
display.display(original_photo)

# Split the original into LAB channels as well.
lab_original = cv2.cvtColor(np.array(original_photo), cv2.COLOR_RGB2LAB)
l_channel_org, a_channel_org, b_channel_org = cv2.split(lab_original)

# Shift the original's a/b channel means onto the reference's means.
# The arithmetic is done in float64 and clipped before casting back to
# uint8 so the shift cannot wrap around.
a_channel_org_adjusted = a_channel_org.astype(np.float64) - (np.mean(a_channel_org) - mean_a_ref)
b_channel_org_adjusted = b_channel_org.astype(np.float64) - (np.mean(b_channel_org) - mean_b_ref)
a_channel_org_adjusted = np.clip(a_channel_org_adjusted, 0, 255).astype(np.uint8)
b_channel_org_adjusted = np.clip(b_channel_org_adjusted, 0, 255).astype(np.uint8)

# Recombine with the original lightness channel and convert back to RGB.
lab_original_adjusted = cv2.merge((l_channel_org, a_channel_org_adjusted, b_channel_org_adjusted))
np_original_adjusted_rgb = cv2.cvtColor(lab_original_adjusted, cv2.COLOR_LAB2RGB)
original_adjusted_image = Image.fromarray(np_original_adjusted_rgb)
display.display(original_adjusted_image)

# Save the adjusted image and report its path.
adjusted_original_image_path = '/mnt/data/IMG_7115_adjusted_based_on_reference.jpeg'
original_adjusted_image.save(adjusted_original_image_path)
adjusted_original_image_path
뭘 하긴 한 것 같습니다만…
여기까지 하겠습니다.
느낀점
chatgpt4는 ADA에서 OpenCV와 같은 이미지 처리 라이브러리를 사용하여 이미지 보정을 할 수 있다.
히스토그램 평활화와 같은 기술을 적용하여 이미지의 대비를 향상시키는 등 할 수 있는 게 많다.
이미지 보정에 대한 배경지식이 있다면 더 디테일한 명령을 통해 더 나은 결과를 얻을 수 있을 것이다.
다른 분이 시도해주세요.
@huhsame