|
import cv2 |
|
import numpy as np |
|
import gradio as gr |
|
|
|
|
|
def apply_gaussian_blur(frame):
    """Soften the image by convolving with a 15x15 Gaussian kernel.

    sigmaX=0 lets OpenCV derive the standard deviation from the kernel size.
    """
    blurred = cv2.GaussianBlur(frame, ksize=(15, 15), sigmaX=0)
    return blurred
|
|
|
def apply_sharpening_filter(frame):
    """Sharpen the image with a 3x3 Laplacian-style convolution kernel.

    The center weight of 5 against four -1 neighbors boosts local contrast;
    ddepth=-1 keeps the output dtype equal to the input's.
    """
    sharpen_kernel = np.array(
        [[0, -1, 0],
         [-1, 5, -1],
         [0, -1, 0]]
    )
    return cv2.filter2D(frame, -1, sharpen_kernel)
|
|
|
def apply_edge_detection(frame):
    """Return a binary edge map from the Canny detector (thresholds 100/200)."""
    edges = cv2.Canny(frame, threshold1=100, threshold2=200)
    return edges
|
|
|
def apply_invert_filter(frame):
    """Return the photographic negative (bitwise NOT of every pixel)."""
    inverted = cv2.bitwise_not(frame)
    return inverted
|
|
|
def adjust_brightness_contrast(frame, alpha=1.0, beta=50):
    """Apply the linear transform alpha*pixel + beta, saturated to uint8.

    alpha scales contrast, beta shifts brightness; cv2.convertScaleAbs
    clips the result into the valid 0-255 range.
    """
    adjusted = cv2.convertScaleAbs(frame, alpha=alpha, beta=beta)
    return adjusted
|
|
|
def apply_grayscale_filter(frame):
    """Collapse a 3-channel image to single-channel grayscale."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return gray
|
|
|
def apply_sepia_filter(frame):
    """Give the image a warm sepia tone via a 3x3 channel-mixing matrix.

    cv2.transform computes the per-pixel matrix product and saturates
    back to the input dtype, so bright pixels clip at 255.
    """
    channel_mix = np.array([
        [0.272, 0.534, 0.131],
        [0.349, 0.686, 0.168],
        [0.393, 0.769, 0.189],
    ])
    return cv2.transform(frame, channel_mix)
|
|
|
def apply_bilateral_filter(frame):
    """Smooth flat regions while preserving edges (d=9, both sigmas 75)."""
    smoothed = cv2.bilateralFilter(frame, d=9, sigmaColor=75, sigmaSpace=75)
    return smoothed
|
|
|
def apply_darkening_filter(frame, alpha=0.5):
    """Darken the image by scaling every pixel by *alpha* with no offset."""
    darkened = cv2.convertScaleAbs(frame, alpha=alpha, beta=0)
    return darkened
|
|
|
def apply_histogram_equalization(frame):
    """Increase contrast by equalizing the intensity histogram.

    A 2-D (grayscale) input is equalized directly. A color input is
    converted to YCrCb and only the luma (Y) channel is equalized, so
    the hues are left untouched.
    """
    if frame.ndim == 2:
        return cv2.equalizeHist(frame)
    ycrcb = cv2.cvtColor(frame, cv2.COLOR_BGR2YCrCb)
    ycrcb[:, :, 0] = cv2.equalizeHist(ycrcb[:, :, 0])
    return cv2.cvtColor(ycrcb, cv2.COLOR_YCrCb2BGR)
|
|
|
def apply_median_blur(frame):
    """Denoise with a median filter over a 15x15 neighborhood."""
    return cv2.medianBlur(frame, ksize=15)
|
|
|
def apply_dilation(frame, kernel_size=5):
    """Grow bright regions with one pass of a square structuring element."""
    structuring_element = np.ones((kernel_size, kernel_size), dtype=np.uint8)
    return cv2.dilate(frame, structuring_element, iterations=1)
|
|
|
def apply_erosion(frame, kernel_size=5):
    """Shrink bright regions with one pass of a square structuring element."""
    structuring_element = np.ones((kernel_size, kernel_size), dtype=np.uint8)
    return cv2.erode(frame, structuring_element, iterations=1)
|
|
|
def apply_line_detection(frame):
    """Detect straight lines with the standard Hough transform and draw them.

    Fix over the original: drawing happens on a copy, so the caller's
    array (e.g. the numpy buffer Gradio hands in for the input image)
    is never mutated in place.

    Args:
        frame: 3-channel uint8 image.

    Returns:
        A copy of *frame* with each detected line drawn 2 px wide in
        (0, 0, 255). NOTE(review): Gradio supplies RGB frames, so this
        renders blue on screen, not red — confirm the intended color.
    """
    annotated = frame.copy()  # do not mutate the caller's image
    gray = cv2.cvtColor(annotated, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    # Accumulator resolution: 1 px and 1 degree; a line needs >= 200 votes.
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
    if lines is not None:
        for rho, theta in lines[:, 0]:
            # Convert (rho, theta) normal form into two points far apart
            # along the line so the drawn segment spans the whole frame.
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            x1 = int(x0 + 1000 * (-b))
            x2 = int(x0 - 1000 * (-b))
            y1 = int(y0 + 1000 * (a))
            y2 = int(y0 - 1000 * (a))
            cv2.line(annotated, (x1, y1), (x2, y2), (0, 0, 255), 2)
    return annotated
|
|
|
def apply_contour_detection(frame):
    """Find and outline external contours (blur -> Canny -> findContours).

    Fix over the original: contours are drawn on a copy, so the caller's
    array (e.g. Gradio's input image buffer) is never mutated in place.

    Args:
        frame: 3-channel uint8 image.

    Returns:
        A copy of *frame* with every external contour drawn 2 px wide
        in (0, 255, 0) (green in both RGB and BGR interpretations).
    """
    annotated = frame.copy()  # do not mutate the caller's image
    gray = cv2.cvtColor(annotated, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, 50, 150)
    # RETR_EXTERNAL keeps only outermost contours; CHAIN_APPROX_SIMPLE
    # compresses straight runs of boundary points.
    contours, _ = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(annotated, contours, -1, (0, 255, 0), 2)
    return annotated
|
|
|
def apply_box_blur(frame):
    """Blur with a plain 15x15 normalized box (mean) filter."""
    return cv2.blur(frame, ksize=(15, 15))
|
|
|
def apply_emboss_filter(frame):
    """Produce a relief (emboss) effect via a directional 3x3 kernel."""
    emboss_kernel = np.array(
        [[-2, -1, 0],
         [-1, 1, 1],
         [0, 1, 2]]
    )
    return cv2.filter2D(frame, -1, emboss_kernel)
|
|
|
def apply_sobel_edge_detection(frame):
    """Highlight edges using 5x5 Sobel derivatives blended over x and y.

    Gradients are computed in float64 (CV_64F) so negative responses
    survive, then the equal-weight blend is saturated back to uint8.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=5)
    combined = cv2.addWeighted(grad_x, 0.5, grad_y, 0.5, 0)
    return cv2.convertScaleAbs(combined)
|
|
|
def apply_thresholding(frame, threshold_value=127):
    """Binarize: grayscale pixels above *threshold_value* become 255, others 0."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, threshold_value, 255, cv2.THRESH_BINARY)
    return binary
|
|
|
def apply_color_quantization(frame, k=8):
    """Reduce the image to a *k*-color palette via k-means clustering.

    Each pixel is replaced by its cluster's mean color. Note that
    KMEANS_RANDOM_CENTERS makes results vary between runs.
    """
    pixels = np.float32(frame.reshape(-1, 3))
    # Stop after 10 iterations or when centers move less than epsilon 1.0.
    stop_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, labels, centers = cv2.kmeans(pixels, k, None, stop_criteria, 10,
                                    cv2.KMEANS_RANDOM_CENTERS)
    centers = np.uint8(centers)
    quantized = centers[labels.flatten()]
    return quantized.reshape(frame.shape)
|
|
|
|
|
# Maps each dropdown label to its user-facing description (Turkish UI text,
# rendered in the Markdown panel below the dropdown). The keys double as the
# filter names dispatched on by apply_filter(), so the two must stay in sync.
filter_descriptions = {

    "Gaussian Blur": "Resmi bulanıklaştırır. Bu filtre, resimdeki gürültüyü azaltmak için kullanılır.",

    "Sharpen": "Resmi netleştirir. Bu filtre, resimdeki kenarları daha belirgin hale getirir.",

    "Edge Detection": "Canny Edge Detection algoritmasını kullanarak resimdeki kenarları tespit eder.",

    "Invert": "Resmin renklerini tersine çevirir.",

    "Brightness": "Resmin parlaklığını ve kontrastını ayarlar.",

    "Grayscale": "Resmi gri tonlamalı hale getirir.",

    "Sepia": "Resmi sepiya tonlarıyla işler.",

    "Bilateral": "Kenar koruyarak resmi bulanıklaştırır.",

    "Darkening": "Resmi karartır.",

    "Histogram Equalization": "Resmin histogramını eşitleyerek kontrastı artırır.",

    "Median Blur": "Medyan filtresi ile resmi bulanıklaştırır. Bu filtre, gürültüyü azaltmak için kullanılır.",

    "Dilation": "Resimdeki beyaz bölgeleri genişletir.",

    "Erosion": "Resimdeki beyaz bölgeleri daraltır.",

    "Line Detection": "Hough dönüşümü ile resimdeki doğruları tespit eder.",

    "Contour Detection": "Resimdeki konturları tespit eder ve çizer.",

    "Box Blur": "Basit bir bulanıklaştırma filtresi.",

    "Emboss": "Resmi kabartma efektiyle işler.",

    "Sobel Edge Detection": "Sobel operatörü ile kenarları tespit eder.",

    "Thresholding": "Eşikleme ile resmi ikili (siyah-beyaz) hale getirir.",

    "Color Quantization": "Renk sayısını azaltarak resmi daha basit bir renk paletiyle gösterir."

}
|
|
|
|
|
def apply_filter(filter_type, input_image=None):
    """Apply the filter named *filter_type* to an image.

    Args:
        filter_type: One of the keys of ``filter_descriptions``.
        input_image: Optional numpy image. When None, a single frame is
            grabbed from the default webcam instead.

    Returns:
        The filtered image; a Turkish error string when the webcam grab
        fails; or None for an unknown filter name (the original if/elif
        chain also fell through without a return in that case).
    """
    if input_image is not None:
        frame = input_image
    else:
        # One-shot capture from the default camera, released immediately.
        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        cap.release()
        if not ret:
            return "Web kameradan görüntü alınamadı"

    # Dispatch table replaces the original 20-branch if/elif chain.
    # "Brightness" keeps its original explicit arguments; every other
    # entry relies on the target function's own defaults, exactly as
    # the original calls did.
    dispatch = {
        "Gaussian Blur": apply_gaussian_blur,
        "Sharpen": apply_sharpening_filter,
        "Edge Detection": apply_edge_detection,
        "Invert": apply_invert_filter,
        "Brightness": lambda f: adjust_brightness_contrast(f, alpha=1.0, beta=50),
        "Grayscale": apply_grayscale_filter,
        "Sepia": apply_sepia_filter,
        "Bilateral": apply_bilateral_filter,
        "Darkening": apply_darkening_filter,
        "Histogram Equalization": apply_histogram_equalization,
        "Median Blur": apply_median_blur,
        "Dilation": apply_dilation,
        "Erosion": apply_erosion,
        "Line Detection": apply_line_detection,
        "Contour Detection": apply_contour_detection,
        "Box Blur": apply_box_blur,
        "Emboss": apply_emboss_filter,
        "Sobel Edge Detection": apply_sobel_edge_detection,
        "Thresholding": apply_thresholding,
        "Color Quantization": apply_color_quantization,
    }
    handler = dispatch.get(filter_type)
    return handler(frame) if handler is not None else None
|
|
|
|
|
# Gradio UI: filter dropdown + description, then input/output images side by side.
with gr.Blocks() as demo:
    gr.Markdown("# Web Kameradan ya da Yüklenen Fotoğraftan Canlı Filtreleme")

    filter_type = gr.Dropdown(
        label="Filtre Seçin",
        choices=list(filter_descriptions.keys()),
        value="Gaussian Blur"
    )

    # Description panel, initialized to match the dropdown's default value.
    filter_description = gr.Markdown(label="Filtre Açıklaması", value=filter_descriptions["Gaussian Blur"])

    # Fix: create the image components unrendered so they can be placed
    # inside the two-column row below. The original code created them
    # here (rendering them immediately, stacked vertically) and then
    # referenced them as bare expressions inside the columns — which is
    # a no-op in Gradio and never produced the intended layout.
    input_image = gr.Image(label="Resim Yükle", type="numpy", height=300, width=300, render=False)
    output_image = gr.Image(label="Filtre Uygulandı", height=300, width=300, render=False)

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Giriş Resmi")
            input_image.render()
        with gr.Column(scale=1):
            gr.Markdown("### Çıkış Resmi")
            output_image.render()

    def update_description(filter_type):
        # Look up the Turkish description for the newly selected filter.
        return filter_descriptions[filter_type]

    filter_type.change(fn=update_description, inputs=filter_type, outputs=filter_description)

    # Re-run the filter whenever the uploaded image or the selection changes.
    input_image.change(fn=apply_filter, inputs=[filter_type, input_image], outputs=output_image)
    filter_type.change(fn=apply_filter, inputs=[filter_type, input_image], outputs=output_image)

demo.launch()