Python OpenCV example code

Programming

by 신농해태 2022. 4. 2. 06:51


import cv2

img = cv2.imread('img550x779.jpg')    # load an image from disk
cv2.imshow('see img', img)            # show it in a window
presskey = cv2.waitKey(0)             # wait indefinitely for a key press; returns the key code
print(presskey)
cv2.destroyAllWindows()
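
If the file path is wrong, cv2.imread does not raise an error; it silently returns None and the later cv2.imshow call fails. A minimal sketch of a guard around the same example (the file name is reused from above):

import cv2

img = cv2.imread('img550x779.jpg')
if img is None:                       # imread returns None when the file cannot be read
    print('Could not read the image file.')
else:
    cv2.imshow('see img', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()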

 

 

import cv2
cam = cv2.VideoCapture(0) # device ID = 0

if not cam.isOpened():
    exit()
    
while True:
    ret, frame = cam.read() 
    
    if not ret:
        print('No frame received from the camera')
        break
    
    cv2.imshow('camera', frame)
    
    presskey = cv2.waitKey(1)
    if presskey == ord('q'):
        print('Exiting on q key press.')
        break
    
cam.release()
cv2.destroyAllWindows()   
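
As a small, optional extension of the webcam loop above, the current frame can be written to disk with cv2.imwrite. A minimal sketch, assuming a hypothetical output file snapshot.jpg; press s to save and q to quit:

import cv2

cam = cv2.VideoCapture(0)             # device ID = 0

while cam.isOpened():
    ret, frame = cam.read()
    if not ret:
        break

    cv2.imshow('camera', frame)

    presskey = cv2.waitKey(1)
    if presskey == ord('s'):          # save the current frame (hypothetical file name)
        cv2.imwrite('snapshot.jpg', frame)
    elif presskey == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()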

 

 

import cv2
video = cv2.VideoCapture('video.mp4')

while video.isOpened():
    ret, frame = video.read() 
    if not ret:
        print('No more frames to read.')
        break
    
    cv2.imshow('view frame', frame)
    
    presskey = cv2.waitKey(25)    # ~25 ms between frames; smaller values play back faster
    if presskey == ord('q'):
        print('Exiting on q key press.')
        break
    
video.release()
cv2.destroyAllWindows()
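
waitKey(25) above fixes the delay at 25 ms per frame regardless of the file's real frame rate. A minimal sketch that derives the delay from CAP_PROP_FPS instead, so playback stays close to real time (same video.mp4 as above):

import cv2

video = cv2.VideoCapture('video.mp4')
fps = video.get(cv2.CAP_PROP_FPS)
delay = int(1000 / fps) if fps > 0 else 25   # ms per frame; fall back to 25 ms if fps is unknown

while video.isOpened():
    ret, frame = video.read()
    if not ret:
        break

    cv2.imshow('view frame', frame)
    if cv2.waitKey(delay) == ord('q'):
        break

video.release()
cv2.destroyAllWindows()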

 

 

import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)   # 480x640 black canvas, 3 channels (BGR)
# img[:] = [255, 255, 255]                      # uncomment to fill the canvas with white (B, G, R)
# print(img)
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)
img[100:200, 200:300] = (255, 255, 255)   # paint the region rows 100:200, columns 200:300 white

cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
import numpy as np

img = np.zeros((480,640,3),dtype=np.uint8)
COLOR = (0, 255, 255)    # yellow in BGR order
THICKNESS = 3

cv2.line(img, (50, 100), (400, 50), COLOR, THICKNESS, cv2.LINE_8)    # 8-connected line
cv2.line(img, (50, 150), (400, 100), COLOR, THICKNESS, cv2.LINE_4)   # 4-connected line
cv2.line(img, (50, 200), (400, 150), COLOR, THICKNESS, cv2.LINE_AA)  # anti-aliased line

cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
import numpy as np

img = np.zeros((480,640,3),dtype=np.uint8)

COLOR=(255,255,0)
RADIUS = 50
THICKNESS = 10

cv2.circle(img, (200, 100), RADIUS, COLOR, THICKNESS, cv2.LINE_AA)   # outlined circle
cv2.circle(img, (400, 100), RADIUS, COLOR, cv2.FILLED, cv2.LINE_AA)  # filled circle

cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
import numpy as np

img = np.zeros((480,640,3),dtype=np.uint8)

COLOR=(0,255,0) #B G R
THICKNESS = 3

cv2.rectangle(img, (100, 100), (200, 200), COLOR, THICKNESS)   # outlined rectangle (top-left, bottom-right)
cv2.rectangle(img, (300, 100), (400, 300), COLOR, cv2.FILLED)  # filled rectangle

cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
import numpy as np

img = np.zeros((480,640,3),dtype=np.uint8)

COLOR=(0,0, 255) #B G R
THICKNESS = 3

pts1 = np.array([[100, 100], [200, 100], [100, 200]])   # triangle 1
pts2 = np.array([[200, 100], [300, 100], [300, 200]])   # triangle 2

cv2.polylines(img, [pts1, pts2], True, COLOR, THICKNESS, cv2.LINE_AA)   # True closes each polygon

pts3 = np.array([[[100, 300], [200, 300], [100, 400]], [[200, 300], [300, 300], [300, 400]]])
cv2.fillPoly(img, pts3, COLOR, cv2.LINE_AA)   # filled polygons

cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import numpy as np
import cv2

img = np.zeros((480, 640, 3), dtype = np.uint8)
COLOR = (255,255,255)
THICKNESS = 1
SCALE =1

cv2.putText(img, "Simplex", (20,50), cv2.FONT_HERSHEY_SIMPLEX,SCALE, COLOR, THICKNESS)
cv2.putText(img, "Plain", (20,150), cv2.FONT_HERSHEY_PLAIN,SCALE, COLOR, THICKNESS)
cv2.putText(img, "script Simplex", (20,250), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,SCALE, COLOR, THICKNESS)
cv2.putText(img, "Triplex", (20,350), cv2.FONT_HERSHEY_TRIPLEX,SCALE, COLOR, THICKNESS)
cv2.putText(img, "italic", (20,450), cv2.FONT_HERSHEY_TRIPLEX | cv2.FONT_ITALIC,SCALE, COLOR, THICKNESS)

cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import numpy as np
import cv2

from PIL import ImageFont, ImageDraw, Image   # PIL is used as a workaround to draw Hangul (Korean) text

def myPutText(src, text, pos, font_size, font_color):
    img_pil = Image.fromarray(src)                              # convert the OpenCV array to a PIL image
    draw = ImageDraw.Draw(img_pil)
    font = ImageFont.truetype('fonts/gulim.ttc', font_size)     # a font that contains Hangul glyphs
    draw.text(pos, text, font=font, fill=font_color)
    return np.array(img_pil)                                    # back to a NumPy array for OpenCV

img = np.zeros((480, 640, 3), dtype = np.uint8)

COLOR = (255,255,255)
THICKNESS = 1
SCALE =1

cv2.putText(img, "한글사용", (20, 50), cv2.FONT_HERSHEY_SIMPLEX, SCALE, COLOR, THICKNESS)   # Hangul comes out broken with cv2.putText

FONT_SIZE = 30
img = myPutText(img,"한글사용",(20,100), FONT_SIZE, COLOR)

cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
img = cv2.imread('img2.jpg', cv2.IMREAD_GRAYSCALE)
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()

result = cv2.imwrite('img_save.jpg', img)   # returns True on success
print(result)

 

 

import cv2
cap = cv2.VideoCapture('video.mp4')

# fourcc = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')   # same as the shorthand below
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
width = round(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS)

out = cv2.VideoWriter('output.avi', fourcc, fps, (width, height))

while cap.isOpened():
    ret, frame = cap.read()
    
    if not ret:
        break
        
    out.write(frame)
        
    cv2.imshow('video', frame)
    if cv2.waitKey(1) == ord('q'):
        break

out.release()        
cap.release()
cv2.destroyAllWindows()

 

 

import cv2
img = cv2.imread('img2.jpg')
dst = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)   # half size; INTER_AREA suits shrinking

cv2.imshow('img',img)
cv2.imshow('resize',dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
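
cv2.resize also accepts an explicit output size instead of the fx/fy scale factors used above. A minimal sketch, assuming an example target size of 400x500 pixels (note dsize is given as width, height):

import cv2

img = cv2.imread('img2.jpg')
dst = cv2.resize(img, (400, 500), interpolation=cv2.INTER_AREA)   # dsize is (width, height)

cv2.imshow('img', img)
cv2.imshow('resize', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()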

 

 

import cv2
cap = cv2.VideoCapture('video.mp4')

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
        
    frame_resized = cv2.resize(frame, None, fx=1.5, fy=1.5, interpolation=cv2.INTER_CUBIC)   # enlarge; INTER_CUBIC suits upscaling
        
    cv2.imshow('video',frame_resized)
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

 

 

import cv2
img = cv2.imread('img2.jpg')

crop = img[100:200, 200:300]      # crop rows 100:200, columns 200:300
img[100:200, 400:500] = crop      # paste the crop into another region of the same image

cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
img=cv2.imread('img.jpg')
flip_horizontal = cv2.flip(img, 1)   # 1: flip horizontally (left-right)
cv2.imshow('img', img)
cv2.imshow('flip', flip_horizontal)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
img=cv2.imread('img.jpg')
flip_vertical = cv2.flip(img, 0)   # 0: flip vertically (top-bottom)
cv2.imshow('img', img)
cv2.imshow('flip', flip_vertical)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
img=cv2.imread('img2.jpg')
flip_both = cv2.flip(img, -1)   # -1: flip both horizontally and vertically
cv2.imshow('img', img)
cv2.imshow('flip', flip_both)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
img=cv2.imread('img2.jpg')

rotate_90 = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
rotate_180 = cv2.rotate(img, cv2.ROTATE_180)
rotate_270 = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)

cv2.imshow('img', img)
cv2.imshow('rotate90', rotate_90)
cv2.imshow('rotate180', rotate_180)
cv2.imshow('rotate270', rotate_270)
cv2.waitKey(0)
cv2.destroyAllWindows()
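
cv2.rotate only covers 90-degree steps; for an arbitrary angle the usual route is cv2.getRotationMatrix2D followed by cv2.warpAffine. A minimal sketch rotating 45 degrees around the image center (the angle and scale are just example values):

import cv2

img = cv2.imread('img2.jpg')
height, width = img.shape[:2]

center = (width // 2, height // 2)
matrix = cv2.getRotationMatrix2D(center, 45, 1.0)     # center, angle in degrees, scale
rotated = cv2.warpAffine(img, matrix, (width, height))

cv2.imshow('img', img)
cv2.imshow('rotate45', rotated)
cv2.waitKey(0)
cv2.destroyAllWindows()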

 

 

import cv2
img=cv2.imread('img.jpg', cv2.IMREAD_GRAYSCALE)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
img=cv2.imread('img.jpg')

dst = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

cv2.imshow('img', img)
cv2.imshow('gray', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
img = cv2.imread('img.jpg')

# blur strength set by sigmaX; kernel size (0, 0) lets OpenCV derive it from sigma
sigma_1 = cv2.GaussianBlur(img, (0, 0), 1)
sigma_2 = cv2.GaussianBlur(img, (0, 0), 2)
sigma_3 = cv2.GaussianBlur(img, (0, 0), 3)

cv2.imshow('img', img)
cv2.imshow('sigma_1', sigma_1)
cv2.imshow('sigma_2', sigma_2)
cv2.imshow('sigma_3', sigma_3)
cv2.waitKey(0)
cv2.destroyAllWindows()
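
GaussianBlur can also be driven by an explicit kernel size such as (3, 3), (5, 5) or (7, 7), with sigma derived from the kernel instead of the other way around. A minimal sketch of that variant, reusing the same img.jpg:

import cv2

img = cv2.imread('img.jpg')

kernel_3 = cv2.GaussianBlur(img, (3, 3), 0)   # sigma 0 lets OpenCV pick it from the kernel size
kernel_5 = cv2.GaussianBlur(img, (5, 5), 0)
kernel_7 = cv2.GaussianBlur(img, (7, 7), 0)

cv2.imshow('img', img)
cv2.imshow('kernel_3', kernel_3)
cv2.imshow('kernel_5', kernel_5)
cv2.imshow('kernel_7', kernel_7)
cv2.waitKey(0)
cv2.destroyAllWindows()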

 

 

import cv2
import numpy as np

img = cv2.imread('img2.jpg')

width, height = 530, 710    # size of the flattened output image

src = np.array([[702, 243], [1133, 414], [726, 1007], [276, 700]], dtype=np.float32)   # 4 corners in the source image
dst = np.array([[0, 0], [width, 0], [width, height], [0, height]], dtype=np.float32)   # where they should map to

matrix = cv2.getPerspectiveTransform(src, dst)
result = cv2.warpPerspective(img, matrix, (width, height))

cv2.imshow('img', img)
cv2.imshow('result',result)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

 

import cv2
import numpy as np

point_list = []
src_img = cv2.imread('img2.jpg')

COLOR = (255, 0, 255)
THICKNESS = 3
drawing = False

def mouse_handler(event, x, y, flags, param):
    global drawing
    dst_img = src_img.copy()                 # redraw on a fresh copy every event

    if event == cv2.EVENT_LBUTTONDOWN:       # each left click adds one corner point
        drawing = True
        point_list.append((x, y))
    
    if drawing:
        prev_point = None
        for point in point_list:
            cv2.circle(dst_img, point, 15, COLOR, cv2.FILLED)
            if prev_point:
                cv2.line(dst_img, prev_point, point, COLOR, THICKNESS, cv2.LINE_AA)
            prev_point = point
        
        next_point = (x,y)
        
        if len(point_list) == 4:
            show_result()
            next_point = point_list[0]
        
        cv2.line(dst_img, prev_point, next_point, COLOR, THICKNESS, cv2.LINE_AA)
    
    cv2.imshow('img', dst_img)
    
def show_result():
    width, height = 530, 710

    src = np.float32(point_list)
    dst = np.array([[0,0],[width,0],[width, height],[0,height]], dtype=np.float32)

    matrix = cv2.getPerspectiveTransform(src, dst)
    result = cv2.warpPerspective(src_img, matrix, (width, height))
    cv2.imshow('result', result)

cv2.namedWindow('img')
cv2.setMouseCallback('img', mouse_handler)
cv2.imshow('img', src_img)
cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2

def empty(pos):
    #print(pos)
    pass

img = cv2.imread('book.jpg', cv2.IMREAD_GRAYSCALE)

name = 'Trackbar'
cv2.namedWindow(name)

cv2.createTrackbar('threshold', name, 127, 255, empty)

while True:
    thresh = cv2.getTrackbarPos('threshold', name)
    ret, binary = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)   # ret is the threshold applied, not a success flag
        
    cv2.imshow(name, binary)
    if cv2.waitKey(1) == ord('q'):
        break
    
cv2.destroyAllWindows()

 

import cv2

def empty(pos):
    #print(pos)
    pass

img = cv2.imread('book.jpg', cv2.IMREAD_GRAYSCALE)

name = 'Trackbar'
cv2.namedWindow(name)

cv2.createTrackbar('block_size', name, 25, 255, empty)   # must be odd and greater than 1
cv2.createTrackbar('c', name, 3, 10, empty)              # constant subtracted from the mean; usually positive

while True:
    block_size = cv2.getTrackbarPos('block_size', name)
    c = cv2.getTrackbarPos('c', name)
    
    if block_size <= 1:
        block_size = 3
        
    if block_size % 2 == 0:
        block_size += 1
    
    binary = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, block_size, c)

    cv2.imshow('img', img)
    cv2.imshow(name, binary)
    if cv2.waitKey(1) == ord('q'):
        break
    
cv2.destroyAllWindows()

 

 

import cv2
import numpy as np

kernel = np.ones((3,3), dtype=np.uint8)
#kernel

img = cv2.imread('dilate.png', cv2.IMREAD_GRAYSCALE)
dilate1 = cv2.dilate(img, kernel, iterations=1)
dilate2 = cv2.dilate(img, kernel, iterations=2)
dilate3 = cv2.dilate(img, kernel, iterations=3)

cv2.imshow('gray',img)
cv2.imshow('d1',dilate1)
cv2.imshow('d2',dilate2)
cv2.imshow('d3',dilate3)

cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
import numpy as np

kernel = np.ones((3,3), dtype=np.uint8)
#kernel

img = cv2.imread('erode.png', cv2.IMREAD_GRAYSCALE)
erode1 = cv2.erode(img, kernel, iterations=1)
erode2 = cv2.erode(img, kernel, iterations=2)
erode3 = cv2.erode(img, kernel, iterations=3)

cv2.imshow('gray',img)
cv2.imshow('erode1',erode1)
cv2.imshow('erode2',erode2)
cv2.imshow('erode3',erode3)

cv2.waitKey(0)
cv2.destroyAllWindows()

 

 

import cv2
import numpy as np
kernel = np.ones((3,3), dtype=np.uint8)

img = cv2.imread('erode.png', cv2.IMREAD_GRAYSCALE)

erode = cv2.erode(img, kernel, iterations=3)      # erosion removes small white noise first
dilate = cv2.dilate(erode, kernel, iterations=3)  # dilation restores the surviving shapes (erode then dilate = opening)

cv2.imshow('img',img)
cv2.imshow('erode',erode)
cv2.imshow('dilate',dilate)
cv2.waitKey(0)
cv2.destroyAllWindows()
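
The erode-then-dilate sequence above is the morphological opening operation, which OpenCV also exposes directly as cv2.morphologyEx with MORPH_OPEN. A minimal sketch on the same erode.png and 3x3 kernel:

import cv2
import numpy as np

kernel = np.ones((3, 3), dtype=np.uint8)
img = cv2.imread('erode.png', cv2.IMREAD_GRAYSCALE)

opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=3)   # erode then dilate in one call

cv2.imshow('img', img)
cv2.imshow('open', opened)
cv2.waitKey(0)
cv2.destroyAllWindows()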

 

 

import cv2

def empty(pos):
    pass

img = cv2.imread('img2.jpg')

name = 'Trackbar'
cv2.namedWindow(name)
cv2.createTrackbar('threshold1', name, 0, 255, empty)
cv2.createTrackbar('threshold2', name, 0, 255, empty)

while True:
    threshold1 = cv2.getTrackbarPos('threshold1', name)
    threshold2 = cv2.getTrackbarPos('threshold2', name)
    
    canny = cv2.Canny(img, threshold1, threshold2)

    cv2.imshow('img',img)
    cv2.imshow(name,canny)
    
    if cv2.waitKey(1) == ord('q'):
        break

cv2.destroyAllWindows()

 

 

import cv2
img = cv2.imread('img.jpg')
target_img = img.copy()

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, otsu = cv2.threshold(gray, -1, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

contours, hierarchy = cv2.findContours(otsu, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)   # all contours with full hierarchy
print(hierarchy)
print(f'Number of contours found: {len(contours)}')

COLOR = (0, 200, 0)
cv2.drawContours(target_img, contours, -1, COLOR, 2)   # index -1 draws every contour

cv2.imshow('img', img)
cv2.imshow('gray', gray)
cv2.imshow('otsu', otsu)
cv2.imshow('contours', target_img)

cv2.waitKey(0)
cv2.destroyAllWindows()
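
A common follow-up to findContours is drawing a bounding box around each contour; cv2.boundingRect returns the enclosing rectangle and cv2.contourArea can filter out tiny ones. A minimal sketch on the same image (the 1000-pixel area threshold is just an example value):

import cv2

img = cv2.imread('img.jpg')
target_img = img.copy()

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, otsu = cv2.threshold(gray, -1, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(otsu, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

COLOR = (0, 200, 0)
for cnt in contours:
    if cv2.contourArea(cnt) > 1000:                        # skip very small contours
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(target_img, (x, y), (x + w, y + h), COLOR, 2)

cv2.imshow('bounding boxes', target_img)
cv2.waitKey(0)
cv2.destroyAllWindows()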

 

 

import cv2
cap = cv2.VideoCapture('video.mp4')
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
width = round(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS)

out = cv2.VideoWriter('city_coutput.avi', fourcc, fps * 4, (width, height))   # size must be (width, height); 4x fps makes the saved clip play back faster

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    out.write(frame)    # write the frame to the output file

    cv2.imshow('video', frame)

    if cv2.waitKey(1) == ord('q'):
        break
        
out.release()
cap.release()
cv2.destroyAllWindows()

 

 

import cv2
import mediapipe as mp

mp_face_detection = mp.solutions.face_detection
mp_drawing = mp.solutions.drawing_utils

cap = cv2.VideoCapture('facevideo.mp4')

with mp_face_detection.FaceDetection(       # context manager that creates and releases the detector
    model_selection=1, min_detection_confidence=0.7) as face_detection:
    while cap.isOpened():
        success, image = cap.read()
        if not success:
            break   

        # To improve performance, optionally mark the image as not writeable to
        # pass by reference.
        image.flags.writeable = False
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = face_detection.process(image)

        # Draw the face detection annotations on the image.
        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        
        if results.detections:
            for detection in results.detections:
                mp_drawing.draw_detection(image, detection)
                #print(detection)
                
                keypoints = detection.location_data.relative_keypoints
                right_eye = keypoints[0]
                left_eye = keypoints[1]
                nose_tip = keypoints[2]
                
                h, w, _ = image.shape   # height, width, channels
                right_eye = (int(right_eye.x * w), int(right_eye.y * h))
                left_eye = (int(left_eye.x * w),int(left_eye.y * h))
                nose_tip = (int(nose_tip.x * w),int(nose_tip.y * h))
                
                cv2.circle(image, right_eye, 50, (255, 0, 0), 10, cv2.LINE_AA)
                cv2.circle(image, left_eye, 50, (0, 255, 0), 10, cv2.LINE_AA)
                cv2.circle(image, nose_tip, 50, (0, 0, 255), 10, cv2.LINE_AA)
                
                
        # Enlarge the annotated frame for display.
        cv2.imshow('MediaPipe Face Detection', cv2.resize(image,None, fx=2, fy=2))
        if cv2.waitKey(1) == ord('q'):
            break
            
cap.release()
cv2.destroyAllWindows()
