Nuyoah / scaning_block

This repository does not declare an open source license file (LICENSE); check the project description and its upstream code dependencies before use.
tc_rgb_test_similarity.py 45.83 KB
Nuyoah committed on 2024-04-30 18:44 . feat: add local detection and recognition for the Hong Kong project
import cv2
import random
import math
import json
import numpy as np
import colorspacious as cs
from PIL import Image, ImageFilter
from colormath.color_diff import delta_e_cie2000
from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color
from model import predict
def save_detection(img, filename, path):
    """Locate reagent regions with the detection model.
    :param img: (numpy.ndarray) input image
    :return: None; the highest-scoring box per label is printed and drawn on a copy of the image
    """
    # Run inference on the image
    result = predict.detection(img)
    boxes = result.boxes
    scores = result.scores
    label_ids = result.label_ids
detection_results = [
DetectionResult(*box, score, label_id)
for box, score, label_id in zip(boxes, scores, label_ids)
]
highest_scores = {}
# Iterate through all results
for result in detection_results:
label_id = result.label_id
score = result.score
# Check if this label_id has been seen before or if the current score is higher
if label_id not in highest_scores or score > highest_scores[label_id]:
highest_scores[label_id] = score
# Filter the detection_results to keep only the highest-scored entry for each label_id
filtered_results = [
result
for result in detection_results
if result.score == highest_scores[result.label_id]
]
# Print the final results
for result in filtered_results:
print(
f"File: {filename}, Label ID: {result.label_id}, "
f"Box: ({result.xmin}, {result.ymin}, {result.xmax}, {result.ymax}), "
f"Score: {result.score}"
)
    # Draw the bounding boxes on a copy of the image
    img_with_boxes = img.copy()
    for result in filtered_results:
        cv2.rectangle(img_with_boxes, (int(result.xmin), int(result.ymin)),
                      (int(result.xmax), int(result.ymax)), (0, 255, 0), 2)
    # file_path = os.path.join(path, filename)
    # cv2.imwrite(file_path, img_with_boxes)
    # vis_im = vision.vis_detection(img, detection_results, score_threshold=0.001)
    # file_path = os.path.join(path, filename)
    # cv2.imwrite(file_path, vis_im)
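# A minimal usage sketch (the image path and output directory below are placeholders,
# not values from the project): feed one image through save_detection, which prints and
# draws the best-scoring box per label.
def _example_save_detection(image_path="sample.jpg", out_dir="."):
    sample = cv2.imread(image_path)
    if sample is not None:
        save_detection(sample, image_path, out_dir)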
class DetectionResult:
def __init__(self, xmin, ymin, xmax, ymax, score, label_id):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.score = score
self.label_id = label_id
# Initialize filtered attributes
self.filtered_xmin = xmin
self.filtered_ymin = ymin
self.filtered_xmax = xmax
self.filtered_ymax = ymax
self.filtered_score = score
self.filtered_label_id = label_id
# Crop an image to a bounding box
def image_cutout(img, box):
    left = int(box[0])
    top = int(box[1])
    right = int(box[2])
    bottom = int(box[3])
    return img[top:bottom, left:right]
def lab_image(img):
lab_image_box = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
random_y = random.randint(0, lab_image_box.shape[0] - 1)
random_x = random.randint(0, lab_image_box.shape[1] - 1)
# Get the CIELab values at the random coordinate
random_L1 = lab_image_box[random_y, random_x, 0]
random_a1 = lab_image_box[random_y, random_x, 1]
random_b1 = lab_image_box[random_y, random_x, 2]
return random_L1, random_a1, random_b1
# Inverse gamma (sRGB decoding) for a single channel value in [0, 1]
def gamma_correct(c):
    if c <= 0.04045:
        return c / 12.92
    else:
        return ((c + 0.055) / 1.055) ** 2.4
# Linearize 8-bit sRGB values
def linearize_rgb(R, G, B):
    linear_R_value = gamma_correct(R / 255.0)
    linear_G_value = gamma_correct(G / 255.0)
    linear_B_value = gamma_correct(B / 255.0)
    return linear_R_value, linear_G_value, linear_B_value
def rgb_to_xyz(R, G, B):
    r = gamma_correct(R / 255.0)
    g = gamma_correct(G / 255.0)
    b = gamma_correct(B / 255.0)
    x = r * 0.4124564 + g * 0.3575761 + b * 0.1804375
    y = r * 0.2126729 + g * 0.7151522 + b * 0.0721750
    z = r * 0.0193339 + g * 0.1191920 + b * 0.9503041
    return x, y, z
def xyz_to_lab(xyz, x_n=0.95047, y_n=1.0, z_n=1.08883):
    # Unpack XYZ and normalize to the D65 reference white (same 0..1 scale as rgb_to_xyz)
    X1, Y1, Z1 = xyz
    x = X1 / x_n
    y = Y1 / y_n
    z = Z1 / z_n
    def f(t):
        if t > (6 / 29) ** 3:
            return t ** (1 / 3)
        else:
            return (1 / 3) * ((29 / 6) ** 2) * t + (4 / 29)
    L = 116 * f(y) - 16
    a = 500 * (f(x) - f(y))
    b = 200 * (f(y) - f(z))
    return L, a, b
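# Worked example (sketch): convert one sRGB triple to CIELab using the helpers above.
# The triple is illustrative; rgb_to_xyz gamma-expands and applies the sRGB matrix, and
# xyz_to_lab normalises by the D65 white point on the same 0..1 scale.
def _example_rgb_to_lab_pipeline():
    R, G, B = 200, 30, 30
    xyz = rgb_to_xyz(R, G, B)
    L, a, b = xyz_to_lab(xyz)
    print(f"RGB({R},{G},{B}) -> Lab({L:.1f}, {a:.1f}, {b:.1f})")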
def CIELab(imgData):
    height, width, _ = imgData.shape
    # Calculate the center coordinates of the cutout image
    center_x = width // 2
    center_y = height // 2
    # Extract R, G, B values at the center coordinate (OpenCV stores pixels as BGR)
    B, G, R = imgData[center_y, center_x]
    # rgb_to_xyz already linearizes, so pass the raw 8-bit values
    return xyz_to_lab(rgb_to_xyz(R, G, B))
# RGB value at the center coordinate
def central_coordinate(imgData):
    height, width, _ = imgData.shape
    # Calculate the center coordinates of the cutout image
    center_x = width // 2
    center_y = height // 2
    # Extract R, G, B values at the center coordinate (OpenCV stores pixels as BGR)
    B, G, R = imgData[center_y, center_x]
    return R, G, B
def RGBCIELab(R, G, B):
    # rgb_to_xyz already linearizes, so pass the raw 8-bit values
    return xyz_to_lab(rgb_to_xyz(R, G, B))
def RGB(imgData):
height, width, _ = imgData.shape
# Calculate the center coordinates of the cutout image
center_x = width // 2
center_y = height // 2
# Extract R, G, B values from the image data at the center coordinate
B, G, R = imgData[center_y, center_x]
return R, G, B
def rgb_to_lab(R, G, B):
    # Inverse-gamma (linearize) the RGB values
    linear_R, linear_G, linear_B = linearize_rgb(R, G, B)
    # Convert to the XYZ colour space
    xyz = (
        linear_R * 0.4124564 + linear_G * 0.3575761 + linear_B * 0.1804375,
        linear_R * 0.2126729 + linear_G * 0.7151522 + linear_B * 0.0721750,
        linear_R * 0.0193339 + linear_G * 0.1191920 + linear_B * 0.9503041
    )
    # Convert to Lab (colormath exposes no xyz2lab helper, so reuse xyz_to_lab above)
    return xyz_to_lab(xyz)
def show_color_box_cv2(rgb, name):
    blue, green, red = rgb
    # Create a blank image
    img = np.zeros((100, 100, 3), dtype=np.uint8)
    # Fill every pixel with the given colour
    img[:, :] = [blue, green, red]
    # Show the image in a resizable window
    cv2.namedWindow(name, cv2.WINDOW_FREERATIO)
    cv2.imshow(name, img)
def parse_json_file(file_path):
    with open(file_path, 'r') as f:
        data = json.load(f)
    # Further parsing of the JSON data can be done here if needed
    return data
def color_block(color_card, rgb):
    min_delta_e = float('inf')  # Smallest delta E seen so far
    closest_color = None  # Closest colour found so far
    closest_color_index = None
    for i in color_card:
        rgb_tuple = tuple(map(int, i['RGB'].strip('()').split(',')))  # Parse the "(r, g, b)" string
        delta_e = Calculated_color_difference(rgb_tuple, rgb)
        if delta_e < min_delta_e:  # New best match so far
            min_delta_e = delta_e
            closest_color = rgb_tuple
            closest_color_index = i['index']
    if closest_color_index is None:
        return []
    if closest_color_index + 1 >= len(color_card):
        return [color_card[closest_color_index]]
    return [color_card[closest_color_index], color_card[closest_color_index + 1]]
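# Hypothetical shape of the colour-card JSON that color_block expects: each entry holds
# an "index" and an "RGB" string such as "(255, 0, 0)". The sample list and the query
# colour below are illustrative only, not data from the project.
def _example_color_block():
    color_card = [
        {"index": 0, "RGB": "(255, 0, 0)"},
        {"index": 1, "RGB": "(0, 255, 0)"},
        {"index": 2, "RGB": "(0, 0, 255)"},
    ]
    print(color_block(color_card, (250, 10, 10)))  # nearest entry plus its neighbour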
def Calculated_color_difference(rgb1, rgb2):
    r1, g1, b1 = rgb1  # Unpack the RGB tuple
    r2, g2, b2 = rgb2  # Unpack the RGB tuple
    # sRGBColor handles gamma itself, so pass the raw 8-bit values (is_upscaled=True)
    color1_lab = convert_color(sRGBColor(r1, g1, b1, is_upscaled=True), LabColor)
    color2_lab = convert_color(sRGBColor(r2, g2, b2, is_upscaled=True), LabColor)
    delta_e = delta_e_cie2000(color1_lab, color2_lab)
    return delta_e
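# Quick sanity check (sketch): the CIEDE2000 distance between two nearby reds should be
# small, while red versus green should be large. The values are illustrative.
def _example_delta_e():
    print(Calculated_color_difference((255, 0, 0), (250, 10, 10)))  # small
    print(Calculated_color_difference((255, 0, 0), (0, 255, 0)))    # large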
def compute(RGBT, RGBA, RGBB):
    # Convert the three 8-bit RGB tuples to Lab (sRGBColor handles gamma internally)
    colorT_lab = convert_color(sRGBColor(*RGBT, is_upscaled=True), LabColor)
    colorA_lab = convert_color(sRGBColor(*RGBA, is_upscaled=True), LabColor)
    colorB_lab = convert_color(sRGBColor(*RGBB, is_upscaled=True), LabColor)
    # Pairwise CIEDE2000 differences
    delta_TA = delta_e_cie2000(colorT_lab, colorA_lab)
    delta_TB = delta_e_cie2000(colorT_lab, colorB_lab)
    delta_AB = delta_e_cie2000(colorA_lab, colorB_lab)
    return [delta_TA, delta_TB, delta_AB]
def restore(RGBA, r1, g1, b1):
r, g, b = RGBA
r = r - r1
g = g - g1
b = b - b1
return r, g, b
def restore_1(imgData, box, r1, g1, b1):
left = int(box[0])
top = int(box[1])
right = int(box[2])
bottom = int(box[3])
    for y in range(top, bottom):
        for x in range(left, right):
            # Current pixel value (imgData is assumed to be the cropped box)
            pixel_rgb = imgData[y - top, x - left]
            new_pixel_rgb = (
                safely_add(pixel_rgb[0], r1),
                safely_add(pixel_rgb[1], g1),
                safely_add(pixel_rgb[2], b1)
            )
            # Write the corrected pixel back
            imgData[y - top, x - left] = new_pixel_rgb
    return imgData
def averageChromatism(img, box, rgb):
    x = (box[0] + box[2]) / 2
    y = (box[1] + box[3]) / 2
    width, height = get_box_dimensions(box)
    # Shrink the box around its centre
    width = width / 3
    height = height / 2
    # Corner coordinates of the new box
    left = int(x - width)
    top = int(y - height)
    right = int(x + width)
    bottom = int(y + height)
    new_box = [left, top, right, bottom]
    new_img = image_cutout(img, new_box)
    delta_r_sum = 0
    total_pixels = 0
    for i in range(len(new_img)):
        for j in range(len(new_img[i])):
            # red_color = np.array([255, 0, 0])
            # # Euclidean distance between each pixel and pure red
            # distances = [distance.euclidean(pixel, red_color) for pixel in new_img[i]]
            # average_distance = np.mean(distances)
            # print("Overall color difference: ", average_distance)
            # Per-pixel colour difference against the reference colour
            b, g, r = new_img[i][j]
            delta_e = Calculated_color_difference(rgb, (r, g, b))
            delta_r_sum += delta_e
            total_pixels += 1
    # Average delta E over the cropped region
    if total_pixels != 0:
        avg_delta_r = delta_r_sum / total_pixels
        print("Average delta_r:", avg_delta_r)
        return round(avg_delta_r, 1)
    else:
        print("No pixels found in the image.")
def calculate_average_rgb(img):
    # Running totals for each colour channel
    total_red = 0
    total_green = 0
    total_blue = 0
    # Image dimensions
    height, width, _ = img.shape
    # Accumulate each channel (cast to int to avoid uint8 overflow)
    for y in range(height):
        for x in range(width):
            pixel = img[y, x]
            total_blue += int(pixel[0])
            total_green += int(pixel[1])
            total_red += int(pixel[2])
    # Average RGB values
    total_pixels = height * width
    avg_blue = total_blue // total_pixels
    avg_green = total_green // total_pixels
    avg_red = total_red // total_pixels
    return avg_red, avg_green, avg_blue
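# Equivalent vectorised mean (sketch): reshaping to an N x 3 array and taking a numpy
# mean gives the same per-channel average as the nested loops above, just much faster.
def _average_rgb_fast(img):
    avg_b, avg_g, avg_r = img.reshape(-1, 3).mean(axis=0)
    return int(avg_r), int(avg_g), int(avg_b)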
def calculate_color_difference(L1, a1, b1, L2, a2, b2):
delta_L = L2 - L1
delta_a = a2 - a1
delta_b = b2 - b1
color_difference = math.sqrt(delta_L ** 2 + delta_a ** 2 + delta_b ** 2)
return color_difference
def color_difference_1(rgb1, rgb2):
red_diff = abs(rgb1[0] - rgb2[0])
green_diff = abs(rgb1[1] - rgb2[1])
blue_diff = abs(rgb1[2] - rgb2[2])
return (red_diff, green_diff, blue_diff)
def adjust_color(img, target_rgb, diff):
    # Work on a copy of the original image
    adjusted_img = img.copy()
    # Per-channel offsets to apply
    delta_r = diff[0]  # red channel offset
    delta_g = diff[1]  # green channel offset
    delta_b = diff[2]  # blue channel offset
    # Walk every pixel and shift its channels (cast to int to avoid uint8 overflow)
    for y in range(adjusted_img.shape[0]):
        for x in range(adjusted_img.shape[1]):
            pixel = adjusted_img[y, x]
            new_r = max(min(int(pixel[0]) + delta_r, 255), 0)
            new_g = max(min(int(pixel[1]) + delta_g, 255), 0)
            new_b = max(min(int(pixel[2]) + delta_b, 255), 0)
            adjusted_img[y, x] = (new_r, new_g, new_b)
    return adjusted_img
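# Vectorised sketch of the same per-channel shift: numpy broadcasting plus np.clip
# replaces the Python loop; diff is applied channel-wise in the image's storage order,
# exactly as adjust_color does above.
def _adjust_color_fast(img, diff):
    shifted = img.astype(np.int16) + np.array(diff[:3], dtype=np.int16)
    return np.clip(shifted, 0, 255).astype(np.uint8)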
def getHSV(img):
    hsv_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Split the H, S and V channels
    h_channel, s_channel, v_channel = cv2.split(hsv_image)
    # # Show the converted HSV image
    # cv2.imshow('HSV Image', hsv_image)
    height, width, c = img.shape
    cnt = 0
    h_values = 0
    s_values = 0
    v_values = 0
    # Walk every pixel, skipping pure-black (0, 0, 0) pixels
    for y in range(width):
        for x in range(height):
            h_value = h_channel[x, y]
            s_value = s_channel[x, y]
            v_value = v_channel[x, y]
            if (h_value == 0) and (s_value == 0) and (v_value == 0):
                continue
            else:
                cnt += 1
                h_values += int(h_value)
                s_values += int(s_value)
                v_values += int(v_value)
    if cnt == 0:
        return 0, 0, 0
    meanH = h_values / cnt
    meanS = s_values / cnt
    meanV = v_values / cnt
    return meanH, meanS, meanV
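# Vectorised sketch of the same idea: mean H/S/V over pixels that are not pure black
# (0, 0, 0). Like getHSV above it averages hue arithmetically, which is only a rough
# approximation because hue is circular.
def _mean_hsv_fast(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).reshape(-1, 3)
    mask = ~np.all(hsv == 0, axis=1)
    if not mask.any():
        return 0, 0, 0
    return tuple(hsv[mask].mean(axis=0))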
# Gray value at the centre of a bounding box
def calculate_box_center_gray_image(img, box):
    # Convert the colour image to grayscale
    gray_img = calculate_gray1(img)
    # Centre coordinates of the bounding box
    x = int((box[0] + box[2]) / 2)
    y = int((box[1] + box[3]) / 2)
    # Gray value of the centre pixel
    center_gray_value = gray_img[y, x]
    return center_gray_value
    # # Set the whole image to the centre pixel's gray value
    # gray_img[:, :] = center_gray_value
def get_min_max_box_dimensions(gray_img, x, y, min_threshold, max_threshold):
left = right = x
top = bottom = y
while left >= 0 and min_threshold <= gray_img[y, max(left, 0)] <= max_threshold:
left -= 1
while right < gray_img.shape[1] and min_threshold <= gray_img[
y, min(right, gray_img.shape[1] - 1)] <= max_threshold:
right += 1
while top >= 0 and min_threshold <= gray_img[min(top, gray_img.shape[0] - 1), x] <= max_threshold:
top -= 1
while bottom < gray_img.shape[0] and min_threshold <= gray_img[
min(bottom, gray_img.shape[0] - 1), x] <= max_threshold:
bottom += 1
width = right - left - 1
height = bottom - top - 1
return width, height
def get_min_max_box_dimensions_1(img, lower_threshold, upper_threshold):
    max_area = 0
    optimal_rectangle = None
    # Scan every pixel of the (grayscale) image
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            # If the current gray value lies inside the threshold range
            if lower_threshold < img[i, j] < upper_threshold:
                # Grow right and down from this pixel until the value leaves the range
                right_bound = j
                while right_bound < img.shape[1] and lower_threshold < img[
                        i, right_bound] < upper_threshold:
                    right_bound += 1
                down_bound = i
                while down_bound < img.shape[0] and lower_threshold < img[
                        down_bound, j] < upper_threshold:
                    down_bound += 1
                # Area of the candidate rectangle
                area = calculate_rectangle_area((j, i), (right_bound, down_bound))
                # Keep the largest rectangle seen so far
                if area > max_area:
                    max_area = area
                    optimal_rectangle = ((j, i), (right_bound, down_bound))
    # Return the largest rectangle as a box, or None if nothing matched
    if optimal_rectangle is not None:
        top_left = optimal_rectangle[0]
        bottom_right = optimal_rectangle[1]
        box = [top_left[0], top_left[1], bottom_right[0], bottom_right[1]]
        return box
    else:
        return None
def get_new_img(x, y, width, height, img):
left = int(x - width)
top = int(y - height)
right = int(x + width)
bottom = int(y + height)
new_box = [left, top, right, bottom]
new_img = image_cutout(img, new_box)
return new_img
# Area of a rectangle given its top-left and bottom-right corners
def calculate_rectangle_area(top_left, bottom_right):
width = bottom_right[0] - top_left[0]
height = bottom_right[1] - top_left[1]
return width * height
def averageChromatism_RGB(name, img, box, rgb):
    x = (box[0] + box[2]) // 2
    y = (box[1] + box[3]) // 2
    width, height = get_box_dimensions(box)
    # Widen the box around its centre
    width = width * 2
    height = height
    new_img = get_new_img(x, y, width, height, img)
    new_img_gray_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2GRAY)
    height, width, c = new_img.shape
    # Centre coordinates of the new crop
    x1 = width // 2
    y1 = height // 2
    cen_gray = int(calculate_box_center_gray_image(img, box))
    min_threshold = cen_gray - 10  # lower gray threshold
    max_threshold = cen_gray + 10  # upper gray threshold
    new_box = get_min_max_box_dimensions_1(new_img_gray_img, min_threshold, max_threshold)
    if new_box is None:
        new_box = [0, 0, new_img.shape[1], new_img.shape[0]]  # fall back to the full crop
    new_img_gray_img_super = image_cutout(new_img, new_box)
    # # Rectangle whose gray values lie inside the threshold band
    # width, height = get_min_max_box_dimensions(new_img_gray_img, x1, y1, min_threshold, max_threshold)
    # new_img_gray_img_super = get_new_img(x, y, width // 2, height // 2, img)
    cv2.namedWindow('new_img_gray_img_super' + name, cv2.WINDOW_NORMAL)  # resizable window
    cv2.imshow('new_img_gray_img_super' + name, new_img_gray_img_super)
    print("getHSV" + name, getHSV(new_img_gray_img_super))
    new_img_gray_img_super_delta_e = Calculated_color_difference(rgb, calculate_average_rgb(new_img_gray_img_super))
    return new_img_gray_img_super_delta_e
    # NOTE: everything below is unreachable leftover experimentation (the function has already returned).
    # Smooth the image with a Gaussian filter
    smoothed_image = cv2.GaussianBlur(new_img, (5, 5), 0)
    print("smoothed_image" + name, filtered_value(calculate_gray1(smoothed_image)))
    # Average RGB value of new_img
    new_img_rgb = calculate_average_rgb(new_img)
    print("ray" + name, filtered_value(calculate_gray1(new_img)))
    print("getHSV" + name, getHSV(new_img))
    print("new_img_rgb" + name, new_img_rgb)
    # Per-channel colour difference
    color_diff = color_difference_1(new_img_rgb, rgb)
    # Colour calibration
    adjusted_img = adjust_color(new_img, rgb, color_diff)
    delta_e = Calculated_color_difference(rgb, calculate_average_rgb(new_img))
    return delta_e
    delta_r_sum = 0
    total_pixels = 0
    for i in range(len(new_img)):
        for j in range(len(new_img[i])):
            # Per-pixel colour difference against the reference colour
            b, g, r = new_img[i][j]
            delta_e = Calculated_color_difference(rgb, (r, g, b))
            delta_r_sum += delta_e
            total_pixels += 1
    # Average delta E
    if total_pixels != 0:
        avg_delta_r = delta_r_sum / total_pixels
        print("Average delta_r:", avg_delta_r)
        return round(avg_delta_r, 1)
    else:
        print("No pixels found in the image.")
def color_difference_rgb(new_color, ref_color):
    # Per-channel RGB difference
    diff = tuple(np.subtract(new_color, ref_color))
    return diff
def clip_lab_value(value):
    # Clamp a Lab-space value to a sensible range
    return max(0, min(value, 255))
def color_difference_lab(new_color, ref_color):
    # Convert both RGB colours to the Lab colour space
    new_color_lab = cv2.cvtColor(np.uint8([[new_color]]), cv2.COLOR_RGB2LAB)[0][0]
    ref_color_lab = cv2.cvtColor(np.uint8([[ref_color]]), cv2.COLOR_RGB2LAB)[0][0]
    # Compare the L components (cast to int so the subtraction cannot wrap)
    l_diff = clip_lab_value(int(new_color_lab[0]) - int(ref_color_lab[0]))
    return l_diff
def euclidean_distance(color1, color2):
    dist = np.linalg.norm(np.array(color1) - np.array(color2))
    return dist
def averageChromatism_RGB_1(name, img, box, rgb):
    x = (box[0] + box[2]) / 2
    y = (box[1] + box[3]) / 2
    width, height = get_box_dimensions(box)
    # Shrink the box around its centre
    width = width / 3
    height = height / 4
    # Corner coordinates of the new box
    left = int(x - width)
    top = int(y - height)
    right = int(x + width)
    bottom = int(y + height)
    new_box = [left, top, right, bottom]
    new_img = image_cutout(img, new_box)
    new_img_rgb = calculate_average_rgb(new_img)
    # Per-channel RGB difference
    diff_rgb = color_difference_rgb(rgb, new_img_rgb)
    print("RGB channel difference:", diff_rgb)
    # Difference of the L component in Lab space
    l_diff_lab = color_difference_lab(rgb, new_img_rgb)
    print("Lab L component difference:", l_diff_lab)
    return l_diff_lab
# Trimmed mean of a list: drop zeros, slice off two samples at each end, then remove one min and one max
def average(data_list):
    # Drop zero entries
    data_list_none_zero = []
    for i in range(len(data_list)):
        if data_list[i] != 0:
            data_list_none_zero.append(data_list[i])
    data_list = data_list_none_zero[2:-2]
    if len(data_list) == 0:
        return 0
    if len(data_list) > 2:
        data_list.remove(min(data_list))
        data_list.remove(max(data_list))
        average_data = float(sum(data_list)) / len(data_list)
        return average_data
    elif len(data_list) <= 2:
        average_data = float(sum(data_list)) / len(data_list)
        return average_data
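# Example trace of the trimmed average above (illustrative numbers): for
# [0, 12, 90, 34, 35, 36, 37, 95, 0, 11] the zeros are dropped, the first two and last two
# survivors are sliced away leaving [34, 35, 36, 37], one min (34) and one max (37) are
# removed, and the mean of [35, 36] = 35.5 is returned.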
# Perceptual (luma) brightness: 0.299 R + 0.587 G + 0.114 B
def test_average_1(image):
    gray_image = 0.299 * image[:, :, 2] + 0.587 * image[:, :, 1] + 0.114 * image[:, :, 0]
    average_brightness_eye = (gray_image.mean())
    return average_brightness_eye
# Weighted-average brightness (note: the weights are applied to the BGR channels as stored by OpenCV)
def test_average_2(image):
    clipped_image = np.clip(image, 0, 255)
    gray_image_weighted = clipped_image[:, :, 0] * 0.3 + clipped_image[:, :, 1] * 0.59 + clipped_image[:, :, 2] * 0.11
    average_brightness_weighted = gray_image_weighted.mean()
    return average_brightness_weighted
# Test value of a single (grayscale) image
def test_average(image):
    # Mean gray level, i.e. the overall brightness
    brightness = int(image.mean())
    # return brightness
    height = image.shape[0]
    weight = image.shape[1]
    count = height * weight
    # Collect every pixel value
    pixel_values = []
    for row in range(height):
        for col in range(weight):
            pixel_values.append(image[row, col])
    # # If there are fewer pixels than the C-line region, pad up
    # if count < c_count:
    #     for i in range(c_count - count):
    #         pixel_values.append(1)
    # # If there are more pixels than the C-line region, drop the excess
    # if count > c_count:
    #     for i in range(count - c_count):
    #         pixel_values.pop()
    # Average of the collected values
    result = average(pixel_values)
    return result
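# Note: the next definition rebinds the name `average`, so at call time test_average above
# resolves to this simple mean rather than the trimmed version defined earlier in the file.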
def average(pixel_values):
if len(pixel_values) == 0:
return 0
total = sum(pixel_values)
avg = total / len(pixel_values)
return avg
def get_box_dimensions(box):
x1, y1, x2, y2 = box # Unpack the box coordinates
width = x2 - x1 # Calculate width
height = y2 - y1 # Calculate height
return width, height
def central_coordinate_TC(img, box):
    width, height = get_box_dimensions(box)
    x = (box[0] + box[2]) / 2
    y = (box[1] + box[3]) / 2
    # Shrink the box around its centre
    width = width / 2
    height = height / 2
    # Corner coordinates of the new box
    left = int(x - width / 2)
    top = int(y - height / 2)
    right = int(x + width / 2)
    bottom = int(y + height / 2)
    new_box = [left, top, right, bottom]
    new_img = image_cutout(img, new_box)
    # Running totals of the RGB values
    sum_r = sum_g = sum_b = 0
    # Accumulate over the crop (OpenCV pixels are BGR; cast to int to avoid uint8 overflow)
    for row in new_img:
        for pixel in row:
            b, g, r = pixel
            sum_r += int(r)
            sum_g += int(g)
            sum_b += int(b)
    # Average RGB values
    total_pixels = len(new_img) * len(new_img[0])
    R = sum_r / total_pixels
    G = sum_g / total_pixels
    B = sum_b / total_pixels
    return int(R), int(G), int(B)
# Grayscale conversion using OpenCV's default weights
def calculate_gray1(img):
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def restore_color(img, red_diff, green_diff, blue_diff):
    # Shift each channel separately (work in int16 so the addition cannot wrap before clipping)
    img[:, :, 2] = np.clip(img[:, :, 2].astype(np.int16) + red_diff, 0, 255)  # red channel
    img[:, :, 1] = np.clip(img[:, :, 1].astype(np.int16) + green_diff, 0, 255)  # green channel
    img[:, :, 0] = np.clip(img[:, :, 0].astype(np.int16) + blue_diff, 0, 255)  # blue channel
    return img
def color_diff_lab(color1, color2):
    # Convert both RGB colours to the CAM02-UCS space
    lab_color1 = cs.cspace_convert(color1, "sRGB255", "CAM02-UCS")
    lab_color2 = cs.cspace_convert(color2, "sRGB255", "CAM02-UCS")
    # Euclidean distance in CAM02-UCS as the colour difference
    distance = cs.deltaE(lab_color1, lab_color2, input_space='CAM02-UCS')
    return distance
def reestablishColorImg(img, box, r1, g1, b1):
    left, top, right, bottom = box[0], box[1], box[2], box[3]
    # Walk every pixel of the crop, applying the per-channel correction
    for y in range(top, bottom):
        for x in range(left, right):
            pixel_rgb = img[y - top, x - left]
            new_pixel_rgb = (
                safely_add(pixel_rgb[0], r1),
                safely_add(pixel_rgb[1], g1),
                safely_add(pixel_rgb[2], b1)
            )
            # Write the corrected pixel back
            img[y - top, x - left] = new_pixel_rgb
    return img
def central_coordinate_TC_Box(name, img, box, r1, g1, b1, **args):
    x, y = calculate_box_center(box)
    width, height = get_box_dimensions(box)
    # Keep the box size unchanged around its centre
    width = width
    height = height
    new_img = get_new_img(x, y, width, height, img)
    new_img_gray_img = calculate_gray1(new_img)
    cv2.imshow("new_img" + name, new_img)
    height, width, c = new_img.shape
    # Centre coordinates of the new crop
    x1 = width // 2
    y1 = height // 2
    cen_gray = int(calculate_box_center_gray_image(img, box))
    min_threshold = cen_gray - 15  # lower gray threshold
    max_threshold = cen_gray + 15  # upper gray threshold
    new_box = get_min_max_box_dimensions_1(new_img_gray_img, min_threshold, max_threshold)
    if new_box is None:
        new_box = [0, 0, new_img.shape[1], new_img.shape[0]]  # fall back to the full crop
    new_img_gray_img_super = image_cutout(new_img, new_box)
    height, width, c = new_img_gray_img_super.shape
    # # Rectangle whose gray values lie inside the threshold band
    # width, height = get_min_max_box_dimensions(new_img_gray_img, x1, y1, min_threshold, max_threshold)
    # new_img_gray_img_super = get_new_img(x, y, width // 2, height // 2, img)
    # height, width, c = new_img_gray_img_super.shape
    new_img_gray_img_super = reestablishColorImg(new_img_gray_img_super, new_box, r1, g1, b1)
    cv2.imshow("new_img_gray_img_super" + name, new_img_gray_img_super)
    return test_average(calculate_gray1(new_img_gray_img_super))
def safely_add(value, delta):
    return max(min(int(value) + delta, 255), 0)  # clamp to the valid 8-bit range
def central_coordinate_Cen_Box(img, t_box, c_box, r1, g1, b1, **args):
width_c, heigth_c = get_box_dimensions(c_box)
center_x_c = (c_box[0] + c_box[2]) / 2
center_y_C = (c_box[1] + c_box[3]) / 2
center_x_t = (t_box[0] + t_box[2]) / 2
center_y_t = (t_box[1] + t_box[3]) / 2
x = (center_x_c + center_x_t) / 2
y = (center_y_C + center_y_t) / 2
height = heigth_c / 2
width = width_c / 2
left = int(x - width)
top = int(y - height)
right = int(x + width)
bottom = int(y + height)
new_box = [left, top, right, bottom]
new_img = image_cutout(img, new_box)
# return int(filtered_value(calculate_gray1(new_img)))
    # Walk every pixel of the crop, applying the per-channel correction
    for y in range(top, bottom):
        for x in range(left, right):
            pixel_rgb = new_img[y - top, x - left]
            new_pixel_rgb = (
                safely_add(pixel_rgb[0], r1),
                safely_add(pixel_rgb[1], g1),
                safely_add(pixel_rgb[2], b1)
            )
            # Write the corrected pixel back
            new_img[y - top, x - left] = new_pixel_rgb
# if not args.get('filter_row') is None:
# filter_rows = args.get('filter_row')
# return filter_pixel_rows(new_img)
# filter_pixel_rows(new_img)
# return test_average(filter_pixel_rows(new_img))
# return test_average(calculate_gray1(new_img))
# return test_average_2(new_img)
# new_img = blur_image_openCV_1(img, (1, 1))
return filtered_value(calculate_gray1(new_img))
def central_coordinate_TC_Box_1(name, img, box, width_c, height_c, r1, g1, b1, **args):
cv2.imshow("22222222" + name, image_cutout(img, box))
x, y = calculate_box_center(box)
    # Corner offsets for the new box around the centre
    width = width_c
    height = height_c
    # Corner coordinates of the new box
    left = int(x - width)
    top = int(y - height)
    right = int(x + width)
    bottom = int(y + height)
    new_box = [left, top, right, bottom]
    new_img = image_cutout(img, new_box)
    cv2.imshow("11111" + name, new_img)
    # Top-left corner, width and height of the rectangle to draw
    x, y, w, h = left, top, right - left, bottom - top
    # Rectangle colour in BGR format
    color = (0, 0, 255)  # red
    thickness = 2  # border thickness
    # Draw the rectangle on a copy of the image
    output_img = np.copy(img)
    cv2.rectangle(output_img, (x, y), (x + w, y + h), color, thickness)
    # Display the image using OpenCV
    cv2.imshow('Box Image' + name, output_img)
    # Walk every pixel of the crop, applying the per-channel correction
    for y in range(top, bottom):
        for x in range(left, right):
            pixel_rgb = new_img[y - top, x - left]
            new_pixel_rgb = (
                safely_add(pixel_rgb[0], r1),
                safely_add(pixel_rgb[1], g1),
                safely_add(pixel_rgb[2], b1)
            )
            # Write the corrected pixel back
            new_img[y - top, x - left] = new_pixel_rgb
            # # Filter out extreme values
            # new_pixel_rgb_filtered = np.clip(new_pixel_rgb, np.percentile(new_pixel_rgb, 5),
            #                                  np.percentile(new_pixel_rgb, 95))
            #
            # # Write the filtered pixel back
            # new_img[y - top, x - left] = new_pixel_rgb_filtered
cv2.imshow("waaaaaa" + name, new_img)
# if not args.get('filter_row') is None:
# filter_rows = args.get('filter_row')
# return filter_pixel_rows(new_img)
# filter_pixel_rows(new_img)
# return test_average(filter_pixel_rows(new_img))
# return test_average_2(new_img)
return filtered_value(calculate_gray1(new_img))
def filter_pixel_rows(image):
    # Convert the image to grayscale
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Mean pixel value of each row
    row_averages = np.mean(gray_image, axis=1)
    # Global mean
    global_average = np.mean(row_averages)
    # Indices of rows darker than the global mean
    filtered_rows = np.where(row_averages < global_average)[0]
    # Trim a third of the selected rows from the top and bottom
    less_height = len(filtered_rows) // 3
    if less_height > 0:
        filtered_rows = filtered_rows[less_height:-less_height]
    # Return only the selected rows
    return gray_image[filtered_rows]
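# Usage sketch: filter_pixel_rows keeps only the rows darker than the strip's mean (with
# the top and bottom thirds of that selection trimmed), which helps isolate a test line
# from its background. The helper below just averages whatever rows survive.
def _example_filter_pixel_rows(strip_bgr):
    line_rows = filter_pixel_rows(strip_bgr)
    return float(np.mean(line_rows)) if line_rows.size else 0.0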
# Crop to a centred square and resize
def crop_and_resize_image(input_image_path, size):
    original_image = Image.open(input_image_path)
    # Crop region: a centred square with side equal to the image height
    width, height = original_image.size
    left = (width - height) / 2
    top = 0
    right = (width + height) / 2
    bottom = height
    cropped_image = original_image.crop((left, top, right, bottom))
    # Resize to the requested size
    resized_image = cropped_image.resize(size)
    resized_image.save(input_image_path)
def blur_image(input_image_path, output_image_path, radius=2):
    original_image = Image.open(input_image_path)
    blurred_image = original_image.filter(ImageFilter.GaussianBlur(radius))
    blurred_image.save(output_image_path)
def calculate_test_value(new_images):
    t_average = test_average(new_images[0])
    c_average = test_average(new_images[1])
    return round(t_average / c_average, 1)
def calculate_test_value_1(new_images):
    t_average = test_average(new_images[0])
    c_average = test_average(new_images[1])
    if c_average == 0:
        return None  # or another sentinel value, depending on requirements
    return t_average / c_average
def calculate_test_value_2(new_images):
    t_average = test_average(new_images[0])
    c_average = test_average(new_images[1])
    return int(t_average) / int(c_average)
def calculate_test_value_3(new_images):
    t_average = test_average(new_images[0])
    c_average = test_average(new_images[1])
    return round(int(t_average) / int(c_average), 2)
def calculate_test_value_4(new_images):
    t_average = test_average(new_images[0])
    c_average = test_average(new_images[1])
    # Mean pixel value of the midpoint (background) region
    median_average = test_average(new_images[2])
    result = (t_average - median_average) / (c_average - median_average)
    similarity_result = round(abs(result), 1)
    return similarity_result
def calculate_test_value_5(new_images):
    t_average = test_average(new_images[0])
    c_average = test_average(new_images[1])
    # Mean pixel value of the midpoint (background) region
    median_average = test_average(new_images[2])
    result = (t_average - median_average) / (c_average - median_average)
    similarity_result = round(abs(result), 2)
    return similarity_result
def calculate_test_value_6(new_images):
    # t_average = round(test_average(new_images[0]), 1)
    # c_average = round(test_average(new_images[1]), 1)
    # median_average = round(test_average(new_images[2]), 1)
    t_average = test_average(new_images[0])
    c_average = test_average(new_images[1])
    median_average = test_average(new_images[2])
    # Ratio of T and C relative to the midpoint (background) value
    result = (t_average - median_average) / (c_average - median_average)
    similarity_result = round(abs(result), 2)
    return similarity_result
def is_subtraction_negative(num1, num2):
    return num1 < num2
def calculate_test_value_7(T, C, M):
print(M, "m")
param1 = T - M
if is_subtraction_negative(C, M):
param2 = C - M
else:
param2 = M - C
result = param1 / param2
print("result", result)
similarity_result = round(abs(result), 2)
return similarity_result
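# Illustrative numbers for the T/C ratio family above: with T=120, C=60 and a background
# (median) value M=150, calculate_test_value_7 evaluates (120-150)/(60-150) and returns
# 0.33, i.e. the T line sits about a third as far from the background as the C line.
def _example_test_value():
    print(calculate_test_value_7(120, 60, 150))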
# Image blurring helpers
def blur_image_openCV(image, blur_type):
    if blur_type == 'normal':
        blurred_image = cv2.blur(image, (15, 15))  # box (mean) blur
    elif blur_type == 'gaussian':
        blurred_image = cv2.GaussianBlur(image, (15, 15), 0)  # Gaussian blur
    elif blur_type == 'median':
        blurred_image = cv2.medianBlur(image, 5)  # median blur
    else:
        blurred_image = image
    return blurred_image
def blur_image_openCV_1(image, kernel_size):
    # kernel_size is the blur kernel, e.g. (3, 3), (5, 5) or (10, 10)
    blurred_image = cv2.blur(image, kernel_size)
    return blurred_image
def image_cutout_TC(img, box, **args):
    width, height = get_box_dimensions(box)
    x = (box[0] + box[2]) / 2
    y = (box[1] + box[3]) / 2
    # Shrink the box around its centre
    width = width / 2
    height = height / 2
    # Corner coordinates of the new box
    left = int(x - width / 2)
    top = int(y - height / 2)
    right = int(x + width / 2)
    bottom = int(y + height / 2)
    new_box = [left, top, right, bottom]
    new_img = image_cutout(img, new_box)
    line_area_gray = calculate_gray1(new_img)
    # # Optional row filtering
    # if not args.get('filter_row') is None:
    #     filter_rows = args.get('filter_row')
    #     return filter_pixel_rows(new_img)
    # filter_pixel_rows(new_img)
    # return filter_pixel_rows(new_img)
    return line_area_gray
def filtered_value(matrix):
    matrix = np.array(matrix)
    unique_values = np.unique(matrix)  # Find unique values in the matrix
    if len(unique_values) == 1:  # All elements are the same
        return unique_values[0]
    # Drop the extreme values before averaging
    max_val = np.max(matrix)
    min_val = np.min(matrix)
    mean_val = np.mean(matrix)
    filtered_values = matrix[(matrix != max_val) & (matrix != min_val)]
    # Alternative: also drop values more than five gray levels away from the mean
    # filtered_values = matrix[(matrix != max_val) & (matrix != min_val) & (np.abs(matrix - mean_val) < 5)]
    return np.mean(filtered_values) if len(filtered_values) > 0 else mean_val
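# Example (illustrative): in the patch below the single min (10) and max (200) are
# excluded, so filtered_value returns mean([12, 11]) = 11.5 instead of the raw mean 58.25.
def _example_filtered_value():
    patch = np.array([[10, 12], [11, 200]], dtype=np.uint8)
    print(filtered_value(patch))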
def calculate_box_center(box):
    # Top-left and bottom-right corners of the box
    x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
    # Centre point
    center_x = (x1 + x2) / 2
    center_y = (y1 + y2) / 2
    return center_x, center_y
def mein(c_box, t_box, width_c, height_c, original_pic, **args):
    center_x_c = (c_box[0] + c_box[2]) / 2
    center_y_C = (c_box[1] + c_box[3]) / 2
    center_x_t = (t_box[0] + t_box[2]) / 2
    center_y_t = (t_box[1] + t_box[3]) / 2
    x = (center_x_c + center_x_t) / 2
    y = (center_y_C + center_y_t) / 2
    width = width_c
    height = height_c
    left = int(x - width)
    top = int(y - height)
    right = int(x + width)
    bottom = int(y + height)
    cen_box = [left, top, right, bottom]
    # # Top-left corner, width and height of the rectangle to draw
    # x, y, w, h = left, top, right - left, bottom - top
    # # Rectangle colour in BGR format
    # color = (0, 0, 255)  # red
    # thickness = 2  # border thickness
    # # Draw the rectangle on a copy of the image
    # output_img = np.copy(original_pic)
    # cv2.rectangle(output_img, (x, y), (x + w, y + h), color, thickness)
    # # Display the image using OpenCV
    # cv2.imshow('Box Image', output_img)
    # original_pic = blur_image_openCV(original_pic, "median")
    new_img = image_cutout(original_pic, cen_box)
    cv2.imshow('Center', new_img)
    line_area_gray = calculate_gray1(new_img)
    # # Optional row filtering
    # if not args.get('filter_row') is None:
    #     filter_rows = args.get('filter_row')
    #     return filter_pixel_rows(new_img)
    # filter_pixel_rows(new_img)
    # return filter_pixel_rows(new_img)
    return line_area_gray
def similarity(image):
# print(base64Str)
# image_data = base64.b64decode(base64Str)
# # Convert the image data to a NumPy array
# nparr = np.frombuffer(image_data, np.uint8)
# # Decode the array as an image using OpenCV
# img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    imgCV2 = cv2.imread(image)
    height = imgCV2.shape[0]
    weight = imgCV2.shape[1]
    print("Original image resolution", weight, "*", height)
    # size = (500, height)  # normalised size
    # crop_and_resize_image(image, size)
    img = cv2.imread(image)
    height = img.shape[0]
    weight = img.shape[1]
    print("Image resolution", weight, "*", height)
    # img = blur_image_openCV_1(img, (1, 1))
filtered_results = predict.detection(img, image)
R_box = list()
G_box = list()
B_box = list()
T_box = list()
C_box = list()
for result in filtered_results:
box = [result.xmin, result.ymin, result.xmax, result.ymax]
ID = result.label_id
if (ID == 0): R_box = box
if (ID == 1): G_box = box
if (ID == 2): B_box = box
if (ID == 3): T_box = box
if (ID == 4): C_box = box
if len(T_box) == 0:
return "0.01", "", "", ""
if len(R_box) == 0 or len(G_box) == 0 or len(B_box) == 0:
return "-1000", "", "", ""
if len(C_box) == 0:
return "-100", "", "", ""
# img = blur_image_openCV(img, "gaussian")
# T_rgb = central_coordinate(image_cutout(img, T_box))
# C_rgb = central_coordinate(image_cutout(img, C_box))
# delta_r = Calculated_color_difference((255, 0, 0), central_coordinate(
# image_cutout(img, R_box))) # Pass the RGB values as a tuple
# delta_g = Calculated_color_difference((0, 255, 0), central_coordinate(
# image_cutout(img, G_box))) # Pass the RGB values as a tuple
# delta_b = Calculated_color_difference((0, 0, 255), central_coordinate(
# image_cutout(img, B_box))) # Pass the RGB values as a tuple
delta_r = averageChromatism(img, R_box, (255, 0, 0))
delta_r = averageChromatism_RGB("R", img, R_box, (255, 0, 0))
# delta_r = averageChromatism_RGB_1("R", img, R_box, (255, 0, 0))
print("delta_r", delta_r)
delta_g = averageChromatism(img, G_box, (0, 255, 0))
delta_g = averageChromatism_RGB("G", img, G_box, (0, 255, 0))
print("delta_g", delta_g)
# delta_g = averageChromatism_RGB_1("G", img, G_box, (0, 255, 0))
delta_b = averageChromatism(img, B_box, (0, 0, 255))
delta_b = averageChromatism_RGB("B", img, B_box, (0, 0, 255))
print("delta_b", delta_b)
# delta_b = averageChromatism_RGB_1("B", img, B_box, (0, 0, 255))
# T_rgb = central_coordinate_TC(img, T_box)
# C_rgb = central_coordinate_TC(img, C_box)
# print("T_rgb", T_rgb)
# print("C_rgb", C_rgb)
# Define the RGB values for the two colors
# color1 = restore(T_rgb, delta_r, delta_g, delta_b)
# color2 = restore(C_rgb, delta_r, delta_g, delta_b)
#
    # Width of the C box
    width_c = int((C_box[2] - C_box[0]))
    # Half the height of the C box
    heigth_c = int((C_box[3] - C_box[1]) / 2)
cen = mein(C_box, T_box, width_c, heigth_c, img)
# img_t = image_cutout_TC(img, T_box)
#
# img_c = image_cutout_TC(img, C_box)
imglist = [0, 0, cen]
T_value = central_coordinate_TC_Box("T", img, T_box, delta_r, delta_g, delta_b)
# T_value = central_coordinate_TC_Box_1("T", img, T_box, width_c, heigth_c, delta_r, delta_g, delta_b)
C_value = central_coordinate_TC_Box("C", img, C_box, delta_r, delta_g, delta_b)
# C_value = central_coordinate_TC_Box_1("C", img, C_box, width_c, heigth_c, delta_r, delta_g, delta_b)
median_value = central_coordinate_Cen_Box(img, T_box, C_box, delta_r, delta_g, delta_b)
print("T_value", T_value)
print("C_value", C_value)
print("median_value", median_value)
print("M", filtered_value(cen))
# median_value = test_average(imglist[2])
# Calculate the ratio of the grayscale values
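    # NOTE: each `ratio` assignment below overwrites the previous one; only the final
    # (T - median) / (C - median) formula further down is actually returned.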
ratio = C_value / T_value
ratio = (median_value / ((T_value - C_value) * 10))
value = T_value - C_value
if T_value - C_value <= 10:
print("<=10")
ratio = T_value / C_value
if T_value - C_value >= 10:
ratio = 0.9 + 0.009 * value
if T_value - C_value >= 20:
ratio = 0.6 + 0.004 * value
if T_value - C_value >= 30:
ratio = 0.5 + 0.008 * value
if T_value - C_value >= 40:
ratio = 0.4 + 0.002 * value
if T_value - C_value >= 50:
print("<=50")
ratio = 0.3 + 0.003 * value
ratio = T_value - C_value
# print(T_value - C_value)
# if T_value - C_value <= 10:
# ratio = 1.5 + abs(T_value - C_value) / 500
# if T_value - C_value > 10 and T_value - C_value < 20:
# ratio = 1 + (T_value - C_value) / 100
# if T_value - C_value > 15 and T_value - C_value < 20:
# ratio = 0.8 + (T_value - C_value) / 100
# if T_value - C_value > 20 and T_value - C_value < 30:
# ratio = 0.7 + (T_value - C_value) / 50
# if T_value - C_value > 30 and T_value - C_value < 40:
# ratio = 0.6 + (T_value - C_value) / 150
# if T_value - C_value >= 40:
# ratio = 0.1 + (T_value - C_value) / 300
ratio = (T_value - median_value) / (C_value - median_value)
# ratio = (C_value - filtered_value(cen)) / (T_value - filtered_value(cen))
# ratio = (filtered_value(cen) - T_value) / (filtered_value(cen) - C_value)
    # print("algorithm 2", calculate_test_value_1(imglist))
    # print("algorithm 3", calculate_test_value_2(imglist))
    # print("algorithm 4", calculate_test_value_3(imglist))
    # print("algorithm 6", round(int(T_value) / int(C_value), 2))
    # print("algorithm 7", int(T_value) / int(C_value))
    # print("algorithm 9", calculate_test_value_6(imglist))
    # print("algorithm 9", calculate_test_value_4(imglist))
    # print("algorithm 10", calculate_test_value_7(T_value, C_value, test_average(imglist[2])))
    # return round(ratio, 1)
    print("result", round(ratio, 2))
    return round(ratio, 2), T_value, C_value, median_value
if __name__ == '__main__':
similarity("3.jpg")
cv2.waitKey(0)
cv2.destroyAllWindows()