# Three equivalent ways to build the FaceList resource URL.
# (Removed stray `1` / `2` / `3` no-op literals left over from notebook cell numbers.)
faceListId = "suiqiao78"
# 1) plain string concatenation
url_01 = "https://api-hjq.cognitiveservices.azure.com/face/v1.0/facelists/" + faceListId
# 2) printf-style (%) formatting
url_02 = "https://api-hjq.cognitiveservices.azure.com/face/v1.0/facelists/%s" % (faceListId)
# 3) str.format
url_03 = "https://api-hjq.cognitiveservices.azure.com/face/v1.0/facelists/{}".format(faceListId)
print(url_01, '\n', url_02, '\n', url_03)
import requests
# 1. Create a FaceList (Azure Face API).
# faceListId: identifier of the face list being created.
faceListId = "suiqiao78"
create_facelists_url = "https://api2.cognitiveservices.azure.com/face/v1.0/facelists/{}"
subscription_key = "eaa7f2d3c7e4456fbcc2884e463e77c0" # subscription key — NOTE(review): secret hardcoded; prefer an environment variable
assert subscription_key
headers = {
    # Request headers
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': subscription_key,
}
data = {
    "name": "qqc" ,
    "userData": "bbq",
    # must match the recognitionModel used later by the Detect call
    "recognitionModel":"recognition_03",
}
# Creating a face list uses the HTTP PUT verb.
r_create = requests.put(create_facelists_url.format(faceListId),headers=headers,json=data)
r_create            # expected REPL output: <Response [200]>
r_create.content    # expected REPL output: b'' (PUT returns an empty body on success)
# Check the information of your face list (GET the list resource).
get_facelist_url = "https://api2.cognitiveservices.azure.com/face/v1.0/facelists/{}"
r_get_facelist = requests.get(get_facelist_url.format(faceListId),headers=headers)
r_get_facelist.json()
{'persistedFaces': [],
'faceListId': 'suiqiao78',
'name': 'qqc',
'userData': 'bbq'}
# Try adding a single face first.
# 2. Add Face to the face list.
add_face_url ='https://api2.cognitiveservices.azure.com/face/v1.0/facelists/suiqiao78/persistedfaces'
assert subscription_key
headers = {
    # Request headers
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': subscription_key,
}
img_url = "http://huangjieqi.gitee.io/picture_storage/EdisonQXF.jpg"
params_add_face={
    "faceListId": "suiqiao78",
    "userData": "wcq"
}
# The face image itself is given by URL in the JSON body.
r_add_face = requests.post(add_face_url,headers=headers,params=params_add_face,json={"url":img_url})
r_add_face.status_code
200
r_add_face.json()
{'persistedFaceId': '9ed25cd7-d97d-404f-ad93-2ce188ac69a8'}
def AddFace(img_url: str, userData: str) -> int:
    """Add one face (by image URL) to the module-level face list.

    BUG FIX: the original signature was ``def AddFace(img_url=str, userData=str)``,
    which binds the *type object* ``str`` as the default value — clearly intended
    as type annotations, which is what they are now.

    Relies on the module globals ``subscription_key`` and ``faceListId``.

    :param img_url: publicly reachable URL of the face image
    :param userData: free-form label stored with the persisted face
    :return: HTTP status code of the POST request (200 on success)
    """
    add_face_url = "https://api2.cognitiveservices.azure.com/face/v1.0/facelists/{}/persistedFaces"
    assert subscription_key
    headers = {
        # Request headers
        'Content-Type': 'application/json',
        'Ocp-Apim-Subscription-Key': subscription_key,
    }
    params_add_face = {
        "userData": userData
    }
    # (removed the no-op ``img_url = img_url`` self-assignment)
    r_add_face = requests.post(add_face_url.format(faceListId),
                               headers=headers,
                               params=params_add_face,
                               json={"url": img_url})
    return r_add_face.status_code  # return the status code to the caller
# Batch-register classmates' faces as (image URL, userData name) pairs.
# Replaces twenty copy-pasted AddFace(...) calls with a data-driven loop;
# the requests are issued in the same order with identical arguments.
faces_to_add = [
    ("http://huangjieqi.gitee.io/picture_storage/Autumnhui.jpg", "丘天惠"),
    ("http://huangjieqi.gitee.io/picture_storage/L-Tony-info.jpg", "林嘉茵"),
    ("http://huangjieqi.gitee.io/picture_storage/TLINGP.jpg", "汤玲萍"),
    ("http://huangjieqi.gitee.io/picture_storage/WenYanZeng.jpg", "曾雯燕"),
    ("http://huangjieqi.gitee.io/picture_storage/XIEIC.jpg", "谢依希"),
    ("http://huangjieqi.gitee.io/picture_storage/YuecongYang.png", "杨悦聪"),
    ("http://huangjieqi.gitee.io/picture_storage/Zoezhouyu.jpg", "周雨"),
    ("http://huangjieqi.gitee.io/picture_storage/crayon-heimi.jpg", "刘瑜鹏"),
    ("http://huangjieqi.gitee.io/picture_storage/jiayichen.jpg", "陈嘉仪"),
    ("http://huangjieqi.gitee.io/picture_storage/kg2000.jpg", "徐旖芊"),
    ("http://huangjieqi.gitee.io/picture_storage/liuxinrujiayou.jpg", "刘心如"),
    ("http://huangjieqi.gitee.io/picture_storage/liuyu19.png", "刘宇"),
    ("http://huangjieqi.gitee.io/picture_storage/ltco.jpg", "李婷"),
    ("http://huangjieqi.gitee.io/picture_storage/lucaszy.jpg", "黄智毅"),
    ("http://huangjieqi.gitee.io/picture_storage/pingzi0211.jpg", "黄慧文"),
    ("http://huangjieqi.gitee.io/picture_storage/shmimy-cn.jpg", "张铭睿"),
    ("http://huangjieqi.gitee.io/picture_storage/yichenting.jpg", "陈婷"),
    ("http://huangjieqi.gitee.io/picture_storage/coco022.jpg", "洪可凡"),
    ("http://huangjieqi.gitee.io/picture_storage/lujizhi.png", "卢继志"),
    ("http://huangjieqi.gitee.io/picture_storage/zzlhyy.jpg", "张梓乐"),
]
for face_url, face_name in faces_to_add:
    AddFace(face_url, face_name)  # each call returns 200 on success
# 3. Detect a face in a photo to obtain a faceId (used below by Find Similar).
face_api_url = 'https://api2.cognitiveservices.azure.com/face/v1.0/detect'
image_url = 'http://huangjieqi.gitee.io/picture_storage/hjq.jpg'
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {
    'returnFaceId': 'true',
    'returnFaceLandmarks': 'false',
    # choose the recognition model
    'recognitionModel':'recognition_03',# must match the facelist's recognitionModel
    'detectionModel':'detection_01',
    # optional attributes — read the API documentation carefully
    'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
}
response = requests.post(face_api_url, params=params,headers=headers, json={"url": image_url})
response.json()
[{'faceId': '1500d248-389b-48b8-ab2b-cc003d5fb969',
'faceRectangle': {'top': 264, 'left': 207, 'width': 277, 'height': 277},
'faceAttributes': {'smile': 0.0,
'headPose': {'pitch': 1.7, 'roll': -19.8, 'yaw': -13.9},
'gender': 'male',
'age': 21.0,
'facialHair': {'moustache': 0.1, 'beard': 0.1, 'sideburns': 0.1},
'glasses': 'NoGlasses',
'emotion': {'anger': 0.0,
'contempt': 0.002,
'disgust': 0.0,
'fear': 0.0,
'happiness': 0.0,
'neutral': 0.969,
'sadness': 0.029,
'surprise': 0.0},
'blur': {'blurLevel': 'medium', 'value': 0.44},
'exposure': {'exposureLevel': 'goodExposure', 'value': 0.68},
'noise': {'noiseLevel': 'medium', 'value': 0.39},
'makeup': {'eyeMakeup': False, 'lipMakeup': False},
'accessories': [],
'occlusion': {'foreheadOccluded': False,
'eyeOccluded': False,
'mouthOccluded': False},
'hair': {'bald': 0.18,
'invisible': False,
'hairColor': [{'color': 'brown', 'confidence': 0.97},
{'color': 'black', 'confidence': 0.9},
{'color': 'red', 'confidence': 0.38},
{'color': 'other', 'confidence': 0.23},
{'color': 'blond', 'confidence': 0.15},
{'color': 'gray', 'confidence': 0.09},
{'color': 'white', 'confidence': 0.0}]}}}]
# 4. Return face-similarity confidence values.
findsimilars_url = "https://api2.cognitiveservices.azure.com/face/v1.0/findsimilars"
# Request body — the faceId must come from a prior Detect call (see above).
data_findsimilars = {
    "faceId":"1500d248-389b-48b8-ab2b-cc003d5fb969", # faceId from the Detect response above
    "faceListId": "suiqiao78",
    "maxNumOfCandidatesReturned": 10,
    "mode": "matchFace" # "matchFace" returns similarity scores; "matchPerson" verifies identity
}
r_findsimilars = requests.post(findsimilars_url,headers=headers,json=data_findsimilars)
r_findsimilars.json()
[{'persistedFaceId': 'a8be1065-df0c-4e80-b619-6b97447303c6',
'confidence': 0.29269},
{'persistedFaceId': '3c1b9b90-c9f1-4713-9c4b-c0c623b04b84',
'confidence': 0.20908},
{'persistedFaceId': 'b439cc13-732b-4ce2-b1d0-d5087d5c8e20',
'confidence': 0.17849},
{'persistedFaceId': '9de92160-a6f1-44b1-be4f-fed6da615b64',
'confidence': 0.16209},
{'persistedFaceId': '6d06f21c-9572-45eb-81a2-2245926a6126',
'confidence': 0.15023},
{'persistedFaceId': 'b136b546-0c93-47af-ad72-4bff13634e58',
'confidence': 0.101},
{'persistedFaceId': 'da8fa3ed-a0f2-44ca-8c9a-70e737f679ec',
'confidence': 0.10034},
{'persistedFaceId': '13a2e24a-69c9-44ea-9100-ded401fba7a0',
'confidence': 0.0999},
{'persistedFaceId': '9ed25cd7-d97d-404f-ad93-2ce188ac69a8',
'confidence': 0.09955},
{'persistedFaceId': 'b9cf1d3c-aa01-44ba-bb14-cf3e834b5b2a',
'confidence': 0.09503}]
# View the list (should now contain every face added above).
get_facelist_url = "https://api2.cognitiveservices.azure.com/face/v1.0/facelists/{}"
r_get_facelist = requests.get(get_facelist_url.format(faceListId),headers=headers)
r_get_facelist.json()
{'persistedFaces': [{'persistedFaceId': '9ed25cd7-d97d-404f-ad93-2ce188ac69a8',
'userData': 'wcq'},
{'persistedFaceId': '5fe0c65f-d94b-4f30-8b67-9d890e7b971f',
'userData': '丘天惠'},
{'persistedFaceId': 'da8fa3ed-a0f2-44ca-8c9a-70e737f679ec',
'userData': '林嘉茵'},
{'persistedFaceId': '2725813b-2166-47ab-9b90-6ab2abf2d1c7',
'userData': '汤玲萍'},
{'persistedFaceId': '12493c41-fec9-4a69-8968-95bed717d903',
'userData': '曾雯燕'},
{'persistedFaceId': '47ed440f-131e-4589-a7fc-a5b499d53e6e',
'userData': '谢依希'},
{'persistedFaceId': 'da20b597-f787-4cec-bbc7-6afda2b69b02',
'userData': '杨悦聪'},
{'persistedFaceId': 'eab9ce87-26b2-4758-aa92-ea27b1c7ee2c',
'userData': '周雨'},
{'persistedFaceId': '2a75e81e-8a10-4a94-bdf8-661c4093f900',
'userData': '刘瑜鹏'},
{'persistedFaceId': '9de92160-a6f1-44b1-be4f-fed6da615b64',
'userData': '陈嘉仪'},
{'persistedFaceId': 'a8be1065-df0c-4e80-b619-6b97447303c6',
'userData': '徐旖芊'},
{'persistedFaceId': '6d06f21c-9572-45eb-81a2-2245926a6126',
'userData': '刘心如'},
{'persistedFaceId': '8e8c08bc-901b-4018-ae99-38b56c35e1ef',
'userData': '刘宇'},
{'persistedFaceId': '3c1b9b90-c9f1-4713-9c4b-c0c623b04b84',
'userData': '李婷'},
{'persistedFaceId': 'b9cf1d3c-aa01-44ba-bb14-cf3e834b5b2a',
'userData': '黄智毅'},
{'persistedFaceId': 'adcc9f48-6619-4ccd-b053-541500f91622',
'userData': '黄慧文'},
{'persistedFaceId': 'b439cc13-732b-4ce2-b1d0-d5087d5c8e20',
'userData': '张铭睿'},
{'persistedFaceId': '011b1a23-1c7e-47df-a4f1-0c7a34ecd651',
'userData': '陈婷'},
{'persistedFaceId': 'b136b546-0c93-47af-ad72-4bff13634e58',
'userData': '洪可凡'},
{'persistedFaceId': 'cf7aa208-c8c0-4af2-be1e-840c0ff448d2',
'userData': '卢继志'},
{'persistedFaceId': '13a2e24a-69c9-44ea-9100-ded401fba7a0',
'userData': '张梓乐'}],
'faceListId': 'suiqiao78',
'name': 'qqc',
'userData': 'bbq'}
# Simplify the data with pandas.
import pandas as pd
# Face-list data flattened into a DataFrame.
adf = pd.json_normalize(r_get_facelist.json()["persistedFaces"])
adf
# Similarity data flattened into a DataFrame.
bdf = pd.json_normalize(r_findsimilars.json())# top-level json_normalize requires a recent pandas
bdf
# Join on persistedFaceId and rank by similarity, highest first.
pd.merge(adf,bdf,how='inner', on='persistedFaceId').sort_values(by="confidence",ascending = False)
# NOTE(review): the two frames occasionally failed to merge on the first run;
# re-running fixed it — root cause unknown, worth investigating.
# Delete a single face from the face list by its persistedFaceId.
# BUG FIX: the original evaluated `r_delete_face` BEFORE it was assigned
# (NameError); the inspection now happens after the DELETE request.
faceListId = "suiqiao78"
delete_face_url = "https://api2.cognitiveservices.azure.com/face/v1.0/facelists/{}/persistedfaces/{}"
assert subscription_key
# persistedFaceId of the single face added earlier (taken from r_add_face).
persistedFaceId = r_add_face.json()["persistedFaceId"]
headers = {
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': subscription_key,
}
# Note: this endpoint uses the HTTP DELETE verb.
r_delete_face = requests.delete(delete_face_url.format(faceListId, persistedFaceId), headers=headers)
r_delete_face  # inspect the response (expect <Response [200]>)
# Delete the whole face list.
delete_facelist_url = "https://api2.cognitiveservices.azure.com/face/v1.0/facelists/{}"
assert subscription_key
headers = {
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': subscription_key,
}
# DELETE on the list resource removes the list and all its persisted faces.
r_delete_facelist = requests.delete(delete_facelist_url.format(faceListId),headers=headers)
r_delete_facelist
# expected REPL output: <Response [200]>
# Face++ credentials — NOTE(review): secrets hardcoded; prefer environment variables.
api_secret = "ZChjCWQ7eLeIsAx7Rz4YjJXr4xWYDSeU"
api_key = 'Vp8rarJ3g1kz8mCan2bTMJeCEOLiTXsF'
# 1. FaceSet Create
import requests,json
display_name = "加油666" # custom display name for the face set
outer_id = "1008" # custom identifier for the face set
user_data = "suiqiao" # custom user data
CreateFace_Url = "https://api-cn.faceplusplus.com/facepp/v3/faceset/create" # endpoint URL
payload = {
    # request parameters
    'api_key': api_key,
    'api_secret': api_secret,
    'display_name':display_name,
    'outer_id':outer_id,
    'user_data':user_data
}
r = requests.post(CreateFace_Url, params=payload)
r.json()
{'faceset_token': '5eb076ca8c1e7e438101eadc212e9afc',
'time_used': 200,
'face_count': 0,
'face_added': 0,
'request_id': '1603504838,b267b8de-6d67-4d8f-a63b-480583ad6a49',
'outer_id': '1008',
'failure_detail': []}
# 2. FaceSet GetDetail — fetch information about the face set by outer_id.
GetDetail_Url = "https://api-cn.faceplusplus.com/facepp/v3/faceset/getdetail"
payload = {
    'api_key': api_key,
    'api_secret': api_secret,
    'outer_id':outer_id,
}
r = requests.post(GetDetail_Url,params=payload)
r.json()
{'faceset_token': '5eb076ca8c1e7e438101eadc212e9afc',
'tags': '',
'time_used': 84,
'user_data': 'suiqiao',
'display_name': '加油666',
'face_tokens': [],
'face_count': 0,
'request_id': '1603504959,4231c6cb-d025-46e3-a5e9-5a0c1782d385',
'outer_id': '1008'}
# 3. FaceSet AddFace — add face information to the set.
# NOTE(review): the URL string has a leading space; the recorded output shows the
# call still reached the service, but it should be trimmed.
AddFace_url = " https://api-cn.faceplusplus.com/facepp/v3/faceset/addface"
payload = {
    'api_key': api_key,
    'api_secret': api_secret,
    'faceset_token':'5eb076ca8c1e7e438101eadc212e9afc',
    # NOTE(review): this token was rejected with INVALID_FACE_TOKEN (see output
    # below) — a valid face_token must come from a Detect call on this account.
    'face_tokens':'1aff0090208b37e2f8de321a8e2e96c7', # qianmiande
}
r = requests.post(AddFace_url,params=payload)
r.json()
{'faceset_token': '5eb076ca8c1e7e438101eadc212e9afc',
'time_used': 80,
'face_count': 0,
'face_added': 0,
'request_id': '1603505363,242bcef6-826b-42ee-890b-722595798052',
'outer_id': '1008',
'failure_detail': [{'reason': 'INVALID_FACE_TOKEN',
'face_token': '1aff0090208b37e2f8de321a8e2e96c7'}]}
# 4. FaceSet RemoveFace — remove face information from the set.
# (Leading space in the URL kept as in the original; appears tolerated.)
RemoveFace_url = " https://api-cn.faceplusplus.com/facepp/v3/faceset/removeface"
payload = {
    'api_key': api_key,
    'api_secret': api_secret,
    'faceset_token':'5eb076ca8c1e7e438101eadc212e9afc',
    'face_tokens':'1aff0090208b37e2f8de321a8e2e96c7',
}
r = requests.post(RemoveFace_url,params=payload)
r.json()
{'faceset_token': '5eb076ca8c1e7e438101eadc212e9afc',
'face_removed': 0,
'time_used': 118,
'face_count': 0,
'request_id': '1603505499,54d769c3-2eca-4a67-b853-c2ab9a091d67',
'outer_id': '1008',
'failure_detail': [{'reason': 'FACE_NOT_IN_FACESET',
'face_token': '1aff0090208b37e2f8de321a8e2e96c7'}]}
# 5. FaceSet Update — modify the user_data of the face set.
Update_url = "https://api-cn.faceplusplus.com/facepp/v3/faceset/update"
payload = {
    'api_key': api_key,
    'api_secret': api_secret,
    'faceset_token':'5eb076ca8c1e7e438101eadc212e9afc',
    'user_data':"Test.",
}
r = requests.post(Update_url,params=payload)
r.json()
{'faceset_token': '5eb076ca8c1e7e438101eadc212e9afc',
'request_id': '1603506236,536b66ba-951b-4a13-8ef1-a7a9849664ae',
'time_used': 57,
'outer_id': '1008'}
# 6. Compare Face — similarity confidence between two face images.
yyqx = "https://i.loli.net/2020/10/24/mq6CidMSutr4Plx.jpg"
yyqx2 = "https://i.loli.net/2020/10/24/M4Y9qz6KVQ2XRyi.jpg"
xiaozhai = "https://i.loli.net/2020/10/24/HWAMEkcYur5NwyU.jpg"
Compare_url = "https://api-cn.faceplusplus.com/facepp/v3/compare"
payload ={
    'api_key': api_key,
    'api_secret': api_secret,
    # BUG FIX: the original referenced undefined names `liudehua01` / `wangzulan`
    # (NameError). Compare two of the URLs actually defined above instead.
    'image_url1': yyqx,
    'image_url2': xiaozhai
}
r = requests.post(Compare_url,params=payload)
r.json()
{'faces1': [{'face_rectangle': {'width': 824,
'top': 871,
'left': 1114,
'height': 824},
'face_token': '1b04411a188d4ff54c25632bdac2377e'}],
'faces2': [{'face_rectangle': {'width': 86,
'top': 91,
'left': 65,
'height': 86},
'face_token': '0583824b709e63484d4f1814d0d21200'}],
'time_used': 1820,
'thresholds': {'1e-3': 62.327, '1e-5': 73.975, '1e-4': 69.101},
'confidence': 26.085,
'image_id2': 'g6kg8zfyOouG6ftP+GvEfg==',
'image_id1': 'KIOXEC2V/MyL4zuopAcNig==',
'request_id': '1603506567,fff41d5b-93f4-412c-a22d-bb19eb1c05cf'}
Secret_key = 'DH3uIYICbg34zpPXdnNsTHGnoAgKVhjr'
API_key = 'BRogF97Qkfqtzuh5Q8Ekhiq4'
import requests
import base64
import matplotlib.pyplot as plt # plt 用于显示图片
import matplotlib.image as mpimg # mpimg 用于读取图片
# Obtain an access_token for the Baidu AI platform.
def gettoken():
    """Return a Baidu AIP OAuth access_token, or None if the request failed.

    Uses the module-level ``API_key`` / ``Secret_key`` credentials; the original
    duplicated the same values hardcoded inside the URL.
    """
    host = ('https://aip.baidubce.com/oauth/2.0/token'
            '?grant_type=client_credentials'
            '&client_id=' + API_key +
            '&client_secret=' + Secret_key)
    response = requests.get(host)
    if response:  # truthy for 2xx/3xx status codes
        return response.json()['access_token']
    return None  # explicit failure signal (was an implicit None)
def getscore(url):
    """Detect the face in a local image and print Baidu's 'beauty' score,
    then display the image with matplotlib.

    :param url: local filesystem path of the image (despite the name)
    """
    # BUG FIX: the original opened the file without closing it; a context
    # manager releases the handle deterministically.
    with open(url, 'rb') as f:  # read binary, then base64-encode
        base64_data = base64.b64encode(f.read())
    request_url = "https://aip.baidubce.com/rest/2.0/face/v3/detect"
    # request parameters
    params = {
        "image":base64_data,
        "image_type":"BASE64",
        "face_field":"beauty"  # ask only for the beauty attribute
    }
    # append the OAuth token and send the request
    request_url = request_url + "?access_token=" + gettoken()
    headers = {'content-type': 'application/json'}
    response = requests.post(request_url, data=params, headers=headers)
    if response:
        print ("经过百度智能云的AI大数据评分,您的颜值分数为:",response.json()['result']['face_list'][0]['beauty'])
    # show the scored image
    lena = mpimg.imread(url)
    plt.imshow(lena)
    plt.axis('off')
    plt.show()
# Score a local photo (side effects: prints the score, shows the image).
getscore('C:/Users/ycqq/Desktop/翟.jpg')
import requests
# client_id is the API Key from the console; client_secret is the Secret Key.
host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={}&client_secret={}'
client_id = "tGtBNMzw74rxzFS3kqzStlkm"
client_secret = "V1cHTUxTeKs8cntxObggHGoEQPh6n80v"
response = requests.get(host.format(client_id, client_secret))
if response:
    print(response.json())
{'refresh_token': '25.119b23100eca9380c8e2140144971a07.315360000.1918882605.282335-22868460', 'expires_in': 2592000, 'session_key': '9mzdCPL0Tn/8/Su8WCUgwCNF9L6gNIWPgg65o62M30GzwA1XUlFNsjoxlGZ8r5wLFQa2mAc8ONFIXVTcLvcSPZArCmIhxA==', 'access_token': '24.36b9630d03ab86ee08e690693f2998bd.2592000.1606114605.282335-22868460', 'scope': 'public brain_all_scope vis-faceverify_faceverify_h5-face-liveness vis-faceverify_FACE_V3 vis-faceverify_idl_face_merge vis-faceverify_FACE_EFFECT vis-faceverify_face_feature_sdk wise_adapt lebo_resource_base lightservice_public hetu_basic lightcms_map_poi kaidian_kaidian ApsMisTest_Test权限 vis-classify_flower lpq_开放 cop_helloScope ApsMis_fangdi_permission smartapp_snsapi_base smartapp_mapp_dev_manage iop_autocar oauth_tp_app smartapp_smart_game_openapi oauth_sessionkey smartapp_swanid_verify smartapp_opensource_openapi smartapp_opensource_recapi fake_face_detect_开放Scope vis-ocr_虚拟人物助理 idl-video_虚拟人物助理 smartapp_component', 'session_secret': '612035b1bcf0173ac19307f81510239e'}
request_url = "https://aip.baidubce.com/rest/2.0/face/v3/detect"
# JSON request body as a pre-serialized string (sent via data=, not json=).
params = "{\"image\":\"https://ss2.bdstatic.com/70cFvnSh_Q1YnxGkpoWK1HF6hhy/it/u=3430692674,459091344&fm=26&gp=0.jpg\",\"image_type\":\"URL\",\"face_field\":\"faceshape,facetype\"}"
# image_type options: BASE64; URL; FACE_TOKEN.
access_token = '25.119b23100eca9380c8e2140144971a07.315360000.1918882605.282335-22868460' # NOTE(review): token pasted from the response above; presumably should be refreshed programmatically — confirm
request_url = request_url + "?access_token=" + access_token
headers = {'content-type': 'application/json'}
response = requests.post(request_url, data=params, headers=headers)
response.json()
##分析远程图像
import requests
# If you are using a Jupyter notebook, uncomment the following line.
# %matplotlib inline
import matplotlib.pyplot as plt
import json
from PIL import Image
from io import BytesIO
# Add your Computer Vision subscription key and endpoint to your environment variables.
# if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ:
# subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']
# else:
# print("\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**")
# sys.exit()
# Azure Computer Vision endpoint and key.
endpoint = "https://gxl.cognitiveservices.azure.com/"
# if 'COMPUTER_VISION_ENDPOINT' in os.environ:
#     endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
subscription_key = "754e142aa949439aa27e58b26ae6534c" # NOTE(review): secret hardcoded; prefer an environment variable
# base url
analyze_url = endpoint+ "vision/v2.1/analyze"
# Set image_url to the URL of an image that you want to analyze.
image_url = "https://i.loli.net/2020/10/24/DlRLJIBWowp32cU.jpg"
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
# query parameters: which analyses to run
params = {'visualFeatures': 'Categories,Description,Color'}
# request body
data = {'url': image_url}
response = requests.post(analyze_url, headers=headers,
                         params=params, json=data)
response.raise_for_status()
# The 'analysis' object contains various fields that describe the image. The most
# relevant caption for the image is obtained from the 'description' property.
analysis = response.json()
print(json.dumps(response.json()))
image_caption = analysis["description"]["captions"][0]["text"].capitalize()
# Display the image and overlay it with the caption.
image = Image.open(BytesIO(requests.get(image_url).content))
plt.imshow(image)
plt.axis("off")
_ = plt.title(image_caption, size="x-large", y=-0.1)
plt.show()
response.json()
{'categories': [{'name': 'others_', 'score': 0.00390625},
{'name': 'people_', 'score': 0.72265625, 'detail': {'celebrities': []}}],
'color': {'dominantColorForeground': 'White',
'dominantColorBackground': 'White',
'dominantColors': [],
'accentColor': '285EA3',
'isBwImg': False,
'isBWImg': False},
'description': {'tags': ['person',
'man',
'standing',
'holding',
'suit',
'wearing',
'woman',
'jacket',
'dressed',
'posing',
'carrying',
'young',
'walking',
'smiling',
'boy',
'street',
'blue',
'people',
'shirt',
'phone',
'room'],
'captions': [{'text': 'a person wearing a suit and tie',
'confidence': 0.9723753037851468}]},
'requestId': '1b4382ef-fb11-449f-91c0-0bc8400efe8c',
'metadata': {'height': 700, 'width': 700, 'format': 'Jpeg'}}
##分析本地图片
import os
import sys
import requests
# If you are using a Jupyter notebook, uncomment the following line.
# %matplotlib inline
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
# Add your Computer Vision subscription key and endpoint to your environment variables.
# if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ:
# subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']
# else:
# print("\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**")
# sys.exit()
# if 'COMPUTER_VISION_ENDPOINT' in os.environ:
# endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
# analyze_url = endpoint + "vision/v2.1/analyze"
# Set image_path to the local path of an image that you want to analyze.
image_path = "C:/Users/ycqq/Desktop/一.jpg"
analyze_url = "https://gxl.cognitiveservices.azure.com/vision/v2.1/analyze"
# Read the image into a byte array.
# BUG FIX: the original used open(...).read() without closing the handle;
# a context manager releases the file deterministically.
with open(image_path, "rb") as image_file:
    image_data = image_file.read()
headers = {'Ocp-Apim-Subscription-Key': "754e142aa949439aa27e58b26ae6534c",
           'Content-Type': 'application/octet-stream'}  # raw bytes, not JSON
params = {'visualFeatures': 'Categories,Description,Color'}
response = requests.post(
    analyze_url, headers=headers, params=params, data=image_data)
response.raise_for_status()  # raise on HTTP 4xx/5xx
# The 'analysis' object contains various fields that describe the image. The most
# relevant caption for the image is obtained from the 'description' property.
analysis = response.json()
print(analysis)
image_caption = analysis["description"]["captions"][0]["text"].capitalize()
# Display the image and overlay it with the caption.
image = Image.open(BytesIO(image_data))
plt.imshow(image)
plt.axis("off")
_ = plt.title(image_caption, size="x-large", y=-0.1)
##生成缩略图
import os
import sys
import requests
# If you are using a Jupyter notebook, uncomment the following line.
# %matplotlib inline
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
# Add your Computer Vision subscription key and endpoint to your environment variables.
# if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ:
# subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']
# else:
# print("\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**")
# sys.exit()
# if 'COMPUTER_VISION_ENDPOINT' in os.environ:
# endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
# Generate a 100x100 smart-cropped thumbnail of a remote image.
thumbnail_url = "https://gxl.cognitiveservices.azure.com/" + "vision/v2.1/generateThumbnail"
# Set image_url to the URL of an image that you want to analyze.
# NOTE(review): the URL string has a trailing space — appears tolerated, but should be trimmed.
image_url = "https://i.loli.net/2020/10/24/xpe5E4JWiX279qV.jpg "
headers = {'Ocp-Apim-Subscription-Key': "754e142aa949439aa27e58b26ae6534c"}
params = {'width': '100', 'height': '100', 'smartCropping': 'true'}
data = {'url': image_url}
response = requests.post(thumbnail_url, headers=headers,
                         params=params, json=data)
response.raise_for_status()
# The response body is the thumbnail image bytes.
thumbnail = Image.open(BytesIO(response.content))
# Display the thumbnail.
plt.imshow(thumbnail)
plt.axis("off")
# Verify the thumbnail size.
print("Thumbnail is {0}-by-{1}".format(*thumbnail.size))
##效果图
##原图
# Extract printed and handwritten text with the Azure Read (OCR) API.
import json
import os
import sys
import requests
import time
# %matplotlib inline  -- IPython magic; BUG FIX: left bare it is a SyntaxError in
# plain Python, so it is commented out (matching the other sections of this file).
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from PIL import Image
from io import BytesIO
# NOTE(review): relies on the module-level `endpoint` defined in the remote-image
# section; endpoint already ends with '/' so this URL contains '//' — the recorded
# output shows the service tolerated it, but it should be normalized.
text_recognition_url = endpoint + "/vision/v3.0/read/analyze"
# Set image_url to the URL of an image that you want to recognize.
image_url = "https://i.loli.net/2020/10/24/5sHzZ7jaXP6lMc1.jpg"
headers = {'Ocp-Apim-Subscription-Key': "754e142aa949439aa27e58b26ae6534c"}
data = {'url': image_url}
response = requests.post(text_recognition_url, headers=headers, json=data)
response.raise_for_status()
# Extracting text requires two API calls: one call to submit the
# image for processing, the other to retrieve the text found in the image.
# Holds the URI used to retrieve the recognized text.
operation_url = response.headers["Operation-Location"]
# The recognized text isn't immediately available, so poll to wait for completion.
analysis = {}
poll = True
while poll:
    response_final = requests.get(
        response.headers["Operation-Location"], headers=headers)
    analysis = response_final.json()
    print(json.dumps(analysis, indent=4))
    time.sleep(1)  # avoid hammering the service while the job runs
    if ("analyzeResult" in analysis):
        poll = False
    if ("status" in analysis and analysis['status'] == 'failed'):
        poll = False
polygons = []
if ("analyzeResult" in analysis):
    # Extract the recognized text, with bounding boxes.
    polygons = [(line["boundingBox"], line["text"])
                for line in analysis["analyzeResult"]["readResults"][0]["lines"]]
# Display the image and overlay it with the extracted text.
image = Image.open(BytesIO(requests.get(image_url).content))
ax = plt.imshow(image)
for polygon in polygons:
    # boundingBox is a flat [x0, y0, x1, y1, ...] list; pair up coordinates.
    vertices = [(polygon[0][i], polygon[0][i + 1])
                for i in range(0, len(polygon[0]), 2)]
    text = polygon[1]
    patch = Polygon(vertices, closed=True, fill=False, linewidth=2, color='y')
    ax.axes.add_patch(patch)
    plt.text(vertices[0][0], vertices[0][1], text, fontsize=20, va="top")
plt.show()
{
"status": "succeeded",
"createdDateTime": "2020-10-24T07:30:48Z",
"lastUpdatedDateTime": "2020-10-24T07:30:49Z",
"analyzeResult": {
"version": "3.0.0",
"readResults": [
{
"page": 1,
"angle": -0.2274,
"width": 690,
"height": 1227,
"unit": "pixel",
"lines": [
{
"boundingBox": [
188,
509,
364,
509,
364,
539,
188,
537
],
"text": "on my way",
"words": [
{
"boundingBox": [
193,
510,
242,
510,
242,
537,
194,
536
],
"text": "on",
"confidence": 0.986
},
{
"boundingBox": [
255,
509,
298,
509,
298,
538,
255,
537
],
"text": "my",
"confidence": 0.986
},
{
"boundingBox": [
312,
510,
364,
510,
362,
540,
311,
538
],
"text": "way",
"confidence": 0.986
}
]
},
{
"boundingBox": [
189,
541,
315,
541,
315,
571,
189,
572
],
"text": "to hell :)",
"words": [
{
"boundingBox": [
189,
542,
222,
542,
223,
572,
190,
572
],
"text": "to",
"confidence": 0.988
},
{
"boundingBox": [
228,
542,
289,
541,
288,
572,
228,
572
],
"text": "hell",
"confidence": 0.983
},
{
"boundingBox": [
294,
541,
316,
542,
315,
572,
294,
572
],
"text": ":)",
"confidence": 0.636
}
]
},
{
"boundingBox": [
193,
608,
308,
607,
309,
639,
193,
640
],
"text": "school'",
"words": [
{
"boundingBox": [
193,
608,
309,
608,
309,
640,
194,
641
],
"text": "school'",
"confidence": 0.802
}
]
}
]
}
]
}
}
#学习人脸识别和计算机视觉心得体会
1.多跟同学交流,向大佬求助,不要把自己困在一个坑太久,如果找了很久都找不到错误所在,尝试问问他人有没有同样的bug,或者请他们帮忙看,有的时候别人一眼看出来的东西你自己就是看不出来。
2.要花时间去读,去练,去尝试,去犯错,多错几遍你就会错出经验来。不要一开始觉得很难就不去看,这样永远学不会。
3.其实看到最后运行结果200或者功能实现还是蛮开心的,尝试在这其中找到乐趣和成就感,你才会有动力坚持下去。