# API-作业
**Repository Path**: fanpeikun/api-job
## Basic Information
- **Project Name**: API-作业
- **Description**: No description available
- **Primary Language**: Unknown
- **License**: Not specified
- **Default Branch**: master
- **Homepage**: None
- **GVP Project**: No
## Statistics
- **Stars**: 0
- **Forks**: 0
- **Created**: 2020-10-24
- **Last Updated**: 2020-12-19
## Categories & Tags
**Categories**: Uncategorized
**Tags**: None
## README
# API 作业-1
* [一、人脸识别](#face)
* [二、计算机视觉](#vision)
* [三、学习心得](#learn)
---
## 一、人脸识别 <a name="face"></a>
### Azure
参考:[Azure 人脸识别文档](https://docs.microsoft.com/zh-cn/rest/api/cognitiveservices/face/facelist/create)
- Face Detect
```python
# Import required modules. (Fixed: the original line began with a stray "-",
# which is a SyntaxError in Python.)
import requests
import json

# Azure Face subscription key.
# NOTE(review): hard-coded credential — move to an environment variable before publishing.
subscription_key = "64a33a0983774e318e3fb211c70a94a7"
assert subscription_key

# Face Detect endpoint. (Fixed: the original URL had a doubled slash before "face",
# inconsistent with every other endpoint in this document.)
face_api_url = 'https://fpkapi.cognitiveservices.azure.com/face/v1.0/detect'

# URL of the face photo to analyse.
image_url = 'https://gitee.com/fanpeikun/my-ku/raw/master/f692a03cb7fbc3545eac264b18fc2367.jpeg'

# Request headers carrying the subscription key.
headers = {'Ocp-Apim-Subscription-Key': subscription_key}

# Query-string parameters.
params = {
    'returnFaceId': 'true',
    'returnFaceLandmarks': 'false',
    # Optional attributes — see the API documentation for the full list.
    'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
}

# POST the image URL as the JSON body; requests serialises the dict for us.
response = requests.post(face_api_url, params=params,
                         headers=headers, json={"url": image_url})
response
```
```python
# Decode the JSON response body into Python objects.
results = response.json()
results
```
```
[{'faceId': 'b1327850-c76e-4c59-8c30-b957269c0534',
'faceRectangle': {'top': 306, 'left': 1043, 'width': 492, 'height': 492},
'faceAttributes': {'smile': 0.0,
'headPose': {'pitch': -7.5, 'roll': 0.3, 'yaw': -3.2},
'gender': 'female',
'age': 22.0,
'facialHair': {'moustache': 0.0, 'beard': 0.0, 'sideburns': 0.0},
'glasses': 'NoGlasses',
'emotion': {'anger': 0.0,
'contempt': 0.0,
'disgust': 0.0,
'fear': 0.0,
'happiness': 0.0,
'neutral': 0.932,
'sadness': 0.068,
'surprise': 0.0},
'blur': {'blurLevel': 'medium', 'value': 0.48},
'exposure': {'exposureLevel': 'overExposure', 'value': 0.79},
'noise': {'noiseLevel': 'low', 'value': 0.0},
'makeup': {'eyeMakeup': True, 'lipMakeup': True},
'accessories': [],
'occlusion': {'foreheadOccluded': False,
'eyeOccluded': False,
'mouthOccluded': False},
'hair': {'bald': 0.04,
'invisible': False,
'hairColor': [{'color': 'blond', 'confidence': 1.0},
{'color': 'gray', 'confidence': 0.87},
{'color': 'other', 'confidence': 0.56},
{'color': 'red', 'confidence': 0.2},
{'color': 'brown', 'confidence': 0.07},
{'color': 'black', 'confidence': 0.02},
{'color': 'white', 'confidence': 0.0}]}}}]
```
```
# Flatten the nested JSON result into a table with pandas.
import pandas as pd
df = pd.json_normalize(results)
df
```
|faceId|faceRectangle.top|faceRectangle.left|faceRectangle.width|faceRectangle.height|faceAttributes.smile|faceAttributes.headPose.pitch|faceAttributes.headPose.roll|faceAttributes.headPose.yaw|faceAttributes.gender|...|faceAttributes.noise.value|faceAttributes.makeup.eyeMakeup|faceAttributes.makeup.lipMakeup|faceAttributes.accessories|faceAttributes.occlusion.foreheadOccluded|faceAttributes.occlusion.eyeOccluded|faceAttributes.occlusion.mouthOccluded|faceAttributes.hair.bald|faceAttributes.hair.invisible|faceAttributes.hair.hairColor|
|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|:--:|
|b1327850-c76e-4c59-8c30-b957269c0534|306|1043|492|492|0.0|-7.5|0.3|-3.2|female|...|0.0|True|True|[]|False|False|False|0.04|False|[{'color': 'blond', 'confidence': 1.0}, {'colo...|
- FaceList & Find Similar
```python
import requests
# 1. Create a face list.
faceListId = "list_22" # ID of the face list to create
create_facelists_url ="https://fpkapi.cognitiveservices.azure.com/face/v1.0/facelists/{}" # URL template (see API docs)
subscription_key ="64a33a0983774e318e3fb211c70a94a7" # subscription key — NOTE(review): hard-coded credential
# Request headers.
headers = {
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': subscription_key,
}
# List metadata and the recognition model to use.
data = {
"name": "sample_list",
"userData": "相册",
"recognitionModel": "recognition_03"
}
# Send the request. PUT creates the list; the empty body b'' shown below indicates success.
r_create = requests.put(create_facelists_url.format(faceListId), headers=headers, json=data)
r_create.content
```
b''
```python
# 2. Add a face to the face list.
add_face_url ="https://fpkapi.cognitiveservices.azure.com/face/v1.0/facelists/{}/persistedfaces"
assert subscription_key
headers = {
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': subscription_key,
}
# URL of the face photo to add.
img_url ="https://gitee.com/fanpeikun/my-ku/raw/master/41c37c09abd946e4da3abaf6f7f1234f.jpg"
# Free-form user data stored alongside the persisted face.
params_add_face={
"userData":"flower jisoo"
}
# Send the request; the response carries the persistedFaceId.
r_add_face = requests.post(add_face_url.format(faceListId),headers=headers,params=params_add_face,json={"url":img_url})
r_add_face.status_code
```
200
```python
r_add_face.json() # 返回persistedFaceId
```
{'persistedFaceId': '9f848c50-e613-498f-b3ba-778b4f4cf73e'}
```python
# Wrap the add-face call in a function to make adding images easier.
def AddFace(img_url: str, userData: str) -> int:
    """Add one face image to the face list and return the HTTP status code.

    Fixed: the original signature used ``img_url=str, userData=str``, which
    makes the *type object* ``str`` the default value; proper type
    annotations are used instead. The no-op ``img_url = img_url`` was removed.

    Relies on the module-level ``subscription_key`` and ``faceListId``
    defined in earlier cells.
    """
    add_face_url = "https://fpkapi.cognitiveservices.azure.com/face/v1.0/facelists/{}/persistedFaces"
    assert subscription_key
    headers = {
        # Request headers.
        'Content-Type': 'application/json',
        'Ocp-Apim-Subscription-Key': subscription_key,
    }
    params_add_face = {
        "userData": userData
    }
    r_add_face = requests.post(add_face_url.format(faceListId),
                               headers=headers, params=params_add_face,
                               json={"url": img_url})
    return r_add_face.status_code  # HTTP status code of the add call
```
```
AddFace("https://gitee.com/fanpeikun/my-ku/raw/master/f692a03cb7fbc3545eac264b18fc2367.jpeg","Billie")
AddFace("https://gitee.com/fanpeikun/my-ku/raw/master/20191214152337.jpg","jisoo")
```
200
```python
# 3. Detect a face (to obtain the faceId used by Find Similar below).
face_api_url = 'https://fpkapi.cognitiveservices.azure.com/face/v1.0/detect'
image_url = 'https://gitee.com/fanpeikun/my-ku/raw/master/41c37c09abd946e4da3abaf6f7f1234f.jpg'
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
# Request parameters.
params = {
'returnFaceId': 'true',
'returnFaceLandmarks': 'false',
# Model selection.
'recognitionModel':'recognition_03',# must match the recognitionModel used for the face list
'detectionModel':'detection_01',
# Optional attributes — see the API documentation.
'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
}
response = requests.post(face_api_url, params=params,headers=headers, json={"url": image_url})
response.json()
```
[{'faceId': 'c251b377-b2d3-485a-9e71-cebea1586dce',
'faceRectangle': {'top': 207, 'left': 599, 'width': 311, 'height': 311},
'faceAttributes': {'smile': 0.01,
'headPose': {'pitch': 8.8, 'roll': 16.9, 'yaw': -18.8},
'gender': 'female',
'age': 22.0,
'facialHair': {'moustache': 0.0, 'beard': 0.0, 'sideburns': 0.0},
'glasses': 'NoGlasses',
'emotion': {'anger': 0.0,
'contempt': 0.0,
'disgust': 0.0,
'fear': 0.0,
'happiness': 0.01,
'neutral': 0.988,
'sadness': 0.001,
'surprise': 0.0},
'blur': {'blurLevel': 'low', 'value': 0.07},
'exposure': {'exposureLevel': 'goodExposure', 'value': 0.66},
'noise': {'noiseLevel': 'medium', 'value': 0.41},
'makeup': {'eyeMakeup': True, 'lipMakeup': True},
'accessories': [{'type': 'headwear', 'confidence': 1.0}],
'occlusion': {'foreheadOccluded': True,
'eyeOccluded': False,
'mouthOccluded': False},
'hair': {'bald': 0.0, 'invisible': True, 'hairColor': []}}}]
```python
# 4. Find Similar: return similarity confidences against the face list.
findsimilars_url = "https://fpkapi.cognitiveservices.azure.com/face/v1.0/findsimilars"
# Request body. The faceId must first be obtained by detecting a photo.
data_findsimilars = {
"faceId":"c251b377-b2d3-485a-9e71-cebea1586dce", # faceId from the Detect call above
"faceListId": "list_22",
"maxNumOfCandidatesReturned": 10,
"mode": "matchFace" # "matchPerson" verifies identity; "matchFace" ranks by raw similarity
}
r_findsimilars = requests.post(findsimilars_url,headers=headers,json=data_findsimilars)
r_findsimilars.json()
```
[{'persistedFaceId': '9f848c50-e613-498f-b3ba-778b4f4cf73e',
'confidence': 1.0},
{'persistedFaceId': '20438fb7-11f0-4f0f-a66c-e96ffb8ed1f5',
'confidence': 0.14733},
{'persistedFaceId': '2c210b2d-c72e-4690-b321-66d0218d2f53',
'confidence': 0.09571}]
```python
# Retrieve the face list and its persisted faces.
get_facelist_url = "https://fpkapi.cognitiveservices.azure.com/face/v1.0/facelists/{}"
r_get_facelist = requests.get(get_facelist_url.format(faceListId),headers=headers)
r_get_facelist.json()
```
{'persistedFaces': [{'persistedFaceId': '9f848c50-e613-498f-b3ba-778b4f4cf73e',
'userData': 'flower jisoo'},
{'persistedFaceId': '2c210b2d-c72e-4690-b321-66d0218d2f53',
'userData': 'Billie'},
{'persistedFaceId': '20438fb7-11f0-4f0f-a66c-e96ffb8ed1f5',
'userData': 'jisoo'}],
'faceListId': 'list_22',
'name': 'sample_list',
'userData': '相册'}
```python
# Flatten the JSON into a table with pandas.
import pandas as pd
# Normalize the persisted-face records of the face list.
adf = pd.json_normalize(r_get_facelist.json()["persistedFaces"])
adf
```
||persistedFaceId|userData|
|:--:|:--:|:--:|
|0|9f848c50-e613-498f-b3ba-778b4f4cf73e|flower jisoo|
|1|2c210b2d-c72e-4690-b321-66d0218d2f53|Billie|
|2|20438fb7-11f0-4f0f-a66c-e96ffb8ed1f5|jisoo|
```
# 返回相似度数据
bdf = pd.json_normalize(r_findsimilars.json())# 升级pandas才能运行
bdf
```
||persistedFaceId|confidence|
|:--:|:--:|:--:|
|0|9f848c50-e613-498f-b3ba-778b4f4cf73e|1.00000|
|1|20438fb7-11f0-4f0f-a66c-e96ffb8ed1f5|0.14733|
|2|2c210b2d-c72e-4690-b321-66d0218d2f53|0.09571|
```
#合并
pd.merge(adf, bdf,how='inner', on='persistedFaceId').sort_values(by="confidence",ascending = False)
```
||persistedFaceId|userData|confidence|
|:--:|:--:|:--:|:--:|
|0|9f848c50-e613-498f-b3ba-778b4f4cf73e|flower jisoo|1.00000|
|1|20438fb7-11f0-4f0f-a66c-e96ffb8ed1f5|jisoo|0.14733|
|2|2c210b2d-c72e-4690-b321-66d0218d2f53|Billie|0.09571|
```python
# 5. Delete a face / delete the face list.
faceListId = "list_22" # face list to delete from
# Endpoint for deleting a single persisted face from a list.
delete_face_url = "https://fpkapi.cognitiveservices.azure.com/face/v1.0/facelists/{}/persistedfaces/{}"
assert subscription_key
# persistedFaceId returned by the earlier add-face call.
persistedFaceId = r_add_face.json()["persistedFaceId"]
headers = {
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': subscription_key,
}
# Note: the HTTP verb is DELETE.
r_delete_face = requests.delete(delete_face_url.format(faceListId,persistedFaceId),headers=headers)
```
```python
# Delete the whole face list.
delete_facelist_url = "https://fpkapi.cognitiveservices.azure.com/face/v1.0/facelists/{}"
assert subscription_key
headers = {
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': subscription_key,
}
r_delete_facelist = requests.delete(delete_facelist_url.format(faceListId),headers=headers)
```
```
r_delete_face
```
```
r_delete_facelist
```
### Face++
参考:[Face++ 人脸识别文档](https://console.faceplusplus.com.cn/documents/4888391)
- Face Detect
```python
# 1. Import the module we need.
import requests
api_secret = "TZIdg08jH1DAePKkATDb3F3lF-5mtHYU"
# 2. Our API key.
api_key = 'Gj0sRv0NDYzmRSWRoJWlE24t_3TTp8-z' # Replace with a valid Subscription Key here.
# 3. Target URL.
# A local image can also be used, e.g. filepath = "image/tupian.jpg"
BASE_URL = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
img_url = 'https://gitee.com/fanpeikun/my-ku/raw/master/f692a03cb7fbc3545eac264b18fc2367.jpeg'
# 4. Following the API docs' sample code, prepare the headers and the image data.
headers = {
'Content-Type': 'application/json',
}
# 5. Query-string parameters (everything after the '?').
payload = {
"image_url":img_url,
'api_key': api_key,
'api_secret': api_secret,
'return_attributes':'gender,age,smiling,emotion',
}
```
```
# 6. Send the request with requests.
r = requests.post(BASE_URL, params=payload, headers=headers)
```
```python
r.status_code
```
200
```
r.content
```
b'{"request_id":"1603513040,ea9484d1-0787-4b8a-a4bf-bb8d1fb337a6","time_used":832,"faces":[{"face_token":"29bfc50df9eab4f8267bab61dac2f67a","face_rectangle":{"top":367,"left":1072,"width":446,"height":446},"attributes":{"gender":{"value":"Female"},"age":{"value":22},"smile":{"value":0.003,"threshold":50.000},"emotion":{"anger":0.078,"disgust":0.030,"fear":0.216,"happiness":0.112,"neutral":94.108,"sadness":0.192,"surprise":5.264}}}],"image_id":"PcUHL7MmvExD1Vtt7YM5MQ==","face_num":1}\n'
```
# requests' convenient decoder (r is the response object).
results = r.json() # parse the JSON body
results
```
{'request_id': '1603513040,ea9484d1-0787-4b8a-a4bf-bb8d1fb337a6',
'time_used': 832,
'faces': [{'face_token': '29bfc50df9eab4f8267bab61dac2f67a',
'face_rectangle': {'top': 367, 'left': 1072, 'width': 446, 'height': 446},
'attributes': {'gender': {'value': 'Female'},
'age': {'value': 22},
'smile': {'value': 0.003, 'threshold': 50.0},
'emotion': {'anger': 0.078,
'disgust': 0.03,
'fear': 0.216,
'happiness': 0.112,
'neutral': 94.108,
'sadness': 0.192,
'surprise': 5.264}}}],
'image_id': 'PcUHL7MmvExD1Vtt7YM5MQ==',
'face_num': 1}
- FaceSet & Compare Face
```python
# Face++ credentials reused by all FaceSet examples below.
api_secret ="TZIdg08jH1DAePKkATDb3F3lF-5mtHYU" # NOTE(review): hard-coded secret
api_key ="Gj0sRv0NDYzmRSWRoJWlE24t_3TTp8-z" # Replace with a valid Subscription Key here.
```
```
# 1. FaceSet Create — prepare the request for creating a face set.
import requests,json
display_name = "fpk人脸集合" # display name of the face set
outer_id = "00001" # user-chosen identifier for the set
user_data = "star" # free-form user data attached to the set
CreateFace_Url = "https://api-cn.faceplusplus.com/facepp/v3/faceset/create"
# Query-string parameters for the create call.
payload = {
'api_key': api_key,
'api_secret': api_secret,
'display_name':display_name,
'outer_id':outer_id,
'user_data':user_data
}
```
```
r = requests.post(CreateFace_Url, params=payload)
```
```
r.json()
```
{'time_used': 90,
'error_message': 'FACESET_EXIST',
'request_id': '1603517793,84229343-ffd7-49a0-bb1d-b4431ca2bce6'}
```
# Fetch details of the face set identified by outer_id (yields the faceset_token).
GetDetail_Url = "https://api-cn.faceplusplus.com/facepp/v3/faceset/getdetail"
payload = {
'api_key': api_key,
'api_secret': api_secret,
'outer_id':outer_id,
}
```
```
r = requests.post(GetDetail_Url,params=payload)
```
```
r.json()
```
{'faceset_token': 'e76b57a9d37005c99f51c737454f87ab',
'tags': '',
'time_used': 99,
'user_data': 'star',
'display_name': 'fpk人脸集合',
'face_tokens': [],
'face_count': 0,
'request_id': '1603517795,43dbb13d-8a52-4a66-b04f-d80e80bed5eb',
'outer_id': '00001'}
```
# Add a face (by face_token) to the face set.
# NOTE(review): the URL literal starts with a stray space; the recorded response
# shows the call reached the API, but the space should be removed.
AddFace_url = " https://api-cn.faceplusplus.com/facepp/v3/faceset/addface"
payload = {
'api_key': api_key,
'api_secret': api_secret,
'faceset_token':'e76b57a9d37005c99f51c737454f87ab',
'face_tokens':'b0407b9e803ebd39d511cd7956fd5bf5',
}
```
```
r = requests.post(AddFace_url,params=payload)
```
```
r.json()
```
{'faceset_token': 'e76b57a9d37005c99f51c737454f87ab',
'time_used': 92,
'face_count': 0,
'face_added': 0,
'request_id': '1603517801,cb26f625-1b2a-459f-a695-f130656404a5',
'outer_id': '00001',
'failure_detail': [{'reason': 'INVALID_FACE_TOKEN',
'face_token': 'b0407b9e803ebd39d511cd7956fd5bf5'}]}
```
# Remove a face (by face_token) from the face set.
# NOTE(review): same stray leading space in the URL literal as the addface call.
RemoveFace_url = " https://api-cn.faceplusplus.com/facepp/v3/faceset/removeface"
payload = {
'api_key': api_key,
'api_secret': api_secret,
'faceset_token':'e76b57a9d37005c99f51c737454f87ab',
'face_tokens':'b0407b9e803ebd39d511cd7956fd5bf5',
}
```
```
r = requests.post(RemoveFace_url,params=payload)
```
```
r.json()
```
{'faceset_token': 'e76b57a9d37005c99f51c737454f87ab',
'face_removed': 0,
'time_used': 119,
'face_count': 0,
'request_id': '1603517802,63d37f43-fd4b-4fbd-95b5-90388e40a2aa',
'outer_id': '00001',
'failure_detail': [{'reason': 'FACE_NOT_IN_FACESET',
'face_token': 'b0407b9e803ebd39d511cd7956fd5bf5'}]}
```
# Update the face set's metadata (user_data).
Update_url = "https://api-cn.faceplusplus.com/facepp/v3/faceset/update"
payload = {
'api_key': api_key,
'api_secret': api_secret,
'faceset_token':'e76b57a9d37005c99f51c737454f87ab',
'user_data':"star",
}
```
```
r = requests.post(Update_url,params=payload)
```
```
r.json()
```
{'faceset_token': 'e76b57a9d37005c99f51c737454f87ab',
'request_id': '1603517803,0316d92c-0862-4c3f-8d72-d78396b5cdc2',
'time_used': 87,
'outer_id': '00001'}
```
# Image URLs used by the Compare call below.
yihao = "https://gitee.com/fanpeikun/my-ku/raw/master/41c37c09abd946e4da3abaf6f7f1234f.jpg"
erhao = "https://gitee.com/fanpeikun/my-ku/raw/master/20191214152337.jpg"
sanhao = "https://gitee.com/fanpeikun/my-ku/raw/master/20201024132926.jpg"
```
```
# Compare two face images and get a similarity confidence.
Compare_url = "https://api-cn.faceplusplus.com/facepp/v3/compare"
payload ={
'api_key': api_key,
'api_secret': api_secret,
'image_url1':yihao,
'image_url2':erhao
}
```
```
r = requests.post(Compare_url,params=payload)
```
```
r.json()
```
{'faces1': [{'face_rectangle': {'width': 312,
'top': 249,
'left': 644,
'height': 312},
'face_token': 'cd93b5251686aad32480088a2fbd4658'}],
'faces2': [{'face_rectangle': {'width': 367,
'top': 314,
'left': 293,
'height': 367},
'face_token': '5b5572512cd121310438404a1e50fcba'}],
'time_used': 1344,
'thresholds': {'1e-3': 62.327, '1e-5': 73.975, '1e-4': 69.101},
'confidence': 65.279,
'image_id2': 'D9w79c3tIgaPk2e3riQm9A==',
'image_id1': 'hNcqzVAJ99Odfr6PC7jH6g==',
'request_id': '1603518054,9171767c-b261-4d82-ac55-067aad0e3b78'}
### 百度智能云
参考:[百度智能云人脸识别文档](https://ai.baidu.com/ai-doc/FACE/yk37c1u4t)
```python
# encoding:utf-8
import requests
# client_id is the API key (AK) and client_secret is the secret key (SK) from the console.
# NOTE(review): credentials are embedded in the URL — hard-coded secrets.
host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=MLkobeqTMgBiYWf8szCzRxdu&client_secret=U6U38dSoMi4AEzQwVxGWrxWEAKFLubnV'
response = requests.get(host)
if response:
print(response.json())
```
{'refresh_token': '25.dea3d5c36926caa04d36ce7adec54b48.315360000.1918875070.282335-22868820', 'expires_in': 2592000, 'session_key': '9mzdWEnM/SHsvJ+Klzwf8ezSazKt885iu7kanyrUwoME4Ky0iFHyMEBvMFsZvAnAWnvrW6xnpP4l/A8vMpJ3xEHy6VPyGA==', 'access_token': '24.c9f9b7d09757b461cff713ea9e0c4f53.2592000.1606107070.282335-22868820', 'scope': 'public brain_all_scope vis-faceverify_faceverify_h5-face-liveness vis-faceverify_FACE_V3 vis-faceverify_idl_face_merge vis-faceverify_FACE_EFFECT vis-faceverify_face_feature_sdk wise_adapt lebo_resource_base lightservice_public hetu_basic lightcms_map_poi kaidian_kaidian ApsMisTest_Test权限 vis-classify_flower lpq_开放 cop_helloScope ApsMis_fangdi_permission smartapp_snsapi_base smartapp_mapp_dev_manage iop_autocar oauth_tp_app smartapp_smart_game_openapi oauth_sessionkey smartapp_swanid_verify smartapp_opensource_openapi smartapp_opensource_recapi fake_face_detect_开放Scope vis-ocr_虚拟人物助理 idl-video_虚拟人物助理 smartapp_component', 'session_secret': '42e3990d51d19d485a0721d223b3069e'}
```
# Face detection and attribute analysis.
request_url = "https://aip.baidubce.com/rest/2.0/face/v3/detect"
# Request body is a pre-serialised JSON string (image URL plus the fields to return).
params = "{\"image\":\"https://gitee.com/fanpeikun/my-ku/raw/master/f692a03cb7fbc3545eac264b18fc2367.jpeg\",\"image_type\":\"URL\",\"face_field\":\"faceshape,facetype\"}"
# access_token obtained from the auth call above.
access_token = '24.c9f9b7d09757b461cff713ea9e0c4f53.2592000.1606107070.282335-22868820'
request_url = request_url + "?access_token=" + access_token
headers = {'content-type': 'application/json'}
response = requests.post(request_url, data=params, headers=headers)
if response:
print (response.json())
```
{'error_code': 0, 'error_msg': 'SUCCESS', 'log_id': 2575551565001, 'timestamp': 1603515794, 'cached': 0, 'result': {'face_num': 1, 'face_list': [{'face_token': '84efc818d35f82978e426600a99bbde5', 'location': {'left': 1067.31, 'top': 375.19, 'width': 440, 'height': 446, 'rotation': -2}, 'face_probability': 1, 'angle': {'yaw': 3.13, 'pitch': 7.6, 'roll': -4.03}, 'face_shape': {'type': 'oval', 'probability': 0.7}, 'face_type': {'type': 'human', 'probability': 0.99}}]}}
```
# Face comparison (1:1 match between two images).
request_url = "https://aip.baidubce.com/rest/2.0/face/v3/match"
params = "[{\"image\": \"https://gitee.com/fanpeikun/my-ku/raw/master/20191214152337.jpg\", \"image_type\": \"URL\", \"face_type\": \"CERT\", \"quality_control\": \"LOW\"}, {\"image\": \"https://gitee.com/fanpeikun/my-ku/raw/master/41c37c09abd946e4da3abaf6f7f1234f.jpg\", \"image_type\": \"URL\", \"face_type\": \"LIVE\", \"quality_control\": \"LOW\"}]"
# face_type is the category of the face image: LIVE; IDCARD; WATERMARK; CERT; INFRARED.
access_token = '24.c9f9b7d09757b461cff713ea9e0c4f53.2592000.1606107070.282335-22868820' # token obtained from the auth endpoint
request_url = request_url + "?access_token=" + access_token
headers = {'content-type': 'application/json'}
response = requests.post(request_url, data=params, headers=headers)
response.json()
```
{'error_code': 0,
'error_msg': 'SUCCESS',
'log_id': 520115997915,
'timestamp': 1603517697,
'cached': 0,
'result': {'score': 76.50760651,
'face_list': [{'face_token': '03f6a93605cf596adbdec9efc1901897'},
{'face_token': 'c5e40a06bd2db3614e8cd8a65da7d34d'}]}}
---
## 二、计算机视觉 <a name="vision"></a>
参考:[Azure 计算机视觉文档](https://docs.microsoft.com/zh-cn/azure/cognitive-services/computer-vision/)
### 1. 分析远程图像
```python
# 1. Analyze a remote image with the Computer Vision Analyze endpoint.
import requests
%matplotlib inline
import matplotlib.pyplot as plt
import json
from PIL import Image
from io import BytesIO
endpoint = "https://fpk223.cognitiveservices.azure.com/"
subscription_key = "835af6a3699344dd84bb9d884634070a"
# base url
analyze_url = endpoint+ "vision/v3.1/analyze"
# Set image_url to the URL of an image that you want to analyze.
image_url = "https://gitee.com/fanpeikun/my-ku/raw/master/b923e414e2fc1488fd1365231a547c43.jpg"
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
# Visual features to return.
params = {'visualFeatures': 'Categories,Description,Color'}
# Request body.
data = {'url': image_url}
response = requests.post(analyze_url, headers=headers,params=params, json=data)
response.raise_for_status()
# The 'analysis' object contains various fields that describe the image. The most
# relevant caption for the image is obtained from the 'description' property.
analysis = response.json()
print(json.dumps(response.json()))
image_caption = analysis["description"]["captions"][0]["text"].capitalize()
# Display the image and overlay it with the caption.
image = Image.open(BytesIO(requests.get(image_url).content))
plt.imshow(image)
plt.axis("off")
_ = plt.title(image_caption, size="x-large", y=-0.1)
plt.show()
```
{"categories": [{"name": "building_", "score": 0.58984375, "detail": {"landmarks": [{"name": "United States Capitol", "confidence": 0.9931780099868774}]}}, {"name": "outdoor_", "score": 0.015625, "detail": {"landmarks": [{"name": "United States Capitol", "confidence": 0.9931780099868774}]}}], "color": {"dominantColorForeground": "Grey", "dominantColorBackground": "Black", "dominantColors": ["Black"], "accentColor": "066B9F", "isBwImg": false, "isBWImg": false}, "description": {"tags": ["grass", "outdoor", "building", "government building", "big"], "captions": [{"text": "a large white building with a dome with United States Capitol in the background", "confidence": 0.39791345596313477}]}, "requestId": "05b42c54-8811-44e3-8eb4-e4c527619d5b", "metadata": {"height": 2252, "width": 3379, "format": "Jpeg"}}

### 2.生成本地图片
```
# 2. Analyze a LOCAL image (raw bytes POSTed as application/octet-stream).
# NOTE(review): this cell failed when recorded — 'guangzhouta.jpg' was not present
# (see the FileNotFoundError traceback below). It also relies on the
# subscription_key defined in the previous cell.
import os
import sys
import requests
# If you are using a Jupyter notebook, uncomment the following line.
# %matplotlib inline
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
# Add your Computer Vision subscription key and endpoint to your environment variables.
#if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ:
# subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']
# print("\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**")
# sys.exit()
#if 'COMPUTER_VISION_ENDPOINT' in os.environ:
# endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
analyze_url = "https://fpk223.cognitiveservices.azure.com/vision/v3.1/analyze"
# Set image_path to the local path of an image that you want to analyze.
# Sample images are here, if needed:
# https://github.com/Azure-Samples/cognitive-services-sample-data-files/tree/master/ComputerVision/Images
image_path = 'guangzhouta.jpg'
# Read the image into a byte array
image_data = open(image_path, "rb").read()
headers = {'Ocp-Apim-Subscription-Key': subscription_key,
'Content-Type': 'application/octet-stream'}
params = {'visualFeatures': 'Categories,Description,Color'}
response = requests.post(
analyze_url, headers=headers, params=params, data=image_data)
response.raise_for_status()
# The 'analysis' object contains various fields that describe the image. The most
# relevant caption for the image is obtained from the 'description' property.
analysis = response.json()
print(analysis)
image_caption = analysis["description"]["captions"][0]["text"].capitalize()
# Display the image and overlay it with the caption.
image = Image.open(BytesIO(image_data))
plt.imshow(image)
plt.axis("off")
_ = plt.title(image_caption, size="x-large", y=-0.1)
plt.show()
```
[1;31m---------------------------------------------------------------------------[0m
[1;31mFileNotFoundError[0m Traceback (most recent call last)
[1;32m[0m in [0;36m[1;34m[0m
[0;32m 25[0m [1;33m[0m[0m
[0;32m 26[0m [1;31m# Read the image into a byte array[0m[1;33m[0m[1;33m[0m[1;33m[0m[0m
[1;32m---> 27[1;33m [0mimage_data[0m [1;33m=[0m [0mopen[0m[1;33m([0m[0mimage_path[0m[1;33m,[0m [1;34m"rb"[0m[1;33m)[0m[1;33m.[0m[0mread[0m[1;33m([0m[1;33m)[0m[1;33m[0m[1;33m[0m[0m
[0m[0;32m 28[0m headers = {'Ocp-Apim-Subscription-Key': subscription_key,
[0;32m 29[0m 'Content-Type': 'application/octet-stream'}
[1;31mFileNotFoundError[0m: [Errno 2] No such file or directory: 'guangzhouta.jpg'
#### (此项失败↑)
### 3.生成缩略图
```
# 3. Generate a smart-cropped thumbnail and display it.
import os
import sys
import requests
# If you are using a Jupyter notebook, uncomment the following line.
# %matplotlib inline
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
# Add your Computer Vision subscription key and endpoint to your environment variables.
# if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ:
# subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']
# else:
# print("\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**")
# sys.exit()
# if 'COMPUTER_VISION_ENDPOINT' in os.environ:
# endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
thumbnail_url = "https://fpk223.cognitiveservices.azure.com/" + "vision/v3.1/generateThumbnail"
# Set image_url to the URL of an image that you want to analyze.
image_url = "https://gitee.com/fanpeikun/my-ku/raw/master/b923e414e2fc1488fd1365231a547c43.jpg"
headers = {'Ocp-Apim-Subscription-Key': "dd748cf10bf9404399e5416d9399e218"}
params = {'width': '100', 'height': '100', 'smartCropping': 'true'}
data = {'url': image_url}
response = requests.post(thumbnail_url, headers=headers,
params=params, json=data)
response.raise_for_status()
# The response body is the binary thumbnail image itself.
thumbnail = Image.open(BytesIO(response.content))
# Display the thumbnail.
plt.imshow(thumbnail)
plt.axis("off")
# Verify the thumbnail size.
print("Thumbnail is {0}-by-{1}".format(*thumbnail.size))
```
Thumbnail is 100-by-100

### 4.提取文本(读取API)
```
# 4. Extract text with the Read API: submit the image, then poll the
# Operation-Location URL until the analysis completes.
import json
import os
import sys
import requests
import time
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from PIL import Image
from io import BytesIO
text_recognition_url ="https://fpk223.cognitiveservices.azure.com/vision/v3.0/read/analyze"
# Set image_url to the URL of an image that you want to recognize.
image_url = "https://gitee.com/fanpeikun/my-ku/raw/master/754c60eb157915cd97e39038da334223.jpg"
headers = {'Ocp-Apim-Subscription-Key': "835af6a3699344dd84bb9d884634070a"}
data = {'url': image_url}
response = requests.post(text_recognition_url, headers=headers, json=data)
response.raise_for_status()
# Extracting text requires two API calls: One call to submit the
# image for processing, the other to retrieve the text found in the image.
# Holds the URI used to retrieve the recognized text.
operation_url = response.headers["Operation-Location"]
# The recognized text isn't immediately available, so poll to wait for completion.
analysis = {}
poll = True
while (poll):
response_final = requests.get(
response.headers["Operation-Location"], headers=headers)
analysis = response_final.json()
print(json.dumps(analysis, indent=4))
time.sleep(1)
# Stop polling once the result is present or the operation failed.
if ("analyzeResult" in analysis):
poll = False
if ("status" in analysis and analysis['status'] == 'failed'):
poll = False
polygons = []
if ("analyzeResult" in analysis):
# Extract the recognized text, with bounding boxes.
polygons = [(line["boundingBox"], line["text"])
for line in analysis["analyzeResult"]["readResults"][0]["lines"]]
# Display the image and overlay it with the extracted text.
image = Image.open(BytesIO(requests.get(image_url).content))
ax = plt.imshow(image)
for polygon in polygons:
vertices = [(polygon[0][i], polygon[0][i+1])
for i in range(0, len(polygon[0]), 2)]
text = polygon[1]
patch = Polygon(vertices, closed=True, fill=False, linewidth=2, color='y')
ax.axes.add_patch(patch)
plt.text(vertices[0][0], vertices[0][1], text, fontsize=20, va="top")
plt.show()
```
{
"status": "running",
"createdDateTime": "2020-10-24T08:47:46Z",
"lastUpdatedDateTime": "2020-10-24T08:47:46Z"
}
{
"status": "succeeded",
"createdDateTime": "2020-10-24T08:47:46Z",
"lastUpdatedDateTime": "2020-10-24T08:47:46Z",
"analyzeResult": {
"version": "3.0.0",
"readResults": [
{
"page": 1,
"angle": 0,
"width": 500,
"height": 500,
"unit": "pixel",
"lines": []
}
]
}
}

### 5.提取文本(OCR API)
```
# 5. Extract text with the OCR API and overlay word bounding boxes.
import os
import sys
import requests
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from PIL import Image
from io import BytesIO
ocr_url ="https://fpk223.cognitiveservices.azure.com/vision/v3.1/ocr"
# Set image_url to the URL of an image that you want to analyze.
image_url = "https://gitee.com/fanpeikun/my-ku/raw/master/9c7c1ec7fdba64403d615487dfcb47bf.jpg"
headers = {'Ocp-Apim-Subscription-Key': "835af6a3699344dd84bb9d884634070a"}
params = {'language': 'unk', 'detectOrientation': 'true'}
data = {'url': image_url}
response = requests.post(ocr_url, headers=headers, params=params, json=data)
response.raise_for_status()
analysis = response.json()
# Extract the word bounding boxes and text.
line_infos = [region["lines"] for region in analysis["regions"]]
word_infos = []
for line in line_infos:
for word_metadata in line:
for word_info in word_metadata["words"]:
word_infos.append(word_info)
word_infos
# Display the image and overlay it with the extracted text.
plt.figure(figsize=(5, 5))
image = Image.open(BytesIO(requests.get(image_url).content))
ax = plt.imshow(image, alpha=0.5)
for word in word_infos:
# boundingBox is a comma-separated "left,top,width,height" string.
bbox = [int(num) for num in word["boundingBox"].split(",")]
text = word["text"]
origin = (bbox[0], bbox[1])
patch = Rectangle(origin, bbox[2], bbox[3],
fill=False, linewidth=2, color='y')
ax.axes.add_patch(patch)
plt.text(origin[0], origin[1], text, fontsize=20, weight="bold", va="top")
plt.show()
# NOTE(review): this runs after plt.show(), so it doesn't affect the figure
# already rendered (its return tuple is what the notebook echoed below);
# it should presumably be placed before plt.show() — confirm and move.
plt.axis("off")
```

(0.0, 1.0, 0.0, 1.0)
### 6.使用域模型
#### 6.1 地标
```
import os
import sys
import requests
%matplotlib inline
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
landmark_analyze_url ="https://fpk223.cognitiveservices.azure.com/vision/v3.1/models/landmarks/analyze"
# Set image_url to the URL of an image that you want to analyze.
image_url = "https://gitee.com/fanpeikun/my-ku/raw/master/b923e414e2fc1488fd1365231a547c43.jpg"
headers = {'Ocp-Apim-Subscription-Key': "835af6a3699344dd84bb9d884634070a"}
params = {'model': 'landmarks'}
data = {'url': image_url}
response = requests.post(
landmark_analyze_url, headers=headers, params=params, json=data)
response.raise_for_status()
# The 'analysis' object contains various fields that describe the image. The
# most relevant landmark for the image is obtained from the 'result' property.
analysis = response.json()
assert analysis["result"]["landmarks"] is not []
print(analysis)
landmark_name = analysis["result"]["landmarks"][0]["name"].capitalize()
# Display the image and overlay it with the landmark name.
image = Image.open(BytesIO(requests.get(image_url).content))
plt.imshow(image)
plt.axis("off")
_ = plt.title(landmark_name, size="x-large", y=-0.1)
plt.show()
```
{'result': {'landmarks': [{'name': 'United States Capitol', 'confidence': 0.9931780099868774}]}, 'requestId': '667ed1ff-6666-477c-9ba1-404efb533dfa', 'metadata': {'height': 2252, 'width': 3379, 'format': 'Jpeg'}}

#### 6.2 名人
```
import requests
%matplotlib inline
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
# Replace with your valid subscription key.
subscription_key = "835af6a3699344dd84bb9d884634070a"
assert subscription_key
vision_base_url = "https://fpk223.cognitiveservices.azure.com//vision/v2.1/"
celebrity_analyze_url = vision_base_url + "models/celebrities/analyze"
# Set image_url to the URL of an image that you want to analyze.
image_url = "https://gitee.com/fanpeikun/my-ku/raw/master/24dff14d565dbe6addffb4005136cf09.jpg"
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {'model': 'celebrities'}
data = {'url': image_url}
response = requests.post(
celebrity_analyze_url, headers=headers, params=params, json=data)
response.raise_for_status()
# The 'analysis' object contains various fields that describe the image. The
# most relevant celebrity for the image is obtained from the 'result' property.
analysis = response.json()
assert analysis["result"]["celebrities"] is not []
print(analysis)
celebrity_name = analysis["result"]["celebrities"][0]["name"].capitalize()
# Display the image and overlay it with the celebrity name.
image = Image.open(BytesIO(requests.get(image_url).content))
plt.imshow(image)
plt.axis("off")
_ = plt.title(celebrity_name, size="x-large", y=-0.1)
plt.show()
```

---
## 三、学习心得 <a name="learn"></a>
我感觉API这门课程是目前所有课程中最难学的,不仅要求我们有阅读API文档的能力,还要敲比以往多得多的代码,这也要求我们学好python语言这门课程。要掌握这门课没那么容易,所以在认真听课之于一定要花更多的时间去练习。在这次作业中我尝试了Azure、Face++和百度API,发现其实不管是哪个平台,操作方法和功能都大致相同,只要把我们自定义的数据填入相应位置就能实现相应的功能。