# API作业_1 **Repository Path**: ss-1621/api-job ## Basic Information - **Project Name**: API作业_1 - **Description**: No description available - **Primary Language**: Unknown - **License**: Not specified - **Default Branch**: master - **Homepage**: None - **GVP Project**: No ## Statistics - **Stars**: 0 - **Forks**: 0 - **Created**: 2020-10-24 - **Last Updated**: 2020-12-19 ## Categories & Tags **Categories**: Uncategorized **Tags**: None ## README # API作业 1. 三种(两种已提供/再找一种练习)及以上不同API平台人脸识别(面部检测/人脸集合) 2. 计算机视觉(Azure API)所有的功能 3. 学习心得(总结)。 1. 三种(两种已提供/再找一种练习)及以上不同API平台人脸识别(面部检测/人脸集合) * Azure In: ``` # A-1 面部检测 import requests # json import json # set to your own subscription key value subscription_key = "85019b1c0337446294c317daeb3f435d" assert subscription_key # replace with the string from your endpoint URL face_api_url = 'https://api-xls.cognitiveservices.azure.com/face/v1.0/detect' # 请求正文body image_url = 'https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1602696306239&di=2fa54217dc9275ebe61584eee793e86c&imgtype=0&src=http%3A%2F%2Fpic1.win4000.com%2Fwallpaper%2F2017-12-22%2F5a3c6bd5a708b.jpg' headers = {'Ocp-Apim-Subscription-Key': subscription_key} # 请求参数parameters params = { 'returnFaceId': 'true', 'returnFaceLandmarks': 'false', # 可选参数,请仔细阅读API文档 'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise', } response = requests.post(face_api_url, params=params, headers=headers, json={"url": image_url}) # json.dumps 将json--->bytes response ``` Out: ``` ``` In: ``` # bytes or string 编码不同 ***** encoding ="utf-8" # response.text # response.content results = response.json() ``` In: ``` results ``` Out: ``` [{'faceId': '226f1db3-9a13-42ed-ac61-f0f717966124', 'faceRectangle': {'top': 188, 'left': 716, 'width': 191, 'height': 191}, 'faceAttributes': {'smile': 0.0, 'headPose': {'pitch': -6.4, 'roll': -7.5, 'yaw': 16.0}, 'gender': 'female', 'age': 21.0, 'facialHair': {'moustache': 
0.0, 'beard': 0.0, 'sideburns': 0.0}, 'glasses': 'NoGlasses', 'emotion': {'anger': 0.0, 'contempt': 0.0, 'disgust': 0.0, 'fear': 0.0, 'happiness': 0.0, 'neutral': 0.81, 'sadness': 0.19, 'surprise': 0.0}, 'blur': {'blurLevel': 'low', 'value': 0.08}, 'exposure': {'exposureLevel': 'goodExposure', 'value': 0.64}, 'noise': {'noiseLevel': 'low', 'value': 0.0}, 'makeup': {'eyeMakeup': True, 'lipMakeup': True}, 'accessories': [], 'occlusion': {'foreheadOccluded': False, 'eyeOccluded': False, 'mouthOccluded': False}, 'hair': {'bald': 0.02, 'invisible': False, 'hairColor': [{'color': 'brown', 'confidence': 1.0}, {'color': 'blond', 'confidence': 0.93}, {'color': 'red', 'confidence': 0.53}, {'color': 'gray', 'confidence': 0.1}, {'color': 'black', 'confidence': 0.05}, {'color': 'other', 'confidence': 0.03}, {'color': 'white', 'confidence': 0.0}]}}}] ``` In: ``` # A-2 results = response.json() results ``` Out: ``` [{'faceId': '226f1db3-9a13-42ed-ac61-f0f717966124', 'faceRectangle': {'top': 188, 'left': 716, 'width': 191, 'height': 191}, 'faceAttributes': {'smile': 0.0, 'headPose': {'pitch': -6.4, 'roll': -7.5, 'yaw': 16.0}, 'gender': 'female', 'age': 21.0, 'facialHair': {'moustache': 0.0, 'beard': 0.0, 'sideburns': 0.0}, 'glasses': 'NoGlasses', 'emotion': {'anger': 0.0, 'contempt': 0.0, 'disgust': 0.0, 'fear': 0.0, 'happiness': 0.0, 'neutral': 0.81, 'sadness': 0.19, 'surprise': 0.0}, 'blur': {'blurLevel': 'low', 'value': 0.08}, 'exposure': {'exposureLevel': 'goodExposure', 'value': 0.64}, 'noise': {'noiseLevel': 'low', 'value': 0.0}, 'makeup': {'eyeMakeup': True, 'lipMakeup': True}, 'accessories': [], 'occlusion': {'foreheadOccluded': False, 'eyeOccluded': False, 'mouthOccluded': False}, 'hair': {'bald': 0.02, 'invisible': False, 'hairColor': [{'color': 'brown', 'confidence': 1.0}, {'color': 'blond', 'confidence': 0.93}, {'color': 'red', 'confidence': 0.53}, {'color': 'gray', 'confidence': 0.1}, {'color': 'black', 'confidence': 0.05}, {'color': 'other', 'confidence': 0.03}, 
{'color': 'white', 'confidence': 0.0}]}}}] ``` In: ``` import requests # 1、create 列表 # faceListId faceListId = "lili" #学生填写 create_facelists_url = "https://api-xls.cognitiveservices.azure.com/face/v1.0/facelists/{}" #学生填写 subscription_key = "85019b1c0337446294c317daeb3f435d" #学生填写 assert subscription_key headers = { # Request headers 'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': subscription_key, } data = { "name": "sample_list", "userData": "xiangce", "recognitionModel": "recognition_03"# 学生填写 } r_create = requests.put(create_facelists_url.format(faceListId),headers=headers,json=data) #学生填写 ``` In: ``` r_create.content ``` Out: ``` b'' ``` In: ``` # 已经成功创建facelist(云端/云计算) ``` In: ``` #先加一张脸试试 # 2、Add face add_face_url = "https://api-xls.cognitiveservices.azure.com/face/v1.0/facelists/{}/persistedfaces" #学生填写 assert subscription_key headers = { # Request headers 'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': subscription_key, } img_url = "https://www.apple.com.cn/newsroom/images/live-action/keynote-september-2020/apple_apple-event-keynote_tim_09152020_big.jpg.large_2x.jpg" #学生填写 params_add_face={ "userData":"user1" #学生填写 } r_add_face = requests.post(add_face_url.format(faceListId),headers=headers,params=params_add_face,json={"url":img_url}) ``` In: ``` r_add_face.status_code ``` Out: ``` 200 ``` In: ``` r_add_face.json()#返回persistedFaceId ``` Out: ``` {'persistedFaceId': 'e660083c-a9a0-4d74-b2c3-19323290ed2e'} ``` In: ``` # 封装成函数方便添加图片 def AddFace(img_url=str,userData=str): add_face_url ="https://api-xls.cognitiveservices.azure.com/face/v1.0/facelists/{}/persistedFaces" assert subscription_key headers = { # Request headers 'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': subscription_key, } img_url = img_url params_add_face={ "userData":userData } r_add_face = requests.post(add_face_url.format(faceListId),headers=headers,params=params_add_face,json={"url":img_url}) return r_add_face.status_code#返回出状态码 ``` In: ``` 
AddFace("http://huangjieqi.gitee.io/picture_storage/Autumnhui.jpg","丘天惠") AddFace("http://huangjieqi.gitee.io/picture_storage/L-Tony-info.jpg","林嘉茵") AddFace("http://huangjieqi.gitee.io/picture_storage/TLINGP.jpg","汤玲萍") AddFace("http://huangjieqi.gitee.io/picture_storage/WenYanZeng.jpg","曾雯燕") AddFace("http://huangjieqi.gitee.io/picture_storage/XIEIC.jpg","谢依希") AddFace("http://huangjieqi.gitee.io/picture_storage/YuecongYang.png","杨悦聪") AddFace("http://huangjieqi.gitee.io/picture_storage/Zoezhouyu.jpg","周雨") AddFace("http://huangjieqi.gitee.io/picture_storage/crayon-heimi.jpg","刘瑜鹏") AddFace("http://huangjieqi.gitee.io/picture_storage/jiayichen.jpg","陈嘉仪") AddFace("http://huangjieqi.gitee.io/picture_storage/kg2000.jpg","徐旖芊") AddFace("http://huangjieqi.gitee.io/picture_storage/liuxinrujiayou.jpg","刘心如") AddFace("http://huangjieqi.gitee.io/picture_storage/liuyu19.png","刘宇") AddFace("http://huangjieqi.gitee.io/picture_storage/ltco.jpg","李婷") AddFace("http://huangjieqi.gitee.io/picture_storage/lucaszy.jpg","黄智毅") AddFace("http://huangjieqi.gitee.io/picture_storage/pingzi0211.jpg","黄慧文") AddFace("http://huangjieqi.gitee.io/picture_storage/shmimy-cn.jpg","张铭睿") AddFace("http://huangjieqi.gitee.io/picture_storage/yichenting.jpg","陈婷") AddFace("http://huangjieqi.gitee.io/picture_storage/coco022.jpg","洪可凡") AddFace("http://huangjieqi.gitee.io/picture_storage/lujizhi.png","卢继志") AddFace("http://huangjieqi.gitee.io/picture_storage/zzlhyy.jpg","张梓乐") ``` Out: ``` 200 ``` In: ``` # Get facelist get_facelist_url = "https://api-xls.cognitiveservices.azure.com/face/v1.0/facelists/{}"#学生填写 r_get_facelist = requests.get(get_facelist_url.format(faceListId),headers=headers)#学生填写 r_get_facelist.json() ``` Out: ``` {'persistedFaces': [{'persistedFaceId': 'e660083c-a9a0-4d74-b2c3-19323290ed2e', 'userData': 'user1'}, {'persistedFaceId': '0f3d6bf5-809f-4ee6-b43b-16c3d1b6206e', 'userData': '丘天惠'}, {'persistedFaceId': 'c861f87d-e864-472c-8997-b146aefafd4b', 'userData': '林嘉茵'}, 
{'persistedFaceId': '07c4269d-820d-405f-9517-b64f93b39724', 'userData': '汤玲萍'}, {'persistedFaceId': '918a185e-fdf2-48d5-aad9-1786ddda7b59', 'userData': '曾雯燕'}, {'persistedFaceId': '63cfb8d8-7126-468f-b91f-82f08df2070f', 'userData': '谢依希'}, {'persistedFaceId': 'fba87f85-f189-4298-aeb9-f46ca2239b8a', 'userData': '杨悦聪'}, {'persistedFaceId': 'b4b50c05-85ec-45d6-9bdb-914c2c9cc819', 'userData': '周雨'}, {'persistedFaceId': 'b9bf69c6-4c83-44e7-a45a-b33cb905d1a1', 'userData': '刘瑜鹏'}, {'persistedFaceId': '3ef64e6a-6cb0-49a1-94e8-2746f16fe091', 'userData': '陈嘉仪'}, {'persistedFaceId': '0963b891-69e9-41e5-983f-6d47b1c80433', 'userData': '徐旖芊'}, {'persistedFaceId': 'b43ebc6e-e4dd-4e51-91a6-c43015da3bf8', 'userData': '刘心如'}, {'persistedFaceId': '9790adda-77a7-458b-894d-df72b534dc20', 'userData': '刘宇'}, {'persistedFaceId': 'fa0e36cd-c269-46a6-9917-e3aaae7bf568', 'userData': '李婷'}, {'persistedFaceId': '11b0289a-680c-4378-a63a-aa5ccec5c06d', 'userData': '黄智毅'}, {'persistedFaceId': 'a3a92511-2733-4de6-ae37-69dac0b333cf', 'userData': '黄慧文'}, {'persistedFaceId': '321a2c85-28d9-4577-ae67-5a0991999312', 'userData': '张铭睿'}, {'persistedFaceId': 'a6b42b73-3092-4748-bc98-3754b7087fdb', 'userData': '陈婷'}, {'persistedFaceId': 'a60562d9-dc0b-478a-8538-630a37d84d36', 'userData': '洪可凡'}, {'persistedFaceId': '94e3ae3a-82b0-4f4a-a93e-85083cc36284', 'userData': '丘天惠'}, {'persistedFaceId': '0c345b11-b262-41d4-9c14-dd8ba03f5bd6', 'userData': '林嘉茵'}, {'persistedFaceId': '4752b94e-5d26-4948-b6d0-325f3a7fbd93', 'userData': '汤玲萍'}, {'persistedFaceId': 'f83432b2-40a7-4fb6-97db-d82535544e0d', 'userData': '曾雯燕'}, {'persistedFaceId': '6f1057c9-7238-4629-bc1a-953637f523bf', 'userData': '谢依希'}, {'persistedFaceId': '551ea1ad-895f-4d0f-bc55-cc8b072dcd90', 'userData': '杨悦聪'}, {'persistedFaceId': '037d62aa-1ed5-4e77-98c9-6dc5d6a8f6d8', 'userData': '周雨'}, {'persistedFaceId': '4f615d23-ea3f-4166-85cf-61b33f2157b0', 'userData': '刘瑜鹏'}, {'persistedFaceId': 'fe964a1e-f00d-405c-ab51-66355b2dc1af', 'userData': '陈嘉仪'}, 
{'persistedFaceId': '3740f23f-ebc4-4e92-9d54-81faff5c95b4', 'userData': '徐旖芊'}, {'persistedFaceId': '045e8b48-6f7f-4946-8f7d-0394dd3d4ca9', 'userData': '刘心如'}, {'persistedFaceId': 'e600dce1-cabe-4968-afe5-805199703a58', 'userData': '刘宇'}, {'persistedFaceId': 'b2156429-33bb-48f1-9d10-ced52a5ae4fa', 'userData': '李婷'}, {'persistedFaceId': 'b9d5040a-ac5a-4150-af7e-8d7dc3134704', 'userData': '黄智毅'}, {'persistedFaceId': 'af639c9c-b0f9-4c2c-9c60-21688261ec7c', 'userData': '黄慧文'}, {'persistedFaceId': '1b90edc1-40af-4dcb-9ac2-6413adfc860e', 'userData': '张铭睿'}, {'persistedFaceId': '7d318c64-2241-4ba6-a097-f9b7a733b59e', 'userData': '陈婷'}, {'persistedFaceId': '6ec635b4-55d5-4b58-8c57-798898b21646', 'userData': '洪可凡'}, {'persistedFaceId': '055bef44-4db4-4c45-ae8d-9ac027cc349d', 'userData': '卢继志'}, {'persistedFaceId': '1a82f5c0-d922-4066-be80-0141cf9df894', 'userData': '张梓乐'}], 'faceListId': 'lili', 'name': 'sample_list', 'userData': 'xiangce'} ``` In: ``` faceListId ="lili" #删除人脸列表 # 学生填写 ``` In: ``` # Detect face 删除列表内人脸id delete_face_url ="https://api-xls.cognitiveservices.azure.com/face/v1.0/facelists/{}/persistedfaces/{}"#学生填写 assert subscription_key persistedFaceId = r_add_face.json()["persistedFaceId"] # 直接取上面获得的ID{'persistedFaceId': '85019b1c0337446294c317daeb3f435d'} headers = { # Request headers 'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': subscription_key, } # 注意requests请求为delete r_delete_face = requests.delete(delete_face_url.format(faceListId,persistedFaceId),headers=headers)#学生填写 ``` In: ``` r_delete_face ``` Out: ``` ``` In: ``` # Detect face 删除人脸列表 delete_facelist_url ="https://api-xls.cognitiveservices.azure.com/face/v1.0/facelists/{}"#学生填写 assert subscription_key headers = { # Request headers 'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': subscription_key, } # format内直接删除 # 注意requests请求为delete r_delete_facelist = requests.delete(delete_facelist_url.format(faceListId),headers=headers)#学生填写 ``` In: ``` r_delete_facelist ``` 
Out: ``` ``` In: ``` # 3、检测人脸的id # replace with the string from your endpoint URL face_api_url = 'https://api-xls.cognitiveservices.azure.com/face/v1.0/detect' # 请求正文 image_url = 'https://www.apple.com.cn/newsroom/images/live-action/keynote-september-2020/apple_apple-event-keynote_tim_09152020_big.jpg.large_2x.jpg' headers = {'Ocp-Apim-Subscription-Key': subscription_key} # 请求参数 params = { 'returnFaceId': 'true', 'returnFaceLandmarks': 'false', # 选择model 'recognitionModel':'recognition_03',#此参数需与facelist参数一致 'detectionModel':'detection_01', # 可选参数,请仔细阅读API文档 'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise', } response = requests.post(face_api_url, params=params,headers=headers, json={"url": image_url}) # json.dumps 将json--->字符串 response.json() ``` Out: ``` [{'faceId': '2e4be80f-0a49-4b7d-b662-1753a64f9dbb', 'faceRectangle': {'top': 121, 'left': 571, 'width': 146, 'height': 146}, 'faceAttributes': {'smile': 0.005, 'headPose': {'pitch': 1.3, 'roll': -7.5, 'yaw': -7.0}, 'gender': 'male', 'age': 60.0, 'facialHair': {'moustache': 0.1, 'beard': 0.1, 'sideburns': 0.1}, 'glasses': 'ReadingGlasses', 'emotion': {'anger': 0.003, 'contempt': 0.002, 'disgust': 0.002, 'fear': 0.0, 'happiness': 0.005, 'neutral': 0.986, 'sadness': 0.001, 'surprise': 0.0}, 'blur': {'blurLevel': 'low', 'value': 0.16}, 'exposure': {'exposureLevel': 'goodExposure', 'value': 0.67}, 'noise': {'noiseLevel': 'low', 'value': 0.0}, 'makeup': {'eyeMakeup': False, 'lipMakeup': False}, 'accessories': [{'type': 'glasses', 'confidence': 0.99}], 'occlusion': {'foreheadOccluded': False, 'eyeOccluded': False, 'mouthOccluded': False}, 'hair': {'bald': 0.44, 'invisible': False, 'hairColor': [{'color': 'gray', 'confidence': 1.0}, {'color': 'blond', 'confidence': 0.73}, {'color': 'other', 'confidence': 0.67}, {'color': 'black', 'confidence': 0.57}, {'color': 'brown', 'confidence': 0.03}, {'color': 'red', 'confidence': 0.01}, {'color': 
'white', 'confidence': 0.0}]}}}] ``` In: ``` findsimilars_url = "https://api-xls.cognitiveservices.azure.com/face/v1.0/findsimilars" # 请求正文 faceId需要先检测一张照片获取 data_findsimilars = { "faceId":"2e4be80f-0a49-4b7d-b662-1753a64f9dbb",#取上方的faceID "faceListId": "lili", "maxNumOfCandidatesReturned": 10, "mode": "matchFace"#matchPerson #一种为验证模式,一种为相似值模式 } r_findsimilars = requests.post(findsimilars_url,headers=headers,json=data_findsimilars) ``` In: ``` r_findsimilars.json() ``` Out: ``` [{'persistedFaceId': 'e660083c-a9a0-4d74-b2c3-19323290ed2e', 'confidence': 1.0}, {'persistedFaceId': '07c4269d-820d-405f-9517-b64f93b39724', 'confidence': 0.0995}, {'persistedFaceId': '4752b94e-5d26-4948-b6d0-325f3a7fbd93', 'confidence': 0.0995}, {'persistedFaceId': 'fba87f85-f189-4298-aeb9-f46ca2239b8a', 'confidence': 0.09931}, {'persistedFaceId': '551ea1ad-895f-4d0f-bc55-cc8b072dcd90', 'confidence': 0.09931}, {'persistedFaceId': 'b43ebc6e-e4dd-4e51-91a6-c43015da3bf8', 'confidence': 0.09631}, {'persistedFaceId': '045e8b48-6f7f-4946-8f7d-0394dd3d4ca9', 'confidence': 0.09631}, {'persistedFaceId': '0f3d6bf5-809f-4ee6-b43b-16c3d1b6206e', 'confidence': 0.0949}, {'persistedFaceId': '94e3ae3a-82b0-4f4a-a93e-85083cc36284', 'confidence': 0.0949}, {'persistedFaceId': 'c861f87d-e864-472c-8997-b146aefafd4b', 'confidence': 0.09432}] ``` * Face++ In: ``` # 1、先导入为们需要的模块 import requests api_secret = "Vr0PtRCw-ZFwYXTKVfi6aDNaSqlfunK3" # 2、输入我们API_Key api_key = 'jRpCaZ34kqNYJ8Zppdc-yGGum_YkETov' # Replace with a valid Subscription Key here. # 3、目标url # 这里也可以使用本地图片 例如:filepath ="image/tupian.jpg" BASE_URL = 'https://api-cn.faceplusplus.com/facepp/v3/detect' img_url = 'https://photo.orsoon.com/180610/JPG-180610_451/WSVb8tpo5Q_small.jpg' # 4、沿用API文档的示范代码,准备我们的headers和图片(数据) headers = { 'Content-Type': 'application/json', } # 5、准备symbol ? 
后面的数据 payload = { "image_url":img_url, 'api_key': api_key, 'api_secret': api_secret, 'return_attributes':'gender,age,smiling,emotion', } ``` In: ``` # 6、requests发送我们请求 r = requests.post(BASE_URL, params=payload, headers=headers) ``` In: ``` r.status_code ``` Out: ``` 200 ``` In: ``` r.content ``` Out: ``` b'{"request_id":"1603375692,a7bf257f-16a4-4cc0-851b-fd63081eadcd","time_used":531,"faces":[{"face_token":"85c3d3b090d4492ad0082c3df00d4603","face_rectangle":{"top":85,"left":440,"width":112,"height":112},"attributes":{"gender":{"value":"Female"},"age":{"value":39},"smile":{"value":48.759,"threshold":50.000},"emotion":{"anger":0.084,"disgust":0.001,"fear":0.001,"happiness":99.876,"neutral":0.004,"sadness":0.001,"surprise":0.032}}},{"face_token":"afbe8f52ca6ba6272b49c888dc40340f","face_rectangle":{"top":102,"left":4,"width":98,"height":98},"attributes":{"gender":{"value":"Female"},"age":{"value":25},"smile":{"value":1.467,"threshold":50.000},"emotion":{"anger":0.002,"disgust":0.005,"fear":0.002,"happiness":0.003,"neutral":99.820,"sadness":0.031,"surprise":0.139}}},{"face_token":"65eeee1524a81e060897451e70e97497","face_rectangle":{"top":83,"left":350,"width":95,"height":95},"attributes":{"gender":{"value":"Female"},"age":{"value":26},"smile":{"value":0.146,"threshold":50.000},"emotion":{"anger":1.155,"disgust":0.261,"fear":3.883,"happiness":3.498,"neutral":55.944,"sadness":10.098,"surprise":25.161}}},{"face_token":"6e22cc57a3b7ae3cacf826bdf51ea795","face_rectangle":{"top":73,"left":612,"width":94,"height":94},"attributes":{"gender":{"value":"Male"},"age":{"value":21},"smile":{"value":5.730,"threshold":50.000},"emotion":{"anger":0.000,"disgust":0.000,"fear":0.000,"happiness":0.632,"neutral":99.362,"sadness":0.000,"surprise":0.006}}},{"face_token":"514d049c19d3d88b3ed7313df1fd4d7a","face_rectangle":{"top":104,"left":180,"width":93,"height":93},"attributes":{"gender":{"value":"Male"},"age":{"value":32},"smile":{"value":99.999,"threshold":50.000},"emotion":{"anger":0.004,
"disgust":60.636,"fear":0.129,"happiness":30.148,"neutral":9.074,"sadness":0.004,"surprise":0.004}}},{"face_token":"7017b4bd99957b38e65995ae811303bf","face_rectangle":{"top":98,"left":264,"width":93,"height":93}},{"face_token":"c4088d9ec148711eed4fe3ee4a052ddd","face_rectangle":{"top":118,"left":96,"width":89,"height":89}},{"face_token":"6fbc75f0bbaf335ac86522fbffceaa4d","face_rectangle":{"top":83,"left":701,"width":87,"height":87}}],"image_id":"tp593GJd/ARo1sVO+5hOzA==","face_num":8}\n' ``` In: ``` # requests 巧妙的方法 r = response results = r.json() # results ``` Out: ``` {'request_id': '1603375692,a7bf257f-16a4-4cc0-851b-fd63081eadcd', 'time_used': 531, 'faces': [{'face_token': '85c3d3b090d4492ad0082c3df00d4603', 'face_rectangle': {'top': 85, 'left': 440, 'width': 112, 'height': 112}, 'attributes': {'gender': {'value': 'Female'}, 'age': {'value': 39}, 'smile': {'value': 48.759, 'threshold': 50.0}, 'emotion': {'anger': 0.084, 'disgust': 0.001, 'fear': 0.001, 'happiness': 99.876, 'neutral': 0.004, 'sadness': 0.001, 'surprise': 0.032}}}, {'face_token': 'afbe8f52ca6ba6272b49c888dc40340f', 'face_rectangle': {'top': 102, 'left': 4, 'width': 98, 'height': 98}, 'attributes': {'gender': {'value': 'Female'}, 'age': {'value': 25}, 'smile': {'value': 1.467, 'threshold': 50.0}, 'emotion': {'anger': 0.002, 'disgust': 0.005, 'fear': 0.002, 'happiness': 0.003, 'neutral': 99.82, 'sadness': 0.031, 'surprise': 0.139}}}, {'face_token': '65eeee1524a81e060897451e70e97497', 'face_rectangle': {'top': 83, 'left': 350, 'width': 95, 'height': 95}, 'attributes': {'gender': {'value': 'Female'}, 'age': {'value': 26}, 'smile': {'value': 0.146, 'threshold': 50.0}, 'emotion': {'anger': 1.155, 'disgust': 0.261, 'fear': 3.883, 'happiness': 3.498, 'neutral': 55.944, 'sadness': 10.098, 'surprise': 25.161}}}, {'face_token': '6e22cc57a3b7ae3cacf826bdf51ea795', 'face_rectangle': {'top': 73, 'left': 612, 'width': 94, 'height': 94}, 'attributes': {'gender': {'value': 'Male'}, 'age': {'value': 21}, 'smile': 
{'value': 5.73, 'threshold': 50.0}, 'emotion': {'anger': 0.0, 'disgust': 0.0, 'fear': 0.0, 'happiness': 0.632, 'neutral': 99.362, 'sadness': 0.0, 'surprise': 0.006}}}, {'face_token': '514d049c19d3d88b3ed7313df1fd4d7a', 'face_rectangle': {'top': 104, 'left': 180, 'width': 93, 'height': 93}, 'attributes': {'gender': {'value': 'Male'}, 'age': {'value': 32}, 'smile': {'value': 99.999, 'threshold': 50.0}, 'emotion': {'anger': 0.004, 'disgust': 60.636, 'fear': 0.129, 'happiness': 30.148, 'neutral': 9.074, 'sadness': 0.004, 'surprise': 0.004}}}, {'face_token': '7017b4bd99957b38e65995ae811303bf', 'face_rectangle': {'top': 98, 'left': 264, 'width': 93, 'height': 93}}, {'face_token': 'c4088d9ec148711eed4fe3ee4a052ddd', 'face_rectangle': {'top': 118, 'left': 96, 'width': 89, 'height': 89}}, {'face_token': '6fbc75f0bbaf335ac86522fbffceaa4d', 'face_rectangle': {'top': 83, 'left': 701, 'width': 87, 'height': 87}}], 'image_id': 'tp593GJd/ARo1sVO+5hOzA==', 'face_num': 8} ``` In: ``` api_secret = "Vr0PtRCw-ZFwYXTKVfi6aDNaSqlfunK3" api_key = "jRpCaZ34kqNYJ8Zppdc-yGGum_YkETov" # Replace with a valid Subscription Key here. ``` In: ``` # 1. 
FaceSet Create import requests,json display_name = "网新二班人脸集合" outer_id = "01" user_data = "40人,20男生,20女生" CreateFace_Url = "https://api-cn.faceplusplus.com/facepp/v3/faceset/create" payload = { 'api_key': api_key, 'api_secret': api_secret, 'display_name':display_name, 'outer_id':outer_id, 'user_data':user_data } ``` In: ``` r = requests.post(CreateFace_Url, params=payload) ``` In: ``` r.json() ``` Out: ``` {'time_used': 95, 'error_message': 'FACESET_EXIST', 'request_id': '1603472593,ede092d0-e812-498b-8380-7ceef478f410'} ``` In: ``` GetDetail_Url = "https://api-cn.faceplusplus.com/facepp/v3/faceset/getdetail" payload = { 'api_key': api_key, 'api_secret': api_secret, 'outer_id':outer_id, } ``` In: ``` r = requests.post(GetDetail_Url,params=payload) ``` In: ``` r.json() ``` Out: ``` {'faceset_token': 'cc34ef13f35cbae14c39f740cd703549', 'tags': '', 'time_used': 92, 'user_data': '40人,20男生,20女生', 'display_name': '网新二班人脸集合', 'face_tokens': [], 'face_count': 0, 'request_id': '1603472595,a5105ee1-485f-4745-91e8-321ee763ce0d', 'outer_id': '01'} ``` In: ``` AddFace_url = " https://api-cn.faceplusplus.com/facepp/v3/faceset/addface" payload = { 'api_key': api_key, 'api_secret': api_secret, 'faceset_token':'cc34ef13f35cbae14c39f740cd703549', 'face_tokens':'42fb0d5bf81c5ac57c52344dddc3e7c9', } ``` In: ``` r = requests.post(AddFace_url,params=payload) ``` In: ``` r.json() ``` Out: ``` {'faceset_token': 'cc34ef13f35cbae14c39f740cd703549', 'time_used': 79, 'face_count': 0, 'face_added': 0, 'request_id': '1603472597,a9044758-3784-4d23-817d-970a641a43a9', 'outer_id': '01', 'failure_detail': [{'reason': 'INVALID_FACE_TOKEN', 'face_token': '42fb0d5bf81c5ac57c52344dddc3e7c9'}]} ``` In: ``` RemoveFace_url = " https://api-cn.faceplusplus.com/facepp/v3/faceset/removeface" payload = { 'api_key': api_key, 'api_secret': api_secret, 'faceset_token':'cc34ef13f35cbae14c39f740cd703549', 'face_tokens':'42fb0d5bf81c5ac57c52344dddc3e7c9', } ``` In: ``` r = 
requests.post(RemoveFace_url,params=payload) ``` In: ``` r.json() ``` Out: ``` {'faceset_token': 'cc34ef13f35cbae14c39f740cd703549', 'face_removed': 0, 'time_used': 148, 'face_count': 0, 'request_id': '1603472727,3e83c7f3-14c4-469b-9e41-e9a3aa8ddb34', 'outer_id': '01', 'failure_detail': [{'reason': 'INVALID_FACE_TOKEN', 'face_token': '42fb0d5bf81c5ac57c52344dddc3e7c9'}]} ``` In: ``` Update_url = "https://api-cn.faceplusplus.com/facepp/v3/faceset/update" payload = { 'api_key': api_key, 'api_secret': api_secret, 'faceset_token':'cc34ef13f35cbae14c39f740cd703549', 'user_data':"40人,20男生,20女生", } ``` In: ``` r = requests.post(Update_url,params=payload) ``` In: ``` r.json() ``` Out: ``` {'faceset_token': 'cc34ef13f35cbae14c39f740cd703549', 'request_id': '1603472926,21627a73-9b68-433e-a9da-582bdf304505', 'time_used': 59, 'outer_id': '01'} ``` In: ``` liudehua01 = "https://gss0.baidu.com/9fo3dSag_xI4khGko9WTAnF6hhy/zhidao/pic/item/7c1ed21b0ef41bd57f7f20ff57da81cb39db3d89.jpg" liudehua02 = "https://tse3-mm.cn.bing.net/th/id/OIP.Xz3HbYZeNrdUnGJ7vXNzsQHaKO?pid=Api&rs=1" wangzulan = "https://tse3-mm.cn.bing.net/th/id/OIP.ZnXeGoVYT4jQudiPOGZn3QAAAA?pid=Api&rs=1" ``` In: ``` Compare_url = "https://api-cn.faceplusplus.com/facepp/v3/compare" payload ={ 'api_key': api_key, 'api_secret': api_secret, 'image_url1':liudehua01, 'image_url2':wangzulan } ``` In: ``` r = requests.post(Compare_url,params=payload) ``` In: ``` r.json() ``` Out: ``` {'faces1': [{'face_rectangle': {'width': 824, 'top': 871, 'left': 1114, 'height': 824}, 'face_token': 'a712a5a29f6712c486165476bba97c87'}], 'faces2': [{'face_rectangle': {'width': 86, 'top': 91, 'left': 65, 'height': 86}, 'face_token': '4abfec8226b5d91c598665a2b5bdc278'}], 'time_used': 3842, 'thresholds': {'1e-3': 62.327, '1e-5': 73.975, '1e-4': 69.101}, 'confidence': 26.085, 'image_id2': 'g6kg8zfyOouG6ftP+GvEfg==', 'image_id1': 'KIOXEC2V/MyL4zuopAcNig==', 'request_id': '1603472976,a9d5799a-36e1-448c-9f84-59e57ccbafa7'} ``` In: ``` Detect_url = 
'https://api-cn.faceplusplus.com/facepp/v3/detect' img_url = liudehua01 payload = { "image_url":img_url, 'api_key': api_key, 'api_secret': api_secret, 'return_attributes':'gender,age,smiling,emotion', } ``` In: ``` r = requests.post(Detect_url,params=payload) ``` In: ``` r.json() ``` Out: ``` {'request_id': '1603473144,7ce18660-35d3-4d24-8b1e-45a82fd1a23b', 'time_used': 2983, 'faces': [{'face_token': '19683918dcf6912e9ceca45eec7063ee', 'face_rectangle': {'top': 871, 'left': 1114, 'width': 824, 'height': 824}, 'attributes': {'gender': {'value': 'Male'}, 'age': {'value': 59}, 'smile': {'value': 99.998, 'threshold': 50.0}, 'emotion': {'anger': 0.0, 'disgust': 0.047, 'fear': 0.0, 'happiness': 99.945, 'neutral': 0.0, 'sadness': 0.007, 'surprise': 0.0}}}], 'image_id': 'KIOXEC2V/MyL4zuopAcNig==', 'face_num': 1} ``` * 百度云 In: ``` # encoding:utf-8 import requests # client_id 为官网获取的AK, client_secret 为官网获取的SK host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=Xs95c4m4d43hcz6OU6utVWYr&client_secret=a9bXg6lNW5kEA9sz143yy7beLPnd9WUc' response = requests.get(host) if response: print(response.json()) ``` ``` {'refresh_token': '25.fc37d23c99b60a160ae2ed7037e7d82f.315360000.1918738032.282335-22861617', 'expires_in': 2592000, 'session_key': '9mzdDAC7DARclEBmp6pK6Ube0JUURlh/gfLfGvtWvVunR5YRL5Lzl3zi4JL8ia5ftpSoxuq2oeLS1ve/bINbzrf9txtbSQ==', 'access_token': '24.75be0b69fe9ed4024b2a7711250f5f2e.2592000.1605970032.282335-22861617', 'scope': 'public brain_all_scope vis-faceverify_faceverify_h5-face-liveness vis-faceverify_FACE_V3 vis-faceverify_idl_face_merge vis-faceverify_FACE_EFFECT vis-faceverify_face_feature_sdk wise_adapt lebo_resource_base lightservice_public hetu_basic lightcms_map_poi kaidian_kaidian ApsMisTest_Test权限 vis-classify_flower lpq_开放 cop_helloScope ApsMis_fangdi_permission smartapp_snsapi_base smartapp_mapp_dev_manage iop_autocar oauth_tp_app smartapp_smart_game_openapi oauth_sessionkey smartapp_swanid_verify 
smartapp_opensource_openapi smartapp_opensource_recapi fake_face_detect_开放Scope vis-ocr_虚拟人物助理 idl-video_虚拟人物助理 smartapp_component', 'session_secret': '8ccc4f17cfa7c60e106f311adc1f3158'} ``` In: ``` # encoding:utf-8 import requests ''' 人脸检测与属性分析 ''' request_url = "https://aip.baidubce.com/rest/2.0/face/v3/detect" params = "{\"image\":\"https://tse1-mm.cn.bing.net/th/id/OIP.BKUnKar_nwcvS4igSwZrhwHaJX?pid=Api&rs=1\",\"image_type\":\"URL\",\"face_field\":\"faceshape,facetype\"}" access_token = '24.75be0b69fe9ed4024b2a7711250f5f2e.2592000.1605970032.282335-22861617' request_url = request_url + "?access_token=" + access_token headers = {'content-type': 'application/json'} response = requests.post(request_url, data=params, headers=headers) if response: print (response.json()) ``` ``` {'error_code': 0, 'error_msg': 'SUCCESS', 'log_id': 9925201350015, 'timestamp': 1603379907, 'cached': 0, 'result': {'face_num': 1, 'face_list': [{'face_token': '621a63e5573e25159a19e6565e05402e', 'location': {'left': 86.86, 'top': 306.88, 'width': 298, 'height': 285, 'rotation': -1}, 'face_probability': 1, 'angle': {'yaw': -3.43, 'pitch': 22.35, 'roll': -3.39}, 'face_shape': {'type': 'oval', 'probability': 0.57}, 'face_type': {'type': 'human', 'probability': 0.96}}]}} ``` In: ``` results=response.json() results ``` Out: ``` {'error_code': 0, 'error_msg': 'SUCCESS', 'log_id': 9925201350015, 'timestamp': 1603379907, 'cached': 0, 'result': {'face_num': 1, 'face_list': [{'face_token': '621a63e5573e25159a19e6565e05402e', 'location': {'left': 86.86, 'top': 306.88, 'width': 298, 'height': 285, 'rotation': -1}, 'face_probability': 1, 'angle': {'yaw': -3.43, 'pitch': 22.35, 'roll': -3.39}, 'face_shape': {'type': 'oval', 'probability': 0.57}, 'face_type': {'type': 'human', 'probability': 0.96}}]}} ``` 2. 计算机视觉(Azure API)所有的功能 * 分析远程图像 In: ``` import requests # If you are using a Jupyter notebook, uncomment the following line. 
# %matplotlib inline import matplotlib.pyplot as plt import json from PIL import Image from io import BytesIO # Add your Computer Vision subscription key and endpoint to your environment variables. # if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ: # subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY'] # else: # print("\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**") # sys.exit() # if 'COMPUTER_VISION_ENDPOINT' in os.environ: # endpoint = os.environ['COMPUTER_VISION_ENDPOINT'] subscription_key = "b5b31be87fcb4d26a78250965c262bcc" endpoint = "https://api-jsj-xls.cognitiveservices.azure.com/" analyze_url = endpoint + "vision/v3.1/analyze" # Set image_url to the URL of an image that you want to analyze. image_url = "http://img95.699pic.com/photo/50080/4291.jpg_wh300.jpg!/fh/300/quality/90" headers = {'Ocp-Apim-Subscription-Key': subscription_key} params = {'visualFeatures': 'Categories,Description,Color'} data = {'url': image_url} response = requests.post(analyze_url, headers=headers, params=params, json=data) response.raise_for_status() # The 'analysis' object contains various fields that describe the image. The most # relevant caption for the image is obtained from the 'description' property. analysis = response.json() print(json.dumps(response.json())) image_caption = analysis["description"]["captions"][0]["text"].capitalize() # Display the image and overlay it with the caption. 
image = Image.open(BytesIO(requests.get(image_url).content)) plt.imshow(image) plt.axis("off") _ = plt.title(image_caption, size="x-large", y=-0.1) plt.show() ``` ``` {"categories": [{"name": "others_", "score": 0.10546875}, {"name": "text_", "score": 0.31640625}], "color": {"dominantColorForeground": "White", "dominantColorBackground": "White", "dominantColors": ["White"], "accentColor": "1A8AB1", "isBwImg": false, "isBWImg": false}, "description": {"tags": ["person", "indoor", "laptop"], "captions": [{"text": "a person using a laptop", "confidence": 0.5452403426170349}]}, "requestId": "f87463ba-7a96-4a79-b0ac-c95d94cf5466", "metadata": {"height": 300, "width": 441, "format": "Jpeg"}}
``` * 分析本地图像 In: ``` import os import sys import requests # If you are using a Jupyter notebook, uncomment the following line. # %matplotlib inline import matplotlib.pyplot as plt from PIL import Image from io import BytesIO # Add your Computer Vision subscription key and endpoint to your environment variables. # if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ: # subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY'] # else: # print("\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**") # sys.exit() # if 'COMPUTER_VISION_ENDPOINT' in os.environ: # endpoint = os.environ['COMPUTER_VISION_ENDPOINT'] subscription_key = "b5b31be87fcb4d26a78250965c262bcc" endpoint = "https://api-jsj-xls.cognitiveservices.azure.com/" analyze_url = endpoint + "vision/v3.1/analyze" # Set image_path to the local path of an image that you want to analyze. # Sample images are here, if needed: # https://github.com/Azure-Samples/cognitive-services-sample-data-files/tree/master/ComputerVision/Images image_path = "D:/vr1.jpg" # Read the image into a byte array image_data = open(image_path, "rb").read() headers = {'Ocp-Apim-Subscription-Key': subscription_key, 'Content-Type': 'application/octet-stream'} params = {'visualFeatures': 'Categories,Description,Color'} response = requests.post( analyze_url, headers=headers, params=params, data=image_data) response.raise_for_status() # The 'analysis' object contains various fields that describe the image. The most # relevant caption for the image is obtained from the 'description' property. analysis = response.json() print(analysis) image_caption = analysis["description"]["captions"][0]["text"].capitalize() # Display the image and overlay it with the caption. 
image = Image.open(BytesIO(image_data)) plt.imshow(image) plt.axis("off") _ = plt.title(image_caption, size="x-large", y=-0.1) plt.show() ``` ``` {'categories': [{'name': 'abstract_', 'score': 0.00390625}, {'name': 'others_', 'score': 0.0078125}, {'name': 'outdoor_', 'score': 0.234375, 'detail': {'landmarks': []}}], 'color': {'dominantColorForeground': 'Blue', 'dominantColorBackground': 'Blue', 'dominantColors': ['Blue'], 'accentColor': '0EA6BD', 'isBwImg': False, 'isBWImg': False}, 'description': {'tags': ['water', 'boat', 'outdoor', 'blue', 'harbor', 'long', 'colorful', 'several'], 'captions': [{'text': 'a city with a pool and a bridge', 'confidence': 0.18992629647254944}]}, 'requestId': 'a26364c7-f66a-4d11-aa56-d996838c3f73', 'metadata': {'height': 720, 'width': 1280, 'format': 'Jpeg'}} ``` ![A city with a pool and a bridge](image/vr1.jpg) * 生成缩略图 In: ``` import os import sys import requests # If you are using a Jupyter notebook, uncomment the following line. # %matplotlib inline import matplotlib.pyplot as plt from PIL import Image from io import BytesIO # Add your Computer Vision subscription key and endpoint to your environment variables. # if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ: # subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY'] # else: # print("\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**") # sys.exit() # if 'COMPUTER_VISION_ENDPOINT' in os.environ: # endpoint = os.environ['COMPUTER_VISION_ENDPOINT'] thumbnail_url = "https://api-jsj-xls.cognitiveservices.azure.com/" + "vision/v2.1/generateThumbnail" # Set image_url to the URL of an image that you want to analyze. 
image_url = "http://img95.699pic.com/photo/50080/4298.jpg_wh300.jpg!/fh/300/quality/90" headers = {'Ocp-Apim-Subscription-Key': "b5b31be87fcb4d26a78250965c262bcc"} params = {'width': '100', 'height': '100', 'smartCropping': 'true'} data = {'url': image_url} response = requests.post(thumbnail_url, headers=headers, params=params, json=data) response.raise_for_status() thumbnail = Image.open(BytesIO(response.content)) # Display the thumbnail. plt.imshow(thumbnail) plt.axis("off") # Verify the thumbnail size. print("Thumbnail is {0}-by-{1}".format(*thumbnail.size)) ``` ``` Thumbnail is 100-by-100 ``` ![png](image/1.jpg) * 提取文本(读取API) In: ``` import json import os import sys import requests import time # If you are using a Jupyter notebook, uncomment the following line. # %matplotlib inline import matplotlib.pyplot as plt from matplotlib.patches import Polygon from PIL import Image from io import BytesIO missing_env = False # Add your Computer Vision subscription key and endpoint to your environment variables. 
# if 'COMPUTER_VISION_ENDPOINT' in os.environ: # endpoint = os.environ['COMPUTER_VISION_ENDPOINT'] # else: # print("From Azure Cognitive Service, retrieve your endpoint and subscription key.") # print("\nSet the COMPUTER_VISION_ENDPOINT environment variable, such as \"https://westus2.api.cognitive.microsoft.com\".\n") # missing_env = True # if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ: # subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY'] # else: # print("From Azure Cognitive Service, retrieve your endpoint and subscription key.") # print("\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable, such as \"1234567890abcdef1234567890abcdef\".\n") # missing_env = True # if missing_env: # print("**Restart your shell or IDE for changes to take effect.**") # sys.exit() endpoint = "https://api-jsj-xls.cognitiveservices.azure.com/" subscription_key = "b5b31be87fcb4d26a78250965c262bcc" text_recognition_url = endpoint + "/vision/v3.1/read/analyze" # Set image_url to the URL of an image that you want to recognize. image_url = "https://uploadfile.bizhizu.cn/2017/0722/20170722032928988.jpg" headers = {'Ocp-Apim-Subscription-Key': subscription_key} data = {'url': image_url} response = requests.post( text_recognition_url, headers=headers, json=data) response.raise_for_status() # Extracting text requires two API calls: One call to submit the # image for processing, the other to retrieve the text found in the image. # Holds the URI used to retrieve the recognized text. operation_url = response.headers["Operation-Location"] # The recognized text isn't immediately available, so poll to wait for completion. 
analysis = {} poll = True while (poll): response_final = requests.get( response.headers["Operation-Location"], headers=headers) analysis = response_final.json() print(json.dumps(analysis, indent=4)) time.sleep(1) if ("analyzeResult" in analysis): poll = False if ("status" in analysis and analysis['status'] == 'failed'): poll = False polygons = [] if ("analyzeResult" in analysis): # Extract the recognized text, with bounding boxes. polygons = [(line["boundingBox"], line["text"]) for line in analysis["analyzeResult"]["readResults"][0]["lines"]] # Display the image and overlay it with the extracted text. image = Image.open(BytesIO(requests.get(image_url).content)) ax = plt.imshow(image) for polygon in polygons: vertices = [(polygon[0][i], polygon[0][i+1]) for i in range(0, len(polygon[0]), 2)] text = polygon[1] patch = Polygon(vertices, closed=True, fill=False, linewidth=2, color='y') ax.axes.add_patch(patch) plt.text(vertices[0][0], vertices[0][1], text, fontsize=20, va="top") plt.show() ``` ``` { "status": "running", "createdDateTime": "2020-10-23T14:34:44Z", "lastUpdatedDateTime": "2020-10-23T14:34:45Z" } { "status": "succeeded", "createdDateTime": "2020-10-23T14:34:44Z", "lastUpdatedDateTime": "2020-10-23T14:34:45Z", "analyzeResult": { "version": "3.0.0", "readResults": [ { "page": 1, "angle": 0.2204, "width": 1024, "height": 1820, "unit": "pixel", "lines": [ { "boundingBox": [ 604, 1020, 994, 1021, 993, 1087, 603, 1085 ], "text": "where to go", "words": [ { "boundingBox": [ 633, 1021, 813, 1020, 812, 1086, 633, 1084 ], "text": "where", "confidence": 0.981 }, { "boundingBox": [ 825, 1020, 907, 1021, 906, 1087, 825, 1086 ], "text": "to", "confidence": 0.986 }, { "boundingBox": [ 919, 1021, 993, 1023, 992, 1088, 919, 1087 ], "text": "go", "confidence": 0.986 } ] } ] } ] } } ``` ![png](image/1.png) * 提取文本(OCR API) In: ``` import sys import requests # If you are using a Jupyter notebook, uncomment the following line. 
# %matplotlib inline import matplotlib.pyplot as plt from matplotlib.patches import Rectangle from PIL import Image from io import BytesIO # Add your Computer Vision subscription key and endpoint to your environment variables. # if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ: # subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY'] # else: # print("\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**") # sys.exit() # if 'COMPUTER_VISION_ENDPOINT' in os.environ: # endpoint = os.environ['COMPUTER_VISION_ENDPOINT'] endpoint = "https://api-jsj-xls.cognitiveservices.azure.com/" subscription_key = "b5b31be87fcb4d26a78250965c262bcc" ocr_url = endpoint + "vision/v3.1/ocr" # Set image_url to the URL of an image that you want to analyze. image_url = "https://tse2-mm.cn.bing.net/th/id/OIP.l3rdt4l3I4MfPRqbJShIrwHaNK?pid=Api&rs=1" headers = {'Ocp-Apim-Subscription-Key': subscription_key} params = {'language': 'unk', 'detectOrientation': 'true'} data = {'url': image_url} response = requests.post(ocr_url, headers=headers, params=params, json=data) response.raise_for_status() analysis = response.json() # Extract the word bounding boxes and text. line_infos = [region["lines"] for region in analysis["regions"]] word_infos = [] for line in line_infos: for word_metadata in line: for word_info in word_metadata["words"]: word_infos.append(word_info) word_infos # Display the image and overlay it with the extracted text. 
plt.figure(figsize=(5, 5)) image = Image.open(BytesIO(requests.get(image_url).content)) ax = plt.imshow(image, alpha=0.5) for word in word_infos: bbox = [int(num) for num in word["boundingBox"].split(",")] text = word["text"] origin = (bbox[0], bbox[1]) patch = Rectangle(origin, bbox[2], bbox[3], fill=False, linewidth=2, color='y') ax.axes.add_patch(patch) plt.text(origin[0], origin[1], text, fontsize=20, weight="bold", va="top") plt.show() plt.axis("off") ``` ![png](image/2.png) Out: ``` (0.0, 1.0, 0.0, 1.0) ``` * 使用域模型 In: ``` import os import sys import requests # If you are using a Jupyter notebook, uncomment the following line. # %matplotlib inline import matplotlib.pyplot as plt from PIL import Image from io import BytesIO # Add your Computer Vision subscription key and endpoint to your environment variables. # if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ: # subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY'] # else: # print("\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**") # sys.exit() # if 'COMPUTER_VISION_ENDPOINT' in os.environ: # endpoint = os.environ['COMPUTER_VISION_ENDPOINT'] endpoint = "https://api-jsj-xls.cognitiveservices.azure.com/" subscription_key = "b5b31be87fcb4d26a78250965c262bcc" landmark_analyze_url = endpoint + "vision/v3.1/models/landmarks/analyze" # Set image_url to the URL of an image that you want to analyze. image_url = "http://img3.cache.netease.com/photo/0007/2013-09-12/98INE4G82EL20007.jpg" headers = {'Ocp-Apim-Subscription-Key': subscription_key} params = {'model': 'landmarks'} data = {'url': image_url} response = requests.post( landmark_analyze_url, headers=headers, params=params, json=data) response.raise_for_status() # The 'analysis' object contains various fields that describe the image. The # most relevant landmark for the image is obtained from the 'result' property. 
analysis = response.json() assert analysis["result"]["landmarks"] print(analysis) landmark_name = analysis["result"]["landmarks"][0]["name"].capitalize() # Display the image and overlay it with the landmark name. image = Image.open(BytesIO(requests.get(image_url).content)) plt.imshow(image) plt.axis("off") _ = plt.title(landmark_name, size="x-large", y=-0.1) plt.show() ``` ``` {'result': {'landmarks': [{'name': 'Tiananmen Square', 'confidence': 0.9532119035720825}]}, 'requestId': 'bd07b8a0-8c0c-4150-9018-52031a320747', 'metadata': {'height': 667, 'width': 1000, 'format': 'Jpeg'}} ``` ![Tiananmen square](image/4.jpg) In: ``` import requests # If you are using a Jupyter notebook, uncomment the following line. # %matplotlib inline import matplotlib.pyplot as plt from PIL import Image from io import BytesIO # Replace with your valid subscription key. subscription_key = "b5b31be87fcb4d26a78250965c262bcc" assert subscription_key vision_base_url = "https://api-jsj-xls.cognitiveservices.azure.com/vision/v2.1/" celebrity_analyze_url = vision_base_url + "models/celebrities/analyze" # Set image_url to the URL of an image that you want to analyze. image_url = "https://tse4-mm.cn.bing.net/th/id/OIP.FaxEul6ZyjJNogfxH6da2AHaJn?pid=Api&rs=1" headers = {'Ocp-Apim-Subscription-Key': subscription_key} params = {'model': 'celebrities'} data = {'url': image_url} response = requests.post( celebrity_analyze_url, headers=headers, params=params, json=data) response.raise_for_status() # The 'analysis' object contains various fields that describe the image. The # most relevant celebrity for the image is obtained from the 'result' property. analysis = response.json() assert analysis["result"]["celebrities"] print(analysis) celebrity_name = analysis["result"]["celebrities"][0]["name"].capitalize() # Display the image and overlay it with the celebrity name. 
image = Image.open(BytesIO(requests.get(image_url).content)) plt.imshow(image) plt.axis("off") _ = plt.title(celebrity_name, size="x-large", y=-0.1) plt.show() ``` ``` {'result': {'celebrities': [{'name': 'Kobe Bryant', 'confidence': 0.9906031489372253, 'faceRectangle': {'left': 267, 'top': 91, 'width': 129, 'height': 129}}]}, 'requestId': 'a7302c3a-a491-431f-b53f-6d4653a4c588', 'metadata': {'height': 584, 'width': 450, 'format': 'Jpeg'}} ``` ![Kobe bryant](image/5.jpg) 3. 学习心得(总结)。 API课程让我了解到API的强大,它可以给我们带来很多便捷,但它并不是那么容易掌握的,需要我们多加研究与练习才能学会。