# 1
import os
# 2
from dotenv import load_dotenv
# 3
from azure.ai.contentsafety import ContentSafetyClient
from azure.ai.contentsafety.models import AnalyzeImageOptions, ImageCategory, ImageData
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import HttpResponseError
Here's the explanation of the above code:
1. You imported the os module. You'll use this module to read the environment variables that hold the content safety key and endpoint.
2. You imported load_dotenv from the dotenv package, which you'll use to load the variables defined in your .env file into the environment.
3. You imported the classes you'll need from the Azure libraries: ContentSafetyClient to call the service, AnalyzeImageOptions and ImageData to build the analysis request, ImageCategory to read the results, AzureKeyCredential for authentication, and HttpResponseError for handling request failures.
Run the cell to successfully import everything, and wait for the cell execution to finish.
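If the imports fail because the packages aren't available in your environment, install them first and re-run the cell. This assumes the standard PyPI package names for the imports above; the course setup may have already installed them for you:
pip install azure-ai-contentsafety python-dotenv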
Creating Content Safety Client
Next, you'll create a content safety client, which will be used to send API requests to the Azure Content Safety resource. Replace the # TODO: Create content safety client comment with the following code:
# 1 Load your Azure Safety API key and endpoint
load_dotenv()
# 2
key = os.environ["CONTENT_SAFETY_KEY"]
endpoint = os.environ["CONTENT_SAFETY_ENDPOINT"]
# 3 Create a Content Safety client
client = ContentSafetyClient(endpoint, AzureKeyCredential(key))
In the above code:
1. You're using the load_dotenv function to load the content from your .env file into the environment variables of your application.
2. Next, you're accessing the values of CONTENT_SAFETY_KEY and CONTENT_SAFETY_ENDPOINT from the environment variables using os.environ. These are assigned to the key and endpoint variables, which will be used to authenticate the API requests.
3. Finally, you're creating a ContentSafetyClient using endpoint and key, and assigning it to the client variable.
Make sure to copy the .env file that you created in lesson 1 into the project directory. Then, run the cell to create the content safety client.
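If you need to recreate the .env file, it follows the standard dotenv key=value format. The values below are placeholders, not real credentials; substitute the key and endpoint from your own Azure Content Safety resource:
CONTENT_SAFETY_KEY=<your-content-safety-key>
CONTENT_SAFETY_ENDPOINT=https://<your-resource-name>.cognitiveservices.azure.com/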
Creating Moderate Image Function
Next, create the moderate_image function. This will be used to send the image for analysis, and finally, the response will be processed to determine whether the image should be allowed or rejected for posting. Replace # TODO: Implement moderate image function with the following code:
# 1
def moderate_image(image_data):
    # 2 Construct a request
    request = AnalyzeImageOptions(image=ImageData(content=image_data))

    # 3 Analyze image
    try:
        response = client.analyze_image(request)
    except HttpResponseError as e:
        print("Analyze image failed.")
        if e.error:
            print(f"Error code: {e.error.code}")
            print(f"Error message: {e.error.message}")
            raise
        print(e)
        raise

    ## TODO: Process moderation response to determine if the image
    # is approved or rejected

    # 4 If content is appropriate
    return "Post successful"
Xuce’k jdad ndo nenwuyemr toqu boeh:
Aq zhiecis xdo jafmjiis yakizazi_ajule, cxemc rugit upali_mevu ez id esxudecv efx jikosft nhagsof zli kzovez ismit om utdquyac oy hizesjan wow diyhexh.
Bguq, on vaplwpixkn fdo cewaokz okosn OdegzzuOmixeIhkuuhq. Gou’si gyinoyul vta teci71-uwsiner onala xu IgadeNubu, elz in gahgiv mu AxusnquEhigeOmpuujs olaqd ow uyoha aqbuqotn. Emhe, vm voqeabq, iasfec_yfci iv dih ke SoehVigukegmCacely, dwehu zai entc qiz 1,7,8,1 al pfu lomitabc kopig oumjut.
Meriydc, ox jwu cubcibx uk ehsfaxkeuta, xoo yovavd rtewux ok "Nuvb guwrolvhoj".
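To make that default explicit, here's a short sketch, not part of the lesson code, showing the same request with output_type spelled out and the shape of the per-category results you'll process next. The value "FourSeverityLevels" is the documented default for image analysis:
# Sketch only: the same request with the default output_type made explicit.
request = AnalyzeImageOptions(
    image=ImageData(content=image_data),  # raw image bytes; the SDK base64-encodes them
    output_type="FourSeverityLevels"      # image severities come back as 0, 2, 4 or 6
)

# After client.analyze_image(request), each entry in
# response.categories_analysis pairs a category with a severity,
# e.g. category=ImageCategory.HATE, severity=0.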
Next, it's time to implement the logic to determine if the image is safe, and whether the request can be approved or rejected if it's harmful or violates the platform rules. Replace the ## TODO: Process moderation response to determine if the image is approved or rejected comment with the following code:
# 1 Extract results
categories = {
    ImageCategory.HATE: None,
    ImageCategory.SELF_HARM: None,
    ImageCategory.SEXUAL: None,
    ImageCategory.VIOLENCE: None
}

# 2
for item in response.categories_analysis:
    if item.category in categories:
        categories[item.category] = item

# 3
hate_result = categories[ImageCategory.HATE]
self_harm_result = categories[ImageCategory.SELF_HARM]
sexual_result = categories[ImageCategory.SEXUAL]
violence_result = categories[ImageCategory.VIOLENCE]

# 4 Check for inappropriate content
violations = []
if hate_result and hate_result.severity > 2:
    violations.append("hate speech")
if self_harm_result and self_harm_result.severity > 3:
    violations.append("self-harm references")
if sexual_result and sexual_result.severity > 0:
    violations.append("sexual references")
if violence_result and violence_result.severity > 2:
    violations.append("violent references")

# 5
if violations:
    return (f"Your shared image contains {', '.join(violations)} that violate "
            "our community guidelines. Please modify your image to adhere to "
            "community guidelines.")
Make sure to fix the indentation of the code by selecting the above code and pressing the tab key if it's not indented properly with respect to the enclosing function.
Here's the explanation for the code:
1. You created a dictionary keyed by ImageCategory values, which will be filled in with the moderation result for each category.
2. You then iterate over response.categories_analysis and store each category's analysis in the dictionary.
3. Next, you extract each category's result into a separate variable from the categories dictionary.
4. This is the crucial part of the processing logic. Here, you're determining whether the image received for moderation is inappropriate for any of the following categories - hate, self-harm, violence, and sexual - based on the severity thresholds defined. If the output severity level is higher than the respective category's threshold value, you append the category name to the violations list. For example, a hate severity of 4 exceeds its threshold of 2, so "hate speech" would be added to violations.
5. Finally, if any violation exists in the violations list, you inform the user by returning details about the categories the image violated and ask them to modify the image so that it adheres to the community guidelines. If no violation is found, the user is notified that the post was successful.
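With the function complete, you can give it a quick try. The following is a minimal usage sketch rather than part of the lesson: the file name test_image.jpg is an assumed placeholder for any image in your project directory, and the raw bytes read from disk are what ImageData's content field accepts:
# Try the moderation flow end to end ("test_image.jpg" is a placeholder).
with open("test_image.jpg", "rb") as image_file:
    image_bytes = image_file.read()

result = moderate_image(image_bytes)
print(result)  # either "Post successful" or the community-guidelines message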