The official Python client for the Image Moderation API. Fully typed, async-ready, and designed for modern Python applications.
The Python SDK provides a clean, Pythonic interface to the Image Moderation API, with type annotations and first-class asyncio support.
```python
from imagemoderation import Client

# Initialize the client
client = Client(api_key="your_api_key")

# Moderate an image by URL
result = client.moderate(
    image_url="https://example.com/image.jpg",
    models=["nsfw", "violence", "text"]
)

# Access results
print(f"NSFW Score: {result.nsfw.score}")
print(f"Violence Score: {result.violence.score}")
print(f"Is Safe: {result.is_safe}")

# Moderate from file
with open("image.jpg", "rb") as f:
    result = client.moderate(image_data=f.read())
```
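If your application needs a stricter policy than the default `is_safe` verdict, the per-category scores can be checked directly. The sketch below is illustrative only: it assumes `result.scores` is a mapping of model name to a 0–1 float score (suggested by, but not confirmed on, this page), and the threshold of 0.6 is an arbitrary example value.

```python
# Illustrative sketch: apply a custom threshold on top of the per-category scores.
# Assumes result.scores is a dict-like mapping of model name -> float score (0-1),
# and reuses the `client` object from the quick-start example above.
CUSTOM_THRESHOLD = 0.6

result = client.moderate(
    image_url="https://example.com/image.jpg",
    models=["nsfw", "violence"]
)

flagged = [name for name, score in result.scores.items() if score >= CUSTOM_THRESHOLD]
if flagged:
    print(f"Rejected by custom policy: {flagged}")
else:
    print("Accepted by custom policy")
```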
```python
import asyncio

from imagemoderation import AsyncClient

async def moderate_images():
    async with AsyncClient(api_key="your_api_key") as client:
        # Process multiple images concurrently
        urls = [
            "https://example.com/image1.jpg",
            "https://example.com/image2.jpg",
            "https://example.com/image3.jpg",
        ]
        tasks = [client.moderate(image_url=url) for url in urls]
        results = await asyncio.gather(*tasks)

        for url, result in zip(urls, results):
            print(f"{url}: {'Safe' if result.is_safe else 'Flagged'}")

asyncio.run(moderate_images())
```
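For larger batches you may want to cap how many requests run at once rather than firing them all with `asyncio.gather` alone. This is a minimal sketch using `asyncio.Semaphore` around the same `AsyncClient.moderate` call; the limit of 5 is an arbitrary example value, not an SDK default.

```python
import asyncio

from imagemoderation import AsyncClient

# Sketch: bound concurrency with a semaphore (the limit of 5 is arbitrary).
async def moderate_with_limit(urls, limit=5):
    semaphore = asyncio.Semaphore(limit)

    async with AsyncClient(api_key="your_api_key") as client:
        async def moderate_one(url):
            async with semaphore:
                return await client.moderate(image_url=url)

        return await asyncio.gather(*(moderate_one(url) for url in urls))
```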
```python
# settings.py
IMAGEMOD_API_KEY = "your_api_key"

# views.py
from django.conf import settings
from django.http import JsonResponse
from imagemoderation import Client

client = Client(api_key=settings.IMAGEMOD_API_KEY)

def upload_image(request):
    if request.method == "POST":
        image = request.FILES["image"]

        # Moderate before saving
        result = client.moderate(image_data=image.read())

        if not result.is_safe:
            return JsonResponse({
                "error": "Image violates content policy",
                "categories": result.flagged_categories
            }, status=400)

        # Save the approved image
        image.seek(0)
        # ... save logic
```
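The same check can also be packaged as a reusable form validator instead of living in the view. This is a sketch, not part of the SDK: `validate_image_content` and `ImageUploadForm` are names chosen here for illustration, and the validator assumes a module-level `client` configured as in the view above.

```python
from django import forms
from django.core.exceptions import ValidationError

from imagemoderation import Client

client = Client(api_key="your_api_key")  # or settings.IMAGEMOD_API_KEY as above

def validate_image_content(uploaded_file):
    # Hypothetical helper: moderate the upload, then rewind it for later processing.
    result = client.moderate(image_data=uploaded_file.read())
    uploaded_file.seek(0)
    if not result.is_safe:
        raise ValidationError(
            f"Image violates content policy: {result.flagged_categories}"
        )

class ImageUploadForm(forms.Form):
    image = forms.ImageField(validators=[validate_image_content])
```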
```python
from fastapi import FastAPI, UploadFile, HTTPException
from imagemoderation import AsyncClient

app = FastAPI()
client = AsyncClient(api_key="your_api_key")

@app.post("/upload")
async def upload_image(file: UploadFile):
    content = await file.read()

    result = await client.moderate(
        image_data=content,
        models=["nsfw", "violence"]
    )

    if not result.is_safe:
        raise HTTPException(
            status_code=400,
            detail=f"Content violation: {result.flagged_categories}"
        )

    return {"status": "approved", "scores": result.scores}
```
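To exercise the route locally, FastAPI's `TestClient` can post a file to it. This is a usage sketch: it assumes the application above is importable as `app` and that a valid API key is configured, since the request will still call the moderation API.

```python
from fastapi.testclient import TestClient

test_client = TestClient(app)

# Post a local file to the /upload route defined above.
with open("image.jpg", "rb") as f:
    response = test_client.post(
        "/upload",
        files={"file": ("image.jpg", f, "image/jpeg")},
    )

print(response.status_code, response.json())
```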
Get your API key and start moderating images in minutes.
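Once you have a key, avoid hardcoding it as in the snippets above and read it from the environment instead. `IMAGEMOD_API_KEY` here is just an example variable name; the SDK is not documented on this page to read it automatically.

```python
import os

from imagemoderation import Client

# Example: load the key from an environment variable instead of hardcoding it.
# IMAGEMOD_API_KEY is an illustrative name, not one the SDK looks up itself.
client = Client(api_key=os.environ["IMAGEMOD_API_KEY"])
```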