first commit
commit 067056629b
Dockerfile (new file, 11 lines)
@@ -0,0 +1,11 @@
FROM python:3.9

WORKDIR /app

COPY requirements.txt .

RUN pip install -U pip && pip install -r requirements.txt

COPY . .

CMD python -m api
api/__init__.py (new file, 4 lines)
@@ -0,0 +1,4 @@
from fastapi import FastAPI
from nsfw_detector import predict

app = FastAPI()
api/__main__.py (new file, 37 lines)
@@ -0,0 +1,37 @@
from api import predict, app
from api.functions import download_image
import os
import uvicorn

# Load the classifier once at startup so every request reuses the same model.
model = predict.load_model('nsfw_detector/nsfw_model.h5')


@app.get("/")
async def detect_nsfw(url: str):
    if not url:
        return {"ERROR": "URL PARAMETER EMPTY"}
    image = await download_image(url)
    if not image:
        return {"ERROR": "IMAGE SIZE TOO LARGE OR INCORRECT URL"}
    results = predict.classify(model, image)
    os.remove(image)
    hentai = results['data']['hentai']
    sexy = results['data']['sexy']
    porn = results['data']['porn']
    drawings = results['data']['drawings']
    neutral = results['data']['neutral']
    # Threshold heuristic: mostly-neutral or drawing-heavy images are treated
    # as safe; otherwise the image is flagged when the combined explicit
    # scores dominate.
    if neutral >= 25:
        results['data']['is_nsfw'] = False
    elif (sexy + porn + hentai) >= 70:
        results['data']['is_nsfw'] = True
    elif drawings >= 40:
        results['data']['is_nsfw'] = False
    else:
        results['data']['is_nsfw'] = False
    return results


if __name__ == "__main__":
    uvicorn.run("api:app", host="0.0.0.0", port=8000, log_level="info")
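For reference, a minimal client sketch for the endpoint above, using only the standard library; localhost:8000 and the image URL are assumptions, not part of this commit:

# Minimal client sketch; host/port and the image URL are hypothetical.
import json
import urllib.parse
import urllib.request

query = urllib.parse.urlencode({"url": "https://example.com/image.jpg"})
with urllib.request.urlopen(f"http://localhost:8000/?{query}") as resp:
    results = json.load(resp)

# Response shape mirrors the handler: {"data": {"drawings": ..., "hentai": ...,
# "neutral": ..., "porn": ..., "sexy": ..., "is_nsfw": ...}}
print(results["data"]["is_nsfw"])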
api/functions.py (new file, 20 lines)
@@ -0,0 +1,20 @@
from random import randint
import aiohttp
import aiofiles

MAX_IMAGE_SIZE = 5 * 1000000


async def download_image(url):
    # Random name keeps concurrent downloads from always colliding on one path.
    file_name = f"{randint(6969, 6999)}.jpg"
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as resp:
                if resp.status != 200:
                    return False
                # Content-Length may be absent; .get avoids a KeyError then.
                if int(resp.headers.get('Content-Length', 0)) > MAX_IMAGE_SIZE:
                    return False
                async with aiofiles.open(file_name, mode='wb') as f:
                    await f.write(await resp.read())
    except aiohttp.ClientError:
        # Malformed URLs and connection failures surface here, so the caller's
        # "INCORRECT URL" branch is actually reachable.
        return False
    return file_name
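Content-Length is only advisory and can be missing or wrong; a more defensive variant, sketched here as an assumption rather than what this commit ships, would stream the body and enforce the cap on bytes actually received:

# Hypothetical streaming variant: caps actual bytes received, not just the
# advertised Content-Length.
async def download_image_capped(url):
    file_name = f"{randint(6969, 6999)}.jpg"
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as resp:
                if resp.status != 200:
                    return False
                received = 0
                async with aiofiles.open(file_name, mode='wb') as f:
                    async for chunk in resp.content.iter_chunked(64 * 1024):
                        received += len(chunk)
                        if received > MAX_IMAGE_SIZE:
                            # Oversized: abandon the download (cleanup of the
                            # partial file is omitted for brevity).
                            return False
                        await f.write(chunk)
    except aiohttp.ClientError:
        return False
    return file_name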
docker-compose.yml (new file, 45 lines)
@@ -0,0 +1,45 @@
version: '3'
services:

  api:
    build: .
    ports:
      - '8000:8000'

  nginx:
    image: nginx
    container_name: my-nginx
    restart: always
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf
      - ./nginx/robots.txt:/var/www/html/robots.txt
      - etc-letsencrypt:/etc/letsencrypt
      - www-html:/var/www/html
    ports:
      - "80:80"
      - "443:443"
    networks:
      - api_default

  certbot:
    image: certbot/certbot
    container_name: certbot
    volumes:
      - etc-letsencrypt:/etc/letsencrypt
      - www-html:/var/www/html
    depends_on:
      - nginx
    command: certonly --webroot -w /var/www/html --email me@abakuka.ru -d nsfw.wizardstech.ru --cert-name=nsfw.wizardstech.ru --key-type rsa --agree-tos
    ports:
      - "6000:80"
    networks:
      - api_default

volumes:
  some_volume:
  www-html:
  etc-letsencrypt:

networks:
  api_default:
    external: true
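Note that the api service joins only the project's default network, while nginx proxies to http://api:8000 over api_default. The setup presumably relies on the project directory being named api, so that the default network is itself api_default; since the network is marked external, it would have to be created once beforehand, e.g. with `docker network create api_default`.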
nginx/nginx.conf (new file, 54 lines)
@@ -0,0 +1,54 @@
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 1024;
}

http {
    sendfile on;
    tcp_nopush on;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    server {
        listen 80;
        server_name nsfw.wizardstech.ru;
        location /.well-known/acme-challenge {
            allow all;
            root /var/www/html;
        }
        location / {
            return 301 https://$host$request_uri;
        }
    }

    server {
        listen 443 ssl;
        server_name nsfw.wizardstech.ru;

        root /var/www/html;

        # Paths follow the --cert-name set in the certbot service
        # (docker-compose.yml), which determines the live/ directory name.
        ssl_certificate /etc/letsencrypt/live/nsfw.wizardstech.ru/fullchain.pem;
        ssl_certificate_key /etc/letsencrypt/live/nsfw.wizardstech.ru/privkey.pem;

        location / {
            proxy_set_header Host $http_host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_pass http://api:8000;
        }
        location /.well-known/acme-challenge {
            allow all;
            root /var/www/html;
        }
        location /robots.txt {
            root /var/www/html;
        }
    }
}
nsfw_detector/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# init
nsfw_detector/nsfw_model.h5 (new binary file, not shown)
nsfw_detector/predict.py (new file, 119 lines)
@@ -0,0 +1,119 @@
import argparse
import json
from os import listdir
from os.path import isfile, join, exists, isdir, abspath

import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub


IMAGE_DIM = 224  # required/default image dimensionality


def load_images(image_paths, image_size, verbose=True):
    '''
    Function for loading images into numpy arrays for passing to model.predict
    inputs:
        image_paths: list of image paths to load
        image_size: size into which images should be resized
        verbose: show all of the image paths and sizes loaded

    outputs:
        loaded_images: loaded images on which keras model can run predictions
        loaded_image_paths: paths of images which the function is able to process
    '''
    loaded_images = []
    loaded_image_paths = []

    if isdir(image_paths):
        parent = abspath(image_paths)
        image_paths = [join(parent, f) for f in listdir(
            image_paths) if isfile(join(parent, f))]
    elif isfile(image_paths):
        image_paths = [image_paths]

    for img_path in image_paths:
        try:
            if verbose:
                print(img_path, "size:", image_size)
            image = keras.preprocessing.image.load_img(
                img_path, target_size=image_size)
            image = keras.preprocessing.image.img_to_array(image)
            image /= 255
            loaded_images.append(image)
            loaded_image_paths.append(img_path)
        except Exception as ex:
            print("Image Load Failure: ", img_path, ex)

    return np.asarray(loaded_images), loaded_image_paths


def load_model(model_path):
    if model_path is None or not exists(model_path):
        raise ValueError(
            "saved_model_path must be the valid directory of a saved model to load.")

    model = tf.keras.models.load_model(model_path, custom_objects={
        'KerasLayer': hub.KerasLayer})
    return model


def classify(model, input_paths, image_dim=IMAGE_DIM):
    """ Classify given a model, input paths (could be single string), and image dimensionality...."""
    images, image_paths = load_images(input_paths, (image_dim, image_dim))
    probs = classify_nd(model, images)
    # Note: zip pairs 'data' with the first element of probs, so only the
    # first image's probabilities are returned.
    return dict(zip(['data'], probs))


def classify_nd(model, nd_images):
    """ Classify given a model, image array (numpy)...."""
    model_preds = model.predict(nd_images)
    # preds = np.argsort(model_preds, axis = 1).tolist()

    categories = ['drawings', 'hentai', 'neutral', 'porn', 'sexy']

    probs = []
    for i, single_preds in enumerate(model_preds):
        single_probs = {}
        for j, pred in enumerate(single_preds):
            # Express as a percentage; round after scaling so precision is kept.
            single_probs[categories[j]] = round(float(pred) * 100, 4)
        probs.append(single_probs)
    return probs


def main(args=None):
    parser = argparse.ArgumentParser(
        description="""A script to perform NSFW classification of images""",
        epilog="""
        Launch with default model and a test image
        python nsfw_detector/predict.py --saved_model_path mobilenet_v2_140_224 --image_source test.jpg
        """, formatter_class=argparse.RawTextHelpFormatter)

    submain = parser.add_argument_group(
        'main execution and evaluation functionality')
    submain.add_argument('--image_source', dest='image_source', type=str, required=True,
                         help='A directory of images or a single image to classify')
    submain.add_argument('--saved_model_path', dest='saved_model_path', type=str, required=True,
                         help='The model to load')
    submain.add_argument('--image_dim', dest='image_dim', type=int, default=IMAGE_DIM,
                         help="The square dimension of the model's input shape")
    if args is not None:
        config = vars(parser.parse_args(args))
    else:
        config = vars(parser.parse_args())

    if config['image_source'] is None or not exists(config['image_source']):
        raise ValueError(
            "image_source must be a valid directory with images or a single image to classify.")

    model = load_model(config['saved_model_path'])
    image_preds = classify(model, config['image_source'], config['image_dim'])
    print(json.dumps(image_preds, indent=2), '\n')


if __name__ == "__main__":
    main()
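A minimal programmatic sketch of the same flow, with the model path taken from api/__main__.py; the test image name is hypothetical:

# Programmatic usage sketch; 'test.jpg' is a hypothetical local file.
from nsfw_detector import predict

model = predict.load_model('nsfw_detector/nsfw_model.h5')
results = predict.classify(model, 'test.jpg')
# e.g. {"data": {"drawings": ..., "hentai": ..., "neutral": ..., "porn": ..., "sexy": ...}}
print(results['data'])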
requirements.txt (new file, 7 lines)
@@ -0,0 +1,7 @@
fastapi
uvicorn
aiohttp
aiofiles
tensorflow
tensorflow-hub
pillow