Add at new repo again
This commit is contained in:
@@ -0,0 +1,7 @@
|
||||
|
||||
## Some scripts for developers to use, including:
|
||||
|
||||
- `linter.sh`: lint the codebase before commit
|
||||
- `run_{inference,instant}_tests.sh`: run inference/training for a few iterations.
|
||||
Note that these tests require 2 GPUs.
|
||||
- `parse_results.sh`: parse results from a log file.
|
@@ -0,0 +1,46 @@
|
||||
#!/bin/bash -e
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
||||
|
||||
# Run this script at project root by "./dev/linter.sh" before you commit
|
||||
|
||||
# vergte A B — succeed (exit 0) iff version A >= version B under
# version-number ordering (GNU `sort -V`). Used to enforce minimum
# tool versions below.
vergte() {
  # B sorts first among {A, B} exactly when A >= B.
  # printf is used instead of `echo -e "...\n..."`: echo's escape handling
  # varies across shells/modes, printf is the portable idiom.
  [ "$2" = "$(printf '%s\n%s\n' "$1" "$2" | sort -V | head -n1)" ]
}
|
||||
|
||||
# Abort unless the pinned black build is installed, so that every
# developer formats the code identically.
if ! black --version | grep -E "(19.3b0.*6733274)|(19.3b0\\+8)" > /dev/null; then
  echo "Linter requires 'black @ git+https://github.com/psf/black@673327449f86fce558adde153bb6cbe54bfebad2' !"
  exit 1
fi
|
||||
|
||||
# Require isort >= 4.3.21.
ISORT_TARGET_VERSION="4.3.21"
ISORT_VERSION=$(isort -v | awk '/VERSION/ {print $2}')
if ! vergte "$ISORT_VERSION" "$ISORT_TARGET_VERSION"; then
  echo "Linter requires isort>=${ISORT_TARGET_VERSION} !"
  exit 1
fi
|
||||
|
||||
# Echo each command before running it, so failures are attributable.
set -v

echo "Running isort ..."
# Sort imports in-place across the whole repo.
isort -y -sp . --atomic

echo "Running black ..."
# Format in-place with a 100-column line limit.
black -l 100 .

echo "Running flake8 ..."
# Prefer the flake8-3 launcher when present; otherwise fall back to the
# module form under python3.
if [ -x "$(command -v flake8-3)" ]; then
  flake8-3 .
else
  python3 -m flake8 .
fi

# echo "Running mypy ..."
# Pytorch does not have enough type annotations
# mypy detectron2/solver detectron2/structures detectron2/config

echo "Running clang-format ..."
# Format every C/C++/CUDA/ObjC source in-place; -print0/-0 keeps paths
# with unusual characters intact.
find . -regex ".*\.\(cpp\|c\|cc\|cu\|cxx\|h\|hh\|hpp\|hxx\|tcc\|mm\|m\)" -print0 | xargs -0 clang-format -i

# Run Phabricator's arc lint only when arc is installed.
# NOTE(review): when arc is absent, this `&&` chain is the script's last
# command and leaves exit status 1 — confirm callers don't rely on the
# exit code of linter.sh.
command -v arc > /dev/null && arc lint
|
@@ -0,0 +1,17 @@
|
||||
|
||||
## To build a cu101 wheel for release:
|
||||
|
||||
```
|
||||
$ nvidia-docker run -it --storage-opt "size=20GB" --name pt pytorch/manylinux-cuda101
|
||||
# inside the container:
|
||||
# git clone https://github.com/facebookresearch/detectron2/
|
||||
# cd detectron2
|
||||
# export CU_VERSION=cu101 D2_VERSION_SUFFIX= PYTHON_VERSION=3.7 PYTORCH_VERSION=1.4
|
||||
# ./dev/packaging/build_wheel.sh
|
||||
```
|
||||
|
||||
## To build all wheels for `CUDA {9.2,10.0,10.1}` x `Python {3.6,3.7,3.8}`:
|
||||
```
|
||||
./dev/packaging/build_all_wheels.sh
|
||||
./dev/packaging/gen_wheel_index.sh /path/to/wheels
|
||||
```
|
@@ -0,0 +1,57 @@
|
||||
#!/bin/bash -e
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
||||
|
||||
PYTORCH_VERSION=1.5 # pytorch release to build against; exported into each build container below
|
||||
|
||||
# build_for_one_cuda CU — build python 3.6/3.7/3.8 wheels for one CUDA
# variant. CU is "cu102", "cu101", "cu92", or "cpu".
# For each python version: start a fresh manylinux container with the repo
# bind-mounted at /detectron2, run dev/packaging/build_wheel.sh inside it,
# then clean the per-variant build dir and remove the container.
build_for_one_cuda() {
  cu=$1

  # Pick the docker image: cuXYZ -> pytorch/manylinux-cudaXYZ.
  # The cpu build reuses the cuda101 image (CUDA is disabled via CU_VERSION).
  case "$cu" in
    cu*)
      container_name=manylinux-cuda${cu/cu/}
      ;;
    cpu)
      container_name=manylinux-cuda101
      ;;
    *)
      echo "Unrecognized cu=$cu"
      exit 1
      ;;
  esac

  echo "Launching container $container_name ..."

  for py in 3.6 3.7 3.8; do
    # Detached container; the container name doubles as the image suffix.
    docker run -itd \
      --name $container_name \
      --mount type=bind,source="$(pwd)",target=/detectron2 \
      pytorch/$container_name

    # Feed the build commands to a shell inside the container.
    # D2_VERSION_SUFFIX=+$cu tags the wheel with a local version.
    cat <<EOF | docker exec -i $container_name sh
export CU_VERSION=$cu D2_VERSION_SUFFIX=+$cu PYTHON_VERSION=$py
export PYTORCH_VERSION=$PYTORCH_VERSION
cd /detectron2 && ./dev/packaging/build_wheel.sh
EOF

    # if [[ "$cu" == "cu101" ]]; then
    #   # build wheel without local version
    #   cat <<EOF | docker exec -i $container_name sh
    # export CU_VERSION=$cu D2_VERSION_SUFFIX= PYTHON_VERSION=$py
    # export PYTORCH_VERSION=$PYTORCH_VERSION
    # cd /detectron2 && ./dev/packaging/build_wheel.sh
    # EOF
    # fi

    # Clean up the build dir and the container so the next python version
    # starts fresh and can reuse the container name.
    docker exec -i $container_name rm -rf /detectron2/build/$cu
    docker container stop $container_name
    docker container rm $container_name
  done
}
|
||||
|
||||
# Build only the variant named on the command line, or every supported
# variant when none is given.
case "$1" in
  "")
    for cu in cu102 cu101 cu92 cpu; do
      build_for_one_cuda "$cu"
    done
    ;;
  *)
    build_for_one_cuda "$1"
    ;;
esac
|
@@ -0,0 +1,32 @@
|
||||
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

# Build one detectron2 wheel inside a manylinux container.
# Expects CU_VERSION, D2_VERSION_SUFFIX, PYTHON_VERSION and PYTORCH_VERSION
# to be exported by the caller (see build_all_wheels.sh).
set -ex

ldconfig # https://github.com/NVIDIA/nvidia-docker/issues/854

# Load the setup_cuda / setup_wheel_python / pip_install helpers that live
# next to this script.
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
. "$script_dir/pkg_helpers.bash"

echo "Build Settings:"
echo "CU_VERSION: $CU_VERSION" # e.g. cu101
echo "D2_VERSION_SUFFIX: $D2_VERSION_SUFFIX" # e.g. +cu101 or ""
echo "PYTHON_VERSION: $PYTHON_VERSION" # e.g. 3.6
echo "PYTORCH_VERSION: $PYTORCH_VERSION" # e.g. 1.4

setup_cuda
setup_wheel_python
# Install ninja and expose it under the name "ninja" for the extension build.
yum install ninja-build -y && ln -sv /usr/bin/ninja-build /usr/bin/ninja

# Pick the torch wheel matching CU_VERSION. For cu102 no +suffix is used —
# presumably cu102 is the default build of this torch release on
# download.pytorch.org; confirm if PYTORCH_VERSION changes.
export TORCH_VERSION_SUFFIX="+$CU_VERSION"
if [[ "$CU_VERSION" == "cu102" ]]; then
  export TORCH_VERSION_SUFFIX=""
fi
pip_install pip numpy -U
pip_install "torch==$PYTORCH_VERSION$TORCH_VERSION_SUFFIX" \
  -f https://download.pytorch.org/whl/$CU_VERSION/torch_stable.html

# use separate directories to allow parallel build
BASE_BUILD_DIR=build/$CU_VERSION/$PYTHON_VERSION
python setup.py \
  build -b $BASE_BUILD_DIR \
  bdist_wheel -b $BASE_BUILD_DIR/build_dist -d wheels/$CU_VERSION
|
@@ -0,0 +1,27 @@
|
||||
#!/bin/bash -e
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

# Generate simple pip-compatible HTML indexes for a tree of wheels:
# one index.html per per-variant subdirectory, plus a top-level index.html
# linking every wheel found under the root.

root=$1
if [[ -z "$root" ]]; then
  echo "Usage: ./gen_wheel_index.sh /path/to/wheels"
  # NOTE(review): exits 0 on misuse; kept as-is for backward compatibility.
  exit
fi

index="$root/index.html"

cd "$root"
for cu in cpu cu92 cu100 cu101 cu102; do
  cd "$cu"
  echo "Creating $PWD/index.html ..."
  for whl in *.whl; do
    # '+' in a local version tag (e.g. +cu101) must be URL-encoded in the
    # href; encode every occurrence, not just the first.
    echo "<a href=\"${whl//+/%2B}\">$whl</a><br>"
  done > index.html
  cd "$root"
done

echo "Creating $index ..."
# Read find's output line-by-line instead of word-splitting $(find ...),
# so paths containing whitespace cannot be mangled.
find . -type f -name '*.whl' -printf '%P\n' | sort | while IFS= read -r whl; do
  echo "<a href=\"${whl//+/%2B}\">$whl</a><br>"
done > "$index"
|
@@ -0,0 +1,57 @@
|
||||
#!/bin/bash -e
|
||||
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
||||
|
||||
# retry CMD [ARGS...] — run CMD, retrying up to 4 more times with
# exponential backoff (1/2/4/8 s) to paper over flaky or timing-out
# commands. Succeeds as soon as any attempt succeeds.
retry () {
  # "$@" preserves each argument as one word; the original's unquoted $*
  # re-split (and re-globbed) arguments containing spaces or wildcards.
  "$@" || (sleep 1 && "$@") || (sleep 2 && "$@") || (sleep 4 && "$@") || (sleep 8 && "$@")
}
|
||||
# pip_install PKG... — pip install with retries (see retry above) and no
# progress bar, a bit more robust than plain pip against transient
# network failures.
pip_install() {
  retry pip install --progress-bar off "$@"
}
|
||||
|
||||
|
||||
setup_cuda() {
|
||||
# Now work out the CUDA settings
|
||||
# Like other torch domain libraries, we choose common GPU architectures only.
|
||||
export FORCE_CUDA=1
|
||||
case "$CU_VERSION" in
|
||||
cu102)
|
||||
export CUDA_HOME=/usr/local/cuda-10.2/
|
||||
export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX;7.5+PTX"
|
||||
;;
|
||||
cu101)
|
||||
export CUDA_HOME=/usr/local/cuda-10.1/
|
||||
export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX;7.5+PTX"
|
||||
;;
|
||||
cu100)
|
||||
export CUDA_HOME=/usr/local/cuda-10.0/
|
||||
export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX;7.5+PTX"
|
||||
;;
|
||||
cu92)
|
||||
export CUDA_HOME=/usr/local/cuda-9.2/
|
||||
export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX"
|
||||
;;
|
||||
cpu)
|
||||
unset FORCE_CUDA
|
||||
export CUDA_VISIBLE_DEVICES=
|
||||
;;
|
||||
*)
|
||||
echo "Unrecognized CU_VERSION=$CU_VERSION"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# setup_wheel_python — put the manylinux CPython matching $PYTHON_VERSION
# at the front of $PATH. Exits the script on an unsupported version.
# python_abi is deliberately left as a global (not local): other sourcing
# scripts may read it.
setup_wheel_python() {
  case "$PYTHON_VERSION" in
    3.6)
      python_abi=cp36-cp36m
      ;;
    3.7)
      python_abi=cp37-cp37m
      ;;
    3.8)
      # cp38 dropped the 'm' ABI flag.
      python_abi=cp38-cp38
      ;;
    *)
      echo "Unrecognized PYTHON_VERSION=$PYTHON_VERSION"
      exit 1
      ;;
  esac
  export PATH="/opt/python/$python_abi/bin:$PATH"
}
|
@@ -0,0 +1,45 @@
|
||||
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

# A shell script that parses metrics from the log file.
# Make it easier for developers to track performance of models.

LOG="$1"

if [[ -z "$LOG" ]]; then
  echo "Usage: $0 /path/to/log/file"
  exit 1
fi

# Example log lines the patterns below scrape:
# [12/15 11:47:32] trainer INFO: Total training time: 12:15:04.446477 (0.4900 s / it)
# [12/15 11:49:03] inference INFO: Total inference time: 0:01:25.326167 (0.13652186737060548 s / demo per device, on 8 devices)
# [12/15 11:49:03] inference INFO: Total inference pure compute time: .....

# training time: the seconds-per-iteration figure inside the parentheses.
# NOTE(review): the example above says "Total training time" but this greps
# for "Overall training" — confirm which string the trainer actually logs.
trainspeed=$(grep -o 'Overall training.*' "$LOG" | grep -Eo '\(.*\)' | grep -o '[0-9\.]*')
echo "Training speed: $trainspeed s/it"

# inference time: there could be multiple inference during training
# (use the last occurrence, and only the first number in the parentheses)
inferencespeed=$(grep -o 'Total inference pure.*' "$LOG" | tail -n1 | grep -Eo '\(.*\)' | grep -o '[0-9\.]*' | head -n1)
echo "Inference speed: $inferencespeed s/it"

# [12/15 11:47:18] trainer INFO: eta: 0:00:00 iter: 90000 loss: 0.5407 (0.7256) ... max mem: 4161
# Peak memory: the last reported "max mem"/"max_mem" value.
memory=$(grep -o 'max[_ ]mem: [0-9]*' "$LOG" | tail -n1 | grep -o '[0-9]*')
echo "Training memory: $memory MB"

# One CSV line for pasting into spreadsheets.
echo "Easy to copypaste:"
echo "$trainspeed","$inferencespeed","$memory"

echo "------------------------------"

# Evaluation results are logged as "copypaste:" lines, three per task, e.g.:
# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: bbox
# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl
# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0017,0.0024,0.0017,0.0005,0.0019,0.0011
# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: segm
# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl
# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0014,0.0021,0.0016,0.0005,0.0016,0.0011

echo "COCO Results:"
# Count the distinct tasks whose results appear in the log.
num_tasks=$(grep -o 'copypaste:.*Task.*' "$LOG" | sort -u | wc -l)
# each task has 3 lines
grep -o 'copypaste:.*' "$LOG" | cut -d ' ' -f 2- | tail -n $((num_tasks * 3))
|
@@ -0,0 +1,44 @@
|
||||
#!/bin/bash -e
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

# Run the quick inference-accuracy configs (2 GPUs required), then a smoke
# test of demo.py. Configs may be given as arguments; otherwise every
# quick_schedules/*inference_acc_test.yaml config is used.

BIN="python tools/train_net.py"
OUTPUT="inference_test_output"
NUM_GPUS=2

# Visual separator between sections of the log.
hr() {
  echo "========================================================================"
}

CFG_LIST=( "$@" )

if [[ ${#CFG_LIST[@]} -eq 0 ]]; then
  CFG_LIST=( ./configs/quick_schedules/*inference_acc_test.yaml )
fi

hr
echo "Configs to run:"
echo "${CFG_LIST[@]}"
hr

for cfg in "${CFG_LIST[@]}"; do
  hr
  echo "Running $cfg ..."
  hr
  $BIN \
    --eval-only \
    --num-gpus $NUM_GPUS \
    --config-file "$cfg" \
    OUTPUT_DIR $OUTPUT
  rm -rf $OUTPUT
done

hr
echo "Running demo.py ..."
hr
DEMO_BIN="python demo/demo.py"
COCO_DIR=datasets/coco/val2014
mkdir -pv $OUTPUT

set -v

$DEMO_BIN --config-file ./configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml \
  --input $COCO_DIR/COCO_val2014_0000001933* --output $OUTPUT
rm -rf $OUTPUT
|
@@ -0,0 +1,27 @@
|
||||
#!/bin/bash -e
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

# Run each quick_schedules/*instant_test.yaml config (or the configs given
# as arguments) for a few iterations on 2 GPUs as a training smoke test.

BIN="python tools/train_net.py"
OUTPUT="instant_test_output"
NUM_GPUS=2

# Visual separator between sections of the log.
hr() {
  echo "========================================================================"
}

CFG_LIST=( "$@" )
if [[ ${#CFG_LIST[@]} -eq 0 ]]; then
  CFG_LIST=( ./configs/quick_schedules/*instant_test.yaml )
fi

hr
echo "Configs to run:"
echo "${CFG_LIST[@]}"
hr

for cfg in "${CFG_LIST[@]}"; do
  hr
  echo "Running $cfg ..."
  hr
  $BIN --num-gpus $NUM_GPUS --config-file "$cfg" \
    SOLVER.IMS_PER_BATCH $((NUM_GPUS * 2)) \
    OUTPUT_DIR "$OUTPUT"
  rm -rf "$OUTPUT"
done
|
||||
|
Reference in New Issue
Block a user