Add at new repo again

commit 6e660ddb3c (2025-01-28 21:48:35 +00:00)
564 changed files with 75575 additions and 0 deletions

dev/packaging/README.md

@@ -0,0 +1,17 @@
## To build a cu101 wheel for release:
```
$ nvidia-docker run -it --storage-opt "size=20GB" --name pt pytorch/manylinux-cuda101
# inside the container:
# git clone https://github.com/facebookresearch/detectron2/
# cd detectron2
# export CU_VERSION=cu101 D2_VERSION_SUFFIX= PYTHON_VERSION=3.7 PYTORCH_VERSION=1.4
# ./dev/packaging/build_wheel.sh
```
## To build all wheels for `CUDA {9.2,10.0,10.1}` x `Python {3.6,3.7,3.8}`:
```
./dev/packaging/build_all_wheels.sh
./dev/packaging/gen_wheel_index.sh /path/to/wheels
```
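Once the wheels and the index files produced by `gen_wheel_index.sh` are hosted somewhere, they can be consumed with pip's `-f` flag. A minimal sketch; the host below is a placeholder, not the official distribution URL:
```
# hypothetical index URL; point -f at wherever gen_wheel_index.sh output is hosted
pip install detectron2 -f https://example.com/wheels/cu101/index.html
```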

dev/packaging/build_all_wheels.sh

@@ -0,0 +1,57 @@
#!/bin/bash -e
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

PYTORCH_VERSION=1.5

build_for_one_cuda() {
  cu=$1

  case "$cu" in
    cu*)
      container_name=manylinux-cuda${cu/cu/}
      ;;
    cpu)
      container_name=manylinux-cuda101
      ;;
    *)
      echo "Unrecognized cu=$cu"
      exit 1
      ;;
  esac

  echo "Launching container $container_name ..."

  for py in 3.6 3.7 3.8; do
    # launch a detached manylinux container with the repo bind-mounted at /detectron2
    docker run -itd \
      --name $container_name \
      --mount type=bind,source="$(pwd)",target=/detectron2 \
      pytorch/$container_name

    # run the wheel build for this CUDA/Python combination inside the container
    cat <<EOF | docker exec -i $container_name sh
export CU_VERSION=$cu D2_VERSION_SUFFIX=+$cu PYTHON_VERSION=$py
export PYTORCH_VERSION=$PYTORCH_VERSION
cd /detectron2 && ./dev/packaging/build_wheel.sh
EOF

    # if [[ "$cu" == "cu101" ]]; then
    #   # build wheel without local version
    #   cat <<EOF | docker exec -i $container_name sh
    #   export CU_VERSION=$cu D2_VERSION_SUFFIX= PYTHON_VERSION=$py
    #   export PYTORCH_VERSION=$PYTORCH_VERSION
    #   cd /detectron2 && ./dev/packaging/build_wheel.sh
    # EOF
    # fi

    # remove the per-variant build dir and the container before the next Python version
    docker exec -i $container_name rm -rf /detectron2/build/$cu
    docker container stop $container_name
    docker container rm $container_name
  done
}

if [[ -n "$1" ]]; then
  build_for_one_cuda "$1"
else
  for cu in cu102 cu101 cu92 cpu; do
    build_for_one_cuda "$cu"
  done
fi
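As a usage sketch, following the argument handling above (run from the repo root on a machine with docker and the NVIDIA runtime available):
```
# build wheels for every CUDA variant and every Python version
./dev/packaging/build_all_wheels.sh

# or restrict the build to a single CUDA variant via the optional first argument
./dev/packaging/build_all_wheels.sh cu101
```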

dev/packaging/build_wheel.sh

@@ -0,0 +1,32 @@
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
set -ex

ldconfig  # https://github.com/NVIDIA/nvidia-docker/issues/854

script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
. "$script_dir/pkg_helpers.bash"

echo "Build Settings:"
echo "CU_VERSION: $CU_VERSION"                 # e.g. cu101
echo "D2_VERSION_SUFFIX: $D2_VERSION_SUFFIX"   # e.g. +cu101 or ""
echo "PYTHON_VERSION: $PYTHON_VERSION"         # e.g. 3.6
echo "PYTORCH_VERSION: $PYTORCH_VERSION"       # e.g. 1.4

setup_cuda
setup_wheel_python
yum install ninja-build -y && ln -sv /usr/bin/ninja-build /usr/bin/ninja

export TORCH_VERSION_SUFFIX="+$CU_VERSION"
if [[ "$CU_VERSION" == "cu102" ]]; then
  export TORCH_VERSION_SUFFIX=""
fi
pip_install pip numpy -U
pip_install "torch==$PYTORCH_VERSION$TORCH_VERSION_SUFFIX" \
  -f https://download.pytorch.org/whl/$CU_VERSION/torch_stable.html

# use separate directories to allow parallel build
BASE_BUILD_DIR=build/$CU_VERSION/$PYTHON_VERSION
python setup.py \
  build -b $BASE_BUILD_DIR \
  bdist_wheel -b $BASE_BUILD_DIR/build_dist -d wheels/$CU_VERSION
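For orientation, a run with `CU_VERSION=cu101` and `PYTHON_VERSION=3.7` keeps its intermediate build under `build/cu101/3.7` and drops the wheel into `wheels/cu101`; the exact wheel filename below is illustrative, the version may differ:
```
build/cu101/3.7/                                                  # per-variant build dir (allows parallel builds)
wheels/cu101/detectron2-0.1+cu101-cp37-cp37m-linux_x86_64.whl    # example output name
```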

dev/packaging/gen_wheel_index.sh

@@ -0,0 +1,27 @@
#!/bin/bash -e
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

root=$1
if [[ -z "$root" ]]; then
  echo "Usage: ./gen_wheel_index.sh /path/to/wheels"
  exit
fi
index=$root/index.html

cd "$root"
for cu in cpu cu92 cu100 cu101 cu102; do
  cd $cu
  echo "Creating $PWD/index.html ..."
  for whl in *.whl; do
    echo "<a href=\"${whl/+/%2B}\">$whl</a><br>"
  done > index.html
  cd "$root"
done

echo "Creating $index ..."
for whl in $(find . -type f -name '*.whl' -printf '%P\n' | sort); do
  echo "<a href=\"${whl/+/%2B}\">$whl</a><br>"
done > "$index"
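The script assumes the wheel directory is already organized by CUDA variant, as produced by `build_wheel.sh`. A sketch of the layout after running it; the wheel filenames are illustrative:
```
/path/to/wheels/
  index.html        # written by this script: links to every wheel, paths relative to the root
  cpu/
    index.html      # written by this script: links to the wheels in this directory only
    detectron2-0.1+cpu-cp36-cp36m-linux_x86_64.whl
  cu101/
    index.html
    detectron2-0.1+cu101-cp37-cp37m-linux_x86_64.whl
```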

dev/packaging/pkg_helpers.bash

@@ -0,0 +1,57 @@
#!/bin/bash -e
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

# Function to retry functions that sometimes timeout or have flaky failures
retry () {
  $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*)
}

# Install with pip a bit more robustly than the default
pip_install() {
  retry pip install --progress-bar off "$@"
}

setup_cuda() {
  # Now work out the CUDA settings
  # Like other torch domain libraries, we choose common GPU architectures only.
  export FORCE_CUDA=1
  case "$CU_VERSION" in
    cu102)
      export CUDA_HOME=/usr/local/cuda-10.2/
      export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX;7.5+PTX"
      ;;
    cu101)
      export CUDA_HOME=/usr/local/cuda-10.1/
      export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX;7.5+PTX"
      ;;
    cu100)
      export CUDA_HOME=/usr/local/cuda-10.0/
      export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX;7.5+PTX"
      ;;
    cu92)
      export CUDA_HOME=/usr/local/cuda-9.2/
      export TORCH_CUDA_ARCH_LIST="3.5;3.7;5.0;5.2;6.0+PTX;6.1+PTX;7.0+PTX"
      ;;
    cpu)
      unset FORCE_CUDA
      export CUDA_VISIBLE_DEVICES=
      ;;
    *)
      echo "Unrecognized CU_VERSION=$CU_VERSION"
      exit 1
      ;;
  esac
}

setup_wheel_python() {
  case "$PYTHON_VERSION" in
    3.6) python_abi=cp36-cp36m ;;
    3.7) python_abi=cp37-cp37m ;;
    3.8) python_abi=cp38-cp38 ;;
    *)
      echo "Unrecognized PYTHON_VERSION=$PYTHON_VERSION"
      exit 1
      ;;
  esac
  export PATH="/opt/python/$python_abi/bin:$PATH"
}
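These helpers are meant to be sourced rather than executed; `build_wheel.sh` above is the real consumer. A minimal interactive sketch inside the manylinux container, assuming the same environment variables it uses:
```
# inside pytorch/manylinux-cuda101, from the repo root
export CU_VERSION=cu101 PYTHON_VERSION=3.7
. dev/packaging/pkg_helpers.bash
setup_cuda            # exports FORCE_CUDA, CUDA_HOME, TORCH_CUDA_ARCH_LIST for cu101
setup_wheel_python    # puts /opt/python/cp37-cp37m/bin at the front of PATH
pip_install ninja     # the retrying pip wrapper defined above
```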