update docker-stacks, torchviz, dockerhub-image description

This commit is contained in:
Christoph Schranz 2021-01-05 12:39:30 +01:00
parent 49d0c6f49d
commit c250aea2fe
8 changed files with 183 additions and 225 deletions

View File

@ -2,7 +2,7 @@
# **Please do not change this file directly!**
# To adapt this Dockerfile, adapt 'generate-Dockerfile.sh' or 'src/Dockerfile.usefulpackages'.
# More information can be found in the documentation.
# More information can be found in the README under configuration.
# Use NVIDIA CUDA as base image and run the same installation as in the other packages.
@ -22,7 +22,8 @@ RUN chmod 1777 /tmp && chmod 1777 /var/tmp
# Ubuntu 20.04 (focal)
# https://hub.docker.com/_/ubuntu/?tab=tags&name=focal
# OS/ARCH: linux/amd64
ARG ROOT_CONTAINER=ubuntu:focal-20200423@sha256:238e696992ba9913d24cfc3727034985abd136e08ee3067982401acdc30cbf3f
ARG ROOT_CONTAINER=ubuntu:focal-20201106@sha256:4e4bc990609ed865e07afc8427c30ffdddca5153fd4e82c20d8f0783a291e241
LABEL maintainer="Jupyter Project <jupyter@googlegroups.com>"
ARG NB_USER="jovyan"
@ -34,13 +35,33 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"]
USER root
# ---- Miniforge installer ----
# Default values can be overridden at build time
# (ARGS are in lower case to distinguish them from ENV)
# Check https://github.com/conda-forge/miniforge/releases
# Conda version
ARG conda_version="4.9.2"
# Miniforge installer patch version
ARG miniforge_patch_number="0"
# Miniforge installer architecture
ARG miniforge_arch="x86_64"
# Python implementation to use
# can be either Miniforge3 to use Python or Miniforge-pypy3 to use PyPy
ARG miniforge_python="Miniforge3"
# Miniforge archive to install
ARG miniforge_version="${conda_version}-${miniforge_patch_number}"
# Miniforge installer
ARG miniforge_installer="${miniforge_python}-${miniforge_version}-Linux-${miniforge_arch}.sh"
# Miniforge checksum
ARG miniforge_checksum="6321775eb2c02d7f51d3a9004ce0be839099f126f4099c781531428536669560"
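# A minimal sketch of overriding the ARG defaults above at build time
# (editor's example; the tag name and build context are only illustrative):
#   docker build --build-arg conda_version=4.9.2 \
#                --build-arg miniforge_python=Miniforge3 \
#                -t custom-gpu-jupyter .build/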
# Install all OS dependencies for notebook server that starts but lacks all
# features (e.g., download as all possible file formats)
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update \
&& apt-get install -yq --no-install-recommends \
wget \
bzip2 \
ca-certificates \
sudo \
locales \
@ -61,16 +82,21 @@ ENV CONDA_DIR=/opt/conda \
LANG=en_US.UTF-8 \
LANGUAGE=en_US.UTF-8
ENV PATH=$CONDA_DIR/bin:$PATH \
HOME=/home/$NB_USER
HOME=/home/$NB_USER \
CONDA_VERSION="${conda_version}" \
MINIFORGE_VERSION="${miniforge_version}"
# Copy a script that we will use to correct permissions after running certain commands
COPY fix-permissions /usr/local/bin/fix-permissions
RUN chmod a+rx /usr/local/bin/fix-permissions
# Enable prompt color in the skeleton .bashrc before creating the default NB_USER
RUN sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc
# hadolint ignore=SC2016
RUN sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc && \
# Add call to conda init script see https://stackoverflow.com/a/58081608/4413446
echo 'eval "$(command conda shell.bash hook 2> /dev/null)"' >> /etc/skel/.bashrc
# Create NB_USER wtih name jovyan user with UID=1000 and in the 'users' group
# Create NB_USER with name jovyan user with UID=1000 and in the 'users' group
# and make sure these dirs are writable by the `users` group.
RUN echo "auth requisite pam_deny.so" >> /etc/pam.d/su && \
sed -i.bak -e 's/^%admin/#%admin/' /etc/sudoers && \
@ -83,42 +109,34 @@ RUN echo "auth requisite pam_deny.so" >> /etc/pam.d/su && \
fix-permissions $CONDA_DIR
USER $NB_UID
WORKDIR $HOME
ARG PYTHON_VERSION=default
# Setup work directory for backward-compatibility
RUN mkdir /home/$NB_USER/work && \
fix-permissions /home/$NB_USER
# Install conda as jovyan and check the md5 sum provided on the download site
ENV MINICONDA_VERSION=4.8.2 \
MINICONDA_MD5=87e77f097f6ebb5127c77662dfc3165e \
CONDA_VERSION=4.8.2
RUN mkdir "/home/$NB_USER/work" && \
fix-permissions "/home/$NB_USER"
# Install conda as jovyan and check the sha256 sum provided on the download site
WORKDIR /tmp
RUN wget --quiet https://repo.continuum.io/miniconda/Miniconda3-py37_${MINICONDA_VERSION}-Linux-x86_64.sh && \
echo "${MINICONDA_MD5} *Miniconda3-py37_${MINICONDA_VERSION}-Linux-x86_64.sh" | md5sum -c - && \
/bin/bash Miniconda3-py37_${MINICONDA_VERSION}-Linux-x86_64.sh -f -b -p $CONDA_DIR && \
rm Miniconda3-py37_${MINICONDA_VERSION}-Linux-x86_64.sh && \
# Prerequisites installation: conda, pip, tini
RUN wget --quiet "https://github.com/conda-forge/miniforge/releases/download/${miniforge_version}/${miniforge_installer}" && \
echo "${miniforge_checksum} *${miniforge_installer}" | sha256sum --check && \
/bin/bash "${miniforge_installer}" -f -b -p $CONDA_DIR && \
rm "${miniforge_installer}" && \
# Conda configuration see https://conda.io/projects/conda/en/latest/configuration.html
echo "conda ${CONDA_VERSION}" >> $CONDA_DIR/conda-meta/pinned && \
conda config --system --prepend channels conda-forge && \
conda config --system --set auto_update_conda false && \
conda config --system --set show_channel_urls true && \
conda config --system --set channel_priority strict && \
if [ ! $PYTHON_VERSION = 'default' ]; then conda install --yes python=$PYTHON_VERSION; fi && \
conda list python | grep '^python ' | tr -s ' ' | cut -d '.' -f 1,2 | sed 's/$/.*/' >> $CONDA_DIR/conda-meta/pinned && \
conda install --quiet --yes conda && \
conda install --quiet --yes pip && \
conda install --quiet --yes \
"conda=${CONDA_VERSION}" \
'pip' \
'tini=0.18.0' && \
conda update --all --quiet --yes && \
conda clean --all -f -y && \
rm -rf /home/$NB_USER/.cache/yarn && \
fix-permissions $CONDA_DIR && \
fix-permissions /home/$NB_USER
# Install Tini
RUN conda install --quiet --yes 'tini=0.18.0' && \
conda list tini | grep tini | tr -s ' ' | cut -d ' ' -f 1,2 >> $CONDA_DIR/conda-meta/pinned && \
conda clean --all -f -y && \
rm -rf /home/$NB_USER/.cache/yarn && \
fix-permissions $CONDA_DIR && \
fix-permissions /home/$NB_USER
@ -129,9 +147,9 @@ RUN conda install --quiet --yes 'tini=0.18.0' && \
# Do all this in a single RUN command to avoid duplicating all of the
# files across image layers when the permissions change
RUN conda install --quiet --yes \
'notebook=6.0.3' \
'jupyterhub=1.1.0' \
'jupyterlab=2.1.3' && \
'notebook=6.1.6' \
'jupyterhub=1.3.0' \
'jupyterlab=2.2.9' && \
conda clean --all -f -y && \
npm cache clean --force && \
jupyter notebook --generate-config && \
@ -148,10 +166,16 @@ CMD ["start-notebook.sh"]
# Copy local files as late as possible to avoid cache busting
COPY start.sh start-notebook.sh start-singleuser.sh /usr/local/bin/
# Currently need to have both jupyter_notebook_config and jupyter_server_config to support classic and lab
COPY jupyter_notebook_config.py /etc/jupyter/
# Fix permissions on /etc/jupyter as root
USER root
# Prepare upgrade to JupyterLab V3.0 #1205
RUN sed -re "s/c.NotebookApp/c.ServerApp/g" \
/etc/jupyter/jupyter_notebook_config.py > /etc/jupyter/jupyter_server_config.py
RUN fix-permissions /etc/jupyter/
# Switch back to jovyan to avoid accidental container runs as root
@ -173,29 +197,27 @@ USER root
# Install all OS dependencies for fully functional notebook server
RUN apt-get update && apt-get install -yq --no-install-recommends \
build-essential \
emacs-nox \
vim-tiny \
git \
inkscape \
jed \
libsm6 \
libxext-dev \
libxrender1 \
lmodern \
netcat \
python-dev \
# ---- nbconvert dependencies ----
texlive-xetex \
texlive-fonts-recommended \
texlive-plain-generic \
# Optional dependency
texlive-fonts-extra \
# ----
tzdata \
unzip \
nano \
nano-tiny \
&& apt-get clean && rm -rf /var/lib/apt/lists/*
# Create alternative for nano -> nano-tiny
RUN update-alternatives --install /usr/bin/nano nano /bin/nano-tiny 10
# Switch back to jovyan to avoid accidental container runs as root
USER $NB_UID
@ -210,10 +232,10 @@ LABEL maintainer="Jupyter Project <jupyter@googlegroups.com>"
USER root
# ffmpeg for matplotlib anim & dvipng for latex labels
# ffmpeg for matplotlib anim & dvipng+cm-super for latex labels
RUN apt-get update && \
apt-get install -y --no-install-recommends ffmpeg dvipng && \
rm -rf /var/lib/apt/lists/*
apt-get install -y --no-install-recommends ffmpeg dvipng cm-super && \
apt-get clean && rm -rf /var/lib/apt/lists/*
USER $NB_UID
@ -221,31 +243,29 @@ USER $NB_UID
RUN conda install --quiet --yes \
'beautifulsoup4=4.9.*' \
'conda-forge::blas=*=openblas' \
'bokeh=2.0.*' \
'bokeh=2.2.*' \
'bottleneck=1.3.*' \
'cloudpickle=1.4.*' \
'cloudpickle=1.6.*' \
'cython=0.29.*' \
'dask=2.15.*' \
'dask=2020.12.*' \
'dill=0.3.*' \
'h5py=2.10.*' \
'hdf5=1.10.*' \
'ipywidgets=7.5.*' \
'h5py=3.1.*' \
'ipywidgets=7.6.*' \
'ipympl=0.5.*'\
'matplotlib-base=3.2.*' \
# numba update to 0.49 fails resolving deps.
'numba=0.48.*' \
'matplotlib-base=3.3.*' \
'numba=0.52.*' \
'numexpr=2.7.*' \
'pandas=1.0.*' \
'pandas=1.1.*' \
'patsy=0.5.*' \
'protobuf=3.11.*' \
'protobuf=3.14.*' \
'pytables=3.6.*' \
'scikit-image=0.16.*' \
'scikit-learn=0.22.*' \
'scipy=1.4.*' \
'seaborn=0.10.*' \
'scikit-image=0.18.*' \
'scikit-learn=0.24.*' \
'scipy=1.5.*' \
'seaborn=0.11.*' \
'sqlalchemy=1.3.*' \
'statsmodels=0.11.*' \
'sympy=1.5.*' \
'statsmodels=0.12.*' \
'sympy=1.7.*' \
'vincent=0.4.*' \
'widgetsnbextension=3.5.*'\
'xlrd=1.2.*' \
@ -283,102 +303,6 @@ RUN MPLBACKEND=Agg python -c "import matplotlib.pyplot" && \
USER $NB_UID
WORKDIR $HOME
############################################################################
################ Dependency: jupyter/datascience-notebook ##################
############################################################################
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
LABEL maintainer="Jupyter Project <jupyter@googlegroups.com>"
# Set when building on Travis so that certain long-running build steps can
# be skipped to shorten build time.
ARG TEST_ONLY_BUILD
# Fix DL4006
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
USER root
# R pre-requisites
RUN apt-get update && \
apt-get install -y --no-install-recommends \
fonts-dejavu \
gfortran \
gcc && \
rm -rf /var/lib/apt/lists/*
# Julia dependencies
# install Julia packages in /opt/julia instead of $HOME
ENV JULIA_DEPOT_PATH=/opt/julia
ENV JULIA_PKGDIR=/opt/julia
ENV JULIA_VERSION=1.4.1
WORKDIR /tmp
# hadolint ignore=SC2046
RUN mkdir "/opt/julia-${JULIA_VERSION}" && \
wget -q https://julialang-s3.julialang.org/bin/linux/x64/$(echo "${JULIA_VERSION}" | cut -d. -f 1,2)"/julia-${JULIA_VERSION}-linux-x86_64.tar.gz" && \
echo "fd6d8cadaed678174c3caefb92207a3b0e8da9f926af6703fb4d1e4e4f50610a *julia-${JULIA_VERSION}-linux-x86_64.tar.gz" | sha256sum -c - && \
tar xzf "julia-${JULIA_VERSION}-linux-x86_64.tar.gz" -C "/opt/julia-${JULIA_VERSION}" --strip-components=1 && \
rm "/tmp/julia-${JULIA_VERSION}-linux-x86_64.tar.gz"
RUN ln -fs /opt/julia-*/bin/julia /usr/local/bin/julia
# Show Julia where conda libraries are \
RUN mkdir /etc/julia && \
echo "push!(Libdl.DL_LOAD_PATH, \"$CONDA_DIR/lib\")" >> /etc/julia/juliarc.jl && \
# Create JULIA_PKGDIR \
mkdir "${JULIA_PKGDIR}" && \
chown "${NB_USER}" "${JULIA_PKGDIR}" && \
fix-permissions "${JULIA_PKGDIR}"
USER $NB_UID
# R packages including IRKernel which gets installed globally.
RUN conda install --quiet --yes \
'r-base=3.6.3' \
'r-caret=6.0*' \
'r-crayon=1.3*' \
'r-devtools=2.3*' \
'r-forecast=8.12*' \
'r-hexbin=1.28*' \
'r-htmltools=0.4*' \
'r-htmlwidgets=1.5*' \
'r-irkernel=1.1*' \
'r-nycflights13=1.0*' \
'r-plyr=1.8*' \
'r-randomforest=4.6*' \
'r-rcurl=1.98*' \
'r-reshape2=1.4*' \
'r-rmarkdown=2.1*' \
'r-rsqlite=2.2*' \
'r-shiny=1.4*' \
'r-tidyverse=1.3*' \
'rpy2=3.1*' \
&& \
conda clean --all -f -y && \
fix-permissions "${CONDA_DIR}" && \
fix-permissions "/home/${NB_USER}"
# Add Julia packages. Only add HDF5 if this is not a test-only build since
# it takes roughly half the entire build time of all of the images on Travis
# to add this one package and often causes Travis to timeout.
#
# Install IJulia as jovyan and then move the kernelspec out
# to the system share location. Avoids problems with runtime UID change not
# taking effect properly on the .local folder in the jovyan home dir.
RUN julia -e 'import Pkg; Pkg.update()' && \
(test $TEST_ONLY_BUILD || julia -e 'import Pkg; Pkg.add("HDF5")') && \
julia -e "using Pkg; pkg\"add IJulia\"; pkg\"precompile\"" && \
# move kernelspec out of home \
mv "${HOME}/.local/share/jupyter/kernels/julia"* "${CONDA_DIR}/share/jupyter/kernels/" && \
chmod -R go+rx "${CONDA_DIR}/share/jupyter" && \
rm -rf "${HOME}/.local" && \
fix-permissions "${JULIA_PKGDIR}" "${CONDA_DIR}/share/jupyter"
WORKDIR $HOME
############################################################################
@ -403,6 +327,8 @@ RUN conda install --quiet --yes \
torchvision \
cudatoolkit=10.1 -c pytorch
# pip install torch_nightly -f https://download.pytorch.org/whl/nightly/cu90/torch_nightly.html && \
RUN pip install --no-cache-dir torchviz
# Clean installation
RUN conda clean --all -f -y && \

@ -1 +1 @@
Subproject commit d676cdf9b4847b5a5da2d4367aed56265174a5ef
Subproject commit 703d8b2dcb886be2fe5aa4660a48fbcef647e7aa

View File

@ -18,18 +18,18 @@
set -e
for d in "$@"; do
find "$d" \
! \( \
-group $NB_GID \
-a -perm -g+rwX \
\) \
-exec chgrp $NB_GID {} \; \
-exec chmod g+rwX {} \;
# setuid,setgid *on directories only*
find "$d" \
\( \
-type d \
-a ! -perm -6000 \
\) \
-exec chmod +6000 {} \;
find "$d" \
! \( \
-group $NB_GID \
-a -perm -g+rwX \
\) \
-exec chgrp $NB_GID {} \; \
-exec chmod g+rwX {} \;
# setuid, setgid *on directories only*
find "$d" \
\( \
-type d \
-a ! -perm -6000 \
\) \
-exec chmod +6000 {} \;
done

View File

@ -7,7 +7,7 @@ import os
import errno
import stat
c = get_config()
c = get_config() # noqa: F821
c.NotebookApp.ip = '0.0.0.0'
c.NotebookApp.port = 8888
c.NotebookApp.open_browser = False
@ -52,4 +52,4 @@ distinguished_name = req_distinguished_name
# Change default umask for all subprocesses of the notebook server if set in
# the environment
if 'NB_UMASK' in os.environ:
os.umask(int(os.environ['NB_UMASK'], 8))
os.umask(int(os.environ['NB_UMASK'], 8))

View File

@ -47,19 +47,6 @@ if [ $(id -u) == 0 ] ; then
usermod -d /home/$NB_USER -l $NB_USER jovyan
fi
# Handle case where provisioned storage does not have the correct permissions by default
# Ex: default NFS/EFS (no auto-uid/gid)
if [[ "$CHOWN_HOME" == "1" || "$CHOWN_HOME" == 'yes' ]]; then
echo "Changing ownership of /home/$NB_USER to $NB_UID:$NB_GID with options '${CHOWN_HOME_OPTS}'"
chown $CHOWN_HOME_OPTS $NB_UID:$NB_GID /home/$NB_USER
fi
if [ ! -z "$CHOWN_EXTRA" ]; then
for extra_dir in $(echo $CHOWN_EXTRA | tr ',' ' '); do
echo "Changing ownership of ${extra_dir} to $NB_UID:$NB_GID with options '${CHOWN_EXTRA_OPTS}'"
chown $CHOWN_EXTRA_OPTS $NB_UID:$NB_GID $extra_dir
done
fi
# handle home and working directory if the username changed
if [[ "$NB_USER" != "jovyan" ]]; then
# changing username, make sure homedir exists
@ -76,11 +63,24 @@ if [ $(id -u) == 0 ] ; then
fi
fi
# Handle case where provisioned storage does not have the correct permissions by default
# Ex: default NFS/EFS (no auto-uid/gid)
if [[ "$CHOWN_HOME" == "1" || "$CHOWN_HOME" == 'yes' ]]; then
echo "Changing ownership of /home/$NB_USER to $NB_UID:$NB_GID with options '${CHOWN_HOME_OPTS}'"
chown $CHOWN_HOME_OPTS $NB_UID:$NB_GID /home/$NB_USER
fi
if [ ! -z "$CHOWN_EXTRA" ]; then
for extra_dir in $(echo $CHOWN_EXTRA | tr ',' ' '); do
echo "Changing ownership of ${extra_dir} to $NB_UID:$NB_GID with options '${CHOWN_EXTRA_OPTS}'"
chown $CHOWN_EXTRA_OPTS $NB_UID:$NB_GID $extra_dir
done
fi
# Change UID:GID of NB_USER to NB_UID:NB_GID if it does not match
if [ "$NB_UID" != $(id -u $NB_USER) ] || [ "$NB_GID" != $(id -g $NB_USER) ]; then
echo "Set user $NB_USER UID:GID to: $NB_UID:$NB_GID"
if [ "$NB_GID" != $(id -g $NB_USER) ]; then
groupadd -g $NB_GID -o ${NB_GROUP:-${NB_USER}}
groupadd -f -g $NB_GID -o ${NB_GROUP:-${NB_USER}}
fi
userdel $NB_USER
useradd --home /home/$NB_USER -u $NB_UID -g $NB_GID -G 100 -l $NB_USER
@ -101,7 +101,7 @@ if [ $(id -u) == 0 ] ; then
echo "Executing the command: ${cmd[@]}"
exec sudo -E -H -u $NB_USER PATH=$PATH XDG_CACHE_HOME=/home/$NB_USER/.cache PYTHONPATH=${PYTHONPATH:-} "${cmd[@]}"
else
if [[ "$NB_UID" == "$(id -u jovyan)" && "$NB_GID" == "$(id -g jovyan)" ]]; then
if [[ "$NB_UID" == "$(id -u jovyan 2>/dev/null)" && "$NB_GID" == "$(id -g jovyan 2>/dev/null)" ]]; then
# User is not attempting to override user/group via environment
# variables, but they could still have overridden the uid/gid that
# container runs as. Check that the user has an entry in the passwd

README.md (106 changed lines)
View File

@ -21,62 +21,81 @@ The image of this repository is available on [Dockerhub](https://hub.docker.com/
## Requirements
1. A NVIDIA GPU
1. A computer with an NVIDIA GPU
2. Install [Docker](https://www.docker.com/community-edition#/download) version **1.10.0+**
and [Docker Compose](https://docs.docker.com/compose/install/) version **1.6.0+**.
3. Get access to your GPU via CUDA drivers within Docker containers. Therfore, check out this
[medium article](https://medium.com/@christoph.schranz/set-up-your-own-gpu-based-jupyterlab-e0d45fcacf43).
The CUDA toolkit is not required on the host system, as it will be deployed
in [NVIDIA-docker](https://github.com/NVIDIA/nvidia-docker).
3. Get access to your GPU via CUDA drivers within Docker containers.
You can be sure that you can access your GPU within Docker,
if the command `docker run --gpus all nvidia/cuda:10.1-base-ubuntu18.04 nvidia-smi`
if the command `docker run --gpus all nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 nvidia-smi`
returns a result similar to this one:
```bash
Mon Jun 22 09:06:28 2020
Tue Jan 5 09:38:21 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 440.82 Driver Version: 440.82 CUDA Version: 10.1 |
| NVIDIA-SMI 450.80.02 Driver Version: 450.80.02 CUDA Version: 10.1 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 GeForce RTX 207... Off | 00000000:01:00.0 On | N/A |
| 0% 46C P8 9W / 215W | 424MiB / 7974MiB | 6% Default |
| 0% 40C P8 7W / 215W | 360MiB / 7974MiB | 1% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: GPU Memory |
| GPU PID Type Process name Usage |
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
+-----------------------------------------------------------------------------+
```
4. Clone the Repository or pull the image from
[Dockerhub](https://hub.docker.com/repository/docker/cschranz/gpu-jupyter):
```bash
git clone https://github.com/iot-salzburg/gpu-jupyter.git
cd gpu-jupyter
```
```
If you don't get an output similar to this one, follow the installation steps in this
[medium article](https://medium.com/@christoph.schranz/set-up-your-own-gpu-based-jupyterlab-e0d45fcacf43).
The CUDA toolkit is not required on the host system, as it will be
installed within the Docker containers using [NVIDIA-docker](https://github.com/NVIDIA/nvidia-docker).
It is also important to keep your installed CUDA version in mind when you pull images.
**You can't run images based on `nvidia/cuda:11.1` if you have only CUDA version 10.1 installed.**
Check your host's CUDA version with `nvcc --version` and update it to at least
the version of the image you want to pull (see the quick check after this list).
4. Pull and run the image. This can take a while, as a complete data-science
environment will be downloaded:
```bash
cd your-working-directory
docker run --gpus all -d -it -p 8848:8888 -v data:/home/jovyan/work -e GRANT_SUDO=yes -e JUPYTER_ENABLE_LAB=yes --user root cschranz/gpu-jupyter:v1.1_cuda-10.1_ubuntu-18.04_python-only
```
This starts a new instance of the GPU-Jupyter service at [http://localhost:8848](http://localhost:8848) (host port `8848`).
The default password is `asdf`, which should be changed as described [below](#set-password).
Furthermore, data within the host's `data` directory is shared with the container.
Within the JupyterLab instance, you can check whether you can access your GPU by opening a new terminal window and running
`nvidia-smi`. In terminal windows, you can also install new packages for your own projects.
Some example code can be found in the repository under `extra/Getting_Started`.
If you want to learn more about JupyterLab, check out this [tutorial](https://www.youtube.com/watch?v=7wfPqAyYADY).
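As a quick sanity check (a minimal sketch; the container name `gpu-jupyter_1` is only used here because the later examples start the container with that name), you can compare the host's CUDA installation with what the container sees:
```bash
# On the host: driver status and installed CUDA toolkit version
nvidia-smi
nvcc --version
# Inside a running container (started with --gpus all):
docker exec -it gpu-jupyter_1 nvidia-smi
```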
## Quickstart
First of all, it is necessary to generate the `Dockerfile` that is based on
## Build a modified version
First, it is necessary to generate the `Dockerfile` in `.build`, which is based on
the NVIDIA base image and the [docker-stacks](https://github.com/jupyter/docker-stacks).
As soon as you have access to your GPU within Docker containers
(make sure the command `docker run --gpus all nvidia/cuda:10.1-base-ubuntu18.04 nvidia-smi`
(make sure the command `docker run --gpus all nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 nvidia-smi`
shows your GPU statistics), you can generate the Dockerfile, build and run it.
The following commands will start *GPU-Jupyter* on [localhost:8848](http://localhost:8848)
with the default password `asdf`.
```bash
git clone https://github.com/iot-salzburg/gpu-jupyter.git
cd gpu-jupyter
# generate a Dockerfile with python and without Julia and R
./generate-Dockerfile.sh --no-datascience-notebook
docker build -t gpu-jupyter .build/ # will take a while
docker run -d -p [port]:8888 gpu-jupyter # starts gpu-jupyter WITHOUT GPU support
docker run --gpus all -d -it -p 8848:8888 -v $pwd/data:/home/jovyan/work -e GRANT_SUDO=yes -e JUPYTER_ENABLE_LAB=yes --user root --restart always --name gpu-jupyter_1 gpu-jupyter
```
To run the container WITH GPU support, a local data volume and some other configurations, run:
```bash
docker run --gpus all -d -it -p 8848:8888 -v $(pwd)/data:/home/jovyan/work -e GRANT_SUDO=yes -e JUPYTER_ENABLE_LAB=yes --user root --restart always --name gpu-jupyter_1 gpu-jupyter
```
This starts a container WITH GPU support, a shared local data volume `data`,
and some other configurations such as root permissions, which are necessary to install packages within the container.
For more configurations, scroll down to [Configuration of the Dockerfile-Generation](#configuration-of-the-dockerfile-generation).
### Start via Docker Compose
@ -92,14 +111,16 @@ underlying `docker-compose.yml`:
With these commands we can see if everything worked well:
```bash
bash show-local.sh # a env-var safe wrapper for 'docker-compose logs -f'
docker ps
docker logs [service-name]
docker logs [service-name] # or
bash show-local.sh # an env-var-safe wrapper for 'docker-compose logs -f'
```
In order to stop the local deployment, run:
```bash
docker ps
docker rm -f [service-name] # or
./stop-local.sh
```
@ -134,13 +155,16 @@ the essential `gpulibs` are installed, but not the packages within `src/Dockerfi
### Custom Installations
**As `.build/Dockerfile` is overwritten, it is suggested to append custom installations either
Custom packages can be installed within a container, or by modifying the file
`src/Dockerfile.usefulpackages`.
**As `.build/Dockerfile` is overwritten each time a Dockerfile is generated,
it is suggested to append custom installations either
within `src/Dockerfile.usefulpackages` or in `generate-Dockerfile.sh`.**
If you think some package is missing in the default stack, please let us know!
If an essential package is missing in the default stack, please let us know!
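As an illustration (a minimal sketch; the container name and the package are placeholders), a package can be added ad hoc inside a running container, while anything you want to keep across rebuilds belongs in `src/Dockerfile.usefulpackages`:
```bash
# Temporary: installs into the running container only and is lost on rebuild
docker exec -it gpu-jupyter_1 pip install --no-cache-dir plotly
```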
### Set the Password
### Set Password
Please set a new password using `src/jupyter_notebook_config.json`.
To do so, hash your password in the form `(password)(salt)` using a SHA1 hash generator, e.g., the generator at [sha1-online.com](http://www.sha1-online.com/).
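A minimal sketch of this hashing step on the command line (the password `asdf` and the salt `e49e73b0eb0e` are placeholders only):
```bash
# SHA1 of the concatenation (password)(salt); the hex digest is then stored
# in the config in the form "sha1:<salt>:<digest>"
echo -n "asdfe49e73b0eb0e" | sha1sum
```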
@ -162,24 +186,30 @@ Then update the config file as shown below and restart the service.
#### Update CUDA to another version
Please check version compatibilities for [CUDA and Pytorch](https://pytorch.org/get-started/locally/)
respectively [CUDA and Tensorflow](https://www.tensorflow.org/install/gpu) previously.
To update CUDA to another version, change in `Dockerfile.header`
The host's CUDA version must be equal to or higher than that of the
container itself (set in `Dockerfile.header`).
Check the host's version with `nvcc --version` and check the version compatibilities
of CUDA-dependent packages such as [PyTorch](https://pytorch.org/get-started/locally/)
and [TensorFlow](https://www.tensorflow.org/install/gpu) beforehand.
Then, if supported, modify the CUDA version in `Dockerfile.header`, e.g., change
the line:
FROM nvidia/cuda:10.1-base-ubuntu18.04
FROM nvidia/cuda:11.1-base-ubuntu20.04
and in the `Dockerfile.pytorch` the line:
cudatoolkit=10.1
cudatoolkit=11.1
Then re-generate and re-run the image, as closer described above:
Then re-generate, re-build, and run the updated image as described in more detail above.
Note that a change in the first line of the Dockerfile will trigger a rebuild of the whole image:
```bash
./generate-Dockerfile.sh
./start-local.sh -p 8848
docker build -t gpu-jupyter .build/ # will take a while
docker run --gpus all -d -it -p 8848:8888 -v $(pwd)/data:/home/jovyan/work -e GRANT_SUDO=yes -e JUPYTER_ENABLE_LAB=yes --user root --restart always --name gpu-jupyter_1 gpu-jupyter
```
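To confirm which CUDA build actually ended up in the image, a quick check from the host can help (a sketch; the container name is illustrative and PyTorch is assumed to be installed, as in this stack):
```bash
# Prints the CUDA version PyTorch was built against and whether a GPU is visible
docker exec gpu-jupyter_1 python -c "import torch; print(torch.version.cuda, torch.cuda.is_available())"
```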
#### Update Docker-Stack
The [docker-stacks](https://github.com/jupyter/docker-stacks) are used as a

View File

@ -5,7 +5,7 @@ cd $(cd -P -- "$(dirname -- "$0")" && pwd -P)
export DOCKERFILE=".build/Dockerfile"
export STACKS_DIR=".build/docker-stacks"
# please test the build of the commit in https://github.com/jupyter/docker-stacks/commits/master in advance
export HEAD_COMMIT="04f7f60d34a674a2964d96a6cb97c57a7870a828"
export HEAD_COMMIT="703d8b2dcb886be2fe5aa4660a48fbcef647e7aa"
while [[ "$#" -gt 0 ]]; do case $1 in
-c|--commit) HEAD_COMMIT="$2"; shift;;
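Judging from the option parsing above, the pinned docker-stacks commit can also be overridden at generation time instead of editing the script (a sketch; the hash shown is simply the new default set in this commit):
```bash
# Generate the Dockerfile against a specific docker-stacks commit
./generate-Dockerfile.sh --commit 703d8b2dcb886be2fe5aa4660a48fbcef647e7aa
```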

View File

@ -16,6 +16,8 @@ RUN conda install --quiet --yes \
torchvision \
cudatoolkit=10.1 -c pytorch
# pip install torch_nightly -f https://download.pytorch.org/whl/nightly/cu90/torch_nightly.html && \
RUN pip install --no-cache-dir torchviz
# Clean installation
RUN conda clean --all -f -y && \