feature: set sparse Dockerfile with Python interpreter only

parent da9c98689e
commit a4d5573698
@@ -2,7 +2,7 @@
# The version of cudatoolkit must match those of the base image, see Dockerfile.pytorch
FROM nvidia/cuda:10.1-base-ubuntu18.04
LABEL maintainer="Christoph Schranz <christoph.schranz@salzburgresearch.at>"
# The maintainers of subsequent sections may vary
# This is a concatenated Dockerfile, the maintainers of subsequent sections may vary.

############################################################################
#################### Dependency: jupyter/base-image ########################
@@ -11,15 +11,19 @@ LABEL maintainer="Christoph Schranz <christoph.schranz@salzburgresearch.at>"
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.

# Ubuntu 18.04 (bionic)
# https://hub.docker.com/_/ubuntu/?tab=tags&name=bionic
ARG ROOT_CONTAINER=ubuntu:bionic-20200112@sha256:bc025862c3e8ec4a8754ea4756e33da6c41cba38330d7e324abd25c8e0b93300
# Ubuntu 20.04 (focal)
# https://hub.docker.com/_/ubuntu/?tab=tags&name=focal
# OS/ARCH: linux/amd64
ARG ROOT_CONTAINER=ubuntu:focal-20200423@sha256:238e696992ba9913d24cfc3727034985abd136e08ee3067982401acdc30cbf3f

LABEL maintainer="Jupyter Project <jupyter@googlegroups.com>"
ARG NB_USER="jovyan"
ARG NB_UID="1000"
ARG NB_GID="100"

# Fix DL4006
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

USER root

# Install all OS dependencies for notebook server that starts but lacks all
@@ -68,7 +72,7 @@ RUN echo "auth requisite pam_deny.so" >> /etc/pam.d/su && \
chown $NB_USER:$NB_GID $CONDA_DIR && \
chmod g+w /etc/passwd && \
fix-permissions $HOME && \
fix-permissions "$(dirname $CONDA_DIR)"
fix-permissions $CONDA_DIR

USER $NB_UID
WORKDIR $HOME
@@ -79,19 +83,20 @@ RUN mkdir /home/$NB_USER/work && \
fix-permissions /home/$NB_USER

# Install conda as jovyan and check the md5 sum provided on the download site
ENV MINICONDA_VERSION=4.7.12.1 \
MINICONDA_MD5=81c773ff87af5cfac79ab862942ab6b3 \
CONDA_VERSION=4.7.12
ENV MINICONDA_VERSION=4.8.2 \
MINICONDA_MD5=87e77f097f6ebb5127c77662dfc3165e \
CONDA_VERSION=4.8.2

RUN cd /tmp && \
wget --quiet https://repo.continuum.io/miniconda/Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh && \
echo "${MINICONDA_MD5} *Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh" | md5sum -c - && \
/bin/bash Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh -f -b -p $CONDA_DIR && \
rm Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh && \
WORKDIR /tmp
RUN wget --quiet https://repo.continuum.io/miniconda/Miniconda3-py37_${MINICONDA_VERSION}-Linux-x86_64.sh && \
echo "${MINICONDA_MD5} *Miniconda3-py37_${MINICONDA_VERSION}-Linux-x86_64.sh" | md5sum -c - && \
/bin/bash Miniconda3-py37_${MINICONDA_VERSION}-Linux-x86_64.sh -f -b -p $CONDA_DIR && \
rm Miniconda3-py37_${MINICONDA_VERSION}-Linux-x86_64.sh && \
echo "conda ${CONDA_VERSION}" >> $CONDA_DIR/conda-meta/pinned && \
conda config --system --prepend channels conda-forge && \
conda config --system --set auto_update_conda false && \
conda config --system --set show_channel_urls true && \
conda config --system --set channel_priority strict && \
if [ ! $PYTHON_VERSION = 'default' ]; then conda install --yes python=$PYTHON_VERSION; fi && \
conda list python | grep '^python ' | tr -s ' ' | cut -d '.' -f 1,2 | sed 's/$/.*/' >> $CONDA_DIR/conda-meta/pinned && \
conda install --quiet --yes conda && \
@@ -118,7 +123,7 @@ RUN conda install --quiet --yes 'tini=0.18.0' && \
RUN conda install --quiet --yes \
'notebook=6.0.3' \
'jupyterhub=1.1.0' \
'jupyterlab=1.2.5' && \
'jupyterlab=2.1.3' && \
conda clean --all -f -y && \
npm cache clean --force && \
jupyter notebook --generate-config && \
@@ -134,9 +139,7 @@ ENTRYPOINT ["tini", "-g", "--"]
CMD ["start-notebook.sh"]

# Copy local files as late as possible to avoid cache busting
COPY start.sh /usr/local/bin/
COPY start-notebook.sh /usr/local/bin/
COPY start-singleuser.sh /usr/local/bin/
COPY start.sh start-notebook.sh start-singleuser.sh /usr/local/bin/
COPY jupyter_notebook_config.py /etc/jupyter/

# Fix permissions on /etc/jupyter as root
@@ -146,6 +149,8 @@ RUN fix-permissions /etc/jupyter/
# Switch back to jovyan to avoid accidental container runs as root
USER $NB_UID

WORKDIR $HOME

############################################################################
################# Dependency: jupyter/minimal-notebook #####################
############################################################################
@@ -160,7 +165,8 @@ USER root
# Install all OS dependencies for fully functional notebook server
RUN apt-get update && apt-get install -yq --no-install-recommends \
build-essential \
emacs \
emacs-nox \
vim-tiny \
git \
inkscape \
jed \
@@ -173,7 +179,7 @@ RUN apt-get update && apt-get install -yq --no-install-recommends \
# ---- nbconvert dependencies ----
texlive-xetex \
texlive-fonts-recommended \
texlive-generic-recommended \
texlive-plain-generic \
# Optional dependency
texlive-fonts-extra \
# ----
@@ -196,40 +202,45 @@ LABEL maintainer="Jupyter Project <jupyter@googlegroups.com>"

USER root

# ffmpeg for matplotlib anim
# ffmpeg for matplotlib anim & dvipng for latex labels
RUN apt-get update && \
apt-get install -y --no-install-recommends ffmpeg && \
apt-get install -y --no-install-recommends ffmpeg dvipng && \
rm -rf /var/lib/apt/lists/*

USER $NB_UID

# Install Python 3 packages
RUN conda install --quiet --yes \
'beautifulsoup4=4.8.*' \
'beautifulsoup4=4.9.*' \
'conda-forge::blas=*=openblas' \
'bokeh=1.4.*' \
'cloudpickle=1.2.*' \
'bokeh=2.0.*' \
'bottleneck=1.3.*' \
'cloudpickle=1.4.*' \
'cython=0.29.*' \
'dask=2.9.*' \
'dask=2.15.*' \
'dill=0.3.*' \
'h5py=2.10.*' \
'hdf5=1.10.*' \
'ipywidgets=7.5.*' \
'matplotlib-base=3.1.*' \
'ipympl=0.5.*'\
'matplotlib-base=3.2.*' \
# numba update to 0.49 fails resolving deps.
'numba=0.48.*' \
'numexpr=2.7.*' \
'pandas=0.25.*' \
'pandas=1.0.*' \
'patsy=0.5.*' \
'protobuf=3.11.*' \
'pytables=3.6.*' \
'scikit-image=0.16.*' \
'scikit-learn=0.22.*' \
'scipy=1.4.*' \
'seaborn=0.9.*' \
'seaborn=0.10.*' \
'sqlalchemy=1.3.*' \
'statsmodels=0.11.*' \
'sympy=1.5.*' \
'vincent=0.4.*' \
'xlrd' \
'widgetsnbextension=3.5.*'\
'xlrd=1.2.*' \
&& \
conda clean --all -f -y && \
# Activate ipywidgets extension in the environment that runs the notebook server
@@ -237,121 +248,34 @@ RUN conda install --quiet --yes \
# Also activate ipywidgets extension for JupyterLab
# Check this URL for most recent compatibilities
# https://github.com/jupyter-widgets/ipywidgets/tree/master/packages/jupyterlab-manager
jupyter labextension install @jupyter-widgets/jupyterlab-manager@^1.0.1 --no-build && \
jupyter labextension install jupyterlab_bokeh@1.0.0 --no-build && \
jupyter lab build && \
jupyter labextension install @jupyter-widgets/jupyterlab-manager@^2.0.0 --no-build && \
jupyter labextension install @bokeh/jupyter_bokeh@^2.0.0 --no-build && \
jupyter labextension install jupyter-matplotlib@^0.7.2 --no-build && \
jupyter lab build -y && \
jupyter lab clean -y && \
npm cache clean --force && \
rm -rf $CONDA_DIR/share/jupyter/lab/staging && \
rm -rf /home/$NB_USER/.cache/yarn && \
rm -rf /home/$NB_USER/.node-gyp && \
fix-permissions $CONDA_DIR && \
fix-permissions /home/$NB_USER
rm -rf "/home/${NB_USER}/.cache/yarn" && \
rm -rf "/home/${NB_USER}/.node-gyp" && \
fix-permissions "${CONDA_DIR}" && \
fix-permissions "/home/${NB_USER}"

# Install facets which does not have a pip or conda package at the moment
RUN cd /tmp && \
git clone https://github.com/PAIR-code/facets.git && \
cd facets && \
jupyter nbextension install facets-dist/ --sys-prefix && \
cd && \
WORKDIR /tmp
RUN git clone https://github.com/PAIR-code/facets.git && \
jupyter nbextension install facets/facets-dist/ --sys-prefix && \
rm -rf /tmp/facets && \
fix-permissions $CONDA_DIR && \
fix-permissions /home/$NB_USER
fix-permissions "${CONDA_DIR}" && \
fix-permissions "/home/${NB_USER}"

# Import matplotlib the first time to build the font cache.
ENV XDG_CACHE_HOME /home/$NB_USER/.cache/
ENV XDG_CACHE_HOME="/home/${NB_USER}/.cache/"

RUN MPLBACKEND=Agg python -c "import matplotlib.pyplot" && \
fix-permissions /home/$NB_USER
fix-permissions "/home/${NB_USER}"

USER $NB_UID

############################################################################
################ Dependency: jupyter/datascience-notebook ##################
############################################################################
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.

LABEL maintainer="Jupyter Project <jupyter@googlegroups.com>"

# Set when building on Travis so that certain long-running build steps can
# be skipped to shorten build time.
ARG TEST_ONLY_BUILD

USER root

# R pre-requisites
RUN apt-get update && \
apt-get install -y --no-install-recommends \
fonts-dejavu \
gfortran \
gcc && \
rm -rf /var/lib/apt/lists/*

# Julia dependencies
# install Julia packages in /opt/julia instead of $HOME
ENV JULIA_DEPOT_PATH=/opt/julia
ENV JULIA_PKGDIR=/opt/julia
ENV JULIA_VERSION=1.3.1

RUN mkdir /opt/julia-${JULIA_VERSION} && \
cd /tmp && \
wget -q https://julialang-s3.julialang.org/bin/linux/x64/`echo ${JULIA_VERSION} | cut -d. -f 1,2`/julia-${JULIA_VERSION}-linux-x86_64.tar.gz && \
echo "faa707c8343780a6fe5eaf13490355e8190acf8e2c189b9e7ecbddb0fa2643ad *julia-${JULIA_VERSION}-linux-x86_64.tar.gz" | sha256sum -c - && \
tar xzf julia-${JULIA_VERSION}-linux-x86_64.tar.gz -C /opt/julia-${JULIA_VERSION} --strip-components=1 && \
rm /tmp/julia-${JULIA_VERSION}-linux-x86_64.tar.gz
RUN ln -fs /opt/julia-*/bin/julia /usr/local/bin/julia

# Show Julia where conda libraries are \
RUN mkdir /etc/julia && \
echo "push!(Libdl.DL_LOAD_PATH, \"$CONDA_DIR/lib\")" >> /etc/julia/juliarc.jl && \
# Create JULIA_PKGDIR \
mkdir $JULIA_PKGDIR && \
chown $NB_USER $JULIA_PKGDIR && \
fix-permissions $JULIA_PKGDIR

USER $NB_UID

# R packages including IRKernel which gets installed globally.
RUN conda install --quiet --yes \
'r-base=3.6.2' \
'r-caret=6.0*' \
'r-crayon=1.3*' \
'r-devtools=2.2*' \
'r-forecast=8.10*' \
'r-hexbin=1.28*' \
'r-htmltools=0.4*' \
'r-htmlwidgets=1.5*' \
'r-irkernel=1.1*' \
'r-nycflights13=1.0*' \
'r-plyr=1.8*' \
'r-randomforest=4.6*' \
'r-rcurl=1.98*' \
'r-reshape2=1.4*' \
'r-rmarkdown=2.1*' \
'r-rsqlite=2.1*' \
'r-shiny=1.3*' \
'r-tidyverse=1.3*' \
'rpy2=3.1*' \
&& \
conda clean --all -f -y && \
fix-permissions $CONDA_DIR && \
fix-permissions /home/$NB_USER

# Add Julia packages. Only add HDF5 if this is not a test-only build since
# it takes roughly half the entire build time of all of the images on Travis
# to add this one package and often causes Travis to timeout.
#
# Install IJulia as jovyan and then move the kernelspec out
# to the system share location. Avoids problems with runtime UID change not
# taking effect properly on the .local folder in the jovyan home dir.
RUN julia -e 'import Pkg; Pkg.update()' && \
(test $TEST_ONLY_BUILD || julia -e 'import Pkg; Pkg.add("HDF5")') && \
julia -e "using Pkg; pkg\"add IJulia\"; pkg\"precompile\"" && \
# move kernelspec out of home \
mv $HOME/.local/share/jupyter/kernels/julia* $CONDA_DIR/share/jupyter/kernels/ && \
chmod -R go+rx $CONDA_DIR/share/jupyter && \
rm -rf $HOME/.local && \
fix-permissions $JULIA_PKGDIR $CONDA_DIR/share/jupyter
WORKDIR $HOME

############################################################################
########################## Dependency: gpulibs #############################
@@ -359,12 +283,16 @@ RUN julia -e 'import Pkg; Pkg.update()' && \

LABEL maintainer="Christoph Schranz <christoph.schranz@salzburgresearch.at>"

# Install Tensorflow, check compatibility here: https://www.tensorflow.org/install/gpu
RUN conda install --quiet --yes \
'tensorflow-gpu=2.1*' \
'keras-gpu' && \
fix-permissions $CONDA_DIR && \
fix-permissions /home/$NB_USER
# Install Tensorflow, check compatibility here: https://www.tensorflow.org/install/gpu
# installation via conda leads to errors in version 4.8.2
#RUN conda install --quiet --yes \
# 'tensorflow-gpu=2.1*' \
# 'keras-gpu' && \
# fix-permissions $CONDA_DIR && \
# fix-permissions /home/$NB_USER
RUN pip install --upgrade pip && \
pip install --no-cache-dir "tensorflow-gpu>=2.1.*" && \
pip install --no-cache-dir keras

# Install PyTorch with dependencies
RUN conda install --quiet --yes \
@@ -382,57 +310,5 @@ RUN conda clean --all -f -y && \
fix-permissions $CONDA_DIR && \
fix-permissions /home/$NB_USER

############################################################################
############################ Useful packages ###############################
############################################################################

LABEL maintainer="Christoph Schranz <christoph.schranz@salzburgresearch.at>"

# Update conda
RUN conda update -n base conda -y

USER root

# Install elasticsearch libs
USER root
RUN apt-get update \
&& curl -sL https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch-hadoop/6.8.1/elasticsearch-hadoop-6.8.1.jar
RUN pip install --no-cache-dir elasticsearch==7.1.0

# Install rpy2 to share data between Python and R
RUN conda install rpy2=2.9.4 plotly=4.4.1
RUN conda install -c conda-forge ipyleaflet

# Install important packages and Graphviz
RUN set -ex \
&& buildDeps=' \
graphviz==0.11 \
' \
&& apt-get update \
&& apt-get -y install htop apt-utils graphviz libgraphviz-dev \
&& pip install --no-cache-dir $buildDeps

# Install various extensions
RUN jupyter labextension install @jupyterlab/github
RUN jupyter labextension install jupyterlab-drawio
RUN jupyter labextension install jupyter-leaflet
RUN jupyter labextension install @jupyterlab/plotly-extension
RUN jupyter labextension install @jupyter-widgets/jupyterlab-manager
RUN pip install --no-cache-dir jupyter-tabnine==1.0.2 && \
jupyter nbextension install --py jupyter_tabnine && \
jupyter nbextension enable --py jupyter_tabnine && \
jupyter serverextension enable --py jupyter_tabnine
RUN fix-permissions $CONDA_DIR
RUN conda install -c conda-forge jupyter_contrib_nbextensions && \
conda install -c conda-forge jupyter_nbextensions_configurator && \
conda install -c conda-forge rise && \
jupyter nbextension enable codefolding/main
RUN jupyter labextension install @ijmbarr/jupyterlab_spellchecker

RUN fix-permissions /home/$NB_USER

# Switch back to jovyan to avoid accidental container runs as root
USER $NB_UID

# Copy jupyter_notebook_config.json
COPY jupyter_notebook_config.json /etc/jupyter/
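Since the gpulibs section of the concatenated Dockerfile now installs TensorFlow and Keras via pip instead of conda, a quick smoke test of the built image helps confirm the GPU is still visible. This is a minimal sketch, assuming the image was built and tagged `gpu-jupyter` as described in the README and that the NVIDIA runtime is configured on the host:

```bash
# Build the generated Dockerfile and ask TensorFlow which GPUs it can see.
# Assumes the tag gpu-jupyter and the NVIDIA runtime from the README setup.
docker build -t gpu-jupyter .build/
docker run --rm --runtime nvidia gpu-jupyter \
  python -c "import tensorflow as tf; print(tf.config.list_physical_devices('GPU'))"
```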
@@ -1 +1 @@
Subproject commit c1c32938438151c7e2a22b5aa338caba2ec01da2
Subproject commit 04f7f60d34a674a2964d96a6cb97c57a7870a828
@@ -6,14 +6,14 @@ set -e

wrapper=""
if [[ "${RESTARTABLE}" == "yes" ]]; then
wrapper="run-one-constantly"
wrapper="run-one-constantly"
fi

if [[ ! -z "${JUPYTERHUB_API_TOKEN}" ]]; then
# launched by JupyterHub, use single-user entrypoint
exec /usr/local/bin/start-singleuser.sh "$@"
# launched by JupyterHub, use single-user entrypoint
exec /usr/local/bin/start-singleuser.sh "$@"
elif [[ ! -z "${JUPYTER_ENABLE_LAB}" ]]; then
. /usr/local/bin/start.sh $wrapper jupyter lab "$@"
. /usr/local/bin/start.sh $wrapper jupyter lab "$@"
else
. /usr/local/bin/start.sh $wrapper jupyter notebook "$@"
. /usr/local/bin/start.sh $wrapper jupyter notebook "$@"
fi
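The `RESTARTABLE` and `JUPYTER_ENABLE_LAB` switches handled above are plain environment variables, so they can be set when the container is started. A hypothetical invocation, reusing the `gpu-jupyter` tag from the README:

```bash
# Start JupyterLab instead of the classic notebook and restart it if it exits.
# Hypothetical run command; adjust port and tag to your setup.
docker run -d -p 8888:8888 \
  -e JUPYTER_ENABLE_LAB=yes \
  -e RESTARTABLE=yes \
  --runtime nvidia gpu-jupyter
```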
@@ -6,7 +6,7 @@ set -e

# set default ip to 0.0.0.0
if [[ "$NOTEBOOK_ARGS $@" != *"--ip="* ]]; then
NOTEBOOK_ARGS="--ip=0.0.0.0 $NOTEBOOK_ARGS"
NOTEBOOK_ARGS="--ip=0.0.0.0 $NOTEBOOK_ARGS"
fi

# handle some deprecated environment variables
@@ -14,30 +14,26 @@ fi
# These won't be passed from DockerSpawner 0.9,
# so avoid specifying --arg=empty-string
if [ ! -z "$NOTEBOOK_DIR" ]; then
NOTEBOOK_ARGS="--notebook-dir='$NOTEBOOK_DIR' $NOTEBOOK_ARGS"
NOTEBOOK_ARGS="--notebook-dir='$NOTEBOOK_DIR' $NOTEBOOK_ARGS"
fi
if [ ! -z "$JPY_PORT" ]; then
NOTEBOOK_ARGS="--port=$JPY_PORT $NOTEBOOK_ARGS"
NOTEBOOK_ARGS="--port=$JPY_PORT $NOTEBOOK_ARGS"
fi
if [ ! -z "$JPY_USER" ]; then
NOTEBOOK_ARGS="--user=$JPY_USER $NOTEBOOK_ARGS"
NOTEBOOK_ARGS="--user=$JPY_USER $NOTEBOOK_ARGS"
fi
if [ ! -z "$JPY_COOKIE_NAME" ]; then
NOTEBOOK_ARGS="--cookie-name=$JPY_COOKIE_NAME $NOTEBOOK_ARGS"
NOTEBOOK_ARGS="--cookie-name=$JPY_COOKIE_NAME $NOTEBOOK_ARGS"
fi
if [ ! -z "$JPY_BASE_URL" ]; then
NOTEBOOK_ARGS="--base-url=$JPY_BASE_URL $NOTEBOOK_ARGS"
NOTEBOOK_ARGS="--base-url=$JPY_BASE_URL $NOTEBOOK_ARGS"
fi
if [ ! -z "$JPY_HUB_PREFIX" ]; then
NOTEBOOK_ARGS="--hub-prefix=$JPY_HUB_PREFIX $NOTEBOOK_ARGS"
NOTEBOOK_ARGS="--hub-prefix=$JPY_HUB_PREFIX $NOTEBOOK_ARGS"
fi
if [ ! -z "$JPY_HUB_API_URL" ]; then
NOTEBOOK_ARGS="--hub-api-url=$JPY_HUB_API_URL $NOTEBOOK_ARGS"
fi
if [ ! -z "$JUPYTER_ENABLE_LAB" ]; then
NOTEBOOK_BIN="jupyter labhub"
else
NOTEBOOK_BIN="jupyterhub-singleuser"
NOTEBOOK_ARGS="--hub-api-url=$JPY_HUB_API_URL $NOTEBOOK_ARGS"
fi
NOTEBOOK_BIN="jupyterhub-singleuser"

. /usr/local/bin/start.sh $NOTEBOOK_BIN $NOTEBOOK_ARGS "$@"
@@ -93,7 +93,7 @@ if [ $(id -u) == 0 ] ; then
fi

# Add $CONDA_DIR/bin to sudo secure_path
sed -r "s#Defaults\s+secure_path=\"([^\"]+)\"#Defaults secure_path=\"\1:$CONDA_DIR/bin\"#" /etc/sudoers | grep secure_path > /etc/sudoers.d/path
sed -r "s#Defaults\s+secure_path\s*=\s*\"?([^\"]+)\"?#Defaults secure_path=\"\1:$CONDA_DIR/bin\"#" /etc/sudoers | grep secure_path > /etc/sudoers.d/path

# Exec the command as NB_USER with the PATH and the rest of
# the environment preserved
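The relaxed pattern above also matches `secure_path` entries that put spaces around the `=` or omit the quotes, which the old expression did not. A small illustration of the rewrite, assuming `CONDA_DIR=/opt/conda` as in the base image (the sudoers line below is a made-up sample):

```bash
# Sketch only: feed a sample sudoers line through the new sed expression.
CONDA_DIR=/opt/conda
echo 'Defaults    secure_path = /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' | \
  sed -r "s#Defaults\s+secure_path\s*=\s*\"?([^\"]+)\"?#Defaults secure_path=\"\1:$CONDA_DIR/bin\"#"
# -> Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/conda/bin"
```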
README.md
@@ -5,7 +5,7 @@

First of all, thanks to [docker-stacks](https://github.com/jupyter/docker-stacks)
for creating and maintaining a robost Python, R and Julia toolstack for Data Analytics/Science
applications. This project uses the NVIDIA CUDA image as a basis image and installs their
applications. This project uses the NVIDIA CUDA image as the base image and installs their
toolstack on top of it to enable GPU calculations in the Jupyter notebooks.
The image of this repository is available on [Dockerhub](https://hub.docker.com/r/cschranz/gpu-jupyter).

@@ -21,13 +21,34 @@ The image of this repository is available on [Dockerhub](https://hub.docker.com/

## Requirements

1. Install [Docker](https://www.docker.com/community-edition#/download) version **1.10.0+**
1. A NVIDIA GPU
2. Install [Docker](https://www.docker.com/community-edition#/download) version **1.10.0+**
and [Docker Compose](https://docs.docker.com/compose/install/) version **1.6.0+**.
2. A NVIDIA GPU
3. Get access to use your GPU via the CUDA drivers, check out this
3. Get access to your GPU via CUDA drivers within Docker containers. Therfore, check out this
[medium article](https://medium.com/@christoph.schranz/set-up-your-own-gpu-based-jupyterlab-e0d45fcacf43).
The CUDA toolkit is not required on the host system, as it will be deployed
in [NVIDIA-docker](https://github.com/NVIDIA/nvidia-docker).
in [NVIDIA-docker](https://github.com/NVIDIA/nvidia-docker).
You can be sure that you can access your GPU within Docker,
if the command `docker run --runtime nvidia nvidia/cuda:10.1-base-ubuntu18.04 nvidia-smi`
returns a result similar to this one:
```bash
Mon Jun 22 09:06:28 2020
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 440.82       Driver Version: 440.82       CUDA Version: 10.1     |
|-------------------------------+----------------------+----------------------+
| GPU  Name  Persistence-M| Bus-Id  Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap| Memory-Usage | GPU-Util  Compute M. |
|===============================+======================+======================|
|   0  GeForce RTX 207...  Off  | 00000000:01:00.0  On |                  N/A |
|  0%   46C    P8     9W / 215W |    424MiB /  7974MiB |      6%      Default |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                       GPU Memory |
|  GPU       PID   Type   Process name                             Usage      |
|=============================================================================|
+-----------------------------------------------------------------------------+
```
4. Clone the Repository or pull the image from
[Dockerhub](https://hub.docker.com/repository/docker/cschranz/gpu-jupyter):
```bash
@@ -37,29 +58,45 @@ The image of this repository is available on [Dockerhub](https://hub.docker.com/

## Quickstart

First of all, it is necessary to generate the `Dockerfile` based on the latest toolstack of
[hub.docker.com/u/jupyter](https://hub.docker.com/u/jupyter).
As soon as you have access to your GPU locally (it can be tested via a Tensorflow or PyTorch
directly on the host node), you can run these commands to start the jupyter notebook via
docker-compose (internally):

```bash
./generate-Dockerfile.sh
docker build -t gpu-jupyter .build/
docker run -d -p [port]:8888 gpu-jupyter
```

Alternatively, you can configure the environment in `docker-compose.yml` and run
this to deploy the `GPU-Jupyter` via docker-compose (under-the-hood):

```bash
./generate-Dockerfile.sh
./start-local.sh -p 8888  # where -p stands for the port of the service
```

Both options will run *GPU-Jupyter* by default on [localhost:8888](http://localhost:8888) with the default
First of all, it is necessary to generate the `Dockerfile` based on the
[docker-stacks](https://github.com/jupyter/docker-stacks).
As soon as you have access to your GPU within Docker containers
(make sure the command `docker run --runtime nvidia nvidia/cuda:10.1-base-ubuntu18.04 nvidia-smi` shows your
GPU statistics), you can generate a Dockerfile and build it via docker-compose.
The two commands will start *GPU-Jupyter* on [localhost:1234](http://localhost:1234) with the default
password `asdf`.

```bash
./generate-Dockerfile.sh
./start-local.sh -p 1234  # where -p stands for the port, default 8888
```

## Parameter

The script `generate-Dockerfile.sh` has multiple parameters:

* `-c|--commit`: specify a commit or `"latest"` for the `docker-stacks`, the default commit is a working one.

* `-s|--slim`: Generate a slim Dockerfile.
As some installations are not needed by everyone, there is the possibility to skip some installations
to reduce the size of the image.
Here the `docker-stack` `scipy-notebook` is used instead of `datascience-notebook` that comes with Julia and R.
Moreover, none of the packages within `src/Dockerfile.usefulpackages` is installed.

* `--no-datascience-notebook`: As the name suggests, the `docker-stack` `datascience-notebook` is not installed
on top of the `scipy-notebook`, but the packages within `src/Dockerfile.usefulpackages` are.

* `--no-useful-packages`: On top of the `docker-stack` `datascience-notebook`, the essential `gpulibs` are installed
but not the packages within `src/Dockerfile.usefulpackages`.


The script `start-local.sh` is a wrapper for a quick configuration of the underlying `docker-compose.yml`.
It is equal to these commands:

```bash
docker build -t gpu-jupyter .build/
docker run -d -p [port]:8888 gpu-jupyter
```

## Tracing
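For reference, the flags documented in the new Parameter section can be combined; the calls below are illustrative only and assume the script is run from the repository root:

```bash
./generate-Dockerfile.sh --slim                # scipy-notebook only, no usefulpackages
./generate-Dockerfile.sh --no-useful-packages  # keep Julia and R, skip Dockerfile.usefulpackages
./generate-Dockerfile.sh -c latest             # track the current head of jupyter/docker-stacks
```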
@@ -27,15 +27,15 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Wed Mar 11 07:16:17 2020 \n",
"Mon Jun 22 11:24:08 2020 \n",
"+-----------------------------------------------------------------------------+\n",
"| NVIDIA-SMI 440.48.02 Driver Version: 440.48.02 CUDA Version: 10.2 |\n",
"| NVIDIA-SMI 440.82 Driver Version: 440.82 CUDA Version: 10.2 |\n",
"|-------------------------------+----------------------+----------------------+\n",
"| GPU  Name  Persistence-M| Bus-Id  Disp.A | Volatile Uncorr. ECC |\n",
"| Fan  Temp  Perf  Pwr:Usage/Cap| Memory-Usage | GPU-Util  Compute M. |\n",
"|===============================+======================+======================|\n",
"|   0  GeForce RTX 207...  Off  | 00000000:01:00.0 Off |                  N/A |\n",
"|  0%   42C    P8     1W / 215W |   1788MiB /  7974MiB |      0%      Default |\n",
"|   0  GeForce RTX 207...  Off  | 00000000:01:00.0  On |                  N/A |\n",
"|  0%   49C    P0    38W / 215W |    430MiB /  7974MiB |      5%      Default |\n",
"+-------------------------------+----------------------+----------------------+\n",
" \n",
"+-----------------------------------------------------------------------------+\n",
@@ -87,10 +87,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From <ipython-input-3-d1bfbb527297>:3: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use `tf.config.list_physical_devices('GPU')` instead.\n",
"True\n"
"[PhysicalDevice(name='/physical_device:XLA_GPU:0', device_type='XLA_GPU')]\n"
]
},
{
@@ -101,30 +98,20 @@
" memory_limit: 268435456\n",
" locality {\n",
" }\n",
" incarnation: 8034786465358909470,\n",
" incarnation: 12436949185972503812,\n",
" name: \"/device:XLA_CPU:0\"\n",
" device_type: \"XLA_CPU\"\n",
" memory_limit: 17179869184\n",
" locality {\n",
" }\n",
" incarnation: 13772661904993777233\n",
" incarnation: 9674938692146126962\n",
" physical_device_desc: \"device: XLA_CPU device\",\n",
" name: \"/device:GPU:0\"\n",
" device_type: \"GPU\"\n",
" memory_limit: 5480775680\n",
" locality {\n",
" bus_id: 1\n",
" links {\n",
" }\n",
" }\n",
" incarnation: 8336380964433791501\n",
" physical_device_desc: \"device: 0, name: GeForce RTX 2070 SUPER, pci bus id: 0000:01:00.0, compute capability: 7.5\",\n",
" name: \"/device:XLA_GPU:0\"\n",
" device_type: \"XLA_GPU\"\n",
" memory_limit: 17179869184\n",
" locality {\n",
" }\n",
" incarnation: 4817022749254415174\n",
" incarnation: 7870544216044264725\n",
" physical_device_desc: \"device: XLA_GPU device\"]"
]
},
@@ -136,7 +123,7 @@
"source": [
"import tensorflow as tf\n",
"from tensorflow.python.client import device_lib\n",
"print(tf.test.is_gpu_available(cuda_only=True))\n",
"print(tf.config.list_physical_devices('XLA_GPU'))\n",
"device_lib.list_local_devices()"
]
},
@@ -148,11 +135,11 @@
{
"data": {
"text/plain": [
"tensor([[0.1091, 0.0178, 0.2500],\n",
" [0.1409, 0.9612, 0.0325],\n",
" [0.8944, 0.3869, 0.9657],\n",
" [0.8131, 0.5454, 0.2587],\n",
" [0.6570, 0.0147, 0.1361]])"
"tensor([[0.0399, 0.1738, 0.2486],\n",
" [0.7464, 0.1461, 0.8991],\n",
" [0.7264, 0.9835, 0.8844],\n",
" [0.4544, 0.8331, 0.8435],\n",
" [0.0109, 0.0689, 0.2997]])"
]
},
"execution_count": 4,
@@ -202,7 +189,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"248 ms ± 174 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
"276 ms ± 9.97 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
]
}
],
@@ -236,13 +223,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
"78.2 ms ± 250 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
"82.1 ms ± 1.85 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
]
}
],
"source": [
"%%timeit\n",
"# Calculate the projection matrix of x\n",
"# Calculate the projection matrix of x on the CPU\n",
"H = x.mm( (x.t().mm(x)).inverse() ).mm(x.t())"
]
},
@@ -262,16 +249,16 @@
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[0.0962, 0.3125, 0.7327, 0.5982, 0.4624],\n",
" [0.4655, 0.4890, 0.9603, 0.4339, 0.0524],\n",
" [0.9294, 0.9639, 0.6312, 0.1752, 0.7721],\n",
" [0.5533, 0.3656, 0.9329, 0.8796, 0.9513],\n",
" [0.4949, 0.0972, 0.2892, 0.7570, 0.2847]], device='cuda:0')\n",
"tensor([[0.0962, 0.3125, 0.7327, 0.5982, 0.4624],\n",
" [0.4655, 0.4890, 0.9603, 0.4339, 0.0524],\n",
" [0.9294, 0.9639, 0.6312, 0.1752, 0.7721],\n",
" [0.5533, 0.3656, 0.9329, 0.8796, 0.9513],\n",
" [0.4949, 0.0972, 0.2892, 0.7570, 0.2847]], dtype=torch.float64)\n"
"tensor([[0.2854, 0.3384, 0.6473, 0.0433, 0.5640],\n",
" [0.3960, 0.0449, 0.6597, 0.5347, 0.8402],\n",
" [0.0048, 0.9231, 0.0311, 0.2545, 0.0409],\n",
" [0.6506, 0.8651, 0.7558, 0.1086, 0.8135],\n",
" [0.1083, 0.0039, 0.6049, 0.3596, 0.1359]], device='cuda:0')\n",
"tensor([[0.2854, 0.3384, 0.6473, 0.0433, 0.5640],\n",
" [0.3960, 0.0449, 0.6597, 0.5347, 0.8402],\n",
" [0.0048, 0.9231, 0.0311, 0.2545, 0.0409],\n",
" [0.6506, 0.8651, 0.7558, 0.1086, 0.8135],\n",
" [0.1083, 0.0039, 0.6049, 0.3596, 0.1359]], dtype=torch.float64)\n"
]
}
],
@@ -295,12 +282,13 @@
"name": "stdout",
"output_type": "stream",
"text": [
"11.4 ms ± 60.2 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
"11.4 ms ± 28.8 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\n"
]
}
],
"source": [
"%%timeit\n",
"# Calculate the projection matrix of x on the GPU\n",
"H = x.mm( (x.t().mm(x)).inverse() ).mm(x.t())"
]
},
@@ -341,11 +329,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[0.4303, 0.7364, 0.1235, 0.7786, 0.7036],\n",
" [0.3256, 0.4515, 0.7994, 0.9814, 0.7705],\n",
" [0.2292, 0.5194, 0.4354, 0.3964, 0.5804],\n",
" [0.8855, 0.5156, 0.9321, 0.9555, 0.4150],\n",
" [0.0640, 0.0665, 0.1170, 0.9547, 0.2668]], device='cuda:0')\n"
"tensor([[0.1101, 0.7887, 0.0641, 0.1327, 0.1681],\n",
" [0.7914, 0.7248, 0.7731, 0.2662, 0.4908],\n",
" [0.2451, 0.3568, 0.4006, 0.2099, 0.5212],\n",
" [0.6195, 0.5120, 0.5212, 0.7321, 0.2272],\n",
" [0.2374, 0.4540, 0.0868, 0.9393, 0.1561]], device='cuda:0')\n"
]
}
],
@@ -376,11 +364,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[1.0966e-03, 3.5866e-04, 4.0044e-04, 3.2466e-04, 2.3044e-04],\n",
" [3.5866e-04, 9.7424e-04, 2.8649e-04, 8.2904e-04, 2.0482e-04],\n",
" [4.0044e-04, 2.8649e-04, 5.4179e-04, 1.2729e-04, 9.4659e-05],\n",
" [3.2466e-04, 8.2904e-04, 1.2729e-04, 1.3005e-03, 6.6951e-06],\n",
" [2.3044e-04, 2.0482e-04, 9.4659e-05, 6.6950e-06, 1.3420e-03]],\n",
"tensor([[ 6.4681e-04, -1.5392e-05, 3.3608e-04, 2.1025e-04, 8.0912e-05],\n",
" [-1.5392e-05, 5.0718e-04, -1.1769e-04, -2.3084e-05, -2.3264e-04],\n",
" [ 3.3608e-04, -1.1769e-04, 6.9678e-04, 2.2663e-04, -1.8900e-04],\n",
" [ 2.1025e-04, -2.3084e-05, 2.2663e-04, 6.0036e-04, 2.7787e-04],\n",
" [ 8.0912e-05, -2.3264e-04, -1.8900e-04, 2.7787e-04, 1.4208e-03]],\n",
" device='cuda:0')\n"
]
}
@@ -399,11 +387,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[1.0966e-03, 3.5866e-04, 4.0044e-04, 3.2466e-04, 2.3044e-04],\n",
" [3.5866e-04, 9.7424e-04, 2.8649e-04, 8.2904e-04, 2.0482e-04],\n",
" [4.0044e-04, 2.8649e-04, 5.4179e-04, 1.2729e-04, 9.4659e-05],\n",
" [3.2466e-04, 8.2904e-04, 1.2729e-04, 1.3005e-03, 6.6951e-06],\n",
" [2.3044e-04, 2.0482e-04, 9.4659e-05, 6.6950e-06, 1.3420e-03]],\n",
"tensor([[ 6.4681e-04, -1.5392e-05, 3.3608e-04, 2.1025e-04, 8.0912e-05],\n",
" [-1.5392e-05, 5.0718e-04, -1.1769e-04, -2.3084e-05, -2.3264e-04],\n",
" [ 3.3608e-04, -1.1769e-04, 6.9678e-04, 2.2663e-04, -1.8900e-04],\n",
" [ 2.1025e-04, -2.3084e-05, 2.2663e-04, 6.0036e-04, 2.7787e-04],\n",
" [ 8.0912e-05, -2.3264e-04, -1.8900e-04, 2.7787e-04, 1.4208e-03]],\n",
" dtype=torch.float64)\n"
]
}
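The refreshed notebook outputs above come from re-running the GPU benchmarks in `extra/Getting_Started`. Outside the notebook, the same PyTorch check can be scripted; a minimal sketch, again assuming the `gpu-jupyter` tag and the NVIDIA runtime:

```bash
# Confirm that PyTorch inside the image can reach the GPU (sketch, not part of this commit).
docker run --rm --runtime nvidia gpu-jupyter \
  python -c "import torch; print(torch.cuda.is_available())"
```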
@@ -4,10 +4,14 @@ cd $(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the path of the generated Dockerfile
export DOCKERFILE=".build/Dockerfile"
export STACKS_DIR=".build/docker-stacks"
export HEAD_COMMIT="c1c32938438151c7e2a22b5aa338caba2ec01da2"
# please test the build of the commit in https://github.com/jupyter/docker-stacks/commits/master in advance
export HEAD_COMMIT="04f7f60d34a674a2964d96a6cb97c57a7870a828"

while [[ "$#" -gt 0 ]]; do case $1 in
-c|--commit) HEAD_COMMIT="$2"; shift;;
--no-datascience-notebook) no_datascience_notebook=1;;
--no-useful-packages) no_useful_packages=1;;
-s|--slim) no_datascience_notebook=1 && no_useful_packages=1;;
*) echo "Unknown parameter passed: $1" &&
echo "Usage: $0 -c [sha-commit] # set the head commit of the docker-stacks submodule
(https://github.com/jupyter/docker-stacks/commits/master). default: $HEAD_COMMIT."; exit 1;;
@@ -23,11 +27,12 @@ if [[ "$HEAD_COMMIT" == "latest" ]]; then
cd $STACKS_DIR && git pull && cd -
else
export GOT_HEAD="false"
cd $STACKS_DIR && git reset --hard "$HEAD_COMMIT" > /dev/null 2>&1 && cd - && export GOT_HEAD="true"
cd $STACKS_DIR && git pull && git reset --hard "$HEAD_COMMIT" > /dev/null 2>&1 && cd - && export GOT_HEAD="true"
echo "$HEAD"
if [[ "$GOT_HEAD" == "false" ]]; then
echo "Given sha-commit is invalid."
echo "Error: The given sha-commit is invalid."
echo "Usage: $0 -c [sha-commit] # set the head commit of the docker-stacks submodule (https://github.com/jupyter/docker-stacks/commits/master)."
echo "Exiting"
exit 2
else
echo "Set head to given commit."
@@ -66,13 +71,17 @@ echo "
" >> $DOCKERFILE
cat $STACKS_DIR/scipy-notebook/Dockerfile | grep -v BASE_CONTAINER >> $DOCKERFILE

echo "
############################################################################
################ Dependency: jupyter/datascience-notebook ##################
############################################################################
" >> $DOCKERFILE
cat $STACKS_DIR/datascience-notebook/Dockerfile | grep -v BASE_CONTAINER >> $DOCKERFILE

# install Julia and R if not excluded or spare mode is used
if [[ "$no_datascience_notebook" != 1 ]]; then
echo "
############################################################################
################ Dependency: jupyter/datascience-notebook ##################
############################################################################
" >> $DOCKERFILE
cat $STACKS_DIR/datascience-notebook/Dockerfile | grep -v BASE_CONTAINER >> $DOCKERFILE
else
echo "Set 'no-datascience-notebook', not installing the datascience-notebook with Julia and R."
fi

# Note that the following step also installs the cudatoolkit, which is
# essential to access the GPU.
@@ -83,13 +92,17 @@ echo "
" >> $DOCKERFILE
cat src/Dockerfile.gpulibs >> $DOCKERFILE


echo "
############################################################################
############################ Useful packages ###############################
############################################################################
" >> $DOCKERFILE
cat src/Dockerfile.usefulpackages >> $DOCKERFILE
# install useful packages if not excluded or spare mode is used
if [[ "$no_useful_packages" != 1 ]]; then
echo "
############################################################################
############################ Useful packages ###############################
############################################################################
" >> $DOCKERFILE
cat src/Dockerfile.usefulpackages >> $DOCKERFILE
else
echo "Set 'no-useful-packages', not installing stuff within src/Dockerfile.usefulpackages."
fi

# Copy the demo notebooks and change permissions
cp -r extra/Getting_Started data
@@ -1,11 +1,15 @@
LABEL maintainer="Christoph Schranz <christoph.schranz@salzburgresearch.at>"

# Install Tensorflow, check compatibility here: https://www.tensorflow.org/install/gpu
RUN conda install --quiet --yes \
'tensorflow-gpu=2.1*' \
'keras-gpu' && \
fix-permissions $CONDA_DIR && \
fix-permissions /home/$NB_USER
# Install Tensorflow, check compatibility here: https://www.tensorflow.org/install/gpu
# installation via conda leads to errors in version 4.8.2
#RUN conda install --quiet --yes \
# 'tensorflow-gpu=2.1*' \
# 'keras-gpu' && \
# fix-permissions $CONDA_DIR && \
# fix-permissions /home/$NB_USER
RUN pip install --upgrade pip && \
pip install --no-cache-dir "tensorflow-gpu>=2.1.*" && \
pip install --no-cache-dir keras

# Install PyTorch with dependencies
RUN conda install --quiet --yes \
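Because TensorFlow and Keras now come from pip while PyTorch stays on conda, it can be worth checking which versions actually end up in the image. A quick, non-authoritative check, assuming the `gpu-jupyter` tag:

```bash
# List the resolved deep-learning packages inside the built image (sketch only).
docker run --rm gpu-jupyter pip list | grep -iE "tensorflow|keras|torch"
```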
@@ -2,4 +2,4 @@
# The version of cudatoolkit must match those of the base image, see Dockerfile.pytorch
FROM nvidia/cuda:10.1-base-ubuntu18.04
LABEL maintainer="Christoph Schranz <christoph.schranz@salzburgresearch.at>"
# The maintainers of subsequent sections may vary
# This is a concatenated Dockerfile, the maintainers of subsequent sections may vary.