# vllm-inference / Dockerfile
FROM python:3.12
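# Create an unprivileged user (UID 1000) and run as that user, as Hugging Face Spaces expects.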
RUN useradd -m -u 1000 user
USER user
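# Make user-level pip installs (which land in ~/.local/bin) available on PATH.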
ENV PATH="/home/user/.local/bin:$PATH"
WORKDIR /app
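# Copy requirements.txt on its own first so the dependency layer stays cached across code changes.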
COPY --chown=user ./requirements.txt requirements.txt
# Note: PyTorch publishes no cu113 wheels for Python 3.12; a CUDA 12.x index (cu121) matches current torch/vLLM builds.
RUN pip install --no-cache-dir -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu121
COPY --chown=user . /app
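# Hugging Face Spaces routes traffic to port 7860 by default.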
EXPOSE 7860
# Earlier uvicorn entrypoint, superseded by run.sh:
# CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
RUN chmod +x /app/run.sh
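# run.sh itself is not shown in this file; per the commit message it presumably launches the vLLM
# OpenAI-compatible server on the exposed port, e.g.
# `python -m vllm.entrypoints.openai.api_server --host 0.0.0.0 --port 7860`.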
CMD ["/app/run.sh"]