diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..ddebbc0
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,9 @@
+docker-compose.yml
+models/
+images/
+app/bin
+app/lib
+app/lib64
+app/__pycache__
+app/pyvenv.cfg
+app/include
diff --git a/.env b/.env
new file mode 100644
index 0000000..806674a
--- /dev/null
+++ b/.env
@@ -0,0 +1,19 @@
+#########################LOCALAI#########################
+
+# local-ai hostname used when the Flask app runs inside Docker
+LOCALAI_HOST=local-ai
+
+MODELS_PATH=/models
+
+DEBUG=true
+REBUILD=false
+
+#THREADS=4
+
+DEFAULT_MODEL=gpt-3.5-turbo
+PRELOAD_MODELS=[{"url":"github:go-skynet/model-gallery/gpt4all-j.yaml","name":"gpt-3.5-turbo"},{"url":"github:go-skynet/model-gallery/stablediffusion.yaml","name":"stablediffusion"}]
+
+#DEFAULT_MODEL=wizard-lm
+#PRELOAD_MODELS=[{"url":"github:go-skynet/model-gallery/openllama_7b.yaml","name":"open_llama"}]
+
+#GALLERIES=[{"name":"model-gallery","url":"github:go-skynet/model-gallery/index.yaml"}]
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..fe44548
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,29 @@
+models/ggml-gpt4all-j.bin
+models/gpt4all-chat.tmpl
+models/open-llama-7b-q4_0.bin
+models/whisper.yaml
+models/ggml-whisper-base.bin
+models/gpt4all-completion.tmpl
+models/openllama-chat.tmpl
+models/gpt-3.5-turbo.yaml
+models/gpt4all-j.yaml
+models/openllama-completion.tmpl
+models/wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin
+models/wizard-lm.yaml
+models/wizard-lm-chat.tmpl
+models/wizard-lm-completion.tmpl
+models/wizard_lm.yaml
+models/open_llama.yaml
+models/openllama.yaml
+models/open_llama
+models/*
+
+app/bin
+app/include
+app/lib
+app/lib64
+app/__pycache__
+app/pyvenv.cfg
+app/client_secrets_prod.json
+
+images/*
diff --git a/Dockerfile b/Dockerfile
index 9d281ef..476ea10 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -5,4 +5,4 @@ WORKDIR /
COPY . .
RUN pip3 install -r requirements.txt
WORKDIR /app
-CMD ["gunicorn", "-w", "4", "wsgi:app", "--bind", "0.0.0.0:8000"]
\ No newline at end of file
+CMD [ "python3", "-m" , "flask", "run", "--host=0.0.0.0"]
\ No newline at end of file
diff --git a/app/app.py b/app/app.py
index 42fb6fb..a772e4f 100644
--- a/app/app.py
+++ b/app/app.py
@@ -10,32 +10,8 @@ model = "ggml-gpt4all-j.bin"
load_dotenv()
host = os.getenv("LOCALAI_HOST")
-############################### KEYCLOAK ###############################
-# app.config.update({
-# # PROD ONLY
-# 'SECRET_KEY': 'créer-un-secret-ici',
-# 'OIDC_CLIENT_SECRETS': 'client_secrets_prod.json',
-# 'OIDC_ID_TOKEN_COOKIE_SECURE': False,
-# 'OIDC_REQUIRE_VERIFIED_EMAIL': False,
-# 'OIDC_USER_INFO_ENABLED': True,
-# 'OIDC_OPENID_REALM': 'gregan',
-# 'OIDC_SCOPES': ['openid', 'email', 'profile'],
-# 'OIDC_INTROSPECTION_AUTH_METHOD': 'client_secret_post'
-# })
-
-# app.config['OVERWRITE_REDIRECT_URI'] = 'https://chat-gpt.domain.tld/oidc_callback'
-# oidc = OpenIDConnect(app)
-
-# @app.context_processor
-# def inject_oidc_user():
-# if oidc.user_loggedin:
-# return dict(oidc_user=oidc.user_getfield('email'))
-# return dict(oidc_user=None)
-
-
# CHAT BOT: GPT-TURBO-3.5
@app.route("/", methods=("GET", "POST"))
-# @oidc.require_login
def index():
result = ''
@@ -60,8 +36,6 @@ def index():
response = requests.post(url, json=payload)
if response.status_code == 200:
result = '
Loading ...
-