oscarwang2 committed on
Commit
b4ae7e6
1 Parent(s): c02dde9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -6,11 +6,11 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStream
6
  import gradio as gr
7
  from threading import Thread
8
 
9
- MODEL_LIST = ["meta-llama/Meta-Llama-3.1-8B-Instruct"]
10
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
11
  MODEL = os.environ.get("MODEL_ID")
12
 
13
- TITLE = "<h1><center>Meta-Llama3.1-8B</center></h1>"
14
 
15
  PLACEHOLDER = """
16
  <center>
@@ -45,6 +45,7 @@ model = AutoModelForCausalLM.from_pretrained(
45
  torch_dtype=torch.bfloat16,
46
  device_map="auto",
47
  quantization_config=quantization_config)
 
48
 
49
  @spaces.GPU()
50
  def stream_chat(
 
6
  import gradio as gr
7
  from threading import Thread
8
 
9
+ MODEL_LIST = ["microsoft/Phi-3.5-MoE-instruct"]
10
  HF_TOKEN = os.environ.get("HF_TOKEN", None)
11
  MODEL = os.environ.get("MODEL_ID")
12
 
13
+ TITLE = "<h1><center>Phi-3.5-MoE-instruct</center></h1>"
14
 
15
  PLACEHOLDER = """
16
  <center>
 
45
  torch_dtype=torch.bfloat16,
46
  device_map="auto",
47
  quantization_config=quantization_config)
48
+ trust_remote_code=True
49
 
50
  @spaces.GPU()
51
  def stream_chat(