lxq2021 committed
Commit 6ad02fc
1 Parent(s): 0f82ba0

Update app.py

Files changed (1):
  1. app.py  +3 -3
app.py CHANGED
@@ -6,7 +6,7 @@ from transformers import (
 )
 
 import torch
-
+import os
 import torch.nn.functional as F
 
 import os
@@ -35,7 +35,7 @@ def load_llm():
 
     model_name = 'mistralai/Mistral-7B-Instruct-v0.2'
 
-    tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN)
+    tokenizer = AutoTokenizer.from_pretrained(model_name, token=os.getenv('HF_TOKEN'))
     model = AutoModelForCausalLM.from_pretrained(
         model_name,
         #load_in_4bit=True,
@@ -43,7 +43,7 @@ def load_llm():
         torch_dtype=torch.bfloat16,
         device_map="auto",
         trust_remote_code=True,
-        token=HF_TOKEN,
+        token=os.getenv('HF_TOKEN'),
     )
 
     pipe = pipeline(
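
For context, a minimal sketch of what load_llm() looks like after this commit. Only the lines visible in the diff above come from the source; the pipeline(...) arguments, the return value, and the exact shape of the import block are assumptions.

import os

import torch
import torch.nn.functional as F
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline


def load_llm():
    model_name = 'mistralai/Mistral-7B-Instruct-v0.2'

    # After this commit, the Hugging Face token is read from the HF_TOKEN
    # environment variable at call time instead of a hard-coded HF_TOKEN name.
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=os.getenv('HF_TOKEN'))
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        # load_in_4bit=True,
        torch_dtype=torch.bfloat16,
        device_map="auto",
        trust_remote_code=True,
        token=os.getenv('HF_TOKEN'),
    )

    # The diff only shows that a pipeline(...) call starts here; the task
    # name and arguments below are assumptions.
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
    )
    return pipe

Reading the token with os.getenv('HF_TOKEN') lets the app pick it up from the environment (for example, a Space secret) rather than relying on an HF_TOKEN name that may not be defined in the module.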