michaelthwan commited on
Commit
96a6d6a
1 Parent(s): 22d3c40
config/config.yaml CHANGED
@@ -3,7 +3,7 @@ gradio:
3
  port: 7860
4
  openai:
5
  api_url: "https://api.openai.com/v1/chat/completions"
6
- content_token: 3200 # tokens per content_main (e.g. transcript). If exceeded, it will be split and iterated
7
  timeout_sec: 25
8
  max_retry: 2
9
  api_key: ""
 
3
  port: 7860
4
  openai:
5
  api_url: "https://api.openai.com/v1/chat/completions"
6
+ content_token: 15000 # tokens per content_main (e.g. transcript). If exceeded, it will be split and iterated
7
  timeout_sec: 25
8
  max_retry: 2
9
  api_key: ""
digester/chatgpt_service.py CHANGED
@@ -213,7 +213,7 @@ class ChatGPTService:
213
  return prompts
214
 
215
  @staticmethod
216
- def single_call_chatgpt_with_handling(source_md, prompt_str: str, prompt_show_user: str, chatbot, api_key, gpt_model="gpt-3.5-turbo", history=[]):
217
  """
218
  Handling
219
  - token exceeding -> split input
@@ -272,7 +272,7 @@ class ChatGPTService:
272
  return gpt_response
273
 
274
  @staticmethod
275
- def single_rest_call_chatgpt(api_key, prompt_str: str, gpt_model="gpt-3.5-turbo", history=[], observe_window=None):
276
  """
277
  Single call chatgpt only. No handling of multiple calls (that should be in the upper caller multi_call_chatgpt_with_handling())
278
  - Support stream=True
 
213
  return prompts
214
 
215
  @staticmethod
216
+ def single_call_chatgpt_with_handling(source_md, prompt_str: str, prompt_show_user: str, chatbot, api_key, gpt_model, history=[]):
217
  """
218
  Handling
219
  - token exceeding -> split input
 
272
  return gpt_response
273
 
274
  @staticmethod
275
+ def single_rest_call_chatgpt(api_key, prompt_str: str, gpt_model, history=[], observe_window=None):
276
  """
277
  Single call chatgpt only. No handling of multiple calls (that should be in the upper caller multi_call_chatgpt_with_handling())
278
  - Support stream=True
digester/gradio_method_service.py CHANGED
@@ -7,7 +7,7 @@ from digester.chatgpt_service import LLMService, ChatGPTService
7
  from digester.util import Prompt, provide_text_with_css, GradioInputs
8
 
9
  WAITING_FOR_TARGET_INPUT = "Waiting for target source input"
10
- RESPONSE_SUFFIX = "⚡Powered by DigestEverythingGPT (in Github). Generated by AI"
11
 
12
 
13
  class GradioMethodService:
@@ -378,7 +378,7 @@ Instructions: (step by step instructions)
378
 
379
 
380
  if __name__ == '__main__':
381
- GPT_MODEL = "gpt-3.5-turbo"
382
  API_KEY = ""
383
  input_1 = """Give me 2 ideas for the summer"""
384
  # input_1 = """Explain more on the first idea"""
 
7
  from digester.util import Prompt, provide_text_with_css, GradioInputs
8
 
9
  WAITING_FOR_TARGET_INPUT = "Waiting for target source input"
10
+ RESPONSE_SUFFIX = "⚡by DigestEverythingGPT"
11
 
12
 
13
  class GradioMethodService:
 
378
 
379
 
380
  if __name__ == '__main__':
381
+ GPT_MODEL = "gpt-3.5-turbo-16k"
382
  API_KEY = ""
383
  input_1 = """Give me 2 ideas for the summer"""
384
  # input_1 = """Explain more on the first idea"""
digester/gradio_ui_service.py CHANGED
@@ -8,7 +8,7 @@ title_html = """
8
  <p align=\"center\">
9
  DigestEverythingGPT leverages ChatGPT/LLMs to help users quickly understand essential information from various forms of content, such as podcasts, YouTube videos, and PDF documents.<br>
10
  The prompt engineering is chained and tuned so that its result is of high quality and fast. It is not a simple single query and response tool.<br>
11
- Version 20230614 (
12
  <a href="https://github.com/michaelthwan/digest-everything-gpt"><i class="fa fa-github"></i> Github</a>
13
  ) (
14
  <a href="https://huggingface.co/spaces/michaelthwan/digest-everything-gpt"> HFSpace</a>
@@ -71,8 +71,8 @@ class GradioUIService:
71
  with gr.Accordion("Options", open=True):
72
  with gr.Row():
73
  gpt_model_textbox = gr.Dropdown(
74
- ["gpt-3.5-turbo", "gpt-4"],
75
- value="gpt-3.5-turbo", label="GPT model", info="gpt-3.5 is cheaper.\nBut if you found that the result is not good, try gpt-4 \nYour API key must support gpt-4"
76
  )
77
  with gr.Row():
78
  language_textbox = gr.Dropdown(
 
8
  <p align=\"center\">
9
  DigestEverythingGPT leverages ChatGPT/LLMs to help users quickly understand essential information from various forms of content, such as podcasts, YouTube videos, and PDF documents.<br>
10
  The prompt engineering is chained and tuned so that its result is of high quality and fast. It is not a simple single query and response tool.<br>
11
+ Version 20230614_2 (
12
  <a href="https://github.com/michaelthwan/digest-everything-gpt"><i class="fa fa-github"></i> Github</a>
13
  ) (
14
  <a href="https://huggingface.co/spaces/michaelthwan/digest-everything-gpt"> HFSpace</a>
 
71
  with gr.Accordion("Options", open=True):
72
  with gr.Row():
73
  gpt_model_textbox = gr.Dropdown(
74
+ ["gpt-3.5-turbo-16k", "gpt-4"],
75
+ value="gpt-3.5-turbo-16k", label="GPT model", info="gpt-3.5 is cheaper.\nBut if you found that the result is not good, try gpt-4 \nYour API key must support gpt-4"
76
  )
77
  with gr.Row():
78
  language_textbox = gr.Dropdown(
digester/test_chatgpt.py CHANGED
@@ -102,5 +102,5 @@ Example format:
102
  8:00 - This is the second part
103
  9:22 - This is the third part
104
  """
105
- GPT_MODEL = "gpt-3.5-turbo"
106
  ChatGPTService.single_rest_call_chatgpt(api_key, prompt_str, GPT_MODEL)
 
102
  8:00 - This is the second part
103
  9:22 - This is the third part
104
  """
105
+ GPT_MODEL = "gpt-3.5-turbo-16k"
106
  ChatGPTService.single_rest_call_chatgpt(api_key, prompt_str, GPT_MODEL)
digester/test_youtube_chain.py CHANGED
@@ -46,7 +46,7 @@ class VideoExample:
46
 
47
 
48
  class YoutubeTestChain:
49
- def __init__(self, api_key: str, gpt_model="gpt-3.5-turbo"):
50
  self.api_key = api_key
51
  self.gpt_model = gpt_model
52
 
 
46
 
47
 
48
  class YoutubeTestChain:
49
+ def __init__(self, api_key: str, gpt_model):
50
  self.api_key = api_key
51
  self.gpt_model = gpt_model
52
 
digester/util.py CHANGED
@@ -4,7 +4,7 @@ from pathlib import Path
4
  import tiktoken
5
  import yaml
6
 
7
- tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")
8
 
9
 
10
  class GradioInputs:
 
4
  import tiktoken
5
  import yaml
6
 
7
+ tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo-16k")
8
 
9
 
10
  class GradioInputs:
requirements.txt CHANGED
@@ -4,4 +4,4 @@ tiktoken>=0.3.3
4
  openai
5
  Markdown
6
  latex2mathml
7
- everything2text4prompt==0.0.11
 
4
  openai
5
  Markdown
6
  latex2mathml
7
+ everything2text4prompt==0.0.14