pierreguillou committed
Commit 7ebc9c6
1 Parent(s): eb9b9b5

pred_classes infos

Files changed (1)
  1. app.py +25 -13
app.py CHANGED
@@ -35,6 +35,16 @@ cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 # Step 4: define model
 predictor = DefaultPredictor(cfg)
 
+def get_bytes_shape_dtype(t):
+    """
+    input: tensor
+    output: 3 strings
+    """
+    t_numpy = t.cpu().numpy()
+    t_bytes = str(t_numpy.tobytes())
+    t_numpy_shape = str(t_numpy.shape)
+    t_numpy_dtype = str(t_numpy.dtype)
+    return t_bytes, t_numpy_shape, t_numpy_dtype
 
 def analyze_image(img):
     md = MetadataCatalog.get(cfg.DATASETS.TEST[0])
@@ -57,20 +67,19 @@ def analyze_image(img):
     for field in fields:
         if field == 'pred_boxes':
             boxes = output.get_fields()[field]
-            boxes_numpy = boxes.tensor.cpu().numpy()
-            boxes_bytes = str(boxes_numpy.tobytes())
-            boxes_numpy_shape = str(boxes_numpy.shape)
-            boxes_numpy_dtype = str(boxes_numpy.dtype)
+            boxes = boxes.tensor
+            boxes_bytes, boxes_numpy_shape, boxes_numpy_dtype = get_bytes_shape_dtype(boxes)
             # boxes_recover = torch.from_numpy(np.frombuffer(boxes_bytes, dtype=boxes_numpy_dtype).reshape(boxes_numpy_shape))
         elif field == 'scores':
            scores = output.get_fields()[field]
-            scores_numpy = scores.cpu().numpy()
-            scores_bytes = str(scores_numpy.tobytes())
-            scores_numpy_shape = str(scores_numpy.shape)
-            scores_numpy_dtype = str(scores_numpy.dtype)
+            scores_bytes, scores_numpy_shape, scores_numpy_dtype = get_bytes_shape_dtype(scores)
             # scores_recover = torch.from_numpy(np.frombuffer(scores_bytes, dtype=scores_numpy_dtype).reshape(scores_numpy_shape))
+        elif field == 'pred_classes':
+            pred_classes = output.get_fields()[field]
+            pred_classes_bytes, pred_classes_numpy_shape, pred_classes_numpy_dtype = get_bytes_shape_dtype(pred_classes)
+            # pred_classes_recover = torch.from_numpy(np.frombuffer(pred_classes_bytes, dtype=pred_classes_numpy_dtype).reshape(pred_classes_numpy_shape))
 
-    return result_image, num_instances, image_size, boxes_bytes, boxes_numpy_shape, boxes_numpy_dtype, scores_bytes, scores_numpy_shape, scores_numpy_dtype
+    return result_image, num_instances, image_size, boxes_bytes, boxes_numpy_shape, boxes_numpy_dtype, scores_bytes, scores_numpy_shape, scores_numpy_dtype, pred_classes_bytes, pred_classes_numpy_shape, pred_classes_numpy_dtype
 
 title = "Interactive demo: Document Layout Analysis with DiT"
 description = "Demo for Microsoft's DiT, the Document Image Transformer for state-of-the-art document understanding tasks. This particular model is fine-tuned on PubLayNet, a large dataset for document layout analysis (read more at the links below). To use it, simply upload an image or use the example image below and click 'Submit'. Results will show up in a few seconds. If you want to make the output bigger, right-click on it and select 'Open image in new tab'."
@@ -89,12 +98,15 @@ iface = gr.Interface(fn=analyze_image,
                      gr.outputs.Textbox(label="boxes numpy dtype"),
                      gr.outputs.Textbox(label="scores bytes"),
                      gr.outputs.Textbox(label="scores numpy shape"),
-                     gr.outputs.Textbox(label="scores numpy dtype")
+                     gr.outputs.Textbox(label="scores numpy dtype"),
+                     gr.outputs.Textbox(label="pred_classes bytes"),
+                     gr.outputs.Textbox(label="pred_classes numpy shape"),
+                     gr.outputs.Textbox(label="pred_classes numpy dtype")
                      ],
                      title=title,
                      description=description,
                      examples=examples,
                      article=article,
-                     css=css,
-                     enable_queue=True)
-iface.launch(debug=True, cache_examples=True)
+                     css=css
+                     )
+iface.launch(debug=True, cache_examples=True, enable_queue=True)
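The commented-out *_recover lines above hint at the inverse step: turning the bytes / shape / dtype strings back into a tensor. Below is a minimal round-trip sketch of that idea; it is not part of the commit, the helper name recover_tensor is illustrative, and it assumes the str()-wrapped values are parsed back with ast.literal_eval.

# Round-trip sketch (illustrative, not part of the commit): rebuild a tensor
# from the three strings produced by get_bytes_shape_dtype in app.py above.
import ast

import numpy as np
import torch

def recover_tensor(t_bytes, t_numpy_shape, t_numpy_dtype):
    # the app stores str()-wrapped values, so parse them back first
    raw = ast.literal_eval(t_bytes)          # "b'...'"  -> bytes
    shape = ast.literal_eval(t_numpy_shape)  # "(4, 4)"  -> (4, 4)
    array = np.frombuffer(raw, dtype=t_numpy_dtype).reshape(shape)
    return torch.from_numpy(array.copy())    # copy(): frombuffer output is read-only

# usage, mirroring what get_bytes_shape_dtype(t) returns
t = torch.rand(4, 4)
t_numpy = t.cpu().numpy()
t_bytes = str(t_numpy.tobytes())
t_shape = str(t_numpy.shape)
t_dtype = str(t_numpy.dtype)
assert torch.equal(recover_tensor(t_bytes, t_shape, t_dtype), t)

As written in the diff, the commented recover calls would pass the str()-wrapped bytes straight to np.frombuffer and the shape string to reshape, which numpy rejects, so parsing the strings first (as in the sketch) is what makes the round trip work.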