Files changed (1)
  1. README.md +4 -1
README.md CHANGED
@@ -49,7 +49,7 @@ including but not limited to fine-grained sentiment analysis (ABSA), product-rel
  If you find this work helpful, please cite our paper as follows:

  ```bibtex
- @inproceedings{wang2023fs-absa,
+ @inproceedings{10.1145/3539618.3591940,
  author = {Wang, Zengzhi and Xie, Qiming and Xia, Rui},
  title = {A Simple yet Effective Framework for Few-Shot Aspect-Based Sentiment Analysis},
  year = {2023},
@@ -58,8 +58,11 @@ publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3539618.3591940},
  doi = {10.1145/3539618.3591940},
+ abstract = {The pre-training and fine-tuning paradigm has become the main-stream framework in the field of Aspect-Based Sentiment Analysis (ABSA). Although it has achieved sound performance in the domains containing enough fine-grained aspect-sentiment annotations, it is still challenging to conduct few-shot ABSA in domains where manual annotations are scarce. In this work, we argue that two kinds of gaps, i.e., domain gap and objective gap, hinder the transfer of knowledge from pre-training language models (PLMs) to ABSA tasks. To address this issue, we introduce a simple yet effective framework called FS-ABSA, which involves domain-adaptive pre-training and text-infilling fine-tuning. We approach the End-to-End ABSA task as a text-infilling problem and perform domain-adaptive pre-training with the text-infilling objective, narrowing the two gaps and consequently facilitating the knowledge transfer. Experiments show that the resulting model achieves more compelling performance than baselines under the few-shot setting while driving the state-of-the-art performance to a new level across datasets under the fully-supervised setting. Moreover, we apply our framework to two non-English low-resource languages to demonstrate its generality and effectiveness.},
  booktitle = {Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval},
+ pages = {1765–1770},
  numpages = {6},
+ keywords = {few-shot learning, opinion mining, sentiment analysis},
  location = {Taipei, Taiwan},
  series = {SIGIR '23}
  }