@inproceedings{KaranEtAl2025COLING,
    title = "A Dataset for Expert Reviewer Recommendation with Large Language Models as Zero-shot Rankers",
    author = "Karan, Vanja M. and McQuistin, Stephen and Yanagida, Ryo and Perkins, Colin and Tyson, Gareth and Castro, Ignacio and Healey, Patrick G.T. and Purver, Matthew",
    editor = "Rambow, Owen and Wanner, Leo and Apidianaki, Marianna and Al-Khalifa, Hend and Di Eugenio, Barbara and Schockaert, Steven",
    booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
    month = jan,
    year = "2025",
    address = "Abu Dhabi, UAE",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.coling-main.756/",
    pages = "11422--11427",
    abstract = "The task of reviewer recommendation is increasingly important, with main techniques utilizing general models of text relevance. However, state of the art (SotA) systems still have relatively high error rates. Two possible reasons for this are: a lack of large datasets and the fact that large language models (LLMs) have not yet been applied. To fill these gaps, we first create a substantial new dataset, in the domain of Internet specification documents; then we introduce the use of LLMs and evaluate their performance. We find that LLMs with prompting can improve on SotA in some cases, but that they are not a cure-all: this task provides a challenging setting for prompt-based methods."
}

@inproceedings{HosseiniEtAl2025COLING,
    title = "Efficient Solutions For An Intriguing Failure of {LLM}s: Long Context Window Does Not Mean {LLM}s Can Analyze Long Sequences Flawlessly",
    author = "Hosseini, Peyman and Castro, Ignacio and Ghinassi, Iacopo and Purver, Matthew",
    editor = "Rambow, Owen and Wanner, Leo and Apidianaki, Marianna and Al-Khalifa, Hend and Di Eugenio, Barbara and Schockaert, Steven",
    booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
    month = jan,
    year = "2025",
    address = "Abu Dhabi, UAE",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.coling-main.128/",
    eprint = "2408.01866",
    archiveprefix = "arXiv",
    pages = "1880--1891",
    abstract = "Large Language Models (LLMs) have demonstrated remarkable capabilities in comprehending and analyzing lengthy sequential inputs, owing to their extensive context windows that allow processing millions of tokens in a single forward pass. However, this paper uncovers a surprising limitation: LLMs fall short when handling long input sequences. We investigate this issue using three datasets and two tasks (sentiment analysis and news categorization) across various LLMs, including Claude 3, Gemini Pro, GPT 3.5 Turbo, Llama 3 Instruct, and Mistral Instruct models. To address this limitation, we propose and evaluate ad-hoc solutions that substantially enhance LLMs' performance on long input sequences by up to 50{\%}, while reducing API cost and latency by up to 93{\%} and 50{\%}, respectively."
}