---
task_categories:
- question-answering
language:
- en
tags:
- TREC-RAG
- RAG
- MSMARCO
- MSMARCOV2.1
- Snowflake
- arctic
- arctic-embed
pretty_name: TREC-RAG-Embedding-Baseline
size_categories:
- 100M<n<1B
---

<!-- NOTE(review): the descriptive prose that originally sat between the metadata
     block and the usage example was lost to extraction garbling and is not
     reconstructed here. The `size_categories` value above is restored from the
     fragment "100M=" as the standard Hub value `100M<n<1B` — confirm. -->

## Usage example

Embed a query with `Snowflake/snowflake-arctic-embed-l` and score it against the
precomputed document embeddings in this dataset to retrieve the top-k documents.

```python
import numpy as np
import torch
from datasets import load_dataset
from transformers import AutoModel, AutoTokenizer

top_k = 100

# Stream the precomputed document embeddings from the Hub.
# NOTE(review): the loading loop below was reconstructed from the surviving
# fragment "...>= top_k: break"; the repo id is inferred from the card
# metadata — confirm both against the Hub.
docs_stream = load_dataset(
    "Snowflake/msmarco-v2.1-snowflake-arctic-embed-l",
    split="train",
    streaming=True,
)

docs = []
doc_embeddings = []
for doc in docs_stream:
    docs.append(doc)
    doc_embeddings.append(doc["embedding"])
    if len(doc_embeddings) >= top_k:
        break
doc_embeddings = np.asarray(doc_embeddings)

tokenizer = AutoTokenizer.from_pretrained('Snowflake/snowflake-arctic-embed-l')
model = AutoModel.from_pretrained('Snowflake/snowflake-arctic-embed-l', add_pooling_layer=False)
model.eval()

query_prefix = 'Represent this sentence for searching relevant passages: '
queries = ['how do you clean smoke off walls']
queries_with_prefix = ["{}{}".format(query_prefix, i) for i in queries]
query_tokens = tokenizer(queries_with_prefix, padding=True, truncation=True, return_tensors='pt', max_length=512)

# Compute token embeddings (CLS-token pooling: [0][:, 0]).
with torch.no_grad():
    query_embeddings = model(**query_tokens)[0][:, 0]

# normalize embeddings
query_embeddings = torch.nn.functional.normalize(query_embeddings, p=2, dim=1)
# BUG FIX: the original passed the NumPy array straight to
# torch.nn.functional.normalize (which requires a Tensor) and then fed torch
# tensors into np.matmul / .transpose(); convert explicitly in both directions.
doc_embeddings = torch.nn.functional.normalize(torch.from_numpy(doc_embeddings), p=2, dim=1)

# Compute dot score between query embedding and document embeddings
dot_scores = np.matmul(query_embeddings.numpy(), doc_embeddings.numpy().transpose())[0]
top_k_hits = np.argpartition(dot_scores, -top_k)[-top_k:].tolist()

# Sort top_k_hits by dot score
top_k_hits.sort(key=lambda x: dot_scores[x], reverse=True)

# Print results
print("Query:", queries[0])
for doc_id in top_k_hits:
    print(docs[doc_id]['doc_id'])
    print(docs[doc_id]['text'])
    print(docs[doc_id]['url'], "\n")
```