Integrating retrieved information with LLM generation
To integrate retrieved information with LLM generation, we extend the retriever with a generator model and build a prompt that incorporates the retrieved documents:
from transformers import AutoModelForCausalLM, AutoTokenizer

class GenerativeRAG(QueryExpansionRAG):
    def __init__(self, retriever_model, generator_model, knowledge_base):
        super().__init__(retriever_model, knowledge_base)
        # Load a causal LM and its tokenizer for answer generation.
        self.generator = AutoModelForCausalLM.from_pretrained(generator_model)
        self.generator_tokenizer = AutoTokenizer.from_pretrained(generator_model)

    def generate_response(self, query, max_length=100):
        ...
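The body of generate_response is elided above. Here is a minimal sketch of how it might be completed, assuming the retrieve() method inherited from QueryExpansionRAG takes a query and a top_k argument and returns a list of document strings (an assumption about the earlier retriever code), and treating max_length as a budget of newly generated tokens:

    def generate_response(self, query, max_length=100):
        # Fetch supporting documents; retrieve() and its return type
        # are assumed from the parent QueryExpansionRAG class.
        docs = self.retrieve(query, top_k=3)

        # Build a prompt that places the retrieved context before the question.
        context = "\n".join(docs)
        prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"

        # Tokenize the prompt and generate with the causal LM;
        # max_length is interpreted here as the number of new tokens.
        inputs = self.generator_tokenizer(prompt, return_tensors="pt")
        output_ids = self.generator.generate(
            **inputs,
            max_new_tokens=max_length,
            do_sample=False,
        )

        # Decode only the newly generated tokens: decoder-only models
        # echo the prompt, so we slice it off before decoding.
        new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
        return self.generator_tokenizer.decode(
            new_tokens, skip_special_tokens=True
        )

Slicing off the prompt tokens before decoding keeps the returned answer clean; without it, a decoder-only model's output would repeat the entire prompt ahead of the answer.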