from lib.files import *
from lib.memory import *
from lib.grapher import *
from lib.pipes import *
from lib.entropy import *
from lib.sonsofstars import *

import sys
import internetarchive
import requests

# Core components: long-term text memory, the assistant core, NLP memory,
# the graph builder and the external API requester.
longMem = TextFinder("resources")
coreAi = AIAssistant()
memory = MemoriaRobotNLP(max_size=200000)
grapher = Grapher(memory)
sensor_request = APIRequester()


class I:
    def __init__(self, prompt, frases_yo, preferencias, propiedades_persona):
        # Self-describing sentences, preferences and personal traits of the agent.
        self.frases_yo = frases_yo
        self.preferencias = preferencias
        self.propiedades_persona = propiedades_persona
        self.dopamina = 0.0

    def obtener_paths_grafo(self, grafo_ngx):
        # Obtain the paths of an ngx graph (not implemented yet).
        pass

    ## create questions from internet archive
    def crear_preguntas(self, txt):
        # Note: the search query is taken from the command line, not from the txt argument.
        search = internetarchive.search_items(sys.argv[1])
        res = []
        for result in search:
            print(result["identifier"])
            idc = result["identifier"]
            headers = {"accept": "application/json"}
            ## get book pages
            req2 = requests.get(
                "https://archive.org/stream/" + idc + "/" + idc + "_djvu.txt",
                headers=headers,
            )
            # print(req2.text)
            try:
")[1].split("")[0].split("