Having already sent you the question, I also wanted to share the solution I arrived at, and to ask whether there are other, better ways of solving this.
#!/usr/bin/python
import sqlite3
from crawlingUtilities import URL_Canonicalization


class Sqlite3Extract:
    def __init__(self, db_name):
        self.db_name = db_name
        self.con = sqlite3.connect(self.db_name)
        self.con.isolation_level = None

    def ans_y(self, answer):
        for x in enumerate(answer):
            yield x

    def sqlite3_extraction_meth(self):
        # Fetch the distinct links and yield them one at a time.
        self.cur = self.con.cursor()
        self.cur.execute("SELECT DISTINCT link FROM link;")
        ris = self.cur.fetchall()
        for i, weblink in enumerate(ris):
            yield weblink
        self.ans_y(ris).close()

    def get_sqlite3_content(self):
        return self.sqlite3_extraction_meth()


if __name__ == '__main__':
    nome_database = 'xyz.db'
    sqlite3_extract = Sqlite3Extract(nome_database)
    coll = sqlite3_extract.get_sqlite3_content()
    # Canonicalize the first 10 links pulled from the database.
    for i in xrange(10):
        url_to_canonicalize = coll.next()[0]
        url_canonicalization = URL_Canonicalization(url_to_canonicalize)
        url_canonicalized = url_canonicalization.get_canonized_url()
        print "url_canonicalized= ", url_canonicalized

What do you think?
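P.S. For comparison, here is a sketch of a more compact variant I was also considering. It assumes the same xyz.db schema (table "link" with a "link" column) and the same URL_Canonicalization class from crawlingUtilities; the idea is that the sqlite3 cursor is itself an iterator, so the wrapper class and fetchall() can be dropped:

#!/usr/bin/python
import itertools
import sqlite3

from crawlingUtilities import URL_Canonicalization

con = sqlite3.connect('xyz.db')
# Connection.execute() is a shortcut that creates a cursor and runs the query.
cur = con.execute("SELECT DISTINCT link FROM link;")

# Take only the first 10 rows, without loading the whole result set in memory.
for (url_to_canonicalize,) in itertools.islice(cur, 10):
    print "url_canonicalized= ", URL_Canonicalization(url_to_canonicalize).get_canonized_url()

con.close()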