The following code gets data from 5 different websites at the "same time".
#!/usr/bin/python

import Queue
import threading
import urllib2
import time

hosts = ["http://yahoo.com", "http://google.com", "http://amazon.com",
         "http://ibm.com", "http://apple.com"]

queue = Queue.Queue()

class MyUrl(threading.Thread):
    """Worker thread: pulls hosts off the shared queue and fetches them."""

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            host = self.queue.get()
            if host is None:          # sentinel: no more work
                break
            url = urllib2.urlopen(host)
            print url.read(1024)
            #self.queue.task_done()

start = time.time()

def main():
    # Start a pool of five worker threads.
    for i in range(5):
        t = MyUrl(queue)
        t.setDaemon(True)
        t.start()

    # Feed the hosts to the workers.
    for host in hosts:
        print "pushing", host
        queue.put(host)

    # One sentinel per worker so each one exits its loop.
    for i in range(5):
        queue.put(None)
    t.join()

if __name__ == "__main__":
    main()
    print "Elapsed Time: %s" % (time.time() - start)

How does the parallel download work if each thread has a lock? When the program opens www.yahoo.com, it places a lock on the thread, right? If so, doesn't that mean the other 4 sites have to wait for that thread to release the lock?
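
For what it's worth, here is a minimal sketch of the behaviour in question (assumptions: time.sleep stands in for the blocking network call, and the host names are made up, so nothing is actually downloaded). Blocking calls such as sleep and socket reads release the GIL while they wait, so five one-second waits finish in roughly one second of wall time rather than five:

# Sketch only: fake_download is a stand-in for urlopen + read.
import threading
import time

def fake_download(host):
    time.sleep(1)                      # pretend this is the blocking fetch
    print "done", host

start = time.time()
threads = [threading.Thread(target=fake_download, args=(h,))
           for h in ["yahoo", "google", "amazon", "ibm", "apple"]]
for t in threads:
    t.start()
for t in threads:
    t.join()
print "Elapsed: %.2f" % (time.time() - start)   # roughly 1s, not 5s

The same applies to urllib2: the interpreter lock is only held while a thread is executing Python bytecode, not while it sits blocked inside the C-level socket call, which is why the five fetches can overlap.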