Hi. Really basic question!!
I've got a chunk of test Python code, and I'm trying to figure out a quick/easy way to capture any and all errors/exceptions that get thrown. For the test process I need to "ensure" that every potential error is captured.

- Could/should I wrap the entire function in a try/except where I call it from the parent process? (A rough sketch of what I mean is at the bottom, after the sample code.)
- Should I have separate try/except blocks within the function?
- The test app is run from the CLI; is there a Python command-line option that automatically captures all errors?

Any thoughts? Thanks!

A sample of the test code is:

def getParseCollegeFacultyList1(url, content):
    s=content
    s=s.replace(" ","")
    if(debug==1):
        print "s="+s
    url=url.strip("/")
    #got the page/data... parse it and get the "schools"..
    #use the "dept" list as the school
    # s contains HTML not XML text
    d = libxml2dom.parseString(s, html=1)
    ###########################################################
    #--
    #--create the output data file for the registrar/start data
    #--
    #--
    ###########################################################
    #term_in=201336&&sel_subj=ACCT
    if(debug==1):
        print "inside parse state/college function \n"
    #-----------Form------------
    #fetch the option val/text for the "depts" which are used
    #as the dept abbrv/name on the master side
    #-- the school matches the dept...
    #-- this results in separate packets for each dept
    p="//a[contains(@href,'SelectTeacher') and @id='last']//attribute::href"
    ap="//a[contains(@href,'campusRatings.jsp')]//attribute::href"
    hpath="//div[@id='profInfo']/ul/li[1]//a/attribute::href"   # -get the college website
    cpath="//div[@id='profInfo']/ul/li[2]/text()"               #-get the city,state
    colpath="//h2/text()"                                       #-college name
    xpath="//a[contains(@title,'school id:')]/attribute::href"
    hh_ = d.xpath(hpath)
    cc_ = d.xpath(cpath)
    col_ = d.xpath(colpath)
    ap_ = d.xpath(ap)
    if(debug==1):
        print "hhl "+str(len(hh_))
        print "ccl "+str(len(cc_))
    web=""
    if (len(hh_)>0):
        web=hh_[0].textContent
    city=""
    if (len(cc_)>0):
        city=cc_[0].textContent
    colname=""
    if (len(col_)>0):
        colname=col_[0].textContent
        colname=colname.encode('ascii', 'ignore').strip()
    #
    # set up out array
    #
    ret={}
    out={}
    row={}
    jrow=""
    ndx=0
    pcount_ = d.xpath(p)
    if(len(pcount_)==0):
        #at least one success/entry.. but apparently only a single page..
        status=True
        #count=pcount_[0].textContent.strip()
        #countp=count.split('&pageNo=')
        #count=countp[1]
        #rr=countp[0]
        if(len(ap_)==1):
            idd=ap_[0].textContent.strip()
            idd=idd.split("?sid=")
            idd=idd[1].split("&")
            idd=idd[0].strip()
            nurl=url+"/SelectTeacher.jsp?sid="+idd+"&pageNo=1"
            #nurl=url+"&pageNo=1"
            row={}
            row['WriteData']=True
            row['tmp5']=web
            row['tmp6']=city
            row['tmp7']=colname
            row['tmp8']=nurl
            #don't json for now
            #jrow=simplejson.dumps(row)
            jrow=row
            out[ndx]=jrow
            ndx = ndx+1
    else:
        #at least one success/entry.. set the status
        status=True
        count=pcount_[0].textContent.strip()
        countp=count.split('&pageNo=')
        count=countp[1]
        rr=countp[0]
        if(debug==1):
            print "c ="+str(count)+"\n"
        for t in range(1,int(count)+1):
            nurl=url+rr+"&pageNo="+str(t)
            if(debug==1):
                print "nurl = "+nurl+"\n"
            row={}
            row['WriteData']=True
            row['tmp5']=web
            row['tmp6']=city
            row['tmp7']=colname
            row['tmp8']=nurl
            #don't json for now
            #jrow=simplejson.dumps(row)
            jrow=row
            out[ndx]=jrow
            ndx = ndx+1
    ret['data']=simplejson.dumps(out)
    ret['count']=ndx
    ret['status']=status
    return(ret)
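To make the first option concrete, this is roughly what I had in mind for wrapping the call from the parent/top level. It's only a rough sketch: the url/content values, the "errors.log" file name, and the main() wrapper are made up for illustration, and it assumes getParseCollegeFacultyList1() is defined in the same file.

import traceback

def main():
    # made-up placeholder inputs; in the real run these come from the test harness
    url = "http://www.example.com/ShowRatings"
    content = open("page.html").read()
    try:
        ret = getParseCollegeFacultyList1(url, content)
        print("count=" + str(ret['count']) + " status=" + str(ret['status']))
    except Exception:
        # anything raised inside the function (or by libxml2dom/simplejson) lands here
        with open("errors.log", "a") as f:
            f.write(traceback.format_exc())
        raise   # re-raise so the CLI run still shows the failure

if __name__ == "__main__":
    main()

Is that the usual approach, or is it better to put smaller try/except blocks around the individual steps inside the function so I can tell which step failed?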