Test program shows, python/sikulix can handle about 3m chars per second so I must continue with a parser attempt.

# test char processing speed.
# test if sikulix 1.1, python, jython/whatever is a piece of shit when it comes to parsing dealing with characters, unicode processing bottleneck theory.

import time
import random

MaxChars = 3000000

MinChar = 33
MaxChar = 200

Chars = []

def GenerateChars():
vRandomChar = ""
for vIndex in range(0, MaxChars):
 vRandomChar = chr(random.randrange(MinChar,MaxChar))
 Chars.append(vRandomChar)
return

def ProcessChars():
Result = 0
for vIndex in range(0, MaxChars):
 if Chars[vIndex] == chr(45):
  Result = Result + 1
return Result

Tick1 = time.time()
GenerateChars()
Tick2 = time.time()
Seconds = Tick2 - Tick1
print "GenerateChars seconds: " + str(Seconds)

Tick1 = time.time()
ProcessChars()
Tick2 = time.time()
Seconds = Tick2 - Tick1
print "ProcessChars seconds: " + str(Seconds)



#print Chars


--
https://mail.python.org/mailman/listinfo/python-list

Reply via email to