from decimal import Decimal as D

def bestFracForMinimumError(decimal, minimumError):
    denom = 0
    while True:
        denom += 1
        num = round(D(str(decimal)) * D(str(denom)))
        error = abs(str((str(D(num) / D(str(denom))) - D(str(decimal))) /
                        str(D(str(decimal)) * d("100"))))
        if error <= D(minimumError):
            break
    return int(num), D(denom), error
dec = D(".34576598876876867756765765") me = D(".0001") print bestFracForMinimumError(dec, me) Traceback (most recent call last): File "fracSimple2-c.py", line 17, in <module> print bestFracForMinimumError(dec, me) File "fracSimple2-c.py", line 8, in bestFracForMinimumError error = abs(str((str(D(num) / D(str(denom))) - D(str(decimal))) / str(D(str( decimal)) * d("100")))) File "E:\Python25\lib\decimal.py", line 578, in __new__ "First convert the float to a string") TypeError: Cannot convert float to Decimal. First convert the float to a string I don't understand this TypeError. Seems to me that I've converted EVERYTHING in that line 8 to a string. Dick _______________________________________________ Tutor maillist - Tutor@python.org http://mail.python.org/mailman/listinfo/tutor