Context
I'm using Windows 7 on my computer (the player) and Linux (Debian) on my college computer (the streamer), which I control over SSH.
I was trying to simulate the constant byte rate of a microphone by reading from a wave file, as if someone were talking. The problem was that the byte rate I obtained was below the target.
I chose a 32 KB/s rate and 0.020 seconds of capture time per chunk.
I implemented the simulated microphone using time.sleep to produce a chunk of data every 0.020 seconds, but the rate obtained was around 27 KB/s, not 32 KB/s.
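For reference, the arithmetic behind those numbers, using the same constants that appear in the code further down:

FRAMERATE = 16000                              # samples per second
SAMPWIDTH = 2                                  # bytes per sample
BYTE_RATE = FRAMERATE * SAMPWIDTH              # 32000 B/s, the 32 KB/s target
CHUNK_DURATION = 0.020                         # seconds of capture per chunk
CHUNK_BYTES = int(CHUNK_DURATION * BYTE_RATE)  # 640 bytes per chunk, i.e. 50 chunks/s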
The problem
I decided to test how precise time.sleep is on the Linux machine, using ideas from this question.
I did two kinds of tests: 1) busy sleep and 2) normal sleep.
On average, from the samples I got, the Linux machine's sleep resolution is about 4 ms, while on Windows it is less than or equal to 1 ms.
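One quick side check (Python 3.3+ only, so not part of the tests below) is to ask Python what resolution it reports for its clocks; this does not by itself explain the 4 ms, but it shows what the platform advertises:

import time

# Print the advertised implementation and resolution of the clocks Python uses
# (available since Python 3.3).
for name in ("time", "monotonic"):
    info = time.get_clock_info(name)
    print("{}: implementation={} resolution={}".format(
        name, info.implementation, info.resolution))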
Questions
- What could be limiting the sleep resolution on the Linux machine?
- (On Linux) Why does busy sleeping have the same resolution as time.sleep?
- How could I successfully simulate a microphone from reading a wave file?
Code
import time

def busy_sleep(t):
    # Spin until t seconds have elapsed.
    s = time.time()
    while time.time() - s < t:
        pass
    e = time.time()
    return e - s

def normal_sleep(t):
    s = time.time()
    time.sleep(t)
    e = time.time()
    return e - s

def test(fun):
    # Average of 10 runs for each requested sleep duration.
    f = lambda x: sum(fun(x) for _ in range(10)) / 10
    print("0.100:{}".format(f(0.100)))
    print("0.050:{}".format(f(0.050)))
    print("0.025:{}".format(f(0.025)))
    print("0.020:{}".format(f(0.020)))
    print("0.010:{}".format(f(0.010)))
    print("0.009:{}".format(f(0.009)))
    print("0.008:{}".format(f(0.008)))
    print("0.007:{}".format(f(0.007)))
    print("0.006:{}".format(f(0.006)))
    print("0.005:{}".format(f(0.005)))
    print("0.004:{}".format(f(0.004)))
    print("0.003:{}".format(f(0.003)))
    print("0.002:{}".format(f(0.002)))
    print("0.001:{}".format(f(0.001)))

if __name__ == "__main__":
    print("Testing busy_sleep:")
    test(busy_sleep)
    print("Testing normal_sleep:")
    test(normal_sleep)
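On Python 3.3+ I could also measure with time.perf_counter instead of time.time, since time.time itself can be fairly coarse on some platforms; a variant of normal_sleep using it would look like this (not part of the tests above):

import time

def normal_sleep_perf(t):
    # Same measurement as normal_sleep, but using the higher-resolution
    # performance counter (Python 3.3+).
    s = time.perf_counter()
    time.sleep(t)
    return time.perf_counter() - s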
Results
"""
Debian
Testing busy_sleep:
0.100:0.10223722934722901
0.050:0.051996989250183104
0.025:0.027996940612792967
0.020:0.02207831859588623
0.010:0.011997451782226562
0.009:0.011997222900390625
0.008:0.009998440742492676
0.007:0.007997279167175292
0.006:0.0079974365234375
0.005:0.007997465133666993
0.004:0.005918483734130859
0.003:0.003997836112976074
0.002:0.0039977550506591795
0.001:0.003997611999511719
Testing normal_sleep:
0.100:0.1020797061920166
0.050:0.051999988555908205
0.025:0.028000001907348634
0.020:0.02192000865936279
0.010:0.011999979019165039
0.009:0.012000055313110351
0.008:0.010639991760253906
0.007:0.008000001907348633
0.006:0.00799997329711914
0.005:0.008000059127807617
0.004:0.006159958839416504
0.003:0.004000000953674317
0.002:0.00399998664855957
0.001:0.004000091552734375
$ uname -a
Linux 3.2.0-4-amd64 #1 SMP Debian 3.2.57-3+deb7u2 x86_64 GNU/Linux
"""
"""
Windows 7
Testing busy_sleep:
0.100:0.10000572204589844
0.050:0.05000288486480713
0.025:0.0250014066696167
0.010:0.010500597953796388
0.009:0.010500597953796388
0.008:0.008000493049621582
0.007:0.00740041732788086
0.006:0.006400299072265625
0.005:0.005400300025939942
0.004:0.004700303077697754
0.003:0.003200197219848633
0.002:0.002700185775756836
0.001:0.0016000032424926759
Testing normal_sleep:
0.100:0.10000579357147217
0.050:0.0500028133392334
0.025:0.02500150203704834
0.010:0.01000049114227295
0.009:0.0100006103515625
0.008:0.008000493049621582
0.007:0.007000398635864257
0.006:0.006000304222106934
0.005:0.00500030517578125
0.004:0.0040001869201660155
0.003:0.0030002117156982424
0.002:0.0020000934600830078
0.001:0.0010000944137573243
"""
Real code
import os
import wave
import sys
import io
import time

FORMAT = 8 # get_format_from_width(2)
NCHANNELS = 1
FRAMERATE = 16000 # samples per second
SAMPWIDTH = 2 # bytes in a sample
BYTE_RATE = FRAMERATE*SAMPWIDTH
CHUNK_DURATION = 0.020
CHUNK_BYTES = int(CHUNK_DURATION*BYTE_RATE) # 640 bytes per chunk

class StreamSimulator:
    def __init__(self):
        # Load the whole wave file into an in-memory buffer.
        wf = wave.open("Kalimba.wav","rb")
        buf = io.BytesIO()
        buf.write(wf.readframes(wf.getnframes()))
        wf.close()
        buf.seek(0)
        self.buf = buf
        self.step = time.time()

    def delay(self):
        # Sleep for whatever is left of CHUNK_DURATION since the last call.
        delta = time.time() - self.step
        self.step = time.time()
        delay = CHUNK_DURATION - delta
        if delay > 0.001:
            time.sleep(delay)

    def read(self):
        buf = self.buf
        data = buf.read(CHUNK_BYTES)
        if len(data) == 0:
            # Loop the file when it runs out.
            buf.seek(0)
            data = buf.read(CHUNK_BYTES)
        self.delay()
        return data

    def close(self):
        self.buf.close()

class DynamicPainter:
    def __init__(self):
        self.l = 0

    def paint(self, obj):
        # Overwrite the previously printed text in place using backspaces.
        str1 = str(obj)
        l1 = len(str1)
        bs = "\b"*self.l
        clean = " "*self.l
        total = bs+clean+bs+str1
        sys.stdout.write(total)
        sys.stdout.flush()
        self.l = l1

if __name__ == "__main__":
    painter = DynamicPainter()
    stream = StreamSimulator()
    produced = 0
    how_many = 0
    painted = time.time()
    while True:
        # Count the bytes and chunks produced during each one-second window.
        while time.time() - painted < 1:
            d = stream.read()
            produced += len(d)
            how_many += 1
        producing_speed = int(produced/(time.time()-painted))
        painter.paint("Producing speed: {} how many: {}".format(producing_speed, how_many))
        produced = 0
        how_many = 0
        painted = time.time()
Edit
Changed "Real Code" , added measure of time including sleeping time.
But now I have the DOUBLE Byte Rate: Producing speed: 63996 how many: 100
This got me so MUCH confused. I have tried with different byterates and it ends up being the double everytime.
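A guess at what might be happening (based only on the delay() method above): self.step is updated before time.sleep, so the time spent sleeping is also counted against the next chunk's budget, and roughly every second call skips its sleep. A stripped-down reproduction of just that pacing logic:

import time

# Reproduce only the pacing from delay() above (no audio) and count how often
# it actually sleeps; if the guess is right, it produces roughly twice as many
# chunks as sleeps, i.e. close to double the intended byte rate.
CHUNK_DURATION = 0.020
step = time.time()
chunks = 0
sleeps = 0
start = time.time()
while time.time() - start < 1.0:
    delta = time.time() - step
    step = time.time()              # reference taken *before* sleeping
    delay = CHUNK_DURATION - delta
    if delay > 0.001:
        time.sleep(delay)
        sleeps += 1
    chunks += 1
print("chunks: {} sleeps: {}".format(chunks, sleeps))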
Conclusion
Thanks to @J.F.Sebastian and his code, I learned that:
- It's better to use a deadline as a time reference than to create a new reference each loop.
- Using a deadline "amortizes" the imprecision of time.sleep: it oscillates a bit around the desired bitrate, but results in a correct (and much more constant) average.
- You only need to call time.time() once per iteration, which means fewer calculation imprecisions.
As a result, I get a constant 32000 B/s, sometimes oscillating to 31999 and very rarely to 31745.
Now I can hear the music without any lag or jitter!
Final Code
def read(self):
    buf = self.buf
    data = buf.read(CHUNK_BYTES)
    if len(data) == 0:
        buf.seek(0)
        data = buf.read(CHUNK_BYTES)
    # self.deadline is a fixed reference that advances by CHUNK_DURATION each
    # chunk (presumably initialized to time.time() in __init__, not shown here),
    # so sleep errors do not accumulate.
    self.deadline += CHUNK_DURATION
    delay = self.deadline - time.time()
    if delay > 0:
        time.sleep(delay)
    return data
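For completeness, a self-contained sketch of the same deadline idea (the __init__ is not shown above, so initializing the deadline to time.time() is my assumption):

import time

CHUNK_DURATION = 0.020
CHUNK_BYTES = 640   # 0.020 s * 32000 B/s

def paced_read(duration=1.0):
    # Deadline-based pacing: the deadline advances by a fixed step per chunk,
    # so individual sleep errors do not accumulate.
    deadline = time.time()
    produced = 0
    start = time.time()
    while time.time() - start < duration:
        data = b"\x00" * CHUNK_BYTES   # stand-in for buf.read(CHUNK_BYTES)
        deadline += CHUNK_DURATION
        delay = deadline - time.time()
        if delay > 0:
            time.sleep(delay)
        produced += len(data)
    return produced

if __name__ == "__main__":
    print("bytes produced in ~1 s: {}".format(paced_read()))  # close to 32000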