Skip to content

Commit

Permalink
format
Browse files Browse the repository at this point in the history
  • Loading branch information
cnheider committed Jun 30, 2021
1 parent 1b01daa commit 4bcc5d3
Show file tree
Hide file tree
Showing 51 changed files with 4,307 additions and 4,631 deletions.
15 changes: 7 additions & 8 deletions benchmarks/benchmark_func.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,10 @@


def benchmark_func(func, times=100000):
    """Time repeated invocations of *func*.

    Args:
        func: Zero-argument callable to benchmark.
        times: Number of invocations to perform.

    Returns:
        tuple: (total elapsed seconds for all invocations,
                return value of the last invocation, or None if times == 0).
    """
    result = None
    # perf_counter is monotonic and has the highest available resolution,
    # making it the right clock for benchmarking (time.time can jump).
    start = time.perf_counter()
    for _ in range(times):
        result = func()
    end = time.perf_counter()
    return end - start, result
107 changes: 50 additions & 57 deletions benchmarks/pqp_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,71 +6,64 @@


class Zeroes(PooledQueueTask):
    def call(self, batch_size, *args, tensor_size=(9, 9, 9, 9), **kwargs):
        """Produce one synthetic batch of all-zero tensors with index labels.

        Returns a tuple ``(imgs, ground_truth)`` where ``imgs`` has shape
        ``(batch_size, *tensor_size)`` and ``ground_truth`` is the float32
        vector ``[0, 1, ..., batch_size - 1]``.
        """
        indices = range(batch_size)
        imgs = numpy.array(
            [numpy.zeros(tensor_size) for _ in indices], dtype=numpy.float32
        )
        ground_truth = numpy.array(list(indices), dtype=numpy.float32)
        return (imgs, ground_truth)


def Lamb(a, tensor_size):
    """Format *a* and *tensor_size* as the string repr of their tuple.

    Was previously a lambda assigned to a name (PEP 8 E731); a ``def`` also
    makes the task picklable, which the benchmark's own comment notes a
    lambda is not.
    """
    return f"{a, tensor_size}"


def Func(a, tensor_size):
    """Return the string repr of the ``(a, tensor_size)`` tuple."""
    pair = (a, tensor_size)
    return f"{pair}"


def pqp_benchmark():
    """Compare queued (PooledQueueProcessor) versus direct batch generation.

    Times four fetch strategies over ``samples`` iterations and prints the
    elapsed seconds for each; the sleeping variants subtract the accumulated
    sleep time so only the fetch cost remains.
    """
    task = Zeroes()
    # task = Lamb  # NOTE: cannot be used here — lambdas cannot be pickled
    # task = Func
    batch_size = 16
    tensor_size = (9, 9, 9, 9, 9)
    wait_time = 0.1
    samples = 100

    df = PooledQueueProcessor(
        task,
        args=[batch_size],
        kwargs={"tensor_size": tensor_size},
        max_queue_size=samples,
    )

    def get():
        """Fetch one pre-computed batch from the processor queue."""
        return df.get()

    def wait_get():
        """Sleep, then fetch from the queue (simulates a busy consumer)."""
        time.sleep(wait_time)
        return df.get()

    def generate():
        """Synthesize a batch directly, bypassing the queue."""
        return task(batch_size, tensor_size=tensor_size)

    def wait_generate():
        """Sleep, then synthesize a batch directly."""
        time.sleep(wait_time)
        return task(batch_size, tensor_size=tensor_size)

    candidates = (get, wait_get, generate, wait_generate)
    # Sleeping variants accumulate samples * wait_time of pure sleep; discount it.
    discounts = (0, samples * wait_time, 0, samples * wait_time)
    for func, discount in zip(candidates, discounts):
        t, res = benchmark_func(func, samples)
        print(f"{func.__name__}: {t - discount} seconds")


if __name__ == "__main__":
    pqp_benchmark()
113 changes: 52 additions & 61 deletions benchmarks/returns_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,67 +6,58 @@


def returns_benchmark():
    """Benchmark the cost of different multi-value return styles.

    Times each style over the default iteration count of ``benchmark_func``
    and prints its name, elapsed seconds, and last returned value.
    """
    a, b, c = 1, 2, 3

    RandomABC = namedtuple("RandomABC", ("a", "b", "c"))

    def implicit_return():
        """Return the values via implicit tuple packing."""
        return a, b, c

    def list_return():
        """Return the values in a list."""
        return [a, b, c]

    def tuple_return():
        """Return the values in an explicit tuple."""
        return (a, b, c)

    def dict_return():
        """Return the values in a dict keyed by name."""
        return {"a": a, "b": b, "c": c}

    def sorcery_return():
        """Return the values via sorcery's name-capturing dict_of."""
        return sorcery.dict_of(a, b, c)

    def nod_return():
        """Return the values in a NOD built from keyword arguments."""
        return NOD(a=a, b=b, c=c)

    def inferred_return():
        """Return the values in a NOD with names inferred from the call."""
        return NOD.nod_of(a, b, c)

    def namedtuple_return():
        """Return the values in a namedtuple."""
        return RandomABC(a, b, c)

    styles = (
        implicit_return,
        list_return,
        tuple_return,
        dict_return,
        namedtuple_return,
        nod_return,
        sorcery_return,
        inferred_return,
    )
    for func in styles:
        t, res = benchmark_func(func)
        print(f"{func.__name__}: {t} seconds, {res}")


if __name__ == "__main__":
    returns_benchmark()
Loading

0 comments on commit 4bcc5d3

Please sign in to comment.