# code4.py
#!/usr/bin/env python
import numpy as np
import argparse
from multiprocessing import Process, Queue, Pipe, cpu_count
import mkl

# Pin MKL (numpy's BLAS backend) to a single thread so that all
# parallelism comes from the worker processes spawned below, not from
# nested BLAS threading inside each worker.
mkl.set_num_threads(1)

def fun(my_id, orderq, rezq):
    """Worker loop: turn seed orders into matrix-product checksums.

    my_id  -- integer identity tagged onto every result
    orderq -- queue of integer orders; a negative order ends the loop
    rezq   -- shared result queue receiving [my_id, checksum] pairs

    Each order seeds numpy's RNG, two 100x100 random matrices are
    multiplied, and the sum of the product is sent back.
    """
    while (seed := orderq.get()) >= 0:
        np.random.seed(seed)
        checksum = np.sum(np.dot(np.random.rand(100, 100),
                                 np.random.rand(100, 100)))
        rezq.put([my_id, checksum])

def main(argv=None):
    """Distribute Nm matrix-product checksums over a pool of workers.

    Parses command-line arguments (or *argv* when given), spawns one
    worker process per core, feeds each worker seed orders, accumulates
    the partial sums the workers send back, and returns the grand total.

    argv -- optional list of argument strings (defaults to sys.argv[1:])
    Returns the accumulated sum of all Nm matrix-product checksums.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("Nm", help="Number of matrices", type=int)
    parser.add_argument("-ncores", help="Number of cores to use", type=int)
    args = parser.parse_args(argv)

    Nm = args.Nm
    ncores = args.ncores if args.ncores is not None else cpu_count()

    # Never spawn more workers than there are matrices to process.
    ncores = min(ncores, Nm)

    # Result queue shared by all workers.
    rezq = Queue()

    # One private order queue per worker.
    # (A Pipe would be faster here, but a Queue keeps the code uniform.)
    procs = []
    orderqs = []
    for pid in range(ncores):
        orderq = Queue()
        p = Process(target=fun, args=(pid, orderq, rezq))
        p.start()
        procs.append(p)
        orderqs.append(orderq)

    # Prime every worker with one initial order.
    for i in range(ncores):
        orderqs[i].put(i)

    rez = 0
    # As each result arrives, hand that worker the next pending order.
    for i in range(ncores, Nm):
        w_id, s = rezq.get()
        rez += s
        orderqs[w_id].put(i)

    # Collect the final in-flight result from each worker.
    for _ in range(ncores):
        _w_id, s = rezq.get()
        rez += s

    # A negative order tells a worker to exit; then reap the process.
    for i in range(ncores):
        orderqs[i].put(-1)
        procs[i].join()

    return rez


# The __main__ guard is required by multiprocessing: start methods that
# re-import this module in child processes (spawn, the default on
# Windows and macOS) would otherwise recursively re-run the script.
if __name__ == "__main__":
    print(main())