I'm trying to concatenate two NumPy arrays based on one condition, so the order of the concatenation depends on that condition.
This is what I have now:
# Build each row of `a` as b[i] followed by c[i] when the flag s[i] is set,
# otherwise c[i] followed by b[i].
a = empty([2048, 1536])
for i in range(2048):
    # s[] -> array with shape (2048, 1) with ones and zeros
    # b[] and c[] -> arrays with shape (2048, 768)
    if s[i]:
        a[i] = concatenate([b[i], c[i]])
    else:
        a[i] = concatenate([c[i], b[i]])  # order flips with the condition
I don't know how to do these operations in a single step or otherwise reduce the computation time. I want it to be as optimized as possible, but I'm new to NumPy and I don't know it well.
Thank you,
CodePudding user response:
I think concatenate is not the correct solution and it can be solved by proper slicing:
import numpy as np

# Demo: fill `a` from the two halves b and c, with the half order per row
# decided by the flag vector s (b first when the flag is 0, c first otherwise).
ROWS = 32
COLS = 16
HALF = COLS // 2

a = np.empty((ROWS, COLS))
b = np.ones((ROWS, HALF))
c = np.zeros((ROWS, HALF))

# Flags: the first half of the rows get 0, the rest keep 1.
s = np.ones((ROWS, 1))
s[:ROWS // 2] = 0

for row in range(ROWS):
    if s[row] == 0:
        # Flag clear: b goes in front, c in the back.
        a[row, :HALF] = b[row, :]
        a[row, HALF:] = c[row, :]
    else:
        # Flag set: mirrored layout.
        a[row, :HALF] = c[row, :]
        a[row, HALF:] = b[row, :]
CodePudding user response:
something like this?
a = np.hstack([np.where(s, b, c), np.where(np.logical_not(s), b, c)])
EDIT: I just noticed you wanted to do this in place, feel free to ignore my answer as it doesn't satisfy your requirements!
CodePudding user response:
I am posting the results of several different methods to do this, because the results are rather counter-intuitive. Also, note there is a bug in the function in the question which makes the condition always evaluate to true.
import numpy as np
import time

# Shared benchmark fixtures: a random 0/1 flag column, a destination buffer,
# and the two random input halves that get concatenated per row.
ROWS = 2048
HALF = 768

s = np.random.choice(2, (ROWS, 1))   # condition column of 0s and 1s
a = np.empty([ROWS, 2 * HALF])       # output buffer, filled by each method
b = np.random.random((ROWS, HALF))
c = np.random.random((ROWS, HALF))
def func1(s, a, b, c):
    """Vectorized fill via boolean masks and half-width column slices."""
    mask = s.astype(bool).flatten()
    inv = ~mask
    half = b.shape[1]
    # Flagged rows get b on the left and c on the right...
    a[mask, :half] = b[mask]
    a[mask, half:] = c[mask]
    # ...and the remaining rows get the mirrored layout.
    a[inv, :half] = c[inv]
    a[inv, half:] = b[inv]
    return a
def func2(s, a, b, c):
    """Row-by-row Python loop, as posted in the question.

    s is an (n, 1) array of 0/1 flags; b and c are the (n, k) halves;
    a is the (n, 2k) output buffer, mutated in place and returned.

    Fixed: the original hard-coded ``range(2048)``, so it crashed or
    silently skipped rows for any other input size; iterate over the
    actual row count instead (identical behavior at n == 2048).
    """
    for i in range(s.shape[0]):
        if s[i, 0]:
            a[i] = np.concatenate([b[i], c[i]])  # flag set: b first
        else:
            a[i] = np.concatenate([c[i], b[i]])  # flag clear: c first
    return a
def func3(s, a, b, c):
    """Mask the rows once, then build each row group with a single hstack."""
    chosen = s.astype(bool).flatten()
    rest = np.logical_not(chosen)
    # Flagged rows: b first; remaining rows: c first.
    a[chosen] = np.hstack([b[chosen], c[chosen]])
    a[rest] = np.hstack([c[rest], b[rest]])
    return a
import numba


@numba.njit
def func4(s, a, b, c):
    """Same per-row loop as func2, JIT-compiled to machine code by numba.

    Fixed: the original hard-coded ``range(2048)``; iterate over the real
    row count so the function works for any input size (identical behavior
    at n == 2048).
    """
    for i in range(s.shape[0]):
        if s[i, 0]:  # s -> (n, 1) array of ones and zeros
            a[i, :] = np.concatenate((b[i], c[i]))  # flag set: b first
        else:
            a[i, :] = np.concatenate((c[i], b[i]))  # flag clear: c first
    return a
def func5(s, a, b, c):
    """Fully vectorized: pick each half with np.where, then glue them.

    Note: `a` is ignored — this builds and returns a fresh array.
    """
    first = np.where(s, b, c)
    second = np.where(np.logical_not(s), b, c)
    return np.hstack([first, second])
def func6(s, a, b, c):
    """Per-row sliced assignment (note: half order is inverted vs. func2 —
    b goes first when the flag is 0)."""
    n_rows, n_cols = a.shape
    half = n_cols // 2
    for i in range(n_rows):
        if s[i] == 0:
            a[i, :half] = b[i, :]
            a[i, half:] = c[i, :]
        else:
            a[i, :half] = c[i, :]
            a[i, half:] = b[i, :]
    return a
def _time_100(fn):
    # Run fn 100 times on a fresh copy of `a`; return (last result, seconds).
    start = time.time()
    for _ in range(100):
        out = fn(s, a.copy(), b, c)
    return out, time.time() - start


res1, dt1 = _time_100(func1)
res2, dt2 = _time_100(func2)
res3, dt3 = _time_100(func3)
func4(s, a.copy(), b, c)  # warm-up call so JIT compilation is not timed
res4, dt4 = _time_100(func4)
res5, dt5 = _time_100(func5)
res6, dt6 = _time_100(func6)

# Cross-check the methods against the reference loop implementation.
assert (res1 == res2).all()
assert (res3 == res2).all()
assert (res4 == res2).all()
# assert (res5 == res2).all() # still fails
# assert (res6 == res2).all() # fails

print(f"using partial slicing = {dt1}")
print(f"using python for loops {dt2}")
print(f"using full slicing {dt3}")
print(f"using machine code {dt4}")
print(f"using np.where {dt5}")
print(f"optimized python {dt6}")
using partial slicing = 1.9734187126159668
using python for loops 1.7545206546783447
using full slicing 3.3108901977539062
using machine code 1.2231426239013672
using np.where 3.1704039573669434
optimized python 1.6609220504760742
Python loops appear to be faster than NumPy slicing here, which is rather counter-intuitive; I am guessing it is because the loop accesses the arrays in the order the data is laid out in memory.
Edit: added other people's answers for comparison.