I am new to coding and I am working on a college project for which I need to implement a bio-inspired algorithm, Teaching-Learning-Based Optimization (TLBO), but I am getting an error. Here is my TLBO code:
import random
import sys
import copy
import time
import numpy as np
import matplotlib.pyplot as plt


class Student:
    def __init__(self, fitness, dim, minx, maxx, seed):
        self.rnd = random.Random(seed)

        # a list of size dim with 0.0 as the value of every element
        self.position = [0.0 for i in range(dim)]

        # loop dim times and randomly choose the value of each decision variable
        # the value should be between minx and maxx
        for i in range(dim):
            self.position[i] = (maxx - minx) * self.rnd.random() + minx

        # compute the fitness of the student
        self.fitness = fitness(self.position)
def tlbo(fitness, max_iter, n, dim, minx, maxx):
    rnd = random.Random(0)

    # create n random students
    classroom = [Student(fitness, dim, minx, maxx, i) for i in range(n)]

    # compute the value of best_position and best_fitness in the classroom
    Xbest = [0.0 for i in range(dim)]
    Fbest = sys.float_info.max
    for i in range(n):  # check each student
        if classroom[i].fitness < Fbest:
            Fbest = classroom[i].fitness
            Xbest = copy.copy(classroom[i].position)

    # convergence graph
    convergence1 = []
    timerStart = time.time()

    # main loop of tlbo
    Iter = 0
    while Iter < max_iter:

        # every 10 iterations, print the iteration number
        # and the best fitness value so far
        if Iter % 10 == 0 and Iter > 1:
            print("Iter = " + str(Iter) + " best fitness = %.3f" % Fbest)

        # record the best fitness of every iteration for the convergence curve
        convergence1.append(Fbest)
        # for each student of the classroom
        for i in range(n):

            ### Teaching phase of the ith student

            # compute the mean of all the students in the class
            Xmean = [0.0 for j in range(dim)]
            for k in range(n):
                for j in range(dim):
                    Xmean[j] += classroom[k].position[j]
            for j in range(dim):
                Xmean[j] /= n

            # initialize the new solution
            Xnew = [0.0 for j in range(dim)]

            # teaching factor (TF): either 1 or 2, chosen randomly
            TF = random.randint(1, 2)

            # the best student of the class is the teacher
            Xteacher = Xbest

            # compute the new solution
            for j in range(dim):
                Xnew[j] = classroom[i].position[j] + rnd.random() * (Xteacher[j] - TF * Xmean[j])

            # if Xnew < minx or Xnew > maxx, clip it
            for j in range(dim):
                Xnew[j] = max(Xnew[j], minx)
                Xnew[j] = min(Xnew[j], maxx)

            # compute the fitness of the new solution
            fnew = fitness(Xnew)

            # if the new solution is better than the old one, replace it
            if fnew < classroom[i].fitness:
                classroom[i].position = Xnew
                classroom[i].fitness = fnew

            # update the best student
            if fnew < Fbest:
                Fbest = fnew
                Xbest = Xnew
            ### Learning phase of the ith student

            # randomly choose a partner solution from the classroom
            # the chosen solution should not be the ith student itself
            p = random.randint(0, n - 1)
            while p == i:
                p = random.randint(0, n - 1)

            # partner solution
            Xpartner = classroom[p]

            Xnew = [0.0 for j in range(dim)]
            if classroom[i].fitness < Xpartner.fitness:
                for j in range(dim):
                    Xnew[j] = classroom[i].position[j] + rnd.random() * (
                        classroom[i].position[j] - Xpartner.position[j])
            else:
                for j in range(dim):
                    Xnew[j] = classroom[i].position[j] - rnd.random() * (
                        classroom[i].position[j] - Xpartner.position[j])

            # if Xnew < minx or Xnew > maxx, clip it
            for j in range(dim):
                Xnew[j] = max(Xnew[j], minx)
                Xnew[j] = min(Xnew[j], maxx)

            # compute the fitness of the new solution
            fnew = fitness(Xnew)

            # if the new solution is better than the old one, replace it
            if fnew < classroom[i].fitness:
                classroom[i].position = Xnew
                classroom[i].fitness = fnew

            # update the best student
            if fnew < Fbest:
                Fbest = fnew
                Xbest = Xnew
        Iter += 1
    # end-while

    timerEnd = time.time()
    print(timerEnd - timerStart)

    y = np.array(convergence1, dtype=np.longdouble)
    x = np.arange(0, max_iter, dtype=int) + 1
    print(x)
    print(y)

    timerEnd = time.time()
    print('Completed in', (timerEnd - timerStart))
    fire = round((timerEnd - timerStart), 2)
    plt.plot(x, y, 'o-')
    plt.xlabel("Iterations")
    plt.ylabel("Fitness")
    plt.title(
        f"Convergence curve for TLBO with population {n}, \n"
        f"iterations {max_iter}, and best fitness: {round(min(convergence1), 3)}")
    plt.show()

    opts = {"p": Xbest, 'c': round(min(convergence1), 3), "ti": fire}
    return opts
and here is my fitness function:
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.ensemble import RandomForestClassifier


def fitness_function(positions):
    print(positions)
    features = np.where(positions >= 0.4999)[0]
    # print('selected_features:', features)
    # print(train_df.head())
    train_xf = train_x.iloc[:, features]
    test_xf = test_x.iloc[:, features]
    knn_classifier = Pipeline([('s', StandardScaler()), ('t', MinMaxScaler()),
                               ('m', RandomForestClassifier(n_estimators=100, n_jobs=14))])
    knn_classifier.fit(train_xf, train_y)
    accuracy = knn_classifier.score(test_xf, test_y)
    # print('Accuracy:', accuracy)
    w = 0.9
    return -(w * accuracy + (1 - w) * 1 / len(features))
Here is the error that comes up:
Traceback (most recent call last):
self.fitness = fitness(self.position)
features = np.where(positions >= 0.4999)[0]
TypeError: '>' not supported between instances of 'list' and 'float'
The main problem is that my TLBO code generates continuous values for the best-fit position, for example [0.456621, -0.616164564], and I need to convert them into a list of selected feature indices such as [1, 2] so that I can train the classifier, get the accuracy, and plot the ROC curve. What should I do?
CodePudding user response:
Replace the line with features = np.where(np.array(positions) >= 0.4999)[0]. The error occurs because positions is a plain Python list, and the element-wise comparison positions >= 0.4999 is only defined for NumPy arrays, so the list has to be converted to an array first.
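For clarity, here is a minimal, self-contained sketch of what that line does. The position values, the 4-feature dummy DataFrame, and its column names are made up for illustration; only the 0.4999 threshold comes from the question:

import numpy as np
import pandas as pd

# illustrative values only: a continuous position vector as produced by TLBO
positions = [0.456621, -0.616164564, 0.87, 0.52]

# converting the list to a NumPy array makes the element-wise comparison legal;
# the indices whose value is >= 0.4999 are treated as the selected features
features = np.where(np.array(positions) >= 0.4999)[0]
print(features)  # -> [2 3]

# those indices can then be used to slice the feature matrix, as in the fitness function
df = pd.DataFrame(np.random.rand(5, 4), columns=["f0", "f1", "f2", "f3"])
selected = df.iloc[:, features]
print(selected.columns.tolist())  # -> ['f2', 'f3']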