Generating Uniform 2D Circular Point Data Using Python
When a value is known but not necessarily optimal, and the true optimum may lie in its vicinity, one approach is to generate random samples around that point and evaluate them within a model to identify the best candidate. Below is an implementation demonstrating this concept.
import numpy as np
import matplotlib.pyplot as plt

n = 800   # Number of points
m = 2     # Angular distribution factor; 2 ensures uniform full-circle coverage, 1 covers a half-circle
p1 = 0.2  # X-axis radius of variation
p2 = 0.3  # Y-axis radius of variation
K = 0.2   # Center X coordinate
V = 15    # Center Y coordinate

# Angles uniform over [-pi, pi) when m == 2.
t = np.random.random(size=n) * m * np.pi - np.pi
# Taking sqrt of a uniform variate makes the points uniform over the
# ellipse AREA; without it they would cluster toward the center.
# Vectorized: one radius draw per point instead of a Python loop.
length = np.sqrt(np.random.random(size=n))
x1 = np.cos(t) * length * p1 + K
x2 = np.sin(t) * length * p2 + V

plt.figure(figsize=(10, 10))
plt.scatter(x1, x2, marker='.', color='r')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Random Scatter')
plt.show()
This sampling method is used to search for the best `k` and `v` values in the `seek_best2(self)` method, located in the `def_jili` module.
def seek_best2(self):
    """
    Refine the best (k, v) pair by sampling a circular neighborhood
    around the current optimum returned by ``seek_best()``.

    For each of ``n`` candidate points — distributed uniformly over an
    ellipse of radii ``p1`` x ``p2`` centered on the current best (k, v) —
    the model is re-evaluated via ``cmore``/``timemove`` and scored by
    mean absolute error and Spearman correlation against ``df2``.

    Returns:
        pandas.DataFrame: head of the candidate table with columns
        ["mean", "r2", "k", "v"], sorted ascending by "mean".
    """
    # Call each helper once instead of twice (the original re-invoked
    # self.df1df2() and self.t1t2() per element, recomputing results).
    dfs_pair = self.df1df2()
    df1 = dfs_pair[0]
    df2 = dfs_pair[1]
    ts_pair = self.t1t2()
    t1 = ts_pair[0]
    t2 = ts_pair[1]
    df_kv = self.seek_best().head(1)
    k_1 = df_kv.iloc[0, 2]  # current best k
    v_1 = df_kv.iloc[0, 3]  # current best v
    df_empty = pd.DataFrame(columns=["mean", "r2", "k", "v"])
    n = 50    # Sample count
    m = 2     # Angular factor; 2 => uniform full-circle coverage
    p1 = 0.2  # X-axis variation radius
    p2 = 0.2  # Y-axis variation radius
    # Angles uniform over [-pi, pi); cos/sin give unit-circle directions.
    tw = np.random.random(size=n) * m * np.pi - np.pi
    km = np.cos(tw)  # X coordinates for sample centers
    vr = np.sin(tw)  # Y coordinates for sample centers
    # Loop invariants hoisted: t2 and self.x never change inside the loop.
    x = self.x
    t2_str = pd.to_datetime(t2).strftime("%Y-%m-%d %H:%M:%S")
    for i in range(n):
        # sqrt of a uniform variate => samples uniform over the disc area.
        length = np.sqrt(np.random.random())
        km[i] = (km[i] * length) * p1 + k_1
        vr[i] = (vr[i] * length) * p2 + v_1
        nf1 = cmore(df1[t1:t2], km[i], vr[i], x)
        tm = timemove(t1, t2, vr[i], x)
        t3 = tm[0]
        t3_str = pd.to_datetime(t3).strftime("%Y-%m-%d %H:%M:%S")
        nf2 = nf1.set_index(tm)
        # Slice each series once and reuse for both the MAE and the
        # correlation (the original recomputed the same slices twice).
        nf_slice = nf2[t3_str:t2_str]
        df_slice = df2[t3_str:t2_str]
        mean1 = mean_absolute_error(nf_slice, df_slice)
        # Re-index df_slice onto nf_slice's index so concat aligns rows.
        df_aligned = df_slice.set_index(nf_slice.index)
        dfkk = pd.concat([nf_slice, df_aligned], axis=1)
        corr1 = dfkk.corr(method="spearman").iloc[0, 1]
        # The loop index already counts rows; a separate counter was redundant.
        df_empty.loc[i] = [mean1, corr1, km[i], vr[i]]
    return df_empty.sort_values(by=["mean"]).head()