Changed around line 1
- TITLE of the website: Evolver AI - Business Lab - KAN Explorer
+ TITLE of the website: KAN Explorer

Website generated by Claude from the prompt below; only the title line above changed between revisions.

Prompt: Develop a website that allows users to configure and experiment with parameters of the Attention-KAN approach detailed in the article "Kolmogorov-Arnold Networks: Exploring Dynamic Weights and Attention Mechanisms".

Frontend Requirements:

1. Create an intuitive user interface for configuring the parameters of Kolmogorov-Arnold Networks (KANs) and their dynamic-weight integration.
   • Include all input parameters, such as the number of inner/outer units, grid points, spline degree, learning rate, and other relevant hyperparameters.
   • Allow users to adjust these parameters via sliders, dropdowns, or input fields.
   • OBLIGATORY: Provide a real-time, reduced-size visualization of the network behavior as parameters are modified.
2. Result Visualizations:
   • Display metrics such as AUC, KS Statistic, and Log Loss after the experiments.
   • Generate a chart of the loss function over training epochs (X-axis = epochs, Y-axis = loss).

Backend Requirements:

1. Implement the following example experiments provided in the article:
   • Example 1: Training and evaluation of a Dynamic Weight KAN.
   • Example 2: Training and evaluation of an Improved Standard KAN using spline-based utilities.
2. The backend should:
   • Use the provided PyTorch code to compute metrics (AUC, KS Statistic, Log Loss) based on user-configured parameters.
   • Log and output the training loss at each epoch for visualization in the frontend.
3. Ensure the backend is capable of processing the user-configured input parameters and reflecting them in the experiment results. (The code provided below should be integrated into the backend implementation; one possible endpoint wiring is sketched right after this list.)
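As one way to satisfy backend requirements 2 and 3, the configured hyperparameters can be accepted over a single JSON endpoint that returns the per-epoch losses together with the evaluation metrics. The sketch below is illustrative only: it assumes FastAPI and a hypothetical `run_experiment` helper that wraps the training/evaluation code from the examples that follow; neither is prescribed by the prompt.

```python
# Minimal endpoint sketch (assumed stack: FastAPI + pydantic).
from fastapi import FastAPI
from pydantic import BaseModel

from experiments import run_experiment  # hypothetical module wrapping the example code below

app = FastAPI(title="KAN Explorer")


class ExperimentConfig(BaseModel):
    model_type: str = "dynamic"   # "dynamic", "standard", or "attention"
    num_inner_units: int = 6
    num_outer_units: int = 4
    grid_points: int = 25         # used only by the spline-based StandardKAN
    spline_degree: int = 3
    learning_rate: float = 0.001
    epochs: int = 60


class ExperimentResult(BaseModel):
    loss_history: list[float]     # one entry per epoch, for the loss-vs-epoch chart
    auc: float
    ks_statistic: float
    log_loss: float


@app.post("/api/experiments", response_model=ExperimentResult)
def create_experiment(config: ExperimentConfig) -> ExperimentResult:
    # run_experiment must record loss.item() every epoch and compute AUC/KS/Log Loss
    # exactly as the example scripts below do.
    return ExperimentResult(**run_experiment(config))
```

The frontend can then POST the slider/dropdown values to `/api/experiments` and plot `loss_history` directly for the epochs-versus-loss chart.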
Below are the code snippets for the backend implementation of the two examples.

### Code for Example 1: Dynamic KAN Experiment

```python
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import roc_auc_score, log_loss, roc_curve
from sklearn.model_selection import train_test_split


# Dynamic Weight Generator
class DynamicWeightGenerator(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(DynamicWeightGenerator, self).__init__()
        self.fc1 = nn.Linear(input_dim, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc_weight = nn.Linear(64, output_dim)
        self.fc_bias = nn.Linear(64, output_dim)
        self.softplus = nn.Softplus()

    def forward(self, x):
        h = torch.relu(self.fc1(x))
        h = torch.relu(self.fc2(h))
        weight = self.softplus(self.fc_weight(h))
        bias = self.fc_bias(h)
        return weight, bias


# Kolmogorov-Arnold Network (KAN)
class KAN(nn.Module):
    def __init__(self, input_dim, num_inner_units, num_outer_units, dynamic=True):
        super(KAN, self).__init__()
        self.dynamic = dynamic
        self.num_inner_units = num_inner_units
        self.num_outer_units = num_outer_units
        if dynamic:
            self.inner_weight_generators = nn.ModuleList([
                DynamicWeightGenerator(input_dim, input_dim) for _ in range(num_inner_units)
            ])
            self.outer_weight_generators = nn.ModuleList([
                DynamicWeightGenerator(input_dim, 1) for _ in range(num_outer_units)
            ])
        else:
            self.inner_weights = nn.ParameterList([
                nn.Parameter(torch.randn(input_dim, input_dim)) for _ in range(num_inner_units)
            ])
            self.inner_biases = nn.ParameterList([
                nn.Parameter(torch.randn(input_dim)) for _ in range(num_inner_units)
            ])
            self.outer_weights = nn.ParameterList([
                nn.Parameter(torch.randn(num_inner_units)) for _ in range(num_outer_units)
            ])
            self.outer_biases = nn.ParameterList([
                nn.Parameter(torch.randn(1)) for _ in range(num_outer_units)
            ])

    def forward(self, x):
        inner_outputs = []
        if self.dynamic:
            for generator in self.inner_weight_generators:
                weight, bias = generator(x)
                inner_output = torch.sum(weight * x + bias, dim=-1, keepdim=True)
                inner_outputs.append(inner_output)
        else:
            for weight, bias in zip(self.inner_weights, self.inner_biases):
                inner_output = torch.sum(torch.matmul(x, weight) + bias, dim=-1, keepdim=True)
                inner_outputs.append(inner_output)
        aggregated_inner_output = torch.cat(inner_outputs, dim=-1)

        outer_outputs = []
        if self.dynamic:
            for generator in self.outer_weight_generators:
                weight, bias = generator(x)
                outer_output = torch.sum(weight * aggregated_inner_output + bias, dim=-1, keepdim=True)
                outer_outputs.append(outer_output)
        else:
            for weight, bias in zip(self.outer_weights, self.outer_biases):
                outer_output = torch.sum(aggregated_inner_output * weight + bias, dim=-1, keepdim=True)
                outer_outputs.append(outer_output)
        final_output = torch.sum(torch.cat(outer_outputs, dim=-1), dim=-1, keepdim=True)
        return final_output


# Data generation
torch.manual_seed(42)
num_samples = 1200
input_dim = 5

# Generate complex non-linear data
X = torch.rand((num_samples, input_dim)) * 10 - 5
noise = torch.randn(num_samples) * 0.5
Y = ((torch.sin(X[:, 0]) + torch.cos(X[:, 1]) + 0.3 * X[:, 2] - 0.2 * X[:, 3] ** 2 + noise) > 0).float().unsqueeze(1)

# Split data
X_train, X_val, Y_train, Y_val = train_test_split(X.numpy(), Y.numpy(), test_size=0.2, random_state=42)

# Train Dynamic Weight KAN
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dynamic_model = KAN(input_dim, num_inner_units=6, num_outer_units=4, dynamic=True).to(device)
criterion = nn.BCELoss()
optimizer = optim.Adam(dynamic_model.parameters(), lr=0.001)

print("\nTraining Dynamic KAN")
epochs = 60
for epoch in range(epochs):
    dynamic_model.train()
    optimizer.zero_grad()
    outputs = torch.sigmoid(dynamic_model(torch.tensor(X_train, dtype=torch.float32).to(device)))
    loss = criterion(outputs, torch.tensor(Y_train, dtype=torch.float32).to(device))
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 10 == 0:
        print(f"Dynamic KAN - Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}")

# Evaluate Dynamic KAN
dynamic_model.eval()
with torch.no_grad():
    dynamic_predictions = torch.sigmoid(dynamic_model(torch.tensor(X_val, dtype=torch.float32).to(device))).cpu().numpy()

dynamic_auc = roc_auc_score(Y_val, dynamic_predictions)
dynamic_fpr, dynamic_tpr, _ = roc_curve(Y_val, dynamic_predictions)
dynamic_ks = max(dynamic_tpr - dynamic_fpr)
dynamic_logloss = log_loss(Y_val, dynamic_predictions)

print("\nDynamic KAN Metrics:")
print(f"AUC: {dynamic_auc:.4f}")
print(f"KS Statistic: {dynamic_ks:.4f}")
print(f"Log Loss: {dynamic_logloss:.4f}")
```
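The `KAN` class above also accepts `dynamic=False`, so a backend exposing this example can reuse the same loop for a static-weight baseline. A brief sketch, reusing the data, device, and loss criterion already defined in the script, which also records the per-epoch losses the frontend chart needs:

```python
# Static-weight baseline using the same KAN class (dynamic=False).
# Assumes X_train, Y_train, input_dim, device, and criterion from the script above.
static_model = KAN(input_dim, num_inner_units=6, num_outer_units=4, dynamic=False).to(device)
optimizer = optim.Adam(static_model.parameters(), lr=0.001)

loss_history = []  # per-epoch losses for the frontend loss chart
for epoch in range(60):
    static_model.train()
    optimizer.zero_grad()
    outputs = torch.sigmoid(static_model(torch.tensor(X_train, dtype=torch.float32).to(device)))
    loss = criterion(outputs, torch.tensor(Y_train, dtype=torch.float32).to(device))
    loss.backward()
    optimizer.step()
    loss_history.append(loss.item())
```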
The Example 1 script continues with the spline utilities and the Improved Standard KAN:

```python
# Train Improved Standard KAN

# Spline Utilities
def B_batch(x, grid, k=3):
    x = x.unsqueeze(2)
    grid = grid.unsqueeze(0)
    if k == 0:
        value = (x >= grid[:, :, :-1]) * (x < grid[:, :, 1:])
    else:
        B_km1 = B_batch(x[:, :, 0], grid[0], k=k - 1)
        value = (
            (x - grid[:, :, :-(k + 1)]) / (grid[:, :, k:-1] - grid[:, :, :-(k + 1)]) * B_km1[:, :, :-1]
            + (grid[:, :, k + 1:] - x) / (grid[:, :, k + 1:] - grid[:, :, 1:-k]) * B_km1[:, :, 1:]
        )
    return torch.nan_to_num(value)


def coef2curve(x_eval, grid, coef, k):
    b_splines = B_batch(x_eval, grid, k)
    return torch.einsum("ijk,jlk->ijl", b_splines, coef.to(b_splines.device))


# Improved Standard KAN
class StandardKAN(nn.Module):
    def __init__(self, input_dim, num_inner_units, num_outer_units, grid_points=20, spline_degree=3):
        super(StandardKAN, self).__init__()
        self.input_dim = input_dim
        self.num_inner_units = num_inner_units
        self.num_outer_units = num_outer_units
        self.spline_degree = spline_degree

        # Adaptive grids
        self.inner_grid = nn.Parameter(torch.linspace(-10, 10, steps=grid_points).repeat(input_dim, 1), requires_grad=False)
        self.outer_grid = nn.Parameter(torch.linspace(-10, 10, steps=grid_points).repeat(num_inner_units, 1), requires_grad=False)

        # Initialize spline coefficients
        self.inner_coef = nn.Parameter(torch.randn(input_dim, num_inner_units, grid_points - spline_degree - 1) * 0.01)
        self.outer_coef = nn.Parameter(torch.randn(num_inner_units, num_outer_units, grid_points - spline_degree - 1) * 0.01)

    def forward(self, x):
        # Inner layer: Map inputs to hidden features
        inner_outputs = coef2curve(x, self.inner_grid, self.inner_coef, self.spline_degree)
        inner_outputs = inner_outputs.sum(dim=1)
        # Outer layer: Map hidden features to outputs
        outer_outputs = coef2curve(inner_outputs, self.outer_grid, self.outer_coef, self.spline_degree)
        final_output = outer_outputs.sum(dim=1).mean(dim=1, keepdim=True)
        return final_output


# ------------------- Standard KAN -------------------
# Data generation
torch.manual_seed(42)
num_samples = 1200
input_dim = 5

# Generate complex non-linear data
X = torch.rand((num_samples, input_dim)) * 10 - 5
noise = torch.randn(num_samples) * 0.5
Y = ((torch.sin(X[:, 0]) + torch.cos(X[:, 1]) + 0.3 * X[:, 2] - 0.2 * X[:, 3] ** 2 + noise) > 0).float().unsqueeze(1)

# Split data
X_train, X_val, Y_train, Y_val = train_test_split(X.numpy(), Y.numpy(), test_size=0.2, random_state=42)

# Train Improved Standard KAN
device = 'cuda' if torch.cuda.is_available() else 'cpu'
std_model = StandardKAN(input_dim, num_inner_units=8, num_outer_units=4, grid_points=25, spline_degree=3).to(device)
criterion = nn.BCELoss()
optimizer = optim.Adam(std_model.parameters(), lr=0.001)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)

epochs = 60
for epoch in range(epochs):
    std_model.train()
    optimizer.zero_grad()
    outputs = torch.sigmoid(std_model(torch.tensor(X_train, dtype=torch.float32).to(device)))
    loss = criterion(outputs, torch.tensor(Y_train, dtype=torch.float32).to(device))
    loss.backward()
    optimizer.step()
    scheduler.step()
    if (epoch + 1) % 10 == 0:
        print(f"Improved Standard KAN - Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}")

# Evaluate Improved Standard KAN
std_model.eval()
with torch.no_grad():
    std_predictions = torch.sigmoid(std_model(torch.tensor(X_val, dtype=torch.float32).to(device))).cpu().numpy()

std_auc = roc_auc_score(Y_val, std_predictions)
std_fpr, std_tpr, _ = roc_curve(Y_val, std_predictions)
std_ks = max(std_tpr - std_fpr)
std_logloss = log_loss(Y_val, std_predictions)

print("\nImproved Standard KAN Metrics:")
print(f"AUC: {std_auc:.4f}")
print(f"KS Statistic: {std_ks:.4f}")
print(f"Log Loss: {std_logloss:.4f}")
```
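For anyone integrating the spline utilities, the tensor shapes are the main thing to get right: `B_batch` returns one value per (sample, input, basis function), and `coef2curve` contracts the basis dimension against the coefficient tensor. A small shape sanity check, using the same conventions as `StandardKAN`:

```python
# Shape check for the spline utilities (assumes B_batch and coef2curve are already defined).
batch, in_dim, grid_points, degree, n_units = 4, 5, 25, 3, 8

x = torch.rand(batch, in_dim) * 10 - 5
grid = torch.linspace(-10, 10, steps=grid_points).repeat(in_dim, 1)    # (in_dim, grid_points)
coef = torch.randn(in_dim, n_units, grid_points - degree - 1) * 0.01   # same layout as inner_coef

basis = B_batch(x, grid, k=degree)
print(basis.shape)                               # torch.Size([4, 5, 21]): grid_points - degree - 1 basis functions
print(coef2curve(x, grid, coef, degree).shape)   # torch.Size([4, 5, 8]): per-input contribution to each unit
```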
### Code for Example 2: Attention-Based KAN with three normalization functions (Softmax, Softplus, and Rectify)

```python
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import roc_auc_score, log_loss, roc_curve
from sklearn.model_selection import train_test_split


def custom_softplus(scores):
    return torch.log(1 + torch.exp(scores)) / torch.sum(torch.log(1 + torch.exp(scores)), dim=-1, keepdim=True)


def custom_softmax(scores):
    exps = torch.exp(scores - torch.max(scores, dim=-1, keepdim=True)[0])
    return exps / exps.sum(dim=-1, keepdim=True)


def custom_rectify(scores):
    relu_scores = torch.relu(scores)
    return relu_scores / torch.sum(relu_scores, dim=-1, keepdim=True)


# Attention Mechanism for KAN with multiple normalization functions
class AttentionWeightGenerator(nn.Module):
    def __init__(self, input_dim, norm_function):
        super(AttentionWeightGenerator, self).__init__()
        self.query = nn.Linear(input_dim, input_dim, bias=False)
        self.key = nn.Linear(input_dim, input_dim, bias=False)
        self.value = nn.Linear(input_dim, input_dim, bias=False)
        self.norm_function = norm_function

    def forward(self, x):
        Q = self.query(x)  # Query
        K = self.key(x)    # Key
        V = self.value(x)  # Value
        scores = torch.matmul(Q, K.transpose(-1, -2)) / (x.size(-1) ** 0.5)  # Scaled dot-product
        attention_weights = self.norm_function(scores)  # Custom normalization
        output = torch.matmul(attention_weights, V)     # Weighted sum of values
        return output


# Kolmogorov-Arnold Network (KAN) with Attention Mechanism
class AttentionKAN(nn.Module):
    def __init__(self, input_dim, num_inner_units, num_outer_units, norm_function):
        super(AttentionKAN, self).__init__()
        self.num_inner_units = num_inner_units
        self.num_outer_units = num_outer_units
        self.inner_attention_layers = nn.ModuleList([
            AttentionWeightGenerator(input_dim, norm_function) for _ in range(num_inner_units)
        ])
        self.outer_attention_layers = nn.ModuleList([
            AttentionWeightGenerator(num_inner_units, norm_function) for _ in range(num_outer_units)
        ])
        self.output_layer = nn.Linear(num_outer_units, 1)  # Final output layer

    def forward(self, x):
        inner_outputs = []
        for attention_layer in self.inner_attention_layers:
            inner_output = attention_layer(x).mean(dim=-1, keepdim=True)  # Aggregate inner output
            inner_outputs.append(inner_output)
        aggregated_inner_output = torch.cat(inner_outputs, dim=-1)  # Combine inner outputs

        outer_outputs = []
        for attention_layer in self.outer_attention_layers:
            outer_output = attention_layer(aggregated_inner_output).mean(dim=-1, keepdim=True)
            outer_outputs.append(outer_output)
        aggregated_outer_output = torch.cat(outer_outputs, dim=-1)
        final_output = self.output_layer(aggregated_outer_output)  # Final prediction
        return final_output
```
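One caveat worth flagging: `custom_softplus` evaluates `torch.log(1 + torch.exp(scores))` directly, which overflows to `inf` for large score values. If that shows up during experimentation, a numerically stable drop-in is possible via PyTorch's built-in softplus (this is an optional substitution, not what the article's snippet uses):

```python
import torch
import torch.nn.functional as F


def custom_softplus_stable(scores):
    # Same normalization as custom_softplus, but F.softplus computes
    # log(1 + exp(x)) without overflowing for large scores.
    sp = F.softplus(scores)
    return sp / sp.sum(dim=-1, keepdim=True)
```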
The Example 2 script then generates the data, trains one AttentionKAN per normalization function, and compares them:

```python
# Data generation
torch.manual_seed(42)
num_samples = 1200
input_dim = 5

# Generate complex non-linear data
X = torch.rand((num_samples, input_dim)) * 10 - 5
noise = torch.randn(num_samples) * 0.5
Y = ((torch.sin(X[:, 0]) + torch.cos(X[:, 1]) + 0.3 * X[:, 2] - 0.2 * X[:, 3] ** 2 + noise) > 0).float().unsqueeze(1)

# Split data
X_train, X_val, Y_train, Y_val = train_test_split(X.numpy(), Y.numpy(), test_size=0.2, random_state=42)

# Train Attention KAN with different normalization functions
norm_functions = {
    "Softmax": custom_softmax,
    "Softplus": custom_softplus,
    "Rectify": custom_rectify,
}

results = {}
for name, norm_function in norm_functions.items():
    print(f"\nTraining Attention KAN with {name}")
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    attention_model = AttentionKAN(input_dim, num_inner_units=6, num_outer_units=4, norm_function=norm_function).to(device)
    criterion = nn.BCELoss()
    optimizer = optim.Adam(attention_model.parameters(), lr=0.001)

    epochs = 100
    for epoch in range(epochs):
        attention_model.train()
        optimizer.zero_grad()
        outputs = torch.sigmoid(attention_model(torch.tensor(X_train, dtype=torch.float32).to(device)))
        loss = criterion(outputs, torch.tensor(Y_train, dtype=torch.float32).to(device))
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 10 == 0:
            print(f"{name} - Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}")

    # Evaluate Attention KAN
    attention_model.eval()
    with torch.no_grad():
        attention_predictions = torch.sigmoid(attention_model(torch.tensor(X_val, dtype=torch.float32).to(device))).cpu().numpy()

    attention_auc = roc_auc_score(Y_val, attention_predictions)
    attention_fpr, attention_tpr, _ = roc_curve(Y_val, attention_predictions)
    attention_ks = max(attention_tpr - attention_fpr)
    attention_logloss = log_loss(Y_val, attention_predictions)

    results[name] = {"AUC": attention_auc, "KS Statistic": attention_ks, "Log Loss": attention_logloss}

    print(f"\n{name} Metrics:")
    print(f"AUC: {attention_auc:.4f}")
    print(f"KS Statistic: {attention_ks:.4f}")
    print(f"Log Loss: {attention_logloss:.4f}")

# Print final results
print("\nComparison of Normalization Functions:")
for name, metrics in results.items():
    print(f"{name}: AUC={metrics['AUC']:.4f}, KS={metrics['KS Statistic']:.4f}, Log Loss={metrics['Log Loss']:.4f}")
```
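The evaluation block (AUC, ROC curve for the KS statistic, Log Loss) is repeated verbatim for each model above. A backend serving all of the experiments could factor it into a single helper, for example (a refactoring suggestion, not part of the original snippets):

```python
import numpy as np
from sklearn.metrics import roc_auc_score, log_loss, roc_curve


def compute_metrics(y_true, y_prob):
    """Return the AUC, KS Statistic, and Log Loss reported by the examples."""
    auc = roc_auc_score(y_true, y_prob)
    fpr, tpr, _ = roc_curve(y_true, y_prob)
    ks = float(np.max(tpr - fpr))
    return {"AUC": auc, "KS Statistic": ks, "Log Loss": log_loss(y_true, y_prob)}


# e.g. results[name] = compute_metrics(Y_val, attention_predictions)
```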
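For the obligatory real-time, reduced-size network preview in the frontend, the backend only needs to send a lightweight structural summary of the configured model rather than a trained one. One possible payload shape, with hypothetical field names, is sketched below:

```python
def describe_network(model_type, input_dim, num_inner_units, num_outer_units,
                     grid_points=None, spline_degree=None):
    """JSON-serializable summary the frontend can render as a small layer diagram."""
    summary = {
        "model_type": model_type,  # "dynamic", "standard", or "attention"
        "layers": [
            {"name": "input", "units": input_dim},
            {"name": "inner", "units": num_inner_units},
            {"name": "outer", "units": num_outer_units},
            {"name": "output", "units": 1},
        ],
    }
    if model_type == "standard":  # the spline-based StandardKAN exposes grid settings
        summary["grid_points"] = grid_points
        summary["spline_degree"] = spline_degree
    return summary


# e.g. describe_network("dynamic", input_dim=5, num_inner_units=6, num_outer_units=4)
```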