20250315 Commits: GradCAM is finished

This commit is contained in:
whitekirin 2025-03-15 22:36:49 +08:00
parent ea8d08acc7
commit dfeec70a53
24 changed files with 331 additions and 739 deletions

View File

@@ -36,7 +36,7 @@ class Process_File():
        np.save(save_root, image)
    def Save_CSV_File(self, file_name, data): # save the training results
-       Save_Root = '../Result/save_the_train_result(' + str(datetime.date.today()) + ")"
+       Save_Root = '../Result/Training_Result/save_the_train_result(' + str(datetime.date.today()) + ")"
        self.JudgeRoot_MakeDir(Save_Root)
        modelfiles = self.Make_Save_Root(file_name + ".csv", Save_Root) # join the file name and directory into a full save path
        data.to_csv(modelfiles, mode = "a")
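Note: a minimal sketch of the dated-results pattern this hunk relies on (the file name and columns are illustrative, not the project's real schema). Because to_csv is called with mode = "a", every call appends; guarding the header keeps the CSV from repeating it:

import datetime, os
import pandas as pd

save_root = f"../Result/Training_Result/save_the_train_result({datetime.date.today()})"
os.makedirs(save_root, exist_ok=True)                     # same role as JudgeRoot_MakeDir
csv_path = os.path.join(save_root, "train_result.csv")
row = pd.DataFrame({"loss": [0.42], "accuracy": [0.91]})  # invented values
row.to_csv(csv_path, mode="a", header=not os.path.exists(csv_path), index=False)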

View File

@@ -48,7 +48,7 @@ class Training_Precesses:
    def Combine_Signal_Dataset_To_DataLoader(self, datas : list, Labels : list, Batch_Size, status : bool = True):
        dataset = self.Convert_Data_To_DataSet(datas, Labels, status)
-       sampler = WeightedRandomSampler(dataset, generator = self.generator) # create the sampler
+       sampler = RandomSampler(dataset, generator = self.generator) # create the sampler
        Dataloader = DataLoader(dataset = dataset, batch_size = Batch_Size, num_workers = 0, pin_memory=True, sampler = sampler)
        return Dataloader
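The sampler swap matters because torch.utils.data.WeightedRandomSampler does not take a dataset; it expects per-sample weights and a sample count, so the old call could not have worked as a balancing sampler. A minimal sketch of both options (shapes and class counts are invented):

import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, WeightedRandomSampler

data = torch.randn(100, 3, 32, 32)              # hypothetical images
labels = torch.randint(0, 3, (100,))            # hypothetical class indices
dataset = TensorDataset(data, labels)
generator = torch.Generator().manual_seed(42)

# Uniform shuffling, as in the new code
loader = DataLoader(dataset, batch_size=16, sampler=RandomSampler(dataset, generator=generator))

# If class balancing is the goal, WeightedRandomSampler needs explicit per-sample weights
class_counts = torch.bincount(labels, minlength=3).float()
sample_weights = (1.0 / class_counts)[labels]
balanced_sampler = WeightedRandomSampler(sample_weights, num_samples=len(dataset),
                                         replacement=True, generator=generator)
balanced_loader = DataLoader(dataset, batch_size=16, sampler=balanced_sampler)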

View File

@@ -1,256 +0,0 @@
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# SENet
# block = layers.GlobalAveragePooling2D()(residual)
# block = layers.Dense(units = residual.shape[-1] // 16, activation = "relu")(block)
# block = layers.Dense(units = residual.shape[-1], activation = "sigmoid")(block)
# block = Reshape((1, 1, residual.shape[-1]))(block)
# residual = Multiply()([residual, block])
from keras import backend
from keras import layers
from keras.layers import Reshape, Multiply, Conv1D
import math
def Xception_indepentment(input_shape=None):
channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
img_input = layers.Input(shape=input_shape)
x = layers.Conv2D(
32, (3, 3), strides=(2, 2), use_bias=False, name="block1_conv1"
)(img_input)
x = layers.BatchNormalization(axis=channel_axis, name="block1_conv1_bn")(x)
x = layers.Activation("relu", name="block1_conv1_act")(x)
x = layers.Conv2D(64, (3, 3), use_bias=False, name="block1_conv2")(x)
x = layers.BatchNormalization(axis=channel_axis, name="block1_conv2_bn")(x)
x = layers.Activation("relu", name="block1_conv2_act")(x)
residual = layers.Conv2D(
128, (1, 1), strides=(2, 2), padding="same", use_bias=False
)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
# Attention mechanism region
kernel = int(abs((math.log(residual.shape[-1], 2) + 1) / 2))
if kernel % 2:
kernel_size = kernel
else:
kernel_size = kernel + 1
block = layers.GlobalAveragePooling2D()(residual)
block = Reshape(target_shape = (residual.shape[-1], 1))(block)
block = Conv1D(filters = 1, kernel_size = kernel_size, padding = "same", use_bias = False, activation = "sigmoid")(block)
block = Reshape((1, 1, residual.shape[-1]))(block)
residual = Multiply()([residual, block])
x = layers.SeparableConv2D(
128, (3, 3), padding="same", use_bias=False, name="block2_sepconv1"
)(x)
x = layers.BatchNormalization(axis=channel_axis, name="block2_sepconv1_bn")(
x
)
x = layers.Activation("relu", name="block2_sepconv2_act")(x)
x = layers.SeparableConv2D(
128, (3, 3), padding="same", use_bias=False, name="block2_sepconv2"
)(x)
x = layers.BatchNormalization(axis=channel_axis, name="block2_sepconv2_bn")(
x
)
x = layers.MaxPooling2D(
(3, 3), strides=(2, 2), padding="same", name="block2_pool"
)(x)
x = layers.add([x, residual])
residual = layers.Conv2D(
256, (1, 1), strides=(2, 2), padding="same", use_bias=False
)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
# Attention mechanism region
kernel = int(abs((math.log(residual.shape[-1], 2) + 1) / 2))
if kernel % 2:
kernel_size = kernel
else:
kernel_size = kernel + 1
block = layers.GlobalAveragePooling2D()(residual)
block = Reshape(target_shape = (residual.shape[-1], 1))(block)
block = Conv1D(filters = 1, kernel_size = kernel_size, padding = "same", use_bias = False, activation = "sigmoid")(block)
block = Reshape((1, 1, residual.shape[-1]))(block)
residual = Multiply()([residual, block])
x = layers.Activation("relu", name="block3_sepconv1_act")(x)
x = layers.SeparableConv2D(
256, (3, 3), padding="same", use_bias=False, name="block3_sepconv1"
)(x)
x = layers.BatchNormalization(axis=channel_axis, name="block3_sepconv1_bn")(
x
)
x = layers.Activation("relu", name="block3_sepconv2_act")(x)
x = layers.SeparableConv2D(
256, (3, 3), padding="same", use_bias=False, name="block3_sepconv2"
)(x)
x = layers.BatchNormalization(axis=channel_axis, name="block3_sepconv2_bn")(x)
x = layers.MaxPooling2D(
(3, 3), strides=(2, 2), padding="same", name="block3_pool"
)(x)
x = layers.add([x, residual])
residual = layers.Conv2D(
728, (1, 1), strides=(2, 2), padding="same", use_bias=False
)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
# Attention mechanism region
kernel = int(abs((math.log(residual.shape[-1], 2) + 1) / 2))
if kernel % 2:
kernel_size = kernel
else:
kernel_size = kernel + 1
block = layers.GlobalAveragePooling2D()(residual)
block = Reshape(target_shape = (residual.shape[-1], 1))(block)
block = Conv1D(filters = 1, kernel_size = kernel_size, padding = "same", use_bias = False, activation = "sigmoid")(block)
block = Reshape((1, 1, residual.shape[-1]))(block)
residual = Multiply()([residual, block])
x = layers.Activation("relu", name="block4_sepconv1_act")(x)
x = layers.SeparableConv2D(
728, (3, 3), padding="same", use_bias=False, name="block4_sepconv1"
)(x)
x = layers.BatchNormalization(axis=channel_axis, name="block4_sepconv1_bn")(
x
)
x = layers.Activation("relu", name="block4_sepconv2_act")(x)
x = layers.SeparableConv2D(
728, (3, 3), padding="same", use_bias=False, name="block4_sepconv2"
)(x)
x = layers.BatchNormalization(axis=channel_axis, name="block4_sepconv2_bn")(
x
)
x = layers.MaxPooling2D(
(3, 3), strides=(2, 2), padding="same", name="block4_pool"
)(x)
x = layers.add([x, residual])
for i in range(8):
residual = x
prefix = "block" + str(i + 5)
x = layers.Activation("relu", name=prefix + "_sepconv1_act")(x)
x = layers.SeparableConv2D(
728,
(3, 3),
padding="same",
use_bias=False,
name=prefix + "_sepconv1",
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + "_sepconv1_bn"
)(x)
x = layers.Activation("relu", name=prefix + "_sepconv2_act")(x)
x = layers.SeparableConv2D(
728,
(3, 3),
padding="same",
use_bias=False,
name=prefix + "_sepconv2",
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + "_sepconv2_bn"
)(x)
x = layers.Activation("relu", name=prefix + "_sepconv3_act")(x)
x = layers.SeparableConv2D(
728,
(3, 3),
padding="same",
use_bias=False,
name=prefix + "_sepconv3",
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name=prefix + "_sepconv3_bn"
)(x)
x = layers.add([x, residual])
residual = layers.Conv2D(
1024, (1, 1), strides=(2, 2), padding="same", use_bias=False
)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
# Attention mechanism region
kernel = int(abs((math.log(residual.shape[-1], 2) + 1) / 2))
if kernel % 2:
kernel_size = kernel
else:
kernel_size = kernel + 1
block = layers.GlobalAveragePooling2D()(residual)
block = Reshape(target_shape = (residual.shape[-1], 1))(block)
block = Conv1D(filters = 1, kernel_size = kernel_size, padding = "same", use_bias = False, activation = "sigmoid")(block)
block = Reshape((1, 1, residual.shape[-1]))(block)
residual = Multiply()([residual, block])
x = layers.Activation("relu", name="block13_sepconv1_act")(x)
x = layers.SeparableConv2D(
728, (3, 3), padding="same", use_bias=False, name="block13_sepconv1"
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name="block13_sepconv1_bn"
)(x)
x = layers.Activation("relu", name="block13_sepconv2_act")(x)
x = layers.SeparableConv2D(
1024, (3, 3), padding="same", use_bias=False, name="block13_sepconv2"
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name="block13_sepconv2_bn"
)(x)
x = layers.MaxPooling2D(
(3, 3), strides=(2, 2), padding="same", name="block13_pool"
)(x)
x = layers.add([x, residual])
x = layers.SeparableConv2D(
1536, (3, 3), padding="same", use_bias=False, name="block14_sepconv1"
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name="block14_sepconv1_bn"
)(x)
x = layers.Activation("relu", name="block14_sepconv1_act")(x)
x = layers.SeparableConv2D(
2048, (3, 3), padding="same", use_bias=False, name="block14_sepconv2"
)(x)
x = layers.BatchNormalization(
axis=channel_axis, name="block14_sepconv2_bn"
)(x)
x = layers.Activation("relu", name="block14_sepconv2_act")(x)
return img_input, block
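The attention blocks inlined at each residual branch of this deleted file follow an ECA-style recipe: derive an odd 1-D kernel size from the channel count, squeeze with global average pooling, and gate the channels with a sigmoid Conv1D. A standalone sketch of that block (the function name is illustrative; it assumes the same keras layers imported above):

import math
from keras import layers

def eca_block(feature_map):
    channels = feature_map.shape[-1]
    k = int(abs((math.log(channels, 2) + 1) / 2))
    kernel_size = k if k % 2 else k + 1                        # force an odd kernel
    gate = layers.GlobalAveragePooling2D()(feature_map)        # [B, C]
    gate = layers.Reshape((channels, 1))(gate)                  # [B, C, 1]
    gate = layers.Conv1D(1, kernel_size, padding="same",
                         use_bias=False, activation="sigmoid")(gate)
    gate = layers.Reshape((1, 1, channels))(gate)               # [B, 1, 1, C]
    return layers.Multiply()([feature_map, gate])               # channel-wise gating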

View File

@@ -8,144 +8,82 @@ import matplotlib.pyplot as plt
 import datetime
 from Load_process.file_processing import Process_File
+# Grad-CAM implementation
 class GradCAM:
     def __init__(self, model, target_layer):
-        """
-        Initialize Grad-CAM
-        Args:
-            model: the trained ModifiedXception model
-            target_layer: name of the target layer to compute Grad-CAM on (e.g. 'base_model')
-        """
         self.model = model
         self.target_layer = target_layer
-        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-        self.model.eval()
-        self.model.to(self.device)
-        # Buffers for the feature maps and gradients
-        self.features = None
+        self.activations = None
         self.gradients = None
-        # Register the hooks
-        self._register_hooks()
-    def _register_hooks(self):
-        """Register the forward- and backward-pass hooks"""
-        def forward_hook(module, input, output):
-            self.features = output
-        def backward_hook(module, grad_in, grad_out):
-            self.gradients = grad_out[0]
-        # Get the target layer
-        target_module = dict(self.model.named_modules())[self.target_layer]
-        target_module.register_forward_hook(forward_hook)
-        target_module.register_backward_hook(backward_hook)
-    def generate_cam(self, input_image, target_class=None):
-        """
-        Generate the Grad-CAM heatmap
-        Args:
-            input_image: input image (torch.Tensor, shape: [1, C, H, W])
-            target_class: target class index (if None, the class with the highest predicted score is used)
-        Returns:
-            cam: Grad-CAM heatmap (numpy array)
-        """
-        input_image = input_image.to(self.device)
-        # Forward pass
+        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+        # Register hooks
+        self.target_layer.register_forward_hook(self.save_activations)
+        self.target_layer.register_backward_hook(self.save_gradients)
+    def Processing_Main(self, Test_Dataloader, File_Path):
+        i = 0
+        path = File_Path
+        File = Process_File()
+        for images, labels in Test_Dataloader:
+            labels = torch.as_tensor(labels, dtype=torch.float32).to(self.device)
+            Generate_Image = self.generate(torch.as_tensor(images, dtype=torch.float32).to(self.device))
+            path = File_Path
+            path += str(np.argmax(labels.cpu().numpy(), 1)[0])
+            File.JudgeRoot_MakeDir(path)
+            for Image_Batch in images:
+                File.Save_CV2_File(f"{str(i)}.png", path, self.overlay_heatmap(Generate_Image, Image_Batch))
+                i += 1
+        pass
+    def save_activations(self, module, input, output):
+        self.activations = output.detach()
+    def save_gradients(self, module, grad_input, grad_output):
+        self.gradients = grad_output[0].detach()
+    def generate(self, input_image, class_idx=None):
+        self.model.eval()
+        input_image.requires_grad = True
+        # Forward pass
         output = self.model(input_image)
-        if target_class is None:
-            target_class = torch.argmax(output, dim=1).item()
-        # Clear the gradients
+        if class_idx is None:
+            class_idx = torch.argmax(output, dim=1).item() # Use predicted class if not specified
+        # Zero gradients
         self.model.zero_grad()
-        # Backward pass to compute the gradients
-        one_hot = torch.zeros_like(output)
-        one_hot[0][target_class] = 1
-        output.backward(gradient=one_hot, retain_graph=True)
-        # Compute Grad-CAM
-        gradients = self.gradients.data.cpu().numpy()[0]
-        features = self.features.data.cpu().numpy()[0]
-        # Global-average-pool the gradients
-        weights = np.mean(gradients, axis=(1, 2))
-        # Compute the weighted sum
-        cam = np.zeros(features.shape[1:], dtype=np.float32)
-        for i, w in enumerate(weights):
-            cam += w * features[i]
-        # ReLU activation
-        cam = np.maximum(cam, 0)
-        # Normalize to 0-1
-        cam = cam - np.min(cam)
-        cam = cam / np.max(cam)
-        # Resize to the input image size
-        h, w = input_image.shape[2:]
-        cam = cv2.resize(cam, (w, h))
-        return cam
-    def overlay_cam(self, original_image, cam, alpha=0.5):
-        """
-        Overlay the Grad-CAM heatmap on the original image
-        Args:
-            original_image: original image (numpy array, shape: [H, W, C])
-            cam: Grad-CAM heatmap
-            alpha: transparency
-        Returns:
-            overlay_img: the blended image
-        """
-        # Convert the heatmap to RGB
-        heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
-        heatmap = np.float32(heatmap) / 255
-        # Make sure the original image is in the expected range
-        if original_image.max() > 1:
-            original_image = original_image / 255.0
-        # Blend the heatmap
-        overlay_img = heatmap * alpha + original_image * (1 - alpha)
-        overlay_img = np.clip(overlay_img, 0, 1)
-        return overlay_img
-    def visualize(self, input_image, original_image, target_class=None, File_Name=None, model_name = None):
-        """
-        Visualize the Grad-CAM result
-        Args:
-            input_image: input image (torch.Tensor)
-            original_image: original image (numpy array)
-            target_class: target class index
-            save_path: save path (optional)
-        """
-        File = Process_File()
-        # Generate the CAM
-        cam = self.generate_cam(input_image, target_class)
-        # Overlay it on the original image
-        overlay = self.overlay_cam(original_image, cam)
-        # Plot the result
-        plt.figure(figsize=(10, 5))
-        plt.subplot(1, 2, 1)
-        plt.imshow(original_image)
-        plt.title('Original Image')
-        plt.axis('off')
-        plt.subplot(1, 2, 2)
-        plt.imshow(overlay)
-        plt.title(f'Grad-CAM (Class {target_class})')
-        plt.axis('off')
-        model_dir = '../Result/Grad-CAM( ' + str(datetime.date.today()) + " )"
-        File.JudgeRoot_MakeDir(model_dir)
-        modelfiles = File.Make_Save_Root(str(model_name) + " " + File_Name + ".png", model_dir)
-        plt.savefig(modelfiles)
-        plt.close("all") # close the figure
+        # Backward pass for the specific class
+        output[0, class_idx].backward()
+        # Get gradients and activations
+        gradients = self.gradients # [B, C, H, W]
+        activations = self.activations # [B, C, H, W]
+        # Compute weights (global average pooling of gradients)
+        weights = torch.mean(gradients, dim=[2, 3], keepdim=True) # [B, C, 1, 1]
+        # Compute Grad-CAM heatmap
+        grad_cam = torch.sum(weights * activations, dim=1).squeeze() # [H, W]
+        grad_cam = F.relu(grad_cam) # Apply ReLU
+        grad_cam = grad_cam / (grad_cam.max() + 1e-8) # Normalize to [0, 1]
+        return grad_cam.cpu().numpy()
+    # Utility to overlay heatmap on original image
+    def overlay_heatmap(self, heatmap, image, alpha=0.5):
+        heatmap = np.uint8(255 * heatmap) # Scale to 0-255
+        heatmap = Image.fromarray(heatmap).resize((image.shape[1], image.shape[2]), Image.BILINEAR)
+        heatmap = np.array(heatmap)
+        heatmap = plt.cm.jet(heatmap)[:, :, :3] # Apply colormap (e.g., jet)
+        image = torch.as_tensor(image, dtype=torch.float32).permute(2, 1, 0)
+        overlay = (alpha * heatmap + (1 - alpha) * np.array(image) / 255.0)
+        overlay = np.clip(overlay, 0, 1) * 255
+        return overlay
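For orientation, a hypothetical usage sketch of the new hook-based class. In this repository the target layer is cnn_model.base_model.conv4.pointwise and saving goes through Processing_Main; the model and layer below are stand-ins so the snippet runs on its own:

import torch
import torchvision
from draw_tools.Grad_cam import GradCAM

model = torchvision.models.resnet18(num_classes=3)   # stand-in for ModifiedXception
target_layer = model.layer4[-1].conv2                # any conv layer with spatial output
cam = GradCAM(model, target_layer)                   # __init__ registers the hooks

x = torch.randn(1, 3, 256, 256)                      # one image batch (assumed size)
heatmap = cam.generate(x)                            # numpy heatmap in [0, 1]
overlay = cam.overlay_heatmap(heatmap, x[0])         # blended image ready to be saved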

View File

@@ -25,7 +25,7 @@ def plot_history(Epochs, Losses, Accuracys, file_name, model_name):
    plt.legend(['Train','Validation'], loc='upper left')
    plt.title('Model Accuracy')
-   model_dir = '../Result/save_the_train_image( ' + str(datetime.date.today()) + " )"
+   model_dir = '../Result/Training_Image/save_the_train_image( ' + str(datetime.date.today()) + " )"
    File.JudgeRoot_MakeDir(model_dir)
    modelfiles = File.Make_Save_Root(str(model_name) + " " + str(file_name) + ".png", model_dir)
    plt.savefig(modelfiles)
@@ -40,7 +40,7 @@ def draw_heatmap(matrix, model_name, index): # confusion-matrix plot for two or more classes
    Ax = fig.add_subplot(111)
    sns.heatmap(matrix, square = True, annot = True, fmt = 'd', linecolor = 'white', cmap = "Purples", ax = Ax) # draw the heatmap; cmap sets the colour map
-   model_dir = '../Result/model_matrix_image ( ' + str(datetime.date.today()) + " )"
+   model_dir = '../Result/Matrix_Image/model_matrix_image ( ' + str(datetime.date.today()) + " )"
    File.JudgeRoot_MakeDir(model_dir)
    modelfiles = File.Make_Save_Root(str(model_name) + "-" + str(index) + ".png", model_dir)

View File

@@ -1,29 +1,22 @@
 from tqdm import tqdm
 from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
 from torchmetrics.functional import auroc
-from sklearn.model_selection import KFold
-from sklearn.metrics import confusion_matrix
 from all_models_tools.all_model_tools import call_back
 from Model_Loss.Loss import Entropy_Loss
 from merge_class.merge import merge
-from Training_Tools.PreProcess import ListDataset
-from Load_process.file_processing import Process_File
-from draw_tools.draw import plot_history, draw_heatmap
-from Load_process.file_processing import Process_File
+from draw_tools.Grad_cam import GradCAM
 import time
 import torch.optim as optim
 import numpy as np
 import torch
 import pandas as pd
+import datetime
 class All_Step:
-    def __init__(self, PreProcess_Classes_Data, Batch, Model, Epoch, Number_Of_Classes, Model_Name, Experiment_Name):
-        self.PreProcess_Classes_Data = PreProcess_Classes_Data
-        self.Training_DataLoader, self.Test_Dataloader = self.PreProcess_Classes_Data.Total_Data_Combine_To_DataLoader(Batch)
+    def __init__(self, Model, Epoch, Number_Of_Classes, Model_Name, Experiment_Name):
        self.Model = Model
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@@ -33,38 +26,11 @@ class All_Step:
        self.Model_Name = Model_Name
        self.Experiment_Name = Experiment_Name
-   def Training_Step(self, model_name, counter):
-       # Lists to store metrics across all folds
-       all_fold_train_losses = []
-       all_fold_val_losses = []
-       all_fold_train_accuracies = []
-       all_fold_val_accuracies = []
-       # Define K-fold cross-validator
-       K_Fold = KFold(n_splits=5, shuffle=True, random_state=42)
-       File = Process_File()
-       # Get the underlying dataset from PreProcess_Classes_Data
-       training_dataset = ListDataset(data_list = self.PreProcess_Classes_Data.Training_Datas, labels_list = self.PreProcess_Classes_Data.Training_Labels, status = True)
-       # K-Fold loop
-       for fold, (train_idx, val_idx) in enumerate(K_Fold.split(training_dataset)):
-           print(f"\nStarting Fold {fold + 1}/5")
-           # Create training and validation subsets for this fold
-           train_subset = torch.utils.data.Subset(training_dataset, train_idx)
-           val_subset = torch.utils.data.Subset(training_dataset, val_idx)
-           # Wrap subsets in DataLoaders (use same batch size as original)
-           batch_size = self.Training_DataLoader.batch_size
-           train_loader = torch.utils.data.DataLoader(train_subset, batch_size=batch_size, shuffle=True)
-           val_loader = torch.utils.data.DataLoader(val_subset, batch_size=batch_size, shuffle=False)
+   def Training_Step(self, train_subset, train_loader, val_loader, model_name, fold, TargetLayer):
        # Reinitialize model and optimizer for each fold
-       self.Model = self.Model.__class__(self.Number_Of_Classes).to(self.device) # Reinitialize model
-       Optimizer = optim.SGD(self.Model.parameters(), lr=0.045, momentum=0.9, weight_decay=0.1)
-       model_path, early_stopping, scheduler = call_back(model_name, str(counter) + f"_fold{fold}", Optimizer)
+       # self.Model = self.Model.__class__(self.Number_Of_Classes).to(self.device) # Reinitialize model
+       Optimizer = optim.SGD(self.Model.parameters(), lr=0.045, momentum=0.9, weight_decay=0.01)
+       model_path, early_stopping, scheduler = call_back(model_name, f"_fold{fold}", Optimizer)
        criterion = Entropy_Loss() # Custom loss function
        Merge_Function = merge()
@@ -74,6 +40,7 @@ class All_Step:
        val_losses = []
        train_accuracies = []
        val_accuracies = []
+       epoch = 0
        # Epoch loop
        for epoch in range(self.Epoch):
@@ -167,44 +134,22 @@ class All_Step:
            val_losses.append(val_loss)
            val_accuracies.append(val_accuracy)
+           Grad = GradCAM(self.Model, TargetLayer)
+           Grad.Processing_Main(val_loader, f"../Result/GradCAM_Image/Validation/GradCAM_Image({str(datetime.date.today())})/fold-{str(fold)}/")
            # Early stopping
            early_stopping(val_loss, self.Model, model_path)
            if early_stopping.early_stop:
                print(f"Early stopping triggered in Fold {fold + 1} at epoch {epoch + 1}")
-               Total_Epoch = epoch + 1
                break
            # Learning rate adjustment
            scheduler.step(val_loss)
-       else: # If no early stopping
-           Total_Epoch = self.Epoch
-       # Store fold results
-       all_fold_train_losses.append(train_losses)
-       all_fold_val_losses.append(val_losses)
-       all_fold_train_accuracies.append(train_accuracies)
-       all_fold_val_accuracies.append(val_accuracies)
-       Losses = [train_losses, val_losses]
-       Accuracies = [train_accuracies, val_accuracies]
-       plot_history(Total_Epoch, Losses, Accuracies, "train" + str(fold), self.Experiment_Name) # plot the training curves and pass the figures out to be saved
-       # Aggregate results across folds
-       avg_train_losses = np.mean([losses[-1] for losses in all_fold_train_losses])
-       avg_val_losses = np.mean([losses[-1] for losses in all_fold_val_losses])
-       avg_train_accuracies = np.mean([acc[-1] for acc in all_fold_train_accuracies])
-       avg_val_accuracies = np.mean([acc[-1] for acc in all_fold_val_accuracies])
-       print(f"\nCross-Validation Results:")
-       print(f"Avg Train Loss: {avg_train_losses:.4f}, Avg Val Loss: {avg_val_losses:.4f}")
-       print(f"Avg Train Acc: {avg_train_accuracies:.4f}, Avg Val Acc: {avg_val_accuracies:.4f}")
-       File.Save_TXT_File(content = f"\nCross-Validation Results:\nAvg Train Loss: {avg_train_losses:.4f}, Avg Val Loss: {avg_val_losses:.4f}\nAvg Train Acc: {avg_train_accuracies:.4f}, Avg Val Acc: {avg_val_accuracies:.4f}\n", File_Name = "Training_Average_Result")
-       pass
-   def Evaluate_Model(self, cnn_model, Model_Name, counter):
+       Total_Epoch = epoch + 1
+       return self.Model, train_losses, val_losses, train_accuracies, val_accuracies, Total_Epoch
+   def Evaluate_Model(self, cnn_model, Test_Dataloader):
        # (Unchanged Evaluate_Model method)
        cnn_model.eval()
        True_Label, Predict_Label = [], []
@@ -212,8 +157,8 @@ class All_Step:
        loss = 0.0
        with torch.no_grad():
-           for images, labels in self.Test_Dataloader:
-               images, labels = torch.tensor(images).to(self.device), torch.tensor(labels).to(self.device)
+           for images, labels in Test_Dataloader:
+               images, labels = torch.as_tensor(images).to(self.device), torch.as_tensor(labels).to(self.device)
                outputs = cnn_model(images)
                Output_Values, Output_Indexs = torch.max(outputs, 1)
                True_Indexs = np.argmax(labels.cpu().numpy(), 1)
@@ -224,10 +169,10 @@ class All_Step:
                Predict_Label_OneHot.append(torch.tensor(outputs, dtype=torch.float32).cpu().numpy()[0])
                True_Label_OneHot.append(torch.tensor(labels, dtype=torch.int).cpu().numpy()[0])
-       loss /= len(self.Test_Dataloader)
-       True_Label_OneHot = torch.tensor(True_Label_OneHot, dtype=torch.int)
-       Predict_Label_OneHot = torch.tensor(Predict_Label_OneHot, dtype=torch.float32)
+       loss /= len(Test_Dataloader)
+       True_Label_OneHot = torch.as_tensor(True_Label_OneHot, dtype=torch.int)
+       Predict_Label_OneHot = torch.as_tensor(Predict_Label_OneHot, dtype=torch.float32)
        accuracy = accuracy_score(True_Label, Predict_Label)
        precision = precision_score(True_Label, Predict_Label, average="macro")
@@ -235,33 +180,4 @@ class All_Step:
        AUC = auroc(Predict_Label_OneHot, True_Label_OneHot, num_labels=self.Number_Of_Classes, task="multilabel", average="macro")
        f1 = f1_score(True_Label, Predict_Label, average="macro")
-       Matrix = self.record_matrix_image(True_Label, Predict_Label, Model_Name, counter)
-       print(self.record_everyTime_test_result(loss, accuracy, precision, recall, AUC, f1, counter, self.Experiment_Name, Matrix)) # record the prediction results of this run and write them to a csv file
-       pass
-   def record_matrix_image(self, True_Labels, Predict_Labels, model_name, index):
-       '''Draw the confusion matrix (as a heatmap)'''
-       # Compute the confusion matrix
-       matrix = confusion_matrix(True_Labels, Predict_Labels)
-       draw_heatmap(matrix, model_name, index) # call the function that draws the confusion matrix
-       return matrix
-   def record_everyTime_test_result(self, loss, accuracy, precision, recall, auc, f, indexs, model_name, Matrix):
-       '''Record the result of a single training run and write it out to a file'''
-       File = Process_File()
-       Dataframe = pd.DataFrame(
-           {
-               "model_name" : str(model_name),
-               "loss" : "{:.2f}".format(loss),
-               "precision" : "{:.2f}%".format(precision * 100),
-               "recall" : "{:.2f}%".format(recall * 100),
-               "accuracy" : "{:.2f}%".format(accuracy * 100),
-               "f" : "{:.2f}%".format(f * 100),
-               "AUC" : "{:.2f}%".format(auc * 100)
-           }, index = [indexs])
-       File.Save_CSV_File("train_result", Dataframe)
-       return Dataframe
+       return True_Label, Predict_Label, loss, accuracy, precision, recall, AUC, f1

View File

@@ -1,15 +1,22 @@
 from torchinfo import summary
+from sklearn.model_selection import KFold
+from sklearn.metrics import confusion_matrix
-from Training_Tools.PreProcess import Training_Precesses
+from Training_Tools.PreProcess import Training_Precesses, ListDataset
 from experiments.pytorch_Model import ModifiedXception
 from experiments.Model_All_Step import All_Step
 from Load_process.Load_Indepentend import Load_Indepentend_Data
 from _validation.ValidationTheEnterData import validation_the_enter_data
+from Load_process.file_processing import Process_File
+from draw_tools.Grad_cam import GradCAM
+from draw_tools.draw import plot_history, draw_heatmap
 import numpy as np
 import torch
 import torch.nn as nn
 import time
+import pandas as pd
+import datetime
 class experiments():
     def __init__(self, Image_Size, Model_Name, Experiment_Name, Epoch, Train_Batch_Size, tools, Number_Of_Classes, status):
@@ -50,7 +57,6 @@ class experiments():
        self.experiment_name = Experiment_Name
        self.epoch = Epoch
        self.train_batch_size = Train_Batch_Size
-       self.layers = 1
        self.Number_Of_Classes = Number_Of_Classes
        self.Image_Size = Image_Size
@@ -72,18 +78,73 @@ class experiments():
        self.test, self.test_label = self.cut_image.test, self.cut_image.test_label
        PreProcess = Training_Precesses(Training_Data, Training_Label, self.test, self.test_label)
+       File = Process_File()
+       self.Training_DataLoader, self.Test_Dataloader = PreProcess.Total_Data_Combine_To_DataLoader(self.train_batch_size)
+       # Lists to store metrics across all folds
+       all_fold_train_losses = []
+       all_fold_val_losses = []
+       all_fold_train_accuracies = []
+       all_fold_val_accuracies = []
+       # Define K-fold cross-validator
+       K_Fold = KFold(n_splits = 5, shuffle = True, random_state = 42)
+       # Get the underlying dataset from PreProcess_Classes_Data
+       training_dataset = ListDataset(data_list = PreProcess.Training_Datas, labels_list = PreProcess.Training_Labels, status = True)
+       # K-Fold loop
+       for fold, (train_idx, val_idx) in enumerate(K_Fold.split(training_dataset)):
            cnn_model = self.construct_model() # call the function that builds the model
            print(summary(cnn_model, input_size=(int(self.train_batch_size / 2), 3, self.Image_Size, self.Image_Size)))
            for name, parameters in cnn_model.named_parameters():
                print(f"Layer Name: {name}, Parameters: {parameters.size()}")
-           step = All_Step(PreProcess, self.train_batch_size, cnn_model, self.epoch, self.Number_Of_Classes, self.model_name, self.experiment_name)
-           print("\n\n\n讀取訓練資料(70000)執行時間:%f\n\n" % (end - start))
-           step.Training_Step(self.model_name, counter)
-           step.Evaluate_Model(cnn_model, self.model_name, counter)
-           # self.Grad.process_main(cnn_model, counter, Testing_Dataset)
+           TargetLayer = cnn_model.base_model.conv4.pointwise
+           Grad = GradCAM(cnn_model, TargetLayer)
+           step = All_Step(cnn_model, self.epoch, self.Number_Of_Classes, self.model_name, self.experiment_name)
+           print("\n\n\n讀取訓練資料(70000)執行時間:%f\n\n" % (end - start))
+           print(f"\nStarting Fold {fold + 1}/5")
+           # Create training and validation subsets for this fold
+           train_subset = torch.utils.data.Subset(training_dataset, train_idx)
+           val_subset = torch.utils.data.Subset(training_dataset, val_idx)
+           # Wrap subsets in DataLoaders (use same batch size as original)
+           batch_size = self.Training_DataLoader.batch_size
+           train_loader = torch.utils.data.DataLoader(train_subset, batch_size=batch_size, shuffle=True)
+           val_loader = torch.utils.data.DataLoader(val_subset, batch_size=batch_size, shuffle=False)
+           cnn_model, train_losses, val_losses, train_accuracies, val_accuracies, Total_Epoch = step.Training_Step(train_subset, train_loader, val_loader, self.model_name, fold, TargetLayer)
+           # Store fold results
+           all_fold_train_losses.append(train_losses)
+           all_fold_val_losses.append(val_losses)
+           all_fold_train_accuracies.append(train_accuracies)
+           all_fold_val_accuracies.append(val_accuracies)
+           Losses = [train_losses, val_losses]
+           Accuracies = [train_accuracies, val_accuracies]
+           plot_history(Total_Epoch, Losses, Accuracies, "train" + str(fold), self.experiment_name) # plot the training curves and pass the figures out to be saved
+           True_Label, Predict_Label, loss, accuracy, precision, recall, AUC, f1 = step.Evaluate_Model(cnn_model, self.Test_Dataloader)
+           Grad.Processing_Main(self.Test_Dataloader, f"../Result/GradCAM_Image/Testing/GradCAM_Image({str(datetime.date.today())})/fold-{str(fold)}/")
+           Matrix = self.record_matrix_image(True_Label, Predict_Label, self.model_name, counter)
+           print(self.record_everyTime_test_result(loss, accuracy, precision, recall, AUC, f1, counter, self.experiment_name, Matrix)) # record the prediction results of this run and write them to a csv file
+       # Aggregate results across folds
+       avg_train_losses = np.mean([losses[-1] for losses in all_fold_train_losses])
+       avg_val_losses = np.mean([losses[-1] for losses in all_fold_val_losses])
+       avg_train_accuracies = np.mean([acc[-1] for acc in all_fold_train_accuracies])
+       avg_val_accuracies = np.mean([acc[-1] for acc in all_fold_val_accuracies])
+       print(f"\nCross-Validation Results:")
+       print(f"Avg Train Loss: {avg_train_losses:.4f}, Avg Val Loss: {avg_val_losses:.4f}")
+       print(f"Avg Train Acc: {avg_train_accuracies:.4f}, Avg Val Acc: {avg_val_accuracies:.4f}")
+       File.Save_TXT_File(content = f"\nCross-Validation Results:\nAvg Train Loss: {avg_train_losses:.4f}, Avg Val Loss: {avg_val_losses:.4f}\nAvg Train Acc: {avg_train_accuracies:.4f}, Avg Val Acc: {avg_val_accuracies:.4f}\n", File_Name = "Training_Average_Result")
        pass
@@ -96,3 +157,29 @@ class experiments():
        cnn_model = cnn_model.to(self.device)
        return cnn_model
+   def record_matrix_image(self, True_Labels, Predict_Labels, model_name, index):
+       '''Draw the confusion matrix (as a heatmap)'''
+       # Compute the confusion matrix
+       matrix = confusion_matrix(True_Labels, Predict_Labels)
+       draw_heatmap(matrix, model_name, index) # call the function that draws the confusion matrix
+       return matrix
+   def record_everyTime_test_result(self, loss, accuracy, precision, recall, auc, f, indexs, model_name, Matrix):
+       '''Record the result of a single training run and write it out to a file'''
+       File = Process_File()
+       Dataframe = pd.DataFrame(
+           {
+               "model_name" : str(model_name),
+               "loss" : "{:.2f}".format(loss),
+               "precision" : "{:.2f}%".format(precision * 100),
+               "recall" : "{:.2f}%".format(recall * 100),
+               "accuracy" : "{:.2f}%".format(accuracy * 100),
+               "f" : "{:.2f}%".format(f * 100),
+               "AUC" : "{:.2f}%".format(auc * 100)
+           }, index = [indexs])
+       File.Save_CSV_File("train_result", Dataframe)
+       return Dataframe
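For reference, a stripped-down sketch of the K-fold pattern that now lives in this class, using a plain TensorDataset in place of the project's ListDataset (sizes and the toy model are invented):

import torch
from sklearn.model_selection import KFold
from torch.utils.data import TensorDataset, Subset, DataLoader

dataset = TensorDataset(torch.randn(100, 3, 64, 64), torch.randint(0, 3, (100,)))
kfold = KFold(n_splits=5, shuffle=True, random_state=42)

for fold, (train_idx, val_idx) in enumerate(kfold.split(dataset)):
    train_loader = DataLoader(Subset(dataset, train_idx), batch_size=16, shuffle=True)
    val_loader = DataLoader(Subset(dataset, val_idx), batch_size=16, shuffle=False)
    model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 64 * 64, 3))  # fresh model per fold
    # ... train on train_loader, validate on val_loader, collect per-fold metrics ...
    print(f"Fold {fold + 1}: {len(train_idx)} train / {len(val_idx)} val samples")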

View File

@@ -1,82 +0,0 @@
from convolution_model_tools.convolution_2D_tools import model_2D_tool
from dense_model_tools.dense_tools import model_Dense_Layer
from all_models_tools.all_model_tools import add_Activative, add_dropout
from keras.activations import softmax, sigmoid
from keras.applications import VGG19, ResNet50, InceptionResNetV2, Xception, DenseNet169, EfficientNetV2L
def original_VGG19_model():
tools = model_2D_tool()
dense_tool = model_Dense_Layer()
vgg19 = VGG19(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
GAP = tools.add_globalAveragePooling(vgg19.output)
# flatten = tools.add_flatten(vgg19.output)
dense = dense_tool.add_dense(256, GAP)
# dense = add_Activative(dense)
dense = dense_tool.add_dense(4, dense)
dense = add_Activative(dense, softmax)
return vgg19.input, dense
def original_Resnet50_model():
tools = model_2D_tool()
dense_tool = model_Dense_Layer()
resnet50 = ResNet50(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
GAP = tools.add_globalAveragePooling(resnet50.output)
dense = dense_tool.add_dense(256, GAP)
dense = dense_tool.add_dense(4, dense)
dense = add_Activative(dense, softmax)
return resnet50, dense
def original_InceptionResNetV2_model():
tools = model_2D_tool()
dense_tool = model_Dense_Layer()
inceptionresnetv2 = InceptionResNetV2(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
flatten = tools.add_flatten(inceptionresnetv2.output)
dense = dense_tool.add_dense(256, flatten)
dense = add_Activative(dense)
dense = dense_tool.add_dense(4, dense)
dense = add_Activative(dense, softmax)
return inceptionresnetv2.input, dense
def original_Xception_model():
tools = model_2D_tool()
dense_tool = model_Dense_Layer()
xception = Xception(include_top = False, weights = "imagenet", input_shape = (150, 150, 3))
GAP = tools.add_globalAveragePooling(xception.output)
dense = dense_tool.add_dense(256, GAP)
dense = dense_tool.add_dense(4, dense)
dense = add_Activative(dense, softmax)
return xception, dense
def original_EfficientNetV2L_model():
tools = model_2D_tool()
dense_tool = model_Dense_Layer()
EfficientNet_V2L = EfficientNetV2L(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
flatten = tools.add_flatten(EfficientNet_V2L.output)
dense = dense_tool.add_dense(256, flatten)
dense = add_Activative(dense)
dense = dense_tool.add_dense(4, dense)
dense = add_Activative(dense, softmax)
return EfficientNet_V2L.input, dense
def original_DenseNet169_model():
tools = model_2D_tool()
dense_tool = model_Dense_Layer()
Densenet169 = DenseNet169(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
flatten = tools.add_flatten(Densenet169.output)
dense = dense_tool.add_dense(256, flatten)
dense = add_Activative(dense)
dense = dense_tool.add_dense(4, dense)
dense = add_Activative(dense, softmax)
return Densenet169.input, dense
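The deleted helpers all share one pattern: an ImageNet backbone with include_top=False, a pooling or flatten step, and a small dense head. A generic Keras sketch of that pattern, with the input size and class count copied from the VGG19 variant above:

from keras import layers, models
from keras.applications import VGG19

backbone = VGG19(include_top=False, weights="imagenet", input_shape=(120, 120, 3))
x = layers.GlobalAveragePooling2D()(backbone.output)   # GAP keeps the head smaller than Flatten
x = layers.Dense(256)(x)
x = layers.Dense(4, activation="softmax")(x)            # 4 classes, as in the deleted code
model = models.Model(backbone.input, x)
model.summary()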

View File

@@ -36,3 +36,20 @@ class ModifiedXception(nn.Module):
        x = self.base_model.fc(x) # Identity layer (still [B, 2048])
        output = self.custom_head(x) # Custom head processing
        return output
+class Model_module():
+    def __init__(self):
+        self.conv1 = nn.Conv2d(in_channels = 3, out_channels = 32, kernel_size = 3, padding = 1)
+        self.conv2 = nn.Conv2d(in_channels = 64, out_channels = 128, kernel_size = 3, padding = 1)
+        self.conv3 = nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, padding = 1)
+        self.relu = nn.ReLU()
+        self.sigmoid = nn.Sigmoid()
+        self.max_Pool = nn.MaxPool2d(2, 2)
+        self.fc1 = nn.Linear()
+        self.fc2 = nn.Linear()
+        pass
+    def forward(self, input):
+        pass
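The Model_module stub added here is not yet usable: it does not subclass nn.Module and the two nn.Linear() calls are missing their dimensions. One way it might be completed, with made-up feature sizes (not the author's values):

import torch
import torch.nn as nn

class Model_module(nn.Module):
    def __init__(self, num_classes=3):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 128, kernel_size=3, padding=1)  # the stub's in_channels=64 would not match conv1's output
        self.conv3 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        self.max_Pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(128 * 32 * 32, 256)   # assumes 256x256 inputs pooled three times
        self.fc2 = nn.Linear(256, num_classes)

    def forward(self, x):
        x = self.max_Pool(self.relu(self.conv1(x)))
        x = self.max_Pool(self.relu(self.conv2(x)))
        x = self.max_Pool(self.relu(self.conv3(x)))
        x = torch.flatten(x, 1)
        x = self.relu(self.fc1(x))
        return self.fc2(x)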

View File

@@ -23,7 +23,7 @@ if __name__ == "__main__":
    tool.Set_Labels()
    tool.Set_Save_Roots()
-   Status = 1 # decides which dataset to use
+   Status = 2 # decides which dataset to use
    Labels = tool.Get_Data_Label()
    Trainig_Root, Testing_Root = tool.Get_Save_Roots(Status) # the regular roots
    Generator_Root = tool.Get_Generator_Save_Roots(Status)
@@ -36,7 +36,7 @@ if __name__ == "__main__":
    Classification = 3 # number of classes
    Model_Name = "Xception" # a name recording which model is used (a pretrained model or one of my own designs)
-   Experiment_Name = "Xception Skin to train Normal stomach cancer"
+   Experiment_Name = "Xception Skin is used RandomSampler to train ICG stomach cancer"
    Epoch = 10000
    Train_Batch_Size = 64
    Image_Size = 256
@@ -55,7 +55,7 @@ if __name__ == "__main__":
    for Run_Range in range(0, counter, 1): # run the training the specified number of times
        # Load the data
        Data_Dict_Data = loading_data.process_main(Label_Length)
-       # Data_Dict_Data, Train_Size = Balance_Process(Data_Dict_Data, Labels)
+       Data_Dict_Data, Train_Size = Balance_Process(Data_Dict_Data, Labels)
        for label in Labels:
            Train_Size += len(Data_Dict_Data[label])
@@ -86,7 +86,6 @@ if __name__ == "__main__":
        trains_Data_Image = image_processing.Data_Augmentation_Image(training_data) # read the files
        Training_Data, Training_Label = image_processing.image_data_processing(trains_Data_Image, training_label) # normalize the loaded files and convert the labels to a numpy array
        # training_data = image_processing.normalization(training_data)
        # training_data = training_data.permute(0, 3, 1, 2)

View File

@@ -1926,6 +1926,43 @@
    "    val_subset = torch.utils.data.Subset(training_dataset, val_idx)\n",
    "\n"
   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Gaussian Kernel:\n",
+      " [[0.00134197 0.00407653 0.00794 0.00991586 0.00794 0.00407653\n",
+      " 0.00134197]\n",
+      " [0.00407653 0.01238341 0.02411958 0.03012171 0.02411958 0.01238341\n",
+      " 0.00407653]\n",
+      " [0.00794 0.02411958 0.04697853 0.05866909 0.04697853 0.02411958\n",
+      " 0.00794 ]\n",
+      " [0.00991586 0.03012171 0.05866909 0.07326883 0.05866909 0.03012171\n",
+      " 0.00991586]\n",
+      " [0.00794 0.02411958 0.04697853 0.05866909 0.04697853 0.02411958\n",
+      " 0.00794 ]\n",
+      " [0.00407653 0.01238341 0.02411958 0.03012171 0.02411958 0.01238341\n",
+      " 0.00407653]\n",
+      " [0.00134197 0.00407653 0.00794 0.00991586 0.00794 0.00407653\n",
+      " 0.00134197]]\n",
+      "Sum of kernel: 1.0\n"
+     ]
+    }
+   ],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
  ],
  "metadata": {

View File

@@ -1,64 +0,0 @@
# import paramiko
# from scp import SCPClient
# import os
# import pexpect
# # def createSSHClient(server, port, user, password):
# # client = paramiko.SSHClient()
# # client.load_system_host_keys()
# # client.set_missing_host_key_policy(paramiko.AutoAddPolicy)
# # client.connect(server, port, user, password)
# # return client
# # ssh = createSSHClient("10.1.29.28", 31931, "root", "whitekirin")
# # # os.mkdir("Original_ResNet101V2_with_NPC_Augmentation_Image")
# # # with open("Original_ResNet101V2_with_NPC_Augmentation_Image_train3.txt", "w") as file:
# # # pass
# # with SCPClient(ssh.get_transport()) as scp:
# # scp.get("/mnt/c/張晉嘉/stomach_cancer/Original_ResNet101V2_with_NPC_Augmentation_Image_train3.txt", "/raid/whitekirin/stomach_cancer/Model_result/save_the_train_result(2024-10-05)/Original_ResNet101V2_with_NPC_Augmentation_Image_train3.txt")
# def upload(port, filename, user, ip, dst_path):
# cmdline = "scp %s -r %s %s@%s:%s" % (port, filename, user, ip, dst_path)
# try:
# child = pexpect.spawn(cmdline)
# child.expect("whitekirin109316118")
# child.sendline()
# child.expect(pexpect.EOF)
# print("file upload Finish")
# except Exception as e:
# print("upload faild: ", e)
# upload(2222, "/raid/whitekirin/stomach_cancer/Model_result/save_the_train_result(2024-10-05)", "whitekirin", "203.64.84.39", "/mnt/c/張晉嘉/stomach_cancer")
from torch.utils.data import Dataset
from torch.utils.data import Subset, DataLoader
class ListDataset(Dataset):
def __init__(self, data_list, labels_list):
self.data = data_list
self.labels = labels_list
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
sample = self.data[idx]
label = self.labels[idx]
return sample, label
# Example data
data_list = ["image1.jpg", "image2.jpg", "image3.jpg"]
labels_list = [0, 1, 0]
# Create the Dataset
dataset = ListDataset(data_list, labels_list)
# Test
# print(type(dataset[0])) # ('image1.jpg', 0)
dataloader = DataLoader(dataset = dataset, batch_size = 1, shuffle=True, num_workers = 0, pin_memory=True)
print(dataloader)