20250226 Commits: Modify tqdm in the training step

This commit is contained in:
2025-02-26 20:15:31 +08:00
parent 7cb89d2ff1
commit 3f3fa57a02
15 changed files with 211 additions and 162 deletions

View File

@@ -18,6 +18,20 @@ class Calculate():
self.AUC_Record.append(AUC)
pass
def Construction_To_DataFrame(self, Loss, Accuracy, Precision, Recall, F1, AUC):
DataFrame = pd.DataFrame(
{
"loss" : "{:.2f}".format(Loss),
"precision" : "{:.2f}".format(Precision * 100),
"recall" : "{:.2f}".format(Recall * 100),
"accuracy" : "{:.2f}".format(Accuracy * 100),
"f1" : "{:.2f}".format(F1 * 100),
"AUC" : "{:.2f}".format(AUC * 100)
}, index = [0]
)
self.History.append(DataFrame)
return DataFrame
def Calculate_Mean(self):
Loss_Mean = np.mean(self.Loss_Record)
Accuracy_Mean = np.mean(self.Accuracy_Record)
@@ -26,17 +40,8 @@ class Calculate():
F1_Mean = np.mean(self.F1_Record)
AUC_Mean = np.mean(self.AUC_Record)
Mean_DataFram = pd.DataFrame(
{
"loss" : "{:.2f}".format(Loss_Mean),
"precision" : "{:.2f}%".format(Precision_Mean * 100),
"recall" : "{:.2f}%".format(Recall_Mean * 100),
"accuracy" : "{:.2f}%".format(Accuracy_Mean * 100),
"f" : "{:.2f}%".format(F1_Mean * 100),
"AUC" : "{:.2f}%".format(AUC_Mean * 100)
}, index = [0]
)
self.History.append(Mean_DataFram)
Mean_DataFram = self.Construction_To_DataFrame(Loss_Mean, Accuracy_Mean, Precision_Mean, Recall_Mean, F1_Mean, AUC_Mean)
return Mean_DataFram
def Calculate_Std(self):
@@ -47,17 +52,7 @@ class Calculate():
F1_Std = Decimal(str(np.std(self.F1_Record))).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
AUC_Std = Decimal(str(np.std(self.AUC_Record))).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
Std_DataFram = pd.DataFrame(
{
"loss" : "{:.2f}".format(Loss_Std),
"precision" : "{:.2f}".format(Precision_Std),
"recall" : "{:.2f}".format(Recall_Std),
"accuracy" : "{:.2f}".format(Accuracy_Std),
"f" : "{:.2f}".format(F1_Std),
"AUC" : "{:.2f}".format(AUC_Std)
}, index = [0]
)
self.History.append(Std_DataFram)
Std_DataFram = self.Construction_To_DataFrame(Loss_Std, Accuracy_Std, Precision_Std, Recall_Std, F1_Std, AUC_Std)
return Std_DataFram
def Output_Style(self):
@@ -67,7 +62,7 @@ class Calculate():
"precision" : "{}±{}".format(self.History[0]["precision"][0], self.History[1]["precision"][0]),
"recall" : "{}±{}".format(self.History[0]["recall"][0], self.History[1]["recall"][0]),
"accuracy" : "{}±{}".format(self.History[0]["accuracy"][0], self.History[1]["accuracy"][0]),
"f" : "{}±{}".format(self.History[0]["f"][0], self.History[1]["f"][0]),
"f1" : "{}±{}".format(self.History[0]["f1"][0], self.History[1]["f1"][0]),
"AUC" : "{}±{}".format(self.History[0]["AUC"][0], self.History[1]["AUC"][0])
}, index = [0]
)
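Note on this hunk: beyond deduplication, the helper fixes a schema mismatch. Construction_To_DataFrame writes an "f1" column, while the old inline mean/std frames wrote "f" (the old mean frame also embedded "%" signs, which the helper drops); that is why Output_Style's key changes from "f" to "f1" in the hunk above. Mixing the two schemas raises a KeyError, as in this hypothetical sketch:

import pandas as pd

mean_frame = pd.DataFrame({"f": ["91.00%"]}, index=[0])  # old Calculate_Mean schema
std_frame = pd.DataFrame({"f1": ["1.20"]}, index=[0])    # Construction_To_DataFrame schema
try:
    print("{}±{}".format(mean_frame["f1"][0], std_frame["f1"][0]))
except KeyError as err:
    print("old mean frame has no f1 column:", err)

Routing Calculate_Mean and Calculate_Std through the shared helper guarantees every frame in History uses one schema.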

View File

@@ -32,33 +32,6 @@ class Image_generator():
data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, i, data_size)
self.stop += data_size
# # Build standard data augmentation
# '''
# Here I want to follow the data augmentation described in the paper "IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16"
# to generate the augmented images
# '''
# print("\nAugmentation two Generator image")
# data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 2, data_size)
# self.stop += data_size
# # Build standard data augmentation
# '''
# Here I want to follow the data augmentation described in the paper "IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16"
# to generate the augmented images
# '''
# print("\nAugmentation three Generator image")
# data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 3, data_size)
# self.stop += data_size
# # Build standard data augmentation
# '''
# Here I want to follow the data augmentation described in the paper "IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING MONKEYPOX DISEASE USING MODIFIED VGG16"
# to generate the augmented images
# '''
# print("\nAugmentation four Generator image")
# data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 4, data_size)
print()
def get_processing_Augmentation(self, original_image_root : dict, Augment_choose, data_size):
@@ -90,7 +63,7 @@ class Image_generator():
Training_Dataset = tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(image, Classes, 1, False)
if File.JudgeRoot_MakeDir(save_root): # Check whether the target folder exists; create it if it does not
print("The file is exist")
print("The file is exist.This Script is not creating new fold.")
for batch_idx, (images, labels) in enumerate(Training_Dataset):
for i, img in enumerate(images):

View File

@@ -51,7 +51,6 @@ class Load_Indepentend_Data():
test = []
test = image_processing.Data_Augmentation_Image(original_test_root)
test, test_label = image_processing.image_data_processing(test, original_test_label)
# test = image_processing.normalization(test)
return test, test_label

View File

@@ -10,8 +10,8 @@ class Entropy_Loss(nn.Module):
def forward(self, outputs, labels):
# Example: use mean squared error as the loss calculation
# outputs = torch.argmax(outputs, 1)
outputs = torch.tensor(outputs, dtype=torch.float32).clone().detach()
labels = torch.tensor(labels, dtype=torch.float32).clone().detach()
outputs_New = torch.as_tensor(outputs, dtype=torch.float32)
labels_New = torch.as_tensor(labels, dtype=torch.float32)
loss = functional.cross_entropy(outputs, labels)
return torch.tensor(loss, requires_grad = True).clone().detach().requires_grad_(True)
loss = functional.cross_entropy(outputs_New, labels_New)
return torch.as_tensor(loss, dtype = torch.float32)
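The torch.tensor to torch.as_tensor switch here is more than style: torch.tensor always copies and detaches its argument, so the old return torch.tensor(loss, requires_grad = True)... line handed back a fresh leaf tensor with no path to the model weights, whereas torch.as_tensor returns the very same tensor when the dtype already matches, keeping the autograd graph intact. A minimal sketch under that reading (illustrative smoke test, not from the repo):

import torch
from torch.nn import functional

# Soft (one-hot) labels, as the training step's labels appear to be.
logits = torch.randn(4, 3, requires_grad=True)
labels = functional.one_hot(torch.tensor([0, 2, 1, 0]), num_classes=3).float()
loss = functional.cross_entropy(torch.as_tensor(logits, dtype=torch.float32),
                                torch.as_tensor(labels, dtype=torch.float32))
loss = torch.as_tensor(loss, dtype=torch.float32)  # same tensor, grad_fn kept
loss.backward()
print(logits.grad is not None)  # True; torch.tensor(loss) would have detached it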

View File

@@ -1,67 +1,151 @@
from Load_process.file_processing import Process_File
from torchcam.methods import GradCAM
from torchvision.transforms.functional import to_pil_image
from matplotlib import pyplot as plt
import torch
import cv2
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import datetime
from Load_process.file_processing import Process_File
class Grad_CAM:
def __init__(self, Experiment_Name, Layer, Image_Size) -> None:
self.experiment_name = Experiment_Name
self.Layer = Layer
class GradCAM:
def __init__(self, model, target_layer):
"""
初始化 Grad-CAM
Args:
model: 訓練好的 ModifiedXception 模型
target_layer: 要計算 Grad-CAM 的目標層名稱 (例如 'base_model')
"""
self.model = model
self.target_layer = target_layer
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.Image_Size = Image_Size
pass
def process_main(self, model, index, images):
cam_extractor = GradCAM(model, target_layer=self.Layer)
i = 0
for image, label in images:
heatmap = self.gradcam(image, model, cam_extractor)
self.plot_heatmap(heatmap, image, label, index, i)
i += 1
pass
def gradcam(self, Image, model, cam_extractor):
Image = torch.tensor(Image).to(self.device)
# Set the model to evaluation mode
model.eval()
# Forward pass and generate the heatmap
with torch.no_grad():
out = model(Image)
# Collect the training predictions and labels
Output_Values, Output_Indexs = torch.max(out, 1)
self.model.eval()
self.model.to(self.device)
# Generate the corresponding Grad-CAM heatmap
heatmap = cam_extractor(class_idx=Output_Indexs, scores=out)
return heatmap[0].cpu().numpy()
def plot_heatmap(self, heatmap, img, Label, index, Title):
# Used to store the feature maps and gradients
self.features = None
self.gradients = None
# Register hooks
self._register_hooks()
def _register_hooks(self):
"""註冊前向和反向傳播的 hook"""
def forward_hook(module, input, output):
self.features = output
def backward_hook(module, grad_in, grad_out):
self.gradients = grad_out[0]
# Get the target layer
target_module = dict(self.model.named_modules())[self.target_layer]
target_module.register_forward_hook(forward_hook)
target_module.register_backward_hook(backward_hook)
def generate_cam(self, input_image, target_class=None):
"""
生成 Grad-CAM 熱力圖
Args:
input_image: 輸入影像 (torch.Tensor, shape: [1, C, H, W])
target_class: 目標類別索引 (若為 None使用預測最高分數的類別)
Returns:
cam: Grad-CAM 熱力圖 (numpy array)
"""
input_image = input_image.to(self.device)
# Forward pass
output = self.model(input_image)
if target_class is None:
target_class = torch.argmax(output, dim=1).item()
# Clear the gradients
self.model.zero_grad()
# Backward pass to compute the gradients
one_hot = torch.zeros_like(output)
one_hot[0][target_class] = 1
output.backward(gradient=one_hot, retain_graph=True)
# Compute Grad-CAM
gradients = self.gradients.data.cpu().numpy()[0]
features = self.features.data.cpu().numpy()[0]
# Global average pooling over the gradients
weights = np.mean(gradients, axis=(1, 2))
# Compute the weighted sum
cam = np.zeros(features.shape[1:], dtype=np.float32)
for i, w in enumerate(weights):
cam += w * features[i]
# ReLU activation
cam = np.maximum(cam, 0)
# Normalize to 0-1
cam = cam - np.min(cam)
cam = cam / np.max(cam)
# Resize to the input image size
h, w = input_image.shape[2:]
cam = cv2.resize(cam, (w, h))
return cam
def overlay_cam(self, original_image, cam, alpha=0.5):
"""
將 Grad-CAM 熱力圖疊加到原始影像上
Args:
original_image: 原始影像 (numpy array, shape: [H, W, C])
cam: Grad-CAM 熱力圖
alpha: 透明度
Returns:
overlay_img: 疊加後的影像
"""
# Convert the heatmap to RGB
heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) / 255
# Make sure the original image format is correct
if original_image.max() > 1:
original_image = original_image / 255.0
# Overlay the heatmap
overlay_img = heatmap * alpha + original_image * (1 - alpha)
overlay_img = np.clip(overlay_img, 0, 1)
return overlay_img
def visualize(self, input_image, original_image, target_class=None, File_Name=None, model_name = None):
"""
可視化 Grad-CAM 結果
Args:
input_image: 輸入影像 (torch.Tensor)
original_image: 原始影像 (numpy array)
target_class: 目標類別索引
save_path: 保存路徑 (可選)
"""
File = Process_File()
Label = np.argmax(Label.cpu().numpy(), 1)
# Generate the CAM
cam = self.generate_cam(input_image, target_class)
# Resize the images
img_path = cv2.resize(img.numpy().transpose(1, 2, 0), (self.Image_Size, self.Image_Size))
heatmap = cv2.resize(heatmap, (self.Image_Size, self.Image_Size))
heatmap = np.uint8(255 * heatmap)
img_path = cv2.cvtColor(img_path, cv2.COLOR_BGR2RGB)
# Overlay on the original image
overlay = self.overlay_cam(original_image, cam)
# Show the image and heatmap
fig, ax = plt.subplots()
ax.imshow(img_path, alpha=1)
ax.imshow(heatmap, cmap='jet', alpha=0.3)
save_root = '../Result/CNN_result_of_reading('+ str(datetime.date.today()) + ")/" + str(Label)
File.JudgeRoot_MakeDir(save_root)
save_root = File.Make_Save_Root(self.experiment_name + "-" + str(index) + "-" + str(Title) + ".png", save_root)
# Show the results
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(original_image)
plt.title('Original Image')
plt.axis('off')
plt.savefig(save_root)
plt.close("all")
pass
plt.subplot(1, 2, 2)
plt.imshow(overlay)
plt.title(f'Grad-CAM (Class {target_class})')
plt.axis('off')
model_dir = '../Result/Grad-CAM( ' + str(datetime.date.today()) + " )"
File.JudgeRoot_MakeDir(model_dir)
modelfiles = File.Make_Save_Root(str(model_name) + " " + File_Name + ".png", model_dir)
plt.savefig(modelfiles)
plt.close("all") # 關閉圖表

View File

@@ -7,11 +7,12 @@ import numpy as np
from all_models_tools.all_model_tools import call_back
from Model_Loss.Loss import Entropy_Loss
from merge_class.merge import merge
from Image_Process.Image_Generator import Image_generator
from draw_tools.Grad_cam import GradCAM
import time
class All_Step:
def __init__(self, Training_Data_And_Label, Test_Data_And_Label, Validation_Data_And_Label, Model, Epoch, Number_Of_Classes):
def __init__(self, Training_Data_And_Label, Test_Data_And_Label, Validation_Data_And_Label, Model, Epoch, Number_Of_Classes, Model_Name):
self.Training_Data_And_Label = Training_Data_And_Label
self.Test_Data_And_Label = Test_Data_And_Label
self.Validation_Data_And_Label = Validation_Data_And_Label
@@ -22,6 +23,8 @@ class All_Step:
self.Epoch = Epoch
self.Number_Of_Classes = Number_Of_Classes
self.Model_Name = Model_Name
pass
def Training_Step(self, model_name, counter):
@@ -42,11 +45,16 @@ class All_Step:
running_loss = 0.0
all_train_preds = []
all_train_labels = []
processed_samples = 0
epoch_iterator = tqdm(self.Training_Data_And_Label, desc= "Training (Epoch %d)" % epoch) # use a progress bar
# Record the start time of each epoch
start_time = time.time()
total_samples = len(self.Training_Data_And_Label)
epoch_iterator = tqdm(self.Training_Data_And_Label, desc=f"Epoch [{epoch}/{self.Epoch}]")
for inputs, labels in epoch_iterator:
inputs, labels = torch.tensor(inputs).to(self.device), torch.tensor(labels).to(self.device)
inputs, labels = torch.as_tensor(inputs).to(self.device), torch.as_tensor(labels).to(self.device)
Optimizer.zero_grad()
outputs = self.Model(inputs)
@@ -62,18 +70,37 @@ class All_Step:
all_train_preds.append(Output_Indexs.cpu().numpy())
all_train_labels.append(True_Indexs)
processed_samples += len(inputs)
# Compute the current progress
progress = (processed_samples / total_samples) * 100
# Compute the elapsed time and the estimated time remaining
elapsed_time = time.time() - start_time
iterations_per_second = processed_samples / elapsed_time if elapsed_time > 0 else 0
eta = (total_samples - processed_samples) / iterations_per_second if iterations_per_second > 0 else 0
time_str = f"{int(elapsed_time//60):02d}:{int(elapsed_time%60):02d}<{int(eta//60):02d}:{int(eta%60):02d}"
# Compute the accuracy of the current batch (adjust this to your specific needs)
batch_accuracy = (Output_Indexs.cpu().numpy() == True_Indexs).mean()
# Update the progress bar display
epoch_iterator.set_description(f"Epoch [{epoch}/{self.Epoch}]")
epoch_iterator.set_postfix_str(
f"{processed_samples}/{total_samples} [{time_str}, {iterations_per_second:.2f}it/s, " +
f"acc={batch_accuracy:.3f}, loss={loss.item():.3f}, ]"
)
epoch_iterator.close()
all_train_preds = Merge_Function.merge_data_main(all_train_preds, 0, len(all_train_preds))
all_train_labels = Merge_Function.merge_data_main(all_train_labels, 0, len(all_train_labels))
# print(f"all_train_labels shape:{np.array(all_train_labels).shape}")
Training_Loss = running_loss / len(self.Training_Data_And_Label)
train_accuracy = accuracy_score(all_train_labels, all_train_preds)
train_losses.append(Training_Loss)
train_accuracies.append(train_accuracy)
print(f"\nEpoch [{epoch+1}/{self.Epoch}], Loss: {Training_Loss:.4f}, Accuracy: {train_accuracy:0.2f}", end = ' ')
self.Model.eval()
val_loss = 0.0
@@ -82,14 +109,12 @@ class All_Step:
with torch.no_grad():
for inputs, labels in self.Validation_Data_And_Label:
inputs, labels = torch.tensor(inputs).to(self.device), torch.tensor(labels).to(self.device)
inputs, labels = torch.as_tensor(inputs).to(self.device), torch.as_tensor(labels).to(self.device)
outputs = self.Model(inputs)
loss = criterion(outputs, labels)
val_loss += loss.item()
print(f"Output Contents: {outputs}")
# Collect the predictions and labels
Output_Values, Output_Indexs = torch.max(outputs, dim = 1)
True_Indexs = np.argmax(labels.cpu().numpy(), 1)
@@ -102,7 +127,7 @@ class All_Step:
val_losses.append(val_loss)
val_accuracies.append(val_accuracy)
print(f"Val_loss: {val_loss:.4f}, Val_accuracy: {val_accuracy:0.2f}\n")
# print(f"Val_loss: {val_loss:.4f}, Val_accuracy: {val_accuracy:0.2f}\n")
early_stopping(val_loss, self.Model, model_path)
if early_stopping.early_stop:
@@ -115,7 +140,7 @@ class All_Step:
return train_losses, val_losses, train_accuracies, val_accuracies, Total_Epoch
def Evaluate_Model(self, cnn_model):
def Evaluate_Model(self, cnn_model, counter):
# Test the model
cnn_model.eval()
True_Label, Predict_Label = [], []
@@ -138,6 +163,12 @@ class All_Step:
Predict_Label_OneHot.append(torch.tensor(outputs, dtype = torch.float32).cpu().numpy()[0])
True_Label_OneHot.append(torch.tensor(labels, dtype = torch.int).cpu().numpy()[0])
# Create a GradCAM instance
Layers = cnn_model.base_model.body.conv4.pointwise
grad_cam = GradCAM(cnn_model, target_layer="base_model")
# Visualize Grad-CAM
grad_cam.visualize(outputs, images, target_class = 3, File_Name = counter, model_name = self.Model_Name)
loss /= len(self.Test_Data_And_Label)
True_Label_OneHot = torch.tensor(True_Label_OneHot, dtype = torch.int)
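The headline change of the commit is the hand-rolled progress readout in Training_Step above. One caveat: total_samples = len(self.Training_Data_And_Label) counts batches while processed_samples counts individual samples, so the N/M part of the postfix mixes units whenever a batch holds more than one sample (and the computed progress variable is never displayed). A standalone sketch of the same set_postfix_str pattern with both counters in samples, using a fake dataloader and illustrative numbers:

import time
from tqdm import tqdm

batches = [list(range(32))] * 50                    # 50 fake batches of 32 samples
total_samples = sum(len(b) for b in batches)
epochs = 2

for epoch in range(1, epochs + 1):
    processed_samples = 0
    start_time = time.time()
    epoch_iterator = tqdm(batches, desc=f"Epoch [{epoch}/{epochs}]")
    for batch in epoch_iterator:
        time.sleep(0.005)                           # stand-in for forward/backward
        processed_samples += len(batch)
        elapsed = time.time() - start_time
        ips = processed_samples / elapsed if elapsed > 0 else 0
        eta = (total_samples - processed_samples) / ips if ips > 0 else 0
        time_str = (f"{int(elapsed // 60):02d}:{int(elapsed % 60):02d}"
                    f"<{int(eta // 60):02d}:{int(eta % 60):02d}")
        epoch_iterator.set_postfix_str(
            f"{processed_samples}/{total_samples} [{time_str}, {ips:.2f}it/s]"
        )
    epoch_iterator.close()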

View File

@@ -1,5 +1,4 @@
from draw_tools.draw import plot_history, draw_heatmap
from draw_tools.Grad_cam import Grad_CAM
from Load_process.Load_Indepentend import Load_Indepentend_Data
from _validation.ValidationTheEnterData import validation_the_enter_data
from Load_process.file_processing import Process_File
@@ -82,11 +81,8 @@ class experiments():
print(summary(cnn_model, input_size=(int(self.train_batch_size / 2), 3, self.Image_Size, self.Image_Size)))
for name, parameters in cnn_model.named_parameters():
print(f"Layer Name: {name}, Parameters: {parameters.size()}")
Layers = cnn_model.base_model.body.conv4.pointwise
self.Grad = Grad_CAM(self.experiment_name, Layers, self.Image_Size)
step = All_Step(Training_Dataset, Testing_Dataset, Validation_Dataset, cnn_model, self.epoch, self.Number_Of_Classes)
step = All_Step(Training_Dataset, Testing_Dataset, Validation_Dataset, cnn_model, self.epoch, self.Number_Of_Classes, self.model_name)
# model_dir = '../save_the_best_model/Topic/Remove background with Normal image/best_model( 2023-10-17 )-2.h5' # path where model weights are stored; each model has its own weight file
# if os.path.exists(model_dir): # if this file exists
@@ -95,7 +91,7 @@ class experiments():
print("\n\n\n讀取訓練資料(70000)執行時間:%f\n\n" % (end - start))
train_losses, val_losses, train_accuracies, val_accuracies, Epoch = step.Training_Step(self.model_name, counter)
loss, accuracy, precision, recall, AUC, f1, True_Label, Predict_Label = step.Evaluate_Model(cnn_model)
loss, accuracy, precision, recall, AUC, f1, True_Label, Predict_Label = step.Evaluate_Model(cnn_model, counter)
Matrix = self.record_matrix_image(True_Label, Predict_Label, self.model_name, counter)
print(self.record_everyTime_test_result(loss, accuracy, precision, recall, AUC, f1, counter, self.experiment_name, Matrix)) # record the prediction results of this run and export them as a CSV file

main.py
View File

@@ -98,36 +98,7 @@ if __name__ == "__main__":
# training_data = training_data.permute(0, 3, 1, 2)
end = time.time()
print("\n\n\n讀取訓練資料(70000)執行時間:%f\n\n" % (end - start))
# 針對其他資料執行值方圖等化加去背
# with ProcessPoolExecutor() as executor: ## 默认为1多執行續
# trains = list(executor.map(adaptive_histogram_equalization, trains_another))
# # 針對正常資料做值方圖等化加去背
# train_normal = list(executor.map(image_processing.get_data, normal)) # 多執行續讀檔
# normal = list(executor.map(Remove_Background, train_normal)) # 多執行續去背
# normal_data = list(executor.map(adaptive_histogram_equalization, normal))
# # 針對猴痘水痘進行讀檔
# Chickenpox = list(executor.map(image_processing.get_data, chickenpox)) # 多執行續讀檔
# train_chickenpox = list(executor.map(shapen, Chickenpox))
# Monkeypox = list(executor.map(image_processing.get_data, monkeypox)) # 多執行續讀檔
# train_monkeypox = list(executor.map(shapen, Monkeypox)) # 銳化
# for All_Normal_Data in normal_data:
# trains.append(All_Normal_Data)
# training_label.append([0, 0, 0, 0, 0, 1, 0])
# for All_Chhickenpox_Data in train_chickenpox:
# trains.append(All_Chhickenpox_Data)
# training_label.append([0, 0, 0, 1, 0, 0, 0])
# for All_Monkeypox_Data in train_monkeypox:
# trains.append(All_Monkeypox_Data)
# training_label.append([0, 0, 0, 0, 1, 0, 0])
print("\n\n\n讀取訓練資料(70000)執行時間:%f\n\n" % (end - start))
loss, accuracy, precision, recall, AUC, f = experiment.processing_main(Training_Dataset, Run_Range) # 執行訓練方法
Calculate_Tool.Append_numbers(loss, accuracy, precision, recall, AUC, f)