20250218 commits: The script can run, but the Grad-CAM class still has some issues

2025-02-18 22:54:45 +08:00
parent 2eb97d60c9
commit 16c7a074bb
9 changed files with 1808 additions and 72 deletions

View File

@@ -1,92 +1,67 @@
from Load_process.file_processing import Process_File
from keras.models import Model
from torchcam.methods import GradCAM
from torchvision.transforms.functional import to_pil_image
from matplotlib import pyplot as plt
import torch
import cv2
import numpy as np
from keras import backend as K
from keras.preprocessing import image
import tensorflow as tf
import datetime
class Grad_CAM:
def __init__(self, Label, One_Hot, Experiment_Name, Layer_Name) -> None:
def __init__(self, Experiment_Name, Layer, Image_Size) -> None:
self.experiment_name = Experiment_Name
self.Layer_Name = Layer_Name
self.Label = Label
self.One_Hot_Label = One_Hot
self.Save_File_Name = self.Convert_One_Hot_To_int()
self.Layer = Layer
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.Image_Size = Image_Size
pass
def process_main(self, model, index, images):
for i in range(len(images)):
array = np.expand_dims(images[i], axis=0) # add a batch dimension so the image has an explicit sample count
heatmap = self.gradcam(array, model)
self.plot_heatmap(heatmap, images[i], self.Save_File_Name[i], index, i)
cam_extractor = GradCAM(model, target_layer=self.Layer)
i = 0
for image, label in images:
heatmap = self.gradcam(image, model, cam_extractor)
self.plot_heatmap(heatmap, image, label, index, i)
i += 1
pass
def Convert_One_Hot_To_int(self):
return [np.argmax(Label) for Label in self.One_Hot_Label]
def gradcam(self, Image, model, cam_extractor):
Image = torch.tensor(Image).to(self.device)
# put the model in evaluation mode
model.eval()
# forward pass, then generate the heatmap
with torch.no_grad():
out = model(Image)
def gradcam(self, Image, model, pred_index = None):
# First, build a model that maps the input image to the activations of the last convolutional layer and to the output predictions.
grad_model = Model(
[model.inputs], [model.get_layer(self.Layer_Name).output, model.output]
)
# Then compute the gradient of the predicted (top) class for the input image with respect to the activations of the last convolutional layer.
with tf.GradientTape() as tape: # open a gradient tape and run the forward pass
last_conv_layer_output, preds = grad_model(Image)
if pred_index is None:
pred_index = tf.argmax(preds[0])
class_channel = preds[:, pred_index]
# This is the gradient of the output neuron (top predicted or selected class) with respect to the output feature map of the last convolutional layer.
grads = tape.gradient(class_channel, last_conv_layer_output)
# This is a vector where each entry is the mean intensity of the gradient over a specific feature-map channel.
pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
# Multiply each channel of the feature-map array by how important that channel is for the top predicted class, then sum over all channels to obtain the class-activation heatmap.
last_conv_layer_output = last_conv_layer_output[0]
heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
heatmap = tf.squeeze(heatmap)
# For visualization purposes, we also normalize the heatmap between 0 and 1
heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
return heatmap.numpy()
# collect the predictions and their labels
Output_Values, Output_Indexs = torch.max(out, 1)
# generate the corresponding Grad-CAM heatmap
heatmap = cam_extractor(class_idx=Output_Indexs, scores=out)
return heatmap[0].cpu().numpy()
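The comments in the removed Keras branch above walk through the standard Grad-CAM recipe: average the class-score gradients over the spatial axes to get one importance weight per channel, take the weighted sum of the feature maps, then apply ReLU and normalize. A minimal NumPy sketch of that computation, assuming hypothetical arrays feature_maps and grads of shape (H, W, K):

import numpy as np

# feature_maps: last-conv-layer activations, shape (H, W, K)
# grads: gradient of the class score w.r.t. those activations, same shape
alpha = grads.mean(axis=(0, 1))                           # one importance weight per channel
cam = np.maximum((feature_maps * alpha).sum(axis=-1), 0)  # weighted sum over channels, then ReLU
cam = cam / cam.max()                                     # normalize to [0, 1] for plotting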
def plot_heatmap(self, heatmap, img, Label, index, Title):
File = Process_File()
# ReLU
heatmap = np.maximum(heatmap, 0)
# normalize
heatmap /= np.max(heatmap)
Label = np.argmax(Label.cpu().numpy(), 1)
# read the image
# img = cv2.imread(img)
fig, ax = plt.subplots()
# im = cv2.resize(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB), (img.shape[1], img.shape[0]))
# resize (stretch) the heatmap
img_path = cv2.resize(img, (512, 512))
heatmap = cv2.resize(heatmap, (512, 512))
# resize the image
img_path = cv2.resize(img.numpy().transpose(1, 2, 0), (self.Image_Size, self.Image_Size))
heatmap = cv2.resize(heatmap, (self.Image_Size, self.Image_Size))
heatmap = np.uint8(255 * heatmap)
img_path = cv2.cvtColor(img_path, cv2.COLOR_BGR2RGB)
# draw the original image with 0.6 opacity
# show the image and the heatmap
fig, ax = plt.subplots()
ax.imshow(img_path, alpha=1)
# overlay the heatmap with 0.4 opacity
ax.imshow(heatmap, cmap='jet', alpha=0.3)
save_root = '../Result/CNN_result_of_reading('+ str(datetime.date.today()) + " )/" + str(Label)
save_root = '../Result/CNN_result_of_reading('+ str(datetime.date.today()) + ")/" + str(Label)
File.JudgeRoot_MakeDir(save_root)
save_root = File.Make_Save_Root(self.experiment_name + "-" + str(index) + "-" + str(Title) + ".png", save_root)
# save the figure
plt.savefig(save_root)
plt.close("all") # close the figure
plt.close("all")
pass
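The commit message notes that the class still has some issues, and one likely culprit in the new gradcam method is the torch.no_grad() block: torchcam's GradCAM extracts the map by back-propagating the selected class score through hooks on the target layer, so the forward pass has to run with autograd enabled, and class_idx is expected as a plain int (or list of ints) rather than a tensor. A minimal sketch of a gradient-enabled call, assuming model, cam_extractor, self.device, and a (1, 3, H, W) input tensor image:

model.eval()                                # eval mode is fine; torch.no_grad() is not
scores = model(image.to(self.device))       # forward pass with autograd enabled
class_idx = scores.argmax(dim=1).item()     # torchcam expects an int, not a tensor
cams = cam_extractor(class_idx, scores)     # one CAM per registered target layer
heatmap = cams[0].squeeze(0).cpu().numpy()  # drop the batch dimension before plotting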

View File

@@ -1,4 +1,5 @@
from draw_tools.draw import plot_history, draw_heatmap
from draw_tools.Grad_cam import Grad_CAM
from Load_process.Load_Indepentend import Load_Indepentend_Data
from _validation.ValidationTheEnterData import validation_the_enter_data
from Load_process.file_processing import Process_File
@@ -6,6 +7,7 @@ from merge_class.merge import merge
from sklearn.metrics import confusion_matrix
from experiments.pytorch_Model import ModifiedXception
from experiments.Model_All_Step import All_Step
from torchinfo import summary
import pandas as pd
import numpy as np
import torch
@@ -13,7 +15,7 @@ import torch.nn as nn
import time
class experiments():
def __init__(self, Model_Name, Experiment_Name, Generator_Batch_Size, Epoch, Train_Batch_Size, Convolution_Name, tools, Number_Of_Classes, status):
def __init__(self, Image_Size, Model_Name, Experiment_Name, Generator_Batch_Size, Epoch, Train_Batch_Size, tools, Number_Of_Classes, status):
'''
# Experiment object
@@ -54,8 +56,8 @@ class experiments():
self.epoch = Epoch
self.train_batch_size = Train_Batch_Size
self.layers = 1
self.convolution_name = Convolution_Name
self.Number_Of_Classes = Number_Of_Classes
self.Image_Size = Image_Size
self.Grad = ""
self.Status = status
@@ -71,8 +73,6 @@ class experiments():
end = time.time()
print("讀取testing與validation資料(154)執行時間:%f\n" % (end - start))
# Generator = Image_generator("", "")
# hand the processed test data and validation data to this object's attributes
self.test, self.test_label = self.cut_image.test, self.cut_image.test_label
self.validation, self.validation_label = self.cut_image.validation, self.cut_image.validation_label
@@ -80,9 +80,14 @@ class experiments():
Testing_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.test, self.test_label, 1)
Validation_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.validation, self.validation_label, 1)
# self.Grad = Grad_CAM(self.Topic_Tool.Get_Data_Label(), self.test_label, self.experiment_name, self.convolution_name)
cnn_model = self.construct_model() # call the function that builds the model
print(summary(cnn_model, input_size=(int(self.train_batch_size / 2), 3, self.Image_Size, self.Image_Size)))
for name, parameters in cnn_model.named_parameters():
print(f"Layer Name: {name}, Parameters: {parameters.size()}")
Layers = cnn_model.base_model.body.conv4.pointwise
self.Grad = Grad_CAM(self.experiment_name, Layers, self.Image_Size)
step = All_Step(Training_Dataset, Testing_Dataset, Validation_Dataset, cnn_model, self.epoch, self.Number_Of_Classes)
# model_dir = '../save_the_best_model/Topic/Remove background with Normal image/best_model( 2023-10-17 )-2.h5' # path to saved model weights; every model has its own weight file
@@ -100,7 +105,7 @@ class experiments():
Losses = [train_losses, val_losses]
Accuracies = [train_accuracies, val_accuracies]
plot_history(Epoch + 1, Losses, Accuracies, "train" + str(counter), self.experiment_name) # plot the training results and send the figure out to be saved
# self.Grad.process_main(cnn_model, counter, self.test)
self.Grad.process_main(cnn_model, counter, Testing_Dataset)
return loss, accuracy, precision, recall, AUC, f1
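The Grad-CAM target layer is currently reached by a hard-coded attribute chain (cnn_model.base_model.body.conv4.pointwise). A small sketch of the same lookup driven by the dotted name that the named_parameters loop above prints, so the layer choice stays a single string (the name used here is only an assumption mirroring that attribute chain):

target_name = "base_model.body.conv4.pointwise"         # dotted name as printed above
Layers = dict(cnn_model.named_modules())[target_name]   # resolve the module by name
self.Grad = Grad_CAM(self.experiment_name, Layers, self.Image_Size)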

View File

@@ -40,11 +40,10 @@ if __name__ == "__main__":
Epoch = 10000
Train_Batch_Size = 50
Image_Size = 256
Convolution_Name = "block14_sepconv2"
Prepare = Load_Data_Prepare()
loading_data = Load_ImageGenerator(Trainig_Root, Testing_Root, Validation_Root, Generator_Root, Labels, Image_Size)
experiment = experiments(Model_Name, Experiment_Name, Generator_Batch_Size, Epoch, Train_Batch_Size, Convolution_Name, tool, Classification, Status)
experiment = experiments(Image_Size, Model_Name, Experiment_Name, Generator_Batch_Size, Epoch, Train_Batch_Size, tool, Classification, Status)
image_processing = Read_image_and_Process_image(Image_Size)
Merge = merge()
Calculate_Tool = Calculate()
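Because the constructor now takes Image_Size first and no longer accepts Convolution_Name, a call site that is not updated would silently shift every positional argument by one. A keyword-argument form of the same call (a sketch that only reuses the parameter names from the __init__ shown above) turns that kind of mismatch into an immediate TypeError:

experiment = experiments(
    Image_Size=Image_Size,
    Model_Name=Model_Name,
    Experiment_Name=Experiment_Name,
    Generator_Batch_Size=Generator_Batch_Size,
    Epoch=Epoch,
    Train_Batch_Size=Train_Batch_Size,
    tools=tool,
    Number_Of_Classes=Classification,
    status=Status,
)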

test.ipynb (1761 changes): file diff suppressed because it is too large.