The training step is finished, but the Grad-CAM modification is not yet complete
@@ -58,12 +58,12 @@ class Calculate():
    def Output_Style(self):
        Result = pd.DataFrame(
            {
-               "loss" : "{}±{}".format(self.History[0]["loss"][0], self.History[1]["loss"][0]),
-               "precision" : "{}±{}".format(self.History[0]["precision"][0], self.History[1]["precision"][0]),
-               "recall" : "{}±{}".format(self.History[0]["recall"][0], self.History[1]["recall"][0]),
-               "accuracy" : "{}±{}".format(self.History[0]["accuracy"][0], self.History[1]["accuracy"][0]),
-               "f1" : "{}±{}".format(self.History[0]["f1"][0], self.History[1]["f1"][0]),
-               "AUC" : "{}±{}".format(self.History[0]["AUC"][0], self.History[1]["AUC"][0])
+               "loss" : "{}%±{}".format(self.History[0]["loss"][0], self.History[1]["loss"][0]),
+               "precision" : "{}%±{}".format(self.History[0]["precision"][0], self.History[1]["precision"][0]),
+               "recall" : "{}%±{}".format(self.History[0]["recall"][0], self.History[1]["recall"][0]),
+               "accuracy" : "{}%±{}".format(self.History[0]["accuracy"][0], self.History[1]["accuracy"][0]),
+               "f1" : "{}%±{}".format(self.History[0]["f1"][0], self.History[1]["f1"][0]),
+               "AUC" : "{}%±{}".format(self.History[0]["AUC"][0], self.History[1]["AUC"][0])
            }, index = [0]
        )
        return Result
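For context, a minimal sketch of what this mean±std formatting yields, assuming (hypothetically) that History[0] holds per-metric means and History[1] the matching standard deviations:

import pandas as pd

History = [{"accuracy": [92.1]}, {"accuracy": [1.3]}]  # hypothetical mean / std containers
row = {"accuracy": "{}%±{}".format(History[0]["accuracy"][0], History[1]["accuracy"][0])}
print(pd.DataFrame(row, index = [0]))  # the accuracy cell reads "92.1%±1.3"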
Binary file not shown.
@@ -16,11 +16,12 @@ class Image_generator():
        self.stop = 0
        self.Labels = Labels
        self.Generator_Root = Generator_Root
        self.Image_Size = Image_Size
+       self.Class_Count = 904
        pass

    def Processing_Main(self, Training_Dict_Data_Root):
-       data_size = 0
+       data_size = 2712

        # Build the standard data augmentation
        '''
@@ -51,83 +52,76 @@ class Image_generator():
        '''
        File = Process_File()
        image_processing = Read_image_and_Process_image(self.Image_Size)
-       tool = Training_Precesses("", "", "", "")
+       tool = Training_Precesses(self.Image_Size)
        Classes = []
        Transform = self.Generator_Content(stardand)

        for label in self.Labels: # augment the data for each class separately
-           image = self.load_data(label) # fetch the data
+           Image_Roots = self.get_data_roots[label]
            save_root = File.Make_Save_Root(label, save_roots) # join the paths

-           Classes = image_processing.make_label_list(len(image), "1")
-           Training_Dataset = tool.Combine_Signal_Dataset_To_DataLoader(image, Classes, 1, False)
+           Classes = image_processing.make_label_list(len(Image_Roots), "1")
+           Training_Dataset = tool.Setting_DataSet(Image_Roots, Classes)
+           Training_DataLoader = tool.Dataloader_Sampler(Training_Dataset, 1, False)

            if File.JudgeRoot_MakeDir(save_root): # create the save folder if it does not exist
                print("The folder already exists. This script is not creating a new one.")

-           for batch_idx, (images, labels) in enumerate(Training_Dataset):
-               for i, img in enumerate(images):
-                   if i == self.stop:
-                       break
+           for i in range(1, int(self.Class_Count / len(Image_Roots)) + 1, 1):
+               for batch_idx, (images, labels) in enumerate(Training_DataLoader):
+                   for j, img in enumerate(images):
+                       # if i == self.stop:
+                       #     break

                        img = img.permute(2, 0, 1)
                        img = Transform(img)

                        # Convert to a NumPy array and from BGR to RGB
                        img_np = img.numpy().transpose(1, 2, 0) # back to HWC layout
                        img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB) # BGR to RGB

                        img_pil = transforms.ToPILImage()(img_np)
                        File.Save_PIL_File("image_" + label + str(data_size) + ".png", save_root, img_pil) # save to disk
                        data_size += 1

        return data_size
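The loop above tops each class up to roughly Class_Count images by re-running the augmentation DataLoader int(Class_Count / len(Image_Roots)) + 1 times. A quick sketch of that arithmetic with hypothetical numbers:

Class_Count = 904                  # target number of augmented images per class
Image_Roots = ["img.png"] * 113    # hypothetical: 113 source images in one class

passes = int(Class_Count / len(Image_Roots)) + 1   # 8 + 1 = 9 passes
print(passes * len(Image_Roots))                   # 1017 images, slightly over the target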
    def load_data(self, label):
        '''The images are read by this class itself'''
        image_processing = Read_image_and_Process_image(self.Image_Size)
        img = image_processing.Data_Augmentation_Image(self.get_data_roots[label])
        img = torch.tensor(img)

        self.stop = len(img) * 5
        return img

    def Generator_Content(self, judge): # image data augmentation
        '''
        ## Parameters:
        <b>featurewise_center</b> : Boolean. Set the input mean to 0 over the dataset, feature-wise.<br/>
        <b>samplewise_center</b> : Boolean. Set the mean of each sample to 0.<br/>
        <b>featurewise_std_normalization</b> : Boolean. Divide the inputs by the standard deviation of the dataset, feature-wise.<br/>
        <b>samplewise_std_normalization</b> : Boolean. Divide each input by its own standard deviation.<br/>
        <b>zca_epsilon</b> : Epsilon for ZCA whitening. Defaults to 1e-6.<br/>
        <b>zca_whitening</b> : Boolean. Whether to apply ZCA whitening.<br/>
        <b>rotation_range</b> : Integer. Degree range for random rotations.<br/>
        <b>width_shift_range</b> : Float, 1-D array-like, or int.<br/>
            float: a fraction of the total width if < 1, or a pixel count if >= 1.
            1-D array-like: random elements from the array.
            int: an integer number of pixels from the interval (-width_shift_range, +width_shift_range).
            With width_shift_range=2 the possible values are the integers [-1, 0, +1], the same as with width_shift_range=[-1, 0, +1], while with width_shift_range=1.0 the possible values are floats in [-1.0, +1.0).
        <b>height_shift_range</b> : Float, 1-D array-like, or int.<br/>
            float: a fraction of the total height if < 1, or a pixel count if >= 1.
            1-D array-like: random elements from the array.
            int: an integer number of pixels from the interval (-height_shift_range, +height_shift_range).
            With height_shift_range=2 the possible values are the integers [-1, 0, +1], the same as with height_shift_range=[-1, 0, +1], while with height_shift_range=1.0 the possible values are floats in [-1.0, +1.0).
        <b>shear_range</b> : Float. Shear intensity (shear angle in the counter-clockwise direction, in radians).<br/>
        <b>zoom_range</b> : Float or [lower, upper]. Range for random zoom. If a float, [lower, upper] = [1-zoom_range, 1+zoom_range].<br/>
        <b>channel_shift_range</b> : Float. Range for random channel shifts.<br/>
        <b>fill_mode</b> : One of {"constant", "nearest", "reflect" or "wrap"}. Defaults to 'nearest'. Points outside the boundaries of the input are filled according to the given mode:<br/>
            'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
            'nearest': aaaaaaaa|abcd|dddddddd
            'reflect': abcddcba|abcd|dcbaabcd
            'wrap': abcdabcd|abcd|abcdabcd
        <b>cval</b> : Float or Int. Value used for points outside the boundaries when fill_mode = "constant".<br/>
        <b>horizontal_flip</b> : Boolean. Randomly flip the inputs horizontally.<br/>
        <b>vertical_flip</b> : Boolean. Randomly flip the inputs vertically.<br/>
        <b>rescale</b> : Rescaling factor. Defaults to None. If None or 0, no rescaling is applied; otherwise the data is multiplied by the provided value (before any other transformation).<br/>
        <b>preprocessing_function</b> : Function applied to each input. It runs before any other modification. It takes one argument, an image (a rank-3 NumPy tensor), and should output a NumPy tensor of the same shape.<br/>
        <b>data_format</b> : Image data format, one of {"channels_first", "channels_last"}. "channels_last" means the inputs are shaped (samples, height, width, channels); "channels_first" means (samples, channels, height, width). Defaults to the image_data_format value in the Keras config file ~/.keras/keras.json; if you never set it, it is "channels_last".<br/>
        <b>validation_split</b> : Float. Fraction of the images reserved for validation (strictly between 0 and 1).<br/>
        <b>dtype</b> : Dtype used for the generated arrays.<br/>
        '''
        if judge == 1:
            return transforms.Compose([
@@ -159,6 +153,4 @@ class Image_generator():
                transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.2),
                transforms.RandomHorizontalFlip(),
                transforms.RandomVerticalFlip(),
            ])
        else:
            return transforms.ToTensor() # normalize the values into [0, 1]
-           ])
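As a quick sanity check on the pipeline above, a minimal sketch (assuming torchvision is installed) of applying the same augmentations to a dummy CHW tensor:

import torch
import torchvision.transforms as transforms

augment = transforms.Compose([
    transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.2),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
])

img = torch.rand(3, 256, 256)   # dummy CHW image with values in [0, 1]
out = augment(img)              # same shape, randomly jittered and flipped
print(out.shape)                # torch.Size([3, 256, 256])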
Binary file not shown.
@@ -18,9 +18,10 @@ class Loding_Data_Root(Process_File):
        get_Image_Data = self.get_Image_data_roots(self.Train_Root)
        Get_ImageGenerator_Image_Data = self.get_Image_data_roots(self.Generator_Root)

-       Get_Total_Image_Data_Root = Merge.merge_dict_to_dict(get_Image_Data, Get_ImageGenerator_Image_Data)
+       # Get_Total_Image_Data_Root = Merge.merge_dict_to_dict(get_Image_Data, Get_ImageGenerator_Image_Data)
+       # Get_Total_Image_Data_Root = Merge.merge_data_main(get_Image_Data, 0, len(self.Label_List))

-       return Get_Total_Image_Data_Root
+       return get_Image_Data

    def get_Image_data_roots(self, DataRoot) -> dict:
        Prepare = Load_Data_Prepare()
@@ -28,8 +28,8 @@ class Load_Indepentend_Data():
        image_processing = Read_image_and_Process_image(123)

        classify_image = []
+       Total_Size_List = []
        Total_Dict_Data_Root = self.Get_Independent_data_Root(independent_DataRoot) # read the test dataset roots
-       Total_Dict_Data_Root, Size = Balance_Process(Total_Dict_Data_Root, self.Labels) # shuffle and keep the requested number of samples

        Total_List_Data_Root = []
        for Label in self.Labels:
@@ -41,16 +41,23 @@ class Load_Indepentend_Data():
            test_label = image_processing.make_label_list(len(test_title), self.OneHot_Encording[i]) # build a label list matching the number of images
            print(self.Labels[i] + " has " + str(len(test_label)) + " samples")

+           Total_Size_List.append(len(test_label))

            classify_image.append(test_title)
            Classify_Label.append(test_label)
            i += 1

-       original_test_root = self.merge.merge_data_main(classify_image, 0)
-       original_test_label = self.merge.merge_data_main(Classify_Label, 0)
+       test = self.merge.merge_data_main(classify_image, 0, len(self.Labels))
+       test_label = self.merge.merge_data_main(Classify_Label, 0, len(self.Labels))

+       # test = []
+       # test = image_processing.Data_Augmentation_Image(original_test_root)
+       # test, test_label = image_processing.image_data_processing(test, original_test_label)

+       # Balance_Data = list(zip(test, test_label))
+       # test, test_label = Balance_Process(Balance_Data, Total_Size_List) # shuffle and keep the requested number of samples
+       # test = image_processing.normalization(test)

        test = []
        test = image_processing.Data_Augmentation_Image(original_test_root)
        test, test_label = image_processing.image_data_processing(test, original_test_label)

        return test, test_label
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -44,7 +44,7 @@ class Read_image_and_Process_image:
            img = img / 255 # normalize the image data
            imgs.append(img)

-       return np.array(imgs)
+       return torch.as_tensor(imgs)

    # def load_numpy_data(self, file_names):
    #     '''Load numpy image files and run image processing to improve feature extraction'''
Binary file not shown.
@@ -1,60 +1,117 @@
-from torch.utils.data import Dataset, DataLoader, RandomSampler, WeightedRandomSampler
+from torch.utils.data import Dataset, DataLoader, RandomSampler, WeightedRandomSampler, SubsetRandomSampler, Subset
from torchvision.datasets import ImageFolder
import torchvision.transforms as transforms
+from PIL import Image
import torch
import numpy as np
import cv2

class ListDataset(Dataset):
-   def __init__(self, data_list, labels_list, status):
+   def __init__(self, data_list, labels_list, transform):
        self.data = data_list
        self.labels = labels_list
-       self.status = status
+       self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
-       sample = self.data[idx]
+       Image_Root = self.data[idx]
+       try:
+           with open(Image_Root, 'rb') as file:
+               Images = Image.open(file).convert("RGB")
+           # Image = cv2.imread(Image_Root, cv2.IMREAD_COLOR) # read the file (color)
+           # Image = cv2.cvtColor(Image, cv2.COLOR_BGR2RGB)
+       except Exception as e:
+           print(e)

-       if self.status:
-           from Image_Process.Image_Generator import Image_generator
-           ImageGenerator = Image_generator("", "", 12)
-           Transform = ImageGenerator.Generator_Content(5)
-           sample = Transform(sample)
+       if self.transform != "Generator": # note: != rather than `is not`, which is unreliable for string comparison
+           Images = self.transform(Images)

+       Images = torch.tensor(np.array(Images))
        label = self.labels[idx]
-       return sample, label
+       # print(f"Dataset_Data: \n{sample}\n")
+       return Images, label
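A minimal sketch of driving the ListDataset above end to end, writing one dummy image to a temporary folder first (the path and one-hot label here are hypothetical):

import os, tempfile
import torchvision.transforms as transforms
from PIL import Image as PILImage

tmp = tempfile.mkdtemp()
path = os.path.join(tmp, "dummy.png")
PILImage.new("RGB", (300, 200), color = (128, 0, 0)).save(path)

dataset = ListDataset([path], [[1, 0, 0]], transforms.Resize((256, 256)))
image, label = dataset[0]
print(image.shape, label)   # torch.Size([256, 256, 3]) [1, 0, 0]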
class Training_Precesses:
-   def __init__(self, Training_Datas, Training_Labels, Testing_Datas, Testing_Labels):
-       self.Training_Datas = Training_Datas
-       self.Training_Labels = Training_Labels
-       self.Testing_Datas = Testing_Datas
-       self.Testing_Labels = Testing_Labels
-
-       seed = 42 # set an arbitrary integer as the seed
-       # create the random number generator
+   def __init__(self, ImageSize):
+       seed = 42 # Set an arbitrary integer as the seed
+       self.ImageSize = ImageSize
        self.generator = torch.Generator()
        self.generator.manual_seed(seed)
        pass

+   def Dataloader_Sampler(self, SubDataSet, Batch_Size, Sampler=True):
+       if Sampler:
+           # Data_Loader = DataLoader(
+           #     dataset=SubDataSet,
+           #     batch_size=Batch_Size,
+           #     num_workers=0,
+           #     pin_memory=True,
+           #     sampler=self.Setting_RandomSampler_Content(SubDataSet)
+           # )
+           Data_Loader = DataLoader(
+               dataset=SubDataSet,
+               batch_size=Batch_Size,
+               num_workers=0,
+               pin_memory=True,
+               sampler=self.Setting_RandomSampler_Content(SubDataSet)
+           )
+       else:
+           Data_Loader = DataLoader(
+               dataset=SubDataSet,
+               batch_size=Batch_Size,
+               num_workers=0,
+               pin_memory=True,
+               shuffle=True
+           )
+       return Data_Loader
-   def Total_Data_Combine_To_DataLoader(self, Batch_Size):
-       Training_Dataset = self.Convert_Data_To_DataSet(self.Training_Datas, self.Training_Labels)
-       Testing_Dataset = self.Convert_Data_To_DataSet(self.Testing_Datas, self.Testing_Labels)
-
-       Training_DataLoader = DataLoader(dataset = Training_Dataset, batch_size = Batch_Size, num_workers = 0, pin_memory=True, shuffle = True)
-       Testing_DataLoader = DataLoader(dataset = Testing_Dataset, batch_size = 1, num_workers = 0, pin_memory=True, shuffle = True)
-
-       return Training_DataLoader, Testing_DataLoader
+   def Setting_WeightedRandomSampler_Content(self, SubDataSet):
+       # Check if SubDataSet is a Subset or a full dataset
+       if isinstance(SubDataSet, Subset):
+           # Get the underlying dataset and subset indices
+           base_dataset = SubDataSet.dataset
+           subset_indices = SubDataSet.indices
+           # Extract labels for the subset
+           labels = [base_dataset.labels[i] for i in subset_indices]
+       else:
+           # Assume SubDataSet is a ListDataset or similar
+           labels = SubDataSet.labels
+
+       # Convert labels to class indices if they are one-hot encoded
+       labels = np.array(labels)
+       if labels.ndim > 1: # If one-hot encoded
+           labels = np.argmax(labels, axis=1)
+
+       # Count occurrences of each class
+       class_counts = np.bincount(labels)
+       class_weights = 1.0 / class_counts # Inverse frequency as weight
+       sample_weights = class_weights[labels] # Assign weight to each sample
+
+       return WeightedRandomSampler(
+           weights=sample_weights,
+           num_samples=len(sample_weights),
+           replacement=True
+       )
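The inverse-frequency weighting above can be checked in isolation; a minimal sketch with a hypothetical imbalanced label set:

import numpy as np
from torch.utils.data import WeightedRandomSampler

labels = np.array([0, 0, 0, 0, 1])          # class 0 is four times as frequent as class 1
class_counts = np.bincount(labels)          # [4, 1]
class_weights = 1.0 / class_counts          # [0.25, 1.0]
sample_weights = class_weights[labels]      # rare samples receive larger weights

sampler = WeightedRandomSampler(weights=sample_weights, num_samples=len(sample_weights), replacement=True)
# With replacement, both classes are now drawn roughly equally often per epoch.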
-   def Combine_Signal_Dataset_To_DataLoader(self, datas : list, Labels : list, Batch_Size, status : bool = True):
-       dataset = self.Convert_Data_To_DataSet(datas, Labels, status)
-       sampler = RandomSampler(dataset, generator = self.generator) # create the sampler
-       Dataloader = DataLoader(dataset = dataset, batch_size = Batch_Size, num_workers = 0, pin_memory=True, sampler = sampler)
-       return Dataloader
+   def Setting_RandomSampler_Content(self, Dataset):
+       return RandomSampler(Dataset, generator = self.generator)

-   def Convert_Data_To_DataSet(self, Datas : list, Labels : list, status : bool = True):
-       # create the Dataset
-       list_dataset = ListDataset(Datas, Labels, status)
-       return list_dataset
+   def Setting_DataSet(self, Datas, Labels, transform = None):
+       # data preprocessing
+       if transform == None:
+           transform = transforms.Compose([
+               transforms.Resize((256, 256))
+           ])
+       elif transform == "Transform":
+           transform = transforms.Compose([
+               transforms.Resize((256, 256)),
+               transforms.ToTensor()
+           ])
+       elif transform == "Generator":
+           transform = "Generator"
+
+       # Create the Dataset
+       list_dataset = ListDataset(Datas, Labels, transform)
+       return list_dataset
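A small usage sketch of the two helpers above; the image path and one-hot label are hypothetical, and iterating the loader would require the file to actually exist:

prep = Training_Precesses(ImageSize = 256)
dataset = prep.Setting_DataSet(["img1.png"], [[1, 0]], "Transform")        # Resize + ToTensor pipeline
loader = prep.Dataloader_Sampler(dataset, Batch_Size = 1, Sampler = False) # plain shuffled DataLoader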
@@ -23,17 +23,18 @@ class Tool:
    def Set_Labels(self):
        self.__Labels = ["stomach_cancer_Crop", "Normal_Crop", "Have_Question_Crop"]
        # self.__Labels = ["NPC_negative", "NPC_positive"]

    def Set_Save_Roots(self):
-       self.__ICG_Training_Root = "../Dataset/Training/CA_ICG"
+       self.__ICG_Training_Root = "../Dataset/Training"
        self.__Normal_Training_Root = "../Dataset/Training/CA"
        self.__Comprehensive_Training_Root = "../Dataset/Training/Mixed"

-       self.__ICG_Test_Data_Root = "../Dataset/Training/CA_ICG_TestData"
+       self.__ICG_Test_Data_Root = "../Dataset/Testing"
        self.__Normal_Test_Data_Root = "../Dataset/Training/Normal_TestData"
        self.__Comprehensive_Testing_Root = "../Dataset/Training/Comprehensive_TestData"

-       self.__ICG_ImageGenerator_Data_Root = "../Dataset/Training/ICG_ImageGenerator"
+       self.__ICG_ImageGenerator_Data_Root = "../Dataset/ImageGenerator"
        self.__Normal_ImageGenerator_Data_Root = "../Dataset/Training/Normal_ImageGenerator"
        self.__Comprehensive_Generator_Root = "../Dataset/Training/Comprehensive_ImageGenerator"
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -61,7 +61,6 @@ def call_back(model_name, index, optimizer):
        factor = 0.94, # factor by which the learning rate is reduced: new_lr = lr * factor
        patience = 2, # number of epochs with no improvement after which the learning rate is reduced
        verbose = 0,
        mode = 'min',
        min_lr = 0 # lower bound on the learning rate
    )
@@ -8,7 +8,6 @@ import matplotlib.pyplot as plt
import datetime
from Load_process.file_processing import Process_File

# Grad-CAM implementation
class GradCAM:
    def __init__(self, model, target_layer):
        self.model = model
@@ -16,74 +15,90 @@ class GradCAM:
        self.activations = None
        self.gradients = None
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model.to(self.device) # Ensure model is on the correct device

        # Register hooks
        self.target_layer.register_forward_hook(self.save_activations)
        self.target_layer.register_backward_hook(self.save_gradients)

    def Processing_Main(self, Test_Dataloader, File_Path):
-       i = 0
-       path = File_Path
        File = Process_File()
-       for images, labels in Test_Dataloader:
-           labels = torch.as_tensor(labels, dtype=torch.float32).to(self.device)
-           Generate_Image = self.generate(torch.as_tensor(images, dtype=torch.float32).to(self.device))
-
-           path = File_Path
-           path += str(np.argmax(labels.cpu().numpy(), 1)[0])
-           File.JudgeRoot_MakeDir(path)
-
-           for Image_Batch in images:
-               File.Save_CV2_File(f"{str(i)}.png", path, self.overlay_heatmap(Generate_Image, Image_Batch))
-               i += 1
-       pass
+       for batch_idx, (images, labels) in enumerate(Test_Dataloader):
+           # Move data to device
+           images = images.to(self.device, dtype=torch.float32) # [64, C, H, W]
+           labels = labels.to(self.device, dtype=torch.float32) # [64, num_classes]
+
+           # Get ground-truth class indices
+           label_classes = torch.argmax(labels, dim=1).cpu().numpy() # [64]
+
+           # Generate Grad-CAM heatmaps for the entire batch
+           heatmaps = self.generate(images, label_classes)
+
+           # Process each image in the batch
+           for i in range(images.size(0)): # Loop over batch size (64)
+               class_idx = label_classes[i]
+               heatmap = heatmaps[i] # Extract heatmap for this image
+               overlaid_image = self.overlay_heatmap(heatmap, images[i])
+
+               # Create file path based on class
+               path = f"{File_Path}/class_{class_idx}"
+               File.JudgeRoot_MakeDir(path)
+               File.Save_CV2_File(f"batch_{batch_idx}_img_{i}.png", path, overlaid_image)
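The hooks registered in __init__ feed save_activations and save_gradients below. Note that newer PyTorch versions deprecate register_backward_hook in favor of register_full_backward_hook; a self-contained sketch of the same capture pattern on a toy layer:

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, 3, padding=1)   # stand-in for the target layer
acts, grads = {}, {}
conv.register_forward_hook(lambda m, i, o: acts.update(a=o.detach()))
conv.register_full_backward_hook(lambda m, gi, go: grads.update(g=go[0].detach()))

x = torch.rand(1, 3, 16, 16, requires_grad=True)
conv(x).sum().backward()
print(acts["a"].shape, grads["g"].shape)   # both torch.Size([1, 8, 16, 16])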
    def save_activations(self, module, input, output):
-       self.activations = output.detach()
+       self.activations = output.detach() # [64, C, H', W']

    def save_gradients(self, module, grad_input, grad_output):
-       self.gradients = grad_output[0].detach()
+       self.gradients = grad_output[0].detach() # [64, C, H', W']

-   def generate(self, input_image, class_idx=None):
+   def generate(self, input_images, class_indices=None):
        self.model.eval()
-       input_image.requires_grad = True
+       input_images.requires_grad = True # [64, C, H, W]

        # Forward pass
-       output = self.model(input_image)
-
-       if class_idx is None:
-           class_idx = torch.argmax(output, dim=1).item() # Use predicted class if not specified
-
-       # Zero gradients
-       self.model.zero_grad()
-
-       # Backward pass for the specific class
-       output[0, class_idx].backward()
+       outputs = self.model(input_images) # [64, num_classes]
+
+       if class_indices is None:
+           class_indices = torch.argmax(outputs, dim=1).cpu().numpy() # [64]
+
+       # Backward pass for each image in the batch
+       heatmaps = []
+       for i in range(input_images.size(0)):
+           self.model.zero_grad()
+           outputs[i, class_indices[i]].backward(retain_graph=True) # Backward for specific image/class
+           heatmap = self._compute_heatmap()
+           heatmaps.append(heatmap)
+
+       return np.stack(heatmaps) # [64, H', W']
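For reference, generate plus _compute_heatmap below implement the standard Grad-CAM definition (Selvaraju et al.): the channel weights are the global-average-pooled gradients of the class score, and the map is their ReLU-rectified weighted sum,

\alpha_k^c = \frac{1}{Z} \sum_i \sum_j \frac{\partial y^c}{\partial A^k_{ij}}, \qquad L^c = \mathrm{ReLU}\Big(\sum_k \alpha_k^c A^k\Big)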
    def _compute_heatmap(self):
        # Get gradients and activations
-       gradients = self.gradients # [B, C, H, W]
-       activations = self.activations # [B, C, H, W]
+       gradients = self.gradients # [64, C, H', W']
+       activations = self.activations # [64, C, H', W']

        # Compute weights (global average pooling of gradients)
-       weights = torch.mean(gradients, dim=[2, 3], keepdim=True) # [B, C, 1, 1]
+       weights = torch.mean(gradients, dim=[2, 3], keepdim=True) # [64, C, 1, 1]

-       # Compute Grad-CAM heatmap
-       grad_cam = torch.sum(weights * activations, dim=1).squeeze() # [H, W]
+       # Compute the Grad-CAM heatmap for one image (after a single backward pass)
+       grad_cam = torch.sum(weights * activations, dim=1)[0] # [64, H', W'] -> [H', W']
        grad_cam = F.relu(grad_cam) # Apply ReLU
        grad_cam = grad_cam / (grad_cam.max() + 1e-8) # Normalize to [0, 1]

        return grad_cam.cpu().numpy()

    # Utility to overlay a heatmap on the original image
    def overlay_heatmap(self, heatmap, image, alpha=0.5):
+       # Resize heatmap to match input image spatial dimensions
        heatmap = np.uint8(255 * heatmap) # Scale to [0, 255]
        heatmap = Image.fromarray(heatmap).resize((image.shape[1], image.shape[2]), Image.BILINEAR)
        heatmap = np.array(heatmap)
        heatmap = plt.cm.jet(heatmap)[:, :, :3] # Apply colormap (jet)

-       image = torch.as_tensor(image, dtype=torch.float32).permute(2, 1, 0)
-
-       overlay = (alpha * heatmap + (1 - alpha) * np.array(image) / 255.0)
+       # Convert the image tensor to a numpy array in HWC layout
+       image_np = image.detach().cpu().permute(1, 2, 0).numpy() # [H, W, C]
+
+       # Overlay
+       overlay = alpha * heatmap + (1 - alpha) * image_np / 255.0
        overlay = np.clip(overlay, 0, 1) * 255
-       return overlay
+       return overlay.astype(np.uint8) # Return uint8 for cv2
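The overlay step can be exercised without a model; a minimal sketch on dummy data:

import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

heatmap = np.random.rand(8, 8)                                   # fake [H', W'] Grad-CAM output in [0, 1]
heatmap = np.uint8(255 * heatmap)
heatmap = np.array(Image.fromarray(heatmap).resize((64, 64), Image.BILINEAR))
colored = plt.cm.jet(heatmap)[:, :, :3]                          # RGB colormap lookup, values in [0, 1]

image = np.random.randint(0, 256, (64, 64, 3))                   # fake HWC image in [0, 255]
overlay = 0.5 * colored + 0.5 * image / 255.0
overlay = (np.clip(overlay, 0, 1) * 255).astype(np.uint8)
print(overlay.shape, overlay.dtype)                              # (64, 64, 3) uint8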
Binary file not shown.
@@ -1,6 +1,7 @@
from tqdm import tqdm
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from torchmetrics.functional import auroc
+from torch.nn import functional

from all_models_tools.all_model_tools import call_back
from Model_Loss.Loss import Entropy_Loss
@@ -26,7 +27,7 @@ class All_Step:
        self.Model_Name = Model_Name
        self.Experiment_Name = Experiment_Name

-   def Training_Step(self, train_subset, train_loader, val_loader, model_name, fold, TargetLayer):
+   def Training_Step(self, train_subset, val_subset, train_loader, val_loader, model_name, fold, TargetLayer):
        # Reinitialize model and optimizer for each fold
        # self.Model = self.Model.__class__(self.Number_Of_Classes).to(self.device) # Reinitialize model
        Optimizer = optim.SGD(self.Model.parameters(), lr=0.045, momentum=0.9, weight_decay=0.01)
@@ -53,6 +54,7 @@ class All_Step:
            # Calculate epoch start time
            start_time = time.time()
            total_samples = len(train_subset) # Total samples in subset, not DataLoader
+           total_Validation_samples = len(val_subset)

            # Progress bar for training batches
            epoch_iterator = tqdm(train_loader, desc=f"Fold {fold + 1}/5, Epoch [{epoch + 1}/{self.Epoch}]")
@@ -83,7 +85,7 @@ class All_Step:
                eta = (total_samples - processed_samples) / iterations_per_second if iterations_per_second > 0 else 0
                time_str = f"{int(elapsed_time//60):02d}:{int(elapsed_time%60):02d}<{int(eta//60):02d}:{int(eta%60):02d}"

-               # Calculate batch accuracy
+               # Calculate batch accuracy (correct labels / total labels in the batch)
                batch_accuracy = (Output_Indexs.cpu().numpy() == True_Indexs).mean()

                # Update progress bar
@@ -93,7 +95,6 @@ class All_Step:
                )

            epoch_iterator.close()

            # Merge predictions and labels
            all_train_preds = Merge_Function.merge_data_main(all_train_preds, 0, len(all_train_preds))
            all_train_labels = Merge_Function.merge_data_main(all_train_labels, 0, len(all_train_labels))
@@ -110,8 +111,10 @@ class All_Step:
            all_val_preds = []
            all_val_labels = []

+           start_Validation_time = time.time()
+           epoch_iterator = tqdm(val_loader, desc=f"\tValidation-Fold {fold + 1}/5, Epoch [{epoch + 1}/{self.Epoch}]")
            with torch.no_grad():
-               for inputs, labels in val_loader:
+               for inputs, labels in epoch_iterator:
                    inputs, labels = inputs.to(self.device), labels.to(self.device)
                    outputs = self.Model(inputs)
                    loss = criterion(outputs, labels)
@@ -124,6 +127,27 @@ class All_Step:
                    all_val_preds.append(Output_Indexs.cpu().numpy())
                    all_val_labels.append(True_Indexs)

+                   processed_samples += inputs.size(0) # Use size(0) for batch size
+
+                   # Calculate progress and timing
+                   progress = (processed_samples / total_Validation_samples) * 100
+                   elapsed_time = time.time() - start_Validation_time
+                   iterations_per_second = processed_samples / elapsed_time if elapsed_time > 0 else 0
+                   eta = (total_Validation_samples - processed_samples) / iterations_per_second if iterations_per_second > 0 else 0
+                   time_str = f"{int(elapsed_time//60):02d}:{int(elapsed_time%60):02d}<{int(eta//60):02d}:{int(eta%60):02d}"
+
+                   # Calculate batch accuracy
+                   batch_accuracy = (Output_Indexs.cpu().numpy() == True_Indexs).mean()
+
+                   # Update progress bar
+                   epoch_iterator.set_postfix_str(
+                       f"{processed_samples}/{total_Validation_samples} [{time_str}, {iterations_per_second:.2f}it/s, "
+                       f"acc={batch_accuracy:.3f}, loss={loss.item():.3f}]"
+                   )
+
+           epoch_iterator.close()
+           print("\n")

            # Merge predictions and labels
            all_val_preds = Merge_Function.merge_data_main(all_val_preds, 0, len(all_val_preds))
            all_val_labels = Merge_Function.merge_data_main(all_val_labels, 0, len(all_val_labels))
@@ -134,8 +158,11 @@ class All_Step:
            val_losses.append(val_loss)
            val_accuracies.append(val_accuracy)

-           Grad = GradCAM(self.Model, TargetLayer)
-           Grad.Processing_Main(val_loader, f"../Result/GradCAM_Image/Validation/GradCAM_Image({str(datetime.date.today())})/fold-{str(fold)}/")
            print(f"Training Loss: {Training_Loss:.4f}, Accuracy: {train_accuracy:0.2f}, Validation Loss: {val_loss:.4f}, Accuracy: {val_accuracy:0.2f}\n")

+           if epoch % 10 == 0:
+               Grad = GradCAM(self.Model, TargetLayer)
+               Grad.Processing_Main(val_loader, f"../Result/GradCAM_Image/Validation/GradCAM_Image({str(datetime.date.today())})/fold-{str(fold)}/")

            # Early stopping
            early_stopping(val_loss, self.Model, model_path)
@@ -147,7 +174,7 @@ class All_Step:
            scheduler.step(val_loss)

        Total_Epoch = epoch + 1
-       return self.Model, train_losses, val_losses, train_accuracies, val_accuracies, Total_Epoch
+       return self.Model, model_path, train_losses, val_losses, train_accuracies, val_accuracies, Total_Epoch

    def Evaluate_Model(self, cnn_model, Test_Dataloader):
        # (Unchanged Evaluate_Model method)
@@ -166,7 +193,7 @@ class All_Step:
            True_Label.append(Output_Indexs.cpu().numpy())
            Predict_Label.append(True_Indexs)

-           Predict_Label_OneHot.append(torch.tensor(outputs, dtype=torch.float32).cpu().numpy()[0])
+           Predict_Label_OneHot.append(torch.tensor(functional.one_hot(Output_Indexs, self.Number_Of_Classes), dtype=torch.float32).cpu().numpy()[0])
            True_Label_OneHot.append(torch.tensor(labels, dtype=torch.int).cpu().numpy()[0])

        loss /= len(Test_Dataloader)
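The switch to functional.one_hot means the AUC bookkeeping sees hard one-hot predictions rather than raw model outputs; a quick check of what it produces:

import torch
from torch.nn import functional

preds = torch.tensor([2, 0, 1])
print(functional.one_hot(preds, num_classes=3))
# tensor([[0, 0, 1],
#         [1, 0, 0],
#         [0, 1, 0]])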
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -2,7 +2,7 @@ from torchinfo import summary
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix

-from Training_Tools.PreProcess import Training_Precesses, ListDataset
+from Training_Tools.PreProcess import Training_Precesses
from experiments.pytorch_Model import ModifiedXception
from experiments.Model_All_Step import All_Step
from Load_process.Load_Indepentend import Load_Indepentend_Data
@@ -10,6 +10,7 @@ from _validation.ValidationTheEnterData import validation_the_enter_data
from Load_process.file_processing import Process_File
from draw_tools.Grad_cam import GradCAM
from draw_tools.draw import plot_history, draw_heatmap
+from Calculate_Process.Calculate import Calculate

import numpy as np
import torch
@@ -66,8 +67,11 @@ class experiments():
        pass

-   def processing_main(self, Training_Data, Training_Label, counter):
-       Train, Test = self.Topic_Tool.Get_Save_Roots(self.Status) # change this when switching to a different dataset
+   def processing_main(self, Training_Data, Training_Label):
+       Train, Test = self.Topic_Tool.Get_Save_Roots(self.Status)
+       Calculate_Process = Calculate()
+
+       print(f"Training Data Content: {Training_Data[0]}")

        start = time.time()
        self.cut_image.process_main(Test) # call the function that processes the test and validation data
@@ -76,10 +80,17 @@ class experiments():
        # hand the processed test and validation data to this object's variables
        self.test, self.test_label = self.cut_image.test, self.cut_image.test_label
+       # self.test = self.test.permute(0, 3, 1, 2)
+       # Training_Data = Training_Data.permute(0, 3, 1, 2)

-       PreProcess = Training_Precesses(Training_Data, Training_Label, self.test, self.test_label)
+       PreProcess = Training_Precesses(self.Image_Size)
        File = Process_File()
-       self.Training_DataLoader, self.Test_Dataloader = PreProcess.Total_Data_Combine_To_DataLoader(self.train_batch_size)
+
+       # print(f"Dataset_Data: \n{self.test}\nLabel: \n{self.test_label}\n")
+       Testing_Dataset = PreProcess.Setting_DataSet(self.test, self.test_label, "Transform")
+       self.Test_Dataloader = PreProcess.Dataloader_Sampler(Testing_Dataset, 1, False)
+       # for images, labels in self.Test_Dataloader:
+       #     print(images.shape)

        # Lists to store metrics across all folds
        all_fold_train_losses = []
@@ -90,8 +101,7 @@ class experiments():
        # Define K-fold cross-validator
        K_Fold = KFold(n_splits = 5, shuffle = True, random_state = 42)
-       # Get the underlying dataset from PreProcess_Classes_Data
-       training_dataset = ListDataset(data_list = PreProcess.Training_Datas, labels_list = PreProcess.Training_Labels, status = True)
+       training_dataset = PreProcess.Setting_DataSet(Training_Data, Training_Label, "Transform")

        # K-Fold loop
        for fold, (train_idx, val_idx) in enumerate(K_Fold.split(training_dataset)):
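KFold.split only needs a sized sequence and hands back index arrays per fold, which is why the dataset object can be passed in directly; a minimal sketch:

from sklearn.model_selection import KFold

K_Fold = KFold(n_splits=5, shuffle=True, random_state=42)
data = list(range(10))   # stand-in for the dataset
for fold, (train_idx, val_idx) in enumerate(K_Fold.split(data)):
    print(fold, len(train_idx), len(val_idx))   # 8 train / 2 val indices per fold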
@@ -104,19 +114,20 @@ class experiments():
            Grad = GradCAM(cnn_model, TargetLayer)

            step = All_Step(cnn_model, self.epoch, self.Number_Of_Classes, self.model_name, self.experiment_name)
-           print("\n\n\nTime to load the training data (70000): %f seconds\n\n" % (end - start))
+           print("\n\n\nTime to load the training data: %f seconds\n\n" % (end - start))
            print(f"\nStarting Fold {fold + 1}/5")

            # Create training and validation subsets for this fold
            train_subset = torch.utils.data.Subset(training_dataset, train_idx)
            val_subset = torch.utils.data.Subset(training_dataset, val_idx)

-           # Wrap subsets in DataLoaders (use same batch size as original)
-           batch_size = self.Training_DataLoader.batch_size
-           train_loader = torch.utils.data.DataLoader(train_subset, batch_size=batch_size, shuffle=True)
-           val_loader = torch.utils.data.DataLoader(val_subset, batch_size=batch_size, shuffle=False)
-
-           cnn_model, train_losses, val_losses, train_accuracies, val_accuracies, Total_Epoch = step.Training_Step(train_subset, train_loader, val_loader, self.model_name, fold, TargetLayer)
+           # Wrap subsets in DataLoaders (use same batch size as original)
+           # print(f"Dataset_Data: \n{train_subset.dataset.data}\nLabel: \n{train_subset.dataset.labels}\n")
+           train_loader = PreProcess.Dataloader_Sampler(train_subset, self.train_batch_size, False)
+           val_loader = PreProcess.Dataloader_Sampler(val_subset, self.train_batch_size, False)
+
+           cnn_model, model_path, train_losses, val_losses, train_accuracies, val_accuracies, Total_Epoch = step.Training_Step(train_subset, val_subset, train_loader, val_loader, self.model_name, fold, TargetLayer)

            # Store fold results
            all_fold_train_losses.append(train_losses)
@@ -128,11 +139,14 @@ class experiments():
            Accuracies = [train_accuracies, val_accuracies]
            plot_history(Total_Epoch, Losses, Accuracies, "train" + str(fold), self.experiment_name) # plot the training history and hand the figures off to be saved

+           cnn_model.load_state_dict(torch.load(model_path))
            True_Label, Predict_Label, loss, accuracy, precision, recall, AUC, f1 = step.Evaluate_Model(cnn_model, self.Test_Dataloader)
-           Grad.Processing_Main(self.Test_Dataloader, f"../Result/GradCAM_Image/Testing/GradCAM_Image({str(datetime.date.today())})/fold-{str(fold)}/")
+           Grad.Processing_Main(self.Test_Dataloader, f"../Result/GradCAM_Image/Testing/GradCAM_Image({str(datetime.date.today())})/fold-{str(fold)}")
+           Calculate_Process.Append_numbers(loss, accuracy, precision, recall, AUC, f1)

-           Matrix = self.record_matrix_image(True_Label, Predict_Label, self.model_name, counter)
-           print(self.record_everyTime_test_result(loss, accuracy, precision, recall, AUC, f1, counter, self.experiment_name, Matrix)) # record this run's test results and export them as a csv file
+           self.record_matrix_image(True_Label, Predict_Label, self.experiment_name, fold)
+           print(self.record_everyTime_test_result(loss, accuracy, precision, recall, AUC, f1, fold, self.experiment_name)) # record this run's test results and export them as a csv file

        # Aggregate results across folds
        avg_train_losses = np.mean([losses[-1] for losses in all_fold_train_losses])
@@ -144,6 +158,11 @@
        print(f"Avg Train Loss: {avg_train_losses:.4f}, Avg Val Loss: {avg_val_losses:.4f}")
        print(f"Avg Train Acc: {avg_train_accuracies:.4f}, Avg Val Acc: {avg_val_accuracies:.4f}")

+       Calculate_Process.Calculate_Mean()
+       Calculate_Process.Calculate_Std()
+
+       print(Calculate_Process.Output_Style())

        File.Save_TXT_File(content = f"\nCross-Validation Results:\nAvg Train Loss: {avg_train_losses:.4f}, Avg Val Loss: {avg_val_losses:.4f}\nAvg Train Acc: {avg_train_accuracies:.4f}, Avg Val Acc: {avg_val_accuracies:.4f}\n", File_Name = "Training_Average_Result")

        pass
@@ -163,10 +182,8 @@ class experiments():
        # compute the confusion matrix
        matrix = confusion_matrix(True_Labels, Predict_Labels)
        draw_heatmap(matrix, model_name, index) # call the function that draws the confusion matrix

        return matrix

-   def record_everyTime_test_result(self, loss, accuracy, precision, recall, auc, f, indexs, model_name, Matrix):
+   def record_everyTime_test_result(self, loss, accuracy, precision, recall, auc, f, indexs, model_name):
        '''Record the result of a single training run and write it out to a file'''
        File = Process_File()
@@ -2,40 +2,66 @@ import torch.nn as nn
import timm


# class ModifiedXception(nn.Module):
#     def __init__(self, num_classes):
#         super(ModifiedXception, self).__init__()

#         # Load Xception pre-trained model (full model, not just features)
#         self.base_model = timm.create_model(
#             'xception',
#             pretrained=True
#         )

#         # Replace the default global pooling with AdaptiveAvgPool2d
#         self.base_model.global_pool = nn.AdaptiveAvgPool2d(output_size=1) # Output size of 1x1 spatially

#         # Replace the final fully connected layer with Identity to get features
#         self.base_model.fc = nn.Identity() # Output will be 2048 (Xception's default feature size)

#         # Custom head: Linear from 2048 to 1025, then to num_classes
#         self.custom_head = nn.Sequential(
#             nn.Linear(2048, 1025), # From Xception's 2048 features down to 1025
#             nn.ReLU(), # Activation
#             nn.Dropout(0.6), # Dropout for regularization
#             nn.Linear(1025, num_classes) # Final output layer
#             # nn.Softmax(dim = 1)
#         )

#     def forward(self, x):
#         # Pass through the base Xception model (up to global pooling)
#         x = self.base_model.forward_features(x) # Get feature maps
#         x = self.base_model.global_pool(x) # Apply AdaptiveAvgPool2d (output: [B, 2048, 1, 1])
#         x = x.flatten(1) # Flatten to [B, 2048]
#         # x = self.base_model.fc(x) # Identity layer (still [B, 2048])
#         output = self.custom_head(x) # Custom head processing
#         return output
class ModifiedXception(nn.Module):
    def __init__(self, num_classes):
        super(ModifiedXception, self).__init__()

-       # Load Xception pre-trained model (full model, not just features)
-       self.base_model = timm.create_model(
-           'xception',
-           pretrained=True,
-           drop_rate=0.0, # Optional: adjust dropout if needed
-       )
+       # Load the pre-trained Xception model and remove its final fc layer
+       self.base_model = timm.create_model('xception', pretrained=True)
+       self.base_model.fc = nn.Identity() # remove the original fully connected layer

-       # Replace the default global pooling with AdaptiveAvgPool2d
-       self.base_model.global_pool = nn.AdaptiveAvgPool2d(output_size=1) # Output size of 1x1 spatially
-
-       # Replace the final fully connected layer with Identity to get features
-       self.base_model.fc = nn.Identity() # Output will be 2048 (Xception's default feature size)
-
-       # Custom head: Linear from 2048 to 1025, then to num_classes
-       self.custom_head = nn.Sequential(
-           nn.Linear(2048, 1025), # From Xception's 2048 features down to 1025
-           nn.ReLU(), # Activation
-           nn.Dropout(0.6), # Dropout for regularization
-           nn.Linear(1025, num_classes), # Final output layer
-           nn.Sigmoid() # Sigmoid for binary/multi-label classification
-       )
+       # Add a global average pooling layer, a hidden layer, and the output layer
+       GAP_Output = 2048
+       self.global_avg_pool = nn.AdaptiveAvgPool1d(2048) # global average pooling
+       self.hidden_layer = nn.Linear(2048, 1025) # hidden layer; the input size depends on Xception's feature size
+       self.output_layer = nn.Linear(1025, num_classes) # output layer, sized by the number of classes
+
+       # Activation functions and dropout
+       self.relu = nn.ReLU()
+       self.softmax = nn.Softmax(1)
+       self.dropout = nn.Dropout(0.6)

    def forward(self, x):
-       # Pass through the base Xception model (up to global pooling)
-       x = self.base_model.forward_features(x) # Get feature maps
-       x = self.base_model.global_pool(x) # Apply AdaptiveAvgPool2d (output: [B, 2048, 1, 1])
-       x = x.flatten(1) # Flatten to [B, 2048]
-       x = self.base_model.fc(x) # Identity layer (still [B, 2048])
-       output = self.custom_head(x) # Custom head processing
-       return output
+       x = self.base_model(x) # Xception backbone
+       x = self.global_avg_pool(x) # global average pooling
+       x = self.relu(self.hidden_layer(x)) # hidden layer + ReLU
+       x = self.dropout(x) # Dropout
+       x = self.output_layer(x) # output layer
+       return x
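A minimal smoke test for the class above (assuming the timm Xception weights can be downloaded); the dummy input size is hypothetical:

import torch

model = ModifiedXception(num_classes=3)
model.eval()
with torch.no_grad():
    logits = model(torch.rand(1, 3, 256, 256))   # dummy batch of one image
print(logits.shape)   # torch.Size([1, 3])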

class Model_module():
    def __init__(self):
main.py
@@ -23,7 +23,7 @@ if __name__ == "__main__":
    tool.Set_Labels()
    tool.Set_Save_Roots()

-   Status = 2 # choose which dataset to use
+   Status = 1 # choose which dataset to use
    Labels = tool.Get_Data_Label()
    Trainig_Root, Testing_Root = tool.Get_Save_Roots(Status) # the standard roots
    Generator_Root = tool.Get_Generator_Save_Roots(Status)
@@ -33,64 +33,66 @@ if __name__ == "__main__":
    Encording_Label = tool.Get_OneHot_Encording_Label()

    Label_Length = len(Labels)
-   Classification = 3 # number of classes

    Model_Name = "Xception" # records which model is used (a pre-trained model or a self-designed one)
-   Experiment_Name = "Xception Skin is used RandomSampler to train ICG stomach cancer"
+   Experiment_Name = "Xception Skin trains Stomach Cancer Dataset with original images"
    Epoch = 10000
    Train_Batch_Size = 64
    Image_Size = 256

    Prepare = Load_Data_Prepare()
    loading_data = Load_ImageGenerator(Trainig_Root, Testing_Root, Generator_Root, Labels, Image_Size)
-   experiment = experiments(Image_Size, Model_Name, Experiment_Name, Epoch, Train_Batch_Size, tool, Classification, Status)
+   experiment = experiments(Image_Size, Model_Name, Experiment_Name, Epoch, Train_Batch_Size, tool, Label_Length, Status)
    image_processing = Read_image_and_Process_image(Image_Size)
    Merge = merge()
    Calculate_Tool = Calculate()

    counter = 1
    Batch_Size = 128
    Train_Size = 0

    for Run_Range in range(0, counter, 1): # run the training the configured number of times
        # load the data
        Data_Dict_Data = loading_data.process_main(Label_Length)
-       Data_Dict_Data, Train_Size = Balance_Process(Data_Dict_Data, Labels)
+       Total_Size_List = []

        for label in Labels:
            Train_Size += len(Data_Dict_Data[label])
+           Total_Size_List.append(len(Data_Dict_Data[label]))
            print(f"Labels: {label}, total count: {len(Data_Dict_Data[label])}")

        print("There are " + str(Train_Size) + " samples in total")

        # build labels matching the amount of data
        Classes = []
        i = 0
        for encording in Encording_Label:
-           Classes.append(image_processing.make_label_list(Train_Size, encording))
+           Classes.append(image_processing.make_label_list(Total_Size_List[i], encording))
            i += 1

        # pack the data into a dict
        Prepare.Set_Final_Dict_Data(Labels, Data_Dict_Data, Classes, Label_Length)
        Final_Dict_Data = Prepare.Get_Final_Data_Dict()
        keys = list(Final_Dict_Data.keys())

-       training_data = Merge.merge_all_image_data(Final_Dict_Data[keys[0]], Final_Dict_Data[keys[1]]) # merge the training data into one list
-       for i in range(2, Label_Length):
-           training_data = Merge.merge_all_image_data(training_data, Final_Dict_Data[keys[i]]) # merge the training data into one list
+       Training_Data = Merge.merge_all_image_data(Final_Dict_Data[keys[0]], Final_Dict_Data[keys[1]]) # merge the training data into one list
+       for i in range(2, Label_Length):
+           Training_Data = Merge.merge_all_image_data(Training_Data, Final_Dict_Data[keys[i]]) # merge the training data into one list

-       training_label = Merge.merge_all_image_data(Final_Dict_Data[keys[Label_Length]], Final_Dict_Data[keys[Label_Length + 1]]) # merge the training labels into one list
-       for i in range(Label_Length + 2, 2 * Label_Length):
-           training_label = Merge.merge_all_image_data(training_label, Final_Dict_Data[keys[i]]) # merge the training labels into one list
+       Training_Label = Merge.merge_all_image_data(Final_Dict_Data[keys[Label_Length]], Final_Dict_Data[keys[Label_Length + 1]]) # merge the training labels into one list
+       for i in range(Label_Length + 2, 2 * Label_Length):
+           Training_Label = Merge.merge_all_image_data(Training_Label, Final_Dict_Data[keys[i]]) # merge the training labels into one list

        start = time.time()
-       trains_Data_Image = image_processing.Data_Augmentation_Image(training_data) # read the image files
-       Training_Data, Training_Label = image_processing.image_data_processing(trains_Data_Image, training_label) # normalize the loaded images and turn the labels into a numpy array
+       # trains_Data_Image = image_processing.Data_Augmentation_Image(training_data) # read the image files
+       # Training_Data, Training_Label = image_processing.image_data_processing(trains_Data_Image, training_label) # normalize the loaded images and turn the labels into a numpy array
+       # Training_Data = image_processing.normalization(Training_Data)

+       # training_data = image_processing.normalization(training_data)

+       # Balance_Data = list(zip(Training_Data, Training_Label))
+       # Training_Data, Training_Label = Balance_Process(Balance_Data, Total_Size_List)
+       # training_data = training_data.permute(0, 3, 1, 2)

        end = time.time()
        print("\n\n\nTime to load the training data (70000): %f seconds\n\n" % (end - start))

-       experiment.processing_main(Training_Data, Training_Label, Run_Range) # run the training procedure
+       experiment.processing_main(Training_Data, Training_Label) # run the training procedure
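The per-class label construction above pairs each class size with its one-hot encoding; a sketch with a hypothetical stand-in for make_label_list:

def make_label_list(count, encoding):   # hypothetical stand-in: one row per image
    return [encoding] * count

Total_Size_List = [3, 2]
Encording_Label = [[1, 0], [0, 1]]
Classes = [make_label_list(Total_Size_List[i], enc) for i, enc in enumerate(Encording_Label)]
print(Classes)   # [[[1, 0], [1, 0], [1, 0]], [[0, 1], [0, 1]]]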
Binary file not shown.
@@ -23,7 +23,7 @@ class merge:
        return self.merge_data_main(merged_data, 0, Number_Of_Classes)

-   def merge_data_main(self, merge_data, merge_start_index, total_merge_number = 3):
+   def merge_data_main(self, merge_data, merge_start_index, total_merge_number):
        '''
        Merge the per-class data together
        ## Parameter:
Binary file not shown.
@@ -1,4 +1,5 @@
import random
+from merge_class.merge import merge


def calculate_confusion_matrix(predict, result):
@@ -41,15 +42,26 @@ def shuffle_data(image, label, mode = 1):

    return shuffle_image

-def Balance_Process(Data_Dict_Data, Labels):
+def Balance_Process(Datas, Size_List):
    # Data_Dict_Data = shuffle_data(Data_Content, Labels, 2)
-   Train_Size = 0
+   Train_Size, start = 0, 0
+   Image_List = []
+   Images, Labels = [], []
+   Merge = merge()

-   Train_Size = min(len(Data_Dict_Data[Labels[0]]), len(Data_Dict_Data[Labels[1]]))
-   for i in range(1, len(Labels) - 1):
-       Train_Size = min(Train_Size, len(Data_Dict_Data[Labels[i + 1]]))
+   Train_Size = min(Size_List[0], Size_List[1])
+   for i in range(2, len(Size_List), 1):
+       Train_Size = min(Train_Size, Size_List[i])

-   for i in range(len(Labels)):
-       Data_Dict_Data[Labels[i]] = Data_Dict_Data[Labels[i]][0 : Train_Size]
+   for i in Size_List:
+       Image_List.append(Datas[start : Train_Size])
+       start = Train_Size
+       Train_Size += Train_Size

-   return Data_Dict_Data, Train_Size
+   Image_List = Merge.merge_data_main(Image_List, 0, len(Image_List))
+
+   for Image in Image_List:
+       Images.append(Image[0])
+       Labels.append(Image[1])
+
+   return Images, Labels
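The core of the new Balance_Process is truncating every class to the smallest class size; that min-reduction in isolation:

Size_List = [904, 1200, 950]   # hypothetical per-class sample counts
Train_Size = min(Size_List[0], Size_List[1])
for i in range(2, len(Size_List)):
    Train_Size = min(Train_Size, Size_List[i])
print(Train_Size)   # 904 samples kept per class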
test.ipynb
File diff suppressed because one or more lines are too long