20250210 Modification: the program runs, and training, validation, and testing still work
74
Calculate_Process/Calculate.py
Normal file
@@ -0,0 +1,74 @@
import numpy as np
import pandas as pd
from decimal import Decimal, ROUND_HALF_UP


class Calculate():
    def __init__(self) -> None:
        self.Loss, self.Accuracy, self.Precision, self.Recall, self.F1, self.AUC = 0, 0, 0, 0, 0, 0
        self.Loss_Record, self.Accuracy_Record, self.Precision_Record, self.Recall_Record, self.F1_Record, self.AUC_Record = [], [], [], [], [], []
        self.History = []

    def Append_numbers(self, Loss, Accuracy, Precision, Recall, AUC, F1):
        self.Loss_Record.append(Loss)
        self.Accuracy_Record.append(Accuracy)
        self.Precision_Record.append(Precision)
        self.Recall_Record.append(Recall)
        self.F1_Record.append(F1)
        self.AUC_Record.append(AUC)

    def Calculate_Mean(self):
        Loss_Mean = np.mean(self.Loss_Record)
        Accuracy_Mean = np.mean(self.Accuracy_Record)
        Precision_Mean = np.mean(self.Precision_Record)
        Recall_Mean = np.mean(self.Recall_Record)
        F1_Mean = np.mean(self.F1_Record)
        AUC_Mean = np.mean(self.AUC_Record)

        Mean_DataFrame = pd.DataFrame(
            {
                "loss" : "{:.2f}".format(Loss_Mean),
                "precision" : "{:.2f}%".format(Precision_Mean * 100),
                "recall" : "{:.2f}%".format(Recall_Mean * 100),
                "accuracy" : "{:.2f}%".format(Accuracy_Mean * 100),
                "f" : "{:.2f}%".format(F1_Mean * 100),
                "AUC" : "{:.2f}%".format(AUC_Mean * 100)
            }, index = [0]
        )
        self.History.append(Mean_DataFrame)
        return Mean_DataFrame

    def Calculate_Std(self):
        Loss_Std = Decimal(str(np.std(self.Loss_Record))).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
        Accuracy_Std = Decimal(str(np.std(self.Accuracy_Record))).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
        Precision_Std = Decimal(str(np.std(self.Precision_Record))).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
        Recall_Std = Decimal(str(np.std(self.Recall_Record))).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
        F1_Std = Decimal(str(np.std(self.F1_Record))).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)
        AUC_Std = Decimal(str(np.std(self.AUC_Record))).quantize(Decimal('0.01'), rounding=ROUND_HALF_UP)

        Std_DataFrame = pd.DataFrame(
            {
                "loss" : "{:.2f}".format(Loss_Std),
                "precision" : "{:.2f}".format(Precision_Std),
                "recall" : "{:.2f}".format(Recall_Std),
                "accuracy" : "{:.2f}".format(Accuracy_Std),
                "f" : "{:.2f}".format(F1_Std),
                "AUC" : "{:.2f}".format(AUC_Std)
            }, index = [0]
        )
        self.History.append(Std_DataFrame)
        return Std_DataFrame

    def Output_Style(self):
        Result = pd.DataFrame(
            {
                "loss" : "{}±{}".format(self.History[0]["loss"][0], self.History[1]["loss"][0]),
                "precision" : "{}±{}".format(self.History[0]["precision"][0], self.History[1]["precision"][0]),
                "recall" : "{}±{}".format(self.History[0]["recall"][0], self.History[1]["recall"][0]),
                "accuracy" : "{}±{}".format(self.History[0]["accuracy"][0], self.History[1]["accuracy"][0]),
                "f" : "{}±{}".format(self.History[0]["f"][0], self.History[1]["f"][0]),
                "AUC" : "{}±{}".format(self.History[0]["AUC"][0], self.History[1]["AUC"][0])
            }, index = [0]
        )
        return Result
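For reference, a minimal sketch of how this metrics helper might be driven from a cross-validation loop; the fold loop and the metric values fed in are illustrative, not part of the commit:

# Illustrative usage only: the metric values would normally come from evaluating each fold.
from Calculate_Process.Calculate import Calculate

calculator = Calculate()
for loss, acc, prec, rec, auc, f1 in [(0.31, 0.91, 0.90, 0.92, 0.95, 0.91),
                                      (0.28, 0.93, 0.92, 0.94, 0.96, 0.93)]:
    calculator.Append_numbers(loss, acc, prec, rec, auc, f1)

calculator.Calculate_Mean()        # appends the mean row to History
calculator.Calculate_Std()         # appends the std row to History
print(calculator.Output_Style())   # one-row DataFrame formatted as "mean±std"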
0
Calculate_Process/__init__.py
Normal file
BIN
Calculate_Process/__pycache__/Calculate.cpython-310.pyc
Normal file
Binary file not shown.
BIN
Calculate_Process/__pycache__/__init__.cpython-310.pyc
Normal file
Binary file not shown.
201
Image_Process/Image_Generator.py
Normal file
@@ -0,0 +1,201 @@
from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
from _validation.ValidationTheEnterData import validation_the_enter_data
from Load_process.file_processing import Process_File
from keras.preprocessing.image import ImageDataGenerator
from Load_process.LoadData import Load_Data_Prepare


class Image_generator():
    '''Builds the data augmentation and writes the augmented images to disk.'''
    def __init__(self, Generator_Root, Labels) -> None:
        self._validation = validation_the_enter_data()
        self.stop = 0
        self.Labels = Labels
        self.Generator_Root = Generator_Root

    def Processing_Main(self, Training_Dict_Data_Root):
        data_size = 0

        # Standard augmentation: follow the augmentation described in the paper
        # "IMAGE DATA COLLECTION AND IMPLEMENTATION OF DEEP LEARNING-BASED MODEL IN DETECTING
        # MONKEYPOX DISEASE USING MODIFIED VGG16" and generate the augmented images.
        print("\nAugmentation one monkeypox image")
        data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 1, data_size)
        self.stop += data_size

        # Second augmentation scheme (same idea as above).
        print("\nAugmentation two monkeypox image")
        data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 2, data_size)
        self.stop += data_size

        # Third augmentation scheme (same idea as above).
        print("\nAugmentation three monkeypox image")
        data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 3, data_size)
        self.stop += data_size

        # Fourth augmentation scheme (same idea as above).
        print("\nAugmentation four monkeypox image")
        data_size = self.get_processing_Augmentation(Training_Dict_Data_Root, 4, data_size)

        print()

    def get_processing_Augmentation(self, original_image_root : dict, Augment_choose, data_size):
        Prepaer = Load_Data_Prepare()

        self.get_data_roots = original_image_root  # image paths to process
        Prepaer.Set_Label_List(self.Labels)
        data_size = self.Generator_main(self.Generator_Root, Augment_choose, data_size)  # run the augmentation
        return data_size

    def Generator_main(self, save_roots, stardand, data_size):
        '''
        Parameters:
            labels: labels of the data to augment
            save_root: where the augmented data is saved
            stardand: which image-augmentation scheme to use
        '''
        File = Process_File()

        for label in self.Labels:  # augment every class separately
            image = self.load_data(label)  # load the data
            save_root = File.Make_Save_Root(label, save_roots)  # join the output path

            if File.JudgeRoot_MakeDir(save_root):  # create the output folder if it does not exist yet
                print("The file is exist")

            train_Generator = self.Generator_Content(stardand)  # pick the augmentation scheme
            stop_counter = 0
            for batches in train_Generator.flow(image, batch_size = 16):  # run the augmentation
                '''
                batch_size controls how many images flow() yields per batch; each batch is then
                iterated image by image. In an earlier variant of this loop batch_size was 12, a
                counter tracked how many NumPy files had been written, and a variable i counted the
                iterations; when i reached a multiple of 417 the outer loop was broken, so at most
                417 batches were produced and the final batch could hold fewer images than
                batch_size, meaning the number of saved files could be smaller than the dataset
                size divided by 12.

                * train_Generator is an ImageDataGenerator object that yields augmented image data
                  usable for training a neural network.
                * flow() is the ImageDataGenerator method that yields one batch of images at a
                  time; image is the input data and batch_size the number of images per batch.
                * The outer loop iterates over the batches yielded by the generator; the inner loop
                  iterates over the individual images inside each batch.
                * Each image is written to disk with a file name containing the class label and a
                  running counter, and the counter is incremented after every save.
                * The loop is broken once the requested number of images (self.stop) has been
                  produced, because flow() would otherwise loop forever.
                '''
                for batch in batches:  # save every image of the batch separately
                    File.Save_CV2_File("image_" + label + str(data_size) + ".png", save_root, batch)  # save the file
                    data_size += 1
                    stop_counter += 1

                if stop_counter >= self.stop:  # stop after the requested number of images
                    break
            print(str(label) + " has " + str(stop_counter) + " images")

        return data_size

    def load_data(self, label):
        '''Images are read by my own reader.'''
        image_processing = Read_image_and_Process_image()
        img = image_processing.Data_Augmentation_Image(self.get_data_roots[label])

        self.stop = len(img) * 1.5
        return img

    def Generator_Content(self, judge):  # image data augmentation
        '''
        ImageDataGenerator parameters:
            featurewise_center : Boolean. Set the input mean to 0 over the dataset, feature-wise.
            samplewise_center : Boolean. Set each sample mean to 0.
            featurewise_std_normalization : Boolean. Divide inputs by the std of the dataset, feature-wise.
            samplewise_std_normalization : Boolean. Divide each input by its own std.
            zca_epsilon : epsilon for ZCA whitening, default 1e-6.
            zca_whitening : Boolean. Whether to apply ZCA whitening.
            rotation_range : Int. Degree range for random rotations.
            width_shift_range : float, 1-D array-like or int
                float: fraction of total width if < 1, or pixels if >= 1.
                1-D array: random elements from the array.
                int: integer number of pixels from the interval (-width_shift_range, +width_shift_range).
                With width_shift_range=2 the possible values are the integers [-1, 0, +1], the same as
                width_shift_range=[-1, 0, +1]; with width_shift_range=1.0 they are floats in [-1.0, +1.0).
            height_shift_range : float, 1-D array-like or int
                float: fraction of total height if < 1, or pixels if >= 1.
                1-D array-like: random elements from the array.
                int: integer number of pixels from the interval (-height_shift_range, +height_shift_range).
                With height_shift_range=2 the possible values are the integers [-1, 0, +1], the same as
                height_shift_range=[-1, 0, +1]; with height_shift_range=1.0 they are floats in [-1.0, +1.0).
            shear_range : Float. Shear intensity (shear angle, counter-clockwise, in radians).
            zoom_range : Float or [lower, upper]. Range for random zoom. If a float,
                [lower, upper] = [1-zoom_range, 1+zoom_range].
            channel_shift_range : Float. Range for random channel shifts.
            fill_mode : One of {"constant", "nearest", "reflect", "wrap"}. Default 'nearest'.
                Points outside the boundaries of the input are filled according to the given mode:
                'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
                'nearest': aaaaaaaa|abcd|dddddddd
                'reflect': abcddcba|abcd|dcbaabcd
                'wrap': abcdabcd|abcd|abcdabcd
            cval : Float or Int. Value used for points outside the boundaries when fill_mode = "constant".
            horizontal_flip : Boolean. Randomly flip inputs horizontally.
            vertical_flip : Boolean. Randomly flip inputs vertically.
            rescale : rescaling factor. Defaults to None. If None or 0, no rescaling is applied;
                otherwise the data is multiplied by the provided value (before any other transformation).
            preprocessing_function : function applied to each input, run before any other change.
                Takes one image (a rank-3 NumPy tensor) and must return a tensor of the same shape.
            data_format : Image data format, one of {"channels_first", "channels_last"}.
                "channels_last" means inputs of shape (samples, height, width, channels);
                "channels_first" means (samples, channels, height, width). Defaults to the
                image_data_format value in the Keras config file ~/.keras/keras.json, or
                "channels_last" if it was never set.
            validation_split : Float. Fraction of images reserved for validation (strictly between 0 and 1).
            dtype : Dtype of the generated arrays.
        '''
        if judge == 1:
            datagen = ImageDataGenerator(
                rotation_range=30,        # rotate the image
                width_shift_range=0.1,    # random horizontal shift, as a fraction (0.1) of the image width
                height_shift_range=0.1,   # random vertical shift, as a fraction (0.1) of the image height
                zoom_range=0.2,           # random zoom range, [lower, upper] = [1-zoom_range, 1+zoom_range]
                horizontal_flip=False,    # horizontal flip
                vertical_flip=False,      # vertical flip
                fill_mode='nearest'       # fill the gaps created by rotation or shifting
            )
        if judge == 2:
            datagen = ImageDataGenerator(
                rotation_range=180,
                width_shift_range=0.2,
                height_shift_range=0.1,
                zoom_range=0.1,
                horizontal_flip=True,
                vertical_flip=True,
                fill_mode='nearest'
            )
        if judge == 3:
            datagen = ImageDataGenerator(
                rotation_range=45,        # rotate the image
                width_shift_range=0.02,   # random horizontal shift, as a fraction of the image width
                height_shift_range=0.02,  # random vertical shift, as a fraction of the image height
                shear_range = 0.02,
                zoom_range=0.02,          # random zoom range, [lower, upper] = [1-zoom_range, 1+zoom_range]
                horizontal_flip = True,
                fill_mode = "reflect"
            )

        if judge == 4:  # augmentation from the second paper
            datagen = ImageDataGenerator(
                rotation_range=50,        # rotate the image
                width_shift_range=0.2,    # random horizontal shift, as a fraction of the image width
                height_shift_range=0.2,   # random vertical shift, as a fraction of the image height
                shear_range = 0.25,
                zoom_range=0.1,           # random zoom range, [lower, upper] = [1-zoom_range, 1+zoom_range]
                channel_shift_range = 20  # range for random channel shifts
            )

        if judge == 5:  # augmentation from the first paper
            datagen = ImageDataGenerator(rescale = 1 / 255)

        return datagen
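A stand-alone sketch of the flow() pattern this class relies on, with a small random array standing in for the real images (the array shape and the stop count are illustrative):

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

images = np.random.randint(0, 255, size=(8, 512, 512, 3)).astype("float32")  # stand-in for the loaded images
datagen = ImageDataGenerator(rotation_range=30, width_shift_range=0.1, zoom_range=0.2, fill_mode="nearest")

stop, saved = int(len(images) * 1.5), 0
for batch in datagen.flow(images, batch_size=16, shuffle=False):
    for augmented in batch:     # one augmented image at a time
        saved += 1              # the real code writes image_<label><n>.png to disk here
    if saved >= stop:
        break                   # flow() loops forever, so the caller has to stop it
print(saved, "augmented images produced")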
BIN
Image_Process/__pycache__/Image_Generator.cpython-310.pyc
Normal file
Binary file not shown.
BIN
Image_Process/__pycache__/Image_Generator.cpython-39.pyc
Normal file
Binary file not shown.
BIN
Image_Process/__pycache__/LoadData.cpython-310.pyc
Normal file
Binary file not shown.
BIN
Image_Process/__pycache__/LoadData.cpython-311.pyc
Normal file
Binary file not shown.
BIN
Image_Process/__pycache__/LoadData.cpython-39.pyc
Normal file
Binary file not shown.
BIN
Image_Process/__pycache__/image_enhancement.cpython-310.pyc
Normal file
Binary file not shown.
BIN
Image_Process/__pycache__/image_enhancement.cpython-39.pyc
Normal file
Binary file not shown.
BIN
Image_Process/__pycache__/load_and_ImageGenerator.cpython-39.pyc
Normal file
Binary file not shown.
BIN
Image_Process/__pycache__/load_paper_data.cpython-39.pyc
Normal file
Binary file not shown.
BIN
Image_Process/__pycache__/make_ImageGenerator.cpython-310.pyc
Normal file
Binary file not shown.
BIN
Image_Process/__pycache__/make_ImageGenerator.cpython-311.pyc
Normal file
Binary file not shown.
BIN
Image_Process/__pycache__/make_ImageGenerator.cpython-39.pyc
Normal file
Binary file not shown.
87
Image_Process/image_enhancement.py
Normal file
@@ -0,0 +1,87 @@
import cv2
import numpy as np

def shapen(image):  # sharpening (unsharp mask)
    sigma = 100
    blur_img = cv2.GaussianBlur(image, (0, 0), sigma)
    usm = cv2.addWeighted(image, 1.5, blur_img, -0.5, 0)

    return usm

def increase_contrast(image):  # increase the image contrast
    output = image  # output buffer
    alpha = 2
    beta = 10
    cv2.convertScaleAbs(image, output, alpha, beta)  # apply convertScaleAbs

    return output

def adaptive_histogram_equalization(image):
    ycrcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)
    channels = cv2.split(ycrcb)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    clahe.apply(channels[0], channels[0])

    ycrcb = cv2.merge(channels)
    Change_image = cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR)

    return Change_image

def Remove_Background(image, Matrix_Size):
    skinCrCbHist = np.zeros((256,256), dtype= np.uint8)
    cv2.ellipse(skinCrCbHist, (113,155), (23,25), 43, 0, 360, (255, 255, 255), -1)  # draw a filled elliptical skin-colour region

    img_ycrcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)
    y, cr, cb = cv2.split(img_ycrcb)  # split into the Y, Cr, Cb channels

    skin = np.zeros(cr.shape, dtype = np.uint8)  # mask
    (x, y) = cr.shape

    # visit every pixel of the image
    for i in range(x):
        for j in range(y):
            if skinCrCbHist[cr[i][j], cb[i][j]] > 0:  # keep the pixel if its (Cr, Cb) falls inside the ellipse
                skin[i][j] = 255
    # If the grey level of a pixel is above 200, adjust its transparency.
    # Using 255 - gray[y, x] turns some edge pixels semi-transparent and avoids jagged edges.
    # img_change = cv2.cvtColor(img_change, cv2.COLOR_BGRA2BGR)
    img = cv2.bitwise_and(image, image, mask = skin)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    h = image.shape[0]  # image height
    w = image.shape[1]  # image width

    for x in range(w):
        for y in range(h):
            if img_gray[y, x] == 0:
                # if x == 0 and y == 0:          # X and Y at the top-left corner
                #     image[y, x] = Add(1, Matrix_Size, image[y, x]) / Matrix_Size
                # if x == w - 1 and y == 0:      # X and Y at the top-right corner
                #     image[y, x] = Add(w - Matrix_Size, w, image[y, x]) / Matrix_Size
                # if x == 0 and y == h - 1:      # X and Y at the bottom-left corner
                #     image[y, x] = (image[y - 1, x] + image[y - 1, x + 1] + image[y, x + 1]) / 3
                # if x == w - 1 and y == h - 1:  # X and Y at the bottom-right corner
                #     image[y, x] = (image[y, x - 1] + image[y - 1, x - 1] + image[y - 1, x]) / 3

                # if (x > 0 and x < w - 1) and y == 0:      # top border, X from left to right
                #     image[y, x] = (image[y, x - 1] + image[y + 1, x - 1] + image[y + 1, x] + image[y, x + 1] + image[y + 1, x + 1]) / 5
                # if (x > 0 and x < w - 1) and y == h - 1:  # bottom border, X from left to right
                #     image[y, x] = (image[y, x - 1] + image[y - 1, x - 1] + image[y - 1, x] + image[y, x + 1] + image[y - 1, x + 1]) / 5
                # if x == 0 and (y > 0 and y < h - 1):      # left border, Y from top to bottom
                #     image[y, x] = (image[y - 1, x] + image[y - 1, x + 1] + image[y, x + 1] + image[y + 1, x + 1] + image[y + 1, x]) / 5
                # if x == w - 1 and (y > 0 and y < h - 1):  # right border, Y from top to bottom
                #     image[y, x] = (image[y - 1, x] + image[y - 1, x - 1] + image[y, x - 1] + image[y + 1, x - 1] + image[y + 1, x]) / 5

                if (x >= 1 and x < w - 1) and (y >= 1 and y < h - 1):  # interior pixels, at least 1 away from the border
                    image[y, x] = Add(x, y, image, Matrix_Size) / Matrix_Size
                # BGRA_image[y, x, 3] = 255 - gray[y, x]
    return image


def Add(width_Center, Height_Center, image, Mask_Size):
    total = 0
    for i in range(Mask_Size):
        for j in range(Mask_Size):
            total += image[width_Center - ((Mask_Size - 1) // 2) + j, Height_Center - ((Mask_Size - 1) // 2) + i]

    return total
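A short sketch of how the enhancement helpers above might be applied to a file on disk (the file names are placeholders, not part of the commit):

import cv2
# 'sample.png' and 'sample_enhanced.png' are placeholder paths.
image = cv2.imread("sample.png", cv2.IMREAD_COLOR)
if image is not None:
    equalized = adaptive_histogram_equalization(image)  # CLAHE on the Y channel of YCrCb
    sharpened = shapen(equalized)                       # unsharp-mask sharpening
    cv2.imwrite("sample_enhanced.png", sharpened)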
62
Image_Process/load_and_ImageGenerator.py
Normal file
@@ -0,0 +1,62 @@
from Load_process.LoadData import Loding_Data_Root
from Image_Process.Image_Generator import Image_generator
from Load_process.file_processing import Process_File
from model_data_processing.processing_for_cut_image import Cut_Indepentend_Data
from Load_process.Loading_Tools import Load_Data_Prepare, Load_Data_Tools

class Load_ImageGenerator():
    '''
    Object that runs the data augmentation. It combines the augmentation handed down by a senior
    labmate with the augmentation I configured myself.
    It first loads the data, augments each part separately, and uses the augmentation to compensate
    for the class imbalance. This is only one of the experiments.

    Parameters
        standard_root: run the same augmentation as the one provided by the senior labmate
        myself_root: augmentation whose parameters I configured myself
        IndependentDataRoot: folder path where the data is written back
        Herpeslabels: herpes classes
        MonKeyPoxlabels: monkeypox classes (monkeypox, chickenpox, normal)
        herpes_data: merges the herpes dataset into one list
        MonkeyPox_data: merges the MonkeyPox dataset into one list
    '''
    def __init__(self, Training_Root, Test_Root, Validation_Root, Generator_Root, Labels) -> None:
        self.Training_Root = Training_Root
        self.TestRoot = Test_Root
        self.ValidationRoot = Validation_Root
        self.GeneratoRoot = Generator_Root
        self.Labels = Labels

    def process_main(self, Data_Length : int):
        File = Process_File()
        Prepare = Load_Data_Prepare()
        load = Loding_Data_Root(self.Labels, self.Training_Root, self.GeneratoRoot)
        Indepentend = Cut_Indepentend_Data(self.Training_Root, self.Labels)
        Load_Tool = Load_Data_Tools()
        Generator = Image_generator(self.GeneratoRoot, self.Labels)

        # split the test data off into its own folder
        test_size = 0.1
        Indepentend.IndependentData_main(self.TestRoot, test_size)

        # split the validation data off into its own folder
        test_size = 0.1
        Indepentend.IndependentData_main(self.ValidationRoot, test_size)

        if not File.Judge_File_Exist(self.GeneratoRoot):  # if the folder does not exist yet
            # decide how many lists are needed
            Prepare.Set_Data_Content([], Data_Length)

            # build the loading dictionary and return the file paths
            Prepare.Set_Label_List(self.Labels)
            Prepare.Set_Data_Dictionary(Prepare.Get_Label_List(), Prepare.Get_Data_Content(), Data_Length)
            Original_Dict_Data_Root = Prepare.Get_Data_Dict()
            get_all_original_image_data = Load_Tool.get_data_root(self.Training_Root, Original_Dict_Data_Root, Prepare.Get_Label_List())

            # save the augmented data
            Generator.Processing_Main(get_all_original_image_data)  # run the augmentation
        else:  # the folder already exists
            print("standard data and myself data are exist\n")

        # load the files
        return load.process_main()
38
Load_process/LoadData.py
Normal file
@@ -0,0 +1,38 @@
from Load_process.file_processing import Process_File
from Load_process.Loading_Tools import Load_Data_Prepare, Load_Data_Tools
from merge_class.merge import merge

class Loding_Data_Root(Process_File):
    def __init__(self, Labels, Training_Root, Generator_Root):
        self.Label_List = Labels
        self.Train_Root = Training_Root
        self.Generator_Root = Generator_Root

        super().__init__()

    def process_main(self):
        '''Load the Training and Image Generator data.'''
        Merge = merge()

        get_Image_Data = self.get_Image_data_roots(self.Train_Root)
        Get_ImageGenerator_Image_Data = self.get_Image_data_roots(self.Generator_Root)

        Get_Total_Image_Data_Root = Merge.merge_dict_to_dict(get_Image_Data, Get_ImageGenerator_Image_Data)

        return Get_Total_Image_Data_Root

    def get_Image_data_roots(self, DataRoot) -> dict:
        Prepare = Load_Data_Prepare()
        Loading_Tool = Load_Data_Tools()

        # Set the dictionary's content on the Load_Data_Prepare object
        Prepare.Set_Label_List(self.Label_List)
        Prepare.Set_Data_Content([], len(self.Label_List))
        Prepare.Set_Data_Dictionary(Prepare.Get_Label_List(), Prepare.Get_Data_Content(), len(self.Label_List))

        # Get every classification's data paths
        get_image_data = Loading_Tool.get_data_root(DataRoot, Prepare.Get_Data_Dict(), Prepare.Get_Label_List())

        # return get_NPC_original_image_data
        return get_image_data
78
Load_process/Load_Indepentend.py
Normal file
@@ -0,0 +1,78 @@
from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
from model_data_processing.processing import shuffle_data
from merge_class.merge import merge
from Load_process.LoadData import Load_Data_Prepare, Load_Data_Tools

class Load_Indepentend_Data():
    def __init__(self, Labels, OneHot_Encording):
        '''
        Object that loads the held-out (independent) image sets.
        There are 2 labels, converted to one-hot encoding:
            [0, 1] = NPC_negative
            [1, 0] = NPC_positive
        '''
        self.merge = merge()
        self.Labels = Labels
        self.OneHot_Encording = OneHot_Encording

    def process_main(self, Test_data_root, Validation_data_root):
        self.test, self.test_label = self.get_Independent_image(Test_data_root)
        print("\ntest_labels has " + str(len(self.test_label)) + " samples\n")

        self.validation, self.validation_label = self.get_Independent_image(Validation_data_root)
        print("validation_labels has " + str(len(self.validation_label)) + " samples\n")

    def get_Independent_image(self, independent_DataRoot):
        image_processing = Read_image_and_Process_image()

        classify_image = []
        Total_Dict_Data_Root = self.Get_Independent_data_Root(independent_DataRoot)  # load the paths of the held-out set
        Total_Dict_Data_Root = self.Specified_Amount_Of_Data(Total_Dict_Data_Root)  # shuffle and keep the requested number of samples
        Total_List_Data_Root = [Total_Dict_Data_Root[self.Labels[0]], Total_Dict_Data_Root[self.Labels[1]]]

        test_label, Classify_Label = [], []
        i = 0  # counter for classify_image, also counts the total number of samples
        for test_title in Total_List_Data_Root:  # read the files from every path
            test_label = image_processing.make_label_list(len(test_title), self.OneHot_Encording[i])  # build a label list matching the number of images
            print(self.Labels[i] + " has " + str(len(test_label)) + " samples")

            classify_image.append(test_title)
            Classify_Label.append(test_label)
            i += 1

        original_test_root = self.merge.merge_data_main(classify_image, 0, 2)
        original_test_label = self.merge.merge_data_main(Classify_Label, 0, 2)

        test = []
        test = image_processing.Data_Augmentation_Image(original_test_root)
        test, test_label = image_processing.image_data_processing(test, original_test_label)
        test = image_processing.normalization(test)

        return test, test_label


    def Get_Independent_data_Root(self, load_data_root):
        Prepare = Load_Data_Prepare()
        Load_Tool = Load_Data_Tools()

        Prepare.Set_Data_Content([], len(self.Labels))
        Prepare.Set_Data_Dictionary(self.Labels, Prepare.Get_Data_Content(), 2)
        Get_Data_Dict_Content = Prepare.Get_Data_Dict()
        Total_Data_Roots = Load_Tool.get_data_root(load_data_root, Get_Data_Dict_Content, self.Labels)

        return Total_Data_Roots

    def Specified_Amount_Of_Data(self, Data):  # shuffle, then trim the larger class down to the size of the smaller one
        Data = shuffle_data(Data, self.Labels, 2)
        tmp = []
        if len(Data[self.Labels[0]]) >= len(Data[self.Labels[1]]):
            for i in range(len(Data[self.Labels[1]])):
                tmp.append(Data[self.Labels[0]][i])
            Data[self.Labels[0]] = tmp
        else:
            for i in range(len(Data[self.Labels[0]])):
                tmp.append(Data[self.Labels[1]][i])
            Data[self.Labels[1]] = tmp
        return Data
90
Load_process/Loading_Tools.py
Normal file
@@ -0,0 +1,90 @@
import os
import glob

class Load_Data_Prepare:
    def __init__(self) -> None:
        self.__Label_List = []
        self.__Data_List = []
        self.__Contect_Dictionary = {}
        self.__Final_Dict_data = {}
        self.__PreSave_Data_Root = []  # every location the data should be read from
        self.__Data_Content = []

    def Set_Data_Content(self, Content, Length):
        tmp = []
        for i in range(Length):
            tmp.append(Content)

        self.__Data_Content = tmp

    def Set_Label_List(self, Label_List):  # prepare the label list used when loading files
        self.__Label_List = Label_List

    def Set_Data_List(self, Data_List):
        self.__Data_List = Data_List

    def Set_Data_Dictionary(self, Label : list, Content : list, Total_Label_Size : int):
        '''Merge the data into one dict.'''
        for i in range(Total_Label_Size):
            temp = {Label[i] : Content[i]}
            self.__Contect_Dictionary.update(temp)

    def Set_Final_Dict_Data(self, Name : list, Label_Root : list, Label_LabelEncoding : list, Label_Len : int):
        '''
        Name: names of the loaded data roots
        Label_Root: paths of all image data
        Label_LabelEncoding: the label-encoded data
        Label_Len: number of labels
        '''
        for i in range(Label_Len):
            temp = {Name[i] + "_Data_Root" : Label_Root[Name[i]]}
            self.__Final_Dict_data.update(temp)

        for i in range(Label_Len):
            temp = {Name[i] + "_Data_LabelEncoding" : Label_LabelEncoding[i]}
            self.__Final_Dict_data.update(temp)

    def Set_PreSave_Data_Root(self, PreSave_Roots : list):
        for Root in PreSave_Roots:
            self.__PreSave_Data_Root.append(Root)

    def Get_Label_List(self):
        '''
        Read the private label list.
        Set_Label_List must be called first to put the required labels in place.
        '''
        return self.__Label_List

    def Get_Data_List(self):
        return self.__Data_List

    def Get_Data_Dict(self):
        return self.__Contect_Dictionary

    def Get_Final_Data_Dict(self):
        return self.__Final_Dict_data

    def Get_PreSave_Data_Root(self):
        return self.__PreSave_Data_Root

    def Get_Data_Content(self):
        return self.__Data_Content

class Load_Data_Tools():
    def __init__(self) -> None:
        pass

    def get_data_root(self, root, data_dict, classify_label, judge = True) -> dict :
        '''Collect the data paths for every class.'''
        for label in classify_label:
            if judge:
                path = os.path.join(root, label, "*")
            else:
                path = os.path.join(root, "*")
            path = glob.glob(path)
            data_dict[label] = path
        return data_dict
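To make the returned dictionary shape concrete, a small sketch of get_data_root with an assumed folder layout (the dataset path is a placeholder; the labels are the ones set in Training_Tools/Tools.py):

# Assumed layout: ../Dataset/Training/Mixed/<label>/<image files>
tools = Load_Data_Tools()
prepare = Load_Data_Prepare()
prepare.Set_Label_List(["stomach_cancer_Crop", "Normal_Crop"])
prepare.Set_Data_Content([], 2)
prepare.Set_Data_Dictionary(prepare.Get_Label_List(), prepare.Get_Data_Content(), 2)

roots = tools.get_data_root("../Dataset/Training/Mixed", prepare.Get_Data_Dict(), prepare.Get_Label_List())
# roots is now {"stomach_cancer_Crop": [...paths...], "Normal_Crop": [...paths...]}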
BIN
Load_process/__pycache__/LoadData.cpython-310.pyc
Normal file
Binary file not shown.
BIN
Load_process/__pycache__/LoadData.cpython-311.pyc
Normal file
Binary file not shown.
BIN
Load_process/__pycache__/LoadData.cpython-39.pyc
Normal file
Binary file not shown.
BIN
Load_process/__pycache__/Load_Indepentend.cpython-310.pyc
Normal file
Binary file not shown.
BIN
Load_process/__pycache__/Load_Indepentend.cpython-311.pyc
Normal file
Binary file not shown.
BIN
Load_process/__pycache__/Load_Indepentend_Data.cpython-310.pyc
Normal file
Binary file not shown.
BIN
Load_process/__pycache__/Loading_Tools.cpython-310.pyc
Normal file
Binary file not shown.
BIN
Load_process/__pycache__/Loading_Tools.cpython-311.pyc
Normal file
Binary file not shown.
BIN
Load_process/__pycache__/file_processing.cpython-310.pyc
Normal file
Binary file not shown.
BIN
Load_process/__pycache__/file_processing.cpython-311.pyc
Normal file
Binary file not shown.
BIN
Load_process/__pycache__/file_processing.cpython-39.pyc
Normal file
Binary file not shown.
49
Load_process/file_processing.py
Normal file
@@ -0,0 +1,49 @@
import os
import cv2
import numpy as np
import datetime
import pandas as pd

class Process_File():
    def __init__(self) -> None:
        pass

    def JudgeRoot_MakeDir(self, file_root):  # check whether the folder exists before deciding whether to create it
        if self.Judge_File_Exist(file_root):
            return True
        else:
            self.Make_Dir(file_root)
            return False

    def Judge_File_Exist(self, file_root):
        '''Return True if the path exists, otherwise False.'''
        if os.path.exists(file_root):
            return True
        else:
            return False

    def Make_Dir(self, file_root):  # create the folder
        os.makedirs(file_root)

    def Make_Save_Root(self, FileName, File_root):  # join the path
        return os.path.join(File_root, FileName)

    def Save_CV2_File(self, FileName, save_root, image):  # save an image with OpenCV
        save_root = self.Make_Save_Root(FileName, save_root)
        cv2.imwrite(save_root, image)

    def Save_NPY_File(self, FileName, save_root, image):  # save a .npy file
        save_root = self.Make_Save_Root(FileName, save_root)
        np.save(save_root, image)

    def Save_CSV_File(self, file_name, data):  # save the training results
        Save_Root = '../Result/save_the_train_result(' + str(datetime.date.today()) + ")"
        self.JudgeRoot_MakeDir(Save_Root)
        modelfiles = self.Make_Save_Root(file_name + ".csv", Save_Root)  # join the file name and the folder into a full path
        data.to_csv(modelfiles, mode = "a")

    def Save_TXT_File(self, content, File_Name):
        model_dir = '../Result/save_the_train_result(' + str(datetime.date.today()) + ")"  # output folder: save_the_train_result + today's date
        self.JudgeRoot_MakeDir(model_dir)
        modelfiles = self.Make_Save_Root(File_Name + ".txt", model_dir)  # join the file name and the folder into a full path
        with open(modelfiles, mode = 'a') as file:
            file.write(content)
15
Model_Loss/Loss.py
Normal file
@@ -0,0 +1,15 @@
from torch import nn
from torch.nn import functional


class Entropy_Loss(nn.Module):
    def __init__(self):
        super(Entropy_Loss, self).__init__()

    def forward(self, outputs, labels):
        # Example: mean squared error could be used as the loss instead
        # outputs = torch.argmax(outputs, 1)
        # outputs = outputs.float()
        labels = labels.float()
        loss = functional.binary_cross_entropy(outputs, labels)
        return loss
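binary_cross_entropy expects probabilities in [0, 1] and float targets of the same shape, so a sketch of calling this loss might look like the following (tensor shapes and values are illustrative):

import torch

criterion = Entropy_Loss()
logits = torch.randn(4, 2, requires_grad=True)
outputs = torch.sigmoid(logits)                          # probabilities in [0, 1]
labels = torch.tensor([[0, 1], [1, 0], [0, 1], [1, 0]])  # one-hot integer labels
loss = criterion(outputs, labels)                        # labels are cast to float inside forward()
loss.backward()
print(loss.item())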
32
Processing_image.py
Normal file
@@ -0,0 +1,32 @@
from merge_class.merge import merge
from Load_process.Loading_Tools import Load_Data_Prepare
from Load_process.LoadData import Loding_Data_Root
from Training_Tools.Tools import Tool
from Read_and_process_image.ReadAndProcess import Read_image_and_Process_image
from matplotlib import pyplot as plt

if __name__ == "__main__":
    Merge = merge()
    read = Read_image_and_Process_image()
    tool = Tool()
    Prepare = Load_Data_Prepare()

    tool.Set_Labels()
    tool.Set_Save_Roots()
    Labels = tool.Get_Data_Label()
    Trainig_Root, Testing_Root, Validation_Root = tool.Get_Save_Roots(2)

    load = Loding_Data_Root(Labels, Trainig_Root, "")
    Data_Root = load.get_Image_data_roots(Trainig_Root)

    # put the data into a dict
    Prepare.Set_Final_Dict_Data(Labels, Data_Root, [[], []], 2)
    Final_Dict_Data = Prepare.Get_Final_Data_Dict()
    keys = list(Final_Dict_Data.keys())

    training_data = Merge.merge_all_image_data(Final_Dict_Data[keys[0]], Final_Dict_Data[keys[1]])  # merge the training data into one list

    Image = read.Data_Augmentation_Image(training_data)
    plt.imshow(Image[0])
    plt.show()
52
README.md
Normal file
@@ -0,0 +1,52 @@
main.py: main program file

## load_process
### Reads image files, splits off the independent data (test, validation), loads the independent data, and handles general file operations
File_Process : main file-handling class; opening files, creating folders, and checking whether a file exists are its responsibility. A plain class and the parent of LoadData
LoadData : main loading class; every loading action starts here. Inherits File_Process
Cutting_Indepentend_Image : class that loads the independent data (testing, validation)

## Image_Process
### Handles data augmentation and image processing
* Generator_Content : builds the basic generator items; parent class of Image_Generator
* Image_Generator : produces the augmented data and writes it to files. Inherits Generator_Content
* image_enhancement : performs the image processing and returns the data

## Model_Tools
### Basic building blocks of the models, including Convolution, Dense, and other model components
* All_Model_Tools : add-on tools for every model; parent class of all of them

## CNN
### All CNN tools and application architectures
* CNN_Tools : convolution-layer tools, covering 1-D, 2-D and 3-D convolutions. Parent class of CNN_Application; inherits All_Model_Tools
* CNN_Application : the convolution application architecture. Inherits CNN_Tools

## Dense
### All Dense applications
* Dense_Application : fully connected layer tools, covering plain Dense layers and Dense layers with added regularization. Inherits All_Model_Tools

## Model_Construction
### All model architectures used in the experiments
* Model_Constructions : the experimental architecture of every model

## Data_Merge
### Handles merging of data
* Merge : merges Dicts and Lists into a List and exports it

## initalization
### Initializes specific objects
* Img_initalization : initialization for image data
* Data_Initalization : initialization for numerical data

## Validation_Program
### Checks data types and input errors inside the code
* Validation : validates code errors

## Draw
### Plotting tools
* Draw_Tools : tools for drawing the confusion matrix and the training curves
* Grad_CAM : tool for drawing the model-visualization heat map

## Experiment
### Main program that runs the experiments
* Experiment : loads the files, sets the model-compile details, runs the training, and validates the results
75
Read_and_process_image/ReadAndProcess.py
Normal file
@@ -0,0 +1,75 @@
import cv2
import numpy as np

class Read_image_and_Process_image:
    def __init__(self) -> None:
        pass

    def get_data(self, path):
        '''Read a single image file.'''
        img_size = 512  # resized image size
        try:
            img_arr = cv2.imread(path, cv2.IMREAD_COLOR)  # read the file (colour)
            # img_arr = cv2.imread(path, cv2.IMREAD_GRAYSCALE)  # read the file (grayscale)
            resized_arr = cv2.resize(img_arr, (img_size, img_size))  # resize the image
        except Exception as e:
            print(e)

        return resized_arr

    def Data_Augmentation_Image(self, path):
        resized_arr = []

        for p in path:
            img_size = 512  # resized image size
            try:
                img_arr = cv2.imread(p, cv2.IMREAD_COLOR)  # read the file (colour)
                # img_arr = cv2.imread(path, cv2.IMREAD_GRAYSCALE)  # read the file (grayscale)
                resized_arr.append(cv2.resize(img_arr, (img_size, img_size)))  # resize the image
            except Exception as e:
                print(e)

        return np.array(resized_arr)

    def image_data_processing(self, data, label):
        '''Post-process the images after loading.'''
        img_size = 512
        data = np.asarray(data).astype(np.float32)  # convert the list of images to an np.array
        data = data.reshape(-1, img_size, img_size, 3)  # reshape the array
        label = np.array(label)  # convert the labels from a list to a numpy array
        return data, label

    def normalization(self, images):
        imgs = []
        for img in images:
            img = np.asarray(img).astype(np.float32)  # convert the image to an np.array
            img = img / 255  # normalize the image data
            imgs.append(img)

        return np.array(imgs)

    # def load_numpy_data(self, file_names):
    #     '''Load numpy image files and run image processing to improve feature extraction.'''
    #     i = 0
    #     numpy_image = []
    #     original_image = []
    #     for file_name in file_names:
    #         compare = str(file_name).split(".")
    #         if compare[-1] == "npy":
    #             image = np.load(file_name)   # load the image file
    #             numpy_image.append(image)    # collect everything into one array
    #         else:
    #             original_image.append(file_name)

    #     original_image = self.get_data(original_image)

    #     for file in original_image:
    #         numpy_image.append(file)

    #     return numpy_image

    def make_label_list(self, length, content):
        '''Build the label list.'''
        label_list = []
        for i in range(length):
            label_list.append(content)
        return label_list
0
Read_and_process_image/__init__.py
Normal file
BIN
Read_and_process_image/__pycache__/ReadAndProcess.cpython-39.pyc
Normal file
Binary file not shown.
BIN
Read_and_process_image/__pycache__/__init__.cpython-310.pyc
Normal file
Binary file not shown.
BIN
Read_and_process_image/__pycache__/__init__.cpython-311.pyc
Normal file
Binary file not shown.
BIN
Read_and_process_image/__pycache__/__init__.cpython-39.pyc
Normal file
Binary file not shown.
29
SCP_Process/Scp_Process.py
Normal file
@@ -0,0 +1,29 @@
import paramiko
from scp import SCPClient
import os
from Load_process.file_processing import Process_File

class SCP():
    def __init__(self) -> None:
        pass

    def createSSHClient(self, server, port, user, password):
        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy)
        client.connect(server, port, user, password)

        return client

    def Process_Main(self, Remote_Save_Root, Local_Save_Root, File_Name):
        Process_File_Tool = Process_File()

        ssh = self.createSSHClient("10.1.29.28", 31931, "root", "whitekirin")

        Process_File_Tool.JudgeRoot_MakeDir(Local_Save_Root)

        with SCPClient(ssh.get_transport()) as scp:
            scp.get(Remote_Save_Root, Local_Save_Root + "/" + File_Name)

        os.remove(Remote_Save_Root + "/" + File_Name)

        print("Transfer finished\n")
BIN
SCP_Process/__pycache__/Scp_Process.cpython-310.pyc
Normal file
Binary file not shown.
BIN
To_load_Input_Data/__pycache__/read_input_data.cpython-39.pyc
Normal file
Binary file not shown.
98
To_load_Input_Data/read_input_data.py
Normal file
@@ -0,0 +1,98 @@
import tensorflow as tf
import os

class read_Input_Data:
    def __init__(self) -> None:
        pass

    def save_tfrecords(self, images, label):
        '''Save the data as a TFRecord dataset.'''
        image_width, image_height = 64, 64
        image_channel = 3
        tfrecod_data_root = "../../Dataset/tfrecode_Dataset/tfrecod_data.tfrecords"
        if not os.path.exists(tfrecod_data_root):
            os.makedirs(tfrecod_data_root)

        TFWriter = tf.python_io.TFRecordWriter(tfrecod_data_root)

        try:
            for i in range(len(images)):
                if images[i] is None:
                    print('Error image:' + images[i])
                else:
                    # convert the image to a string
                    image_raw = str(images[i])

                    # combine the tf.train.Feature entries into tf.train.Features
                    train_feature = tf.train.Features(feature={
                        'Label' : self.int64_feature(label),
                        'image_raw' : self.bytes_feature(image_raw),
                        'channel' : self.int64_feature(image_channel),
                        'width' : self.int64_feature(image_width),
                        'height' : self.int64_feature(image_height)}
                    )

                    # wrap tf.train.Features into a tf.train.Example
                    train_example = tf.train.Example(features = train_feature)

                    # write the serialized tf.train.Example in TFRecord format
                    TFWriter.write(train_example.SerializeToString())

        except Exception as e:
            print(e)

        TFWriter.close()
        print('Transform done!')

        return tfrecod_data_root

    # convert raw values into tf.train.Feature entries
    def int64_feature(self, value):
        if not isinstance(value, list):
            value = [value]
        return tf.train.Feature(int64_list=tf.train.Int64List(value=value))

    def bytes_feature(self, value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    def Data_Decompile(self, example):
        '''Parse (decompile) a TFRecord example.'''
        feature_description = {
            'data': tf.io.FixedLenFeature([], tf.string),
            'label': tf.io.FixedLenFeature([], tf.float32),
        }
        parsed_example = tf.io.parse_single_example(example, features=feature_description)

        x_sample = tf.io.parse_tensor(parsed_example['data'], tf.float32)
        y_sample = parsed_example['label']

        return x_sample, y_sample

    def load_dataset(self, filepaths):
        '''
        Load the TFRecord dataset.
        * dataset.shuffle(shuffle_buffer_size):
            Randomly shuffles the elements of this dataset.

            The dataset fills a buffer with buffer_size elements, then randomly samples elements
            from that buffer, replacing the selected elements with new ones. For perfect shuffling,
            the buffer size must be greater than or equal to the full size of the dataset.

            For example, if the dataset contains 10,000 elements but buffer_size is set to 1,000,
            shuffle will initially select a random element from only the first 1,000 elements in
            the buffer. Once an element is selected, its slot in the buffer is filled by the next
            (i.e. the 1,001st) element, keeping the buffer at 1,000 elements.
        '''
        shuffle_buffer_size = 700
        batch_size = 128
        tfrecod_data_root = "../../Dataset/tfrecode_Dataset"

        dataset = tf.data.TFRecordDataset(filepaths)
        dataset = dataset.shuffle(shuffle_buffer_size)
        dataset = dataset.map(map_func=self.Data_Decompile, num_parallel_calls= 8)
        dataset = dataset.batch(batch_size).prefetch(64)

        # build a file-name queue (TF1-style API; note that 'filename' is not defined in this scope)
        filename_queue = tf.train.string_input_producer([filename],
                                                        shuffle=True,
                                                        num_epochs=3)

        return dataset
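A minimal sketch of the intended tf.data read path, written independently of the TF1-style queue lines above (the record path is a placeholder and the feature names follow Data_Decompile):

import tensorflow as tf

def parse(example):
    features = {
        "data": tf.io.FixedLenFeature([], tf.string),
        "label": tf.io.FixedLenFeature([], tf.float32),
    }
    parsed = tf.io.parse_single_example(example, features)
    return tf.io.parse_tensor(parsed["data"], tf.float32), parsed["label"]

dataset = (tf.data.TFRecordDataset("../../Dataset/tfrecode_Dataset/tfrecod_data.tfrecords")  # placeholder path
           .shuffle(700)
           .map(parse, num_parallel_calls=tf.data.AUTOTUNE)
           .batch(128)
           .prefetch(64))
for images, labels in dataset.take(1):   # one shuffled, parsed, batched element
    print(images.shape, labels.shape)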
96
Training_Tools/Tools.py
Normal file
@@ -0,0 +1,96 @@
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from torch.nn import functional

class Tool:
    def __init__(self) -> None:
        self.__ICG_Training_Root = ""
        self.__Normal_Training_Root = ""
        self.__Comprehensive_Training_Root = ""

        self.__ICG_Test_Data_Root = ""
        self.__Normal_Test_Data_Root = ""
        self.__Comprehensive_Testing_Root = ""

        self.__ICG_Validation_Data_Root = ""
        self.__Normal_Validation_Data_Root = ""
        self.__Comprehensive_Validation_Root = ""

        self.__ICG_ImageGenerator_Data_Root = ""
        self.__Normal_ImageGenerator_Data_Root = ""
        self.__Comprehensive_Generator_Root = ""

        self.Training_Zip = ""
        self.Validation_Zip = ""
        self.Testing_Zip = ""

        self.__Labels = []
        self.__OneHot_Encording = []

    def Set_Labels(self):
        self.__Labels = ["stomach_cancer_Crop", "Normal_Crop"]

    def Set_Save_Roots(self):
        self.__ICG_Training_Root = "../Dataset/Training/CA_ICG"
        self.__Normal_Training_Root = "../Dataset/Training/CA"
        self.__Comprehensive_Training_Root = "../Dataset/Training/Mixed"

        self.__ICG_Test_Data_Root = "../Dataset/Training/CA_ICG_TestData"
        self.__Normal_Test_Data_Root = "../Dataset/Training/Normal_TestData"
        self.__Comprehensive_Testing_Root = "../Dataset/Training/Comprehensive_TestData"

        self.__ICG_Validation_Data_Root = "../Dataset/Training/CA_ICG_ValidationData"
        self.__Normal_Validation_Data_Root = "../Dataset/Training/Normal_ValidationData"
        self.__Comprehensive_Validation_Root = "../Dataset/Training/Comprehensive_ValidationData"

        self.__ICG_ImageGenerator_Data_Root = "../Dataset/Training/ICG_ImageGenerator"
        self.__Normal_ImageGenerator_Data_Root = "../Dataset/Training/Normal_ImageGenerator"
        self.__Comprehensive_Generator_Root = "../Dataset/Training/Comprehensive_ImageGenerator"

    def Set_OneHotEncording(self, content, Number_Of_Classes):
        OneHot_labels = functional.one_hot(content, Number_Of_Classes)
        return OneHot_labels

    def Set_Zips(self, Datas, Labels, Address_Name):
        if Address_Name == "Training":
            self.Training_Zip = zip(Datas, Labels)
        if Address_Name == "Validation":
            self.Validation_Zip = zip(Datas, Labels)
        if Address_Name == "Testing":
            self.Testing_Zip = zip(Datas, Labels)

    def Get_Data_Label(self):
        '''Return the labels needed for the data.'''
        return self.__Labels

    def Get_Save_Roots(self, choose):
        '''Returns the Train, test, validation roots.
        choose = 1 => the ICG roots
        choose = 2 => the Normal roots
        otherwise  => the comprehensive (mixed) roots
        '''
        if choose == 1:
            return self.__ICG_Training_Root, self.__ICG_Test_Data_Root, self.__ICG_Validation_Data_Root
        if choose == 2:
            return self.__Normal_Training_Root, self.__Normal_Test_Data_Root, self.__Normal_Validation_Data_Root
        else:
            return self.__Comprehensive_Training_Root, self.__Comprehensive_Testing_Root, self.__Comprehensive_Validation_Root

    def Get_Generator_Save_Roots(self, choose):
        '''Returns the generator root for the chosen dataset.'''
        if choose == 1:
            return self.__ICG_ImageGenerator_Data_Root
        if choose == 2:
            return self.__Normal_ImageGenerator_Data_Root
        else:
            return self.__Comprehensive_Generator_Root

    def Get_OneHot_Encording_Label(self):
        return self.__OneHot_Encording

    def Get_Zip(self):
        return self.Training_Zip, self.Testing_Zip, self.Validation_Zip
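For reference, a small sketch of the one-hot helper used in Set_OneHotEncording; torch.nn.functional.one_hot expects integer class indices (the example labels are illustrative):

import torch
from torch.nn import functional

class_indices = torch.tensor([0, 1, 1, 0])       # integer labels
one_hot = functional.one_hot(class_indices, 2)   # -> tensor of shape (4, 2)
print(one_hot)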
0
Training_Tools/__init__.py
Normal file
BIN
Training_Tools/__pycache__/Tools.cpython-310.pyc
Normal file
Binary file not shown.
BIN
Training_Tools/__pycache__/__init__.cpython-310.pyc
Normal file
Binary file not shown.
14
_validation/ValidationTheEnterData.py
Normal file
@@ -0,0 +1,14 @@
class validation_the_enter_data:
    def __init__(self) -> None:
        pass

    def validation_string(self, content, Comparison):
        if content == Comparison:
            return True
        else:
            return False

    def validation_type(self, enter, Type: type):
        if not isinstance(enter, Type):
            return False
        else:
            return True
BIN
_validation/__pycache__/ValidationTheEnterData.cpython-310.pyc
Normal file
Binary file not shown.
BIN
_validation/__pycache__/ValidationTheEnterData.cpython-311.pyc
Normal file
Binary file not shown.
BIN
_validation/__pycache__/ValidationTheEnterData.cpython-39.pyc
Normal file
Binary file not shown.
0
all_models_tools/__init__.py
Normal file
BIN
all_models_tools/__pycache__/__init__.cpython-310.pyc
Normal file
Binary file not shown.
BIN
all_models_tools/__pycache__/__init__.cpython-311.pyc
Normal file
Binary file not shown.
BIN
all_models_tools/__pycache__/__init__.cpython-39.pyc
Normal file
Binary file not shown.
BIN
all_models_tools/__pycache__/all_model_tools.cpython-310.pyc
Normal file
Binary file not shown.
BIN
all_models_tools/__pycache__/all_model_tools.cpython-311.pyc
Normal file
Binary file not shown.
BIN
all_models_tools/__pycache__/all_model_tools.cpython-39.pyc
Normal file
Binary file not shown.
68
all_models_tools/all_model_tools.py
Normal file
@@ -0,0 +1,68 @@
from Load_process.file_processing import Process_File
import datetime
import torch

# def attention_block(input):
#     channel = input.shape[-1]

#     GAP = GlobalAveragePooling2D()(input)

#     block = Dense(units = channel // 16, activation = "relu")(GAP)
#     block = Dense(units = channel, activation = "sigmoid")(block)
#     block = Reshape((1, 1, channel))(block)

#     block = Multiply()([input, block])

#     return block

class EarlyStopping:
    def __init__(self, patience=74, verbose=False, delta=0):
        self.patience = patience
        self.verbose = verbose
        self.delta = delta
        self.counter = 0
        self.best_loss = None
        self.early_stop = False

    def __call__(self, val_loss, model, save_path):
        if self.best_loss is None:
            self.best_loss = val_loss
            self.save_checkpoint(val_loss, model, save_path)
        elif val_loss > self.best_loss + self.delta:
            self.counter += 1
            if self.verbose:
                print(f"EarlyStopping counter: {self.counter} out of {self.patience}")
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_loss = val_loss
            self.save_checkpoint(val_loss, model, save_path)
            self.counter = 0

    def save_checkpoint(self, val_loss, model, save_path):
        torch.save(model.state_dict(), save_path)
        if self.verbose:
            print(f"Validation loss decreased ({self.best_loss:.6f} --> {val_loss:.6f}). Saving model to {save_path}")


def call_back(model_name, index, optimizer):
    File = Process_File()

    model_dir = '../Result/save_the_best_model/' + model_name
    File.JudgeRoot_MakeDir(model_dir)
    modelfiles = File.Make_Save_Root('best_model( ' + str(datetime.date.today()) + " )-" + str(index) + ".weights.h5", model_dir)

    # model_mckp = ModelCheckpoint(modelfiles, monitor='val_loss', save_best_only=True, save_weights_only = True, mode='auto')

    earlystop = EarlyStopping(patience=74, verbose=True)  # early stopping

    reduce_lr = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        factor = 0.94,  # factor by which the learning rate is reduced: new_lr = lr * factor
        patience = 2,   # number of epochs with no improvement after which the learning rate is reduced
        verbose = 0,
        mode = 'min',
        min_lr = 0      # lower bound on the learning rate
    )

    return modelfiles, earlystop, reduce_lr
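A condensed sketch of how call_back's return values might drive a training loop; the model, the validation loss, and the epoch count are placeholders, not part of the commit:

import torch

model = torch.nn.Linear(10, 2)                      # placeholder model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
save_path, earlystop, reduce_lr = call_back("demo_model", 0, optimizer)

for epoch in range(5):                              # placeholder epoch count
    val_loss = float(torch.rand(1))                 # stand-in for the real validation loss
    earlystop(val_loss, model, save_path)           # checkpoints on improvement, counts otherwise
    reduce_lr.step(val_loss)                        # lowers the LR after 2 epochs without improvement
    if earlystop.early_stop:
        break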
116
all_models_tools/pre_train_model_construction.py
Normal file
@@ -0,0 +1,116 @@
from all_models_tools.all_model_tools import attention_block
from keras.activations import softmax, sigmoid
from keras.applications import VGG16, VGG19, ResNet50, ResNet50V2, ResNet101, ResNet101V2, ResNet152, ResNet152V2, InceptionV3, InceptionResNetV2, MobileNet, MobileNetV2, DenseNet121, NASNetLarge, Xception
from keras.layers import GlobalAveragePooling2D, Dense, Flatten
from keras import regularizers
from keras.layers import Add
from application.Xception_indepentment import Xception_indepentment


def Original_VGG19_Model():
    vgg19 = VGG19(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    GAP = GlobalAveragePooling2D()(vgg19.output)
    dense = Dense(units = 4096, activation = "relu")(GAP)
    dense = Dense(units = 4096, activation = "relu")(dense)
    output = Dense(units = 2, activation = "softmax")(dense)

    return vgg19.input, output


def Original_ResNet50_model():
    resnet50 = ResNet50(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    GAP = GlobalAveragePooling2D()(resnet50.output)
    dense = Dense(units = 2, activation = "softmax")(GAP)

    return resnet50.input, dense


def Original_NASNetLarge_model():
    nasnetlarge = NASNetLarge(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    GAP = GlobalAveragePooling2D()(nasnetlarge.output)
    dense = Dense(units = 2, activation = "softmax")(GAP)

    return nasnetlarge.input, dense


def Original_DenseNet121_model():
    densenet121 = DenseNet121(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    GAP = GlobalAveragePooling2D()(densenet121.output)
    dense = Dense(units = 2, activation = "softmax")(GAP)

    return densenet121.input, dense


def Original_Xception_model():
    xception = Xception(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    GAP = GlobalAveragePooling2D()(xception.output)
    dense = Dense(units = 2, activation = "softmax")(GAP)

    return xception.input, dense


def Original_VGG16_Model():
    vgg16 = VGG16(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    flatten = Flatten()(vgg16.output)
    dense = Dense(units = 4096, activation = "relu")(flatten)
    dense = Dense(units = 4096, activation = "relu")(dense)
    output = Dense(units = 2, activation = "softmax")(dense)

    return vgg16.input, output


def Original_ResNet50v2_model():
    resnet50v2 = ResNet50V2(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    GAP = GlobalAveragePooling2D()(resnet50v2.output)
    dense = Dense(units = 2, activation = "softmax")(GAP)

    return resnet50v2.input, dense


def Original_ResNet101_model():
    resnet101 = ResNet101(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    GAP = GlobalAveragePooling2D()(resnet101.output)
    dense = Dense(units = 2, activation = "softmax")(GAP)

    return resnet101.input, dense


def Original_ResNet101V2_model():
    resnet101v2 = ResNet101V2(include_top = False, weights = "imagenet", input_shape = (512, 512, 3))
    GAP = GlobalAveragePooling2D()(resnet101v2.output)
    dense = Dense(units = 2, activation = "softmax")(GAP)

    return resnet101v2.input, dense


def Original_ResNet152_model():
    resnet152 = ResNet152(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    GAP = GlobalAveragePooling2D()(resnet152.output)
    dense = Dense(units = 2, activation = "softmax")(GAP)

    return resnet152.input, dense


def Original_ResNet152V2_model():
    resnet152v2 = ResNet152V2(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    GAP = GlobalAveragePooling2D()(resnet152v2.output)
    dense = Dense(units = 2, activation = "softmax")(GAP)

    return resnet152v2.input, dense


def Original_InceptionV3_model():
    inceptionv3 = InceptionV3(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    GAP = GlobalAveragePooling2D()(inceptionv3.output)
    dense = Dense(units = 2, activation = "softmax")(GAP)

    return inceptionv3.input, dense


def Original_InceptionResNetV2_model():
    inceptionResnetv2 = InceptionResNetV2(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    GAP = GlobalAveragePooling2D()(inceptionResnetv2.output)
    dense = Dense(units = 2, activation = "softmax")(GAP)

    return inceptionResnetv2.input, dense


def Original_MobileNet_model():
    mobilenet = MobileNet(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    GAP = GlobalAveragePooling2D()(mobilenet.output)
    dense = Dense(units = 2, activation = "softmax")(GAP)

    return mobilenet.input, dense


def Original_MobileNetV2_model():
    mobilenetv2 = MobileNetV2(include_top = False, weights = "imagenet", input_shape = (200, 200, 3))
    GAP = GlobalAveragePooling2D()(mobilenetv2.output)
    dense = Dense(units = 2, activation = "softmax")(GAP)

    return mobilenetv2.input, dense
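# --- Usage sketch (editor's assumption, not shown in this commit) ---
# Each builder above returns an (input_tensor, output_tensor) pair, so a
# trainable model would presumably be assembled like this; the optimizer and
# loss below are illustrative choices, not taken from the original code.
if __name__ == "__main__":
    from keras.models import Model

    inputs, outputs = Original_VGG16_Model()
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    model.summary()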
0
all_models_tools/pytorch_Model.py
Normal file
256
application/Xception_indepentment.py
Normal file
@@ -0,0 +1,256 @@
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# SENet
# block = layers.GlobalAveragePooling2D()(residual)
# block = layers.Dense(units = residual.shape[-1] // 16, activation = "relu")(block)
# block = layers.Dense(units = residual.shape[-1], activation = "sigmoid")(block)
# block = Reshape((1, 1, residual.shape[-1]))(block)
# residual = Multiply()([residual, block])


from keras import backend
from keras import layers
from keras.layers import Reshape, Multiply, Conv1D
import math


def Xception_indepentment(input_shape=None):

    channel_axis = 1 if backend.image_data_format() == "channels_first" else -1

    img_input = layers.Input(shape=input_shape)
    x = layers.Conv2D(
        32, (3, 3), strides=(2, 2), use_bias=False, name="block1_conv1"
    )(img_input)
    x = layers.BatchNormalization(axis=channel_axis, name="block1_conv1_bn")(x)
    x = layers.Activation("relu", name="block1_conv1_act")(x)
    x = layers.Conv2D(64, (3, 3), use_bias=False, name="block1_conv2")(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block1_conv2_bn")(x)
    x = layers.Activation("relu", name="block1_conv2_act")(x)

    residual = layers.Conv2D(
        128, (1, 1), strides=(2, 2), padding="same", use_bias=False
    )(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    # Channel-attention block: adaptive 1D-conv kernel size, sigmoid gating
    kernel = int(abs((math.log(residual.shape[-1], 2) + 1) / 2))
    if kernel % 2:
        kernel_size = kernel
    else:
        kernel_size = kernel + 1

    block = layers.GlobalAveragePooling2D()(residual)
    block = Reshape(target_shape = (residual.shape[-1], 1))(block)
    block = Conv1D(filters = 1, kernel_size = kernel_size, padding = "same", use_bias = False, activation = "sigmoid")(block)
    block = Reshape((1, 1, residual.shape[-1]))(block)
    residual = Multiply()([residual, block])

    x = layers.SeparableConv2D(
        128, (3, 3), padding="same", use_bias=False, name="block2_sepconv1"
    )(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block2_sepconv1_bn")(x)
    x = layers.Activation("relu", name="block2_sepconv2_act")(x)

    x = layers.SeparableConv2D(
        128, (3, 3), padding="same", use_bias=False, name="block2_sepconv2"
    )(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block2_sepconv2_bn")(x)

    x = layers.MaxPooling2D(
        (3, 3), strides=(2, 2), padding="same", name="block2_pool"
    )(x)
    x = layers.add([x, residual])

    residual = layers.Conv2D(
        256, (1, 1), strides=(2, 2), padding="same", use_bias=False
    )(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    # Channel-attention block
    kernel = int(abs((math.log(residual.shape[-1], 2) + 1) / 2))
    if kernel % 2:
        kernel_size = kernel
    else:
        kernel_size = kernel + 1

    block = layers.GlobalAveragePooling2D()(residual)
    block = Reshape(target_shape = (residual.shape[-1], 1))(block)
    block = Conv1D(filters = 1, kernel_size = kernel_size, padding = "same", use_bias = False, activation = "sigmoid")(block)
    block = Reshape((1, 1, residual.shape[-1]))(block)
    residual = Multiply()([residual, block])

    x = layers.Activation("relu", name="block3_sepconv1_act")(x)
    x = layers.SeparableConv2D(
        256, (3, 3), padding="same", use_bias=False, name="block3_sepconv1"
    )(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block3_sepconv1_bn")(x)
    x = layers.Activation("relu", name="block3_sepconv2_act")(x)

    x = layers.SeparableConv2D(
        256, (3, 3), padding="same", use_bias=False, name="block3_sepconv2"
    )(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block3_sepconv2_bn")(x)

    x = layers.MaxPooling2D(
        (3, 3), strides=(2, 2), padding="same", name="block3_pool"
    )(x)
    x = layers.add([x, residual])

    residual = layers.Conv2D(
        728, (1, 1), strides=(2, 2), padding="same", use_bias=False
    )(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    # Channel-attention block
    kernel = int(abs((math.log(residual.shape[-1], 2) + 1) / 2))
    if kernel % 2:
        kernel_size = kernel
    else:
        kernel_size = kernel + 1

    block = layers.GlobalAveragePooling2D()(residual)
    block = Reshape(target_shape = (residual.shape[-1], 1))(block)
    block = Conv1D(filters = 1, kernel_size = kernel_size, padding = "same", use_bias = False, activation = "sigmoid")(block)
    block = Reshape((1, 1, residual.shape[-1]))(block)
    residual = Multiply()([residual, block])

    x = layers.Activation("relu", name="block4_sepconv1_act")(x)
    x = layers.SeparableConv2D(
        728, (3, 3), padding="same", use_bias=False, name="block4_sepconv1"
    )(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block4_sepconv1_bn")(x)
    x = layers.Activation("relu", name="block4_sepconv2_act")(x)

    x = layers.SeparableConv2D(
        728, (3, 3), padding="same", use_bias=False, name="block4_sepconv2"
    )(x)
    x = layers.BatchNormalization(axis=channel_axis, name="block4_sepconv2_bn")(x)

    x = layers.MaxPooling2D(
        (3, 3), strides=(2, 2), padding="same", name="block4_pool"
    )(x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = "block" + str(i + 5)

        x = layers.Activation("relu", name=prefix + "_sepconv1_act")(x)
        x = layers.SeparableConv2D(
            728,
            (3, 3),
            padding="same",
            use_bias=False,
            name=prefix + "_sepconv1",
        )(x)
        x = layers.BatchNormalization(
            axis=channel_axis, name=prefix + "_sepconv1_bn"
        )(x)
        x = layers.Activation("relu", name=prefix + "_sepconv2_act")(x)

        x = layers.SeparableConv2D(
            728,
            (3, 3),
            padding="same",
            use_bias=False,
            name=prefix + "_sepconv2",
        )(x)
        x = layers.BatchNormalization(
            axis=channel_axis, name=prefix + "_sepconv2_bn"
        )(x)
        x = layers.Activation("relu", name=prefix + "_sepconv3_act")(x)

        x = layers.SeparableConv2D(
            728,
            (3, 3),
            padding="same",
            use_bias=False,
            name=prefix + "_sepconv3",
        )(x)
        x = layers.BatchNormalization(
            axis=channel_axis, name=prefix + "_sepconv3_bn"
        )(x)

        x = layers.add([x, residual])

    residual = layers.Conv2D(
        1024, (1, 1), strides=(2, 2), padding="same", use_bias=False
    )(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    # Channel-attention block
    kernel = int(abs((math.log(residual.shape[-1], 2) + 1) / 2))
    if kernel % 2:
        kernel_size = kernel
    else:
        kernel_size = kernel + 1

    block = layers.GlobalAveragePooling2D()(residual)
    block = Reshape(target_shape = (residual.shape[-1], 1))(block)
    block = Conv1D(filters = 1, kernel_size = kernel_size, padding = "same", use_bias = False, activation = "sigmoid")(block)
    block = Reshape((1, 1, residual.shape[-1]))(block)
    residual = Multiply()([residual, block])

    x = layers.Activation("relu", name="block13_sepconv1_act")(x)
    x = layers.SeparableConv2D(
        728, (3, 3), padding="same", use_bias=False, name="block13_sepconv1"
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, name="block13_sepconv1_bn"
    )(x)
    x = layers.Activation("relu", name="block13_sepconv2_act")(x)

    x = layers.SeparableConv2D(
        1024, (3, 3), padding="same", use_bias=False, name="block13_sepconv2"
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, name="block13_sepconv2_bn"
    )(x)

    x = layers.MaxPooling2D(
        (3, 3), strides=(2, 2), padding="same", name="block13_pool"
    )(x)
    x = layers.add([x, residual])

    x = layers.SeparableConv2D(
        1536, (3, 3), padding="same", use_bias=False, name="block14_sepconv1"
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, name="block14_sepconv1_bn"
    )(x)
    x = layers.Activation("relu", name="block14_sepconv1_act")(x)

    x = layers.SeparableConv2D(
        2048, (3, 3), padding="same", use_bias=False, name="block14_sepconv2"
    )(x)
    x = layers.BatchNormalization(
        axis=channel_axis, name="block14_sepconv2_bn"
    )(x)
    x = layers.Activation("relu", name="block14_sepconv2_act")(x)

    # return the network input and the final block14 feature map
    return img_input, x
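# --- Refactoring sketch (editor's suggestion, not part of the original file) ---
# The channel-attention block above is repeated before every residual add.
# A helper like the one below expresses the same computation once: adaptive
# 1D-conv kernel size from the channel count, then sigmoid gating of the map.
def eca_attention(feature_map):
    channels = feature_map.shape[-1]
    kernel = int(abs((math.log(channels, 2) + 1) / 2))
    kernel_size = kernel if kernel % 2 else kernel + 1

    block = layers.GlobalAveragePooling2D()(feature_map)
    block = Reshape(target_shape=(channels, 1))(block)
    block = Conv1D(filters=1, kernel_size=kernel_size, padding="same",
                   use_bias=False, activation="sigmoid")(block)
    block = Reshape((1, 1, channels))(block)
    return Multiply()([feature_map, block])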
BIN
application/__pycache__/Xception.cpython-39.pyc
Normal file
Binary file not shown.
BIN
application/__pycache__/Xception_indepentment.cpython-310.pyc
Normal file
Binary file not shown.
BIN
application/__pycache__/Xception_indepentment.cpython-311.pyc
Normal file
Binary file not shown.
BIN
application/__pycache__/Xception_indepentment.cpython-39.pyc
Normal file
Binary file not shown.
BIN
best_model( 2023-10-17 )-2.h5
Normal file
Binary file not shown.
85
claculate_output_data.py
Normal file
@@ -0,0 +1,85 @@
import csv
import numpy as np

judge = input("Do the rows need to be moved? (Y/N) ")

if judge == 'y' or judge == 'Y':
    times = int(input("How many days should be moved: "))

    for i in range(times):
        date = input("Enter the date to move from: ")
        dateroot = "../Model_training_result/save_the_train_result(2024-" + date + ")/train_result.csv"
        quantity_data = int(input("How many records should be taken out: "))

        next_date = input("Which day should they be moved to? ")

        with open(dateroot, "r", newline = '') as csvFile:
            data = csv.reader(csvFile)
            data = list(data)

        with open("../Model_training_result/save_the_train_result(2024-" + next_date + ")/train_result.csv", "a+", newline = '') as csvFile1:
            writer = csv.writer(csvFile1)
            # copy the last quantity_data * 2 rows
            for i in range(quantity_data * -2, 0, 1):
                writer.writerow(data[i])
        print("Data has been moved\n")


date = input("Enter the date to calculate: ")
with open("../Model_training_result/save_the_train_result(2024-" + date + ")/train_result.csv", newline = '') as csvfile:
    rows = csv.reader(csvfile)
    row = list(rows)

calculate_loss = 0
calculate_precision = 0
calculate_recall = 0
calculate_accuracy = 0
calculate_f = 0
calculate_auc = 0

list_loss = []
list_precision = []
list_recall = []
list_accuracy = []
list_f = []
list_auc = []

# walk the five most recent result rows (every second row from the end)
for i in range(-1, -10, -2):
    calculate_loss += float(row[i][2])
    list_loss.append(float(row[i][2]))

    precision = str(row[i][3]).split("%")
    calculate_precision += float(precision[0])
    list_precision.append(float(precision[0]))

    recall = str(row[i][4]).split("%")
    calculate_recall += float(recall[0])
    list_recall.append(float(recall[0]))

    accuracy = str(row[i][5]).split("%")
    calculate_accuracy += float(accuracy[0])
    list_accuracy.append(float(accuracy[0]))

    f = str(row[i][6]).split("%")
    calculate_f += float(f[0])
    list_f.append(float(f[0]))

    auc = str(row[i][7]).split("%")
    calculate_auc += float(auc[0])
    list_auc.append(float(auc[0]))

calculate_list = [calculate_loss, calculate_precision, calculate_recall, calculate_accuracy, calculate_f, calculate_auc]
average = []
for i in range(len(calculate_list)):
    average.append(calculate_list[i] / 5)  # five rows were accumulated above

std_list = [list_precision, list_recall, list_accuracy, list_f, list_auc]
standard = []
standard.append(np.std(list_loss))
for i in range(len(std_list)):
    standard.append(np.std(std_list[i]) / 100)

for i in range(len(average)):
    print("{:.2f}±{:.3f}".format(average[i], standard[i]))
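# --- Equivalent aggregation sketch (editor's note, not part of the original script) ---
# The accumulators above amount to a mean/std over the five most recent rows;
# the same numbers could be taken directly from the collected lists, e.g.:
#
#     print("{:.2f}±{:.3f}".format(np.mean(list_loss), np.std(list_loss)))
#     print("{:.2f}±{:.3f}".format(np.mean(list_accuracy) / 100, np.std(list_accuracy) / 100))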
92
draw_tools/Grad_cam.py
Normal file
@@ -0,0 +1,92 @@
from Load_process.file_processing import Process_File
from keras.models import Model
from matplotlib import pyplot as plt
import cv2
import numpy as np
from keras import backend as K
from keras.preprocessing import image
import tensorflow as tf
import datetime


class Grad_CAM:
    def __init__(self, Label, One_Hot, Experiment_Name, Layer_Name) -> None:
        self.experiment_name = Experiment_Name
        self.Layer_Name = Layer_Name
        self.Label = Label
        self.One_Hot_Label = One_Hot
        self.Save_File_Name = self.Convert_One_Hot_To_int()

        pass

    def process_main(self, model, index, images):
        for i in range(len(images)):
            array = np.expand_dims(images[i], axis=0)  # add a batch dimension to the image
            heatmap = self.gradcam(array, model)
            self.plot_heatmap(heatmap, images[i], self.Save_File_Name[i], index, i)

        pass

    def Convert_One_Hot_To_int(self):
        return [np.argmax(Label) for Label in self.One_Hot_Label]


    def gradcam(self, Image, model, pred_index = None):
        # First, build a model that maps the input image to the activations of the
        # last convolutional layer as well as the output predictions.
        grad_model = Model(
            [model.inputs], [model.get_layer(self.Layer_Name).output, model.output]
        )

        # Then compute the gradient of the predicted class for the input image with
        # respect to the activations of the last convolutional layer.
        with tf.GradientTape() as tape:  # record the forward pass for gradients
            last_conv_layer_output, preds = grad_model(Image)
            if pred_index is None:
                pred_index = tf.argmax(preds[0])
            class_channel = preds[:, pred_index]

        # Gradient of the output neuron (top predicted or chosen) with respect to
        # the output feature map of the last convolutional layer.
        grads = tape.gradient(class_channel, last_conv_layer_output)

        # A vector where each entry is the mean gradient intensity over a specific
        # feature-map channel.
        pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

        # Multiply each channel of the feature map by "how important this channel is"
        # for the predicted class, then sum the channels to get the class-activation heatmap.
        last_conv_layer_output = last_conv_layer_output[0]
        heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
        heatmap = tf.squeeze(heatmap)

        # For visualization purposes, also normalize the heatmap between 0 and 1.
        heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
        return heatmap.numpy()

    def plot_heatmap(self, heatmap, img, Label, index, Title):
        File = Process_File()

        # ReLU
        heatmap = np.maximum(heatmap, 0)
        # normalize
        heatmap /= np.max(heatmap)

        # read the image
        # img = cv2.imread(img)
        fig, ax = plt.subplots()
        # im = cv2.resize(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB), (img.shape[1], img.shape[0]))

        # resize the image and stretch the heatmap to the same size
        img_resized = cv2.resize(img, (512, 512))
        heatmap = cv2.resize(heatmap, (512, 512))
        heatmap = np.uint8(255 * heatmap)

        img_resized = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)

        # draw the original image
        ax.imshow(img_resized, alpha=1)
        # overlay the heatmap with 0.3 opacity
        ax.imshow(heatmap, cmap='jet', alpha=0.3)

        save_root = '../Result/CNN_result_of_reading('+ str(datetime.date.today()) + " )/" + str(Label)
        File.JudgeRoot_MakeDir(save_root)
        save_root = File.Make_Save_Root(self.experiment_name + "-" + str(index) + "-" + str(Title) + ".png", save_root)
        # save the matplotlib figure
        plt.savefig(save_root)
        plt.close("all")  # close all figures
        pass
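# --- Usage sketch (editor's assumption, not part of the original commit) ---
# How Grad_CAM might be driven for a trained Keras model; `model`, `test_images`
# and `one_hot_labels` are hypothetical placeholders, and "block14_sepconv2_act"
# is only an example of a last-convolutional-layer name.
#
# cam = Grad_CAM(Label=["Stomach_Cancer", "Normal"],
#                One_Hot=one_hot_labels,
#                Experiment_Name="VGG19_baseline",
#                Layer_Name="block14_sepconv2_act")
# cam.process_main(model, index=0, images=test_images)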
0
draw_tools/__init__.py
Normal file
BIN
draw_tools/__pycache__/Grad_cam.cpython-310.pyc
Normal file
Binary file not shown.
BIN
draw_tools/__pycache__/Grad_cam.cpython-311.pyc
Normal file
Binary file not shown.
BIN
draw_tools/__pycache__/Grad_cam.cpython-39.pyc
Normal file
Binary file not shown.
BIN
draw_tools/__pycache__/__init__.cpython-310.pyc
Normal file
Binary file not shown.
BIN
draw_tools/__pycache__/__init__.cpython-311.pyc
Normal file
Binary file not shown.
BIN
draw_tools/__pycache__/__init__.cpython-39.pyc
Normal file
Binary file not shown.
BIN
draw_tools/__pycache__/draw.cpython-310.pyc
Normal file
Binary file not shown.
BIN
draw_tools/__pycache__/draw.cpython-311.pyc
Normal file
Binary file not shown.
BIN
draw_tools/__pycache__/draw.cpython-39.pyc
Normal file
Binary file not shown.
77
draw_tools/draw.py
Normal file
@@ -0,0 +1,77 @@
from matplotlib import pyplot as plt
import seaborn as sns
import datetime
import matplotlib.figure as figure
import matplotlib.backends.backend_agg as agg
from Load_process.file_processing import Process_File

def plot_history(Epochs, Losses, Accuracys, file_name, model_name):
    File = Process_File()

    plt.figure(figsize=(16,4))
    plt.subplot(1,2,1)
    plt.plot(range(1, Epochs + 1), Losses[0])
    plt.plot(range(1, Epochs + 1), Losses[1])
    plt.ylabel('Loss')
    plt.xlabel('epoch')
    plt.legend(['Train','Validation'], loc='upper left')
    plt.title('Model Loss')

    plt.subplot(1,2,2)
    plt.plot(range(1, Epochs + 1), Accuracys[0])
    plt.plot(range(1, Epochs + 1), Accuracys[1])
    plt.ylabel('Accuracy')
    plt.xlabel('epoch')
    plt.legend(['Train','Validation'], loc='upper left')
    plt.title('Model Accuracy')

    model_dir = '../Result/save_the_train_image( ' + str(datetime.date.today()) + " )"
    File.JudgeRoot_MakeDir(model_dir)
    modelfiles = File.Make_Save_Root(str(model_name) + " " + str(file_name) + ".png", model_dir)
    plt.savefig(modelfiles)
    plt.close("all")  # close all figures

def draw_heatmap(matrix, model_name, index):  # confusion matrix for two or more classes
    File = Process_File()

    # create the heatmap
    fig = figure.Figure(figsize=(6, 4))
    canvas = agg.FigureCanvasAgg(fig)
    Ax = fig.add_subplot(111)
    sns.heatmap(matrix, square = True, annot = True, fmt = 'd', linecolor = 'white', cmap = "Purples", ax = Ax)  # cmap sets the colour palette

    model_dir = '../Result/model_matrix_image ( ' + str(datetime.date.today()) + " )"
    File.JudgeRoot_MakeDir(model_dir)
    modelfiles = File.Make_Save_Root(str(model_name) + "-" + str(index) + ".png", model_dir)

    # confusion.figure.savefig(modelfiles)
    # set the figure labels
    Ax.set_title(str(model_name) + " confusion matrix")
    Ax.set_xlabel("X-Predict label of the model")
    Ax.set_ylabel("Y-True label of the model")

    # save the figure to a file
    canvas.print_figure(modelfiles)

def Confusion_Matrix_of_Two_Classification(Model_Name, Matrix, index):
    File = Process_File()

    fx = sns.heatmap(Matrix, annot=True, cmap='turbo')

    # label the title and the x/y axes of the plot
    fx.set_title('Plotting Confusion Matrix using Seaborn\n\n')
    fx.set_xlabel('Predicted Values')
    fx.set_ylabel('Actual Values')

    # label the boxes
    fx.xaxis.set_ticklabels(['False','True'])
    fx.yaxis.set_ticklabels(['False','True'])

    model_dir = '../Result/model_matrix_image ( ' + str(datetime.date.today()) + " )"
    File.JudgeRoot_MakeDir(model_dir)
    modelfiles = File.Make_Save_Root(str(Model_Name) + "-" + str(index) + ".png", model_dir)

    plt.savefig(modelfiles)
    plt.close("all")  # close all figures

    pass
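# --- Usage sketch (editor's assumption, not part of the original commit) ---
# Building the matrix with scikit-learn and handing it to the plot helpers;
# the label lists below are hypothetical placeholders.
if __name__ == "__main__":
    from sklearn.metrics import confusion_matrix

    y_true = [0, 1, 1, 0, 1, 0]
    y_pred = [0, 1, 0, 0, 1, 1]
    matrix = confusion_matrix(y_true, y_pred)

    Confusion_Matrix_of_Two_Classification("demo_model", matrix, index=0)
    draw_heatmap(matrix, "demo_model", index=1)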
131
experiments/Model_All_Step.py
Normal file
@@ -0,0 +1,131 @@
from tqdm import tqdm
from torch.nn import functional
import torch
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from torchmetrics.functional import auroc
import torch.optim as optim

from all_models_tools.all_model_tools import call_back
from Model_Loss.Loss import Entropy_Loss


class All_Step:
    def __init__(self, Training_Data_And_Label, Test_Data_And_Label, Validation_Data_And_Label, Model, Epoch, Number_Of_Classes):
        self.Training_Data_And_Label = Training_Data_And_Label
        self.Test_Data_And_Label = Test_Data_And_Label
        self.Validation_Data_And_Label = Validation_Data_And_Label

        self.Model = Model
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        self.Epoch = Epoch
        self.Number_Of_Classes = Number_Of_Classes

        pass

    def Training_Step(self, model_name, counter):
        # define the optimizer; weight_decay adds L2 regularization
        Optimizer = optim.SGD(self.Model.parameters(), lr=0.045, momentum = 0.9, weight_decay=0.1)
        model_path, early_stopping, scheduler = call_back(model_name, counter, Optimizer)

        criterion = Entropy_Loss()  # custom loss function
        train_losses = []
        val_losses = []
        train_accuracies = []
        val_accuracies = []

        for epoch in range(self.Epoch):
            self.Model.train()
            running_loss = 0.0
            all_train_preds = []
            all_train_labels = []

            epoch_iterator = tqdm(self.Training_Data_And_Label, desc= "Training (Epoch %d)" % epoch)

            for inputs, labels in epoch_iterator:
                # labels = np.reshape(labels, (int(labels.shape[0]), 1))
                inputs, OneHot_labels = inputs.to(self.device), labels.to(self.device)
                # inputs, labels = inputs.cuda(), labels.cuda()

                Optimizer.zero_grad()
                outputs = self.Model(inputs)
                loss = criterion(outputs, OneHot_labels)
                loss.backward()
                Optimizer.step()
                running_loss += loss.item()

                # collect the training predictions and labels
                # (labels are assumed to be integer class indices; take argmax first if they are one-hot)
                _, preds = torch.max(outputs, 1)
                all_train_preds.extend(preds.cpu().numpy())
                all_train_labels.extend(labels.cpu().numpy())

            Training_Loss = running_loss / len(self.Training_Data_And_Label)

            # all_train_labels = torch.FloatTensor(all_train_labels)
            # all_train_labels = torch.argmax(all_train_labels, 1)
            train_accuracy = accuracy_score(all_train_labels, all_train_preds)

            train_losses.append(Training_Loss)
            train_accuracies.append(train_accuracy)

            print(f"Epoch [{epoch+1}/{self.Epoch}], Loss: {Training_Loss:.4f}, Accuracy: {train_accuracy:0.2f}", end = ' ')

            self.Model.eval()
            val_loss = 0.0
            all_val_preds = []
            all_val_labels = []

            with torch.no_grad():
                for inputs, labels in self.Validation_Data_And_Label:
                    inputs, OneHot_labels = inputs.to(self.device), labels.to(self.device)

                    outputs = self.Model(inputs)
                    loss = criterion(outputs, OneHot_labels)
                    val_loss += loss.item()

                    # validation predictions and labels
                    _, preds = torch.max(outputs, 1)
                    all_val_preds.extend(preds.cpu().numpy())
                    all_val_labels.extend(labels.cpu().numpy())

            # compute the validation loss and accuracy
            val_loss /= len(self.Validation_Data_And_Label)
            val_accuracy = accuracy_score(all_val_labels, all_val_preds)

            val_losses.append(val_loss)
            val_accuracies.append(val_accuracy)
            print(f"Epoch [{epoch+1}/{self.Epoch}], Loss: {val_loss:.4f}, Accuracy: {val_accuracy:0.2f}")

            early_stopping(val_loss, self.Model, model_path)
            if early_stopping.early_stop:
                print("Early stopping triggered. Training stopped.")
                break

            # learning-rate adjustment
            scheduler.step(val_loss)

        return train_losses, val_losses, train_accuracies, val_accuracies

    def Evaluate_Model(self, cnn_model):
        # test the model
        cnn_model.eval()
        criterion = Entropy_Loss()  # accumulate the test loss with the same custom loss used for training
        True_Label, Predict_Label = [], []
        loss = 0.0
        with torch.no_grad():
            for images, labels in self.Test_Data_And_Label:
                images, OneHot_labels = images.to(self.device), labels.to(self.device)

                outputs = cnn_model(images)
                loss += criterion(outputs, OneHot_labels).item()
                _, predicted = torch.max(outputs, 1)
                Predict_Label.extend(predicted.cpu().numpy())
                True_Label.extend(labels.cpu().numpy())

        loss /= len(self.Test_Data_And_Label)

        accuracy = accuracy_score(True_Label, Predict_Label)
        precision = precision_score(True_Label, Predict_Label)
        recall = recall_score(True_Label, Predict_Label)
        # AUC from the hard binary predictions; class probabilities would give a smoother estimate
        AUC = auroc(torch.tensor(Predict_Label, dtype=torch.float), torch.tensor(True_Label, dtype=torch.long), task = "binary")
        f1 = f1_score(True_Label, Predict_Label)
        return loss, accuracy, precision, recall, AUC, f1, True_Label, Predict_Label
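# --- Usage sketch (editor's assumption, not part of the original commit) ---
# How All_Step might be driven end to end; the data loaders and the model are
# hypothetical placeholders built elsewhere in the project.
#
# step = All_Step(Training_Data_And_Label=train_loader,
#                 Test_Data_And_Label=test_loader,
#                 Validation_Data_And_Label=val_loader,
#                 Model=cnn_model, Epoch=100, Number_Of_Classes=2)
# train_losses, val_losses, train_acc, val_acc = step.Training_Step("VGG19", counter=0)
# loss, acc, prec, rec, auc, f1, y_true, y_pred = step.Evaluate_Model(cnn_model)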
0
experiments/__init__.py
Normal file
BIN
experiments/__pycache__/__init__.cpython-310.pyc
Normal file
Binary file not shown.
BIN
experiments/__pycache__/__init__.cpython-311.pyc
Normal file
Binary file not shown.
BIN
experiments/__pycache__/__init__.cpython-39.pyc
Normal file
Binary file not shown.
BIN
experiments/__pycache__/experiment.cpython-310.pyc
Normal file
Binary file not shown.
BIN
experiments/__pycache__/experiment.cpython-311.pyc
Normal file
Binary file not shown.
BIN
experiments/__pycache__/experiment.cpython-39.pyc
Normal file
Binary file not shown.
BIN
experiments/__pycache__/model.cpython-39.pyc
Normal file
Binary file not shown.
BIN
experiments/__pycache__/original_image_model.cpython-39.pyc
Normal file
Binary file not shown.
Some files were not shown because too many files have changed in this diff.