From f78cc738fba5189d7652087d771f1fba1f161b45 Mon Sep 17 00:00:00 2001
From: whitekirin <113206109@gms.tcu.edu.tw>
Date: Fri, 7 Mar 2025 18:35:32 +0000
Subject: [PATCH] 20250308 Commits: K-Fold has been finished, but the sampler
 still has an issue to resolve

---
 Load_process/Load_Indepentend.py              |   4 +-
 .../Load_Indepentend.cpython-311.pyc          | Bin 4616 -> 4298 bytes
 Training_Tools/PreProcess.py                  |  53 ++++
 Training_Tools/Tools.py                       |  42 +--
 .../__pycache__/PreProcess.cpython-311.pyc    | Bin 0 -> 3430 bytes
 .../__pycache__/Tools.cpython-311.pyc         | Bin 6619 -> 4464 bytes
 experiments/Model_All_Step.py                 | 176 ++++++-----
 .../Model_All_Step.cpython-311.pyc            | Bin 11050 -> 11523 bytes
 .../__pycache__/experiment.cpython-311.pyc    | Bin 8603 -> 8274 bytes
 .../__pycache__/pytorch_Model.cpython-311.pyc | Bin 2001 -> 2372 bytes
 experiments/experiment.py                     |  17 +-
 experiments/pytorch_Model.py                  |  54 ++--
 experiments/topic_model.py                    | 298 ------------------
 main.py                                       |  22 +-
 test.ipynb                                    |  32 +-
 15 files changed, 217 insertions(+), 481 deletions(-)
 create mode 100644 Training_Tools/PreProcess.py
 create mode 100644 Training_Tools/__pycache__/PreProcess.cpython-311.pyc
 delete mode 100644 experiments/topic_model.py

diff --git a/Load_process/Load_Indepentend.py b/Load_process/Load_Indepentend.py
index 157a6c6..538d8e5 100644
--- a/Load_process/Load_Indepentend.py
+++ b/Load_process/Load_Indepentend.py
@@ -21,8 +21,8 @@ class Load_Indepentend_Data():
         self.test, self.test_label = self.get_Independent_image(Test_data_root)
         print("\ntest_labels has " + str(len(self.test_label)) + " records\n")
 
-        self.validation, self.validation_label = self.get_Independent_image(Validation_data_root)
-        print("validation_labels has " + str(len(self.validation_label)) + " records\n")
+        # self.validation, self.validation_label = self.get_Independent_image(Validation_data_root)
+        # print("validation_labels has " + str(len(self.validation_label)) + " records\n")
 
     def get_Independent_image(self, independent_DataRoot):
         image_processing = Read_image_and_Process_image(123)

diff --git a/Load_process/__pycache__/Load_Indepentend.cpython-311.pyc b/Load_process/__pycache__/Load_Indepentend.cpython-311.pyc
index 6cc885100eb7be9592e84cddd4e014fe9f67845e..dbab7c84ed817b2c6d84d6159cd8b599213a4a65 100644
GIT binary patch
delta 188
[binary delta data omitted]

delta 391
[binary delta data omitted]

diff --git a/Training_Tools/PreProcess.py b/Training_Tools/PreProcess.py
new file mode 100644
index 0000000..5b0647d
--- /dev/null
+++ b/Training_Tools/PreProcess.py
@@ -0,0 +1,53 @@
+from torch.utils.data import Dataset, DataLoader, RandomSampler
+import torchvision.transforms as transforms
+import torch
+
+class ListDataset(Dataset):
+    def __init__(self, data_list, labels_list, status):
+        self.data = data_list
+        self.labels = labels_list
+        self.status = status
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, idx):
+        sample = self.data[idx]
+
+        if self.status:
+            from Image_Process.Image_Generator import Image_generator
+            ImageGenerator = Image_generator("", "", 12)
+            Transform = ImageGenerator.Generator_Content(5)
+            sample = Transform(sample)
+
+        label = self.labels[idx]
+        return sample, label
+
+class Training_Precesses:
+    def __init__(self, Training_Datas, Training_Labels, Testing_Datas, Testing_Labels):
+        self.Training_Datas = Training_Datas
+        self.Training_Labels = Training_Labels
+        self.Testing_Datas = Testing_Datas
+        self.Testing_Labels = Testing_Labels
+        pass
+
+    def Total_Data_Combine_To_DataLoader(self, Batch_Size):
+        Training_Dataset = self.Convert_Data_To_DataSet(self.Training_Datas, self.Training_Labels)
+        Testing_Dataset = self.Convert_Data_To_DataSet(self.Testing_Datas, self.Testing_Labels)
+
+        Training_DataLoader = DataLoader(dataset = Training_Dataset, batch_size = Batch_Size, num_workers = 0, pin_memory = True, shuffle = True)
+        Testing_DataLoader = DataLoader(dataset = Testing_Dataset, batch_size = 1, num_workers = 0, pin_memory = True, shuffle = True)
+
+        return Training_DataLoader, Testing_DataLoader
+
+    def Convert_Data_To_DataSet(self, Datas : list, Labels : list, status : bool = True):
+        seed = 42  # any fixed integer used as the seed
+        # build a seeded random-number generator
+        generator = torch.Generator()
+        generator.manual_seed(seed)
+
+        # create the Dataset
+        list_dataset = ListDataset(Datas, Labels, status)
+        # sampler = RandomSampler(list_dataset, generator = generator)  # create the Sampler
+
+        return list_dataset

diff --git a/Training_Tools/Tools.py b/Training_Tools/Tools.py
index febafbe..f89d15e 100644
--- a/Training_Tools/Tools.py
+++ b/Training_Tools/Tools.py
@@ -1,30 +1,8 @@
 import pandas as pd
 from torch.nn import functional
 import torch
-from torch.utils.data import Dataset, DataLoader, RandomSampler
-import torchvision.transforms as transforms
 
-class ListDataset(Dataset):
-    def __init__(self, data_list, labels_list, status):
-        self.data = data_list
-        self.labels = labels_list
-        self.status = status
-
-    def __len__(self):
-        return len(self.data)
-
-    def __getitem__(self, idx):
-        sample = self.data[idx]
-
-        if self.status:
-            from Image_Process.Image_Generator import Image_generator
-            ImageGenerator = Image_generator("", "", 12)
-            Transform = ImageGenerator.Generator_Content(5)
-            sample = Transform(sample)
-
-        label = self.labels[idx]
-        return sample, label
-
+
 class Tool:
     def __init__(self) -> None:
         self.__ICG_Training_Root = ""
@@ -84,8 +62,8 @@ class Tool:
 
     def Get_Save_Roots(self, choose):
         '''Returns the Train, test, and validation roots.
-        choose = 1 => use the ICG label
-        else => use the Normal label
+        choose = 1 => use the white-light label
+        else => use the filtered-light label
 
         If choose != 1 || choose != 2, four results are returned.
         '''
@@ -106,16 +84,4 @@ class Tool:
         return self.__Comprehensive_Generator_Root
 
     def Get_OneHot_Encording_Label(self):
-        return self.__OneHot_Encording
-
-    def Convert_Data_To_DataSet_And_Put_To_Dataloader(self, Datas : list, Labels : list, Batch_Size : int, status : bool = True):
-        seed = 42  # any fixed integer used as the seed
-        # build a seeded random-number generator
-        generator = torch.Generator()
-        generator.manual_seed(seed)
-
-        # create the Dataset
-        list_dataset = ListDataset(Datas, Labels, status)
-        # sampler = RandomSampler(list_dataset, generator = generator)  # create the Sampler
-
-        return DataLoader(dataset = list_dataset, batch_size = Batch_Size, num_workers = 0, pin_memory = True, shuffle = True)
\ No newline at end of file
+        return self.__OneHot_Encording
\ No newline at end of file

diff --git a/Training_Tools/__pycache__/PreProcess.cpython-311.pyc b/Training_Tools/__pycache__/PreProcess.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..adbd1ed42392fd772f9565a7adc3a071e5844488
GIT binary patch
literal 3430
[binary literal data omitted]
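A note on the sampler left commented out in Convert_Data_To_DataSet above: PyTorch's DataLoader treats the sampler argument as mutually exclusive with shuffle=True and raises a ValueError when both are given, which is the most likely reading of the sampler problem named in the commit subject. A minimal sketch of wiring the seeded sampler in (the TensorDataset is a toy stand-in for ListDataset, not part of this patch):

    import torch
    from torch.utils.data import DataLoader, RandomSampler, TensorDataset

    dataset = TensorDataset(torch.randn(8, 3, 4, 4), torch.arange(8))  # toy stand-in for ListDataset

    generator = torch.Generator()
    generator.manual_seed(42)                                # same seeding as Convert_Data_To_DataSet
    sampler = RandomSampler(dataset, generator = generator)  # reproducible random order

    # leave shuffle at its default (False) once a sampler is supplied;
    # passing both raises a ValueError
    loader = DataLoader(dataset, batch_size = 2, sampler = sampler, num_workers = 0, pin_memory = True)

    for inputs, labels in loader:
        print(labels)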
diff --git a/Training_Tools/__pycache__/Tools.cpython-311.pyc b/Training_Tools/__pycache__/Tools.cpython-311.pyc
GIT binary patch
[binary delta data omitted]

diff --git a/experiments/Model_All_Step.py b/experiments/Model_All_Step.py
--- a/experiments/Model_All_Step.py
+++ b/experiments/Model_All_Step.py
-                iterations_per_second = processed_samples / elapsed_time if elapsed_time > 0 else 0
-                eta = (total_samples - processed_samples) / iterations_per_second if iterations_per_second > 0 else 0
-                time_str = f"{int(elapsed_time//60):02d}:{int(elapsed_time%60):02d}<{int(eta//60):02d}:{int(eta%60):02d}"
-
-                # compute the current batch accuracy (adjust to your specific needs)
-                batch_accuracy = (Output_Indexs.cpu().numpy() == True_Indexs).mean()
-
-                # update the progress-bar display
-                epoch_iterator.set_description(f"Epoch [{epoch}/{self.Epoch}]")
-                epoch_iterator.set_postfix_str(
-                    f"{processed_samples}/{total_samples} [{time_str}, {iterations_per_second:.2f}it/s, " +
-                    f"acc={batch_accuracy:.3f}, loss={loss.item():.3f}, ]"
-                )
-
-            epoch_iterator.close()
-
-            all_train_preds = Merge_Function.merge_data_main(all_train_preds, 0, len(all_train_preds))
-            all_train_labels = Merge_Function.merge_data_main(all_train_labels, 0, len(all_train_labels))
-
-            Training_Loss = running_loss / len(self.Training_Data_And_Label)
-            train_accuracy = accuracy_score(all_train_labels, all_train_preds)
-
-            train_losses.append(Training_Loss)
-            train_accuracies.append(train_accuracy)
-
-            self.Model.eval()
-            val_loss = 0.0
-            all_val_preds = []
-            all_val_labels = []
-
-            with torch.no_grad():
-                for inputs, labels in self.Validation_Data_And_Label:
+                for inputs, labels in epoch_iterator:
                     inputs, labels = torch.as_tensor(inputs).to(self.device), torch.as_tensor(labels).to(self.device)
 
+                    Optimizer.zero_grad()
                     outputs = self.Model(inputs)
                     loss = criterion(outputs, labels)
-                    val_loss += loss.item()
+                    loss.backward()
+                    Optimizer.step()
+                    running_loss += loss.item()
 
                     # collect training predictions and labels
                     Output_Values, Output_Indexs = torch.max(outputs, dim = 1)
                     True_Indexs = np.argmax(labels.cpu().numpy(), 1)
+
+                    all_train_preds.append(Output_Indexs.cpu().numpy())
+                    all_train_labels.append(True_Indexs)
 
-                    all_val_preds.append(Output_Indexs.cpu().numpy())
-                    all_val_labels.append(True_Indexs)
+                    processed_samples += len(inputs)
 
-            val_loss /= len(self.Validation_Data_And_Label)
-            val_accuracy = accuracy_score(all_val_labels, all_val_preds)
+                    # compute current progress
+                    progress = (processed_samples / total_samples) * 100
+
+                    # compute the elapsed time and the remaining time
+                    elapsed_time = time.time() - start_time
+                    iterations_per_second = processed_samples / elapsed_time if elapsed_time > 0 else 0
+                    eta = (total_samples - processed_samples) / iterations_per_second if iterations_per_second > 0 else 0
+                    time_str = f"{int(elapsed_time//60):02d}:{int(elapsed_time%60):02d}<{int(eta//60):02d}:{int(eta%60):02d}"
 
-            val_losses.append(val_loss)
-            val_accuracies.append(val_accuracy)
-            # print(f"Val_loss: {val_loss:.4f}, Val_accuracy: {val_accuracy:0.2f}\n")
+                    # compute the current batch accuracy (adjust to your specific needs)
+                    batch_accuracy = (Output_Indexs.cpu().numpy() == True_Indexs).mean()
 
-            early_stopping(val_loss, self.Model, model_path)
-            if early_stopping.early_stop:
-                print("Early stopping triggered. Training stopped.")
-                Total_Epoch = epoch
-                break
+                    # update the progress-bar display
+                    epoch_iterator.set_description(f"Epoch [{epoch}/{self.Epoch}]")
+                    epoch_iterator.set_postfix_str(
+                        f"{processed_samples}/{total_samples} [{time_str}, {iterations_per_second:.2f}it/s, " +
+                        f"acc={batch_accuracy:.3f}, loss={loss.item():.3f}, ]"
+                    )
 
-            # learning-rate adjustment
-            scheduler.step(val_loss)
+                epoch_iterator.close()
+
+                all_train_preds = Merge_Function.merge_data_main(all_train_preds, 0, len(all_train_preds))
+                all_train_labels = Merge_Function.merge_data_main(all_train_labels, 0, len(all_train_labels))
+
+                Training_Loss = running_loss / len(self.Training_DataLoader)
+                train_accuracy = accuracy_score(all_train_labels, all_train_preds)
+
+                train_losses.append(Training_Loss)
+                train_accuracies.append(train_accuracy)
+
+                self.Model.eval()
+                val_loss = 0.0
+                all_val_preds = []
+                all_val_labels = []
+
+                with torch.no_grad():
+                    for inputs, labels in val_subset:
+                        inputs, labels = torch.as_tensor(inputs).to(self.device), torch.as_tensor(labels).to(self.device)
+
+                        outputs = self.Model(inputs)
+                        loss = criterion(outputs, labels)
+                        val_loss += loss.item()
+
+                        # collect validation predictions and labels
+                        Output_Values, Output_Indexs = torch.max(outputs, dim = 1)
+                        True_Indexs = np.argmax(labels.cpu().numpy(), 1)
+
+                        all_val_preds.append(Output_Indexs.cpu().numpy())
+                        all_val_labels.append(True_Indexs)
+
+                val_loss /= len(val_subset)
+                val_accuracy = accuracy_score(all_val_labels, all_val_preds)
+
+                val_losses.append(val_loss)
+                val_accuracies.append(val_accuracy)
+                # print(f"Val_loss: {val_loss:.4f}, Val_accuracy: {val_accuracy:0.2f}\n")
+
+                early_stopping(val_loss, self.Model, model_path)
+                if early_stopping.early_stop:
+                    print("Early stopping triggered. Training stopped.")
+                    Total_Epoch = epoch
+                    break
+
+                # learning-rate adjustment
+                scheduler.step(val_loss)
 
         return train_losses, val_losses, train_accuracies, val_accuracies, Total_Epoch
 
@@ -148,7 +160,7 @@ class All_Step:
             loss = 0.0
 
         with torch.no_grad():
-            for images, labels in self.Test_Data_And_Label:
+            for images, labels in self.Test_Dataloader:
                 images, labels = torch.tensor(images).to(self.device), torch.tensor(labels).to(self.device)
 
                 outputs = cnn_model(images)
@@ -163,13 +175,13 @@
             Predict_Label_OneHot.append(torch.tensor(outputs, dtype = torch.float32).cpu().numpy()[0])
             True_Label_OneHot.append(torch.tensor(labels, dtype = torch.int).cpu().numpy()[0])
 
-            # create a GradCAM instance
-            Layers = cnn_model.base_model.body.conv4.pointwise
-            grad_cam = GradCAM(cnn_model, target_layer="base_model")
-            # visualize Grad-CAM
-            grad_cam.visualize(outputs, images, target_class = 3, File_Name = counter, model_name = self.Model_Name)
+            # # create a GradCAM instance
+            # Layers = cnn_model.base_model.body.conv4.pointwise
+            # grad_cam = GradCAM(cnn_model, target_layer="base_model")
+            # # visualize Grad-CAM
+            # grad_cam.visualize(outputs, images, target_class = 3, File_Name = counter, model_name = self.Model_Name)
 
-        loss /= len(self.Test_Data_And_Label)
+        loss /= len(self.Test_Dataloader)
 
         True_Label_OneHot = torch.tensor(True_Label_OneHot, dtype = torch.int)
         Predict_Label_OneHot = torch.tensor(Predict_Label_OneHot, dtype = torch.float32)

diff --git a/experiments/__pycache__/Model_All_Step.cpython-311.pyc b/experiments/__pycache__/Model_All_Step.cpython-311.pyc
index 1b9e8821d3f48c24d1e71fb7d1556057e5db41b3..2d95cf89375ecb7087df7196710ab3e4a53985e3 100644
GIT binary patch
delta 5698
[binary delta data omitted]

delta 5263
[binary delta data omitted]
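The restructured Training_Step above iterates over a val_subset whose construction is not visible in the surviving hunks. A hedged sketch of one way the per-fold loaders could be produced, using sklearn's KFold together with torch's Subset (the names kf, train_idx, and val_idx are illustrative, not taken from the patch):

    import torch
    from sklearn.model_selection import KFold
    from torch.utils.data import DataLoader, Subset, TensorDataset

    dataset = TensorDataset(torch.randn(20, 3, 8, 8), torch.randint(0, 2, (20,)))  # toy stand-in

    kf = KFold(n_splits = 5, shuffle = True, random_state = 42)
    for fold, (train_idx, val_idx) in enumerate(kf.split(list(range(len(dataset))))):
        # one loader pair per fold; the epoch loop would run inside this loop
        train_loader = DataLoader(Subset(dataset, train_idx), batch_size = 4, shuffle = True)
        val_subset = DataLoader(Subset(dataset, val_idx), batch_size = 1)
        print(f"fold {fold}: {len(train_idx)} train / {len(val_idx)} val samples")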
diff --git a/experiments/__pycache__/experiment.cpython-311.pyc b/experiments/__pycache__/experiment.cpython-311.pyc
index 881fa87a9688b859bd10e0e10c8d42b0bb2231f9..4fc3c955e512269db8d267bfc8080c6f3b39f0eb 100644
GIT binary patch
delta 1803
[binary delta data omitted]
diff --git a/experiments/__pycache__/pytorch_Model.cpython-311.pyc b/experiments/__pycache__/pytorch_Model.cpython-311.pyc
GIT binary patch
delta 917
[binary delta data omitted]

diff --git a/experiments/experiment.py b/experiments/experiment.py
index 5f6386b..14ab88c 100644
--- a/experiments/experiment.py
+++ b/experiments/experiment.py
@@ -5,6 +5,7 @@ from Load_process.file_processing import Process_File
 from sklearn.metrics import confusion_matrix
 from experiments.pytorch_Model import ModifiedXception
 from experiments.Model_All_Step import All_Step
+from Training_Tools.PreProcess import Training_Precesses
 from torchinfo import summary
 import pandas as pd
 import numpy as np
@@ -13,7 +14,7 @@ import torch.nn as nn
 import time
 
 class experiments():
-    def __init__(self, Image_Size, Model_Name, Experiment_Name, Generator_Batch_Size, Epoch, Train_Batch_Size, tools, Number_Of_Classes, status):
+    def __init__(self, Image_Size, Model_Name, Experiment_Name, Epoch, Train_Batch_Size, tools, Number_Of_Classes, status):
         '''
         # Experiment object
 
@@ -49,7 +50,6 @@ class experiments():
 
         self.model_name = Model_Name # a name recording which model is used (a pretrained model or a self-designed one)
         self.experiment_name = Experiment_Name
-        self.generator_batch_size = Generator_Batch_Size
         self.epoch = Epoch
         self.train_batch_size = Train_Batch_Size
         self.layers = 1
@@ -62,7 +62,7 @@
 
         pass
 
-    def processing_main(self, Training_Dataset, counter):
+    def processing_main(self, Training_Data, Training_Label, counter):
         Train, Test, Validation = self.Topic_Tool.Get_Save_Roots(self.Status) # change this when switching datasets
 
         start = time.time()
@@ -72,22 +72,15 @@
 
         # hand the processed test and validation data to this object's attributes
         self.test, self.test_label = self.cut_image.test, self.cut_image.test_label
-        self.validation, self.validation_label = self.cut_image.validation, self.cut_image.validation_label
 
-        Testing_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.test, self.test_label, 1)
-        Validation_Dataset = self.Topic_Tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(self.validation, self.validation_label, 1)
+        PreProcess = Training_Precesses(Training_Data, Training_Label, self.test, self.test_label)
 
         cnn_model = self.construct_model() # call the model-loading function
         print(summary(cnn_model, input_size=(int(self.train_batch_size / 2), 3, self.Image_Size, self.Image_Size)))
         for name, parameters in cnn_model.named_parameters():
             print(f"Layer Name: {name}, Parameters: {parameters.size()}")
 
-        step = All_Step(Training_Dataset, Testing_Dataset, Validation_Dataset, cnn_model, self.epoch, self.Number_Of_Classes, self.model_name)
-
-        # model_dir = '../save_the_best_model/Topic/Remove background with Normal image/best_model( 2023-10-17 )-2.h5' # path to saved model weights; every model has its own weight file
-        # if os.path.exists(model_dir): # if this file exists
-        #     cnn_model.load_weights(model_dir) # load the model weights
-        #     print("Weights loaded\n")
+        step = All_Step(PreProcess, self.train_batch_size, cnn_model, self.epoch, self.Number_Of_Classes, self.model_name)
 
         print("\n\n\nTime to load the training data (70000): %f seconds\n\n" % (end - start))
         train_losses, val_losses, train_accuracies, val_accuracies, Epoch = step.Training_Step(self.model_name, counter)

diff --git a/experiments/pytorch_Model.py b/experiments/pytorch_Model.py
index ac5cfc6..cf0b659 100644
--- a/experiments/pytorch_Model.py
+++ b/experiments/pytorch_Model.py
@@ -10,41 +10,33 @@ class ModifiedXception(nn.Module):
     def __init__(self, num_classes):
         super(ModifiedXception, self).__init__()
 
-        # load the pretrained Xception model and drop its final (fc) layer
+        # Load Xception pre-trained model (full model, not just features)
         self.base_model = timm.create_model(
-            'xception',
-            pretrained=True,
-            features_only=True, # keep only the feature-extraction part
-            out_indices=[3] # select the feature-layer index (per the model structure)
-        )
+            'xception',
+            pretrained=True,
+            drop_rate=0.0,  # Optional: adjust dropout if needed
+        )
 
-        # custom classification head
+        # Replace the default global pooling with AdaptiveAvgPool2d
+        self.base_model.global_pool = nn.AdaptiveAvgPool2d(output_size=1)  # Output size of 1x1 spatially
+
+        # Replace the final fully connected layer with Identity to get features
+        self.base_model.fc = nn.Identity()  # Output will be 2048 (Xception's default feature size)
+
+        # Custom head: Linear from 2048 to 1025, a ReLU/Dropout stage, then to num_classes
         self.custom_head = nn.Sequential(
-            nn.AdaptiveAvgPool2d(1), # Global Average Pooling
-            nn.Flatten(),
-            nn.Linear(728, 368), # 728 channels come out of the selected feature layer
-            nn.ReLU(), # optional activation
-            nn.Linear(368, num_classes),
-            nn.Sigmoid()
+            nn.Linear(2048, 1025),  # From Xception's 2048 features down to 1025
+            nn.ReLU(),  # Activation
+            nn.Dropout(0.6),  # Dropout for regularization
+            nn.Linear(1025, num_classes),  # Final output layer
+            nn.Sigmoid()  # Sigmoid for binary/multi-label classification
         )
 
-        # self.base_model.fc = nn.Identity() # remove the original fully connected layer
-
-        # # add global average pooling, a hidden layer, and an output layer
-        # self.global_avg_pool = nn.AdaptiveAvgPool2d(1) # global average pooling
-        # self.hidden_layer = nn.Linear(2048, 1370) # hidden layer; input size depends on Xception's output size
-        # self.output_layer = nn.Linear(1370, 2) # output layer, sized by the number of classes
-
-        # # activation and dropout
-        # self.relu = nn.ReLU()
-        # self.dropout = nn.Dropout(0.6)
-
     def forward(self, x):
-        x = self.base_model(x) # Xception backbone
-        x = x[0]
-        output = self.custom_head(x)
-        # x = self.global_avg_pool(x) # global average pooling
-        # x = self.relu(self.hidden_layer(x)) # hidden layer + ReLU
-        # x = self.dropout(x) # Dropout
-        # x = self.output_layer(x) # output layer
+        # Pass through the base Xception model (up to global pooling)
+        x = self.base_model.forward_features(x)  # Get feature maps
+        x = self.base_model.global_pool(x)  # Apply AdaptiveAvgPool2d (output: [B, 2048, 1, 1])
+        x = x.flatten(1)  # Flatten to [B, 2048]
+        x = self.base_model.fc(x)  # Identity layer (still [B, 2048])
+        output = self.custom_head(x)  # Custom head processing
         return output
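A quick smoke test for the rewritten ModifiedXception (a sketch; it assumes timm and its pretrained weights are available, and the 256 matches Image_Size in main.py). timm's 'xception' does expose 2048-dimensional features after global pooling, so the nn.Linear(2048, 1025) input size is consistent:

    import torch
    from experiments.pytorch_Model import ModifiedXception

    model = ModifiedXception(num_classes = 2)
    model.eval()

    x = torch.randn(1, 3, 256, 256)  # one RGB image at the project's Image_Size
    with torch.no_grad():
        out = model(x)
    print(out.shape)  # expected: torch.Size([1, 2]), values in (0, 1) from the Sigmoid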
diff --git a/experiments/topic_model.py b/experiments/topic_model.py
deleted file mode 100644
index fbf7467..0000000
--- a/experiments/topic_model.py
+++ /dev/null
@@ -1,298 +0,0 @@
-from convolution_model_tools.convolution_2D_tools import model_2D_tool
-from dense_model_tools.dense_tools import model_Dense_Layer
-from all_models_tools.all_model_tools import add_optimizers_function, add_Activative, add_dropout, call_back
-from keras.activations import softmax, sigmoid
-from keras.applications import VGG19, ResNet50, NASNetLarge, DenseNet201, Xception
-from keras.applications.efficientnet_v2 import EfficientNetV2L
-from keras.layers import BatchNormalization, Flatten, GlobalAveragePooling2D, MaxPooling2D, Dense, Conv2D, Dropout, TimeDistributed, LSTM, Input
-from keras import regularizers
-
-def one_layer_cnn_model():
-    tools = model_2D_tool()
-    dense_tool = model_Dense_Layer()
-
-    img_Input = tools.add_2D_input()
-    x = tools.add_Convolution2D(img_Input, 32)
-    x = add_Activative(x)
-    x = tools.add_MaxPooling(x)
-
-    x = tools.add_Convolution2D(x, 64)
-    x = add_Activative(x)
-    x = tools.add_MaxPooling(x)
-
-    flatter = tools.add_flatten(x)
-
-    dense = dense_tool.add_dense(64, flatter)
-    dense = add_Activative(dense)
-    dense = dense_tool.add_dense(32, dense)
-    dense = add_Activative(dense)
-    dense = dense_tool.add_dense(7, dense)
-    dense = add_Activative(dense, softmax)
-    return img_Input, dense
-
-def find_example_cnn_model():
-    tools = model_2D_tool()
-    dense_tool = model_Dense_Layer()
-
-    img_Input = tools.add_2D_input()
-
-    x = tools.add_Convolution2D(img_Input, 16)
-    x = add_Activative(x)
-    x = add_dropout(x, 0.25)
-
-    x = tools.add_Convolution2D(x, 32)
-    x = add_Activative(x)
-    x = add_dropout(x, 0.25)
-
-    x = tools.add_MaxPooling(x)
-
-    x = tools.add_Convolution2D(x, 64)
-    x = add_Activative(x)
-    x = add_dropout(x, 0.25)
-
-    x = tools.add_MaxPooling(x)
-
-    x = tools.add_Convolution2D(x, 128)
-    x = add_Activative(x)
-    x = add_dropout(x, 0.25)
-
-    x = tools.add_MaxPooling(x)
-
-    flatter = tools.add_flatten(x)
-
-    dense = dense_tool.add_dense(64, flatter)
-    dense = add_Activative(dense)
-    dense = add_dropout(dense, 0.25)
-    dense = dense_tool.add_dense(7, dense)
-    dense = add_Activative(dense, sigmoid)
-
-    return img_Input, dense
-
-def change_example_cnn_model():
-    tools = model_2D_tool()
-    dense_tool = model_Dense_Layer()
-
-    img_Input = tools.add_2D_input()
-
-    x = tools.add_Convolution2D(img_Input, 16)
-    x = add_Activative(x)
-    x = tools.add_batchnomlization(x)
-
-    x = tools.add_Convolution2D(x, 32)
-    x = add_Activative(x)
-    x = tools.add_batchnomlization(x)
-
-    x = tools.add_MaxPooling(x)
-
-    x = tools.add_Convolution2D(x, 64)
-    x = add_Activative(x)
-    x = tools.add_batchnomlization(x)
-
-    x = tools.add_MaxPooling(x)
-
-    x = tools.add_Convolution2D(x, 128)
-    x = add_Activative(x)
-    x = tools.add_batchnomlization(x)
-
-    x = tools.add_MaxPooling(x)
-
-    flatter = tools.add_flatten(x)
-
-    dense = dense_tool.add_dense(64, flatter)
-    dense = add_Activative(dense)
-    dense = add_dropout(dense, 0.3)
-    dense = dense_tool.add_dense(7, dense)
-    dense = add_Activative(dense, softmax)
-
-    return img_Input, dense
-
-def two_convolution_cnn_model():
-    tools = model_2D_tool()
-    dense_tool = model_Dense_Layer()
-
-    img_Input = tools.add_2D_input()
-    x = tools.add_two_floors_convolution2D(img_Input, 32)
-    x = tools.add_MaxPooling(x)
-
-    x = tools.add_two_floors_convolution2D(x, 64)
-    x = tools.add_MaxPooling(x)
-
-    flatter = tools.add_flatten(x)
-
-    dense = dense_tool.add_dense(64, flatter)
-    dense = add_Activative(dense)
-    dense = dense_tool.add_dense(32, dense)
-    dense = add_Activative(dense)
-    dense = dense_tool.add_dense(7, dense)
-    dense = add_Activative(dense, softmax)
-    return img_Input, dense
-
-def VGG19_model():
-    tools = model_2D_tool()
-    dense_tool = model_Dense_Layer()
-
-    vgg19 = VGG19(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
-    flatten = tools.add_flatten(vgg19.output)
-    dense = dense_tool.add_dense(64, flatten)
-    dense = add_Activative(dense)
-    dense = dense_tool.add_dense(7, dense)
-    dense = add_Activative(dense, softmax)
-
-    return vgg19, dense
-
-def Resnet50_model():
-    tools = model_2D_tool()
-    dense_tool = model_Dense_Layer()
-
-    vgg19 = ResNet50(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
-    flatten = tools.add_flatten(vgg19.output)
-    dense = dense_tool.add_dense(64, flatten)
-    dense = add_Activative(dense)
-    dense = dense_tool.add_dense(7, dense)
-    dense = add_Activative(dense, softmax)
-
-    return vgg19, dense
-
-def DenseNet201_model():
-    tools = model_2D_tool()
-    dense_tool = model_Dense_Layer()
-
-    Densenet201 = DenseNet201(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
-    flatten = tools.add_flatten(Densenet201.output)
-    dense = dense_tool.add_dense(64, flatten)
-    dense = add_Activative(dense)
-    dense = dense_tool.add_dense(7, dense)
-    dense = add_Activative(dense, softmax)
-
-    return Densenet201, dense
-
-def Xception_model():
-    xception = Xception(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
-    flatten = Flatten()(xception.output)
-    dense = Dense(units = 64, activation = "relu")(flatten)
-    dense = Dense(units = 7, activation = "softmax")(dense)
-
-    return xception, dense
-
-def cnn_LSTM():
-    head = Input(shape = (150, 150, 3))
-    inputs = Conv2D(filters = 64, strides = 1, kernel_size = (3, 3), padding = "same", activation = "relu")(head)
-    inputs = Conv2D(filters = 64, strides = 1, kernel_size = (3, 3), padding = "same", activation = "relu")(inputs)
-    inputs = MaxPooling2D(strides = 2, pool_size = (2, 2))(inputs)
-    inputs = Dropout(0.25)(inputs)
-
-    inputs = Conv2D(filters = 128, strides = 1, kernel_size = (3, 3), padding = "same", activation = "relu")(inputs)
-    inputs = Conv2D(filters = 128, strides = 1, kernel_size = (3, 3), padding = "same", activation = "relu")(inputs)
-    inputs = MaxPooling2D(strides = 2, pool_size = (2, 2))(inputs)
-    inputs = Dropout(0.25)(inputs)
-
-    inputs = Conv2D(filters = 256, strides = 1, kernel_size = (3, 3), padding = "same", activation = "relu")(inputs)
-    inputs = Conv2D(filters = 256, strides = 1, kernel_size = (3, 3), padding = "same", activation = "relu")(inputs)
-    inputs = MaxPooling2D(strides = 2, pool_size = (2, 2))(inputs)
-    inputs = Dropout(0.25)(inputs)
-
-    inputs = Conv2D(filters = 512, strides = 1, kernel_size = (3, 3), padding = "same", activation = "relu")(inputs)
-    inputs = Conv2D(filters = 512, strides = 1, kernel_size = (3, 3), padding = "same", activation = "relu")(inputs)
-    inputs = Conv2D(filters = 512, strides = 1, kernel_size = (3, 3), padding = "same", activation = "relu")(inputs)
-    inputs = MaxPooling2D(strides = 2, pool_size = (2, 2))(inputs)
-    inputs = Dropout(0.25)(inputs)
-
-    inputs = Conv2D(filters = 512, strides = 1, kernel_size = (3, 3), padding = "same", activation = "relu")(inputs)
-    inputs = Conv2D(filters = 512, strides = 1, kernel_size = (3, 3), padding = "same", activation = "relu")(inputs)
-    inputs = Conv2D(filters = 512, strides = 1, kernel_size = (3, 3), padding = "same", activation = "relu")(inputs)
-    inputs = MaxPooling2D(strides = 2, pool_size = (2, 2))(inputs)
-    inputs = Dropout(0.25)(inputs)
-    inputs = TimeDistributed(Flatten())(inputs)
-
-    inputs = LSTM(units = 49)(inputs)
-    inputs = Dense(units = 64)(inputs)
-    output = Dense(units = 7, activation = "softmax")(inputs)
-
-    return head, output
-
-def add_regularizers_L1(): # regularization comparison
-    tools = model_2D_tool()
-    dense_tool = model_Dense_Layer()
-
-    Resnet50 = ResNet50(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
-    flatten = tools.add_flatten(Resnet50.output)
-    dense = dense_tool.add_regularizer_dense(64, flatten, regularizers.L1())
-    dense = add_Activative(dense)
-    dense = dense_tool.add_dense(7, dense)
-    dense = add_Activative(dense, softmax)
-
-    return Resnet50, dense
-
-def add_regularizers_L2(): # regularization comparison
-    tools = model_2D_tool()
-    dense_tool = model_Dense_Layer()
-
-    Resnet50 = ResNet50(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
-    flatten = tools.add_flatten(Resnet50.output)
-    dense = dense_tool.add_regularizer_dense(64, flatten, regularizers.L2())
-    dense = add_Activative(dense)
-    dense = dense_tool.add_dense(7, dense)
-    dense = add_Activative(dense, softmax)
-
-    return Resnet50, dense
-
-def add_regularizers_L1L2(): # regularization comparison
-    tools = model_2D_tool()
-    dense_tool = model_Dense_Layer()
-
-    Resnet50 = ResNet50(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
-    flatten = tools.add_flatten(Resnet50.output)
-    dense = dense_tool.add_regularizer_dense(64, flatten, regularizers.L1L2())
-    dense = add_Activative(dense)
-    dense = dense_tool.add_dense(7, dense)
-    dense = add_Activative(dense, softmax)
-
-    return Resnet50, dense
-
-def add_layers1_L2(Dense_layers): # regularization comparison
-    tools = model_2D_tool()
-    dense_tool = model_Dense_Layer()
-    layers = 32
-
-    Densenet201 = DenseNet201(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
-    flatten = tools.add_flatten(Densenet201.output)
-
-    for layer in range(Dense_layers):
-        dense = dense_tool.add_regularizer_kernel_dense(unit = layers, input_data = flatten, regularizer = regularizers.L2())
-        dense = add_Activative(dense)
-        layers *= 2
-
-    dense = dense_tool.add_dense(7, dense)
-    dense = add_Activative(dense, softmax)
-
-    return Densenet201, dense
-
-def add_layers_another_L2(Dense_layers, layers): # regularization comparison
-    tools = model_2D_tool()
-    dense_tool = model_Dense_Layer()
-
-    Densenet201 = DenseNet201(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
-    flatten = tools.add_flatten(Densenet201.output)
-
-    for layer in range(Dense_layers):
-        dense = dense_tool.add_regularizer_dense(unit = layers, input_data = flatten, regularizer = regularizers.L2())
-        dense = add_Activative(dense)
-        layers /= 2
-
-    dense = dense_tool.add_dense(7, dense)
-    dense = add_Activative(dense, softmax)
-
-    return Densenet201, dense
-
-def add_bias_regularizers(): # regularization comparison
-    tools = model_2D_tool()
-    dense_tool = model_Dense_Layer()
-
-    Resnet50 = ResNet50(include_top = False, weights = "imagenet", input_shape = (120, 120, 3))
-    flatten = tools.add_flatten(Resnet50.output)
-    dense = dense_tool.add_regularizer_bias_dense(64, flatten, regularizers.L2())
-    dense = add_Activative(dense)
-    dense = dense_tool.add_dense(7, dense)
-    dense = add_Activative(dense, softmax)
-
-    return Resnet50, dense
\ No newline at end of file

diff --git a/main.py b/main.py
index 834a666..9c06709 100644
--- a/main.py
+++ b/main.py
@@ -8,7 +8,6 @@ from Calculate_Process.Calculate import Calculate
 from merge_class.merge import merge
 import time
 import torch
-import os
 
 if __name__ == "__main__":
     # check whether the GPU is available
@@ -23,7 +22,7 @@
     tool.Set_Labels()
     tool.Set_Save_Roots()
 
-    Status = 2 # decides which dataset to use
+    Status = 1 # decides which dataset to use
     Labels = tool.Get_Data_Label()
     Trainig_Root, Testing_Root, Validation_Root = tool.Get_Save_Roots(Status) # the regular roots
     Generator_Root = tool.Get_Generator_Save_Roots(Status)
@@ -37,14 +36,13 @@
 
     Model_Name = "Xception" # a name recording which model is used (a pretrained model or a self-designed one)
     Experiment_Name = "Xception Skin to train Normal stomach cancer"
-    Generator_Batch_Size = 50
     Epoch = 10000
-    Train_Batch_Size = 50
+    Train_Batch_Size = 64
     Image_Size = 256
 
     Prepare = Load_Data_Prepare()
     loading_data = Load_ImageGenerator(Trainig_Root, Testing_Root, Validation_Root, Generator_Root, Labels, Image_Size)
-    experiment = experiments(Image_Size, Model_Name, Experiment_Name, Generator_Batch_Size, Epoch, Train_Batch_Size, tool, Classification, Status)
+    experiment = experiments(Image_Size, Model_Name, Experiment_Name, Epoch, Train_Batch_Size, tool, Classification, Status)
     image_processing = Read_image_and_Process_image(Image_Size)
     Merge = merge()
     Calculate_Tool = Calculate()
@@ -81,17 +79,7 @@
     start = time.time()
 
     trains_Data_Image = image_processing.Data_Augmentation_Image(training_data) # read the files
-
-    # total_trains, train_label = shuffle_data(trains_Data_Image, training_label) # shuffle the data
-    # training_data = list(total_trains) # convert the data type
-
-    training_data, train_label = image_processing.image_data_processing(trains_Data_Image, training_label) # normalize the loaded files and convert the labels to numpy arrays
-    Training_Dataset = tool.Convert_Data_To_DataSet_And_Put_To_Dataloader(training_data, train_label, Train_Batch_Size)
-
-    # inspect the DataLoader's shapes
-    for idx, data in enumerate(Training_Dataset):
-        datas = data[0]
-        print(f"Shape: {datas.shape}")
+    Training_Data, Training_Label = image_processing.image_data_processing(trains_Data_Image, training_label) # normalize the loaded files and convert the labels to numpy arrays
 
     # training_data = image_processing.normalization(training_data)
 
@@ -100,7 +88,7 @@
     end = time.time()
     print("\n\n\nTime to load the training data (70000): %f seconds\n\n" % (end - start))
 
-    loss, accuracy, precision, recall, AUC, f = experiment.processing_main(Training_Dataset, Run_Range) # run the training procedure
+    loss, accuracy, precision, recall, AUC, f = experiment.processing_main(Training_Data, Training_Label, Run_Range) # run the training procedure
     Calculate_Tool.Append_numbers(loss, accuracy, precision, recall, AUC, f)
 
     print("Experiment results")
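main.py no longer builds the training DataLoader itself, and the shape-inspection loop was dropped along with it; the equivalent check can be run through the new Training_Precesses object. A sketch with toy tensors (Convert_Data_To_DataSet is called with status = False here, since status = True pulls in the repo's Image_generator transform):

    import torch
    from torch.utils.data import DataLoader
    from Training_Tools.PreProcess import Training_Precesses

    train_x = [torch.randn(3, 256, 256) for _ in range(8)]  # toy images
    train_y = [torch.tensor([1.0, 0.0]) for _ in range(8)]  # toy one-hot labels

    pre = Training_Precesses(train_x, train_y, [], [])
    dataset = pre.Convert_Data_To_DataSet(train_x, train_y, status = False)
    loader = DataLoader(dataset, batch_size = 4, shuffle = True)

    for idx, (datas, labels) in enumerate(loader):
        print(f"Shape: {datas.shape}")  # the check main.py used to run inline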
diff --git a/test.ipynb b/test.ipynb
index cd5844d..aa43732 100644
--- a/test.ipynb
+++ b/test.ipynb
@@ -1857,6 +1857,36 @@
    "model.base_model.body.conv4.pointwise"
   ]
  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[(array([0, 3, 4, 5, 6, 7]), array([1, 2])), (array([0, 1, 2, 4, 6, 7]), array([3, 5])), (array([1, 2, 3, 5, 6, 7]), array([0, 4])), (array([0, 1, 2, 3, 4, 5, 6]), array([7])), (array([0, 1, 2, 3, 4, 5, 7]), array([6]))]\n",
+      "[0 1 2 3 4 7] [5 6]\n",
+      "[0 1 2 5 6 7] [3 4]\n",
+      "[1 2 3 4 5 6] [0 7]\n",
+      "[0 2 3 4 5 6 7] [1]\n",
+      "[0 1 3 4 5 6 7] [2]\n"
+     ]
+    }
+   ],
+   "source": [
+    "from sklearn.model_selection import KFold\n",
+    "\n",
+    "k = KFold(n_splits = 5, shuffle = True)\n",
+    "a = [1, 2, 3, 4, 5, 6, 7, 8]\n",
+    "\n",
+    "print(list(k.split(a)))\n",
+    "\n",
+    "for d, b in k.split(a):\n",
+    "    print(d, b)"
+   ]
+  },
  {
   "cell_type": "code",
   "execution_count": null,
@@ -1881,7 +1911,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.10"
+   "version": "3.11.11"
   }
 },
 "nbformat": 4,
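One caveat on the notebook cell above: KFold(n_splits = 5, shuffle = True) has no random_state, so every run yields different folds, unlike the fixed seed = 42 used in Convert_Data_To_DataSet. The reproducible variant:

    from sklearn.model_selection import KFold

    a = [1, 2, 3, 4, 5, 6, 7, 8]

    k = KFold(n_splits = 5, shuffle = True, random_state = 42)  # fixed seed -> identical folds every run
    for train_idx, val_idx in k.split(a):
        print(train_idx, val_idx)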