From c1320aa09a057475f1f9ae3cbdc1d36e3ad83dff Mon Sep 17 00:00:00 2001 From: Brainway Date: Mon, 6 Feb 2023 06:17:57 +0000 Subject: [PATCH] update hello.py. --- hello.py | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/hello.py b/hello.py index 3a6f262..0b7ebc7 100644 --- a/hello.py +++ b/hello.py @@ -23,18 +23,18 @@ import numpy as np def net(): - model = ViT( - image_size = 600, - patch_size = 30, - num_classes = 5, - dim = 1024, - depth = 6, - max_tokens_per_depth = (256, 128, 64, 32, 16, 8), # a tuple that denotes the maximum number of tokens that any given layer should have. if the layer has greater than this amount, it will undergo adaptive token sampling - heads = 16, - mlp_dim = 2048, - dropout = 0.1, - emb_dropout = 0.1 -) +# model = ViT( +# image_size = 600, +# patch_size = 30, +# num_classes = 5, +# dim = 1024, +# depth = 6, +# max_tokens_per_depth = (256, 128, 64, 32, 16, 8), # a tuple that denotes the maximum number of tokens that any given layer should have. if the layer has greater than this amount, it will undergo adaptive token sampling +# heads = 16, +# mlp_dim = 2048, +# dropout = 0.1, +# emb_dropout = 0.1 +# ) # modelv = ViT( # image_size = 600, @@ -52,15 +52,15 @@ def net(): # ) -# model = NesT( -# image_size = 600, -# patch_size = 30, -# dim = 256, -# heads = 16, -# num_hierarchies = 3, # number of hierarchies -# block_repeats = (2, 2, 12), # the number of transformer blocks at each heirarchy, starting from the bottom -# num_classes = 5 -# ) + model = NesT( + image_size = 600, + patch_size = 30, + dim = 256, + heads = 18,#16 + num_hierarchies = 3, # number of hierarchies + block_repeats = (3, 3, 16), # (2,2,12) the number of transformer blocks at each hierarchy, starting from the bottom + num_classes = 5 +) # model = CrossFormer( #图片尺寸要是7的倍数,如448 # num_classes = 5, # number of output classes