import warnings
from collections import namedtuple
from typing import Optional, Tuple, List, Callable, Any

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor

from .._internally_replaced_utils import load_state_dict_from_url
from ..utils import _log_api_usage_once

__all__ = ["GoogLeNet", "googlenet", "GoogLeNetOutputs", "_GoogLeNetOutputs"]

model_urls = {
    # GoogLeNet ported from TensorFlow
    "googlenet": "https://download.pytorch.org/models/googlenet-1378be20.pth",
}

# Named return type used when the model is in training mode with aux heads enabled.
GoogLeNetOutputs = namedtuple("GoogLeNetOutputs", ["logits", "aux_logits2", "aux_logits1"])
GoogLeNetOutputs.__annotations__ = {
    "logits": Tensor,
    "aux_logits2": Optional[Tensor],
    "aux_logits1": Optional[Tensor],
}

# Script annotations failed with _GoogleNetOutputs = namedtuple ...
# _GoogLeNetOutputs set here for backwards compat
_GoogLeNetOutputs = GoogLeNetOutputs


class GoogLeNet(nn.Module):
    """GoogLeNet (Inception v1) network.

    Args:
        num_classes: number of output classes of the final classifier.
        aux_logits: if True, build the two auxiliary classifier heads
            (``aux1`` after inception4a, ``aux2`` after inception4d).
        transform_input: if True, re-normalize inputs from the [-1, 1]
            convention to the ImageNet mean/std convention in ``_transform_input``.
        init_weights: if True, (re-)initialize conv/linear weights with a
            truncated normal. ``None`` keeps the legacy default (True) and
            emits a FutureWarning.
        blocks: optional [conv, inception, inception-aux] module factories,
            allowing the building blocks to be swapped out.
        dropout: dropout probability before the final ``fc``.
        dropout_aux: dropout probability inside the auxiliary heads.
    """

    __constants__ = ["aux_logits", "transform_input"]

    def __init__(
        self,
        num_classes: int = 1000,
        aux_logits: bool = True,
        transform_input: bool = False,
        init_weights: Optional[bool] = None,
        blocks: Optional[List[Callable[..., nn.Module]]] = None,
        dropout: float = 0.2,
        dropout_aux: float = 0.7,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)
        if blocks is None:
            blocks = [BasicConv2d, Inception, InceptionAux]
        if init_weights is None:
            warnings.warn(
                "The default weight initialization of GoogleNet will be changed in future releases of "
                "torchvision. If you wish to keep the old behavior (which leads to long initialization times"
                " due to scipy/scipy#11299), please set init_weights=True.",
                FutureWarning,
            )
            init_weights = True
        assert len(blocks) == 3
        conv_block = blocks[0]
        inception_block = blocks[1]
        inception_aux_block = blocks[2]

        self.aux_logits = aux_logits
        self.transform_input = transform_input

        # Stem: 224x224x3 -> 28x28x192
        self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
        self.conv2 = conv_block(64, 64, kernel_size=1)
        self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)
        self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)

        # Inception stage 3 (28x28)
        self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)

        # Inception stage 4 (14x14)
        self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        # Inception stage 5 (7x7)
        self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)

        if aux_logits:
            self.aux1 = inception_aux_block(512, num_classes, dropout=dropout_aux)
            self.aux2 = inception_aux_block(528, num_classes, dropout=dropout_aux)
        else:
            self.aux1 = None  # type: ignore[assignment]
            self.aux2 = None  # type: ignore[assignment]

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(p=dropout)
        self.fc = nn.Linear(1024, num_classes)

        if init_weights:
            for m in self.modules():
                if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                    torch.nn.init.trunc_normal_(m.weight, mean=0.0, std=0.01, a=-2, b=2)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)

    def _transform_input(self, x: Tensor) -> Tensor:
        # Re-normalize per-channel from the [-1, 1] (0.5 mean/std) convention
        # to the ImageNet mean/std the pretrained weights expect.
        if self.transform_input:
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        return x

    def _forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
        """Run the backbone; returns ``(logits, aux2, aux1)``.

        The aux tensors are None unless the corresponding head exists and
        the model is in training mode.
        """
        # N x 3 x 224 x 224
        x = self.conv1(x)
        # N x 64 x 112 x 112
        x = self.maxpool1(x)
        # N x 64 x 56 x 56
        x = self.conv2(x)
        # N x 64 x 56 x 56
        x = self.conv3(x)
        # N x 192 x 56 x 56
        x = self.maxpool2(x)
        # N x 192 x 28 x 28
        x = self.inception3a(x)
        # N x 256 x 28 x 28
        x = self.inception3b(x)
        # N x 480 x 28 x 28
        x = self.maxpool3(x)
        # N x 480 x 14 x 14
        x = self.inception4a(x)
        # N x 512 x 14 x 14
        aux1: Optional[Tensor] = None
        if self.aux1 is not None:
            if self.training:
                aux1 = self.aux1(x)

        x = self.inception4b(x)
        # N x 512 x 14 x 14
        x = self.inception4c(x)
        # N x 512 x 14 x 14
        x = self.inception4d(x)
        # N x 528 x 14 x 14
        aux2: Optional[Tensor] = None
        if self.aux2 is not None:
            if self.training:
                aux2 = self.aux2(x)

        x = self.inception4e(x)
        # N x 832 x 14 x 14
        x = self.maxpool4(x)
        # N x 832 x 7 x 7
        x = self.inception5a(x)
        # N x 832 x 7 x 7
        x = self.inception5b(x)
        # N x 1024 x 7 x 7
        x = self.avgpool(x)
        # N x 1024 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 1024
        x = self.dropout(x)
        x = self.fc(x)
        # N x 1000 (num_classes)
        return x, aux2, aux1

    @torch.jit.unused
    def eager_outputs(self, x: Tensor, aux2: Tensor, aux1: Optional[Tensor]) -> GoogLeNetOutputs:
        # In eager mode, only return the namedtuple when the aux heads are live.
        if self.training and self.aux_logits:
            return _GoogLeNetOutputs(x, aux2, aux1)
        else:
            return x  # type: ignore[return-value]

    def forward(self, x: Tensor) -> GoogLeNetOutputs:
        x = self._transform_input(x)
        # BUGFIX: _forward returns (logits, aux2, aux1); the previous
        # `x, aux1, aux2 = ...` unpacked the two auxiliary outputs in the
        # wrong order, swapping aux_logits1/aux_logits2 in the result.
        x, aux2, aux1 = self._forward(x)
        aux_defined = self.training and self.aux_logits
        if torch.jit.is_scripting():
            if not aux_defined:
                warnings.warn("Scripted GoogleNet always returns GoogleNetOutputs Tuple")
            return GoogLeNetOutputs(x, aux2, aux1)
        else:
            return self.eager_outputs(x, aux2, aux1)


class Inception(nn.Module):
    """Inception block: four parallel branches concatenated on channels."""

    def __init__(
        self,
        in_channels: int,
        ch1x1: int,
        ch3x3red: int,
        ch3x3: int,
        ch5x5red: int,
        ch5x5: int,
        pool_proj: int,
        conv_block: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if conv_block is None:
            conv_block = BasicConv2d
        self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1)

        self.branch2 = nn.Sequential(
            conv_block(in_channels, ch3x3red, kernel_size=1), conv_block(ch3x3red, ch3x3, kernel_size=3, padding=1)
        )

        self.branch3 = nn.Sequential(
            conv_block(in_channels, ch5x5red, kernel_size=1),
            # Here, kernel_size=3 instead of kernel_size=5 is a known bug.
            # Please see https://github.com/pytorch/vision/issues/906 for details.
            conv_block(ch5x5red, ch5x5, kernel_size=3, padding=1),
        )

        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
            conv_block(in_channels, pool_proj, kernel_size=1),
        )

    def _forward(self, x: Tensor) -> List[Tensor]:
        branch1 = self.branch1(x)
        branch2 = self.branch2(x)
        branch3 = self.branch3(x)
        branch4 = self.branch4(x)

        outputs = [branch1, branch2, branch3, branch4]
        return outputs

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        # Concatenate the branch outputs along the channel dimension.
        return torch.cat(outputs, 1)


class InceptionAux(nn.Module):
    """Auxiliary classifier head attached to an intermediate feature map."""

    def __init__(
        self,
        in_channels: int,
        num_classes: int,
        conv_block: Optional[Callable[..., nn.Module]] = None,
        dropout: float = 0.7,
    ) -> None:
        super().__init__()
        if conv_block is None:
            conv_block = BasicConv2d
        self.conv = conv_block(in_channels, 128, kernel_size=1)
        # 128 channels * 4 * 4 spatial = 2048 features after the pool + conv.
        self.fc1 = nn.Linear(2048, 1024)
        self.fc2 = nn.Linear(1024, num_classes)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x: Tensor) -> Tensor:
        # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
        x = F.adaptive_avg_pool2d(x, (4, 4))
        # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
        x = self.conv(x)
        # N x 128 x 4 x 4
        x = torch.flatten(x, 1)
        # N x 2048
        x = F.relu(self.fc1(x), inplace=True)
        # N x 1024
        x = self.dropout(x)
        # N x 1024
        x = self.fc2(x)
        # N x 1000 (num_classes)
        return x


class BasicConv2d(nn.Module):
    """Conv2d (no bias) + BatchNorm + ReLU, the basic GoogLeNet conv unit."""

    def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None:
        super().__init__()
        # Bias is redundant because BatchNorm immediately re-centers the output.
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x: Tensor) -> Tensor:
        x = self.conv(x)
        x = self.bn(x)
        return F.relu(x, inplace=True)
def googlenet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> GoogLeNet:
    r"""GoogLeNet (Inception v1) model architecture from
    `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
    The required minimum input size of the model is 15x15.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        aux_logits (bool): If True, adds two auxiliary branches that can improve training.
            Default: *False* when pretrained is True otherwise *True*
        transform_input (bool): If True, preprocesses the input according to the method with which it
            was trained on ImageNet. Default: True if ``pretrained=True``, else False.
    """
    if not pretrained:
        return GoogLeNet(**kwargs)

    # Pretrained defaults: the checkpoint was trained with the input
    # transform and its auxiliary heads are not trained. Only applied when
    # the caller did not pass the kwargs explicitly.
    kwargs.setdefault("transform_input", True)
    kwargs.setdefault("aux_logits", False)
    if kwargs["aux_logits"]:
        warnings.warn(
            "auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them"
        )
    original_aux_logits = kwargs["aux_logits"]
    # The checkpoint contains weights for both aux heads, so the model must
    # be built with them present for load_state_dict to succeed; they are
    # stripped afterwards if the caller did not ask for them.
    kwargs["aux_logits"] = True
    kwargs["init_weights"] = False
    model = GoogLeNet(**kwargs)
    state_dict = load_state_dict_from_url(model_urls["googlenet"], progress=progress)
    model.load_state_dict(state_dict)
    if not original_aux_logits:
        model.aux_logits = False
        model.aux1 = None  # type: ignore[assignment]
        model.aux2 = None  # type: ignore[assignment]
    return model
Docs
Access comprehensive developer documentation for PyTorch
To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: Cookies Policy.