代码拉取完成,页面将自动刷新
| name | about | labels |
|---|---|---|
| Bug Report | Use this template for reporting a bug | kind/bug |
Hardware Environment (Ascend/GPU/CPU): GPU/device gpu
class DataParallelNet(Cell, MetaFactory):
    """Reproduction network for an auto_parallel training bug report.

    Forward pipeline: ReLU -> Conv2d -> ReduceMean over the last two axes
    -> one of two Dense layers (selected by the stored ``index`` parameter)
    -> cast to int32 -> Embedding lookup -> ReduceMean.

    Args:
        in_channel: input channel count for the Conv2d layer.
        out_channel: output channel count of Conv2d and the in/out width of
            both Dense layers.
        strategy1: optional parallel strategy attached to ``fc1``'s matmul
            primitive via ``set_strategy``; skipped when None.
        strategy2: optional parallel strategy attached to ``fc2``'s matmul
            primitive; skipped when None.
    """

    def __init__(self, in_channel, out_channel, strategy1=None, strategy2=None):
        super().__init__()
        MetaFactory.__init__(self)
        self.relu = nn.ReLU()
        # NOTE(review): relu6 is constructed but never called in construct().
        self.relu6 = nn.ReLU6()
        self.conv = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=2, stride=1, has_bias=False,
                              weight_init='ones', pad_mode='same')
        self.mean = P.ReduceMean(keep_dims=False)
        self.fc1 = nn.Dense(in_channels=out_channel, out_channels=out_channel, weight_init='ones', bias_init='zeros',
                            has_bias=True)
        self.fc2 = nn.Dense(in_channels=out_channel, out_channels=out_channel, weight_init='ones', bias_init='zeros',
                            has_bias=False)
        # Optional shard strategies go directly on the Dense layers' matmul primitives.
        if strategy1 is not None:
            self.fc1.matmul.set_strategy(strategy1)
        if strategy2 is not None:
            self.fc2.matmul.set_strategy(strategy2)
        # Non-trainable index parameter used in construct() to pick fc1 vs fc2.
        self.i = Parameter(Tensor(0, mstype.int32), name="index", requires_grad=False)
        self.funcs = (self.fc1, self.fc2)
        self.cast = P.Cast()
        # Embedding table: 128 ids x 16 dims, small constant weights (1e-5).
        weight_np = np.ones([128, 16]).astype(np.float32) * 0.00001
        self.embedding = nn.Embedding(128, 16, False, Tensor(weight_np))

    def construct(self, x):
        """Forward pass. Shape comments assume input (128, 3, 2, 1024) — TODO confirm."""
        x = self.relu(x)  # (128, 3, 2, 1024)
        # x = self.relu6(x)  # (128, 3, 2, 1024)
        x = self.conv(x)  # (128, 12, 2, 1024)
        x = self.mean(x, (2, 3))  # reduce last two axes -> (128, 12)
        # x = self.fc1(x)  # (128, 12)
        # Dynamic layer selection indexed by the stored Parameter.
        x = self.funcs[self.i](x)
        x = self.cast(x, ms.int32)  # (128, 12), int32 ids for the embedding
        # Presumably (128, 12, 16) given the 16-dim table; original comment
        # said (128, 12, 3) — verify against the actual run.
        x = self.embedding(x)
        # NOTE(review): (2) is the int 2, not a tuple — reduces axis 2 only.
        x = self.mean(x, (2))  # (128, 12)
        return x
Train this network using auto_parallel mode.
Expected result: training finishes normally.
已转需求。
此处可能存在不合适展示的内容,页面不予展示。您可通过相关编辑功能自查并修改。
如您确认内容无涉及 不当用语 / 纯广告导流 / 暴力 / 低俗色情 / 侵权 / 盗版 / 虚假 / 无价值内容或违法国家有关法律法规的内容,可点击提交进行申诉,我们将尽快为您处理。
登录 后才可以发表评论