This continues from the previous post:

https://www.cnblogs.com/devilmaycry812839668/p/14988686.html

Here we use a self-built dataset instead; how does training behave in that case?

import mindspore
import numpy as np                    # NumPy, for numerical computing
import matplotlib.pyplot as plt      # plotting library
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore import ParameterTuple, Parameter
from mindspore import dtype as mstype
from mindspore import Model
import mindspore.dataset as ds
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.callback import LossMonitor

np.random.seed(123)  # random number seed


class Net(nn.Cell):
    def __init__(self, input_dims, output_dims):
        super(Net, self).__init__()
        self.matmul = ops.MatMul()
        self.weight_1 = Parameter(Tensor(np.random.randn(input_dims, 128), dtype=mstype.float32), name='weight_1')
        self.bias_1 = Parameter(Tensor(np.zeros(128), dtype=mstype.float32), name='bias_1')
        self.weight_2 = Parameter(Tensor(np.random.randn(128, 64), dtype=mstype.float32), name='weight_2')
        self.bias_2 = Parameter(Tensor(np.zeros(64), dtype=mstype.float32), name='bias_2')
        self.weight_3 = Parameter(Tensor(np.random.randn(64, output_dims), dtype=mstype.float32), name='weight_3')
        self.bias_3 = Parameter(Tensor(np.zeros(output_dims), dtype=mstype.float32), name='bias_3')

    def construct(self, x):
        x1 = self.matmul(x, self.weight_1) + self.bias_1
        x2 = self.matmul(x1, self.weight_2) + self.bias_2
        x3 = self.matmul(x2, self.weight_3) + self.bias_3
        return x3


def main():
    net = Net(1, 1)
    # loss function
    loss = nn.MSELoss()
    # optimizer
    optim = nn.SGD(params=net.trainable_params(), learning_rate=0.000001)
    # make net model
    model = Model(net, loss, optim, metrics={'loss': nn.Loss()})

    # dataset
    x, y = np.array([[0.1]], dtype=np.float32), np.array([[0.1]], dtype=np.float32)

    def generator_multidimensional():
        for i in range(10):
            a = x * i
            b = y * i
            print(a, b)
            yield (a, b)

    dataset = ds.GeneratorDataset(source=generator_multidimensional, column_names=["input", "output"])

    model.train(1, dataset, callbacks=LossMonitor(1), dataset_sink_mode=True)

    print('*' * 100)

    # the generator closes over x and y, so reassigning them here changes
    # the data the dataset yields from now on
    x, y = np.array([[0.2]], dtype=np.float32), np.array([[0.2]], dtype=np.float32)
    model.train(1, dataset, callbacks=LossMonitor(1), dataset_sink_mode=False)

    # right:
    # False, False
    # False, True
    # True, True
    # not right:
    # True, False


if __name__ == '__main__':
    """ set the execution context """
    from mindspore import context  # configure the MindSpore execution context
    # context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    import time
    a = time.time()
    main()
    b = time.time()
    # print(b - a)

Output:

WARNING: 'ControlDepend' is deprecated from version 1.1 and will be removed in a future version, use 'Depend' instead.
[WARNING] ME(22644:139765219266688,MainProcess):2021-07-09-03:51:42.606.193 [mindspore/ops/operations/array_ops.py:2302] WARN_DEPRECATED: The usage of Pack is deprecated. Please use Stack.
[[0.]] [[0.]]
[[0.1]] [[0.1]]
[[0.2]] [[0.2]]
[[0.3]] [[0.3]]
[[0.4]] [[0.4]]
[[0.5]] [[0.5]]
[[0.6]] [[0.6]]
[[0.7]] [[0.7]]
[[0.8]] [[0.8]]
[[0.90000004]] [[0.90000004]]
[[0.]] [[0.]]
[[0.1]] [[0.1]]
[[0.]] [[0.]]
[[0.1]] [[0.1]]
[[0.2]] [[0.2]]
[[0.3]] [[0.3]]
[[0.4]] [[0.4]]
[[0.5]] [[0.5]]
[[0.6]] [[0.6]]
[[0.7]] [[0.7]]
[[0.8]] [[0.8]]
[[0.90000004]] [[0.90000004]]
epoch: 1 step: 10, loss is 14095.578
****************************************************************************************************
[[0.]] [[0.]]
[[0.2]] [[0.2]]
[[0.4]] [[0.4]]
[[0.6]] [[0.6]]
[[0.8]] [[0.8]]
[[1.]] [[1.]]
[[1.2]] [[1.2]]
[[1.4]] [[1.4]]
[[1.6]] [[1.6]]
[[1.8000001]] [[1.8000001]]
[ERROR] ANALYZER(22644,python):2021-07-09-03:51:44.281.599 [mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc:74] Eval] Function construct_wrapper, The number of parameters of this function is 20, but the number of provided arguments is 22. NodeInfo:
Traceback (most recent call last):
  File "/tmp/pycharm_project_753/second_nnnew_line_regression.py", line 85, in <module>
    main()
  File "/tmp/pycharm_project_753/second_nnnew_line_regression.py", line 63, in main
    model.train(1, dataset, callbacks=LossMonitor(1), dataset_sink_mode=False)
  File "/usr/local/python-3.7.5/lib/python3.7/site-packages/mindspore/train/model.py", line 592, in train
    sink_size=sink_size)
  File "/usr/local/python-3.7.5/lib/python3.7/site-packages/mindspore/train/model.py", line 385, in _train
    self._train_process(epoch, train_dataset, list_callback, cb_params)
  File "/usr/local/python-3.7.5/lib/python3.7/site-packages/mindspore/train/model.py", line 513, in _train_process
    outputs = self._train_network(*next_element)
  File "/usr/local/python-3.7.5/lib/python3.7/site-packages/mindspore/nn/cell.py", line 322, in __call__
    out = self.compile_and_run(*inputs)
  File "/usr/local/python-3.7.5/lib/python3.7/site-packages/mindspore/nn/cell.py", line 578, in compile_and_run
    self.compile(*inputs)
  File "/usr/local/python-3.7.5/lib/python3.7/site-packages/mindspore/nn/cell.py", line 565, in compile
    _executor.compile(self, *inputs, phase=self.phase, auto_parallel_mode=self._auto_parallel_mode)
  File "/usr/local/python-3.7.5/lib/python3.7/site-packages/mindspore/common/api.py", line 505, in compile
    result = self._executor.compile(obj, args_list, phase, use_vm)
TypeError: mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.cc:74 Eval] Function construct_wrapper, The number of parameters of this function is 20, but the number of provided arguments is 22. NodeInfo:

Process finished with exit code 1

Repeated experiments show the following.

With the setting:

    model.train(1, dataset, callbacks=LossMonitor(1), dataset_sink_mode=True)

    print('*' * 100)

    x, y = np.array([[0.2]], dtype=np.float32), np.array([[0.2]], dtype=np.float32)
    model.train(1, dataset, callbacks=LossMonitor(1), dataset_sink_mode=False)

an error is raised.

Whereas with:

    # (True, True)
    model.train(1, dataset, callbacks=LossMonitor(1), dataset_sink_mode=True)
    print('*' * 100)
    x, y = np.array([[0.2]], dtype=np.float32), np.array([[0.2]], dtype=np.float32)
    model.train(1, dataset, callbacks=LossMonitor(1), dataset_sink_mode=True)

    # (False, False)
    model.train(1, dataset, callbacks=LossMonitor(1), dataset_sink_mode=False)
    print('*' * 100)
    x, y = np.array([[0.2]], dtype=np.float32), np.array([[0.2]], dtype=np.float32)
    model.train(1, dataset, callbacks=LossMonitor(1), dataset_sink_mode=False)

    # (False, True)
    model.train(1, dataset, callbacks=LossMonitor(1), dataset_sink_mode=False)
    print('*' * 100)
    x, y = np.array([[0.2]], dtype=np.float32), np.array([[0.2]], dtype=np.float32)
    model.train(1, dataset, callbacks=LossMonitor(1), dataset_sink_mode=True)

none of the three raises an error.

This shows that when the same model is trained multiple times on successive data, the dataset_sink_mode setting matters.

The recommendation here: if the same model will be trained several times, set dataset_sink_mode=False in every training call. Empirically, this gives the best chance of running without errors.
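For reference, here is a minimal sketch of the recommended pattern applied to the use case this post is about (train for some epochs, do other work, then come back and train further). It reuses net, loss, optim and dataset from the listing above; the epoch counts are arbitrary:

model = Model(net, loss, optim, metrics={'loss': nn.Loss()})

# first training phase
model.train(5, dataset, callbacks=LossMonitor(1), dataset_sink_mode=False)

# ... do other (non-training) work here: evaluation, logging, updating data ...

# resume training on the same model; keep dataset_sink_mode=False again
model.train(5, dataset, callbacks=LossMonitor(1), dataset_sink_mode=False)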

====================================================================

How large is the difference in computational efficiency between training through many model.train calls and through a single model.train call? In every variant below the total workload is identical (20000 epochs over the same 100-sample dataset); only the number of calls it is split across changes.

1.

A single model.train call training for 20000 epochs; the code is as follows:

import mindspore
import numpy as np                    # NumPy, for numerical computing
import matplotlib.pyplot as plt      # plotting library
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore import ParameterTuple, Parameter
from mindspore import dtype as mstype
from mindspore import Model
import mindspore.dataset as ds
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.callback import LossMonitor

np.random.seed(123)  # random number seed


class Net(nn.Cell):
    def __init__(self, input_dims, output_dims):
        super(Net, self).__init__()
        self.matmul = ops.MatMul()
        self.weight_1 = Parameter(Tensor(np.random.randn(input_dims, 128), dtype=mstype.float32), name='weight_1')
        self.bias_1 = Parameter(Tensor(np.zeros(128), dtype=mstype.float32), name='bias_1')
        self.weight_2 = Parameter(Tensor(np.random.randn(128, 64), dtype=mstype.float32), name='weight_2')
        self.bias_2 = Parameter(Tensor(np.zeros(64), dtype=mstype.float32), name='bias_2')
        self.weight_3 = Parameter(Tensor(np.random.randn(64, output_dims), dtype=mstype.float32), name='weight_3')
        self.bias_3 = Parameter(Tensor(np.zeros(output_dims), dtype=mstype.float32), name='bias_3')

    def construct(self, x):
        x1 = self.matmul(x, self.weight_1) + self.bias_1
        x2 = self.matmul(x1, self.weight_2) + self.bias_2
        x3 = self.matmul(x2, self.weight_3) + self.bias_3
        return x3


def main():
    net = Net(1, 1)
    # loss function
    loss = nn.MSELoss()
    # optimizer
    optim = nn.SGD(params=net.trainable_params(), learning_rate=0.000001)
    # make net model
    model = Model(net, loss, optim, metrics={'loss': nn.Loss()})

    # dataset
    x, y = np.array([[0.1]], dtype=np.float32), np.array([[0.1]], dtype=np.float32)

    def generator_multidimensional():
        for i in range(100):
            a = x * i
            b = y * i
            # print(a, b)
            yield (a, b)

    dataset = ds.GeneratorDataset(source=generator_multidimensional, column_names=["input", "output"])

    model.train(20000, dataset, callbacks=LossMonitor(100), dataset_sink_mode=False)

    """
    print('*' * 100)

    x, y = np.array([[0.2]], dtype=np.float32), np.array([[0.2]], dtype=np.float32)
    model.train(1, dataset, callbacks=LossMonitor(1), dataset_sink_mode=False)
    """

    # right:
    # False, False
    # False, True
    # True, True
    # not right:
    # True, False


if __name__ == '__main__':
    """ set the execution context """
    from mindspore import context  # configure the MindSpore execution context
    # context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    import time
    a = time.time()
    main()
    b = time.time()
    print(b - a)

Training times:

1464.53s

1460.07s

1452.64s

2.

Two model.train calls, each training for 10000 epochs; the code is as follows:

import mindspore
import numpy as np                    # NumPy, for numerical computing
import matplotlib.pyplot as plt      # plotting library
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore import ParameterTuple, Parameter
from mindspore import dtype as mstype
from mindspore import Model
import mindspore.dataset as ds
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.callback import LossMonitor

np.random.seed(123)  # random number seed


class Net(nn.Cell):
    def __init__(self, input_dims, output_dims):
        super(Net, self).__init__()
        self.matmul = ops.MatMul()
        self.weight_1 = Parameter(Tensor(np.random.randn(input_dims, 128), dtype=mstype.float32), name='weight_1')
        self.bias_1 = Parameter(Tensor(np.zeros(128), dtype=mstype.float32), name='bias_1')
        self.weight_2 = Parameter(Tensor(np.random.randn(128, 64), dtype=mstype.float32), name='weight_2')
        self.bias_2 = Parameter(Tensor(np.zeros(64), dtype=mstype.float32), name='bias_2')
        self.weight_3 = Parameter(Tensor(np.random.randn(64, output_dims), dtype=mstype.float32), name='weight_3')
        self.bias_3 = Parameter(Tensor(np.zeros(output_dims), dtype=mstype.float32), name='bias_3')

    def construct(self, x):
        x1 = self.matmul(x, self.weight_1) + self.bias_1
        x2 = self.matmul(x1, self.weight_2) + self.bias_2
        x3 = self.matmul(x2, self.weight_3) + self.bias_3
        return x3


def main():
    net = Net(1, 1)
    # loss function
    loss = nn.MSELoss()
    # optimizer
    optim = nn.SGD(params=net.trainable_params(), learning_rate=0.000001)
    # make net model
    model = Model(net, loss, optim, metrics={'loss': nn.Loss()})

    # dataset
    x, y = np.array([[0.1]], dtype=np.float32), np.array([[0.1]], dtype=np.float32)

    def generator_multidimensional():
        for i in range(100):
            a = x * i
            b = y * i
            # print(a, b)
            yield (a, b)

    dataset = ds.GeneratorDataset(source=generator_multidimensional, column_names=["input", "output"])

    model.train(10000, dataset, callbacks=LossMonitor(100), dataset_sink_mode=False)

    print('*' * 100)

    x, y = np.array([[0.1]], dtype=np.float32), np.array([[0.1]], dtype=np.float32)
    model.train(10000, dataset, callbacks=LossMonitor(100), dataset_sink_mode=False)

    # right:
    # False, False
    # False, True
    # True, True
    # not right:
    # True, False


if __name__ == '__main__':
    """ set the execution context """
    from mindspore import context  # configure the MindSpore execution context
    # context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    import time
    a = time.time()
    main()
    b = time.time()
    print(b - a)

Training times:

1460.29s

1454.51s

1457.07s

3.

Ten model.train calls, each training for 2000 epochs; the code is as follows:

import mindspore
import numpy as np                    # NumPy, for numerical computing
import matplotlib.pyplot as plt      # plotting library
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore import ParameterTuple, Parameter
from mindspore import dtype as mstype
from mindspore import Model
import mindspore.dataset as ds
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.callback import LossMonitor

np.random.seed(123)  # random number seed


class Net(nn.Cell):
    def __init__(self, input_dims, output_dims):
        super(Net, self).__init__()
        self.matmul = ops.MatMul()
        self.weight_1 = Parameter(Tensor(np.random.randn(input_dims, 128), dtype=mstype.float32), name='weight_1')
        self.bias_1 = Parameter(Tensor(np.zeros(128), dtype=mstype.float32), name='bias_1')
        self.weight_2 = Parameter(Tensor(np.random.randn(128, 64), dtype=mstype.float32), name='weight_2')
        self.bias_2 = Parameter(Tensor(np.zeros(64), dtype=mstype.float32), name='bias_2')
        self.weight_3 = Parameter(Tensor(np.random.randn(64, output_dims), dtype=mstype.float32), name='weight_3')
        self.bias_3 = Parameter(Tensor(np.zeros(output_dims), dtype=mstype.float32), name='bias_3')

    def construct(self, x):
        x1 = self.matmul(x, self.weight_1) + self.bias_1
        x2 = self.matmul(x1, self.weight_2) + self.bias_2
        x3 = self.matmul(x2, self.weight_3) + self.bias_3
        return x3


def main():
    net = Net(1, 1)
    # loss function
    loss = nn.MSELoss()
    # optimizer
    optim = nn.SGD(params=net.trainable_params(), learning_rate=0.000001)
    # make net model
    model = Model(net, loss, optim, metrics={'loss': nn.Loss()})

    # dataset
    x, y = np.array([[0.1]], dtype=np.float32), np.array([[0.1]], dtype=np.float32)

    def generator_multidimensional():
        for i in range(100):
            a = x * i
            b = y * i
            # print(a, b)
            yield (a, b)

    dataset = ds.GeneratorDataset(source=generator_multidimensional, column_names=["input", "output"])

    for i in range(10):
        print(i, '\t', '*' * 100)
        model.train(2000, dataset, callbacks=LossMonitor(100), dataset_sink_mode=False)

    # right:
    # False, False
    # False, True
    # True, True
    # not right:
    # True, False


if __name__ == '__main__':
    """ set the execution context """
    from mindspore import context  # configure the MindSpore execution context
    # context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    import time
    a = time.time()
    main()
    b = time.time()
    print(b - a)

Training time:

1457.52s

4.

100 model.train calls, each training for 200 epochs; the code is as follows:

import mindspore
import numpy as np                    # NumPy, for numerical computing
import matplotlib.pyplot as plt      # plotting library
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore import ParameterTuple, Parameter
from mindspore import dtype as mstype
from mindspore import Model
import mindspore.dataset as ds
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.callback import LossMonitor

np.random.seed(123)  # random number seed


class Net(nn.Cell):
    def __init__(self, input_dims, output_dims):
        super(Net, self).__init__()
        self.matmul = ops.MatMul()
        self.weight_1 = Parameter(Tensor(np.random.randn(input_dims, 128), dtype=mstype.float32), name='weight_1')
        self.bias_1 = Parameter(Tensor(np.zeros(128), dtype=mstype.float32), name='bias_1')
        self.weight_2 = Parameter(Tensor(np.random.randn(128, 64), dtype=mstype.float32), name='weight_2')
        self.bias_2 = Parameter(Tensor(np.zeros(64), dtype=mstype.float32), name='bias_2')
        self.weight_3 = Parameter(Tensor(np.random.randn(64, output_dims), dtype=mstype.float32), name='weight_3')
        self.bias_3 = Parameter(Tensor(np.zeros(output_dims), dtype=mstype.float32), name='bias_3')

    def construct(self, x):
        x1 = self.matmul(x, self.weight_1) + self.bias_1
        x2 = self.matmul(x1, self.weight_2) + self.bias_2
        x3 = self.matmul(x2, self.weight_3) + self.bias_3
        return x3


def main():
    net = Net(1, 1)
    # loss function
    loss = nn.MSELoss()
    # optimizer
    optim = nn.SGD(params=net.trainable_params(), learning_rate=0.000001)
    # make net model
    model = Model(net, loss, optim, metrics={'loss': nn.Loss()})

    # dataset
    x, y = np.array([[0.1]], dtype=np.float32), np.array([[0.1]], dtype=np.float32)

    def generator_multidimensional():
        for i in range(100):
            a = x * i
            b = y * i
            # print(a, b)
            yield (a, b)

    dataset = ds.GeneratorDataset(source=generator_multidimensional, column_names=["input", "output"])

    for i in range(100):
        print(i, '\t', '*' * 100)
        model.train(200, dataset, callbacks=LossMonitor(100), dataset_sink_mode=False)

    # right:
    # False, False
    # False, True
    # True, True
    # not right:
    # True, False


if __name__ == '__main__':
    """ set the execution context """
    from mindspore import context  # configure the MindSpore execution context
    # context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    import time
    a = time.time()
    main()
    b = time.time()
    print(b - a)

Training time:

1457.56s

5.

1000 model.train calls, each training for 20 epochs; the code differs from the previous variants only in its training loop, as sketched below.
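The post does not reproduce this listing; assuming the same scaffolding as the neighboring variants, only the training loop inside main() would change:

    # identical Net/main() scaffolding to the other variants; only the loop differs
    for i in range(1000):
        print(i, '\t', '*' * 100)
        model.train(20, dataset, callbacks=LossMonitor(100), dataset_sink_mode=False)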

6.

10000 model.train calls, each training for 2 epochs; the code is as follows:

import mindspore
import numpy as np                    # NumPy, for numerical computing
import matplotlib.pyplot as plt      # plotting library
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore import ParameterTuple, Parameter
from mindspore import dtype as mstype
from mindspore import Model
import mindspore.dataset as ds
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.callback import LossMonitor

np.random.seed(123)  # random number seed


class Net(nn.Cell):
    def __init__(self, input_dims, output_dims):
        super(Net, self).__init__()
        self.matmul = ops.MatMul()
        self.weight_1 = Parameter(Tensor(np.random.randn(input_dims, 128), dtype=mstype.float32), name='weight_1')
        self.bias_1 = Parameter(Tensor(np.zeros(128), dtype=mstype.float32), name='bias_1')
        self.weight_2 = Parameter(Tensor(np.random.randn(128, 64), dtype=mstype.float32), name='weight_2')
        self.bias_2 = Parameter(Tensor(np.zeros(64), dtype=mstype.float32), name='bias_2')
        self.weight_3 = Parameter(Tensor(np.random.randn(64, output_dims), dtype=mstype.float32), name='weight_3')
        self.bias_3 = Parameter(Tensor(np.zeros(output_dims), dtype=mstype.float32), name='bias_3')

    def construct(self, x):
        x1 = self.matmul(x, self.weight_1) + self.bias_1
        x2 = self.matmul(x1, self.weight_2) + self.bias_2
        x3 = self.matmul(x2, self.weight_3) + self.bias_3
        return x3


def main():
    net = Net(1, 1)
    # loss function
    loss = nn.MSELoss()
    # optimizer
    optim = nn.SGD(params=net.trainable_params(), learning_rate=0.000001)
    # make net model
    model = Model(net, loss, optim, metrics={'loss': nn.Loss()})

    # dataset
    x, y = np.array([[0.1]], dtype=np.float32), np.array([[0.1]], dtype=np.float32)

    def generator_multidimensional():
        for i in range(100):
            a = x * i
            b = y * i
            # print(a, b)
            yield (a, b)

    dataset = ds.GeneratorDataset(source=generator_multidimensional, column_names=["input", "output"])

    for i in range(10000):
        print(i, '\t', '*' * 100)
        model.train(2, dataset, callbacks=[LossMonitor(100)], dataset_sink_mode=False)

    # right:
    # False, False
    # False, True
    # True, True
    # not right:
    # True, False


if __name__ == '__main__':
    """ set the execution context """
    from mindspore import context  # configure the MindSpore execution context
    # context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    import time
    a = time.time()
    main()
    b = time.time()
    print(b - a)

Training time:

1464.64s

7.

20000 model.train calls, each training for 1 epoch; the code is as follows:

import mindspore
import numpy as np                    # NumPy, for numerical computing
import matplotlib.pyplot as plt      # plotting library
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore import ParameterTuple, Parameter
from mindspore import dtype as mstype
from mindspore import Model
import mindspore.dataset as ds
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
from mindspore.train.callback import LossMonitor

np.random.seed(123)  # random number seed


class Net(nn.Cell):
    def __init__(self, input_dims, output_dims):
        super(Net, self).__init__()
        self.matmul = ops.MatMul()
        self.weight_1 = Parameter(Tensor(np.random.randn(input_dims, 128), dtype=mstype.float32), name='weight_1')
        self.bias_1 = Parameter(Tensor(np.zeros(128), dtype=mstype.float32), name='bias_1')
        self.weight_2 = Parameter(Tensor(np.random.randn(128, 64), dtype=mstype.float32), name='weight_2')
        self.bias_2 = Parameter(Tensor(np.zeros(64), dtype=mstype.float32), name='bias_2')
        self.weight_3 = Parameter(Tensor(np.random.randn(64, output_dims), dtype=mstype.float32), name='weight_3')
        self.bias_3 = Parameter(Tensor(np.zeros(output_dims), dtype=mstype.float32), name='bias_3')

    def construct(self, x):
        x1 = self.matmul(x, self.weight_1) + self.bias_1
        x2 = self.matmul(x1, self.weight_2) + self.bias_2
        x3 = self.matmul(x2, self.weight_3) + self.bias_3
        return x3


def main():
    net = Net(1, 1)
    # loss function
    loss = nn.MSELoss()
    # optimizer
    optim = nn.SGD(params=net.trainable_params(), learning_rate=0.000001)
    # make net model
    model = Model(net, loss, optim, metrics={'loss': nn.Loss()})

    # dataset
    x, y = np.array([[0.1]], dtype=np.float32), np.array([[0.1]], dtype=np.float32)

    def generator_multidimensional():
        for i in range(100):
            a = x * i
            b = y * i
            # print(a, b)
            yield (a, b)

    dataset = ds.GeneratorDataset(source=generator_multidimensional, column_names=["input", "output"])

    for i in range(20000):
        # print(i, '\t', '*' * 100)
        model.train(1, dataset, callbacks=[LossMonitor(100)], dataset_sink_mode=False)

    # right:
    # False, False
    # False, True
    # True, True
    # not right:
    # True, False


if __name__ == '__main__':
    """ set the execution context """
    from mindspore import context  # configure the MindSpore execution context
    # context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')

    import time
    a = time.time()
    main()
    b = time.time()
    print(b - a)

Training times:

1475.06s

1469.48s

1475.70s

1471.46s

=========================================================

Judging from the measurements, splitting training across many model.train calls is indeed slightly slower than a single model.train call, but given that this makes it possible to interleave other work with training, the performance cost is entirely acceptable.
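As a rough back-of-the-envelope estimate of the per-call cost (an assumption: the entire gap between the slowest 20000-call run and the fastest single-call run is attributed to the extra model.train invocations):

one_call = 1452.64    # fastest run: 1 call of model.train(20000, ...)
many_calls = 1475.70  # slowest run: 20000 calls of model.train(1, ...)
overhead_ms = (many_calls - one_call) / 20000 * 1000
print(round(overhead_ms, 2), 'ms of extra overhead per model.train call')  # ~1.15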

The experiments in this post were run in the MindSpore 1.1 Docker image.

Host: Ubuntu 18.04

CPU: Intel i7-8700

GPU: NVIDIA 1060ti
