<?xml version="1.0" encoding="UTF-8"?>
<net name="paddle-onnx" version="11">
<layers>
<layer id="0" name="x" type="Parameter" version="opset1">
<data shape="1,3,224,224" element_type="f32"/>
<rt_info>
<attribute name="fused_names" version="0" value="x"/>
</rt_info>
<output>
<port id="0" precision="FP32" names="x">
<dim>1</dim>
<dim>3</dim>
<dim>224</dim>
<dim>224</dim>
</port>
</output>
</layer>
<layer id="1" name="Multiply_8831" type="Const" version="opset1">
<data element_type="f32" shape="32, 3, 3, 3" offset="0" size="3456"/>
<output>
<port id="0" precision="FP32">
<dim>32</dim>
<dim>3</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="2" name="Multiply_8439" type="Convolution" version="opset1">
<data strides="2, 2" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
<rt_info>
<attribute name="fused_names" version="0" value="batch_norm_0.b_0, batch_norm_0.tmp_3, batch_norm_0.w_0, batch_norm_0.w_1, batch_norm_0.w_2, conv2d_55.tmp_0"/>
</rt_info>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>224</dim>
<dim>224</dim>
</port>
<port id="1" precision="FP32">
<dim>32</dim>
<dim>3</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>112</dim>
<dim>112</dim>
</port>
</output>
</layer>
<layer id="3" name="Constant_8444" type="Const" version="opset1">
<data element_type="f32" shape="1, 32, 1, 1" offset="3456" size="128"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="4" name="batch_norm_0.tmp_3" type="Add" version="opset1">
<data auto_broadcast="numpy"/>
<rt_info>
<attribute name="fused_names" version="0" value="batch_norm_0.b_0, batch_norm_0.tmp_3, batch_norm_0.w_0, batch_norm_0.w_1, batch_norm_0.w_2"/>
</rt_info>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>112</dim>
<dim>112</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="batch_norm_0.tmp_3">
<dim>1</dim>
<dim>32</dim>
<dim>112</dim>
<dim>112</dim>
</port>
</output>
</layer>
<layer id="5" name="relu_0.tmp_0" type="ReLU" version="opset1">
<rt_info>
<attribute name="fused_names" version="0" value="relu_0.tmp_0"/>
</rt_info>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>112</dim>
<dim>112</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="relu_0.tmp_0">
<dim>1</dim>
<dim>32</dim>
<dim>112</dim>
<dim>112</dim>
</port>
</output>
</layer>
<layer id="6" name="Multiply_8840" type="Const" version="opset1">
<data element_type="f32" shape="32, 32, 3, 3" offset="3584" size="36864"/>
<output>
<port id="0" precision="FP32">
<dim>32</dim>
<dim>32</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="7" name="Multiply_8446" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
<rt_info>
<attribute name="fused_names" version="0" value="batch_norm_1.b_0, batch_norm_1.tmp_3, batch_norm_1.w_0, batch_norm_1.w_1, batch_norm_1.w_2, conv2d_56.tmp_0"/>
</rt_info>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>112</dim>
<dim>112</dim>
</port>
<port id="1" precision="FP32">
<dim>32</dim>
<dim>32</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>112</dim>
<dim>112</dim>
</port>
</output>
</layer>
<layer id="8" name="Constant_8451" type="Const" version="opset1">
<data element_type="f32" shape="1, 32, 1, 1" offset="40448" size="128"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="9" name="batch_norm_1.tmp_3" type="Add" version="opset1">
<data auto_broadcast="numpy"/>
<rt_info>
<attribute name="fused_names" version="0" value="batch_norm_1.b_0, batch_norm_1.tmp_3, batch_norm_1.w_0, batch_norm_1.w_1, batch_norm_1.w_2"/>
</rt_info>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>112</dim>
<dim>112</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="batch_norm_1.tmp_3">
<dim>1</dim>
<dim>32</dim>
<dim>112</dim>
<dim>112</dim>
</port>
</output>
</layer>
<layer id="10" name="relu_1.tmp_0" type="ReLU" version="opset1">
<rt_info>
<attribute name="fused_names" version="0" value="relu_1.tmp_0"/>
</rt_info>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>112</dim>
<dim>112</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="relu_1.tmp_0">
<dim>1</dim>
<dim>32</dim>
<dim>112</dim>
<dim>112</dim>
</port>
</output>
</layer>
<layer id="11" name="Multiply_8849" type="Const" version="opset1">
<data element_type="f32" shape="64, 32, 3, 3" offset="40576" size="73728"/>
<output>
<port id="0" precision="FP32">
<dim>64</dim>
<dim>32</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="12" name="Multiply_8453" type="Convolution" version="opset1">
<data strides="1, 1" dilations="1, 1" pads_begin="1, 1" pads_end="1, 1" auto_pad="explicit"/>
<rt_info>
<attribute name="fused_names" version="0" value="batch_norm_2.b_0, batch_norm_2.tmp_3, batch_norm_2.w_0, batch_norm_2.w_1, batch_norm_2.w_2, conv2d_57.tmp_0"/>
</rt_info>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>32</dim>
<dim>112</dim>
<dim>112</dim>
</port>
<port id="1" precision="FP32">
<dim>64</dim>
<dim>32</dim>
<dim>3</dim>
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>64</dim>
<dim>112</dim>
<dim>112</dim>
</port>
</output>
</layer>
<layer id="13" name="Constant_8458" type="Const" version="opset1">
<data element_type="f32" shape="1, 64, 1, 1" offset="114304" size="256"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>64</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="14" name="batch_norm_2.tmp_3" type="Add" version="opset1">
<data auto_broadcast="numpy"/>
<rt_info>
<attribute name="fused_names" version="0" value="batch_norm_2.b_0, batch_norm_2.tmp_3, batch_norm_2.w_0, batch_norm_2.w_1, batch_norm_2.w_2"/>
</rt_info>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>64</dim>
<dim>112</dim>
<dim>112</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>64</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="batch_norm_2.tmp_3">
<dim>1</dim>
<dim>64</dim>
<dim>112</dim>
<dim>112</dim>
</port>
</output>
</layer>
<layer id="15" name="relu_2.tmp_0" type="ReLU" version="opset1">
<rt_info>
<attribute name="fused_names" version="0" value="relu_2.tmp_0"/>
</rt_info>
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>64</dim>
<dim>112</dim>
<dim>112</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="relu_2.tmp_0">
<dim>1</dim>
<dim>64</dim>
<dim>112</dim>
<dim>112</dim>
</port>
</output>
</layer>
</layers>
<!-- NOTE(review): the original file was truncated at this point and its tail was
     replaced by scraped web page text (CSDN download page chrome) during
     extraction. The remaining ResNet50 layers, the <edges> section, and the
     original closing tags are missing. The elements above restore XML
     well-formedness by completing layer 15 following the pattern of the
     identical ReLU layers 5 and 10 (ReLU preserves its 1x64x112x112 input
     shape from layer 14), but the model graph itself is incomplete and this
     file cannot be loaded as a full IR without the missing remainder. -->
</net>