Skip to content

Commit 2f8c2e2

Browse files
committed
deepxIR:ir列表
1 parent 51a07f8 commit 2f8c2e2

3 files changed

Lines changed: 43 additions & 20 deletions

File tree

doc/deepxIR/ir.md

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
# IR设计说明
2+
3+
1.除了newtensor以外,其他IR均不创建新的张量,而是引用已有的张量
4+
2.IR的输入输出均使用张量名,而不是张量指针(后期IR可能支持直接用值)
5+
3.命名,t开头为张量名,a开头为参数名,v开头为vector名
6+
## IR列表
7+
8+
| IR | 说明 | 例子 |例子作用|
9+
| --- | --- | --- | --- |
10+
| argset | 设置参数 | argset@int32 1->a1 |设置a1为int32类型,值为1|
11+
| argset | 设置参数 | argset@int32 2->a2 |设置a2为int32类型,值为2|
12+
| argset | 设置vector参数 | argset@int32 1 2 3->vec1 |设置vec1为int32类型,值为1 2 3|
13+
| argset | 设置vector参数 | argset@int32 0 1 2->vec2 |设置vec2为int32类型,值为0 1 2|
14+
| argdel | 删除参数 | argdel a |删除a参数|
15+
| newtensor | 创建张量 | newtensor@int32 vec1->t1 |创建一个int32类型的张量t1,并从vec1中复制数据|
16+
| constant | tensor初始化-填充固定值 | constant@int32 a1->t1 |给t1填充固定值,值引用a1|
17+
| arange | tensor初始化-生成序列 | arange@int32 a1 a2->t1 |给t1生成序列,从a1开始,步长为a2|
18+
| uniform | tensor初始化-均匀分布 | uniform@int32 a1 a2->t1 |给t1生成均匀分布,low为a1,high为a2|
19+
| add | 矩阵加法 | add@float32 t1 t2->t3 |t3=t1+t2|
20+
| add_scalar | 矩阵加法 | add_scalar@float32 t1 a1->t3 |t3=t1+a1,a1为常数|
21+
| sub | 矩阵减法 | sub@float32 t1 t2->t3 |t3=t1-t2|
22+
| mul | 矩阵乘法 | mul@float32 t1 t2->t3 |t3=t1*t2|
23+
| mul_scalar | 矩阵乘法 | mul_scalar@float32 t1 a1->t3 |t3=t1*a1,a1为常数|
24+
| div | 除法 | div@float32 t1 t2->t3 |t3=t1/t2|
25+
| div_scalar | 除法 | div_scalar@float32 t1 a1->t3 |t3=t1/a1,a1为常数|
26+
| mod (还没实现)| 取模 | mod@float32 t1 t2->t3 |t3=t1%t2|
27+
| mod_scalar (还没实现) | 取模 | mod_scalar@float32 t1 a1->t3 |t3=t1%a1,a1为常数|
28+
| exp | 指数 | exp@float32 t1->t3 |t3=exp(t1)|
29+
| sqrt | 平方根 | sqrt@float32 t1->t3 |t3=sqrt(t1)|
30+
| log | 对数 | log@float32 t1->t3 |t3=log(t1)|
31+
| sum | 规约计算-按dims求和 | sum@float32 t1 vec2->t3 |t3=sum(t1,dims=vec2),按vec2的维度求和|
32+
| max | 逐元素求最大值 | max@float32 t1 t2->t3 |t3=max(t1,t2) |
33+
| max_scalar | 逐元素与常数求最大值 | max_scalar@float32 t1 a1->t3 |t3=max(t1,a1),a1为常数|
34+
| min | 逐元素求最小值 | min@float32 t1 t2->t3 |t3=min(t1,t2) |
35+
| min_scalar | 逐元素与常数求最小值 | min_scalar@float32 t1 a1->t3 |t3=min(t1,a1),a1为常数|
36+

excuter/op-mem-ompsimd/src/deepx/mem/mem.hpp renamed to excuter/common/src/deepx/mem/mem.hpp

Lines changed: 7 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -19,19 +19,6 @@ namespace deepx::mem
1919

2020
std::unordered_map<std::string, std::shared_ptr<TensorBase>> mem;
2121
int tempidx = 0;
22-
23-
// template <typename T>
24-
// static std::shared_ptr<void> type_erase(const std::shared_ptr<Tensor<T>> &ptr)
25-
// {
26-
// return std::static_pointer_cast<void>(ptr);
27-
// }
28-
29-
// template <typename T>
30-
// static std::shared_ptr<Tensor<T>> type_restore(const std::shared_ptr<void> &ptr)
31-
// {
32-
// return std::static_pointer_cast<Tensor<T>>(ptr);
33-
// }
34-
3522
public:
3623
Mem() = default;
3724
~Mem() = default;
@@ -62,7 +49,7 @@ namespace deepx::mem
6249
{
6350
if (args.find(name) != args.end())
6451
{
65-
cerr << "Argument already exists: " << name << endl;
52+
cerr << "arg already exists: " << name << endl;
6653
}
6754
args[name] = value;
6855
}
@@ -72,7 +59,7 @@ namespace deepx::mem
7259
{
7360
if (args.find(name) == args.end())
7461
{
75-
cerr << "Argument not found: " << name << endl;
62+
cerr << "arg not found: " << name << endl;
7663
return T();
7764
}
7865
return any_cast<T>(args.at(name));
@@ -83,7 +70,7 @@ namespace deepx::mem
8370
{
8471
if (args.find(name) != args.end())
8572
{
86-
cerr << "Vector already exists: " << name << endl;
73+
cerr << "vector already exists: " << name << endl;
8774
return;
8875
}
8976
args[name] = value;
@@ -94,7 +81,7 @@ namespace deepx::mem
9481
{
9582
if (args.find(name) == args.end())
9683
{
97-
cerr << "Vector not found: " << name << endl;
84+
cerr << "vector not found: " << name << endl;
9885
return vector<T>();
9986
}
10087
auto v = any_cast<vector<T>>(args.at(name));
@@ -108,7 +95,7 @@ namespace deepx::mem
10895
{
10996
if (mem.find(name) != mem.end())
11097
{
111-
cerr << "Tensor already exists: " << name << endl;
98+
cerr << "tensor already exists: " << name << endl;
11299
return;
113100
}
114101
auto ptr = std::make_shared<Tensor<T>>(std::move(tensor));
@@ -120,7 +107,7 @@ namespace deepx::mem
120107
{
121108
if (mem.find(name) != mem.end())
122109
{
123-
cerr << "Tensor already exists: " << name << endl;
110+
cerr << "tensor already exists: " << name << endl;
124111
return;
125112
}
126113
auto ptr = std::make_shared<Tensor<T>>(tensor);
@@ -160,7 +147,7 @@ namespace deepx::mem
160147
{
161148
if (mem.find(name) == mem.end())
162149
{
163-
cerr << "Tensor not found: " << name << endl;
150+
cerr << "tensor not found: " << name << endl;
164151
continue;
165152
}
166153
auto ptr = mem.at(name);
File renamed without changes.

0 commit comments

Comments
 (0)