TensorFlow.js 张量运算 #
运算概览 #
TensorFlow.js 提供丰富的张量运算,涵盖数学运算、逻辑运算、归约操作等。
text
┌─────────────────────────────────────────────────────────────┐
│ 张量运算分类 │
├─────────────────────────────────────────────────────────────┤
│ │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
│ │ 数学运算 │ │ 逻辑运算 │ │ 归约操作 │ │
│ │ + - * / │ │ && || ! │ │ sum mean │ │
│ └─────────────┘ └─────────────┘ └─────────────┘ │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
│ │ 形状操作 │ │ 切片索引 │ │ 神经网络 │ │
│ │ reshape │ │ slice │ │ conv pool │ │
│ └─────────────┘ └─────────────┘ └─────────────┘ │
│ │
└─────────────────────────────────────────────────────────────┘
基本数学运算 #
加法 #
javascript
// Element-wise addition of two same-shape tensors.
const a = tf.tensor([1, 2, 3]);
const b = tf.tensor([4, 5, 6]);
const sum1 = a.add(b);
sum1.print(); // [5, 7, 9]
const sum2 = tf.add(a, b);
sum2.print(); // [5, 7, 9] — functional form, same result
const sum3 = a.add(10);
sum3.print(); // [11, 12, 13] — scalar is broadcast to every element
减法 #
javascript
// Element-wise subtraction.
const a = tf.tensor([10, 20, 30]);
const b = tf.tensor([1, 2, 3]);
const diff1 = a.sub(b);
diff1.print(); // [9, 18, 27]
const diff2 = a.sub(5);
diff2.print(); // [5, 15, 25] — scalar broadcast
乘法 #
javascript
// Element-wise multiplication (Hadamard product), NOT matrix multiplication.
const a = tf.tensor([1, 2, 3]);
const b = tf.tensor([4, 5, 6]);
const product1 = a.mul(b);
product1.print(); // [4, 10, 18]
const product2 = a.mul(2);
product2.print(); // [2, 4, 6]
除法 #
javascript
// Element-wise division.
const a = tf.tensor([10, 20, 30]);
const b = tf.tensor([2, 5, 10]);
const quotient1 = a.div(b);
quotient1.print(); // [5, 4, 3]
const quotient2 = a.div(2);
quotient2.print(); // [5, 10, 15]
整除与取余 #
javascript
// Integer-style division and remainder.
const a = tf.tensor([7, 8, 9]);
const b = tf.tensor([2, 3, 4]);
const floorDiv = a.floorDiv(b);
floorDiv.print(); // [3, 2, 2] — quotient rounded down
const mod = a.mod(b);
mod.print(); // [1, 2, 1]
幂运算 #
javascript
const a = tf.tensor([2, 3, 4]);
const pow = a.pow(2);
pow.print(); // [4, 9, 16]
const square = a.square();
square.print(); // [4, 9, 16] — shorthand for pow(2)
const sqrt = tf.tensor([4, 9, 16]).sqrt();
sqrt.print(); // [2, 3, 4]
指数与对数 #
指数函数 #
javascript
const a = tf.tensor([0, 1, 2]);
const exp = a.exp();
exp.print(); // [1, 2.718, 7.389]
const expm1 = a.expm1();
expm1.print(); // [0, 1.718, 6.389] — e^x - 1, more accurate for small x
对数函数 #
javascript
const a = tf.tensor([1, 2.718, 7.389]);
const log = a.log();
log.print(); // [0, ~1, ~2] — natural logarithm (base e)
const log1p = tf.tensor([0, 1, 2]).log1p();
log1p.print(); // [0, 0.693, 1.099] — ln(1 + x), accurate near 0
// Fix: TF.js log() takes no base argument; use the change-of-base
// identity log2(x) = ln(x) / ln(2) instead.
const log2 = tf.tensor([2, 4, 8]).log().div(Math.log(2));
log2.print(); // [1, 2, 3]
三角函数 #
基本三角函数 #
javascript
// Trigonometric functions take angles in radians.
const angles = tf.tensor([0, Math.PI / 4, Math.PI / 2]);
const sin = angles.sin();
sin.print(); // [0, 0.707, 1]
const cos = angles.cos();
cos.print(); // [1, 0.707, ~0]
const tan = angles.tan();
tan.print(); // [0, 1, very large] — tan(π/2) is unbounded
反三角函数 #
javascript
// Inverse trigonometric functions; results are in radians.
const values = tf.tensor([0, 0.5, 1]);
const asin = values.asin();
asin.print(); // [0, 0.524, 1.571]
const acos = values.acos();
acos.print(); // [1.571, 1.047, 0]
const atan = values.atan();
atan.print(); // [0, 0.464, 0.785]
双曲函数 #
javascript
const values = tf.tensor([0, 1, 2]);
const sinh = values.sinh();
sinh.print(); // [0, 1.175, 3.627]
const cosh = values.cosh();
cosh.print(); // [1, 1.543, 3.762]
const tanh = values.tanh();
tanh.print(); // [0, 0.762, 0.964] — saturates toward ±1
符号与绝对值 #
abs #
javascript
const a = tf.tensor([-1, 2, -3, 4]);
const abs = a.abs();
abs.print(); // [1, 2, 3, 4]
sign #
javascript
const a = tf.tensor([-5, 0, 5]);
const sign = a.sign();
sign.print(); // [-1, 0, 1]
neg #
javascript
const a = tf.tensor([1, -2, 3]);
const neg = a.neg();
neg.print(); // [-1, 2, -3]
取整函数 #
floor #
javascript
const a = tf.tensor([1.2, 2.7, 3.5]);
const floor = a.floor();
floor.print(); // [1, 2, 3] — round down
ceil #
javascript
const a = tf.tensor([1.2, 2.7, 3.5]);
const ceil = a.ceil();
ceil.print(); // [2, 3, 4] — round up
round #
javascript
const a = tf.tensor([1.2, 1.8, 2.5]);
const round = a.round();
// NOTE: round() rounds half to even (banker's rounding), so 2.5 -> 2, not 3.
round.print(); // [1, 2, 2]
最值与比较 #
最大值 #
javascript
const a = tf.tensor([1, 5, 3]);
const b = tf.tensor([2, 4, 6]);
const max = a.maximum(b);
max.print(); // [2, 5, 6] — element-wise max of two tensors
const maxVal = a.max();
maxVal.print(); // 5 — reduction: largest value in a
最小值 #
javascript
const a = tf.tensor([1, 5, 3]);
const b = tf.tensor([2, 4, 6]);
const min = a.minimum(b);
min.print(); // [1, 4, 3] — element-wise min of two tensors
const minVal = a.min();
minVal.print(); // 1 — reduction: smallest value in a
比较运算 #
javascript
// Comparison ops are element-wise and return boolean tensors.
const a = tf.tensor([1, 2, 3, 4, 5]);
const b = tf.tensor([2, 2, 2, 2, 2]);
const greater = a.greater(b);
greater.print(); // [false, false, true, true, true]
const less = a.less(b);
less.print(); // [true, false, false, false, false]
const equal = a.equal(b);
equal.print(); // [false, true, false, false, false]
const greaterEqual = a.greaterEqual(b);
greaterEqual.print(); // [false, true, true, true, true]
const lessEqual = a.lessEqual(b);
lessEqual.print(); // [true, true, false, false, false]
where 条件选择 #
javascript
// Selects from a where condition is true, otherwise from b.
const condition = tf.tensor([true, false, true]);
const a = tf.tensor([1, 2, 3]);
const b = tf.tensor([4, 5, 6]);
const result = tf.where(condition, a, b);
result.print(); // [1, 5, 3]
归约操作 #
求和 #
javascript
const t = tf.tensor([[1, 2, 3], [4, 5, 6]]);
const sum = t.sum();
sum.print(); // 21 — sum of all elements
const sumAxis0 = t.sum(0);
sumAxis0.print(); // [5, 7, 9] — column sums
const sumAxis1 = t.sum(1);
sumAxis1.print(); // [6, 15] — row sums
平均值 #
javascript
const t = tf.tensor([[1, 2, 3], [4, 5, 6]]);
const mean = t.mean();
mean.print(); // 3.5 — mean of all elements
const meanAxis0 = t.mean(0);
meanAxis0.print(); // [2.5, 3.5, 4.5] — column means
最大值与最小值 #
javascript
const t = tf.tensor([[1, 2, 3], [4, 5, 6]]);
const max = t.max();
max.print(); // 6
const min = t.min();
min.print(); // 1
const argMax = t.argMax();
argMax.print(); // [1, 1, 1] — index of the max along axis 0 (the default)
const argMin = t.argMin();
argMin.print(); // [0, 0, 0]
乘积 #
javascript
const t = tf.tensor([[1, 2, 3], [4, 5, 6]]);
const prod = t.prod();
prod.print();
const prodAxis0 = t.prod(0);
prodAxis0.print();
范数 #
javascript
const t = tf.tensor([3, 4]);
const norm1 = t.norm(1);
norm1.print(); // 7 — L1 norm: sum of absolute values
const norm2 = t.norm(2);
norm2.print(); // 5 — L2 (Euclidean) norm
// Fix: 'euclidean' is just the L2 norm again; the infinity norm
// (max absolute value) is requested with norm(Infinity).
const normInf = t.norm(Infinity);
normInf.print(); // 4
方差与标准差 #
javascript
const t = tf.tensor([1, 2, 3, 4, 5]);
// Fix: TF.js tensors have no .variance() or .std() methods.
// tf.moments() returns { mean, variance }; take sqrt for the std dev.
const { variance } = tf.moments(t);
variance.print(); // 2
const std = variance.sqrt();
std.print(); // 1.414
any 与 all #
javascript
const t = tf.tensor([true, false, true]);
const any = t.any();
any.print(); // true — at least one element is true
const all = t.all();
all.print(); // false — not every element is true
广播机制 #
广播允许不同形状的张量进行运算。
广播规则 #
text
┌─────────────────────────────────────────────────────────────┐
│ 广播规则 │
├─────────────────────────────────────────────────────────────┤
│ │
│ 1. 从右向左比较形状 │
│ 2. 维度相等或其中一个为 1 时可以广播 │
│ 3. 缺失的维度视为 1 │
│ │
│ 示例: │
│ [3, 1] + [1, 4] = [3, 4] │
│ [3, 4] + [4] = [3, 4] │
│ [3, 4] + [3, 1] = [3, 4] │
│ │
└─────────────────────────────────────────────────────────────┘
广播示例 #
javascript
// Shapes [3, 1] + [1, 3] broadcast to [3, 3].
const a = tf.tensor([[1], [2], [3]]);
const b = tf.tensor([[10, 20, 30]]);
const result = a.add(b);
result.print(); // [[11, 21, 31], [12, 22, 32], [13, 23, 33]]
javascript
// Shapes [2, 3] + [3]: the 1-D tensor is broadcast across each row.
const a = tf.tensor([[1, 2, 3], [4, 5, 6]]);
const b = tf.tensor([10, 20, 30]);
const result = a.add(b);
result.print(); // [[11, 22, 33], [14, 25, 36]]
javascript
// Shapes [2, 3] + [2, 1]: the column vector is broadcast across each column.
const a = tf.tensor([[1, 2, 3], [4, 5, 6]]);
const b = tf.tensor([[10], [20]]);
const result = a.add(b);
result.print(); // [[11, 12, 13], [24, 25, 26]]
矩阵运算 #
矩阵乘法 #
javascript
const a = tf.tensor([[1, 2], [3, 4]]);
const b = tf.tensor([[5, 6], [7, 8]]);
const matMul = a.matMul(b);
matMul.print(); // [[19, 22], [43, 50]]
const dot = a.dot(b);
dot.print(); // same as matMul for 2-D inputs
转置 #
javascript
const a = tf.tensor([[1, 2, 3], [4, 5, 6]]);
const transposed = a.transpose();
transposed.print(); // [[1, 4], [2, 5], [3, 6]] — shape [2, 3] -> [3, 2]
行列式 #
javascript
const a = tf.tensor([[1, 2], [3, 4]]);
// NOTE(review): tf.linalg.det is not part of TF.js core (which exposes
// only bandPart, gramSchmidt and qr) — confirm this API exists in the
// installed version before relying on this example.
const det = tf.linalg.det(a);
det.print(); // would be -2 for this matrix
逆矩阵 #
javascript
const a = tf.tensor([[1, 2], [3, 4]]);
// NOTE(review): tf.linalg.inv is not part of TF.js core — confirm this
// API exists in the installed version before relying on this example.
const inv = tf.linalg.inv(a);
inv.print();
矩阵分解 #
javascript
// QR decomposition: a = q.matMul(r), with q orthonormal and r upper-triangular.
const a = tf.tensor([[1, 2], [3, 4], [5, 6]]);
const [q, r] = tf.linalg.qr(a);
q.print();
r.print();
逐元素运算 #
clipByValue #
javascript
const t = tf.tensor([1, 5, 10, 15, 20]);
const clipped = t.clipByValue(5, 15);
clipped.print(); // [5, 5, 10, 15, 15] — values limited to the range [5, 15]
relu #
javascript
const t = tf.tensor([-2, -1, 0, 1, 2]);
const relu = t.relu();
relu.print(); // [0, 0, 0, 1, 2] — max(x, 0)
sigmoid #
javascript
const t = tf.tensor([-2, -1, 0, 1, 2]);
const sigmoid = t.sigmoid();
sigmoid.print(); // [0.119, 0.269, 0.5, 0.731, 0.881] — 1 / (1 + e^-x)
softmax #
javascript
const t = tf.tensor([1, 2, 3]);
const softmax = t.softmax();
softmax.print(); // [0.09, 0.245, 0.665] — a probability distribution (sums to 1)
elu #
javascript
const t = tf.tensor([-2, -1, 0, 1, 2]);
const elu = t.elu();
elu.print(); // x for x >= 0, e^x - 1 for x < 0: [-0.865, -0.632, 0, 1, 2]
selu #
javascript
const t = tf.tensor([-2, -1, 0, 1, 2]);
const selu = t.selu();
selu.print(); // scaled ELU with fixed self-normalizing constants
leakyRelu #
javascript
const t = tf.tensor([-2, -1, 0, 1, 2]);
const leakyRelu = tf.leakyRelu(t, 0.1); // alpha = 0.1 slope for negative inputs
leakyRelu.print(); // [-0.2, -0.1, 0, 1, 2]
归一化 #
batchNorm #
javascript
// batchNorm computes (x - mean) / sqrt(variance + epsilon) * scale + offset.
const x = tf.tensor([[1, 2], [3, 4]]);
const mean = tf.tensor([1, 2]);
const variance = tf.tensor([1, 1]);
const offset = tf.tensor([0, 0]);
const scale = tf.tensor([1, 1]);
const normalized = tf.batchNorm(x, mean, variance, offset, scale);
normalized.print(); // [[0, 0], [~2, ~2]]
layerNorm #
javascript
const t = tf.tensor([[1, 2, 3], [4, 5, 6]]);
// Fix: tf.layers.normalization() does not exist in TF.js; the layer-norm
// layer is tf.layers.layerNormalization() (normalizes along the last axis).
const normalized = tf.layers.layerNormalization().apply(t);
梯度运算 #
计算梯度 #
javascript
// tf.grad(f) returns a new function that computes df/dx.
const x = tf.tensor([1, 2, 3]);
const grad = tf.grad(x => x.square()); // d(x^2)/dx = 2x
const gradient = grad(x);
gradient.print(); // [2, 4, 6]
计算高阶梯度 #
javascript
// Higher-order gradients: differentiate the gradient function again.
const x = tf.tensor([1, 2, 3]);
const f = x => x.square();
const grad1 = tf.grad(f); // first derivative: 2x
const grad2 = tf.grad(grad1); // second derivative: constant 2
console.log('一阶导数:');
grad1(x).print(); // [2, 4, 6]
console.log('二阶导数:');
grad2(x).print(); // [2, 2, 2]
valueAndGrad #
javascript
// valueAndGrad returns both f(x) and df/dx in a single call.
const x = tf.tensor([1, 2, 3]);
const f = x => x.square().sum();
const { value, grad } = tf.valueAndGrad(f)(x);
console.log('函数值:');
value.print(); // 14 — 1 + 4 + 9
console.log('梯度:');
grad.print(); // [2, 4, 6]
多变量梯度 #
javascript
// tf.grads computes partial derivatives with respect to several inputs.
const x1 = tf.tensor([1, 2, 3]);
const x2 = tf.tensor([4, 5, 6]);
const f = (x1, x2) => x1.mul(x2).sum();
const grads = tf.grads(f);
const [grad1, grad2] = grads([x1, x2]);
grad1.print(); // df/dx1 = x2 = [4, 5, 6]
grad2.print(); // df/dx2 = x1 = [1, 2, 3]
自定义操作 #
使用 tf.customGrad #
javascript
// customGrad defines an op (here x^2) with a hand-written backward pass.
const customOp = tf.customGrad((x, save) => {
save([x]); // stash x so gradFunc can read it during backprop
return {
value: x.square(),
gradFunc: (dy, saved) => {
const [x] = saved;
return dy.mul(x.mul(2)); // chain rule: dy * d(x^2)/dx = dy * 2x
}
};
});
const x = tf.tensor([1, 2, 3]);
const y = customOp(x);
y.print(); // [1, 4, 9]
运算链式调用 #
javascript
// Every op returns a tensor, so calls chain; tf.tidy frees the intermediates.
const result = tf.tidy(() => {
return tf.tensor([1, 2, 3])
.square() // [1, 4, 9]
.add(1) // [2, 5, 10]
.div(2) // [1, 2.5, 5]
.sqrt(); // [1, 1.581, 2.236]
});
result.print();
性能优化 #
使用 tf.tidy #
javascript
// tf.tidy disposes the intermediate tensors (a, b) created inside the
// callback; only the returned tensor survives the scope.
const result = tf.tidy(() => {
const a = tf.tensor([1, 2, 3]);
const b = a.square();
const c = b.add(1);
return c; // only c escapes the tidy scope
});
避免不必要的同步 #
javascript
const t = tf.tensor([1, 2, 3]);
// data() is asynchronous (non-blocking, unlike dataSync()), so it returns
// a Promise. Fix: await it — previously `data` held an unresolved Promise
// rather than the tensor's values.
const data = await t.data(); // Float32Array [1, 2, 3]
注意:没有真正的 inplace 操作(张量不可变) #
javascript
const t = tf.tensor([1, 2, 3]);
// NOTE(review): TF.js has no true in-place ops — tensors are immutable and
// add() returns a NEW tensor. The result here is discarded and will leak
// memory unless created inside tf.tidy() or disposed explicitly.
t.add(1);
下一步 #
现在你已经掌握了张量运算,接下来学习 模型构建,了解如何构建神经网络模型!
最后更新:2026-03-29