habana_frameworks.mediapipe.fn.Mult
- Class:
habana_frameworks.mediapipe.fn.Mult(**kwargs)
- Define graph call:
__call__(input1, input2)
- Parameters:
input1 - First input tensor to operator. Supported dimensions: minimum = 1, maximum = 5. Supported data types: INT16, INT32, FLOAT16, BFLOAT16, FLOAT32.
input2 - Second input tensor to operator. Supported dimensions: minimum = 1, maximum = 5. Supported data types: INT16, INT32, FLOAT16, BFLOAT16, FLOAT32.
Description:
The resultant tensor is formed by multiplying the two operands element-wise. This operator performs element-wise multiplication and supports broadcasting.
Computes output as: output = (input1 * input2), element-wise.
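As a point of reference, the computation is equivalent to an element-wise NumPy multiply. The shapes and values below are only an illustrative sketch, not part of the MediaPipe API:

import numpy as np

input1 = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
input2 = np.array([[10.0, 20.0], [30.0, 40.0]], dtype=np.float32)

# Element-wise product, same shape and dtype for both operands
output = input1 * input2
print(output)  # [[ 10.  40.] [ 90. 160.]]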
- Supported backend:
HPU
Keyword Arguments

kwargs | Description |
---|---|
dtype | Output data type. |
Note
All input/output tensors must be of the same data type and must have the same dimensionality, except when broadcasting is used, in which case the dimensionality can differ.
This operator is agnostic to the data layout.
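The broadcasting behavior mentioned in the note can be sketched with NumPy semantics. The shapes below (an image-like tensor scaled by a per-channel factor) are an assumed illustration, not taken from the operator reference:

import numpy as np

# Hypothetical shapes for illustration: a 200x200x3 tensor and a 1-D per-channel scale
image = np.ones((200, 200, 3), dtype=np.float32)     # first operand, 3-D
scale = np.array([0.5, 1.0, 2.0], dtype=np.float32)  # second operand, 1-D

# Broadcasting expands scale to (200, 200, 3) before the element-wise multiply;
# both operands keep the same data type, as required by the note above.
scaled = image * scale
print(scaled.shape)  # (200, 200, 3)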
Example: Mult Operator
The following code snippet shows the usage of the Mult operator:
from habana_frameworks.mediapipe import fn
from habana_frameworks.mediapipe.mediapipe import MediaPipe
from habana_frameworks.mediapipe.media_types import imgtype as it
from habana_frameworks.mediapipe.media_types import dtype as dt
import matplotlib.pyplot as plt
from numpy.random import RandomState
import numpy as np

# Create media pipeline derived class
class myMediaPipe(MediaPipe):
    def __init__(self, device, dir, queue_depth, batch_size, img_h, img_w, img_c):
        super(myMediaPipe, self).__init__(
            device, queue_depth, batch_size, self.__class__.__name__)

        self.input = fn.ReadImageDatasetFromDir(shuffle=False,
                                                dir=dir,
                                                format="jpg")

        rng = RandomState(seed=100)
        self.input_data = rng.rand(img_c,
                                   img_w,
                                   img_h,
                                   batch_size)
        self.input_data = self.input_data.astype(np.float32)

        self.input_node = fn.MediaConst(data=self.input_data,
                                        shape=[img_w, img_h, img_c, batch_size],
                                        dtype=dt.FLOAT32)

        self.cast = fn.Cast(dtype=dt.FLOAT32, round_mode=0)

        # WHCN
        self.decode = fn.ImageDecoder(device="hpu",
                                      output_format=it.RGB_P,
                                      resize=[img_w, img_h])

        self.mult = fn.Mult(dtype=dt.FLOAT32)

        # WHCN -> CWHN
        self.transpose = fn.Transpose(permutation=[2, 0, 1, 3],
                                      tensorDim=4,
                                      dtype=dt.FLOAT32)

    def definegraph(self):
        images, labels = self.input()
        images = self.decode(images)
        images1 = self.cast(images)
        images2 = self.input_node()
        images = self.mult(images1, images2)
        images = self.transpose(images)
        return images, labels

def main():
    batch_size = 6
    img_width = 200
    img_height = 200
    img_channel = 3
    img_dir = "/path/to/images"
    queue_depth = 2
    columns = 3

    # Create media pipeline object
    pipe = myMediaPipe('hpu', img_dir, queue_depth, batch_size,
                       img_height, img_width, img_channel)

    # Build media pipeline
    pipe.build()

    # Initialize media pipeline iterator
    pipe.iter_init()

    # Run media pipeline
    images, labels = pipe.run()

    # Copy data to host from device as numpy array
    images = images.as_cpu().as_nparray()
    labels = labels.as_cpu().as_nparray()

    # Display data
    print('data:\n', images)

if __name__ == "__main__":
    main()
The following is the output of the Mult operator:
data: [[[[6.62954025e+01 2.29007854e+01 6.44151917e+01] [3.36826973e+01 7.10452347e+01 8.23742752e+01] [5.00930786e+01 4.28848877e+01 4.84426270e+01] ... [2.04287510e+01 7.26719761e+00 9.49951267e+00] [3.55139351e+01 3.16886635e+01 4.68619633e+00] [7.50539303e+00 1.52365170e+01 1.89718781e+01]]]]