habana_frameworks.mediapipe.fn.Where

Class:
  • habana_frameworks.mediapipe.fn.Where(**kwargs)

Define graph call:
  • __call__(predicate, input0, input1)

Parameters:
  • predicate - Predicate tensor for the operator, size=[batch_size]. Supported dimensions: minimum = 1, maximum = 1. Supported data types: UINT8.

  • input0 - Input0 tensor. Supported dimensions: minimum = 1, maximum = 5. Supported data types: FLOAT16, FLOAT32, BFLOAT16.

  • input1 - Input1 tensor. Supported dimensions: minimum = 1, maximum = 5. Supported data types: FLOAT16, FLOAT32, BFLOAT16.

Description:

Outputs a tensor whose elements are selected from either input0 or input1, depending on predicate: where predicate is true, input0 is selected; otherwise input1 is selected.
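
The selection behaves like a per-batch NumPy where. The following minimal sketch (plain NumPy, not MediaPipe code, with illustrative shapes and values) shows the intended semantics:

import numpy as np

predicate = np.array([1, 0, 1], dtype=np.uint8)      # size = [batch_size]
input0 = np.full((3, 2, 2), 0.3, dtype=np.float32)   # picked where predicate is true
input1 = np.full((3, 2, 2), 0.7, dtype=np.float32)   # picked where predicate is false

# One predicate value per batch entry decides which input that entry comes from.
out = np.where(predicate[:, None, None] != 0, input0, input1)
print(out[:, 0, 0])   # [0.3 0.7 0.3]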

Supported backend:
  • HPU, CPU

Keyword Arguments:

  • dtype - Output data type.
      • Type: habana_frameworks.mediapipe.media_types.dtype
      • Default: UINT8
      • Optional: yes
      • Supported data types: FLOAT16, FLOAT32, BFLOAT16

Note

The input0, input1, and output tensors must all have the same data type and the same dimensionality.
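
A minimal NumPy sketch of this constraint, assuming the dt constants can be used as NumPy dtype strings (they are used this way in the example below):

import numpy as np
from habana_frameworks.mediapipe.media_types import dtype as dt

predicate = np.zeros([3, 1, 1, 1], dtype=dt.UINT8)   # per-batch predicate, UINT8
input0 = np.zeros([3, 1, 1, 1], dtype=dt.FLOAT32)
input1 = np.zeros([3, 1, 1, 1], dtype=dt.FLOAT32)

# input0, input1 (and the dtype requested from fn.Where) must share one data
# type, and both inputs must have the same number of dimensions.
assert input0.dtype == input1.dtype
assert input0.ndim == input1.ndim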

Example: Where Operator

The following code snippet shows the usage of the Where operator:

from habana_frameworks.mediapipe import fn
from habana_frameworks.mediapipe.mediapipe import MediaPipe
from habana_frameworks.mediapipe.media_types import dtype as dt
import numpy as np
import os

g_batch_size = 3
g_predicate = np.array(np.arange(g_batch_size).reshape(
    [g_batch_size, 1, 1, 1]), dtype=dt.UINT8)
g_predicate = np.mod(g_predicate, 2)
print(g_predicate)
print()
g_scale1 = np.ones([g_batch_size, 1, 1, 1], dtype=dt.FLOAT32) * 0.3
g_scale2 = np.ones([g_batch_size, 1, 1, 1], dtype=dt.FLOAT32) * 0.7


# Create MediaPipe derived class
class myMediaPipe(MediaPipe):
    def __init__(self, device, queue_depth, batch_size, num_threads,
                 op_device, dir):
        super(myMediaPipe, self).__init__(
            device,
            queue_depth,
            batch_size,
            num_threads,
            self.__class__.__name__)

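        # Read FP32 input tensors from numpy files in the given directory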
        self.inp = fn.ReadNumpyDatasetFromDir(num_outputs=1,
                                              shuffle=False,
                                              dir=dir,
                                              pattern="inp_x_*.npy",
                                              dense=True,
                                              dtype=dt.FLOAT32,
                                              device="cpu")

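        # Per-batch constants: the 0/1 predicate and the two candidate scale values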
        self.predicate = fn.MediaConst(data=g_predicate,
                                       shape=[1, 1, 1, batch_size],
                                       dtype=dt.UINT8,
                                       device="cpu",
                                       batch_broadcast=False)

        self.scale1 = fn.MediaConst(data=g_scale1,
                                    shape=[1, 1, 1, batch_size],
                                    dtype=dt.FLOAT32,
                                    device="cpu",
                                    batch_broadcast=False)

        self.scale2 = fn.MediaConst(data=g_scale2,
                                    shape=[1, 1, 1, batch_size],
                                    dtype=dt.FLOAT32,
                                    device="cpu",
                                    batch_broadcast=False)

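        # Where: pick scale1 where predicate is non-zero, scale2 otherwise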
        self.where = fn.Where(dtype=dt.FLOAT32,
                              device=op_device)

        self.mul = fn.Mult(device=op_device)
        self.op_device = op_device

    def definegraph(self):
        inp = self.inp()
        scale1 = self.scale1()
        scale2 = self.scale2()
        predicate = self.predicate()
        scale = self.where(predicate, scale1, scale2)
        out = self.mul(inp, scale)
        return inp, scale, out


def run(device, op_device):
    queue_depth = 1
    num_threads = 1
    base_dir = os.environ['DATASET_DIR']
    dir = base_dir+"/npy_data/fp32/"

    # Create MediaPipe object
    pipe = myMediaPipe(device, queue_depth, g_batch_size, num_threads,
                       op_device, dir)

    # Build MediaPipe
    pipe.build()

    # Initialize MediaPipe iterator
    pipe.iter_init()

    # Run MediaPipe
    inp, scale, out = pipe.run()

    def as_cpu(tensor):
        if callable(getattr(tensor, "as_cpu", None)):
            tensor = tensor.as_cpu()
        return tensor

    inp = as_cpu(inp).as_nparray()
    scale = as_cpu(scale).as_nparray()
    out = as_cpu(out).as_nparray()

    del pipe

    print("\ninp tensor shape:", inp.shape)
    print("inp tensor dtype:", inp.dtype)
    print("inp tensor data:\n", inp)

    print("\nscale tensor shape:", scale.shape)
    print("scale tensor dtype:", scale.dtype)
    print("scale tensor data:\n", scale)

    print("\nout tensor shape:", out.shape)
    print("out tensor dtype:", out.dtype)
    print("out tensor data:\n", out)
    return inp, scale, out


def compare_ref(inp, scale, out):
    ref_scale = np.where(g_predicate == 1, g_scale1, g_scale2)
    if not np.array_equal(ref_scale, scale):
        raise ValueError("Mismatch w.r.t. scale ref")
    ref_out = inp * ref_scale
    if not np.array_equal(ref_out, out):
        raise ValueError("Mismatch w.r.t. out ref")


if __name__ == "__main__":
    dev_opdev = {'cpu': ['cpu'],
                 'mixed': ['hpu'],
                 'legacy': ['hpu']}

    for dev in dev_opdev.keys():
        for op_dev in dev_opdev[dev]:
            inp, scale, out = run(dev, op_dev)
            compare_ref(inp, scale, out)

The following is the output of the Where operator:

inp tensor shape: (3, 3, 2, 3)
inp tensor dtype: float32
inp tensor data:
[[[[182. 227. 113.]
  [175. 128. 253.]]

  [[ 58. 140. 136.]
  [ 86.  80. 111.]]

  [[175. 196. 178.]
  [ 20. 163. 108.]]]


[[[186. 254.  96.]
  [180.  64. 132.]]

  [[149.  50. 117.]
  [213.   6. 111.]]

  [[ 77.  11. 160.]
  [129. 102. 154.]]]


[[[100.  95. 164.]
  [134. 131. 112.]]

  [[ 77.  38. 127.]
  [123.  87.  71.]]

  [[227. 186. 223.]
  [ 35.  56. 166.]]]]

scale tensor shape: (3, 1, 1, 1)
scale tensor dtype: float32
scale tensor data:
[[[[0.7]]]


[[[0.3]]]


[[[0.7]]]]

out tensor shape: (3, 3, 2, 3)
out tensor dtype: float32
out tensor data:
[[[[127.4       158.9        79.1      ]
  [122.5        89.6       177.09999  ]]

  [[ 40.6        98.         95.2      ]
  [ 60.2        56.         77.7      ]]

  [[122.5       137.2       124.6      ]
  [ 14.        114.1        75.6      ]]]


[[[ 55.800003   76.200005   28.800001 ]
  [ 54.000004   19.2        39.600002 ]]

  [[ 44.7        15.000001   35.100002 ]
  [ 63.9         1.8000001  33.300003 ]]

  [[ 23.1         3.3000002  48.       ]
  [ 38.7        30.6        46.2      ]]]


[[[ 70.         66.5       114.799995 ]
  [ 93.799995   91.7        78.4      ]]

  [[ 53.899998   26.6        88.9      ]
  [ 86.1        60.899998   49.7      ]]

  [[158.9       130.2       156.09999  ]
  [ 24.5        39.2       116.2      ]]]]