habana_frameworks.mediapipe.fn.MediaConst

Class:
  • habana_frameworks.mediapipe.fn.MediaConst(**kwargs)

Define graph call:
  • __call__()

Parameter:
  • None

Description:

This operator generates constant data of the given shape and data type on every iteration.

Supported backend:
  • CPU

Keyword Arguments:

  • data: Data to be used as constant.
      • Type: Numpy array
      • Default: None
      • Optional: no

  • batch_broadcast: If True, media broadcasts the data across the batch dimension; if False, the provided data must already include the batch dimension. See the minimal sketch after the note below.
      • Type: bool
      • Default: True
      • Optional: yes

  • layout: Layout of the data.
      • Type: str
      • Default: ''
      • Optional: yes
      • Supported layouts:
          • batch_broadcast = True: HWC = 'CWH', CHW = 'WHC', HW = 'WH', C = 'C'
          • batch_broadcast = False: NA = '', NHWC = 'CWHN', NCHW = 'WHCN', FHWC = 'CWHF'

  • shape: Shape of the output tensor.
      • Type: list[int]
      • Default: [1]
      • Optional: no

Note

Produces one output.
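
The two batch_broadcast modes differ only in whether the supplied array already carries the batch dimension. The following is a minimal, illustrative sketch of both modes; the node names, constant values, and the 'CWH' layout choice are assumptions made for this sketch, and the shape lists follow the innermost-dimension-first convention used by the full example below. In practice these nodes are created inside a MediaPipe subclass's __init__ and invoked with no arguments in definegraph(), as shown in the full example.

from habana_frameworks.mediapipe import fn
from habana_frameworks.mediapipe.media_types import dtype as dt
import numpy as np

batch_size = 6

# batch_broadcast=True: the data has no batch dimension and the same constant
# is broadcast to every sample. Here an illustrative per-channel value is laid
# out as HWC, so layout 'CWH' from the list above applies.
mean_data = np.full((1, 1, 3), 127.5, dtype='float32')      # H=1, W=1, C=3
mean_const = fn.MediaConst(data=mean_data,
                           shape=[3, 1, 1],                 # innermost dim first: C, W, H
                           dtype=dt.FLOAT32,
                           batch_broadcast=True,
                           layout='CWH',
                           device='cpu')

# batch_broadcast=False: the data must already contain the batch dimension,
# one entry per sample, exactly like the GatherND indices in the example below.
indices_data = np.arange(batch_size, dtype='int32').reshape(batch_size, 1)
indices_const = fn.MediaConst(data=indices_data,
                              shape=[1, batch_size],        # innermost dim first: 1, N
                              dtype=dt.INT32,
                              batch_broadcast=False,
                              device='cpu')

Each node is then called with no arguments in definegraph() (for example, indices = indices_const()) and produces the same constant tensor on every iteration.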

Example: MediaConst Operator

The following code snippet shows the usage of the MediaConst operator. In this example, the indices that are input to the GatherND operation are generated with MediaConst; the constant [[5], [4], [3], [2], [1], [0]] gathers the decoded images in reverse batch order:

from habana_frameworks.mediapipe import fn
from habana_frameworks.mediapipe.mediapipe import MediaPipe
from habana_frameworks.mediapipe.media_types import imgtype as it
from habana_frameworks.mediapipe.media_types import dtype as dt
import matplotlib.pyplot as plt
import numpy as np
import os

g_display_timeout = float(os.getenv("DISPLAY_TIMEOUT") or 5)

# Create MediaPipe derived class
class myMediaPipe(MediaPipe):
    def __init__(self, device, queue_depth, batch_size, num_threads, op_device, dir, img_h, img_w):
        super(myMediaPipe, self).__init__(device,
                                          queue_depth,
                                          batch_size,
                                          num_threads,
                                          self.__class__.__name__)

        self.input = fn.ReadImageDatasetFromDir(shuffle=False,
                                                dir=dir,
                                                format="jpg",
                                                device="cpu")

        # WHCN
        self.decode = fn.ImageDecoder(device="hpu",
                                      output_format=it.RGB_P,
                                      resize=[img_w, img_h])

        # Constant indices for GatherND: gather the decoded images in reverse batch order
        indices_data = np.array([[5], [4], [3], [2], [1], [0]], dtype='int32')

        self.indices = fn.MediaConst(data=indices_data,
                                     shape=[1, batch_size],
                                     dtype=dt.INT32,
                                     batch_broadcast=False,
                                     device=op_device)

        # Gather image slices along the batch dimension using the constant indices
        self.gather_nd = fn.GatherND(dtype=dt.UINT8, device="hpu")

        # WHCN -> CWHN
        self.transpose = fn.Transpose(permutation=[2, 0, 1, 3],
                                      tensorDim=4,
                                      dtype=dt.UINT8,
                                      device="hpu")
        # Reshape labels to a 1D tensor of length batch_size
        self.reshape_lbls = fn.Reshape(size=[batch_size],
                                       tensorDim=1,
                                       layout='',
                                       dtype=dt.INT32,
                                       device='hpu')

    def definegraph(self):
        images, labels = self.input()
        images = self.decode(images)
        indices = self.indices()
        images = self.gather_nd(images, indices)
        images = self.transpose(images)
        labels = self.reshape_lbls(labels)
        return images, labels


def display_images(images, batch_size, cols):
    rows = (batch_size + cols - 1) // cols
    plt.figure(figsize=(10, 10))
    for i in range(batch_size):
        ax = plt.subplot(rows, cols, i + 1)
        plt.imshow(images[i])
        plt.axis("off")
    plt.show(block=False)
    plt.pause(g_display_timeout)
    plt.close()


def run(device, op_device):
    batch_size = 6
    queue_depth = 2
    num_threads = 1
    img_width = 200
    img_height = 200
    base_dir = os.environ['DATASET_DIR']
    dir = base_dir + "/img_data/"
    columns = 3

    # Create MediaPipe object
    pipe = myMediaPipe(device, queue_depth, batch_size,
                       num_threads, op_device, dir,
                       img_height, img_width)

    # Build MediaPipe
    pipe.build()

    # Initialize MediaPipe iterator
    pipe.iter_init()

    # Run MediaPipe
    images, labels = pipe.run()

    def as_cpu(tensor):
        # Copy the tensor to the host if it supports as_cpu()
        if callable(getattr(tensor, "as_cpu", None)):
            tensor = tensor.as_cpu()
        return tensor

    # Copy data to host from device as numpy array
    images = as_cpu(images).as_nparray()
    labels = as_cpu(labels).as_nparray()
    del pipe

    # Display images
    display_images(images, batch_size, columns)

if __name__ == "__main__":
    dev_opdev = {'mixed': ['cpu'],
                 'legacy': ['cpu']}

    for dev in dev_opdev.keys():
        for op_dev in dev_opdev[dev]:
            run(dev, op_dev)

MediaConst() Generated Images from GatherND

[Figure: six output images (Image 1 through Image 6) of media_const]

Licensed under a CC BY SA 4.0 license. The images used here are taken from https://data.caltech.edu/records/mzrjq-6wc02.