load_clip_batch

LoadClipBatch

Bases: ClipLoader

Load a product to the timeline as a clip

Place the clip on the timeline at its asset origin timings, collected during conforming to the project

Source code in client/ayon_flame/plugins/load/load_clip_batch.py
class LoadClipBatch(ayfapi.ClipLoader):
    """Load a product to timeline as clip

    Place clip to timeline on its asset origin timings collected
    during conforming to project
    """

    product_types = {"render2d", "source", "plate", "render", "review"}
    representations = {"*"}
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load as clip to current batch"
    order = -10
    icon = "code-fork"
    color = "orange"

    # settings
    reel_name = "AYON_LoadedReel"
    clip_name_template = "{batch}_{folder[name]}_{product[name]}<_{output}>"

    """ Anatomy keys from version context data and dynamically added:
        - {layerName} - original layer name token
        - {layerUID} - original layer UID token
        - {originalBasename} - original clip name taken from file
    """
    layer_rename_template = "{folder[name]}_{product[name]}<_{output}>"
    layer_rename_patterns = []

    def load(self, context, name, namespace, options):

        # get flame objects
        self.batch = options.get("batch") or flame.batch

        # load clip to timeline and get main variables
        version_entity = context["version"]
        version_attributes = version_entity["attrib"]
        version_name = version_entity["version"]
        colorspace = self.get_colorspace(context)

        clip_name_template = self.clip_name_template
        layer_rename_template = self.layer_rename_template
        # in case output is not in context replace key to representation
        if not context["representation"]["context"].get("output"):
            clip_name_template = clip_name_template.replace(
                "output", "representation")
            layer_rename_template = layer_rename_template.replace(
                "output", "representation")

        folder_entity = context["folder"]
        product_entity = context["product"]
        formatting_data = deepcopy(context["representation"]["context"])
        formatting_data["batch"] = self.batch.name.get_value()
        formatting_data.update({
            "asset": folder_entity["name"],
            "folder": {
                "name": folder_entity["name"],
            },
            "subset": product_entity["name"],
            "family": product_entity["productType"],
            "product": {
                "name": product_entity["name"],
                "type": product_entity["productType"],
            }
        })

        clip_name = StringTemplate(clip_name_template).format(
            formatting_data)

        # convert colorspace with ocio to flame mapping
        # in imageio flame section
        colorspace = self.get_native_colorspace(colorspace)
        self.log.info("Loading with colorspace: `{}`".format(colorspace))

        # create workfile path
        workfile_dir = options.get("workdir") or os.environ["AYON_WORKDIR"]
        openclip_dir = os.path.join(
            workfile_dir, clip_name
        )
        openclip_path = os.path.join(
            openclip_dir, clip_name + ".clip"
        )

        if not os.path.exists(openclip_dir):
            os.makedirs(openclip_dir)

        # prepare clip data from context and send it to openClipLoader
        path = self.filepath_from_context(context)
        loading_context = {
            "path": path.replace("\\", "/"),
            "colorspace": colorspace,
            "version": "v{:0>3}".format(version_name),
            "layer_rename_template": layer_rename_template,
            "layer_rename_patterns": self.layer_rename_patterns,
            "context_data": formatting_data
        }
        self.log.debug(pformat(
            loading_context
        ))
        self.log.debug(openclip_path)

        # make AYON clip file
        ayfapi.OpenClipSolver(
            openclip_path, loading_context, logger=self.log).make()

        # prepare Reel group in actual desktop
        opc = self._get_clip(
            clip_name,
            openclip_path
        )

        # add additional metadata from the version to imprint basic
        # folder attributes
        add_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]

        # move all version data keys to tag data
        data_imprint = {
            key: version_attributes.get(key, str(None))
            for key in add_keys
        }
        # add variables related to version context
        data_imprint.update({
            "version": version_name,
            "colorspace": colorspace,
            "objectName": clip_name
        })

        # TODO: finish the containerisation
        # opc_segment = ayfapi.get_clip_segment(opc)

        # return ayfapi.containerise(
        #     opc_segment,
        #     name, namespace, context,
        #     self.__class__.__name__,
        #     data_imprint)

        return opc

    def _get_clip(self, name, clip_path):
        reel = self._get_reel()

        # with maintained openclip as opc
        matching_clip = None
        for cl in reel.clips:
            if cl.name.get_value() != name:
                continue
            matching_clip = cl

        if not matching_clip:
            created_clips = flame.import_clips(str(clip_path), reel)
            return created_clips.pop()

        return matching_clip

    def _get_reel(self):

        matching_reel = [
            rg for rg in self.batch.reels
            if rg.name.get_value() == self.reel_name
        ]

        return (
            matching_reel.pop()
            if matching_reel
            else self.batch.create_reel(str(self.reel_name))
        )
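
For reference, a minimal sketch of the path and version formatting used in load() above. The workfile directory, clip name and version number below are hypothetical example values; only the formatting mirrors the loader.

import os

# Hypothetical example values; in load() these come from the loader
# options (or AYON_WORKDIR) and the resolved clip name template.
workfile_dir = "/proj/work/shot010/compositing"
clip_name = "shot010_comp_shot010_plateMain_exr"
version_name = 12

openclip_dir = os.path.join(workfile_dir, clip_name)
openclip_path = os.path.join(openclip_dir, clip_name + ".clip")
# -> .../shot010_comp_shot010_plateMain_exr/shot010_comp_shot010_plateMain_exr.clip

version_token = "v{:0>3}".format(version_name)  # -> "v012"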

clip_name_template = '{batch}_{folder[name]}_{product[name]}<_{output}>' (class attribute)

Anatomy keys from version context data and dynamically added:

- {layerName} - original layer name token
- {layerUID} - original layer UID token
- {originalBasename} - original clip name taken from file
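
As an illustration, a minimal sketch of how this template resolves with StringTemplate. The import path (ayon_core.lib) is an assumption made for this sketch; the loader itself receives StringTemplate through its own module imports.

# Illustration only: the import path is assumed.
from ayon_core.lib import StringTemplate

formatting_data = {
    "batch": "shot010_comp",          # current batch name
    "folder": {"name": "shot010"},
    "product": {"name": "plateMain"},
    "output": "exr",                  # fills the optional <_{output}> part
}

clip_name = StringTemplate(
    "{batch}_{folder[name]}_{product[name]}<_{output}>"
).format(formatting_data)
# -> "shot010_comp_shot010_plateMain_exr"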