ops (required): A sequence of TensorOps to run. They must all share the same mode. Scheduled ops are not
supported at the moment, though the subnet itself may be scheduled.
Raises:
    ValueError: If `ops` are invalid.
Source code in fastestimator/fastestimator/op/tensorop/meta/fuse.py
@traceable()
class Fuse(TensorOp):
    """Execute a sequence of TensorOps as though they were a single Op.

    Args:
        ops: A sequence of TensorOps to run. They must all share the same mode. It also doesn't support scheduled ops
            at the moment, though the subnet itself may be scheduled.

    Raises:
        ValueError: If `ops` are invalid.
    """
    def __init__(self, ops: Union[TensorOp, List[TensorOp]]) -> None:
        ops = to_list(ops)
        if not ops:
            raise ValueError("Fuse requires at least one op")
        # All of the wrapped ops must agree with the first op's mode and ds_id.
        mode = ops[0].mode
        ds_id = ops[0].ds_id
        inputs = []
        outputs = []
        self.last_retain_idx = 0
        self.models = set()
        self.loss_keys = set()
        for i, op in enumerate(ops):
            if op.mode != mode:
                raise ValueError(f"All Fuse ops must share the same mode, but got {mode} and {op.mode}")
            if op.ds_id != ds_id:
                raise ValueError(f"All Fuse ops must share the same ds_id, but got {ds_id} and {op.ds_id}")
            # An input key is only external if no earlier op in the chain produced it.
            for key in op.inputs:
                if key not in inputs and key not in outputs:
                    inputs.append(key)
            for key in op.outputs:
                if key not in outputs:
                    outputs.append(key)
            if op.fe_retain_graph(True) is not None:
                # Every internal op is set to retain its graph; remember the last such op,
                # since that one might later be flipped back to False via fe_retain_graph().
                self.last_retain_idx = i
            self.models |= op.get_fe_models()
            self.loss_keys |= op.get_fe_loss_keys()
        super().__init__(inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id)
        self.ops = ops

    def build(self, framework: str, device: Optional[torch.device] = None) -> None:
        # Delegate building to every wrapped op.
        for op in self.ops:
            op.build(framework, device)

    def get_fe_models(self) -> Set[Model]:
        return self.models

    def get_fe_loss_keys(self) -> Set[str]:
        return self.loss_keys

    def fe_retain_graph(self, retain: Optional[bool] = None) -> Optional[bool]:
        # Forward the retain toggle to the last op that supports graph retention.
        return self.ops[self.last_retain_idx].fe_retain_graph(retain)

    def __getstate__(self) -> Dict[str, List[Dict[Any, Any]]]:
        # Serialize each wrapped op's state (empty dict when an op exposes no state).
        states = []
        for op in self.ops:
            states.append(op.__getstate__() if hasattr(op, '__getstate__') else {})
        return {'ops': states}

    def forward(self, data: List[Tensor], state: Dict[str, Any]) -> List[Tensor]:
        # Map the positional inputs back onto their keys, run the sub-network, then
        # gather this Fuse op's declared outputs in order.
        batch = dict(zip(self.inputs, data))
        BaseNetwork._forward_batch(batch, state, self.ops)
        return [batch[key] for key in self.outputs]