Multi-input Wipe Example

This example implements a transition between two overlapping sequences. It uses an overexposure wipe, with a bloom on the images. Two faders are used to control the transition.
These headers are required for all HDK-based OPs.
These headers are used for SYSceil(), SYSabs() and SYSsqrt().
Since this COP works with regions, we need TIL_Region. As it requests floating point regions, it also requires TIL_Plane, to create a duplicate of the cooked plane with an FP32 data format to pass to COP2_Node::inputRegion().
Finally, TIL_Tile is needed for most COPs, and COP2_CookAreaInfo is needed whenever COP2_Node::getInputDependenciesForOutputArea() is overridden.
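For reference, a minimal sketch of the include block described above might look like the following. The list is reconstructed here rather than copied from the sample, so treat the exact set of headers as an assumption.

#include <SYS/SYS_Math.h>            // SYSceil(), SYSabs(), SYSsqrt()
#include <OP/OP_OperatorTable.h>     // operator registration
#include <PRM/PRM_Include.h>         // parameter templates
#include <TIL/TIL_Region.h>          // input regions
#include <TIL/TIL_Plane.h>           // FP32 copy of the cook plane
#include <TIL/TIL_Tile.h>            // tile access
#include <COP2/COP2_CookAreaInfo.h>  // getInputDependenciesForOutputArea()
#include <COP2/COP2_MultiBase.h>     // base class for multi-input COPs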
static PRM_Name names[] =
{
    PRM_Name("fadera",    "Fader A"),
    PRM_Name("faderb",    "Fader B"),
    PRM_Name("boostval",  "Overexposure Boost"),
    PRM_Name("bloomblur", "Bloom Blur"),
    PRM_Name("fademode",  "Fade Mode"),
};
#define FADE_LINEAR 0
#define FADE_SQUARE 1
#define FADE_ROOT   2
// Menu items, ranges and defaults below are representative reconstructions;
// see the full HDK sample for the exact values.
static PRM_Name fadeItems[] =
{
    PRM_Name("linear", "Linear"),   PRM_Name("square", "Squared"),
    PRM_Name("root",   "Square Root"),   PRM_Name(0)
};
static PRM_ChoiceList fadeMenu(PRM_CHOICELIST_SINGLE, fadeItems);
static PRM_Range wipeRange(PRM_RANGE_RESTRICTED, 0, PRM_RANGE_RESTRICTED, 1);
static PRM_Range boostRange(PRM_RANGE_UI, 0, PRM_RANGE_UI, 2);
static PRM_Range blurRange(PRM_RANGE_UI, 0, PRM_RANGE_UI, 10);
PRM_Template
COP2_MultiInputWipe::myTemplateList[] =
{
    PRM_Template(PRM_FLT_J, 1, &names[0], PRMoneDefaults,  0, &wipeRange),
    PRM_Template(PRM_FLT_J, 1, &names[1], PRMzeroDefaults, 0, &wipeRange),
    PRM_Template(PRM_FLT_J, 1, &names[2], PRMzeroDefaults, 0, &boostRange),
    PRM_Template(PRM_FLT_J, 1, &names[3], PRMzeroDefaults, 0, &blurRange),
    PRM_Template(PRM_INT_J, 1, &names[4], PRMzeroDefaults, &fadeMenu),
    PRM_Template(),
};
OP_TemplatePair COP2_MultiInputWipe::myTemplatePair(
    COP2_MultiInputWipe::myTemplateList, &COP2_MultiBase::myTemplatePair);
This code defines the parameter dialog for the COP. It has two faders, which are sliders, a boost and a blur parameter, and a menu to select the type of transition rate.
// This node has no local variables of its own.
CH_LocalVariable COP2_MultiInputWipe::myVariableList[] = { {0, 0, 0} };
OP_VariablePair  COP2_MultiInputWipe::myVariablePair(
                        COP2_MultiInputWipe::myVariableList);

const char * COP2_MultiInputWipe::myInputLabels[] =
{
    "Wipe A",
    "Wipe B",
    0
};
This node does not have any local variables. The inputs are labeled so they correspond to the fader sliders' names.
OP_Node *
COP2_MultiInputWipe::myConstructor(
    OP_Network *net,
    const char *name,
    OP_Operator *op)
{
    return new COP2_MultiInputWipe(net, name, op);
}

COP2_MultiInputWipe::COP2_MultiInputWipe(
    OP_Network *parent,
    const char *name,
    OP_Operator *entry)
    : COP2_MultiBase(parent, name, entry)
{
}

COP2_MultiInputWipe::~COP2_MultiInputWipe()
{
}
This node derives from COP2_MultiBase, which provides a 'Merge' page full of parameters that deal with combining two sequences into one. This resolves issues that arise when the two sequences do not exactly match, such as having different planes, frame ranges, resolutions or frame rates.
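For context, a minimal sketch of what the node's class declaration might look like is shown below. The member names mirror those used elsewhere in this example, but the actual header in the HDK sample declares further overrides (newContextData(), cookMyTile(), and so on), so treat this only as an illustration of the COP2_MultiBase derivation.

class COP2_MultiInputWipe : public COP2_MultiBase
{
public:
    static OP_Node          *myConstructor(OP_Network *, const char *,
                                           OP_Operator *);
    static OP_TemplatePair   myTemplatePair;
    static OP_VariablePair   myVariablePair;
    static PRM_Template      myTemplateList[];
    static CH_LocalVariable  myVariableList[];
    static const char       *myInputLabels[];

protected:
             COP2_MultiInputWipe(OP_Network *parent, const char *name,
                                 OP_Operator *entry);
    virtual ~COP2_MultiInputWipe();
};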
class cop2_MultiInputWipeData : public COP2_ContextData
{
public:
             cop2_MultiInputWipeData() {}
    virtual ~cop2_MultiInputWipeData() {}

    float   myFaderA;
    float   myFaderB;
    float   myBoostA;
    float   myBoostB;
    int     myBlurRadA;
    int     myBlurRadB;
    float   myBlurA;
    float   myBlurB;
    bool    myPassA;
    bool    myPassB;
};
COP2_ContextData *
COP2_MultiInputWipe::newContextData(
    const TIL_Plane * /*plane*/,
    int /*array_index*/,
    float t,
    int xres, int /*yres*/, int /*thread*/, int /*max_threads*/)
{
    cop2_MultiInputWipeData *data = new cop2_MultiInputWipeData();
    int   fademode;
    float blur;
    float boost;

    data->myFaderA = evalFloat("fadera", 0, t);
    data->myFaderB = evalFloat("faderb", 0, t);

    fademode = evalInt("fademode", 0, t);
    if (fademode == FADE_SQUARE)
    {
        data->myFaderA *= data->myFaderA;
        data->myFaderB *= data->myFaderB;
    }
    else if (fademode == FADE_ROOT)
    {
        data->myFaderA = SYSsqrt(data->myFaderA);
        data->myFaderB = SYSsqrt(data->myFaderB);
    }

    // A tile can be passed through untouched when one fader is fully on
    // and the other fully off.
    data->myPassA = (data->myFaderA == 1.0f && data->myFaderB == 0.0f);
    data->myPassB = (data->myFaderB == 1.0f && data->myFaderA == 0.0f);

    boost = evalFloat("boostval", 0, t) * 0.5f;
    data->myBoostA = data->myFaderB * boost;
    data->myBoostB = data->myFaderA * boost;

    blur = evalFloat("bloomblur", 0, t) * getXScaleFactor(xres);
    data->myBlurA = data->myFaderB * blur;
    data->myBlurB = data->myFaderA * blur;
    data->myBlurRadA = (int) SYSceil(data->myBlurA * 0.5f);
    data->myBlurRadB = (int) SYSceil(data->myBlurB * 0.5f);

    return data;
}
Here a new COP2_ContextData type is defined, which holds the values of our parameters as well as some pre-computed data. The newContextData() method creates an instance, populates it with values, and returns the new context data instance.
void
COP2_MultiInputWipe::computeImageBounds(
COP2_Context &context)
{
    cop2_MultiInputWipeData *data =
        static_cast<cop2_MultiInputWipeData *>(context.data());
    bool init = false;
    int x1, y1, x2, y2;
    int ix1, ix2, iy1, iy2;

    x1 = 0;
    y1 = 0;
    x2 = 0;
    y2 = 0;
for(int i=0; i<nInputs(); i++)
{
if(getInputBounds(i, context, ix1, iy1, ix2, iy2))
{
if(!init)
{
x1 = ix1;
y1 = iy1;
x2 = ix2;
y2 = iy2;
init = true;
}
else
{
if(ix1 < x1) x1 = ix1;
if(ix2 > x2) x2 = ix2;
if(iy1 < y1) y1 = iy1;
if(iy2 > y2) y2 = iy2;
}
}
}
if(!data->myPassA && !data->myPassB)
{
int brad =
SYSmax(data->myBlurRadA, data->myBlurRadB);
x1 -= brad;
y1 -= brad;
x2 += brad;
y2 += brad;
    }

    // Commit the (possibly expanded) bounds back to the context.
    context.setImageBounds(x1, y1, x2, y2);
}
This algorithm uses a blur in the transition, so it's useful to expand the canvas size to include the blur. The canvas size also needs to contain all the bounds of the inputs so that the output image isn't cropped improperly.
void
COP2_MultiInputWipe::getInputDependenciesForOutputArea(
    COP2_CookAreaInfo &output_area,
    const COP2_CookAreaList &input_areas,
    COP2_CookAreaList &needed_areas)
{
    COP2_CookAreaInfo *area;
    COP2_Context *context;
    cop2_MultiInputWipeData *cdata;

    // When bypassed, only the first input is needed, unmodified.
    if (getBypass())
    {
        area = makeOutputAreaDependOnMyPlane(0, output_area, input_areas,
                                             needed_areas);
        return;
    }

    // Fetch this node's context data for the area being cooked (the exact
    // retrieval call is reconstructed here; see the full HDK sample).
    context = output_area.getNodeContextData();
    cdata = static_cast<cop2_MultiInputWipeData *>(context->data());

    // Input A is needed unless input B is being passed straight through.
    // The needed area is expanded by the blur radius.
    if (!cdata->myPassB)
    {
        area = makeOutputAreaDependOnMyPlane(0, output_area, input_areas,
                                             needed_areas);
        if (area)
            area->expandNeededArea(cdata->myBlurRadA, cdata->myBlurRadA,
                                   cdata->myBlurRadA, cdata->myBlurRadA);
    }

    // Likewise for input B.
    if (!cdata->myPassA)
    {
        area = makeOutputAreaDependOnMyPlane(1, output_area, input_areas,
                                             needed_areas);
        if (area)
            area->expandNeededArea(cdata->myBlurRadB, cdata->myBlurRadB,
                                   cdata->myBlurRadB, cdata->myBlurRadB);
    }
}
This method tells the COP cooking scheduler which input subregions are needed to cook the subregion in output_area. In this case, we need the image from both input A and B corresponding to the plane in output_area. Due to the blur, the COP needs neighboring pixels, so the bounds of each input area are expanded to accommodate this (by the blur radius).
int
COP2_MultiInputWipe::passThrough(COP2_Context &context,
                                 const TIL_Plane *plane, int,
                                 int, float t,
                                 int xstart, int ystart)
{
    cop2_MultiInputWipeData *data =
        static_cast<cop2_MultiInputWipeData *>(context.data());
    const TIL_Sequence *inputseq = 0;
    int input = -1;

    // Only pass through when one fader is fully on and the other fully off.
    if (data->myPassA)
        input = 0;
    else if (data->myPassB)
        input = 1;

    if (input >= 0)
        inputseq = inputInfo(input);

    if (inputseq)
    {
        // The plane must also exist in the input being passed through
        // (the name-based lookup shown here is an assumption of this excerpt).
        const TIL_Plane *inputplane = inputseq->getPlane(plane->getName());

        if (inputplane)
        {
            // The input must match the output resolution and tile alignment
            // for its tiles to be used as-is.
            int xres, yres;
            int ixres, iyres;

            mySequence.getRes(xres, yres);
            inputseq->getRes(ixres, iyres);

            if (ixres == xres && iyres == yres &&
                isTileAlignedWithInput(input, context, xstart, ystart))
            {
                return 1;
            }
        }
    }

    return 0;
}
void
COP2_MultiInputWipe::passThroughTiles(COP2_Context &context,
                                      const TIL_Plane *plane, int array_index,
                                      float t, int xstart, int ystart,
                                      TIL_TileList *&tiles,
                                      int block, bool *mask, bool *blocked)
{
    cop2_MultiInputWipeData *data =
        static_cast<cop2_MultiInputWipeData *>(context.data());
    bool iblocked = false;

    // Pass the tiles of whichever input is fully visible straight through.
    if (data->myPassA)
    {
        tiles = passInputTile(0, context, plane, array_index, t, xstart,
                              ystart, block, &iblocked, mask);
    }
    else if (data->myPassB)
    {
        tiles = passInputTile(1, context, plane, array_index, t, xstart,
                              ystart, block, &iblocked, mask);
    }

    // If the input tiles were unavailable because another thread is cooking
    // them, report that this request was blocked.
    if (!tiles && iblocked && blocked)
        *blocked = true;
}
These methods are an optimization that can pass tiles through the node without modification. In this example, when one fader is at 1 and the other at 0, the result is exactly the same as one of the inputs. As most transitions only occur for a fraction of the two sequences, this is a substantial optimization to the node.
In order to pass a tile through, two methods are overridden: COP2_Node::passThrough() and COP2_Node::passThroughTiles(). The first method returns true (non-zero) if the tiles specified can be passed through as-is. The second method does the actual passing of the tiles, using COP2_Node::passInputTile().
It is important that the output tile match the input tile being passed through, in terms of data format and position in the canvas. The frame and plane must also exist in the input that is being passed through. For most single input filters, this is the case. For multi-input filters, it's quite possible that a mismatch in these tile attributes will occur.
OP_ERROR
COP2_MultiInputWipe::cookMyTile(COP2_Context &context, TIL_TileList *tilelist)
{
    cop2_MultiInputWipeData *data =
        static_cast<cop2_MultiInputWipeData *>(context.data());
    TIL_Region *aregion, *bregion;
    int arad, brad;
    bool init = false;

    arad = data->myBlurRadA;
    brad = data->myBlurRadB;

    // Request the input regions as FP32, using a scoped copy of the cook
    // plane with its format changed (the format-change setup and the
    // requested bounds, expanded by the blur radius, are reconstructed here;
    // see the full HDK sample for the exact calls).
    TIL_Plane fpplane(*context.myPlane);
    fpplane.setFormat(TILE_FLOAT32);
    fpplane.setScoped(1);

    if (data->myFaderA != 0.0f)
    {
        aregion = inputRegion(0, context, &fpplane, 0, context.getTime(),
                              tilelist->myX1 - arad, tilelist->myY1 - arad,
                              tilelist->myX2 + arad, tilelist->myY2 + arad);
        if (aregion)
        {
            boostAndBlur(tilelist, aregion, data->myFaderA,
                         data->myBoostA, arad, data->myBlurA, false);
            init = true;
            releaseRegion(aregion);
        }
    }

    if (data->myFaderB != 0.0f)
    {
        bregion = inputRegion(1, context, &fpplane, 0, context.getTime(),
                              tilelist->myX1 - brad, tilelist->myY1 - brad,
                              tilelist->myX2 + brad, tilelist->myY2 + brad);
        if (bregion)
        {
            boostAndBlur(tilelist, bregion, data->myFaderB,
                         data->myBoostB, brad, data->myBlurB, init);
            init = true;
            releaseRegion(bregion);
        }
    }

    // If neither fader contributed, the result is black.
    if (!init)
        tilelist->clearToBlack();

    return error();
}
The next method, cookMyTile(), actually processes the image data. It uses an FP32 version of the cook plane to access the input regions, which converts the data to FP32 for easier processing. For each input that is required (its fader is not zero), the region is requested and the private method boostAndBlur() is called. This method applies the boost and bloom effect to the input, and either assigns it to the tiles (first pass) or adds it in (based on the init variable). Each region is then released.
If both faders are zero, the tiles are cleared to black, as no image would be seen.
void
COP2_MultiInputWipe::boostAndBlur(TIL_TileList *tiles, TIL_Region *input,
                                  float fade, float boost, int rad, float blur,
                                  bool add)
{
    int x, y, i, j, idx, ti;
    int w, h, stride;
    TIL_Tile *tile;
    float *src, *scan;
    float vedge;
    float sum, hsum;
    float *dest = NULL;
    bool alloced = false;
    const float iblur = fade / ((1.0f + blur) * (1.0f + blur));
    const float edge = 1.0f - (rad - blur * 0.5f);
    const float addf = add ? 1.0f : 0.0f;

    w = tiles->myX2 - tiles->myX1 + 1;
    h = tiles->myY2 - tiles->myY1 + 1;
    stride = w + rad * 2;

    // Boost pass: overexpose the FP32 input region in place, component by
    // component (the whole padded region, including the blur apron).
    if (boost != 0.0f)
    {
        for (ti = 0; ti < PLANE_MAX_VECTOR_SIZE; ti++)
        {
            src = (float *) input->getImageData(ti);
            if (src)
                for (y = 0; y < (h + rad * 2) * stride; y++)
                    *src++ += boost;
        }
    }

    // On the first pass, allocate a scratch buffer for the output.
    if (!add)
    {
        dest = new float[w * h];
        alloced = true;
    }
The first part of this method sets up some constants for the blur and makes a pass through the input region data to boost the input values. The constant iblur normalizes the (1 + blur) by (1 + blur) filter footprint (scaled by the fader value), while edge is the fractional weight given to the outermost samples when the blur width is not a whole number of pixels.
    // Iterate over each uncooked component tile in the tilelist.
    FOR_EACH_UNCOOKED_TILE(tiles, tile, ti)
    {
        if (add)
        {
            // Second pass: fetch the existing tile data in FP32 so the new
            // input can be accumulated onto it.
            if (getTileInFP(tiles, dest, ti))
                alloced = true;
        }
        else
        {
            memset(dest, 0, sizeof(float) * w * h);
        }

        // Convolve the boosted region into the destination buffer.
        src = (float *) input->getImageData(ti);

        for (idx = 0, y = 0; y < h; y++)
        {
            for (x = 0; x < w; x++, idx++)
            {
                sum = 0.0f;
                for (i = -rad; i <= rad; i++)
                {
                    vedge = (i == -rad || i == rad) ? edge : 1.0f;
                    // Point at this pixel's row within the padded region.
                    scan = src + (y + rad + i) * stride + (x + rad);

                    hsum = scan[-rad] * edge;
                    if (rad)
                        hsum += scan[rad] * edge;
                    for (j = -rad + 1; j < rad; j++)
                        hsum += scan[j];

                    sum += hsum * vedge;
                }

                dest[idx] = dest[idx] * addf + sum * iblur;
            }
        }

        writeFPtoTile(tiles, dest, ti);
    }

    if (alloced)
        delete [] dest;
}
The second part iterates over each tile and blurs each pixel using the boosted input. This creates an HDR-bloom effect in bright spots.
The tile data is extracted in FP from the tile using COP2_Node::getTileInFP() on the second pass, where the second input is accumulated with the first. On the first pass, the extra conversion is avoided and the dest array is initialized with zero. For both passes, COP2_Node::writeFPtoTile() is used to commit the data back to the tile.
- Note
- This blur uses a brute-force convolution, which is very slow for larger blur sizes. This was done to keep the example simple. Separable filters, resolution reduction, or filtering by integration are all much faster methods of applying a blur.
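As a rough illustration of the separable approach mentioned in the note, the sketch below blurs a single-channel float buffer with a horizontal pass followed by a vertical pass, so the cost grows linearly with the radius rather than quadratically. This is plain C++ and not part of the HDK sample; the buffer layout and helper name are assumptions made only for illustration.

#include <algorithm>
#include <vector>

// Hypothetical helper: separable box blur of a w x h single-channel image.
// Two 1D passes cost O(w*h*rad) instead of O(w*h*rad*rad) for a 2D convolve.
static void separableBoxBlur(std::vector<float> &img, int w, int h, int rad)
{
    if (rad <= 0)
        return;

    const float norm = 1.0f / (2 * rad + 1);
    std::vector<float> tmp(img.size());

    // Horizontal pass: average each pixel's row neighbourhood.
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
        {
            float sum = 0.0f;
            for (int i = -rad; i <= rad; i++)
            {
                int xi = std::min(std::max(x + i, 0), w - 1); // clamp at edges
                sum += img[y * w + xi];
            }
            tmp[y * w + x] = sum * norm;
        }

    // Vertical pass: average each pixel's column neighbourhood of the
    // horizontally blurred result.
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
        {
            float sum = 0.0f;
            for (int i = -rad; i <= rad; i++)
            {
                int yi = std::min(std::max(y + i, 0), h - 1);
                sum += tmp[yi * w + x];
            }
            img[y * w + x] = sum * norm;
        }
}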
void
newCop2Operator(OP_OperatorTable *table)
{
    // The internal operator name below is a placeholder; see the full HDK
    // sample for the actual name used.
    table->addOperator(new OP_Operator("hdk_multiinputwipe",
                                       "HDK Multi Input Wipe",
                                       COP2_MultiInputWipe::myConstructor,
                                       &COP2_MultiInputWipe::myTemplatePair,
                                       2,   // min inputs
                                       2,   // max inputs
                                       &COP2_MultiInputWipe::myVariablePair,
                                       0,   // flags (not a generator)
                                       COP2_MultiInputWipe::myInputLabels));
}
Finally, the custom COP is registered with Houdini. It is a two-input filter, and both inputs are required.
- Note
- This example could be extended to any number of inputs, using a multiparm for the faders, array versions of the stashed data in cop2_MultiInputWipeData, and looping over each input in the cookMyTile() method. The number of faders can be managed automatically by overriding OP_Node::inputConnectChanged() (but be sure to call the parent class version as well). A sketch of reading such a multiparm follows the note.
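As a rough sketch of that extension, the per-input fader values could be gathered in newContextData() along these lines. The parameter names "numfaders" and "fader#" are hypothetical and not part of the sample.

// Hypothetical multiparm evaluation for an N-input version of the wipe.
// "numfaders" is the multiparm count and "fader#" the instanced slider.
UT_FloatArray faders;
int nfaders = evalInt("numfaders", 0, t);

for (int i = 0; i < nfaders; i++)
{
    int inst = i + 1;   // multiparm instances are numbered from 1 by default
    faders.append(evalFloatInst("fader#", &inst, 0, t));
}
// 'faders' (stored as an array in cop2_MultiInputWipeData) would then replace
// the fixed myFaderA/myFaderB members.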