equalis source code

1. How the training functions (traingdm, trainlm, trainbr) of MATLAB's BP neural network training algorithms are implemented, with the corresponding VC source code
2. How to design a custom transform filter


How the training functions (traingdm, trainlm, trainbr) of MATLAB's BP neural network training algorithms are implemented, with the corresponding VC source code

       VC source code? You must be joking.

       Here is the MATLAB source for trainlm (trainlm.m):

       function [out1,out2] = trainlm(varargin)

       %TRAINLM Levenberg-Marquardt backpropagation.

       %

       % <a href="matlab:doc trainlm">trainlm</a> is a network training function that updates weight and

       % bias states according to Levenberg-Marquardt optimization.

       %

       % <a href="matlab:doc trainlm">trainlm</a> is often the fastest backpropagation algorithm in the toolbox,

       % and is highly recommended as a first choice supervised algorithm,

       % although it does require more memory than other algorithms.

       %

       % [NET,TR] = <a href="matlab:doc trainlm">trainlm</a>(NET,X,T) takes a network NET, input data X

       % and target data T and returns the network after training it, and a

       % training record TR.

       %

       % [NET,TR] = <a href="matlab:doc trainlm">trainlm</a>(NET,X,T,Xi,Ai,EW) takes additional optional

       % arguments suitable for training dynamic networks and training with

       % error weights. Xi and Ai are the initial input and layer delays states

       % respectively and EW defines error weights used to indicate

       % the relative importance of each target value.

       %

       % Training occurs according to training parameters, with default values.

       % Any or all of these can be overridden with parameter name/value argument

       % pairs appended to the input argument list, or by appending a structure

       % argument with fields having one or more of these names.

       % show 25 Epochs between displays

       % showCommandLine 0 generate command line output

       % showWindow 1 show training GUI

       % epochs 1000 Maximum number of epochs to train

       % goal 0 Performance goal

       % max_fail 5 Maximum validation failures

       % min_grad 1e-5 Minimum performance gradient

       % mu 0.001 Initial Mu

       % mu_dec 0.1 Mu decrease factor

       % mu_inc 10 Mu increase factor

       % mu_max 1e10 Maximum Mu

       % time inf Maximum time to train in seconds

       %

       % To make this the default training function for a network, and view

       % and/or change parameter settings, use these two properties:

       %

       % net.<a href="matlab:doc nnproperty.net_trainFcn">trainFcn</a> = 'trainlm';

       % net.<a href="matlab:doc nnproperty.net_trainParam">trainParam</a>

       %

       % See also trainscg, feedforwardnet, narxnet.

       % Mark Beale, --, ODJ //

       % Updated by Orlando De Jesús, Martin Hagan, Dynamic Training 7--

       % Copyright - The MathWorks, Inc.

       % $Revision: 1.1.6..2.2 $ $Date: // :: $

       %% =======================================================

       % BOILERPLATE_START

       % This code is the same for all Training Functions.

        persistent INFO;

        if isempty(INFO), INFO = get_info; end

        nnassert.minargs(nargin,1);

        in1 = varargin{ 1};

        if ischar(in1)

        switch (in1)

        case 'info'

        out1 = INFO;

        case 'check_param'

        nnassert.minargs(nargin,2);

        param = varargin{ 2};

        err = nntest.param(INFO.parameters,param);

        if isempty(err)

        err = check_param(param);

        end

        if nargout > 0

        out1 = err;

        elseif ~isempty(err)

        nnerr.throw('Type',err);

        end

        otherwise,

        try

        out1 = eval(['INFO.' in1]);

        catch me, nnerr.throw(['Unrecognized first argument: ''' in1 ''''])

        end

        end

        return

        end

        nnassert.minargs(nargin,2);

        net = nn.hints(nntype.network('format',in1,'NET'));

        oldTrainFcn = net.trainFcn;

        oldTrainParam = net.trainParam;

        if ~strcmp(net.trainFcn,mfilename)

        net.trainFcn = mfilename;

        net.trainParam = INFO.defaultParam;

        end

        [args,param] = nnparam.extract_param(varargin(2:end),net.trainParam);

        err = nntest.param(INFO.parameters,param);

        if ~isempty(err), nnerr.throw(nnerr.value(err,'NET.trainParam')); end

        if INFO.isSupervised && isempty(net.performFcn) % TODO - fill in MSE

        nnerr.throw('Training function is supervised but NET.performFcn is undefined.');

        end

        if INFO.usesGradient && isempty(net.derivFcn) % TODO - fill in

        nnerr.throw('Training function uses derivatives but NET.derivFcn is undefined.');

        end

        if net.hint.zeroDelay, nnerr.throw('NET contains a zero-delay loop.'); end

        [X,T,Xi,Ai,EW] = nnmisc.defaults(args,{ },{ },{ },{ },{ 1});

        X = nntype.data('format',X,'Inputs X');

        T = nntype.data('format',T,'Targets T');

        Xi = nntype.data('format',Xi,'Input states Xi');

        Ai = nntype.data('format',Ai,'Layer states Ai');

        EW = nntype.nndata_pos('format',EW,'Error weights EW');

        % Prepare Data

        [net,data,tr,~,err] = nntraining.setup(net,mfilename,X,Xi,Ai,T,EW);

        if ~isempty(err), nnerr.throw('Args',err), end

        % Train

        net = struct(net);

        fcns = nn.subfcns(net);

        [net,tr] = train_network(net,tr,data,fcns,param);

        tr = nntraining.tr_clip(tr);

        if isfield(tr,'perf')

        tr.best_perf = tr.perf(tr.best_epoch+1);

        end

        if isfield(tr,'vperf')

        tr.best_vperf = tr.vperf(tr.best_epoch+1);

        end

        if isfield(tr,'tperf')

        tr.best_tperf = tr.tperf(tr.best_epoch+1);

        end

        net.trainFcn = oldTrainFcn;

        net.trainParam = oldTrainParam;

        out1 = network(net);

        out2 = tr;

       end

       % BOILERPLATE_END

       %% =======================================================

       % TODO - MU => MU_START

       % TODO - alternate parameter names (i.e. MU for MU_START)

       function info = get_info()

        info = nnfcnTraining(mfilename,'Levenberg-Marquardt',7.0,true,true,...

        [ ...

        nnetParamInfo('showWindow','Show Training Window Feedback','nntype.bool_scalar',true,...

        'Display training window during training.'), ...

        nnetParamInfo('showCommandLine','Show Command Line Feedback','nntype.bool_scalar',false,...

        'Generate command line output during training.'), ...

        nnetParamInfo('show','Command Line Frequency','nntype.strict_pos_int_inf_scalar',25,...

        'Frequency to update command line.'), ...

        ...

        nnetParamInfo('epochs','Maximum Epochs','nntype.pos_int_scalar',1000,...

        'Maximum number of training iterations before training is stopped.'), ...

        nnetParamInfo('time','Maximum Training Time','nntype.pos_inf_scalar',inf,...

        'Maximum time in seconds before training is stopped.'), ...

        ...

        nnetParamInfo('goal','Performance Goal','nntype.pos_scalar',0,...

        'Performance goal.'), ...

        nnetParamInfo('min_grad','Minimum Gradient','nntype.pos_scalar',1e-5,...

        'Minimum performance gradient before training is stopped.'), ...

        nnetParamInfo('max_fail','Maximum Validation Checks','nntype.strict_pos_int_scalar',6,...

        'Maximum number of validation checks before training is stopped.'), ...

        ...

        nnetParamInfo('mu','Mu','nntype.pos_scalar',0.001,...

        'Mu.'), ...

        nnetParamInfo('mu_dec','Mu Decrease Ratio','nntype.real_0_to_1',0.1,...

        'Ratio to decrease mu.'), ...

        nnetParamInfo('mu_inc','Mu Increase Ratio','nntype.over1',10,...

        'Ratio to increase mu.'), ...

        nnetParamInfo('mu_max','Maximum mu','nntype.strict_pos_scalar',1e10,...

        'Maximum mu before training is stopped.'), ...

        ], ...

        [ ...

        nntraining.state_info('gradient','Gradient','continuous','log') ...

        nntraining.state_info('mu','Mu','continuous','log') ...

        nntraining.state_info('val_fail','Validation Checks','discrete','linear') ...

        ]);

       end

       function err = check_param(param)

        err = '';

       end

       function [net,tr] = train_network(net,tr,data,fcns,param)

        % Checks

        if isempty(net.performFcn)

        warning('nnet:trainlm:Performance',nnwarning.empty_performfcn_corrected);

        net.performFcn = 'mse';

        net.performParam = mse('defaultParam');

        tr.performFcn = net.performFcn;

        tr.performParam = net.performParam;

        end

        if isempty(strmatch(net.performFcn,{ 'sse','mse'},'exact'))

        warning('nnet:trainlm:Performance',nnwarning.nonjacobian_performfcn_replaced);

        net.performFcn = 'mse';

        net.performParam = mse('defaultParam');

        tr.performFcn = net.performFcn;

        tr.performParam = net.performParam;

        end

        % Initialize

        startTime = clock;

        original_net = net;

        [perf,vperf,tperf,je,jj,gradient] = nntraining.perfs_jejj(net,data,fcns);

        [best,val_fail] = nntraining.validation_start(net,perf,vperf);

        WB = getwb(net);

        lengthWB = length(WB);

        ii = sparse(1:lengthWB,1:lengthWB,ones(1,lengthWB));

        mu = param.mu;

        % Training Record

        tr.best_epoch = 0;

        tr.goal = param.goal;

        tr.states = { 'epoch','time','perf','vperf','tperf','mu','gradient','val_fail'};

        % Status

        status = ...

        [ ...

        nntraining.status('Epoch','iterations','linear','discrete',0,param.epochs,0), ...

        nntraining.status('Time','seconds','linear','discrete',0,param.time,0), ...

        nntraining.status('Performance','','log','continuous',perf,param.goal,perf) ...

        nntraining.status('Gradient','','log','continuous',gradient,param.min_grad,gradient) ...

        nntraining.status('Mu','','log','continuous',mu,param.mu_max,mu) ...

        nntraining.status('Validation Checks','','linear','discrete',0,param.max_fail,0) ...

        ];

        nn_train_feedback('start',net,status);

        % Train

        for epoch = 0:param.epochs

        % Stopping Criteria

        current_time = etime(clock,startTime);

        [userStop,userCancel] = nntraintool('check');

        if userStop, tr.stop = 'User stop.'; net = best.net;

        elseif userCancel, tr.stop = 'User cancel.'; net = original_net;

        elseif (perf <= param.goal), tr.stop = 'Performance goal met.'; net = best.net;

        elseif (epoch == param.epochs), tr.stop = 'Maximum epoch reached.'; net = best.net;

        elseif (current_time >= param.time), tr.stop = 'Maximum time elapsed.'; net = best.net;

        elseif (gradient <= param.min_grad), tr.stop = 'Minimum gradient reached.'; net = best.net;

        elseif (mu >= param.mu_max), tr.stop = 'Maximum MU reached.'; net = best.net;

        elseif (val_fail >= param.max_fail), tr.stop = 'Validation stop.'; net = best.net;

        end

        % Feedback

        tr = nntraining.tr_update(tr,[epoch current_time perf vperf tperf mu gradient val_fail]);

        nn_train_feedback('update',net,status,tr,data, ...

        [epoch,current_time,best.perf,gradient,mu,val_fail]);

        % Stop

        if ~isempty(tr.stop), break, end

        % Levenberg Marquardt

        while (mu <= param.mu_max)

        % CHECK FOR SINGULAR MATRIX

        [msgstr,msgid] = lastwarn;

        lastwarn('MATLAB:nothing','MATLAB:nothing')

        warnstate = warning('off','all');

        dWB = -(jj+ii*mu) \ je;

        [~,msgid1] = lastwarn;

        flag_inv = isequal(msgid1,'MATLAB:nothing');

        if flag_inv, lastwarn(msgstr,msgid); end;

        warning(warnstate)

        WB2 = WB + dWB;

        net2 = setwb(net,WB2);

        perf2 = nntraining.train_perf(net2,data,fcns);

        % TODO - possible speed enhancement

        % - retain intermediate variables for Memory Reduction = 1

        if (perf2 < perf) && flag_inv

        WB = WB2; net = net2;

        mu = max(mu*param.mu_dec,1e-20);

        break

        end

        mu = mu * param.mu_inc;

        end

        % Validation

        [perf,vperf,tperf,je,jj,gradient] = nntraining.perfs_jejj(net,data,fcns);

        [best,tr,val_fail] = nntraining.validation(best,tr,val_fail,net,perf,vperf,epoch);

        end

       end
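
       As for "VC source code": MathWorks does not ship C sources for trainlm, but the core of what the inner loop above does is easy to sketch in plain C++. The sketch below is my own illustration, not MathWorks code: it solves the damped normal equations (J'J + mu*I) dW = -J'e, accepts a step only when the error drops, and adjusts mu with the same mu_dec/mu_inc rule trainlm uses, on a toy model y = w0 + w1*x with made-up data.

       #include <cstdio>
       #include <cmath>
       #include <utility>
       #include <vector>

       // Solve A*x = b for a small dense system by Gaussian elimination with partial pivoting.
       static std::vector<double> Solve(std::vector<std::vector<double>> A, std::vector<double> b)
       {
           const int n = (int)b.size();
           for (int k = 0; k < n; ++k) {
               int piv = k;
               for (int i = k + 1; i < n; ++i)
                   if (std::fabs(A[i][k]) > std::fabs(A[piv][k])) piv = i;
               std::swap(A[k], A[piv]); std::swap(b[k], b[piv]);
               for (int i = k + 1; i < n; ++i) {
                   double f = A[i][k] / A[k][k];
                   for (int j = k; j < n; ++j) A[i][j] -= f * A[k][j];
                   b[i] -= f * b[k];
               }
           }
           std::vector<double> x(n);
           for (int i = n - 1; i >= 0; --i) {
               double s = b[i];
               for (int j = i + 1; j < n; ++j) s -= A[i][j] * x[j];
               x[i] = s / A[i][i];
           }
           return x;
       }

       int main()
       {
           // Toy data roughly following y = 1.5 + 0.5*x, fitted with the model y(w) = w0 + w1*x.
           const double xs[] = {0, 1, 2, 3, 4}, ys[] = {1.4, 2.1, 2.4, 3.1, 3.4};
           const int m = 5, n = 2;
           double w[2] = {0.0, 0.0};
           double mu = 0.001;                               // same defaults as trainlm
           const double mu_inc = 10, mu_dec = 0.1, mu_max = 1e10;

           auto sse = [&](const double* p) {                // sum of squared errors
               double s = 0;
               for (int i = 0; i < m; ++i) { double e = ys[i] - (p[0] + p[1] * xs[i]); s += e * e; }
               return s;
           };

           for (int epoch = 0; epoch < 100 && mu <= mu_max; ++epoch) {
               // Build J'J and J'e; the error is e_i = y_i - (w0 + w1*x_i), so de/dw = (-1, -x_i).
               std::vector<std::vector<double>> JtJ(n, std::vector<double>(n, 0));
               std::vector<double> Jte(n, 0);
               for (int i = 0; i < m; ++i) {
                   double e = ys[i] - (w[0] + w[1] * xs[i]);
                   double J[2] = {-1.0, -xs[i]};
                   for (int a = 0; a < n; ++a) {
                       Jte[a] += J[a] * e;
                       for (int b = 0; b < n; ++b) JtJ[a][b] += J[a] * J[b];
                   }
               }
               double perf = sse(w);
               // Inner loop: raise mu until the damped step reduces the error,
               // just like trainlm's  dWB = -(jj + ii*mu) \ je.
               while (mu <= mu_max) {
                   std::vector<std::vector<double>> A = JtJ;
                   std::vector<double> rhs(n);
                   for (int a = 0; a < n; ++a) { A[a][a] += mu; rhs[a] = -Jte[a]; }
                   std::vector<double> dw = Solve(A, rhs);
                   double w2[2] = {w[0] + dw[0], w[1] + dw[1]};
                   if (sse(w2) < perf) { w[0] = w2[0]; w[1] = w2[1]; mu *= mu_dec; break; }
                   mu *= mu_inc;
               }
           }
           std::printf("fitted: w0 = %.3f, w1 = %.3f\n", w[0], w[1]);
           return 0;
       }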

How to design a custom transform filter

       For a beginner with DirectShow, nothing is harder than trying to design a custom filter.

       Designing a custom transform filter is difficult for several reasons.

       First, a filter is a DLL (with the extension .ax), and building a DLL project takes some VC experience, so it helps to brush up on DLL basics first.

       Second, registering the DLL, generating GUIDs and configuring the project are all fiddly.

       Third, there are few ready-made transform filter examples online. The samples that ship with DirectShow, such as NULLINPLACE and CONTRAST, are too complex: they come with dialogs and property pages, which is not beginner friendly, and none of them converts the image format, even though media-type conversion is the main job of a transform filter. So those samples are not much help here.

       As a beginner I was thoroughly stuck on these problems; after a lot of digging I finally climbed out of the swamp and things became clear. I am writing it down in the hope that it helps someone else, and as a small summary of the year.

       My example is a transform filter that converts YUY2 (16-bit) media into 24-bit RGB. The reason is that my camera only supports the YUY2 format and I wanted bitmaps, and it was also a chance to learn filter design.

       The concrete steps follow.

       Part 1. Configure the development environment

        1. In VC, under Tools->Options->Directories, set the include and library paths for the DirectX SDK.

        2. Build the base-class sources to produce strmbasd.lib (debug) and strmbase.lib (release).

        3. Use the VC wizard to create a new Win32 DLL (empty) project.

        4. Settings->Link->Output file name: YUV2RGBfilter.ax

        5. Settings->Link: add strmbasd.lib winmm.lib quartz.lib vfw32.lib (mind the paths).

        6. Create a .def file with the same name, add it to the project, with the following contents:

        LIBRARY YUV2RGBfilter.ax

        EXPORTS

        DllMain PRIVATE

        DllGetClassObject PRIVATE

        DllCanUnloadNow PRIVATE

        DllRegisterServer PRIVATE

        DllUnregisterServer PRIVATE

        7. Create a class YUV2RGBfilter, with its own .cpp and .h files.

        8. In YUV2RGBfilter.cpp define the DLL entry point and the registration functions, placed at the end of the .cpp file:

       //

       // DllEntryPoint

       //

       extern "C" BOOL WINAPI DllEntryPoint(HINSTANCE, ULONG, LPVOID);

       BOOL APIENTRY DllMain(HANDLE hModule,

        DWORD dwReason,

        LPVOID lpReserved)

       {

       return DllEntryPoint((HINSTANCE)(hModule), dwReason, lpReserved);

       }

       ////////////////////////////////////////////////////////////////////////

       //

       // Exported entry points for registration and unregistration

       // (in this case they only call through to default implementations).

       //

       ////////////////////////////////////////////////////////////////////////

       STDAPI DllRegisterServer()

       {

        return AMovieDllRegisterServer2( TRUE );

       }

       STDAPI DllUnregisterServer()

       {

        return AMovieDllRegisterServer2( FALSE );

       }

        9. Headers to include in the .cpp file:

       #include <streams.h>

       #include <windows.h>

       #include <initguid.h>

       #include <olectl.h>

       #if (1100 > _MSC_VER)

       #include <olectlid.h>

       #endif

       #include "Y2Ruids.h" // our own public guids

       #include "YUV2RGBfilter.h"

       Part 2. Develop the filter

        1. Generate a GUID (run the guidgen tool from the command line), put it in its own file Y2Ruids.h, and use it in a DEFINE_GUID like the one below (substitute your own guidgen output for the values shown):

       #include <initguid.h>

       // YUV2toRGB Filter Object

       // { FFC8FD-B1A6-b0-A-D6EDEAFDA}

       DEFINE_GUID(CLSID_YUV2toRGB,

       0xffc8fd, 0xb1a6, 0xb0, 0xa3, 0x8, 0xd6, 0xed, 0xea, 0xf4, 0x5, 0xda);

        2. Define the CYUV2RGBfilter class, derived from CTransformFilter, in YUV2RGBfilter.h:

       // ----------------------------------------------------------------------------

       // Class definitions of CYUV2RGBfilter

       // ----------------------------------------------------------------------------

       //

       //

       class CYUV2RGBfilter : public CTransformFilter

       {

        public:

        static CUnknown * WINAPI CreateInstance(LPUNKNOWN punk, HRESULT *phr);

        STDMETHODIMP NonDelegatingQueryInterface(REFIID riid, void ** ppv);

        DECLARE_IUNKNOWN;

       // override pure virtual function

        HRESULT CheckInputType(const CMediaType *mtIn);

        HRESULT CheckTransform(const CMediaType *mtIn, const CMediaType *mtOut);

        HRESULT DecideBufferSize(IMemAllocator *pAlloc, ALLOCATOR_PROPERTIES *pProp);

        HRESULT GetMediaType(int iPosition, CMediaType *pMediaType);

        HRESULT Transform(IMediaSample *pIn, IMediaSample *pOut);

        private:

        //Constructor

        CYUV2RGBfilter(TCHAR *tszName, LPUNKNOWN punk, HRESULT *phr);

        // member function

        VOID ChangeFormat(AM_MEDIA_TYPE* pAdjustedType);

        DWORD ConvertYUV2toRGB(BYTE* yuv, BYTE* rgb, DWORD dsize);

        // member variable

        const long m_lBufferRequest;

        CCritSec m_Y2RLock; // To serialise access.

       };

        3. Write the constructor following the standard pattern:

        //

       // CYUV2RGBfilter::Constructor

       //

       CYUV2RGBfilter::CYUV2RGBfilter(TCHAR *tszName,LPUNKNOWN punk,HRESULT *phr) :

        CTransformFilter(tszName, punk, CLSID_YUV2toRGB),

        m_lBufferRequest(1)

       {

       ASSERT(tszName);

        ASSERT(phr);

       } // CYUV2RGBfilter

        4. Override the five pure virtual functions of CTransformFilter (the most important part):

        HRESULT CheckInputType(const CMediaType *mtIn);

        HRESULT CheckTransform(const CMediaType *mtIn, const CMediaType *mtOut);

        HRESULT DecideBufferSize(IMemAllocator *pAlloc, ALLOCATOR_PROPERTIES *pProp);

        HRESULT GetMediaType(int iPosition, CMediaType *pMediaType);

        HRESULT Transform(IMediaSample *pIn, IMediaSample *pOut);

        5. Design your own private helper functions to do the actual work.

        6. Register the filter information:

       // registration information

       //setup data

       const AMOVIESETUP_MEDIATYPE

       sudPinTypes = { &MEDIATYPE_Video // clsMajorType

        , &MEDIASUBTYPE_NULL } ; // clsMinorType

       const AMOVIESETUP_PIN

       psudPins[] = { { L"Input" // strName

        , FALSE // bRendered

        , FALSE // bOutput

        , FALSE // bZero

        , FALSE // bMany

        , &CLSID_NULL // clsConnectsToFilter

        , L"Output" // strConnectsToPin

        , 1 // nTypes

        , &sudPinTypes } // lpTypes

        , { L"Output" // strName

        , FALSE // bRendered

        , TRUE // bOutput

        , FALSE // bZero

        , FALSE // bMany

        , &CLSID_NULL // clsConnectsToFilter

        , L"Input" // strConnectsToPin

        , 1 // nTypes

        , &sudPinTypes } }; // lpTypes

       const AMOVIESETUP_FILTER

       sudYUV2RGB = { &CLSID_YUV2toRGB // clsID

        , L"YUV2RGB" // strName

        , MERIT_DO_NOT_USE // dwMerit

        , 2 // nPins

        , psudPins }; // lpPin

       //

       // Needed for the CreateInstance mechanism

       //

       CFactoryTemplate g_Templates[1]=

        { { L"YUV2RGB"

        , &CLSID_YUV2toRGB

        , CYUV2RGBfilter::CreateInstance

        , NULL

        , &sudYUV2RGB }

        };

       int g_cTemplates = sizeof(g_Templates)/sizeof(g_Templates[0]);

       After a successful build you get YUV2RGBfilter.ax.

       Register it from the command line with regsvr32 YUV2RGBfilter.ax. You only need to register once; if you change the filter, just overwrite the old .ax with the newly built one.

       Debugging is most convenient in GraphEdit.

       Those are the overall steps for building a filter.
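
       Besides GraphEdit, you can also load the registered filter from your own code. The following is a minimal sketch (mine, not part of the original post); it assumes the filter has already been registered with regsvr32, reuses CLSID_YUV2toRGB from Y2Ruids.h, and links against strmiids.lib and ole32.lib. Source and renderer filters would be created, added and connected the same way before running the graph.

       #include <dshow.h>
       #include "Y2Ruids.h"   // CLSID_YUV2toRGB from Part 2, step 1

       int main()
       {
           CoInitialize(NULL);

           IGraphBuilder *pGraph  = NULL;
           IBaseFilter   *pFilter = NULL;

           // Build an empty filter graph.
           HRESULT hr = CoCreateInstance(CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER,
                                         IID_IGraphBuilder, (void**)&pGraph);

           // Create the registered YUV2RGB filter by its CLSID and add it to the graph.
           if (SUCCEEDED(hr))
               hr = CoCreateInstance(CLSID_YUV2toRGB, NULL, CLSCTX_INPROC_SERVER,
                                     IID_IBaseFilter, (void**)&pFilter);
           if (SUCCEEDED(hr))
               hr = pGraph->AddFilter(pFilter, L"YUV2RGB");

           if (pFilter) pFilter->Release();
           if (pGraph)  pGraph->Release();
           CoUninitialize();
           return SUCCEEDED(hr) ? 0 : 1;
       }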

       Part 3. Now for the key point: a detailed look at the five overridden pure virtual functions. This is the most critical part.

        HRESULT CheckInputType(const CMediaType *mtIn);

        HRESULT CheckTransform(const CMediaType *mtIn, const CMediaType *mtOut);

        HRESULT DecideBufferSize(IMemAllocator *pAlloc, ALLOCATOR_PROPERTIES *pProp);

        HRESULT GetMediaType(int iPosition, CMediaType *pMediaType);

        HRESULT Transform(IMediaSample *pIn, IMediaSample *pOut);

       All five of these are pure virtual functions; they are the interface CTransformFilter exposes to us, and they must be overridden before the class can be instantiated.

       The biggest puzzle for a beginner is: who calls these functions, and where do their arguments come from? I was stuck on exactly these questions at first. In fact the DirectX help documentation explains it clearly; I just had not read it carefully.

       CheckInputType is called by the transform filter's input pin to check whether the input media type offered to this filter is acceptable.

       CheckTransform is called by the transform filter's output pin to check whether the proposed output media type is acceptable.

       GetMediaType is called by the transform filter's output pin to report the media formats the output pin supports, so that downstream filters can enumerate them.

       DecideBufferSize is called by the output pin to determine the number and size of the buffers.

       The upstream filter delivers samples to this filter by calling IMemInputPin::Receive on the input pin; the filter then calls CTransformFilter::Transform to process the data.

       The whole process goes like this: the input pin calls CheckInputType to screen the media types offered from upstream. If a type is acceptable, the output pin enumerates output media types through GetMediaType, and CheckTransform is then used to find, and select, an output type compatible with the input type. DecideBufferSize fixes the properties of the output buffers. Once all the checks and negotiation succeed the pins can connect, and Transform carries each sample from the input pin to the output pin. The output media type is determined by GetMediaType; once the media types match, the connection succeeds, but the data itself still flows through Transform. In theory, for uncompressed video one sample is exactly one frame, so the processing can be exact.

       To change the media format on the output pin, you must build the new media format in GetMediaType and then confirm in CheckTransform that the output format is the one you expect. For example, to turn a YUY2 (16-bit) media format into an RGB8 (8-bit) one, you would make changes like these:

       In GetMediaType:

       CheckPointer(pMediaType,E_POINTER);
        VIDEOINFO vih;
        memset(&vih, 0, sizeof(vih));
        vih.bmiHeader.biCompression = 0;
        vih.bmiHeader.biBitCount = 8;
        vih.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
        vih.bmiHeader.biWidth = 320;    // placeholder: use your source's frame width
        vih.bmiHeader.biHeight = 240;   // placeholder: use your source's frame height
        vih.bmiHeader.biPlanes = 1;
        vih.bmiHeader.biSizeImage = DIBSIZE(vih.bmiHeader);
        vih.bmiHeader.biClrImportant = 0;
        vih.bmiHeader.biClrUsed = 256;  // 8-bit output uses a full 256-entry palette
       // fill the palette with a grayscale ramp
        for (UINT i = 0; i < 256; i++)
        {
        vih.bmiColors[i].rgbBlue = (BYTE)i;
        vih.bmiColors[i].rgbRed = (BYTE)i;
        vih.bmiColors[i].rgbGreen = (BYTE)i;
        vih.bmiColors[i].rgbReserved = 0;
        }
        pMediaType->SetType(&MEDIATYPE_Video);
        pMediaType->SetFormatType(&FORMAT_VideoInfo);
        pMediaType->SetFormat((BYTE*)&vih, sizeof(vih));
        pMediaType->SetSubtype(&MEDIASUBTYPE_RGB8);
        pMediaType->SetSampleSize(vih.bmiHeader.biSizeImage);
       return NOERROR;

       ç„¶åŽåœ¨checkTransform中确认是否是期望的输出

        BITMAPINFOHEADER *pNewType = HEADER(mtOut->Format());
        if ((pNewType->biPlanes == 1)
        && (pNewType->biBitCount == 8)
        && (pNewType->biWidth == 320)        // the same placeholder frame size as in GetMediaType
        && (pNewType->biHeight == 240)
        && (pNewType->biClrUsed == 256)
        && (pNewType->biSizeImage == DIBSIZE(*pNewType)))
        {
        return S_OK;
        }

       My actual implementation is as follows.

       // GetMediaType

       //

       // I support one type, namely the type of the input pin

       // We must be connected to support the single output type

       //

       HRESULT CYUV2RGBfilter::GetMediaType(int iPosition, CMediaType *pMediaType)

       {

        // Is the input pin connected

        if(m_pInput->IsConnected() == FALSE)

        {

        return E_UNEXPECTED;

        }

        // This should never happen

        if(iPosition < 0)

        {

        return E_INVALIDARG;

        }

        // Do we have more items to offer

        if(iPosition > 0)

        {

        return VFW_S_NO_MORE_ITEMS;

        }

        CheckPointer(pMediaType,E_POINTER);

       if (iPosition == 0)

       {

        HRESULT hr = m_pInput->ConnectionMediaType(pMediaType);

        if (FAILED(hr))

        {

        return hr;

        }

       }

       // make some appropriate change

       ASSERT(pMediaType->formattype == FORMAT_VideoInfo);

       pMediaType->subtype = MEDIASUBTYPE_RGB24;   // 24-bit RGB output

       VIDEOINFOHEADER *pVih =

       reinterpret_cast<VIDEOINFOHEADER*>(pMediaType->pbFormat);

       pVih->bmiHeader.biCompression = 0;

       pVih->bmiHeader.biBitCount = 24;

       // biWidth and biHeight stay as copied from the connected input type

       pVih->bmiHeader.biSizeImage = DIBSIZE(pVih->bmiHeader);

       return S_OK;

       } // GetMediaType

       //

       // CheckInputType

       //

       // Check the input type is OK, return an error otherwise

       //

       HRESULT CYUV2RGBfilter::CheckInputType(const CMediaType *mtIn)

       {

        CheckPointer(mtIn,E_POINTER);

        // Check this is a VIDEOINFO type

        if(*mtIn->FormatType() != FORMAT_VideoInfo)

        {

        return E_INVALIDARG;

        }

        if((IsEqualGUID(*mtIn->Type(), MEDIATYPE_Video)) &&

        (IsEqualGUID(*mtIn->Subtype(), MEDIASUBTYPE_YUY2)))

        {

        VIDEOINFO *pvi = (VIDEOINFO *) mtIn->Format();

        if ((pvi->bmiHeader.biBitCount == 16)   // YUY2 is 16 bits per pixel

        &&(pvi->bmiHeader.biCompression==0))

        return S_OK;

        else

        return E_INVALIDARG;   // returning FALSE here would read as S_OK

        }

        else

        {

        return E_INVALIDARG;

        }

       } // CheckInputType

       // CheckTransform

       //

       // To be able to transform the formats must be compatible

       // mtIn: YUY2, 16 bits per pixel

       // mtOut: RGB, 24 bits per pixel

       HRESULT CYUV2RGBfilter::CheckTransform(const CMediaType *mtIn, const CMediaType *mtOut)

       {

        CheckPointer(mtIn,E_POINTER);

        CheckPointer(mtOut,E_POINTER);

        HRESULT hr;

        if(FAILED(hr = CheckInputType(mtIn)))

        {

        return hr;

        }

        // format must be a VIDEOINFOHEADER

        if((*mtOut->FormatType() != FORMAT_VideoInfo)

        ||(mtOut->cbFormat<sizeof(VIDEOINFOHEADER ))

        ||(mtOut->subtype != MEDIASUBTYPE_RGB24))   // 24-bit RGB output

        {

        return E_INVALIDARG;

        }

        BITMAPINFOHEADER *pBmiOut = HEADER(mtOut->pbFormat);

        if ((pBmiOut->biPlanes != 1)

        || (pBmiOut->biBitCount != 24)     // 24-bit RGB output

        || (pBmiOut->biCompression != 0))

        // a stricter check could also compare biWidth and biHeight against
        // the dimensions of the connected input media type

        {

        return E_INVALIDARG;

        }

        return S_OK;

       }

       // CheckTransform

       HRESULT CYUV2RGBfilter::DecideBufferSize(IMemAllocator *pAlloc, ALLOCATOR_PROPERTIES *pProperties)

       {

        CheckPointer(pAlloc,E_POINTER);

        CheckPointer(pProperties,E_POINTER);

        // Is the input pin connected

        if(m_pInput->IsConnected() == FALSE)

        {

        return E_UNEXPECTED;

        }

        HRESULT hr = NOERROR;

        pProperties->cBuffers = 1;

        pProperties->cbBuffer = m_pInput->CurrentMediaType().GetSampleSize()*2; // twice the input sample size (more than the 1.5x a YUY2 -> RGB24 frame needs)

        ASSERT(pProperties->cbBuffer);

        // If we don't have fixed sized samples we must guess some size

        if(!m_pInput->CurrentMediaType().bFixedSizeSamples)

        {

        if(pProperties->cbBuffer < 1)

        {

        // nothing more than a guess!! pick something large enough for one output frame
        pProperties->cbBuffer = 320 * 240 * 3;

        }

        }

        // Ask the allocator to reserve us some sample memory, NOTE the function

        // can succeed (that is return NOERROR) but still not have allocated the

        // memory that we requested, so we must check we got whatever we wanted

        ALLOCATOR_PROPERTIES Actual;

        hr = pAlloc->SetProperties(pProperties,&Actual);

        if(FAILED(hr))

        {

        return hr;

        }

        ASSERT(Actual.cBuffers == 1);

        if(pProperties->cBuffers > Actual.cBuffers ||

        pProperties->cbBuffer > Actual.cbBuffer)

        {

        return E_FAIL;

        }

        return NOERROR;

       } // DecideBufferSize

       //

       // Transform

       //

       // Copy the input sample into the output sample

       //

       //

       HRESULT CYUV2RGBfilter::Transform(IMediaSample *pIn, IMediaSample *pOut)

       {

        CheckPointer(pIn,E_POINTER);

        CheckPointer(pOut,E_POINTER);

        // Copy the sample data

        BYTE *pSourceBuffer, *pDestBuffer;

        long lSourceSize = pIn->GetActualDataLength();

        long lDestSize = (long)(lSourceSize*1.5);

        pIn->GetPointer(&pSourceBuffer);

        pOut->GetPointer(&pDestBuffer);

       //change data

        ConvertYUV2toRGB(pSourceBuffer,pDestBuffer,lSourceSize);

       // memset(pDestBuffer, 0, lDestSize);

        REFERENCE_TIME TimeStart, TimeEnd;

        if(NOERROR == pIn->GetTime(&TimeStart, &TimeEnd))

        {

        pOut->SetTime(&TimeStart, &TimeEnd);

        }

        LONGLONG MediaStart, MediaEnd;

        if(pIn->GetMediaTime(&MediaStart,&MediaEnd) == NOERROR)

        {

        pOut->SetMediaTime(&MediaStart,&MediaEnd);

        }

        // Copy the Sync point property

        HRESULT hr = pIn->IsSyncPoint();

        if(hr == S_OK)

        {

        pOut->SetSyncPoint(TRUE);

        }

        else if(hr == S_FALSE)

        {

        pOut->SetSyncPoint(FALSE);

        }

        else

        { // an unexpected error has occurred...

        return E_UNEXPECTED;

        }

       //

        AM_MEDIA_TYPE* pMediaType;

        pIn->GetMediaType(&pMediaType);

       ChangeFormat(pMediaType);

        // Copy the media type

        pOut->SetMediaType(pMediaType);

        // Copy the preroll property

        hr = pIn->IsPreroll();

        if(hr == S_OK)

        {

        pOut->SetPreroll(TRUE);

        }

        else if(hr == S_FALSE)

        {

        pOut->SetPreroll(FALSE);

        }

        else

        { // an unexpected error has occurred...

        return E_UNEXPECTED;

        }

        // Copy the discontinuity property

        hr = pIn->IsDiscontinuity();

        if(hr == S_OK)

        {

        pOut->SetDiscontinuity(TRUE);

        }

        else if(hr == S_FALSE)

        {

        pOut->SetDiscontinuity(FALSE);

        }

        else

        { // an unexpected error has occurred...

        return E_UNEXPECTED;

        }

        // Copy the actual data length

       //KASSERT((long)lDestSize <= pOut->GetSize());

        pOut->SetActualDataLength(lDestSize);

        return S_OK;

       } // Transform
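
       The post declares ConvertYUV2toRGB but never shows its body. Below is one possible sketch of it (mine, not the author's): it assumes packed YUY2 input (Y0 U Y1 V for every pair of pixels), writes 24-bit pixels in the B,G,R byte order a Windows DIB uses, applies the usual BT.601 integer approximation, and for brevity ignores stride padding and the bottom-up row order of a DIB.

       static BYTE ClampToByte(int v)
       {
           return (BYTE)(v < 0 ? 0 : (v > 255 ? 255 : v));
       }

       DWORD CYUV2RGBfilter::ConvertYUV2toRGB(BYTE* yuv, BYTE* rgb, DWORD dsize)
       {
           DWORD out = 0;
           for (DWORD i = 0; i + 3 < dsize; i += 4)        // 4 YUY2 bytes -> 2 pixels
           {
               int y0 = yuv[i]     - 16, u = yuv[i + 1] - 128;
               int y1 = yuv[i + 2] - 16, v = yuv[i + 3] - 128;
               int ys[2] = { y0, y1 };
               for (int p = 0; p < 2; ++p)
               {
                   int c = 298 * ys[p];
                   rgb[out++] = ClampToByte((c + 516 * u + 128) >> 8);             // B
                   rgb[out++] = ClampToByte((c - 100 * u - 208 * v + 128) >> 8);   // G
                   rgb[out++] = ClampToByte((c + 409 * v + 128) >> 8);             // R
               }
           }
           return out;   // bytes written: 1.5 * dsize, matching lDestSize above
       }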

       ç»è¿‡è¿™äº›æ­¥éª¤å°±èƒ½å¾—到符合功能要求的transform filter

       åŒæ—¶ç»è¿‡ä»¥ä¸Šæ­¥éª¤ä¹Ÿèƒ½å¯¹filter开发有个大体的了解

       å‡ºè‡ªï¼š/s/blog_vusf.html
