/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef _ncv_pixel_operations_hpp_
#define _ncv_pixel_operations_hpp_
#include <limits.h>
#include <float.h>
#include "NCV.hpp"
template<typename TBase> inline __host__ __device__ TBase _pixMaxVal();
template<> inline __host__ __device__ Ncv8u _pixMaxVal<Ncv8u>() {return UCHAR_MAX;}
template<> inline __host__ __device__ Ncv16u _pixMaxVal<Ncv16u>() {return USHRT_MAX;}
template<> inline __host__ __device__ Ncv32u _pixMaxVal<Ncv32u>() {return UINT_MAX;}
template<> inline __host__ __device__ Ncv8s _pixMaxVal<Ncv8s>() {return SCHAR_MAX;}
template<> inline __host__ __device__ Ncv16s _pixMaxVal<Ncv16s>() {return SHRT_MAX;}
template<> inline __host__ __device__ Ncv32s _pixMaxVal<Ncv32s>() {return INT_MAX;}
template<> inline __host__ __device__ Ncv32f _pixMaxVal<Ncv32f>() {return FLT_MAX;}
template<> inline __host__ __device__ Ncv64f _pixMaxVal<Ncv64f>() {return DBL_MAX;}
template<typename TBase> inline __host__ __device__ TBase _pixMinVal();
template<> inline __host__ __device__ Ncv8u _pixMinVal<Ncv8u>() {return 0;}
template<> inline __host__ __device__ Ncv16u _pixMinVal<Ncv16u>() {return 0;}
template<> inline __host__ __device__ Ncv32u _pixMinVal<Ncv32u>() {return 0;}
template<> inline __host__ __device__ Ncv8s _pixMinVal<Ncv8s>() {return SCHAR_MIN;}
template<> inline __host__ __device__ Ncv16s _pixMinVal<Ncv16s>() {return SHRT_MIN;}
template<> inline __host__ __device__ Ncv32s _pixMinVal<Ncv32s>() {return INT_MIN;}
template<> inline __host__ __device__ Ncv32f _pixMinVal<Ncv32f>() {return -FLT_MAX;} //lowest representable value; FLT_MIN is the smallest positive normal, which would break the parallel with SCHAR_MIN/INT_MIN above
template<> inline __host__ __device__ Ncv64f _pixMinVal<Ncv64f>() {return -DBL_MAX;}
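//Illustrative sketch (hypothetical helper, not part of the NCV API): generic
//kernels can seed min/max reductions from the traits above instead of
//hard-coding per-type limits.
template<typename TBase> static inline __host__ __device__ void _exampleInitMinMax(TBase &curMin, TBase &curMax)
{
    curMin = _pixMaxVal<TBase>(); //any sample compares smaller than this seed
    curMax = _pixMinVal<TBase>(); //any sample compares larger than this seed
}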
template<typename Tvec> struct TConvVec2Base;
template<> struct TConvVec2Base<uchar1> {typedef Ncv8u TBase;};
template<> struct TConvVec2Base<uchar3> {typedef Ncv8u TBase;};
template<> struct TConvVec2Base<uchar4> {typedef Ncv8u TBase;};
template<> struct TConvVec2Base<ushort1> {typedef Ncv16u TBase;};
template<> struct TConvVec2Base<ushort3> {typedef Ncv16u TBase;};
template<> struct TConvVec2Base<ushort4> {typedef Ncv16u TBase;};
template<> struct TConvVec2Base<uint1> {typedef Ncv32u TBase;};
template<> struct TConvVec2Base<uint3> {typedef Ncv32u TBase;};
template<> struct TConvVec2Base<uint4> {typedef Ncv32u TBase;};
template<> struct TConvVec2Base<float1> {typedef Ncv32f TBase;};
template<> struct TConvVec2Base<float3> {typedef Ncv32f TBase;};
template<> struct TConvVec2Base<float4> {typedef Ncv32f TBase;};
template<> struct TConvVec2Base<double1> {typedef Ncv64f TBase;};
template<> struct TConvVec2Base<double3> {typedef Ncv64f TBase;};
template<> struct TConvVec2Base<double4> {typedef Ncv64f TBase;};
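//NC(T) yields the channel count of a vector pixel type at compile time,
//e.g. NC(uchar4) == 4 and NC(float3) == 3 (sizeof(float3) / sizeof(Ncv32f) == 12 / 4).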
#define NC(T) (sizeof(T) / sizeof(TConvVec2Base<T>::TBase))
template<typename TBase, Ncv32u NC> struct TConvBase2Vec;
template<> struct TConvBase2Vec<Ncv8u, 1> {typedef uchar1 TVec;};
template<> struct TConvBase2Vec<Ncv8u, 3> {typedef uchar3 TVec;};
template<> struct TConvBase2Vec<Ncv8u, 4> {typedef uchar4 TVec;};
template<> struct TConvBase2Vec<Ncv16u, 1> {typedef ushort1 TVec;};
template<> struct TConvBase2Vec<Ncv16u, 3> {typedef ushort3 TVec;};
template<> struct TConvBase2Vec<Ncv16u, 4> {typedef ushort4 TVec;};
template<> struct TConvBase2Vec<Ncv32u, 1> {typedef uint1 TVec;};
template<> struct TConvBase2Vec<Ncv32u, 3> {typedef uint3 TVec;};
template<> struct TConvBase2Vec<Ncv32u, 4> {typedef uint4 TVec;};
template<> struct TConvBase2Vec<Ncv32f, 1> {typedef float1 TVec;};
template<> struct TConvBase2Vec<Ncv32f, 3> {typedef float3 TVec;};
template<> struct TConvBase2Vec<Ncv32f, 4> {typedef float4 TVec;};
template<> struct TConvBase2Vec<Ncv64f, 1> {typedef double1 TVec;};
template<> struct TConvBase2Vec<Ncv64f, 3> {typedef double3 TVec;};
template<> struct TConvBase2Vec<Ncv64f, 4> {typedef double4 TVec;};
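//Illustrative sketch (hypothetical typedef, not used below): the two trait
//tables are mutual inverses, so a vector type can be rebuilt from its base
//type and a channel count, e.g. widening a 3-channel float pixel to 4 channels:
typedef TConvBase2Vec<TConvVec2Base<float3>::TBase, 4>::TVec _ExampleWidenedPix; //== float4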
//TODO: consider using CUDA intrinsics to avoid branching
template<typename Tin> static inline __host__ __device__ void _TDemoteClampZ(Tin &a, Ncv8u &out) {out = (Ncv8u)CLAMP_0_255(a);}
template<typename Tin> static inline __host__ __device__ void _TDemoteClampZ(Tin &a, Ncv16u &out) {out = (Ncv16u)CLAMP(a, 0, USHRT_MAX);}
template<typename Tin> static inline __host__ __device__ void _TDemoteClampZ(Tin &a, Ncv32u &out) {out = (Ncv32u)CLAMP(a, 0, UINT_MAX);}
template<typename Tin> static inline __host__ __device__ void _TDemoteClampZ(Tin &a, Ncv32f &out) {out = (Ncv32f)a;}
//TODO: consider using CUDA intrinsics to avoid branching
template<typename Tin> static inline __host__ __device__ void _TDemoteClampNN(Tin &a, Ncv8u &out) {out = (Ncv8u)CLAMP_0_255(a+0.5f);}
template<typename Tin> static inline __host__ __device__ void _TDemoteClampNN(Tin &a, Ncv16u &out) {out = (Ncv16u)CLAMP(a+0.5f, 0, USHRT_MAX);}
template<typename Tin> static inline __host__ __device__ void _TDemoteClampNN(Tin &a, Ncv32u &out) {out = (Ncv32u)CLAMP(a+0.5f, 0, UINT_MAX);}
template<typename Tin> static inline __host__ __device__ void _TDemoteClampNN(Tin &a, Ncv32f &out) {out = (Ncv32f)a;}
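//Illustrative sketch (hypothetical values): ClampZ truncates toward zero on the
//cast, ClampNN rounds to nearest by adding 0.5f first; both saturate to the
//output type's range.
//  Ncv32f v = 254.7f; Ncv8u outZ, outNN;
//  _TDemoteClampZ(v, outZ);   //outZ  == 254 (truncated)
//  _TDemoteClampNN(v, outNN); //outNN == 255 (rounded)
//  v = 300.0f; _TDemoteClampNN(v, outNN); //outNN == 255 (saturated at UCHAR_MAX)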
template<typename Tout> inline __host__ __device__ Tout _pixMakeZero();
template<> inline __host__ __device__ uchar1 _pixMakeZero<uchar1>() {return make_uchar1(0);}
template<> inline __host__ __device__ uchar3 _pixMakeZero<uchar3>() {return make_uchar3(0,0,0);}
template<> inline __host__ __device__ uchar4 _pixMakeZero<uchar4>() {return make_uchar4(0,0,0,0);}
template<> inline __host__ __device__ ushort1 _pixMakeZero<ushort1>() {return make_ushort1(0);}
template<> inline __host__ __device__ ushort3 _pixMakeZero<ushort3>() {return make_ushort3(0,0,0);}
template<> inline __host__ __device__ ushort4 _pixMakeZero<ushort4>() {return make_ushort4(0,0,0,0);}
template<> inline __host__ __device__ uint1 _pixMakeZero<uint1>() {return make_uint1(0);}
template<> inline __host__ __device__ uint3 _pixMakeZero<uint3>() {return make_uint3(0,0,0);}
template<> inline __host__ __device__ uint4 _pixMakeZero<uint4>() {return make_uint4(0,0,0,0);}
template<> inline __host__ __device__ float1 _pixMakeZero<float1>() {return make_float1(0.f);}
template<> inline __host__ __device__ float3 _pixMakeZero<float3>() {return make_float3(0.f,0.f,0.f);}
template<> inline __host__ __device__ float4 _pixMakeZero<float4>() {return make_float4(0.f,0.f,0.f,0.f);}
template<> inline __host__ __device__ double1 _pixMakeZero<double1>() {return make_double1(0.);}
template<> inline __host__ __device__ double3 _pixMakeZero<double3>() {return make_double3(0.,0.,0.);}
template<> inline __host__ __device__ double4 _pixMakeZero<double4>() {return make_double4(0.,0.,0.,0.);}
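//Illustrative sketch: zero pixels serve as accumulator seeds in generic kernels, e.g.
//  float3 acc = _pixMakeZero<float3>(); //acc.x == acc.y == acc.z == 0.f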