C++: NV12 textures not working in DirectX 11.1


I'm trying to render NV12 textures in DirectX 11.1 from frames decoded with ffmpeg 2.8.11, but when I render them the texture is broken and the colors are always off.

This is the result:

The code below shows how the frame decoded by ffmpeg in YUV420P format is then converted (I'm not sure about this part) to NV12 by interleaving the U and V planes:

static uint8_t *pixelsPtr_ = nullptr;

UINT rowPitch = ((width + 1) >> 1) * 2;
UINT imageSize = (rowPitch * height) + ((rowPitch * height + 1) >> 1);

if (!pixelsPtr_)
{
    pixelsPtr_ = new uint8_t[imageSize];
}

int j, position = 0;

uint32_t pitchY = avFrame.linesize[0];
uint32_t pitchU = avFrame.linesize[1];
uint32_t pitchV = avFrame.linesize[2];

uint8_t *avY = avFrame.data[0];
uint8_t *avU = avFrame.data[1];
uint8_t *avV = avFrame.data[2];

::SecureZeroMemory(pixelsPtr_, imageSize);

// Copy the full-resolution Y plane row by row, dropping the decoder's
// line padding (pitchY may be wider than width).
for (j = 0; j < height; j++)
{               
    ::CopyMemory(pixelsPtr_ + position, avY, (width));
    position += (width);
    avY += pitchY;
}

// For each half-height chroma row, copy a half-width run of U followed by
// a half-width run of V.
for (j = 0; j < height >> 1; j++)
{
    ::CopyMemory(pixelsPtr_ + position, avU, (width >> 1));
    position += (width >> 1);
    avU += pitchU;

    ::CopyMemory(pixelsPtr_ + position, avV, (width >> 1));
    position += (width >> 1);
    avV += pitchV;
}
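As a quick sanity check on the buffer math (an illustrative example, not from the post): for an even-sized 1280×720 frame, rowPitch is ((1280 + 1) >> 1) * 2 = 1280, so imageSize is 1280·720 + 1280·720/2 = 1,382,400 bytes, i.e. the 1.5 bytes per pixel an NV12 image needs (a full-resolution Y plane plus a half-height plane of interleaved UV).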
Then I pass these two shader resource views to the pixel shader:

graphics->Context()->PSSetShaderResources(0, 2, textureViewYUV);
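The post doesn't show how those two views are created. A minimal sketch (not code from the post) of the usual setup for a single DXGI_FORMAT_NV12 texture on a Direct3D 11.1-capable device follows; device, width, height and the textureViewYUV array of ID3D11ShaderResourceView* are assumed to exist, error handling is omitted, and the texture is made dynamic so it can later be updated with Map/WRITE_DISCARD.

#include <d3d11_1.h>

D3D11_TEXTURE2D_DESC texDesc = {};
texDesc.Width            = width;
texDesc.Height           = height;
texDesc.MipLevels        = 1;
texDesc.ArraySize        = 1;
texDesc.Format           = DXGI_FORMAT_NV12;            // Y plane + interleaved UV plane
texDesc.SampleDesc.Count = 1;
texDesc.Usage            = D3D11_USAGE_DYNAMIC;         // allows Map(WRITE_DISCARD)
texDesc.BindFlags        = D3D11_BIND_SHADER_RESOURCE;
texDesc.CPUAccessFlags   = D3D11_CPU_ACCESS_WRITE;

ID3D11Texture2D *tex = nullptr;
device->CreateTexture2D(&texDesc, nullptr, &tex);

// Luminance view: exposes the Y plane as a single-channel texture.
D3D11_SHADER_RESOURCE_VIEW_DESC lumaDesc = {};
lumaDesc.Format              = DXGI_FORMAT_R8_UNORM;
lumaDesc.ViewDimension       = D3D11_SRV_DIMENSION_TEXTURE2D;
lumaDesc.Texture2D.MipLevels = 1;
device->CreateShaderResourceView(tex, &lumaDesc, &textureViewYUV[0]);

// Chrominance view: exposes the half-resolution interleaved UV plane as RG.
D3D11_SHADER_RESOURCE_VIEW_DESC chromaDesc = lumaDesc;
chromaDesc.Format = DXGI_FORMAT_R8G8_UNORM;
device->CreateShaderResourceView(tex, &chromaDesc, &textureViewYUV[1]);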
This is the pixel shader:

struct PixelShaderInput
{
    float4 pos         : SV_POSITION;
    float4 Color       : COLOR;
    float2 texCoord    : TEXCOORD;
};

static const float3x3 YUVtoRGBCoeffMatrix =
{
    1.164383f,  1.164383f, 1.164383f,
    0.000000f, -0.391762f, 2.017232f,
    1.596027f, -0.812968f, 0.000000f
};

Texture2D<float>  luminanceChannel;
Texture2D<float2> chrominanceChannel;

SamplerState linearfilter
{
    Filter = MIN_MAG_MIP_LINEAR;
};

float3 ConvertYUVtoRGB(float3 yuv)
{
    // Derived from https://msdn.microsoft.com/en-us/library/windows/desktop/dd206750(v=vs.85).aspx
    // Section: Converting 8-bit YUV to RGB888

    // These values are calculated from (16 / 255) and (128 / 255)
    yuv -= float3(0.062745f, 0.501960f, 0.501960f);
    yuv = mul(yuv, YUVtoRGBCoeffMatrix);

    return saturate(yuv);
}

float4 main(PixelShaderInput input) : SV_TARGET
{
    float y = luminanceChannel.Sample(linearfilter, input.texCoord);
    float2 uv = chrominanceChannel.Sample(linearfilter, input.texCoord);

    float3 YUV = float3(y, uv.x, uv.y);
    float4 YUV4 = float4(YUV.x, YUV.y, YUV.z, 1);

    float3 RGB = ConvertYUVtoRGB(YUV);
    float4 RGB4 = float4(RGB.x, RGB.y, RGB.z, 1);

    return RGB4;
}
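For reference (this is not part of the question): because mul() above multiplies a row vector by the matrix, the conversion works out to the usual BT.601 "studio swing" equations from the linked MSDN page. A plain C++ sketch of the same arithmetic, assuming y, u and v are already normalized to [0, 1] as they are when sampled from R8/R8G8 UNORM views:

#include <algorithm>

struct RGB { float r, g, b; };

// Same conversion the shader performs, written out per channel.
RGB ConvertYUVtoRGB(float y, float u, float v)
{
    y -= 16.0f / 255.0f;     // remove the luma offset
    u -= 128.0f / 255.0f;    // center the chroma channels
    v -= 128.0f / 255.0f;

    RGB rgb;
    rgb.r = 1.164383f * y                  + 1.596027f * v;
    rgb.g = 1.164383f * y - 0.391762f * u  - 0.812968f * v;
    rgb.b = 1.164383f * y + 2.017232f * u;

    // Mirror HLSL saturate(): clamp each channel to [0, 1].
    rgb.r = std::clamp(rgb.r, 0.0f, 1.0f);
    rgb.g = std::clamp(rgb.g, 0.0f, 1.0f);
    rgb.b = std::clamp(rgb.b, 0.0f, 1.0f);
    return rgb;
}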
Can anyone help me? What am I doing wrong?

Edit #1

int skipLineArea = 0;
int uvCount = (height >> 1) * (width >> 1);

for (j = 0, k = 0; j < uvCount; j++, k++)
{
    if (skipLineArea == (width >> 1))
    {
        k += pitchU - (width >> 1);
        skipLineArea = 0;
    }

    pixelsPtr_[position++] = avU[k];
    pixelsPtr_[position++] = avV[k];
    skipLineArea++;
}
Edit #2

Updating the texture instead of creating a new one:

D3D11_MAPPED_SUBRESOURCE mappedResource;
d3dContext->Map(tex, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);

uint8_t* mappedData = reinterpret_cast<uint8_t*>(mappedResource.pData);

// Copy 1.5 * height rows: the Y plane followed by the UV plane, stepping the
// destination by the row pitch the driver reported for the mapped texture.
for (UINT i = 0; i < height * 1.5; ++i)
{
    memcpy(mappedData, frameData, rowPitch);
    mappedData += mappedResource.RowPitch;
    frameData += rowPitch;
}

d3dContext->Unmap(tex, 0);

The NV12 format does not keep the U and V values in separate planes; they are interleaved. You should use the IMC4 texture format, or change your copy code to interleave the U and V values.

But I did interleave the U and V values in the first code block. avFrame is the frame ffmpeg gives me in YUV420P format, and I'm copying its contents into pixelsPtr_ as NV12.

@André Vitor You can interleave line by line, but for NV12 they must be interleaved value by value; line-by-line interleaving is what IMC4 uses.

I made the required changes; the new code for the U and V values is in the post. Now the image is sometimes perfect and sometimes corrupted like before.

@AndréVitor That means you have two problems :-) What kind of GPU do you have?
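For reference, the value-by-value interleaving the answer describes could look like the sketch below. This is not code from the thread; the variable names follow the question's code, width and height are assumed to be even, and the Y plane is assumed to have already been copied tightly (width bytes per row) into pixelsPtr_.

// Sketch: build NV12's single interleaved UV plane from ffmpeg's separate
// YUV420P U and V planes, honouring each plane's line pitch.
uint8_t *dstUV = pixelsPtr_ + width * height;   // UV plane starts right after Y

for (int row = 0; row < height / 2; ++row)
{
    const uint8_t *srcU = avU + row * pitchU;
    const uint8_t *srcV = avV + row * pitchV;

    for (int col = 0; col < width / 2; ++col)
    {
        *dstUV++ = srcU[col];   // U sample
        *dstUV++ = srcV[col];   // V sample
    }
}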