Preface
Full table of contents for DirectX11 With Windows SDK: http://www.cnblogs.com/X-Jun/p/9028764.html
Since later projects will need a reasonably good demonstration environment, we start with the topic of cameras. Before that, we need to review the world matrix and the view matrix.
DirectX11 With Windows SDK full table of contents
GitHub project source code
Welcome to join the QQ group: 727623616, where you can discuss DX11 together and report any problems you run into.
The World Matrix and the View Matrix
Given an object's position \(\mathbf{Q} = (Q_{x}, Q_{y}, Q_{z})\) and three mutually perpendicular axes \(\mathbf{u} = (u_{x}, u_{y}, u_{z})\), \(\mathbf{v} = (v_{x}, v_{y}, v_{z})\), \(\mathbf{w} = (w_{x}, w_{y}, w_{z})\), we can build the corresponding world matrix:
\[ \mathbf{W}=\begin{bmatrix} u_{x} & u_{y} & u_{z} & 0 \\ v_{x} & v_{y} & v_{z} & 0 \\ w_{x} & w_{y} & w_{z} & 0 \\ Q_{x} & Q_{y} & Q_{z} & 1 \end{bmatrix}\]
This matrix can be applied with two interpretations:
- It moves the object from the origin of the world coordinate system to the position given by the world matrix, adjusting its orientation and size according to its axes
- After the world transform the object is already at its place in the world coordinate system; what is actually performed is a change of basis from object space to world space
What we need now, however, is to go from the world coordinate system to the view-space coordinate system. If the camera is treated as an object, this is effectively the inverse of its world matrix: we move from the world coordinate system into the camera's local coordinate system (right direction as the X axis, up direction as the Y axis, look direction as the Z axis), i.e. \(\mathbf{V}=\mathbf{(RT)}^{-1}=\mathbf{T}^{-1}\mathbf{R}^{-1}=\mathbf{T}^{-1}\mathbf{R}^{T}\)
\[ \mathbf{V}=\begin{bmatrix} u_{x} & v_{x} & w_{x} & 0 \\ u_{y} & v_{y} & w_{y} & 0 \\ u_{z} & v_{z} & w_{z} & 0 \\ -\mathbf{Q}\cdot\mathbf{u} & -\mathbf{Q}\cdot\mathbf{v} & -\mathbf{Q}\cdot\mathbf{w} & 1 \end{bmatrix}\]
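To make the relationship concrete, here is a minimal sketch (assuming DirectXMath; the function name is purely illustrative) showing that the view matrix produced by XMMatrixLookToLH is exactly the inverse of the camera's world matrix:
#include <DirectXMath.h>
using namespace DirectX;
// Illustrative check: V maps world space to view space, so multiplying it by the
// camera's world matrix W (view space back to world space) should give the identity.
void CheckViewIsInverseOfWorld()
{
    XMVECTOR pos  = XMVectorSet(3.0f, 2.0f, -5.0f, 1.0f);                    // camera position Q
    XMVECTOR look = XMVector3Normalize(XMVectorSet(0.5f, 0.0f, 1.0f, 0.0f)); // look direction w
    XMVECTOR up   = XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f);                     // up direction v
    XMMATRIX V = XMMatrixLookToLH(pos, look, up);  // view matrix
    XMMATRIX W = XMMatrixInverse(nullptr, V);      // the camera's world matrix R*T
    XMMATRIX shouldBeIdentity = W * V;             // identity, up to floating-point error
    (void)shouldBeIdentity;
}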
Cameras
The first-person/free camera and the third-person camera share some of their elements, so we can factor the common parts out into an abstract camera base class.
The abstract camera base class Camera
The class definition is as follows:
class Camera
{
public:
Camera();
virtual ~Camera() = 0;
// Get the camera position
DirectX::XMVECTOR GetPositionXM() const;
DirectX::XMFLOAT3 GetPosition() const;
// Get the camera's axis vectors
DirectX::XMVECTOR GetRightXM() const;
DirectX::XMFLOAT3 GetRight() const;
DirectX::XMVECTOR GetUpXM() const;
DirectX::XMFLOAT3 GetUp() const;
DirectX::XMVECTOR GetLookXM() const;
DirectX::XMFLOAT3 GetLook() const;
// Get frustum information
float GetNearWindowWidth() const;
float GetNearWindowHeight() const;
float GetFarWindowWidth() const;
float GetFarWindowHeight() const;
// Get matrices
DirectX::XMMATRIX GetViewXM() const;
DirectX::XMMATRIX GetProjXM() const;
DirectX::XMMATRIX GetViewProjXM() const;
// Get the viewport
D3D11_VIEWPORT GetViewPort() const;
// Set the frustum
void SetFrustum(float fovY, float aspect, float nearZ, float farZ);
// Set the viewport
void SetViewPort(const D3D11_VIEWPORT& viewPort);
void SetViewPort(float topLeftX, float topLeftY, float width, float height, float minDepth = 0.0f, float maxDepth = 1.0f);
// Update the view matrix
virtual void UpdateViewMatrix() = 0;
protected:
// The camera's view-space coordinate system, expressed in world coordinates
DirectX::XMFLOAT3 m_Position;
DirectX::XMFLOAT3 m_Right;
DirectX::XMFLOAT3 m_Up;
DirectX::XMFLOAT3 m_Look;
// Frustum properties
float m_NearZ;
float m_FarZ;
float m_Aspect;
float m_FovY;
float m_NearWindowHeight;
float m_FarWindowHeight;
// View matrix and perspective projection matrix
DirectX::XMFLOAT4X4 m_View;
DirectX::XMFLOAT4X4 m_Proj;
// Current viewport
D3D11_VIEWPORT m_ViewPort;
};
As you can see, no matter what type of camera it is, it must contain a view matrix, a projection matrix, and the information needed to set up these two spaces. UpdateViewMatrix is a (pure) virtual method here because the first-person/free camera and the third-person camera implement it differently.
Only the getters for the frustum information are listed here:
float Camera::GetNearWindowWidth() const
{
return m_Aspect * m_NearWindowHeight;
}
float Camera::GetNearWindowHeight() const
{
return m_NearWindowHeight;
}
float Camera::GetFarWindowWidth() const
{
return m_Aspect * m_FarWindowHeight;
}
float Camera::GetFarWindowHeight() const
{
return m_FarWindowHeight;
}
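SetFrustum itself is not listed above; a plausible implementation consistent with these getters (a sketch, not necessarily the project's exact code) derives the near/far window heights from the vertical field of view and caches the perspective projection matrix:
void Camera::SetFrustum(float fovY, float aspect, float nearZ, float farZ)
{
    m_FovY = fovY;
    m_Aspect = aspect;
    m_NearZ = nearZ;
    m_FarZ = farZ;
    // Height of the view window on the near/far planes, derived from the vertical FOV
    m_NearWindowHeight = 2.0f * m_NearZ * tanf(0.5f * m_FovY);
    m_FarWindowHeight = 2.0f * m_FarZ * tanf(0.5f * m_FovY);
    // Cache the perspective projection matrix
    XMStoreFloat4x4(&m_Proj, XMMatrixPerspectiveFovLH(m_FovY, m_Aspect, m_NearZ, m_FarZ));
}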
The first-person/free camera FirstPersonCamera
The class definition is as follows:
class FirstPersonCamera : public Camera
{
public:
FirstPersonCamera();
~FirstPersonCamera() override;
// Set the camera position
void SetPosition(float x, float y, float z);
void SetPosition(const DirectX::XMFLOAT3& v);
// Set the camera orientation
void XM_CALLCONV LookAt(DirectX::FXMVECTOR pos, DirectX::FXMVECTOR target, DirectX::FXMVECTOR up);
void LookAt(const DirectX::XMFLOAT3& pos, const DirectX::XMFLOAT3& target,const DirectX::XMFLOAT3& up);
void XM_CALLCONV LookTo(DirectX::FXMVECTOR pos, DirectX::FXMVECTOR to, DirectX::FXMVECTOR up);
void LookTo(const DirectX::XMFLOAT3& pos, const DirectX::XMFLOAT3& to, const DirectX::XMFLOAT3& up);
// Strafe (left/right translation)
void Strafe(float d);
// Walk (movement on the horizontal plane)
void Walk(float d);
// Move forward (along the look direction)
void MoveForward(float d);
// Look up/down (pitch)
void Pitch(float rad);
// Look left/right (yaw)
void RotateY(float rad);
// Update the view matrix
void UpdateViewMatrix() override;
};
This first-person camera does not implement collision detection. It provides the following features:
- Setting the camera's orientation and position
- Moving forward/backward along the camera's look direction (free camera)
- Moving forward/backward on the horizontal ground plane (first-person)
- Strafing left/right
- Rotating the view left/right (about the Y axis)
- Rotating the view up/down (about the camera's right axis), with the angle clamped so it cannot become too large
The implementation is as follows:
void FirstPersonCamera::SetPosition(float x, float y, float z)
{
SetPosition(XMFLOAT3(x, y, z));
}
void FirstPersonCamera::SetPosition(const DirectX::XMFLOAT3 & v)
{
m_Position = v;
}
void XM_CALLCONV FirstPersonCamera::LookAt(DirectX::FXMVECTOR pos, DirectX::FXMVECTOR target, DirectX::FXMVECTOR up)
{
LookTo(pos, target - pos, up);
}
void FirstPersonCamera::LookAt(const DirectX::XMFLOAT3 & pos, const DirectX::XMFLOAT3 & target,const DirectX::XMFLOAT3 & up)
{
LookAt(XMLoadFloat3(&pos), XMLoadFloat3(&target), XMLoadFloat3(&up));
}
void XM_CALLCONV FirstPersonCamera::LookTo(DirectX::FXMVECTOR pos, DirectX::FXMVECTOR to, DirectX::FXMVECTOR up)
{
XMVECTOR L = XMVector3Normalize(to);
XMVECTOR R = XMVector3Normalize(XMVector3Cross(up, L));
XMVECTOR U = XMVector3Cross(L, R);
XMStoreFloat3(&m_Position, pos);
XMStoreFloat3(&m_Look, L);
XMStoreFloat3(&m_Right, R);
XMStoreFloat3(&m_Up, U);
}
void FirstPersonCamera::LookTo(const DirectX::XMFLOAT3 & pos, const DirectX::XMFLOAT3 & to, const DirectX::XMFLOAT3 & up)
{
LookTo(XMLoadFloat3(&pos), XMLoadFloat3(&to), XMLoadFloat3(&up));
}
void FirstPersonCamera::Strafe(float d)
{
XMVECTOR Pos = XMLoadFloat3(&m_Position);
XMVECTOR Right = XMLoadFloat3(&m_Right);
XMVECTOR Dist = XMVectorReplicate(d);
// DestPos = Dist * Right + SrcPos
XMStoreFloat3(&m_Position, XMVectorMultiplyAdd(Dist, Right, Pos));
}
void FirstPersonCamera::Walk(float d)
{
XMVECTOR Pos = XMLoadFloat3(&m_Position);
XMVECTOR Right = XMLoadFloat3(&m_Right);
XMVECTOR Up = XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f);
XMVECTOR Front = XMVector3Normalize(XMVector3Cross(Right, Up));
XMVECTOR Dist = XMVectorReplicate(d);
// DestPos = Dist * Front + SrcPos
XMStoreFloat3(&m_Position, XMVectorMultiplyAdd(Dist, Front, Pos));
}
void FirstPersonCamera::MoveForward(float d)
{
XMVECTOR Pos = XMLoadFloat3(&m_Position);
XMVECTOR Look = XMLoadFloat3(&m_Look);
XMVECTOR Dist = XMVectorReplicate(d);
// DestPos = Dist * Look + SrcPos
XMStoreFloat3(&m_Position, XMVectorMultiplyAdd(Dist, Look, Pos));
}
void FirstPersonCamera::Pitch(float rad)
{
XMMATRIX R = XMMatrixRotationAxis(XMLoadFloat3(&m_Right), rad);
XMVECTOR Up = XMVector3TransformNormal(XMLoadFloat3(&m_Up), R);
XMVECTOR Look = XMVector3TransformNormal(XMLoadFloat3(&m_Look), R);
float cosPhi = XMVectorGetY(Look);
// Clamp the pitch angle Phi to [2pi/9, 7pi/9],
// i.e. the cosine value stays within [-cos(2pi/9), cos(2pi/9)]
if (fabs(cosPhi) > cosf(XM_2PI / 9))
return;
XMStoreFloat3(&m_Up, Up);
XMStoreFloat3(&m_Look, Look);
}
void FirstPersonCamera::RotateY(float rad)
{
XMMATRIX R = XMMatrixRotationY(rad);
XMStoreFloat3(&m_Right, XMVector3TransformNormal(XMLoadFloat3(&m_Right), R));
XMStoreFloat3(&m_Up, XMVector3TransformNormal(XMLoadFloat3(&m_Up), R));
XMStoreFloat3(&m_Look, XMVector3TransformNormal(XMLoadFloat3(&m_Look), R));
}
The pitch angle Phi and the Y component of the look axis satisfy:
\[L_{y} = \cos(\Phi)\]
When Phi is 0 radians the camera looks straight up, and when Phi is pi radians it looks straight down. In this sample, Phi is restricted to the range [2pi/9, 7pi/9].
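Concretely, this restriction is what the early-return check in Pitch enforces:
\[\frac{2\pi}{9} \le \Phi \le \frac{7\pi}{9} \iff |L_{y}| = |\cos(\Phi)| \le \cos(\frac{2\pi}{9}) \approx 0.766\]
which is exactly the fabs(cosPhi) > cosf(XM_2PI / 9) rejection test above.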
Constructing the view matrix
The FirstPersonCamera::UpdateViewMatrix method first re-normalizes and re-orthogonalizes the camera's right, up, and look axes, then computes the remaining elements to fill in the view matrix:
void FirstPersonCamera::UpdateViewMatrix()
{
XMVECTOR R = XMLoadFloat3(&m_Right);
XMVECTOR U = XMLoadFloat3(&m_Up);
XMVECTOR L = XMLoadFloat3(&m_Look);
XMVECTOR P = XMLoadFloat3(&m_Position);
// Keep the camera axes mutually orthogonal and unit-length
L = XMVector3Normalize(L);
U = XMVector3Normalize(XMVector3Cross(L, R));
// U and L are already orthonormal; compute their cross product to get R
R = XMVector3Cross(U, L);
// Fill in the view matrix
float x = -XMVectorGetX(XMVector3Dot(P, R));
float y = -XMVectorGetX(XMVector3Dot(P, U));
float z = -XMVectorGetX(XMVector3Dot(P, L));
XMStoreFloat3(&m_Right, R);
XMStoreFloat3(&m_Up, U);
XMStoreFloat3(&m_Look, L);
m_View = {
m_Right.x, m_Up.x, m_Look.x, 0.0f,
m_Right.y, m_Up.y, m_Look.y, 0.0f,
m_Right.z, m_Up.z, m_Look.z, 0.0f,
x, y, z, 1.0f
};
}
The third-person camera ThirdPersonCamera
The class definition is as follows:
class ThirdPersonCamera : public Camera
{
public:
ThirdPersonCamera();
~ThirdPersonCamera() override;
// Get the position of the currently tracked object
DirectX::XMFLOAT3 GetTargetPosition() const;
// Get the distance to the object
float GetDistance() const;
// Get the rotation (radians) about the X axis
float GetRotationX() const;
// Get the rotation (radians) about the Y axis
float GetRotationY() const;
// Rotate vertically around the object (the pitch angle Phi is clamped to [pi/6, pi/2])
void RotateX(float rad);
// Rotate horizontally around the object
void RotateY(float rad);
// Move closer to (or away from) the object
void Approach(float dist);
// Set the initial rotation about the X axis in radians (Phi is clamped to [pi/6, pi/2])
void SetRotationX(float phi);
// Set the initial rotation about the Y axis in radians
void SetRotationY(float theta);
// Set the position of the object to track
void SetTarget(const DirectX::XMFLOAT3& target);
// Set the initial distance
void SetDistance(float dist);
// Set the minimum and maximum allowed distance
void SetDistanceMinMax(float minDist, float maxDist);
// Update the view matrix
void UpdateViewMatrix() override;
private:
DirectX::XMFLOAT3 m_Target;
float m_Distance;
// Minimum and maximum allowed distance
float m_MinDist, m_MaxDist;
// Current rotation angles, relative to the world coordinate system
float m_Theta;
float m_Phi;
};
This third-person camera likewise does not implement collision detection. It provides the following features:
- Setting the position of the observed target
- Setting the distance to the target (clamped to a reasonable range)
- Rotating vertically around the object (with the pitch angle clamped)
- Rotating horizontally around the object (about the Y axis)
The implementation of these parts is as follows:
void ThirdPersonCamera::RotateX(float rad)
{
m_Phi -= rad;
// Clamp the pitch angle Phi to [pi/6, pi/2],
// i.e. the cosine value stays within [0, cos(pi/6)]
if (m_Phi < XM_PI / 6)
m_Phi = XM_PI / 6;
else if (m_Phi > XM_PIDIV2)
m_Phi = XM_PIDIV2;
}
void ThirdPersonCamera::RotateY(float rad)
{
m_Theta = XMScalarModAngle(m_Theta - rad);
}
void ThirdPersonCamera::Approach(float dist)
{
m_Distance += dist;
// Clamp the distance to [m_MinDist, m_MaxDist]
if (m_Distance < m_MinDist)
m_Distance = m_MinDist;
else if (m_Distance > m_MaxDist)
m_Distance = m_MaxDist;
}
void ThirdPersonCamera::SetRotationX(float phi)
{
m_Phi = XMScalarModAngle(phi);
// Clamp the pitch angle Phi to [pi/6, pi/2],
// i.e. the cosine value stays within [0, cos(pi/6)]
if (m_Phi < XM_PI / 6)
m_Phi = XM_PI / 6;
else if (m_Phi > XM_PIDIV2)
m_Phi = XM_PIDIV2;
}
void ThirdPersonCamera::SetRotationY(float theta)
{
m_Theta = XMScalarModAngle(theta);
}
void ThirdPersonCamera::SetTarget(const DirectX::XMFLOAT3 & target)
{
m_Target = target;
}
void ThirdPersonCamera::SetDistance(float dist)
{
m_Distance = dist;
}
void ThirdPersonCamera::SetDistanceMinMax(float minDist, float maxDist)
{
m_MinDist = minDist;
m_MaxDist = maxDist;
}
The spherical coordinate system
To compute a concrete camera position behind the object, you could use the formula
\[\mathbf{Q} = \mathbf{T} - dist \cdot \mathbf{L} \]
and then obtain the view matrix from the XMMatrixLookAtLH function. At runtime, however, you will notice an unpleasant jitter while rotating, because the camera position computed this way is affected by accumulated error in the look vector.
A camera position computed from spherical coordinates, rebuilt from the rotation angles every frame, is smooth and shows no such jitter.
For a right-handed coordinate system, the spherical coordinate formulas are:
\[\begin{cases} x = R\sin(\phi)\cos(\theta) \\ y = R\sin(\phi)\sin(\theta) \\ z = R\cos(\phi) \end{cases} \]
For a left-handed coordinate system, they become:
\[\begin{cases} x = R\sin(\phi)\cos(\theta) \\ z = R\sin(\phi)\sin(\theta) \\ y = R\cos(\phi) \end{cases} \]
Finally, adding the target's position gives the camera position:
\[\begin{cases} Q_{x} = T_{x} + R\sin(\phi)\cos(\theta) \\ Q_{z} = T_{z} + R\sin(\phi)\sin(\theta) \\ Q_{y} = T_{y} + R\cos(\phi) \end{cases} \]
Constructing the view matrix
The ThirdPersonCamera::UpdateViewMatrix method first computes the camera position, then, as before, re-normalizes and re-orthogonalizes the camera's right, up, and look axes, and finally computes the remaining elements to fill in the view matrix:
void ThirdPersonCamera::UpdateViewMatrix()
{
// Spherical coordinates
float x = m_Target.x + m_Distance * sinf(m_Phi) * cosf(m_Theta);
float z = m_Target.z + m_Distance * sinf(m_Phi) * sinf(m_Theta);
float y = m_Target.y + m_Distance * cosf(m_Phi);
m_Position = { x, y, z };
XMVECTOR P = XMLoadFloat3(&m_Position);
XMVECTOR L = XMVector3Normalize(XMLoadFloat3(&m_Target) - P);
XMVECTOR R = XMVector3Normalize(XMVector3Cross(XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f), L));
XMVECTOR U = XMVector3Cross(L, R);
// Update the axis vectors
XMStoreFloat3(&m_Right, R);
XMStoreFloat3(&m_Up, U);
XMStoreFloat3(&m_Look, L);
m_View = {
m_Right.x, m_Up.x, m_Look.x, 0.0f,
m_Right.y, m_Up.y, m_Look.y, 0.0f,
m_Right.z, m_Up.z, m_Look.z, 0.0f,
-XMVectorGetX(XMVector3Dot(P, R)), -XMVectorGetX(XMVector3Dot(P, U)), -XMVectorGetX(XMVector3Dot(P, L)), 1.0f
};
}
Splitting constant buffers sensibly
As the project gradually grows, the constant buffers get updated frequently, yet every update has to rewrite the entire block; refreshing a whole block just to change a single variable inside it costs performance. By splitting the constant buffers more finely according to update frequency and category, we can ensure as far as possible that no variable is rewritten for no reason. The HLSL constant buffers therefore change to:
cbuffer CBChangesEveryDrawing : register(b0)
{
matrix g_World;
matrix g_WorldInvTranspose;
}
cbuffer CBChangesEveryFrame : register(b1)
{
matrix g_View;
float3 g_EyePosW;
}
cbuffer CBChangesOnResize : register(b2)
{
matrix g_Proj;
}
cbuffer CBChangesRarely : register(b3)
{
DirectionalLight g_DirLight[10];
PointLight g_PointLight[10];
SpotLight g_SpotLight[10];
Material g_Material;
int g_NumDirLight;
int g_NumPointLight;
int g_NumSpotLight;
}
The corresponding C++ structs are:
struct CBChangesEveryDrawing
{
DirectX::XMMATRIX world;
DirectX::XMMATRIX worldInvTranspose;
};
struct CBChangesEveryFrame
{
DirectX::XMMATRIX view;
DirectX::XMFLOAT4 eyePos;
};
struct CBChangesOnResize
{
DirectX::XMMATRIX proj;
};
struct CBChangesRarely
{
DirectionalLight dirLight[10];
PointLight pointLight[10];
SpotLight spotLight[10];
Material material;
int numDirLight;
int numPointLight;
int numSpotLight;
float pad; // Padding to keep the struct a multiple of 16 bytes
};
The buffers are split into four update frequencies, from fastest to slowest: on every object draw, every frame, on every window resize, and (almost) never. Each variable is then placed wherever fits the project's actual needs. Admittedly this may put variables required by different shaders into the same block, but since binding constant buffers to the shader stages can be done once during initialization, this is not a big problem.
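As a quick sanity check, a few static_asserts can verify at compile time that each block is a multiple of 16 bytes, which Direct3D 11 requires of constant buffer sizes. This is only a sketch and assumes the structs above (including the light and material structs) are already padded to 16-byte multiples:
// Constant buffer sizes must be multiples of 16 bytes; the trailing float pad
// in CBChangesRarely exists precisely to satisfy this requirement.
static_assert(sizeof(CBChangesEveryDrawing) % 16 == 0, "CB size must be a multiple of 16 bytes");
static_assert(sizeof(CBChangesEveryFrame) % 16 == 0, "CB size must be a multiple of 16 bytes");
static_assert(sizeof(CBChangesOnResize) % 16 == 0, "CB size must be a multiple of 16 bytes");
static_assert(sizeof(CBChangesRarely) % 16 == 0, "CB size must be a multiple of 16 bytes");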
The GameObject class -- managing game objects
Since the number of objects in the scene is also growing, the GameObject class is introduced to make managing each object as convenient as possible:
class GameObject
{
public:
GameObject();
// Get the position
DirectX::XMFLOAT3 GetPosition() const;
// Set the vertex/index buffers
template<class VertexType, class IndexType>
void SetBuffer(ID3D11Device * device, const Geometry::MeshData<VertexType, IndexType>& meshData);
// Set the texture
void SetTexture(ID3D11ShaderResourceView * texture);
// Set the world matrix
void SetWorldMatrix(const DirectX::XMFLOAT4X4& world);
void XM_CALLCONV SetWorldMatrix(DirectX::FXMMATRIX world);
// Draw
void Draw(ID3D11DeviceContext * deviceContext);
// Set the debug object name
// If the buffers are recreated, the debug object name needs to be set again
void SetDebugObjectName(const std::string& name);
private:
DirectX::XMFLOAT4X4 m_WorldMatrix; // World matrix
ComPtr<ID3D11ShaderResourceView> m_pTexture; // Texture
ComPtr<ID3D11Buffer> m_pVertexBuffer; // Vertex buffer
ComPtr<ID3D11Buffer> m_pIndexBuffer; // Index buffer
UINT m_VertexStride; // Vertex byte stride
UINT m_IndexCount; // Number of indices
};
However, the current GameObject class still depends on several constant buffers from the GameApp class; it will become fully independent by chapter 13.
Note additionally that if you want to adjust an object dynamically, you can store its scale, rotation and translation, modify them every frame, and only then rebuild the world matrix, as in the sketch below.
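For example, a hypothetical per-frame update could look like this (the helper function and its parameters are purely illustrative and not part of the class above):
// Illustrative helper: rebuild the world matrix each frame from stored
// scale / rotation / translation values, then hand it to the object.
void UpdateObjectTransform(GameObject& obj,
    const DirectX::XMFLOAT3& scale,
    const DirectX::XMFLOAT3& rotation,   // pitch / yaw / roll, in radians
    const DirectX::XMFLOAT3& position)
{
    using namespace DirectX;
    XMMATRIX world =
        XMMatrixScaling(scale.x, scale.y, scale.z) *
        XMMatrixRotationRollPitchYaw(rotation.x, rotation.y, rotation.z) *
        XMMatrixTranslation(position.x, position.y, position.z);
    obj.SetWorldMatrix(world);
}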
The vertex- and index-buffer creation that used to live in GameApp::InitResource has been moved into GameObject::SetBuffer:
template<class VertexType, class IndexType>
void GameApp::GameObject::SetBuffer(ID3D11Device * device, const Geometry::MeshData<VertexType, IndexType>& meshData)
{
// Release old resources
m_pVertexBuffer.Reset();
m_pIndexBuffer.Reset();
// Describe the vertex buffer
m_VertexStride = sizeof(VertexType);
D3D11_BUFFER_DESC vbd;
ZeroMemory(&vbd, sizeof(vbd));
vbd.Usage = D3D11_USAGE_IMMUTABLE;
vbd.ByteWidth = (UINT)meshData.vertexVec.size() * m_VertexStride;
vbd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vbd.CPUAccessFlags = 0;
// Create the vertex buffer
D3D11_SUBRESOURCE_DATA InitData;
ZeroMemory(&InitData, sizeof(InitData));
InitData.pSysMem = meshData.vertexVec.data();
HR(device->CreateBuffer(&vbd, &InitData, m_pVertexBuffer.GetAddressOf()));
// Describe the index buffer
m_IndexCount = (UINT)meshData.indexVec.size();
D3D11_BUFFER_DESC ibd;
ZeroMemory(&ibd, sizeof(ibd));
ibd.Usage = D3D11_USAGE_IMMUTABLE;
ibd.ByteWidth = m_IndexCount * sizeof(IndexType);
ibd.BindFlags = D3D11_BIND_INDEX_BUFFER;
ibd.CPUAccessFlags = 0;
// Create the index buffer
InitData.pSysMem = meshData.indexVec.data();
HR(device->CreateBuffer(&ibd, &InitData, m_pIndexBuffer.GetAddressOf()));
}
The ID3D11DeviceContext::XXGetConstantBuffers family of methods -- retrieving the constant buffers bound to a shader stage
Here XX can be VS, DS, CS, GS, HS or PS, i.e. the vertex, domain, compute, geometry, hull and pixel shader stages. Their parameters are essentially identical, so only those of ID3D11DeviceContext::VSGetConstantBuffers are listed here:
void ID3D11DeviceContext::VSGetConstantBuffers(
UINT StartSlot, // [In]Start slot index
UINT NumBuffers, // [In]Number of constant buffers
ID3D11Buffer **ppConstantBuffers) = 0; // [Out]Array receiving the constant buffers
Finally, the GameObject::Draw method is shown below. Because the transpose is performed internally, the world matrix does not need to be pre-transposed when it is set from outside. Note that the Get* calls increment the reference count of the returned buffers, which is why a ComPtr is used to hold the retrieved constant buffer:
void GameApp::GameObject::Draw(ID3D11DeviceContext * deviceContext)
{
// Set the vertex/index buffers
UINT strides = m_VertexStride;
UINT offsets = 0;
deviceContext->IASetVertexBuffers(0, 1, m_pVertexBuffer.GetAddressOf(), &strides, &offsets);
deviceContext->IASetIndexBuffer(m_pIndexBuffer.Get(), DXGI_FORMAT_R16_UINT, 0);
// Retrieve the constant buffer already bound to the pipeline and modify it
ComPtr<ID3D11Buffer> cBuffer = nullptr;
deviceContext->VSGetConstantBuffers(0, 1, cBuffer.GetAddressOf());
CBChangesEveryDrawing cbDrawing;
// Transpose here so the caller does not have to do it in advance
XMMATRIX W = XMLoadFloat4x4(&m_WorldMatrix);
cbDrawing.world = XMMatrixTranspose(W);
cbDrawing.worldInvTranspose = XMMatrixInverse(nullptr, W); // The two transposes cancel each other out
// Update the constant buffer
D3D11_MAPPED_SUBRESOURCE mappedData;
HR(deviceContext->Map(cBuffer.Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedData));
memcpy_s(mappedData.pData, sizeof(CBChangesEveryDrawing), &cbDrawing, sizeof(CBChangesEveryDrawing));
deviceContext->Unmap(cBuffer.Get(), 0);
// Set the texture
deviceContext->PSSetShaderResources(0, 1, m_pTexture.GetAddressOf());
// Now draw
deviceContext->DrawIndexed(m_IndexCount, 0, 0);
}
This is where the constant buffer that needs updating on every draw gets modified.
Changes to the GameApp class
Changes to the GameApp::OnResize method
Since the camera now holds the methods for setting the frustum and the viewport, and the projection matrix in the constant buffer needs updating as well, this part of the work moves here:
void GameApp::OnResize()
{
// Omitted...
D3DApp::OnResize();
// Omitted...
// Update the camera for the new window size
if (m_pCamera != nullptr)
{
m_pCamera->SetFrustum(XM_PI / 3, AspectRatio(), 0.5f, 1000.0f);
m_pCamera->SetViewPort(0.0f, 0.0f, (float)m_ClientWidth, (float)m_ClientHeight);
m_CBOnResize.proj = XMMatrixTranspose(m_pCamera->GetProjXM());
D3D11_MAPPED_SUBRESOURCE mappedData;
HR(m_pd3dImmediateContext->Map(m_pConstantBuffers[2].Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedData));
memcpy_s(mappedData.pData, sizeof(CBChangesOnResize), &m_CBOnResize, sizeof(CBChangesOnResize));
m_pd3dImmediateContext->Unmap(m_pConstantBuffers[2].Get(), 0);
}
}
Changes to the GameApp::InitResource method
This method creates three kinds of game objects (walls, floor and wooden crate), then creates several constant buffers, and finally binds the required resources to each stage of the rendering pipeline. One directional light and one point light are set up here:
bool GameApp::InitResource()
{
// ******************
// Describe the constant buffers
D3D11_BUFFER_DESC cbd;
ZeroMemory(&cbd, sizeof(cbd));
cbd.Usage = D3D11_USAGE_DYNAMIC;
cbd.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
cbd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
// Create the constant buffers used by the VS and PS
cbd.ByteWidth = sizeof(CBChangesEveryDrawing);
HR(m_pd3dDevice->CreateBuffer(&cbd, nullptr, m_pConstantBuffers[0].GetAddressOf()));
cbd.ByteWidth = sizeof(CBChangesEveryFrame);
HR(m_pd3dDevice->CreateBuffer(&cbd, nullptr, m_pConstantBuffers[1].GetAddressOf()));
cbd.ByteWidth = sizeof(CBChangesOnResize);
HR(m_pd3dDevice->CreateBuffer(&cbd, nullptr, m_pConstantBuffers[2].GetAddressOf()));
cbd.ByteWidth = sizeof(CBChangesRarely);
HR(m_pd3dDevice->CreateBuffer(&cbd, nullptr, m_pConstantBuffers[3].GetAddressOf()));
// ******************
// Initialize the game objects
ComPtr<ID3D11ShaderResourceView> texture;
// Initialize the wooden crate
HR(CreateDDSTextureFromFile(m_pd3dDevice.Get(), L"Texture\\WoodCrate.dds", nullptr, texture.GetAddressOf()));
m_WoodCrate.SetBuffer(m_pd3dDevice.Get(), Geometry::CreateBox());
m_WoodCrate.SetTexture(texture.Get());
// Initialize the floor
HR(CreateDDSTextureFromFile(m_pd3dDevice.Get(), L"Texture\\floor.dds", nullptr, texture.ReleaseAndGetAddressOf()));
m_Floor.SetBuffer(m_pd3dDevice.Get(),
Geometry::CreatePlane(XMFLOAT3(0.0f, -1.0f, 0.0f), XMFLOAT2(20.0f, 20.0f), XMFLOAT2(5.0f, 5.0f)));
m_Floor.SetTexture(texture.Get());
// Initialize the walls
m_Walls.resize(4);
HR(CreateDDSTextureFromFile(m_pd3dDevice.Get(), L"Texture\\brick.dds", nullptr, texture.ReleaseAndGetAddressOf()));
// Generate the four wall faces here
for (int i = 0; i < 4; ++i)
{
m_Walls[i].SetBuffer(m_pd3dDevice.Get(),
Geometry::CreatePlane(XMFLOAT3(), XMFLOAT2(20.0f, 8.0f), XMFLOAT2(5.0f, 1.5f)));
XMMATRIX world = XMMatrixRotationX(-XM_PIDIV2) * XMMatrixRotationY(XM_PIDIV2 * i)
* XMMatrixTranslation(i % 2 ? -10.0f * (i - 2) : 0.0f, 3.0f, i % 2 == 0 ? -10.0f * (i - 1) : 0.0f);
m_Walls[i].SetWorldMatrix(world);
m_Walls[i].SetTexture(texture.Get());
}
// Initialize the sampler state
D3D11_SAMPLER_DESC sampDesc;
ZeroMemory(&sampDesc, sizeof(sampDesc));
sampDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
sampDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
sampDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
sampDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
sampDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
sampDesc.MinLOD = 0;
sampDesc.MaxLOD = D3D11_FLOAT32_MAX;
HR(m_pd3dDevice->CreateSamplerState(&sampDesc, m_pSamplerState.GetAddressOf()));
// ******************
// Initialize the constant buffer values
// Values that may change every frame
m_CameraMode = CameraMode::FirstPerson;
auto camera = std::shared_ptr<FirstPersonCamera>(new FirstPersonCamera);
m_pCamera = camera;
camera->SetViewPort(0.0f, 0.0f, (float)m_ClientWidth, (float)m_ClientHeight);
camera->LookAt(XMFLOAT3(), XMFLOAT3(0.0f, 0.0f, 1.0f), XMFLOAT3(0.0f, 1.0f, 0.0f));
// Values that change only when the window is resized
m_pCamera->SetFrustum(XM_PI / 3, AspectRatio(), 0.5f, 1000.0f);
m_CBOnResize.proj = XMMatrixTranspose(m_pCamera->GetProjXM());
// Values that will not change
// Directional light
m_CBRarely.dirLight[0].ambient = XMFLOAT4(0.5f, 0.5f, 0.5f, 1.0f);
m_CBRarely.dirLight[0].diffuse = XMFLOAT4(0.8f, 0.8f, 0.8f, 1.0f);
m_CBRarely.dirLight[0].specular = XMFLOAT4(0.5f, 0.5f, 0.5f, 1.0f);
m_CBRarely.dirLight[0].direction = XMFLOAT3(0.0f, -1.0f, 0.0f);
// Point light
m_CBRarely.pointLight[0].position = XMFLOAT3(0.0f, 10.0f, 0.0f);
m_CBRarely.pointLight[0].ambient = XMFLOAT4(0.5f, 0.5f, 0.5f, 1.0f);
m_CBRarely.pointLight[0].diffuse = XMFLOAT4(0.8f, 0.8f, 0.8f, 1.0f);
m_CBRarely.pointLight[0].specular = XMFLOAT4(0.5f, 0.5f, 0.5f, 1.0f);
m_CBRarely.pointLight[0].att = XMFLOAT3(0.0f, 0.1f, 0.0f);
m_CBRarely.pointLight[0].range = 25.0f;
m_CBRarely.numDirLight = 1;
m_CBRarely.numPointLight = 1;
m_CBRarely.numSpotLight = 0;
// Initialize the material
m_CBRarely.material.ambient = XMFLOAT4(0.5f, 0.5f, 0.5f, 1.0f);
m_CBRarely.material.diffuse = XMFLOAT4(0.6f, 0.6f, 0.6f, 1.0f);
m_CBRarely.material.specular = XMFLOAT4(0.1f, 0.1f, 0.1f, 50.0f);
// Update the constant buffer resources that are rarely modified
D3D11_MAPPED_SUBRESOURCE mappedData;
HR(m_pd3dImmediateContext->Map(m_pConstantBuffers[2].Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedData));
memcpy_s(mappedData.pData, sizeof(CBChangesOnResize), &m_CBOnResize, sizeof(CBChangesOnResize));
m_pd3dImmediateContext->Unmap(m_pConstantBuffers[2].Get(), 0);
HR(m_pd3dImmediateContext->Map(m_pConstantBuffers[3].Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedData));
memcpy_s(mappedData.pData, sizeof(CBChangesRarely), &m_CBRarely, sizeof(CBChangesRarely));
m_pd3dImmediateContext->Unmap(m_pConstantBuffers[3].Get(), 0);
// ******************
// Bind the required resources to each pipeline stage
// Set the primitive topology and the input layout
m_pd3dImmediateContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
m_pd3dImmediateContext->IASetInputLayout(m_pVertexLayout3D.Get());
// Bind the 3D shaders by default
m_pd3dImmediateContext->VSSetShader(m_pVertexShader3D.Get(), nullptr, 0);
// Pre-bind the buffers each stage needs; the per-frame buffer has to be bound to both the VS and the PS
m_pd3dImmediateContext->VSSetConstantBuffers(0, 1, m_pConstantBuffers[0].GetAddressOf());
m_pd3dImmediateContext->VSSetConstantBuffers(1, 1, m_pConstantBuffers[1].GetAddressOf());
m_pd3dImmediateContext->VSSetConstantBuffers(2, 1, m_pConstantBuffers[2].GetAddressOf());
m_pd3dImmediateContext->PSSetConstantBuffers(1, 1, m_pConstantBuffers[1].GetAddressOf());
m_pd3dImmediateContext->PSSetConstantBuffers(3, 1, m_pConstantBuffers[3].GetAddressOf());
m_pd3dImmediateContext->PSSetShader(m_pPixelShader3D.Get(), nullptr, 0);
m_pd3dImmediateContext->PSSetSamplers(0, 1, m_pSamplerState.GetAddressOf());
return true;
}
Changes to GameApp::UpdateScene
Using the Mouse class in relative mode
While playing in camera mode the mouse cursor is not visible, so the mouse can be switched to relative mode.
First, the GetSystemMetrics function is used to query the current screen resolution so that the window can be centered in the CreateWindow call. Here is the change to D3DApp::InitMainWindow:
bool D3DApp::InitMainWindow()
{
// Unchanged parts omitted...
int screenWidth = GetSystemMetrics(SM_CXSCREEN);
int screenHeight = GetSystemMetrics(SM_CYSCREEN);
// Compute window rectangle dimensions based on requested client area dimensions.
RECT R = { 0, 0, m_ClientWidth, m_ClientHeight };
AdjustWindowRect(&R, WS_OVERLAPPEDWINDOW, false);
int width = R.right - R.left;
int height = R.bottom - R.top;
m_hMainWnd = CreateWindow(L"D3DWndClassName", m_MainWndCaption.c_str(),
WS_OVERLAPPEDWINDOW, (screenWidth - width) / 2, (screenHeight - height) / 2, width, height, 0, 0, m_hAppInst, 0);
// Unchanged parts omitted...
return true;
}
Then the GameApp::Init method switches the mouse to relative mode:
bool GameApp::Init()
{
if (!D3DApp::Init())
return false;
if (!InitEffect())
return false;
if (!InitResource())
return false;
// Initialize the mouse; the keyboard does not need this
m_pMouse->SetWindow(m_hMainWnd);
m_pMouse->SetMode(DirectX::Mouse::MODE_RELATIVE);
return true;
}
Finally, we can read the relative displacement and respond according to the current camera mode and the state of the keyboard and mouse:
void GameApp::UpdateScene(float dt)
{
// Update mouse events and obtain the relative offsets
Mouse::State mouseState = m_pMouse->GetState();
Mouse::State lastMouseState = m_MouseTracker.GetLastState();
Keyboard::State keyState = m_pKeyboard->GetState();
m_KeyboardTracker.Update(keyState);
// Retrieve the derived camera types
auto cam1st = std::dynamic_pointer_cast<FirstPersonCamera>(m_pCamera);
auto cam3rd = std::dynamic_pointer_cast<ThirdPersonCamera>(m_pCamera);
if (m_CameraMode == CameraMode::FirstPerson || m_CameraMode == CameraMode::Free)
{
// First-person/free camera controls
// Directional movement
if (keyState.IsKeyDown(Keyboard::W))
{
if (m_CameraMode == CameraMode::FirstPerson)
cam1st->Walk(dt * 3.0f);
else
cam1st->MoveForward(dt * 3.0f);
}
if (keyState.IsKeyDown(Keyboard::S))
{
if (m_CameraMode == CameraMode::FirstPerson)
cam1st->Walk(dt * -3.0f);
else
cam1st->MoveForward(dt * -3.0f);
}
if (keyState.IsKeyDown(Keyboard::A))
cam1st->Strafe(dt * -3.0f);
if (keyState.IsKeyDown(Keyboard::D))
cam1st->Strafe(dt * 3.0f);
// Clamp the position to the region [-8.9f, 8.9f]
// Do not allow going below the ground
XMFLOAT3 adjustedPos;
XMStoreFloat3(&adjustedPos, XMVectorClamp(cam1st->GetPositionXM(), XMVectorSet(-8.9f, 0.0f, -8.9f, 0.0f), XMVectorReplicate(8.9f)));
cam1st->SetPosition(adjustedPos);
// Move the crate only in first-person mode
if (m_CameraMode == CameraMode::FirstPerson)
m_WoodCrate.SetWorldMatrix(XMMatrixTranslation(adjustedPos.x, adjustedPos.y, adjustedPos.z));
// Rotate the view; scaling by dt prevents a sudden spin when the initial delta is large
cam1st->Pitch(mouseState.y * dt * 1.25f);
cam1st->RotateY(mouseState.x * dt * 1.25f);
}
else if (m_CameraMode == CameraMode::ThirdPerson)
{
// Third-person camera controls
cam3rd->SetTarget(m_WoodCrate.GetPosition());
// Rotate around the object
cam3rd->RotateX(mouseState.y * dt * 1.25f);
cam3rd->RotateY(mouseState.x * dt * 1.25f);
cam3rd->Approach(-mouseState.scrollWheelValue / 120 * 1.0f);
}
// Update the view matrix
m_pCamera->UpdateViewMatrix();
XMStoreFloat4(&m_CBFrame.eyePos, m_pCamera->GetPositionXM());
m_CBFrame.view = XMMatrixTranspose(m_pCamera->GetViewXM());
// Reset the scroll wheel value
m_pMouse->ResetScrollWheelValue();
// Camera mode switching
if (m_KeyboardTracker.IsKeyPressed(Keyboard::D1) && m_CameraMode != CameraMode::FirstPerson)
{
if (!cam1st)
{
cam1st.reset(new FirstPersonCamera);
cam1st->SetFrustum(XM_PI / 3, AspectRatio(), 0.5f, 1000.0f);
m_pCamera = cam1st;
}
cam1st->LookTo(m_WoodCrate.GetPosition(),
XMFLOAT3(0.0f, 0.0f, 1.0f),
XMFLOAT3(0.0f, 1.0f, 0.0f));
m_CameraMode = CameraMode::FirstPerson;
}
else if (m_KeyboardTracker.IsKeyPressed(Keyboard::D2) && m_CameraMode != CameraMode::ThirdPerson)
{
if (!cam3rd)
{
cam3rd.reset(new ThirdPersonCamera);
cam3rd->SetFrustum(XM_PI / 3, AspectRatio(), 0.5f, 1000.0f);
m_pCamera = cam3rd;
}
XMFLOAT3 target = m_WoodCrate.GetPosition();
cam3rd->SetTarget(target);
cam3rd->SetDistance(8.0f);
cam3rd->SetDistanceMinMax(3.0f, 20.0f);
m_CameraMode = CameraMode::ThirdPerson;
}
else if (m_KeyboardTracker.IsKeyPressed(Keyboard::D3) && m_CameraMode != CameraMode::Free)
{
if (!cam1st)
{
cam1st.reset(new FirstPersonCamera);
cam1st->SetFrustum(XM_PI / 3, AspectRatio(), 0.5f, 1000.0f);
m_pCamera = cam1st;
}
// Start above the crate
XMFLOAT3 pos = m_WoodCrate.GetPosition();
XMFLOAT3 to = XMFLOAT3(0.0f, 0.0f, 1.0f);
XMFLOAT3 up = XMFLOAT3(0.0f, 1.0f, 0.0f);
pos.y += 3;
cam1st->LookTo(pos, to, up);
m_CameraMode = CameraMode::Free;
}
// Exit the program; send a destroy message to the window here
if (keyState.IsKeyDown(Keyboard::Escape))
SendMessage(MainWnd(), WM_DESTROY, 0, 0);
D3D11_MAPPED_SUBRESOURCE mappedData;
HR(m_pd3dImmediateContext->Map(m_pConstantBuffers[1].Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedData));
memcpy_s(mappedData.pData, sizeof(CBChangesEveryFrame), &m_CBFrame, sizeof(CBChangesEveryFrame));
m_pd3dImmediateContext->Unmap(m_pConstantBuffers[1].Get(), 0);
}
The XMVectorClamp call on the camera position keeps its X, Y and Z values inside the box region bounded by [-8.9, 8.9] (with Y additionally kept at or above 0 so the camera cannot sink below the floor), preventing it from leaving the scene. The third-person camera has no such restriction, because letting it pass through the walls creates a kind of see-through observation effect.
Changes to GameApp::DrawScene
This method changes very little; here it is:
void GameApp::DrawScene()
{
assert(m_pd3dImmediateContext);
assert(m_pSwapChain);
m_pd3dImmediateContext->ClearRenderTargetView(m_pRenderTargetView.Get(), reinterpret_cast<const float*>(&Colors::Black));
m_pd3dImmediateContext->ClearDepthStencilView(m_pDepthStencilView.Get(), D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0);
//
// Draw the geometric models
//
m_WoodCrate.Draw(m_pd3dImmediateContext.Get());
m_Floor.Draw(m_pd3dImmediateContext.Get());
for (auto& wall : m_Walls)
wall.Draw(m_pd3dImmediateContext.Get());
//
// Draw the Direct2D part
//
// ...
HR(m_pSwapChain->Present(0, 0));
}
Finally, the accompanying animation demonstrates the controls under the three camera modes.
Exercises
- In third-person mode, let the object itself translate forward/backward and left/right as well
- In third-person mode, use a cylinder lying on its side; turn its left/right translation into left/right rotation, and its forward/backward movement into rolling forward
- In first-person mode, add support for roll to the camera (rotation about the Look axis)