Five important coordinate systems
To transform coordinates from one coordinate system to another, they go through the MVP (Model-View-Projection) transformation.
Vertex coordinates start out in local space, where they are called local coordinates. A reference frame makes the concept easier to grasp: local coordinates are the coordinates of an object's vertices relative to the object's own origin, which sets them apart from world coordinates; world-space coordinates are the coordinates of an object relative to the entire scene.
In a game scene there are many objects, each at a different position; expressed relative to the whole scene, their vertex coordinates are world-space coordinates. The Model matrix of the MVP transformation converts local-space coordinates into world-space coordinates.
The model matrix is a transformation matrix that translates, scales and rotates an object to place it at the position and orientation it is supposed to have in the world.
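As a minimal sketch (assuming the same GLM headers used in the listings below; the helper name makeModelMatrix and the concrete values are only illustrative), a model matrix is typically assembled by chaining translate, rotate and scale:
glm::mat4 makeModelMatrix() {
    glm::mat4 model = glm::mat4(1.0f);                                              // start from the identity matrix
    model = glm::translate(model, glm::vec3(1.0f, 0.0f, -2.0f));                    // place the object at (1, 0, -2) in world space
    model = glm::rotate(model, glm::radians(30.0f), glm::vec3(0.0f, 1.0f, 0.0f));   // rotate 30 degrees around the y-axis
    model = glm::scale(model, glm::vec3(0.5f));                                     // shrink the object to half its size
    return model;
}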
View space is the space as seen from the camera's point of view. The View matrix transforms world-space coordinates into view-space coordinates, so in OpenGL a camera can be simulated by designing and creating an appropriate View matrix.
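A common way to build such a View matrix with GLM is glm::lookAt, which takes the camera position, the point it looks at, and an up vector (the positions below are arbitrary; the listings further down achieve the same effect by simply translating the whole scene backwards with glm::translate):
// Camera placed at (0, 0, 3), looking at the origin, with +Y as the up direction.
glm::mat4 view = glm::lookAt(glm::vec3(0.0f, 0.0f, 3.0f),   // camera position
                             glm::vec3(0.0f, 0.0f, 0.0f),   // target being looked at
                             glm::vec3(0.0f, 1.0f, 0.0f));  // up direction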
Since we now have a view space, i.e. the scene as seen from the camera, the camera necessarily has a limited visible range; vertex coordinates outside this range are culled (discarded) in clip space. This is why the picture we see on screen in a game does not contain every object in the scene, which improves rendering efficiency.
To transform vertex coordinates from view space to clip space we define a projection matrix. It specifies a range of coordinates in each dimension; any coordinate outside this range is clipped and is not mapped to normalized device coordinates.
The viewing box created by the projection matrix is called a frustum; every coordinate that ends up inside the frustum will end up on the user's screen.
Once all vertices have been transformed to clip space, a final operation called perspective division is performed: the x, y and z components of the position vector are each divided by its homogeneous w component. Perspective division is what transforms 4D clip-space coordinates into 3D normalized device coordinates, and it is performed automatically at the end of the vertex shader step.
The projection matrix maps the given frustum range to clip space, and it also modifies the w component of every vertex so that the farther a vertex is from the viewer, the larger its w becomes; through perspective division this makes vertices that are farther away appear smaller. Coordinates transformed into clip space lie in the range -w to w (anything outside that range is clipped).
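Putting the three matrices and the perspective division together (V_local is a homogeneous vertex position with w = 1):
V_clip = M_projection · M_view · M_model · V_local
(x_ndc, y_ndc, z_ndc) = (x_clip / w_clip, y_clip / w_clip, z_clip / w_clip)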
[Figure: the frustum of a perspective projection]
glm::perspective() from the GLM library can be used to define such a perspective-projection frustum, for example:
glm::perspective(glm::radians(45.0f), 800.0f / 600.0f, 0.1f, 100.0f);
The first parameter is the field of view (FOV), the second is the aspect ratio, the third is the distance from the camera to the near plane, and the fourth is the distance from the camera to the far plane.
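As a small sketch (not part of the listing below), the same chain can be reproduced on the CPU with GLM to make the perspective division explicit; the matrices mirror the ones set up further down, and the vertex chosen is the quad's top-right corner:
glm::mat4 model = glm::rotate(glm::mat4(1.0f), glm::radians(-55.0f), glm::vec3(1.0f, 0.0f, 0.0f));
glm::mat4 view = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.0f, -3.0f));
glm::mat4 projection = glm::perspective(glm::radians(45.0f), 800.0f / 600.0f, 0.1f, 100.0f);
glm::vec4 local(0.5f, 0.5f, 0.0f, 1.0f);              // a local-space vertex, w = 1
glm::vec4 clip = projection * view * model * local;   // clip-space position (what gl_Position receives)
glm::vec3 ndc = glm::vec3(clip) / clip.w;              // perspective division -> normalized device coordinates
In the listings below the same multiplication happens in the vertex shader, and OpenGL performs the division automatically.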
Using the MVP transformation to turn a 2D quad into a 3D-looking one:
#include <iostream>
#define GLEW_STATIC
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include "./shader/Shader.h"
#include <stb_image.h> // STB_IMAGE_IMPLEMENTATION must be defined in exactly one source file
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
void processInput(GLFWwindow*); // handle keyboard/mouse input
float mix_visibility = 0.2f;
int main() {
#pragma region Init
//initialize GLFW
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
//create the window
GLFWwindow* window = glfwCreateWindow(800, 600, "Learning OpenGL", nullptr, nullptr);
if (window == nullptr) {
std::cout << "Create window failed." << std::endl;
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
//initialize GLEW
glewExperimental = true;
if (glewInit() != GLEW_OK) {
std::cout << "Init glew failed." << std::endl;
glfwTerminate();
return -1;
}
glViewport(0, 0, 800, 600);
/*glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);*/
#pragma endregion
#pragma region Shader
Shader* shader = new Shader("resource/vertexShader.vert", "resource/fragmentShader.frag");
#pragma endregion
float vertex_data[] = {
// positions // colors // texture coords
0.5f, 0.5f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, // top right
0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // bottom right
-0.5f, -0.5f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, // bottom left
-0.5f, 0.5f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f // top left
};
unsigned int vertex_index[] = {
0, 1, 3, // first triangle
1, 2, 3 // second triangle
};
unsigned int VAO;
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
unsigned int VBO;
glGenBuffers(1, &VBO); //generate a VBO
glBindBuffer(GL_ARRAY_BUFFER, VBO); //bind the VBO to the GL_ARRAY_BUFFER target
glBufferData(GL_ARRAY_BUFFER, sizeof(vertex_data), vertex_data, GL_STATIC_DRAW); //copy the vertex data into the buffer
unsigned int EBO;
glGenBuffers(1, &EBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(vertex_index), vertex_index, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)0); //tell OpenGL how to interpret the vertex data
glEnableVertexAttribArray(0); //enable the vertex attribute at location = 0 (disabled by default)
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(1);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
glEnableVertexAttribArray(2);
unsigned int texture1, texture2;
glGenTextures(1, &texture1);
glBindTexture(GL_TEXTURE_2D, texture1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
int width = 0, height, nrChannels;
stbi_set_flip_vertically_on_load(true);
unsigned char* data = stbi_load("resource/container.jpg", &width, &height, &nrChannels, 0);
if (data) {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
glGenerateMipmap(GL_TEXTURE_2D);
}
else {
std::cout << "Failed to load texture" << std::endl;
}
stbi_image_free(data);
glGenTextures(1, &texture2);
glBindTexture(GL_TEXTURE_2D, texture2);
// set the texture wrapping parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
// set texture filtering parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// load image, create texture and generate mipmaps
data = stbi_load("resource/awesomeface.png", &width, &height, &nrChannels, 0);
if (data)
{
// note that the awesomeface.png has transparency and thus an alpha channel, so make sure to tell OpenGL the data type is of GL_RGBA
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glGenerateMipmap(GL_TEXTURE_2D);
}
else
{
std::cout << "Failed to load texture" << std::endl;
}
stbi_image_free(data);
glm::mat4 model = glm::mat4(1.0f);
glm::mat4 view = glm::mat4(1.0f);
glm::mat4 projection = glm::mat4(1.0f);
model = glm::rotate(model, glm::radians(-55.0f), glm::vec3(1.0f, 0.0f, 0.0f)); //rotate the quad -55 degrees around the x-axis so it appears to lie flat
view = glm::translate(view, glm::vec3(0.0f, 0.0f, -3.0f));
projection = glm::perspective(glm::radians(45.0f), 800.0f / 600.0f, 0.1f, 100.0f);
shader->use();
glUniformMatrix4fv(glGetUniformLocation(shader->m_ID, "model"), 1, GL_FALSE, glm::value_ptr(model));
glUniformMatrix4fv(glGetUniformLocation(shader->m_ID, "view"), 1, GL_FALSE, glm::value_ptr(view));
glUniformMatrix4fv(glGetUniformLocation(shader->m_ID, "projection"), 1, GL_FALSE, glm::value_ptr(projection));
// tell each sampler uniform which texture unit it samples from
shader->setInt("texture1", 0);
shader->setInt("texture2", 1);
//render loop
while (!glfwWindowShouldClose(window)) {
//input
processInput(window);
//rendering
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
//glDrawArrays(GL_TRIANGLES, 0, 3);
glActiveTexture(GL_TEXTURE0); //activate texture unit 0 (texture1)
glBindTexture(GL_TEXTURE_2D, texture1);
glActiveTexture(GL_TEXTURE1); //activate texture unit 1 (texture2)
glBindTexture(GL_TEXTURE_2D, texture2);
glBindVertexArray(VAO);
shader->setFloat("visibility", mix_visibility);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
//check and call events and swap the buffers
glfwSwapBuffers(window); //swap the front and back buffers (double buffering)
glfwPollEvents(); //poll for keyboard/mouse input events
}
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
glDeleteBuffers(1, &EBO);
glfwTerminate(); //release all resources
return 0;
}
void processInput(GLFWwindow* window) {
if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS) {
glfwSetWindowShouldClose(window, true); //close the window when ESC is pressed
}
if (glfwGetKey(window, GLFW_KEY_UP) == GLFW_PRESS) {
mix_visibility += 0.001f;
if (mix_visibility >= 1.0f) mix_visibility = 1.0f;
}
if (glfwGetKey(window, GLFW_KEY_DOWN) == GLFW_PRESS) {
mix_visibility -= 0.001f;
if (mix_visibility <= 0.0f) mix_visibility = 0.0f;
}
}
OpenGL stores all of its depth information in a z-buffer, also known as the depth buffer. The depth value is stored with each fragment (as the fragment's z value); whenever a fragment wants to output its color, OpenGL compares that depth value with the z-buffer. If the current fragment lies behind another fragment it is discarded, otherwise its value overwrites the buffer. This process is called depth testing and is done automatically by OpenGL.
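The API surface for this is small and is exactly what the next listing uses; glDepthFunc is shown only for completeness, since GL_LESS is already the default comparison:
glEnable(GL_DEPTH_TEST); // turn on depth testing (disabled by default)
glDepthFunc(GL_LESS); // keep the fragment with the smaller depth value (the default)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // clear both the color and the depth buffer every frame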
Using the z-buffer for depth testing, and turning the quad into a cube (drawing 36 vertices):
#include <iostream>
#define GLEW_STATIC
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include "./shader/Shader.h"
#include <stb_image.h> // STB_IMAGE_IMPLEMENTATION must be defined in exactly one source file
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
void processInput(GLFWwindow*); // same ESC/arrow-key input handler as in the previous listing
int main() {
//initialize GLFW
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
//create the window
GLFWwindow* window = glfwCreateWindow(800, 600, "Learning OpenGL", nullptr, nullptr);
if (window == nullptr) {
std::cout << "Create window failed." << std::endl;
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
//initialize GLEW
glewExperimental = true;
if (glewInit() != GLEW_OK) {
std::cout << "Init glew failed." << std::endl;
glfwTerminate();
return -1;
}
glViewport(0, 0, 800, 600);
Shader* shader = new Shader("resource/vertexShader.vert","resource/fragmentShader.frag");
float vertex_data[] = {
// positions // texture coords (36 vertices: 6 faces x 2 triangles)
-0.5f, -0.5f, -0.5f, 0.0f, 0.0f,
0.5f, -0.5f, -0.5f, 1.0f, 0.0f,
0.5f, 0.5f, -0.5f, 1.0f, 1.0f,
0.5f, 0.5f, -0.5f, 1.0f, 1.0f,
-0.5f, 0.5f, -0.5f, 0.0f, 1.0f,
-0.5f, -0.5f, -0.5f, 0.0f, 0.0f,
-0.5f, -0.5f, 0.5f, 0.0f, 0.0f,
0.5f, -0.5f, 0.5f, 1.0f, 0.0f,
0.5f, 0.5f, 0.5f, 1.0f, 1.0f,
0.5f, 0.5f, 0.5f, 1.0f, 1.0f,
-0.5f, 0.5f, 0.5f, 0.0f, 1.0f,
-0.5f, -0.5f, 0.5f, 0.0f, 0.0f,
-0.5f, 0.5f, 0.5f, 1.0f, 0.0f,
-0.5f, 0.5f, -0.5f, 1.0f, 1.0f,
-0.5f, -0.5f, -0.5f, 0.0f, 1.0f,
-0.5f, -0.5f, -0.5f, 0.0f, 1.0f,
-0.5f, -0.5f, 0.5f, 0.0f, 0.0f,
-0.5f, 0.5f, 0.5f, 1.0f, 0.0f,
0.5f, 0.5f, 0.5f, 1.0f, 0.0f,
0.5f, 0.5f, -0.5f, 1.0f, 1.0f,
0.5f, -0.5f, -0.5f, 0.0f, 1.0f,
0.5f, -0.5f, -0.5f, 0.0f, 1.0f,
0.5f, -0.5f, 0.5f, 0.0f, 0.0f,
0.5f, 0.5f, 0.5f, 1.0f, 0.0f,
-0.5f, -0.5f, -0.5f, 0.0f, 1.0f,
0.5f, -0.5f, -0.5f, 1.0f, 1.0f,
0.5f, -0.5f, 0.5f, 1.0f, 0.0f,
0.5f, -0.5f, 0.5f, 1.0f, 0.0f,
-0.5f, -0.5f, 0.5f, 0.0f, 0.0f,
-0.5f, -0.5f, -0.5f, 0.0f, 1.0f,
-0.5f, 0.5f, -0.5f, 0.0f, 1.0f,
0.5f, 0.5f, -0.5f, 1.0f, 1.0f,
0.5f, 0.5f, 0.5f, 1.0f, 0.0f,
0.5f, 0.5f, 0.5f, 1.0f, 0.0f,
-0.5f, 0.5f, 0.5f, 0.0f, 0.0f,
-0.5f, 0.5f, -0.5f, 0.0f, 1.0f
};
//world-space position of each cube
glm::vec3 cubePositions[] = {
glm::vec3(0.0f, 0.0f, 0.0f),
glm::vec3(2.0f, 5.0f, -15.0f),
glm::vec3(-1.5f, -2.2f, -2.5f),
glm::vec3(-3.8f, -2.0f, -12.3f),
glm::vec3(2.4f, -0.4f, -3.5f),
glm::vec3(-1.7f, 3.0f, -7.5f),
glm::vec3(1.3f, -2.0f, -2.5f),
glm::vec3(1.5f, 2.0f, -2.5f),
glm::vec3(1.5f, 0.2f, -1.5f),
glm::vec3(-1.3f, 1.0f, -1.5f)
};
unsigned int VAO;
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
unsigned int VBO;
glGenBuffers(1, &VBO); //generate a VBO
glBindBuffer(GL_ARRAY_BUFFER, VBO); //bind the VBO to the GL_ARRAY_BUFFER target
glBufferData(GL_ARRAY_BUFFER, sizeof(vertex_data), vertex_data, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)0); //tell OpenGL how to interpret the vertex data
glEnableVertexAttribArray(0); //enable the vertex attribute at location = 0 (disabled by default)
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(1);
unsigned int texture1, texture2;
glGenTextures(1, &texture1);
glBindTexture(GL_TEXTURE_2D, texture1);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
int width = 0, height, nrChannels;
stbi_set_flip_vertically_on_load(true);
unsigned char* data = stbi_load("resource/container.jpg", &width, &height, &nrChannels, 0);
if (data) {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
glGenerateMipmap(GL_TEXTURE_2D);
}
else {
std::cout << "Failed to load texture" << std::endl;
}
stbi_image_free(data);
glGenTextures(1, &texture2);
glBindTexture(GL_TEXTURE_2D, texture2);
// set the texture wrapping parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); // set texture wrapping to GL_REPEAT (default wrapping method)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
// set texture filtering parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// load image, create texture and generate mipmaps
data = stbi_load("resource/awesomeface.png", &width, &height, &nrChannels, 0);
if (data)
{
// note that the awesomeface.png has transparency and thus an alpha channel, so make sure to tell OpenGL the data type is of GL_RGBA
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glGenerateMipmap(GL_TEXTURE_2D);
}
else
{
std::cout << "Failed to load texture" << std::endl;
}
stbi_image_free(data);
glm::mat4 view = glm::mat4(1.0f);
glm::mat4 projection = glm::mat4(1.0f);
view = glm::translate(view, glm::vec3(0.0f, 0.0f, -3.0f));
projection = glm::perspective(glm::radians(60.0f), 800.0f / 600.0f, 0.1f, 100.0f);
shader->use();
glUniformMatrix4fv(glGetUniformLocation(shader->m_ID, "view"), 1, GL_FALSE, glm::value_ptr(view));
glUniformMatrix4fv(glGetUniformLocation(shader->m_ID, "projection"), 1, GL_FALSE, glm::value_ptr(projection));
// tell each sampler uniform which texture unit it samples from
shader->setInt("texture1", 0);
shader->setInt("texture2", 1);
glEnable(GL_DEPTH_TEST); //enable depth testing
//render loop
while (!glfwWindowShouldClose(window)) {
//input
processInput(window);
//rendering
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glActiveTexture(GL_TEXTURE0); //activate texture unit 0 (texture1)
glBindTexture(GL_TEXTURE_2D, texture1);
glActiveTexture(GL_TEXTURE1); //activate texture unit 1 (texture2)
glBindTexture(GL_TEXTURE_2D, texture2);
glBindVertexArray(VAO);
for (unsigned int i = 0; i < 10; i++) {
glm::mat4 model = glm::mat4(1.0f);
model = glm::translate(model, cubePositions[i]);
float angle = 20.0f * i;
if (i % 3 == 0) {
angle = glfwGetTime() * 25.0f;
}
model = glm::rotate(model, glm::radians(angle), glm::vec3(0.5f, 1.0f, 0.3f));
glUniformMatrix4fv(glGetUniformLocation(shader->m_ID, "model"), 1, GL_FALSE, glm::value_ptr(model));
glDrawArrays(GL_TRIANGLES, 0, 36);
}
//check and call events and swap the buffers
glfwSwapBuffers(window); //swap the front and back buffers (double buffering)
glfwPollEvents(); //poll for keyboard/mouse input events
}
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
glfwTerminate(); //release all resources
return 0;
}