// CCRenderer.cpp
/****************************************************************************
Copyright (c) 2013-2017 Chukong Technologies Inc.

http://www.cocos2d-x.org

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
#include "renderer/CCRenderer.h"

#include <algorithm>

#include "renderer/CCTrianglesCommand.h"
#include "renderer/CCBatchCommand.h"
#include "renderer/CCCustomCommand.h"
#include "renderer/CCGroupCommand.h"
#include "renderer/CCPrimitiveCommand.h"
#include "renderer/CCMeshCommand.h"
#include "renderer/CCGLProgramCache.h"
#include "renderer/CCMaterial.h"
#include "renderer/CCTechnique.h"
#include "renderer/CCPass.h"
#include "renderer/CCRenderState.h"
#include "renderer/ccGLStateCache.h"
#include "base/CCConfiguration.h"
#include "base/CCDirector.h"
#include "base/CCEventDispatcher.h"
#include "base/CCEventListenerCustom.h"
#include "base/CCEventType.h"
#include "2d/CCCamera.h"
#include "2d/CCScene.h"

NS_CC_BEGIN

// helper
static bool compareRenderCommand(RenderCommand* a, RenderCommand* b)
{
    return a->getGlobalOrder() < b->getGlobalOrder();
}

static bool compare3DCommand(RenderCommand* a, RenderCommand* b)
{
    return a->getDepth() > b->getDepth();
}

// queue
RenderQueue::RenderQueue()
{
}
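
// Route a command into one of the five sub-queues: negative global-Z, 3D opaque,
// 3D transparent, global-Z == 0 (the common 2D case) and positive global-Z.
// Only 3D commands with a global order of 0 are split by opacity.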
void RenderQueue::push_back(RenderCommand* command)
{
    float z = command->getGlobalOrder();
    if(z < 0)
    {
        _commands[QUEUE_GROUP::GLOBALZ_NEG].push_back(command);
    }
    else if(z > 0)
    {
        _commands[QUEUE_GROUP::GLOBALZ_POS].push_back(command);
    }
    else
    {
        if(command->is3D())
        {
            if(command->isTransparent())
            {
                _commands[QUEUE_GROUP::TRANSPARENT_3D].push_back(command);
            }
            else
            {
                _commands[QUEUE_GROUP::OPAQUE_3D].push_back(command);
            }
        }
        else
        {
            _commands[QUEUE_GROUP::GLOBALZ_ZERO].push_back(command);
        }
    }
}

ssize_t RenderQueue::size() const
{
    ssize_t result(0);
    for(int index = 0; index < QUEUE_GROUP::QUEUE_COUNT; ++index)
    {
        result += _commands[index].size();
    }
    return result;
}

void RenderQueue::sort()
{
    // Don't sort _queue0, it already comes sorted
    std::stable_sort(std::begin(_commands[QUEUE_GROUP::TRANSPARENT_3D]), std::end(_commands[QUEUE_GROUP::TRANSPARENT_3D]), compare3DCommand);
    std::stable_sort(std::begin(_commands[QUEUE_GROUP::GLOBALZ_NEG]), std::end(_commands[QUEUE_GROUP::GLOBALZ_NEG]), compareRenderCommand);
    std::stable_sort(std::begin(_commands[QUEUE_GROUP::GLOBALZ_POS]), std::end(_commands[QUEUE_GROUP::GLOBALZ_POS]), compareRenderCommand);
}

RenderCommand* RenderQueue::operator[](ssize_t index) const
{
    for(int queIndex = 0; queIndex < QUEUE_GROUP::QUEUE_COUNT; ++queIndex)
    {
        if(index < static_cast<ssize_t>(_commands[queIndex].size()))
            return _commands[queIndex][index];
        else
        {
            index -= _commands[queIndex].size();
        }
    }

    CCASSERT(false, "invalid index");
    return nullptr;
}

void RenderQueue::clear()
{
    for(int i = 0; i < QUEUE_COUNT; ++i)
    {
        _commands[i].clear();
    }
}

void RenderQueue::realloc(size_t reserveSize)
{
    for(int i = 0; i < QUEUE_COUNT; ++i)
    {
        _commands[i] = std::vector<RenderCommand*>();
        _commands[i].reserve(reserveSize);
    }
}
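
// saveRenderState()/restoreRenderState() bracket the traversal of a queue so that
// the GL depth-test, depth-write and cull-face state active before the queue was
// visited is put back afterwards.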
void RenderQueue::saveRenderState()
{
    _isDepthEnabled = glIsEnabled(GL_DEPTH_TEST) != GL_FALSE;
    _isCullEnabled = glIsEnabled(GL_CULL_FACE) != GL_FALSE;
    glGetBooleanv(GL_DEPTH_WRITEMASK, &_isDepthWrite);

    CHECK_GL_ERROR_DEBUG();
}

void RenderQueue::restoreRenderState()
{
    if (_isCullEnabled)
    {
        glEnable(GL_CULL_FACE);
        RenderState::StateBlock::_defaultState->setCullFace(true);
    }
    else
    {
        glDisable(GL_CULL_FACE);
        RenderState::StateBlock::_defaultState->setCullFace(false);
    }

    if (_isDepthEnabled)
    {
        glEnable(GL_DEPTH_TEST);
        RenderState::StateBlock::_defaultState->setDepthTest(true);
    }
    else
    {
        glDisable(GL_DEPTH_TEST);
        RenderState::StateBlock::_defaultState->setDepthTest(false);
    }

    glDepthMask(_isDepthWrite);
    RenderState::StateBlock::_defaultState->setDepthWrite(_isDepthEnabled);

    CHECK_GL_ERROR_DEBUG();
}

//
//
//
static const int DEFAULT_RENDER_QUEUE = 0;

//
// constructors, destructor, init
//
Renderer::Renderer()
:_lastBatchedMeshCommand(nullptr)
,_filledVertex(0)
,_filledIndex(0)
,_glViewAssigned(false)
,_isRendering(false)
,_isDepthTestFor2D(false)
,_triBatchesToDraw(nullptr)
,_triBatchesToDrawCapacity(-1)
#if CC_ENABLE_CACHE_TEXTURE_DATA
,_cacheTextureListener(nullptr)
#endif
{
    _groupCommandManager = new (std::nothrow) GroupCommandManager();

    _commandGroupStack.push(DEFAULT_RENDER_QUEUE);

    RenderQueue defaultRenderQueue;
    _renderGroups.push_back(defaultRenderQueue);
    _queuedTriangleCommands.reserve(BATCH_TRIAGCOMMAND_RESERVED_SIZE);

    // default clear color
    _clearColor = Color4F::BLACK;

    // for the batched TriangleCommand
    _triBatchesToDrawCapacity = 500;
    _triBatchesToDraw = (TriBatchToDraw*) malloc(sizeof(_triBatchesToDraw[0]) * _triBatchesToDrawCapacity);
}

Renderer::~Renderer()
{
    _renderGroups.clear();
    _groupCommandManager->release();

    glDeleteBuffers(2, _buffersVBO);

    free(_triBatchesToDraw);

    if (Configuration::getInstance()->supportsShareableVAO())
    {
        glDeleteVertexArrays(1, &_buffersVAO);
        GL::bindVAO(0);
    }
#if CC_ENABLE_CACHE_TEXTURE_DATA
    Director::getInstance()->getEventDispatcher()->removeEventListener(_cacheTextureListener);
#endif
}

void Renderer::initGLView()
{
#if CC_ENABLE_CACHE_TEXTURE_DATA
    _cacheTextureListener = EventListenerCustom::create(EVENT_RENDERER_RECREATED, [this](EventCustom* event){
        /** listen for the event that the renderer was recreated on Android/WP8 */
        this->setupBuffer();
    });

    Director::getInstance()->getEventDispatcher()->addEventListenerWithFixedPriority(_cacheTextureListener, -1);
#endif

    setupBuffer();

    _glViewAssigned = true;
}

void Renderer::setupBuffer()
{
    if(Configuration::getInstance()->supportsShareableVAO())
    {
        setupVBOAndVAO();
    }
    else
    {
        setupVBO();
    }
}
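
// The VAO path binds the interleaved V3F_C4B_T2F layout (position, color, texture
// coordinates) once, so drawBatchedTriangles() only has to rebind the VAO; the
// plain-VBO path re-specifies the attribute pointers on every flush.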
void Renderer::setupVBOAndVAO()
{
    // generate VBO and VAO for TrianglesCommand
    glGenVertexArrays(1, &_buffersVAO);
    GL::bindVAO(_buffersVAO);

    glGenBuffers(2, &_buffersVBO[0]);

    glBindBuffer(GL_ARRAY_BUFFER, _buffersVBO[0]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(_verts[0]) * VBO_SIZE, _verts, GL_DYNAMIC_DRAW);

    // vertices
    glEnableVertexAttribArray(GLProgram::VERTEX_ATTRIB_POSITION);
    glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_POSITION, 3, GL_FLOAT, GL_FALSE, sizeof(V3F_C4B_T2F), (GLvoid*) offsetof(V3F_C4B_T2F, vertices));

    // colors
    glEnableVertexAttribArray(GLProgram::VERTEX_ATTRIB_COLOR);
    glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_COLOR, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(V3F_C4B_T2F), (GLvoid*) offsetof(V3F_C4B_T2F, colors));

    // tex coords
    glEnableVertexAttribArray(GLProgram::VERTEX_ATTRIB_TEX_COORD);
    glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_TEX_COORD, 2, GL_FLOAT, GL_FALSE, sizeof(V3F_C4B_T2F), (GLvoid*) offsetof(V3F_C4B_T2F, texCoords));

    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _buffersVBO[1]);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(_indices[0]) * INDEX_VBO_SIZE, _indices, GL_STATIC_DRAW);

    // Must unbind the VAO before changing the element buffer.
    GL::bindVAO(0);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    CHECK_GL_ERROR_DEBUG();
}

void Renderer::setupVBO()
{
    glGenBuffers(2, &_buffersVBO[0]);
    // Issue #15652
    // Should not initialize the VBO with a large size (VBO_SIZE = 65536),
    // as it may cause low FPS on some Android devices such as the LG G4 and Nexus 5X.
    // This is probably because some OpenGL ES driver implementations copy the whole
    // VBO memory allocated at initialization time whenever glBufferData/glBufferSubData
    // is invoked.
    // For more discussion, please refer to https://github.com/cocos2d/cocos2d-x/issues/15652
    //mapBuffers();
}

void Renderer::mapBuffers()
{
    // Avoid changing the element buffer for whatever VAO might be bound.
    GL::bindVAO(0);

    glBindBuffer(GL_ARRAY_BUFFER, _buffersVBO[0]);
    glBufferData(GL_ARRAY_BUFFER, sizeof(_verts[0]) * VBO_SIZE, _verts, GL_DYNAMIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _buffersVBO[1]);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(_indices[0]) * INDEX_VBO_SIZE, _indices, GL_STATIC_DRAW);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);

    CHECK_GL_ERROR_DEBUG();
}

void Renderer::addCommand(RenderCommand* command)
{
    int renderQueue = _commandGroupStack.top();
    addCommand(command, renderQueue);
}

void Renderer::addCommand(RenderCommand* command, int renderQueue)
{
    CCASSERT(!_isRendering, "Cannot add command while rendering");
    CCASSERT(renderQueue >= 0, "Invalid render queue");
    CCASSERT(command->getType() != RenderCommand::Type::UNKNOWN_COMMAND, "Invalid Command Type");

    _renderGroups[renderQueue].push_back(command);
}

void Renderer::pushGroup(int renderQueueID)
{
    CCASSERT(!_isRendering, "Cannot change render queue while rendering");
    _commandGroupStack.push(renderQueueID);
}

void Renderer::popGroup()
{
    CCASSERT(!_isRendering, "Cannot change render queue while rendering");
    _commandGroupStack.pop();
}

int Renderer::createRenderQueue()
{
    RenderQueue newRenderQueue;
    _renderGroups.push_back(newRenderQueue);
    return (int)_renderGroups.size() - 1;
}
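
// Dispatch a single command by type: TRIANGLES commands are accumulated in
// _queuedTriangleCommands for batching, MESH commands are batched by material ID,
// and GROUP/CUSTOM/BATCH/PRIMITIVE commands flush any pending batches and then
// execute immediately.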
void Renderer::processRenderCommand(RenderCommand* command)
{
    auto commandType = command->getType();
    if (RenderCommand::Type::TRIANGLES_COMMAND == commandType)
    {
        // flush other queues
        flush3D();

        auto cmd = static_cast<TrianglesCommand*>(command);

        // flush own queue when buffer is full
        if(_filledVertex + cmd->getVertexCount() > VBO_SIZE || _filledIndex + cmd->getIndexCount() > INDEX_VBO_SIZE)
        {
            CCASSERT(cmd->getVertexCount() >= 0 && cmd->getVertexCount() < VBO_SIZE, "VBO for vertex is not big enough, please break the data down or use customized render command");
            CCASSERT(cmd->getIndexCount() >= 0 && cmd->getIndexCount() < INDEX_VBO_SIZE, "VBO for index is not big enough, please break the data down or use customized render command");

            drawBatchedTriangles();
        }

        // queue it
        _queuedTriangleCommands.push_back(cmd);
        _filledIndex += cmd->getIndexCount();
        _filledVertex += cmd->getVertexCount();
    }
    else if (RenderCommand::Type::MESH_COMMAND == commandType)
    {
        flush2D();
        auto cmd = static_cast<MeshCommand*>(command);

        if (cmd->isSkipBatching() || _lastBatchedMeshCommand == nullptr || _lastBatchedMeshCommand->getMaterialID() != cmd->getMaterialID())
        {
            flush3D();

            CCGL_DEBUG_INSERT_EVENT_MARKER("RENDERER_MESH_COMMAND");

            if(cmd->isSkipBatching())
            {
                // XXX: execute() will call bind() and unbind(),
                // but unbind() shouldn't be called if the next command is a MESH_COMMAND with Material.
                // Once most of cocos2d-x moves to Pass/StateBlock, only bind() should be used.
                cmd->execute();
            }
            else
            {
                cmd->preBatchDraw();
                cmd->batchDraw();
                _lastBatchedMeshCommand = cmd;
            }
        }
        else
        {
            CCGL_DEBUG_INSERT_EVENT_MARKER("RENDERER_MESH_COMMAND");
            cmd->batchDraw();
        }
    }
    else if(RenderCommand::Type::GROUP_COMMAND == commandType)
    {
        flush();

        int renderQueueID = ((GroupCommand*) command)->getRenderQueueID();

        CCGL_DEBUG_PUSH_GROUP_MARKER("RENDERER_GROUP_COMMAND");
        visitRenderQueue(_renderGroups[renderQueueID]);
        CCGL_DEBUG_POP_GROUP_MARKER();
    }
    else if(RenderCommand::Type::CUSTOM_COMMAND == commandType)
    {
        flush();
        auto cmd = static_cast<CustomCommand*>(command);
        CCGL_DEBUG_INSERT_EVENT_MARKER("RENDERER_CUSTOM_COMMAND");
        cmd->execute();
    }
    else if(RenderCommand::Type::BATCH_COMMAND == commandType)
    {
        flush();
        auto cmd = static_cast<BatchCommand*>(command);
        CCGL_DEBUG_INSERT_EVENT_MARKER("RENDERER_BATCH_COMMAND");
        cmd->execute();
    }
    else if(RenderCommand::Type::PRIMITIVE_COMMAND == commandType)
    {
        flush();
        auto cmd = static_cast<PrimitiveCommand*>(command);
        CCGL_DEBUG_INSERT_EVENT_MARKER("RENDERER_PRIMITIVE_COMMAND");
        cmd->execute();
    }
    else
    {
        CCLOGERROR("Unknown commands in renderQueue");
    }
}
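
// Visit the five sub-queues of a RenderQueue in a fixed order: negative global-Z,
// 3D opaque, 3D transparent, global-Z == 0 and positive global-Z. Each group sets
// up the depth/blend/cull state it needs and ends with a flush() so batches never
// span groups.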
void Renderer::visitRenderQueue(RenderQueue& queue)
{
    queue.saveRenderState();

    //
    //Process Global-Z < 0 Objects
    //
    const auto& zNegQueue = queue.getSubQueue(RenderQueue::QUEUE_GROUP::GLOBALZ_NEG);
    if (zNegQueue.size() > 0)
    {
        if(_isDepthTestFor2D)
        {
            glEnable(GL_DEPTH_TEST);
            glDepthMask(true);
            glEnable(GL_BLEND);

            RenderState::StateBlock::_defaultState->setDepthTest(true);
            RenderState::StateBlock::_defaultState->setDepthWrite(true);
            RenderState::StateBlock::_defaultState->setBlend(true);
        }
        else
        {
            glDisable(GL_DEPTH_TEST);
            glDepthMask(false);
            glEnable(GL_BLEND);

            RenderState::StateBlock::_defaultState->setDepthTest(false);
            RenderState::StateBlock::_defaultState->setDepthWrite(false);
            RenderState::StateBlock::_defaultState->setBlend(true);
        }
        glDisable(GL_CULL_FACE);
        RenderState::StateBlock::_defaultState->setCullFace(false);

        for (const auto& zNegNext : zNegQueue)
        {
            processRenderCommand(zNegNext);
        }
        flush();
    }

    //
    //Process Opaque Object
    //
    const auto& opaqueQueue = queue.getSubQueue(RenderQueue::QUEUE_GROUP::OPAQUE_3D);
    if (opaqueQueue.size() > 0)
    {
        //Clear depth to achieve layered rendering
        glEnable(GL_DEPTH_TEST);
        glDepthMask(true);
        glDisable(GL_BLEND);
        glEnable(GL_CULL_FACE);

        RenderState::StateBlock::_defaultState->setDepthTest(true);
        RenderState::StateBlock::_defaultState->setDepthWrite(true);
        RenderState::StateBlock::_defaultState->setBlend(false);
        RenderState::StateBlock::_defaultState->setCullFace(true);

        for (const auto& opaqueNext : opaqueQueue)
        {
            processRenderCommand(opaqueNext);
        }
        flush();
    }

    //
    //Process 3D Transparent object
    //
    const auto& transQueue = queue.getSubQueue(RenderQueue::QUEUE_GROUP::TRANSPARENT_3D);
    if (transQueue.size() > 0)
    {
        glEnable(GL_DEPTH_TEST);
        glDepthMask(false);
        glEnable(GL_BLEND);
        glEnable(GL_CULL_FACE);

        RenderState::StateBlock::_defaultState->setDepthTest(true);
        RenderState::StateBlock::_defaultState->setDepthWrite(false);
        RenderState::StateBlock::_defaultState->setBlend(true);
        RenderState::StateBlock::_defaultState->setCullFace(true);

        for (const auto& transNext : transQueue)
        {
            processRenderCommand(transNext);
        }
        flush();
    }

    //
    //Process Global-Z = 0 Queue
    //
    const auto& zZeroQueue = queue.getSubQueue(RenderQueue::QUEUE_GROUP::GLOBALZ_ZERO);
    if (zZeroQueue.size() > 0)
    {
        if(_isDepthTestFor2D)
        {
            glEnable(GL_DEPTH_TEST);
            glDepthMask(true);
            glEnable(GL_BLEND);

            RenderState::StateBlock::_defaultState->setDepthTest(true);
            RenderState::StateBlock::_defaultState->setDepthWrite(true);
            RenderState::StateBlock::_defaultState->setBlend(true);
        }
        else
        {
            glDisable(GL_DEPTH_TEST);
            glDepthMask(false);
            glEnable(GL_BLEND);

            RenderState::StateBlock::_defaultState->setDepthTest(false);
            RenderState::StateBlock::_defaultState->setDepthWrite(false);
            RenderState::StateBlock::_defaultState->setBlend(true);
        }
        glDisable(GL_CULL_FACE);
        RenderState::StateBlock::_defaultState->setCullFace(false);

        for (const auto& zZeroNext : zZeroQueue)
        {
            processRenderCommand(zZeroNext);
        }
        flush();
    }

    //
    //Process Global-Z > 0 Queue
    //
    const auto& zPosQueue = queue.getSubQueue(RenderQueue::QUEUE_GROUP::GLOBALZ_POS);
    if (zPosQueue.size() > 0)
    {
        if(_isDepthTestFor2D)
        {
            glEnable(GL_DEPTH_TEST);
            glDepthMask(true);
            glEnable(GL_BLEND);

            RenderState::StateBlock::_defaultState->setDepthTest(true);
            RenderState::StateBlock::_defaultState->setDepthWrite(true);
            RenderState::StateBlock::_defaultState->setBlend(true);
        }
        else
        {
            glDisable(GL_DEPTH_TEST);
            glDepthMask(false);
            glEnable(GL_BLEND);

            RenderState::StateBlock::_defaultState->setDepthTest(false);
            RenderState::StateBlock::_defaultState->setDepthWrite(false);
            RenderState::StateBlock::_defaultState->setBlend(true);
        }
        glDisable(GL_CULL_FACE);
        RenderState::StateBlock::_defaultState->setCullFace(false);

        for (const auto& zPosNext : zPosQueue)
        {
            processRenderCommand(zPosNext);
        }
        flush();
    }

    queue.restoreRenderState();
}

void Renderer::render()
{
    //Uncomment this once everything is rendered by new renderer
    //glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    //TODO: setup camera or MVP
    _isRendering = true;

    if (_glViewAssigned)
    {
        //Process render commands
        //1. Sort render commands based on ID
        for (auto &renderqueue : _renderGroups)
        {
            renderqueue.sort();
        }
        visitRenderQueue(_renderGroups[0]);
    }
    clean();
    _isRendering = false;
}

void Renderer::clean()
{
    // Clear render group
    for (size_t j = 0, size = _renderGroups.size(); j < size; j++)
    {
        //commands are owned by nodes
        // for (const auto &cmd : _renderGroups[j])
        // {
        //     cmd->releaseToCommandPool();
        // }
        _renderGroups[j].clear();
    }

    // Clear batch commands
    _queuedTriangleCommands.clear();
    _filledVertex = 0;
    _filledIndex = 0;
    _lastBatchedMeshCommand = nullptr;
}

void Renderer::clear()
{
    //Enable depth mask to make sure glClear clears the depth buffer correctly
    glDepthMask(true);
    glClearColor(_clearColor.r, _clearColor.g, _clearColor.b, _clearColor.a);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glDepthMask(false);

    RenderState::StateBlock::_defaultState->setDepthWrite(false);
}

void Renderer::setDepthTest(bool enable)
{
    if (enable)
    {
        glClearDepth(1.0f);
        glEnable(GL_DEPTH_TEST);
        glDepthFunc(GL_LEQUAL);

        RenderState::StateBlock::_defaultState->setDepthTest(true);
        RenderState::StateBlock::_defaultState->setDepthFunction(RenderState::DEPTH_LEQUAL);

        // glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
    }
    else
    {
        glDisable(GL_DEPTH_TEST);

        RenderState::StateBlock::_defaultState->setDepthTest(false);
    }

    _isDepthTestFor2D = enable;
    CHECK_GL_ERROR_DEBUG();
}
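
// Copy one TrianglesCommand's geometry into the shared client-side arrays:
// vertices are pre-transformed on the CPU into world space with the command's
// model-view matrix, and indices are rebased by _filledVertex so every queued
// command can share a single VBO/IBO pair.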
void Renderer::fillVerticesAndIndices(const TrianglesCommand* cmd)
{
    memcpy(&_verts[_filledVertex], cmd->getVertices(), sizeof(V3F_C4B_T2F) * cmd->getVertexCount());

    // fill vertices, converting them to world coordinates
    const Mat4& modelView = cmd->getModelView();
    for(ssize_t i = 0; i < cmd->getVertexCount(); ++i)
    {
        modelView.transformPoint(&(_verts[i + _filledVertex].vertices));
    }

    // fill indices
    const unsigned short* indices = cmd->getIndices();
    for(ssize_t i = 0; i < cmd->getIndexCount(); ++i)
    {
        _indices[_filledIndex + i] = _filledVertex + indices[i];
    }

    _filledVertex += cmd->getVertexCount();
    _filledIndex += cmd->getIndexCount();
}
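
// Flush the queued TrianglesCommands: consecutive commands that share a material ID
// (and are not marked skip-batching) are merged into a single TriBatchToDraw, so a
// run like [matA, matA, matB, matA] produces three glDrawElements calls instead of four.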
void Renderer::drawBatchedTriangles()
{
    if(_queuedTriangleCommands.empty())
        return;

    CCGL_DEBUG_INSERT_EVENT_MARKER("RENDERER_BATCH_TRIANGLES");

    _filledVertex = 0;
    _filledIndex = 0;

    /************** 1: Set up vertices/indices *************/

    _triBatchesToDraw[0].offset = 0;
    _triBatchesToDraw[0].indicesToDraw = 0;
    _triBatchesToDraw[0].cmd = nullptr;

    int batchesTotal = 0;
    int prevMaterialID = -1;
    bool firstCommand = true;

    for(const auto& cmd : _queuedTriangleCommands)
    {
        auto currentMaterialID = cmd->getMaterialID();
        const bool batchable = !cmd->isSkipBatching();

        fillVerticesAndIndices(cmd);

        // in the same batch ?
        if (batchable && (prevMaterialID == currentMaterialID || firstCommand))
        {
            CC_ASSERT(firstCommand || _triBatchesToDraw[batchesTotal].cmd->getMaterialID() == cmd->getMaterialID() && "argh... error in logic");
            _triBatchesToDraw[batchesTotal].indicesToDraw += cmd->getIndexCount();
            _triBatchesToDraw[batchesTotal].cmd = cmd;
        }
        else
        {
            // is this the first one?
            if (!firstCommand) {
                batchesTotal++;
                _triBatchesToDraw[batchesTotal].offset = _triBatchesToDraw[batchesTotal-1].offset + _triBatchesToDraw[batchesTotal-1].indicesToDraw;
            }

            _triBatchesToDraw[batchesTotal].cmd = cmd;
            _triBatchesToDraw[batchesTotal].indicesToDraw = (int) cmd->getIndexCount();

            // is this a single batch ? Prevent creating a batch group then
            if (!batchable)
                currentMaterialID = -1;
        }

        // capacity full ?
        if (batchesTotal + 1 >= _triBatchesToDrawCapacity) {
            _triBatchesToDrawCapacity *= 1.4;
            _triBatchesToDraw = (TriBatchToDraw*) realloc(_triBatchesToDraw, sizeof(_triBatchesToDraw[0]) * _triBatchesToDrawCapacity);
        }

        prevMaterialID = currentMaterialID;
        firstCommand = false;
    }
    batchesTotal++;

    /************** 2: Copy vertices/indices to GL objects *************/
    auto conf = Configuration::getInstance();
    if (conf->supportsShareableVAO() && conf->supportsMapBuffer())
    {
        //Bind VAO
        GL::bindVAO(_buffersVAO);
        //Set VBO data
        glBindBuffer(GL_ARRAY_BUFFER, _buffersVBO[0]);

        // option 1: subdata
        //glBufferSubData(GL_ARRAY_BUFFER, sizeof(_quads[0])*start, sizeof(_quads[0]) * n , &_quads[start] );

        // option 2: data
        //glBufferData(GL_ARRAY_BUFFER, sizeof(_verts[0]) * _filledVertex, _verts, GL_STATIC_DRAW);

        // option 3: orphaning + glMapBuffer
        // FIXME: in order to work as fast as possible, it must use "the exact same size and usage hints it had before."
        //        source: https://www.opengl.org/wiki/Buffer_Object_Streaming#Explicit_multiple_buffering
        //        so most probably we won't have any benefit of using it
        glBufferData(GL_ARRAY_BUFFER, sizeof(_verts[0]) * _filledVertex, nullptr, GL_STATIC_DRAW);
        void *buf = glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
        memcpy(buf, _verts, sizeof(_verts[0]) * _filledVertex);
        glUnmapBuffer(GL_ARRAY_BUFFER);

        glBindBuffer(GL_ARRAY_BUFFER, 0);

        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _buffersVBO[1]);
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(_indices[0]) * _filledIndex, _indices, GL_STATIC_DRAW);
    }
    else
    {
        // Client Side Arrays
#define kQuadSize sizeof(_verts[0])
        glBindBuffer(GL_ARRAY_BUFFER, _buffersVBO[0]);

        glBufferData(GL_ARRAY_BUFFER, sizeof(_verts[0]) * _filledVertex, _verts, GL_DYNAMIC_DRAW);

        GL::enableVertexAttribs(GL::VERTEX_ATTRIB_FLAG_POS_COLOR_TEX);

        // vertices
        glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_POSITION, 3, GL_FLOAT, GL_FALSE, kQuadSize, (GLvoid*) offsetof(V3F_C4B_T2F, vertices));

        // colors
        glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_COLOR, 4, GL_UNSIGNED_BYTE, GL_TRUE, kQuadSize, (GLvoid*) offsetof(V3F_C4B_T2F, colors));

        // tex coords
        glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_TEX_COORD, 2, GL_FLOAT, GL_FALSE, kQuadSize, (GLvoid*) offsetof(V3F_C4B_T2F, texCoords));

        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _buffersVBO[1]);
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(_indices[0]) * _filledIndex, _indices, GL_STATIC_DRAW);
    }

    /************** 3: Draw *************/
    for (int i = 0; i < batchesTotal; ++i)
    {
        CC_ASSERT(_triBatchesToDraw[i].cmd && "Invalid batch");
        _triBatchesToDraw[i].cmd->useMaterial();
        glDrawElements(GL_TRIANGLES, (GLsizei) _triBatchesToDraw[i].indicesToDraw, GL_UNSIGNED_SHORT, (GLvoid*) (_triBatchesToDraw[i].offset*sizeof(_indices[0])) );
        _drawnBatches++;
        _drawnVertices += _triBatchesToDraw[i].indicesToDraw;
    }

    /************** 4: Cleanup *************/
    if (Configuration::getInstance()->supportsShareableVAO() && conf->supportsMapBuffer())
    {
        //Unbind VAO
        GL::bindVAO(0);
    }
    else
    {
        glBindBuffer(GL_ARRAY_BUFFER, 0);
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    }

    _queuedTriangleCommands.clear();
    _filledVertex = 0;
    _filledIndex = 0;
}

void Renderer::flush()
{
    flush2D();
    flush3D();
}

void Renderer::flush2D()
{
    flushTriangles();
}

void Renderer::flush3D()
{
    if (_lastBatchedMeshCommand)
    {
        CCGL_DEBUG_INSERT_EVENT_MARKER("RENDERER_BATCH_MESH");

        _lastBatchedMeshCommand->postBatchDraw();
        _lastBatchedMeshCommand = nullptr;
    }
}

void Renderer::flushTriangles()
{
    drawBatchedTriangles();
}

// helpers
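// Conservative screen-space culling: the node's content rect is reduced to its
// transformed center plus a world-space half extent, projected through the visiting
// camera, and tested against the visible rect enlarged by that extent.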
bool Renderer::checkVisibility(const Mat4 &transform, const Size &size)
{
    auto scene = Director::getInstance()->getRunningScene();

    // If drawing to a RenderTexture, return true directly.
    // Only cull for the default camera; the culling algorithm is only valid for the default camera.
    if (!scene || (scene && scene->_defaultCamera != Camera::getVisitingCamera()))
        return true;

    auto director = Director::getInstance();
    Rect visibleRect(director->getVisibleOrigin(), director->getVisibleSize());

    // transform center point to screen space
    float hSizeX = size.width/2;
    float hSizeY = size.height/2;
    Vec3 v3p(hSizeX, hSizeY, 0);
    transform.transformPoint(&v3p);
    Vec2 v2p = Camera::getVisitingCamera()->projectGL(v3p);

    // convert content size to world coordinates
    float wshw = std::max(fabsf(hSizeX * transform.m[0] + hSizeY * transform.m[4]), fabsf(hSizeX * transform.m[0] - hSizeY * transform.m[4]));
    float wshh = std::max(fabsf(hSizeX * transform.m[1] + hSizeY * transform.m[5]), fabsf(hSizeX * transform.m[1] - hSizeY * transform.m[5]));

    // enlarge visible rect half size in screen coord
    visibleRect.origin.x -= wshw;
    visibleRect.origin.y -= wshh;
    visibleRect.size.width += wshw * 2;
    visibleRect.size.height += wshh * 2;
    bool ret = visibleRect.containsPoint(v2p);
    return ret;
}

void Renderer::setClearColor(const Color4F &clearColor)
{
    _clearColor = clearColor;
}

NS_CC_END