diff --git a/.travis.yml b/.travis.yml index 80b6f4b78..b179f30c7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,3 @@ language: java jdk: - - openjdk7 - - oraclejdk7 + - openjdk8 diff --git a/LICENSE b/LICENSE index e06d20818..23cb79033 100644 --- a/LICENSE +++ b/LICENSE @@ -1,202 +1,339 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. 
(Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. 
To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + {description} + Copyright (C) {year} {fullname} + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + {signature of Ty Coon}, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/README.md b/README.md index 6e9ed7b62..e4fe272f2 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,6 @@ Mycat’s target is to smoothly migrate the current stand-alone database and app * [Getting Started](https://github.com/MyCATApache/Mycat-doc/tree/master/en) * [尝试 MyCAT](https://github.com/MyCATApache/Mycat-doc/blob/master/MyCat_In_Action_%E4%B8%AD%E6%96%87%E7%89%88.doc) - ## Features @@ -48,45 +47,379 @@ There are some compiled binary installation packages in Mycat-download project o There are some documents in Mycat-doc project on github at [Mycat-doc](https://github.com/MyCATApache/Mycat-doc). -## Play with Mycat -- Mycat All In One -All in one is the integrated developing & testing environment ,a centos 7 virtual machine with Mycat-server,Mycat-WEB,MYSQL,ZooKeeper installed . - -You can execute the following steps to get mycat working platform: -import CentOs7.voa ->* install Oracle VM VirtualBox ->* run Oracle VM VirtualBox ->* download mycat-all-in-one ,[press here to get all-in-one](http://pan.baidu.com/s/1qWMkJPM),password:v63y ->* File -> Import Appliances ->* choose the path you download CentOS 7.ova, choose the ova file ->* you can get everything done,just press next -> -setup virtual box ->* login user name : root, password: 123456 ->* run the multiple mysql instances by the commands shown as follow: -``` -mysqld_multi start -``` -setup Mycat -``` - cd /opt/mycat - ./bin/mycat start -``` -run zookeeper ->* cd /opt/zookeeper-3.4.6 ->* bin/zkServer.sh start ->* bin/zkCli.sh - -connect to Mycat --> this step should be done on you host computer ->* setup Navicat Premium ->* create a connection to Mycat with IP:8066 , username : test , password: test ->* if connection correct , you would see the database's name is TESTDB ->* then you could try everything you like ,just have fun ! - -pay attention to Mycat's IP in 'connect to Mycat' step ,
the IP address may different from your own host IP ,
-you need to justify the IP by rewrite the file under /etc/sysconfig/network-scripts/ifcfg-enp0s3 ,
then run - -``` -service network restart -```` -to let the modified file work -
\ No newline at end of file + +Mycat 简单demo,具体参考Mycat权威指南 + +官网 : mycat.io +qq官方群:106088787 +Mycat权威指南官方下载:http://songwie.com/attached/file/mycat_1.5.2.pdf +wiki: wiki + +# Mycat前世今生 + +2013年阿里的Cobar在社区使用过程中发现存在一些比较严重的问题,及其使用限制,经过Mycat发起人第一次改良,第一代改良版——Mycat诞生。 Mycat开源以后,一些Cobar的用户参与了Mycat的开发,最终Mycat发展成为一个由众多软件公司的实力派架构师和资深开发人员维护的社区型开源软件。 + +2014年Mycat首次在上海的《中华架构师》大会上对外宣讲,更多的人参与进来,随后越来越多的项目采用了Mycat。 + +2015年5月,由核心参与者们一起编写的第一本官方权威指南《Mycat权威指南》电子版发布,累计超过500本,成为开源项目中的首创。 + +2015年10月为止,Mycat项目总共有16个Committer。 + +截至2015年11月,超过300个项目采用Mycat,涵盖银行、电信、电子商务、物流、移动应用、O2O的众多领域和公司。 + +截至2015年12月,超过4000名用户加群或研究讨论或测试或使用Mycat。 + +Mycat是基于开源cobar演变而来,我们对cobar的代码进行了彻底的重构,使用NIO重构了网络模块,并且优化了Buffer内核,增强了聚合,Join等基本特性,同时兼容绝大多数数据库成为通用的数据库中间件。1.4 版本以后 完全的脱离基本cobar内核,结合Mycat集群管理、自动扩容、智能优化,成为高性能的中间件。我们致力于开发高性能数据库中间而努力。永不收费,永不闭源,持续推动开源社区的发展。 + +Mycat吸引和聚集了一大批业内大数据和云计算方面的资深工程师,Mycat的发展壮大基于开源社区志愿者的持续努力,感谢社区志愿者的努力让Mycat更加强大,同时我们也欢迎社区更多的志愿者,特别是公司能够参与进来,参与Mycat的开发,一起推动社区的发展,为社区提供更好的开源中间件。 + +Mycat还不够强大,Mycat还有很多不足,欢迎社区志愿者的持续优化改进。 + +# 关键特性 +支持SQL92标准 + +遵守Mysql原生协议,跨语言,跨平台,跨数据库的通用中间件代理。 + +基于心跳的自动故障切换,支持读写分离,支持MySQL主从,以及galera cluster集群。 + +支持Galera for MySQL集群,Percona Cluster或者MariaDB cluster + +基于Nio实现,有效管理线程,高并发问题。 + +支持数据的多片自动路由与聚合,支持sum,count,max等常用的聚合函数。 + +支持单库内部任意join,支持跨库2表join,甚至基于caltlet的多表join。 + +支持通过全局表,ER关系的分片策略,实现了高效的多表join查询。 + +支持多租户方案。 + +支持分布式事务(弱xa)。 + +支持全局序列号,解决分布式下的主键生成问题。 + +分片规则丰富,插件化开发,易于扩展。 + +强大的web,命令行监控。 + +支持前端作为mysq通用代理,后端JDBC方式支持Oracle、DB2、SQL Server 、 mongodb 、巨杉。 + +支持密码加密 + +支持服务降级 + +支持IP白名单 + +支持SQL黑名单、sql注入攻击拦截 + +支持分表(1.6) + +集群基于ZooKeeper管理,在线升级,扩容,智能优化,大数据处理(2.0开发版)。 + + +# Mycat安装与使用 + +## 下载: +[https://github.com/MyCATApache/Mycat-download](https://github.com/MyCATApache/Mycat-download) +具体下载哪个版本以发布为准,推荐1.4,1.5. 
+ +## 安装: +下载的文件直接解压即可。 + +## 运行: +### linux: + ./mycat start 启动 + + ./mycat stop 停止 + + ./mycat console 前台运行 + + ./mycat install 添加到系统自动启动(暂未实现) + + ./mycat remove 取消随系统自动启动(暂未实现) + + ./mycat restart 重启服务 + + ./mycat pause 暂停 + + ./mycat status 查看启动状态 + +### win: +直接运行startup_nowrap.bat,如果出现闪退,在cmd 命令行运行,查看出错原因。 + +## 内存配置: +启动前,一般需要修改JVM配置参数,打开conf/wrapper.conf文件,如下行的内容为2G和2048,可根据本机配置情况修改为512M或其它值。 +以下配置跟jvm参数完全一致,可以根据自己的jvm参数调整。 + +Java Additional Parameters + +wrapper.java.additional.1= + +wrapper.java.additional.1=-DMYCAT_HOME=. + +wrapper.java.additional.2=-server + +wrapper.java.additional.3=-XX:MaxPermSize=64M + +wrapper.java.additional.4=-XX:+AggressiveOpts + +wrapper.java.additional.5=-XX:MaxDirectMemorySize=100m + +wrapper.java.additional.6=-Dcom.sun.management.jmxremote + +wrapper.java.additional.7=-Dcom.sun.management.jmxremote.port=1984 + +wrapper.java.additional.8=-Dcom.sun.management.jmxremote.authenticate=false + +wrapper.java.additional.9=-Dcom.sun.management.jmxremote.ssl=false + +wrapper.java.additional.10=-Xmx100m + +wrapper.java.additional.11=-Xms100m + +wrapper.java.additional.12=-XX:+UseParNewGC + +wrapper.java.additional.13=-XX:+UseConcMarkSweepGC + +wrapper.java.additional.14=-XX:+UseCMSCompactAtFullCollection + +wrapper.java.additional.15=-XX:CMSFullGCsBeforeCompaction=0 + +wrapper.java.additional.16=-XX:CMSInitiatingOccupancyFraction=70 + + +以下配置作废: + +wrapper.java.initmemory=3 + +wrapper.java.maxmemory=64 + +### Mycat连接测试: +测试mycat与测试mysql完全一致,mysql怎么连接,mycat就怎么连接。 + +推荐先采用命令行测试: + +mysql -uroot -proot -P8066 -h127.0.0.1 + +如果采用工具连接,1.4,1.3目前部分工具无法连接,会提示database not selected,建议采用高版本,navicat测试。1.5已经修复了部分工具连接。 + + +# Mycat配置入门 + +## 配置: +--bin 启动目录 + +--conf 配置文件存放配置文件: + + --server.xml:是Mycat服务器参数调整和用户授权的配置文件。 + + --schema.xml:是逻辑库定义和表以及分片定义的配置文件。 + + --rule.xml: 是分片规则的配置文件,分片规则的具体一些参数信息单独存放为文件,也在这个目录下,配置文件修改需要重启MyCAT。 + + --log4j.xml: 日志存放在logs/log中,每天一个文件,日志的配置是在conf/log4j.xml中,根据自己的需要可以调整输出级别为debug debug级别下,会输出更多的信息,方便排查问题。 + + 
--autopartition-long.txt,partition-hash-int.txt,sequence_conf.properties, sequence_db_conf.properties 分片相关的id分片规则配置文件 + + --lib MyCAT自身的jar包或依赖的jar包的存放目录。 + + --logs MyCAT日志的存放目录。日志存放在logs/log中,每天一个文件 + +下面图片描述了Mycat最重要的3大配置文件: +

+ +

+ +## 逻辑库配置: +### 配置server.xml +添加两个mycat逻辑库:user,pay: +system 参数是所有的mycat参数配置,比如添加解析器:defaultSqlParser,其他类推 +user 是用户参数。 + + + + druidparser + + + + + + mycat + + user,pay + + + +### 编辑schema.xml +修改dataHost和schema对应的连接信息,user,pay 垂直切分后的配置如下所示: + +schema 是实际逻辑库的配置,user,pay分别对应两个逻辑库,多个schema代表多个逻辑库。 + +dataNode是逻辑库对应的分片,如果配置多个分片只需要多个dataNode即可。 + +dataHost是实际的物理库配置地址,可以配置多主主从等其他配置,多个dataHost代表分片对应的物理库地址,下面的writeHost、readHost代表该分片是否配置多写,主从,读写分离等高级特性。 + +以下例子配置了两个writeHost为主从。 + + + + + + + + + + + + select 1 + + + + + + + +# Mycat逻辑库、系统参数配置 + +## 配置Mycat环境参数 + + + + + druidparser + + + +如例子中配置的所有的Mycat参数变量都是配置在server.xml 文件中,system标签下配置所有的参数,如果需要配置某个变量添加相应的配置即可,例如添加启动端口8066,默认为8066: + + 8066 + +其他所有变量类似。 + +## 配置Mycat逻辑库与用户 + + + + + + mycat + TESTDB + + + + +如例子中配置的所有的Mycat连接的用户与逻辑库映射都是配置在server.xml 文件中,user标签下配置所有的参数,例如例子中配置了一个mycat用户供应用连接到mycat,同时mycat 在schema.xml中配置后了一个逻辑库TESTDB,配置好逻辑库与用户的映射关系。 + + +# 逻辑库、表分片配置 + +## 配置逻辑库(schema) + +Mycat作为一个中间件,实现mysql协议那么对前端应用连接来说就是一个数据库,也就有数据库的配置,mycat的数据库配置是在schema.xml中配置,配置好后映射到server.xml里面的用户就可以了。 + + + + + + +
+
+ + + + + + show status like 'wsrep%' + + + + + +上面例子配置了一个逻辑库TESTDB,同时配置了t_user,ht_jy_login_log两个分片表。 + +### 逻辑表配置 +
+ +table 标签 是逻辑表的配置 其中 + +name代表表名, + +dataNode代表表对应的分片, + +Mycat默认采用分库方式,也就是一个表映射到不同的库上, + +rule代表表要采用的数据切分方式,名称对应到rule.xml中的对应配置,如果要分片必须配置。 + + +## 配置分片(dataNode) + + + + +表切分后需要配置映射到哪几个数据库中,Mycat的分片实际上就是库的别名,例如上面例子配置了两个分片dn1,dn2 分别对应到物理机映射dataHost +localhost1 的两个库上。 + +## 配置物理库分片映射(dataHost) + + + show status like 'wsrep%' + + + + +Mycat作为数据库代理需要逻辑库,逻辑用户,表切分后需要配置分片,分片也就需要映射到真实的物理主机上,至于是映射到一台还是一台的多个实例上,Mycat并不关心,只需要配置好映射即可,例如例子中: + +配置了一个名为localhost1的物理主机(dataHost)映射。 + +heartbeat 标签代表Mycat需要对物理库心跳检测的语句,正常情况下生产案例可能配置主从,或者多写 或者单库,无论哪种情况Mycat都需要维持到数据库的数据源连接,因此需要定时检查后端连接可以性,心跳语句就是来作为心跳检测。 + +writeHost 此标签代表 一个逻辑主机(dataHost)对应的后端的物理主机映射,例如例子中写库hostM1 映射到127.0.0.1:3306。如果后端需要做读写分离或者多写 或者主从则通过配置 多个writeHost 或者readHost即可。 + +dataHost 标签中的 writeType balance 等标签则是不同的策略,具体参考指南。 + +# Mycat 表切分规则配置 + +## 表切分规则 + + + + + + + + createTime + sharding-by-hour + + + + + 24 + + + + +数据切分中作为表切分规则中最重要的配置,表的切分方式决定了数据切分后的性能好坏,因此也是最重要的配置。 + +如上面例子配置了一个切分规则,名为sharding-by-hour 对应的切分方式(function )是按日期切分,该配置中: + +### tableRule + +name 为schema.xml 中table 标签中对应的 rule="sharding-by-hour" ,也就是配置表的分片规则, + +columns 是表的切分字段: createTime 创建日期。 + +algorithm 是规则对应的切分规则:映射到function 的name。 + + +### function + +function 配置是分片规则的配置。 + +name 为切分规则的名称,名字人员取,但是需要与tableRule 中匹配。 + +class 是切分规则对应的切分类,写死,需要哪种规则则配置哪种,例如本例子是按小时分片:org.opencloudb.route.function.LatestMonthPartion + +property 标签是切分规则对应的不同属性,不同的切分规则配置不同。 + + diff --git a/README_Chinese.md b/README_Chinese.md index 5ff2f24e4..8ff63cf1a 100644 --- a/README_Chinese.md +++ b/README_Chinese.md @@ -3,7 +3,7 @@ ### 官网:[http://www.mycat.org.cn](http://www.mycat.org.cn) ### github:[https://github.com/MyCATApache](https://github.com/MyCATApache) -##### 入门: [zh-CN: https://github.com/MyCATApache/Mycat-doc/blob/master/MyCat_In_Action_%E4%B8%AD%E6%96%87%E7%89%88.doc] [English:https://github.com/MyCATApache/Mycat-doc/tree/master/en] +##### 入门: [zh-CN: https://github.com/MyCATApache/Mycat-doc/blob/master/history/MyCat_In_Action_%E4%B8%AD%E6%96%87%E7%89%88.doc] 
[English:https://github.com/MyCATApache/Mycat-doc/tree/master/en] 什么是Mycat?简单的说,Mycat就是: @@ -57,55 +57,3 @@ github上面的Mycat-download项目是编译好的二进制安装包 [https://gi ##### 文档: github上面的Mycat-doc项目是相关文档 [https://github.com/MyCATApache/Mycat-doc](https://github.com/MyCATApache/Mycat-doc) - -##### 尝试 Mycat -- Mycat All in One -在这里我们为您提供了集 mycat-server,mycat-web,mysql,zookeeper 于一身的测试开发环境,是您开发测试必备良器, -您只需要执行如下几个步骤便可开启属于您的 mycat 之旅 : - -> 导入 OVA ->* 安装Oracle VM VirtualBox ->* 启动Oracle VM VirtualBox ->* 下载 mycat-all-in-one 镜像文件,[戳这里下载all-in-one镜像](http://pan.baidu.com/s/1qWMkJPM),密码:v63y ->* File(管理) -> Import Appliances(导入虚拟电脑)<网络模式首选桥接模式> ->* 选择CentOS 7.ova ->* 一路Next - -> 启动虚拟机 ->* 登录虚拟机 root/123456 ->* 启动多实例Mysql - - ``` - mysqld_multi start - ``` - -> 启动 Mycat - ``` - cd /opt/mycat/ - ./bin/mycat start - ``` - > ZK启动 - -``` - cd /opt/zookeeper-3.4.6 - bin/zkServer.sh start - bin/zkCli.sh -``` -> 体验 Mycat - >* 启动Navicat Premium - >* 连接Mycat,IP:8066 test/test - >* 连接TESTDB - >* 测试 - -``` - select * from t_user; -``` -请留意 '体验 Mycat'该步骤中的 IP 地址的设定,虚拟机中 IP 地址若与主机地址不匹配会引发连接失败的情况, -此时可以将 虚拟机IP 地址修改静态IP地址来解决,修改位于路径 -```` -/etc/sysconfig/network-scripts/ifcfg-enp0s3 -```` -下面的文件,然后运行命令 -```` -service network restart -```` -来让刚刚修改过的文件生效即可 \ No newline at end of file diff --git a/pom.xml b/pom.xml index 743fc1773..f8190fe96 100644 --- a/pom.xml +++ b/pom.xml @@ -2,9 +2,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0%20http://maven.apache.org/maven-v4_0_0.xsd"> 4.0.0 - io.mycat.mycat + io.mycat Mycat-server - 2.0-dev + 1.6.5-release jar Mycat-server The project of Mycat-server @@ -13,7 +13,7 @@ UTF-8 + ${maven.build.timestamp} yyyy-MM-dd HH:mm:ss version.txt.template version.txt @@ -43,26 +43,8 @@ - - - org.codehaus.jsr166-mirror - jsr166y - 1.7.0 - test - - - junit - junit - 4.4 - test - - - - - mysql - mysql-connector-java - 5.1.35 - + org.mongodb mongo-java-driver @@ -78,103 +60,177 @@ leveldb-api 0.7 - - com.sequoiadb - sequoiadb-driver - 1.12 - - - 
com.google.guava guava - 18.0 + 19.0 - com.google.code.findbugs - jsr305 - 3.0.0 + com.alibaba + druid + 1.0.26 - commons-beanutils - commons-beanutils - 1.9.2 + mysql + mysql-connector-java + 5.1.35 - - - com.univocity - univocity-parsers - 1.5.4 - jar + net.sf.ehcache + ehcache-core + 2.6.11 + compile - com.alibaba - druid - 1.0.14 + org.mapdb + mapdb + 1.0.7 - org.yaml - snakeyaml - 1.16 + junit + junit + 4.4 + provided + + org.apache.velocity + velocity + 1.7 + - com.alibaba - fastjson - 1.2.7 + org.codehaus.jsr166-mirror + jsr166y + 1.7.0 + test + + com.lmax + disruptor + 3.3.4 + + + org.apache.logging.log4j + log4j-slf4j-impl + 2.5 + + + org.apache.logging.log4j + log4j-core + 2.5 + + + org.apache.logging.log4j + log4j-1.2-api + 2.5 + - org.apache.velocity - velocity - 1.7 + com.univocity + univocity-parsers + 2.2.1 + jar - + - org.mapdb - mapdb - 1.0.7 + com.sequoiadb + sequoiadb-driver + 1.12 + + + - net.sf.ehcache - ehcache-core - 2.6.11 - compile + dom4j + dom4j + 1.6.1 + + + xml-apis + xml-apis + + org.apache.curator curator-framework - 2.9.0 - + 2.11.0 - - - org.slf4j - slf4j-api - 1.7.12 - compile + org.apache.curator + curator-recipes + 2.11.0 - org.apache.logging.log4j - log4j-slf4j-impl - 2.3 + org.apache.curator + curator-test + 2.11.0 + test + + + log4j + log4j + + - org.apache.logging.log4j - log4j-core - 2.3 + com.alibaba + fastjson + 1.2.12 - - + joda-time joda-time - 2.8.2 + 2.9.3 + + com.github.shyiko + mysql-binlog-connector-java + 0.6.0 + + + + + org.mockito + mockito-all + 1.8.5 + test + + + + com.google.code.findbugs + jsr305 + 2.0.3 + + + + com.esotericsoftware.kryo + kryo + 2.10 + + + + org.hamcrest + hamcrest-library + 1.3 + + + + + + commons-lang + commons-lang + 2.6 + + + + io.netty + netty-buffer + 4.1.9.Final + @@ -287,12 +343,12 @@ replace - ${project.basedir}/src/main/java/io/mycat/server/Versions.template - ${project.basedir}/src/main/java/io/mycat/server/Versions.java + 
${project.basedir}/src/main/java/io/mycat/config/Versions.template + ${project.basedir}/src/main/java/io/mycat/config/Versions.java @server-version@ - 5.5.8-mycat-${project.version}-${timestamp} + 5.6.29-mycat-${project.version}-${timestamp} @@ -305,11 +361,12 @@ org.apache.maven.plugins maven-compiler-plugin - 1.8 - 1.8 + 1.7 + 1.7 ${app.encoding} + org.apache.maven.plugins maven-source-plugin @@ -364,6 +421,7 @@ + org.codehaus.mojo @@ -391,15 +449,15 @@ MYCAT_HOME=. - -server + -server -XX:MaxPermSize=64M -XX:+AggressiveOpts -XX:MaxDirectMemorySize=2G - -Dcom.sun.management.jmxremote + -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=1984 - -Dcom.sun.management.jmxremote.authenticate=false - -Dcom.sun.management.jmxremote.ssl=false + -Dcom.sun.management.jmxremote.authenticate=false + -Dcom.sun.management.jmxremote.ssl=false -Xmx4G -Xms1G @@ -430,14 +488,22 @@ configuration.directory.in.classpath.first conf - - wrapper.ping.timeout - 120 - + + wrapper.ping.timeout + 120 + set.default.REPO_DIR lib + + wrapper.logfile.maxsize + 512m + + + wrapper.logfile.maxfiles + 30 + wrapper.logfile logs/wrapper.log diff --git a/src/main/assembly/assembly-linux.xml b/src/main/assembly/assembly-linux.xml index 1737c8517..618ed4057 100644 --- a/src/main/assembly/assembly-linux.xml +++ b/src/main/assembly/assembly-linux.xml @@ -37,7 +37,7 @@ mycat/conf *.dtd - log4j.* + log4j* diff --git a/src/main/assembly/assembly-mac.xml b/src/main/assembly/assembly-mac.xml index 3b410409e..8c06476ba 100644 --- a/src/main/assembly/assembly-mac.xml +++ b/src/main/assembly/assembly-mac.xml @@ -37,7 +37,7 @@ mycat/conf *.dtd - log4j.* + log4j* diff --git a/src/main/assembly/assembly-solaris.xml b/src/main/assembly/assembly-solaris.xml index d56385814..2fc2b33e1 100644 --- a/src/main/assembly/assembly-solaris.xml +++ b/src/main/assembly/assembly-solaris.xml @@ -37,7 +37,7 @@ mycat/conf *.dtd - log4j.* + log4j* diff --git a/src/main/assembly/assembly-unix.xml 
b/src/main/assembly/assembly-unix.xml index 4e78dae7b..5b3e0d425 100644 --- a/src/main/assembly/assembly-unix.xml +++ b/src/main/assembly/assembly-unix.xml @@ -38,7 +38,7 @@ mycat/conf *.dtd - log4j.* + log4j* diff --git a/src/main/assembly/assembly-win.xml b/src/main/assembly/assembly-win.xml index 53f316ddc..9d10d0e45 100644 --- a/src/main/assembly/assembly-win.xml +++ b/src/main/assembly/assembly-win.xml @@ -37,7 +37,7 @@ mycat/conf *.dtd - log4j.* + log4j* diff --git a/src/main/assembly/bin/create_zookeeper_data.bat b/src/main/assembly/bin/create_zookeeper_data.bat deleted file mode 100644 index fd16c7d1f..000000000 --- a/src/main/assembly/bin/create_zookeeper_data.bat +++ /dev/null @@ -1,17 +0,0 @@ - -REM check JAVA_HOME & java -set "JAVA_CMD="%JAVA_HOME%/bin/java"" -if "%JAVA_HOME%" == "" goto noJavaHome -if exist "%JAVA_HOME%\bin\java.exe" goto mainEntry -:noJavaHome -echo --------------------------------------------------- -echo WARN: JAVA_HOME environment variable is not set. -echo --------------------------------------------------- -set "JAVA_CMD=java" -:mainEntry -REM set HOME_DIR -set "CURR_DIR=%cd%" -cd .. -set "MYCAT_HOME=%cd%" -cd %CURR_DIR% -"%JAVA_CMD%" -Xms256M -Xmx1G -XX:MaxPermSize=64M -DMYCAT_HOME=%MYCAT_HOME% -cp "..\conf;..\lib\*" demo.ZkCreate \ No newline at end of file diff --git a/src/main/assembly/bin/create_zookeeper_data.sh b/src/main/assembly/bin/create_zookeeper_data.sh deleted file mode 100755 index 2029329ff..000000000 --- a/src/main/assembly/bin/create_zookeeper_data.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -echo "check JAVA_HOME & java" -JAVA_CMD=$JAVA_HOME/bin/java -MAIN_CLASS=demo.ZkCreate -if [ ! -d "$JAVA_HOME" ]; then - echo --------------------------------------------------- - echo WARN: JAVA_HOME environment variable is not set. - echo --------------------------------------------------- - JAVA_CMD=java -fi - -echo "---------set HOME_DIR------------" -CURR_DIR=`pwd` -cd .. 
-MYCAT_HOME=`pwd` -cd $CURR_DIR -$JAVA_CMD -Xms256M -Xmx1G -XX:MaxPermSize=64M -DMYCAT_HOME=$MYCAT_HOME -cp "$MYCAT_HOME/conf:$MYCAT_HOME/lib/*" $MAIN_CLASS diff --git a/src/main/assembly/conf/log4j.xml b/src/main/assembly/conf/log4j.xml deleted file mode 100644 index 60f98a449..000000000 --- a/src/main/assembly/conf/log4j.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/src/main/assembly/conf/log4j2.xml b/src/main/assembly/conf/log4j2.xml new file mode 100644 index 000000000..dfe0dc212 --- /dev/null +++ b/src/main/assembly/conf/log4j2.xml @@ -0,0 +1,32 @@ + + + + + + + + + + %d{yyyy-MM-dd HH:mm:ss.SSS} %5p [%t] (%l) - %m%n + + + + + + + + + + + + + + + + + + + + + diff --git a/src/main/conf/dnindex.properties b/src/main/conf/dnindex.properties deleted file mode 100644 index da2b00c13..000000000 --- a/src/main/conf/dnindex.properties +++ /dev/null @@ -1,3 +0,0 @@ -#update -#Thu Sep 10 16:14:18 CST 2015 -jdbchost=0 diff --git a/src/main/java/io/mycat/MycatServer.java b/src/main/java/io/mycat/MycatServer.java index 77c61d8b0..484323cc3 100644 --- a/src/main/java/io/mycat/MycatServer.java +++ b/src/main/java/io/mycat/MycatServer.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. 
* @@ -16,95 +16,218 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. * */ package io.mycat; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.channels.AsynchronousChannelGroup; +import java.util.Collection; +import java.util.Iterator; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +import io.mycat.buffer.NettyBufferPool; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.recipes.locks.InterProcessMutex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.io.Files; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; -import io.mycat.backend.PhysicalDBPool; +import io.mycat.backend.BackendConnection; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.mysql.nio.handler.MultiNodeCoordinator; +import io.mycat.backend.mysql.xa.CoordinatorLogEntry; +import io.mycat.backend.mysql.xa.ParticipantLogEntry; +import io.mycat.backend.mysql.xa.TxState; +import 
io.mycat.backend.mysql.xa.XARollbackCallback; +import io.mycat.backend.mysql.xa.recovery.Repository; +import io.mycat.backend.mysql.xa.recovery.impl.FileSystemRepository; +import io.mycat.buffer.BufferPool; +import io.mycat.buffer.DirectByteBufferPool; import io.mycat.cache.CacheService; -import io.mycat.net.*; +import io.mycat.config.MycatConfig; +import io.mycat.config.classloader.DynaClassLoader; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.config.table.structure.MySQLTableStructureDetector; +import io.mycat.manager.ManagerConnectionFactory; +import io.mycat.memory.MyCatMemory; +import io.mycat.net.AIOAcceptor; +import io.mycat.net.AIOConnector; +import io.mycat.net.NIOAcceptor; +import io.mycat.net.NIOConnector; +import io.mycat.net.NIOProcessor; +import io.mycat.net.NIOReactorPool; +import io.mycat.net.SocketAcceptor; +import io.mycat.net.SocketConnector; import io.mycat.route.MyCATSequnceProcessor; import io.mycat.route.RouteService; -import io.mycat.server.MySQLFrontConnectionFactory; -import io.mycat.server.MySQLFrontConnectionHandler; -import io.mycat.server.classloader.DynaClassLoader; -import io.mycat.server.config.ConfigException; -import io.mycat.server.config.cluster.ClusterSync; -import io.mycat.server.config.loader.ConfigFactory; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.config.node.SystemConfig; +import io.mycat.route.factory.RouteStrategyFactory; +import io.mycat.route.sequence.handler.SequenceHandler; +import io.mycat.server.ServerConnectionFactory; import io.mycat.server.interceptor.SQLInterceptor; import io.mycat.server.interceptor.impl.GlobalTableUtil; +import io.mycat.sqlengine.OneRawSQLQueryResultHandler; +import io.mycat.sqlengine.SQLJob; +import io.mycat.statistic.SQLRecorder; +import 
io.mycat.statistic.stat.SqlResultSizeRecorder; +import io.mycat.statistic.stat.UserStat; +import io.mycat.statistic.stat.UserStatAnalyzer; +import io.mycat.util.ExecutorUtil; +import io.mycat.util.NameableExecutor; import io.mycat.util.TimeUtil; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.channels.AsynchronousChannelGroup; -import java.util.Map; -import java.util.Timer; -import java.util.TimerTask; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; +import io.mycat.util.ZKUtils; /** * @author mycat */ public class MycatServer { + public static final String NAME = "MyCat"; private static final long LOG_WATCH_DELAY = 60000L; private static final long TIME_UPDATE_PERIOD = 20L; + private static final long DEFAULT_SQL_STAT_RECYCLE_PERIOD = 5 * 1000L; + private static final long DEFAULT_OLD_CONNECTION_CLEAR_PERIOD = 5 * 1000L; + private static final MycatServer INSTANCE = new MycatServer(); private static final Logger LOGGER = LoggerFactory.getLogger("MycatServer"); + private static final Repository fileRepository = new FileSystemRepository(); private final RouteService routerService; private final CacheService cacheService; + private Properties dnIndexProperties; + + //AIO连接群组 private AsynchronousChannelGroup[] asyncChannelGroups; private volatile int channelIndex = 0; - private final MyCATSequnceProcessor sequnceProcessor = new MyCATSequnceProcessor(); + + //全局序列号 +// private final MyCATSequnceProcessor sequnceProcessor = new MyCATSequnceProcessor(); private final DynaClassLoader catletClassLoader; private final SQLInterceptor sqlInterceptor; + private volatile int nextProcessor; + + // System Buffer Pool Instance + private BufferPool bufferPool; + private boolean aio = false; + + //XA事务全局ID生成 private final AtomicLong xaIDInc = new AtomicLong(); + //sequence处理对象 + private SequenceHandler 
sequenceHandler; + + /** + * Mycat 内存管理类 + */ + private MyCatMemory myCatMemory = null; public static final MycatServer getInstance() { return INSTANCE; } private final MycatConfig config; - private final Timer timer; + private final ScheduledExecutorService scheduler; + private final ScheduledExecutorService heartbeatScheduler; + private final SQLRecorder sqlRecorder; private final AtomicBoolean isOnline; private final long startupTime; - private NamebleScheduledExecutor timerExecutor; + private NIOProcessor[] processors; + private SocketConnector connector; + private NameableExecutor businessExecutor; + private NameableExecutor sequenceExecutor; + private NameableExecutor timerExecutor; private ListeningExecutorService listeningExecutorService; + private InterProcessMutex dnindexLock; + private long totalNetWorkBufferSize = 0; - private ClusterSync clusterSync; - - public MycatServer() { + private final AtomicBoolean startup=new AtomicBoolean(false); + private MycatServer() { + + //读取文件配置 this.config = new MycatConfig(); - this.timer = new Timer(NAME + "Timer", true); + + //定时线程池,单线程线程池 + scheduler = Executors.newSingleThreadScheduledExecutor(); + + //心跳调度独立出来,避免被其他任务影响 + heartbeatScheduler = Executors.newSingleThreadScheduledExecutor(); + + //SQL记录器 + this.sqlRecorder = new SQLRecorder(config.getSystem().getSqlRecordCount()); + + /** + * 是否在线,MyCat manager中有命令控制 + * | offline | Change MyCat status to OFF | + * | online | Change MyCat status to ON | + */ this.isOnline = new AtomicBoolean(true); + + //缓存服务初始化 cacheService = new CacheService(); + + //路由计算初始化 routerService = new RouteService(cacheService); + + // load datanode active index from properties + dnIndexProperties = loadDnIndexProps(); try { + //SQL解析器 sqlInterceptor = (SQLInterceptor) Class.forName( config.getSystem().getSqlInterceptor()).newInstance(); } catch (Exception e) { throw new RuntimeException(e); } + + //catlet加载器 catletClassLoader = new DynaClassLoader(SystemConfig.getHomePath() - + 
File.separator + "catlet", config.getSystem() - .getCatletClassCheckSeconds()); + + File.separator + "catlet", config.getSystem().getCatletClassCheckSeconds()); + + //记录启动时间 + this.startupTime = TimeUtil.currentTimeMillis(); + if(isUseZkSwitch()) { + String path= ZKUtils.getZKBasePath()+"lock/dnindex.lock"; + dnindexLock = new InterProcessMutex(ZKUtils.getConnection(), path); + } + } - this.startupTime = TimeUtil.currentTimeMillis(); + public AtomicBoolean getStartup() { + return startup; + } + + public long getTotalNetWorkBufferSize() { + return totalNetWorkBufferSize; + } + + public BufferPool getBufferPool() { + return bufferPool; + } + + public NameableExecutor getTimerExecutor() { + return timerExecutor; } public DynaClassLoader getCatletClassLoader() { @@ -112,25 +235,42 @@ public DynaClassLoader getCatletClassLoader() { } public MyCATSequnceProcessor getSequnceProcessor() { - return sequnceProcessor; + return MyCATSequnceProcessor.getInstance(); } public SQLInterceptor getSqlInterceptor() { return sqlInterceptor; } + public ScheduledExecutorService getScheduler() { + return scheduler; + } + public String genXATXID() { long seq = this.xaIDInc.incrementAndGet(); if (seq < 0) { synchronized (xaIDInc) { - if (xaIDInc.get() < 0) { + if ( xaIDInc.get() < 0 ) { xaIDInc.set(0); } seq = xaIDInc.incrementAndGet(); } } - return "'Mycat." + this.getConfig().getSystem().getMycatNodeId() + "." - + seq + "'"; + return "'Mycat." + this.getConfig().getSystem().getMycatNodeId() + "." 
+ seq + "'"; + } + + public String getXATXIDGLOBAL(){ + return "'" + getUUID() + "'"; + } + + public static String getUUID(){ + String s = UUID.randomUUID().toString(); + //去掉“-”符号 + return s.substring(0,8)+s.substring(9,13)+s.substring(14,18)+s.substring(19,23)+s.substring(24); + } + + public MyCatMemory getMyCatMemory() { + return myCatMemory; } /** @@ -158,88 +298,291 @@ public MycatConfig getConfig() { return config; } + public void beforeStart() { + String home = SystemConfig.getHomePath(); + + + //ZkConfig.instance().initZk(); + } + public void startup() throws IOException { SystemConfig system = config.getSystem(); int processorCount = system.getProcessors(); + //init RouteStrategyFactory first + RouteStrategyFactory.init(); + // server startup - LOGGER.info("==============================================="); LOGGER.info(NAME + " is ready to startup ..."); String inf = "Startup processors ...,total processors:" + system.getProcessors() + ",aio thread pool size:" + system.getProcessorExecutor() + " \r\n each process allocated socket buffer pool " - + " bytes ,buffer chunk size:" - + system.getProcessorBufferChunk() - + " buffer pool's capacity(buferPool/bufferChunk) is:" - + system.getProcessorBufferPool() - / system.getProcessorBufferChunk(); + + " bytes ,a page size:" + + system.getBufferPoolPageSize() + + " a page's chunk number(PageSize/ChunkSize) is:" + + (system.getBufferPoolPageSize() + /system.getBufferPoolChunkSize()) + + " buffer page's number is:" + + system.getBufferPoolPageNumber(); LOGGER.info(inf); LOGGER.info("sysconfig params:" + system.toString()); + // startup manager + ManagerConnectionFactory mf = new ManagerConnectionFactory(); + ServerConnectionFactory sf = new ServerConnectionFactory(); + SocketAcceptor manager = null; + SocketAcceptor server = null; + aio = (system.getUsingAIO() == 1); + + // startup processors int threadPoolSize = system.getProcessorExecutor(); - long processBuferPool = system.getProcessorBufferPool(); - int 
processBufferChunk = system.getProcessorBufferChunk(); + processors = new NIOProcessor[processorCount]; + // a page size + int bufferPoolPageSize = system.getBufferPoolPageSize(); + // total page number + short bufferPoolPageNumber = system.getBufferPoolPageNumber(); + //minimum allocation unit + short bufferPoolChunkSize = system.getBufferPoolChunkSize(); + int socketBufferLocalPercent = system.getProcessorBufferLocalPercent(); + int bufferPoolType = system.getProcessorBufferPoolType(); - // server startup - LOGGER.info("==============================================="); - LOGGER.info(NAME + " is ready to startup ,network config:" + system); - - // message byte buffer pool - BufferPool bufferPool = new BufferPool(processBuferPool, - processBufferChunk, system.getFrontSocketSoRcvbuf(), - socketBufferLocalPercent / processorCount); - // Business Executor ,用来执行那些耗时的任务 - NameableExecutor businessExecutor = ExecutorUtil.create( - "BusinessExecutor", threadPoolSize); - // 定时器Executor,用来执行定时任务 - timerExecutor = ExecutorUtil.createSheduledExecute("Timer", - system.getTimerExecutor()); - listeningExecutorService = MoreExecutors - .listeningDecorator(businessExecutor); - - // create netsystem to store our network related objects - NetSystem netSystem = new NetSystem(bufferPool, businessExecutor, - timerExecutor); - netSystem.setNetConfig(system); - // Reactor pool - NIOReactorPool reactorPool = new NIOReactorPool( - BufferPool.LOCAL_BUF_THREAD_PREX + "NIOREACTOR", processorCount); - NIOConnector connector = new NIOConnector( - BufferPool.LOCAL_BUF_THREAD_PREX + "NIOConnector", reactorPool); - connector.start(); - netSystem.setConnector(connector); - - MySQLFrontConnectionFactory frontFactory = new MySQLFrontConnectionFactory( - new MySQLFrontConnectionHandler()); - NIOAcceptor server = new NIOAcceptor(BufferPool.LOCAL_BUF_THREAD_PREX - + NAME + "Server", system.getBindIp(), system.getServerPort(), - frontFactory, reactorPool); + switch (bufferPoolType){ + case 0: + 
bufferPool = new DirectByteBufferPool(bufferPoolPageSize,bufferPoolChunkSize, + bufferPoolPageNumber,system.getFrontSocketSoRcvbuf()); + + + totalNetWorkBufferSize = bufferPoolPageSize*bufferPoolPageNumber; + break; + case 1: + /** + * todo 对应权威指南修改: + * + * bytebufferarena由6个bytebufferlist组成,这六个list有减少内存碎片的机制 + * 每个bytebufferlist由多个bytebufferchunk组成,每个list也有减少内存碎片的机制 + * 每个bytebufferchunk由多个page组成,平衡二叉树管理内存使用状态,计算灵活 + * 设置的pagesize对应bytebufferarena里面的每个bytebufferlist的每个bytebufferchunk的buffer长度 + * bufferPoolChunkSize对应每个bytebufferchunk的每个page的长度 + * bufferPoolPageNumber对应每个bytebufferlist有多少个bytebufferchunk + */ + + totalNetWorkBufferSize = 6*bufferPoolPageSize * bufferPoolPageNumber; + break; + case 2: + bufferPool = new NettyBufferPool(bufferPoolChunkSize); + LOGGER.info("Use Netty Buffer Pool"); + + break; + default: + bufferPool = new DirectByteBufferPool(bufferPoolPageSize,bufferPoolChunkSize, + bufferPoolPageNumber,system.getFrontSocketSoRcvbuf());; + totalNetWorkBufferSize = bufferPoolPageSize*bufferPoolPageNumber; + } + + /** + * Off Heap For Merge/Order/Group/Limit 初始化 + */ + if(system.getUseOffHeapForMerge() == 1){ + try { + myCatMemory = new MyCatMemory(system,totalNetWorkBufferSize); + } catch (NoSuchFieldException e) { + LOGGER .error("NoSuchFieldException",e); + } catch (IllegalAccessException e) { + LOGGER.error("Error",e); + } + } + businessExecutor = ExecutorUtil.create("BusinessExecutor", + threadPoolSize); + sequenceExecutor = ExecutorUtil.create("SequenceExecutor", threadPoolSize); + timerExecutor = ExecutorUtil.create("Timer", system.getTimerExecutor()); + listeningExecutorService = MoreExecutors.listeningDecorator(businessExecutor); + + for (int i = 0; i < processors.length; i++) { + processors[i] = new NIOProcessor("Processor" + i, bufferPool, + businessExecutor); + } + + if (aio) { + LOGGER.info("using aio network handler "); + asyncChannelGroups = new AsynchronousChannelGroup[processorCount]; + // startup connector + connector = new 
AIOConnector(); + for (int i = 0; i < processors.length; i++) { + asyncChannelGroups[i] = AsynchronousChannelGroup.withFixedThreadPool(processorCount, + new ThreadFactory() { + private int inx = 1; + @Override + public Thread newThread(Runnable r) { + Thread th = new Thread(r); + //TODO + th.setName(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "AIO" + (inx++)); + LOGGER.info("created new AIO thread "+ th.getName()); + return th; + } + } + ); + } + manager = new AIOAcceptor(NAME + "Manager", system.getBindIp(), + system.getManagerPort(), mf, this.asyncChannelGroups[0]); + + // startup server + + server = new AIOAcceptor(NAME + "Server", system.getBindIp(), + system.getServerPort(), sf, this.asyncChannelGroups[0]); + + } else { + LOGGER.info("using nio network handler "); + + NIOReactorPool reactorPool = new NIOReactorPool( + DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "NIOREACTOR", + processors.length); + connector = new NIOConnector(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "NIOConnector", reactorPool); + ((NIOConnector) connector).start(); + + manager = new NIOAcceptor(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + NAME + + "Manager", system.getBindIp(), system.getManagerPort(), mf, reactorPool); + + server = new NIOAcceptor(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + NAME + + "Server", system.getBindIp(), system.getServerPort(), sf, reactorPool); + } + // manager start + manager.start(); + LOGGER.info(manager.getName() + " is started and listening on " + manager.getPort()); server.start(); + // server started - LOGGER.info(server.getName() + " is started and listening on " - + server.getPort()); + LOGGER.info(server.getName() + " is started and listening on " + server.getPort()); + + LOGGER.info("==============================================="); // init datahost - config.initDatasource(); + Map dataHosts = config.getDataHosts(); + LOGGER.info("Initialize dataHost ..."); + for (PhysicalDBPool node : dataHosts.values()) { + String index = 
dnIndexProperties.getProperty(node.getHostName(),"0"); + if (!"0".equals(index)) { + LOGGER.info("init datahost: " + node.getHostName() + " to use datasource index:" + index); + } + node.init(Integer.parseInt(index)); + node.startHeartbeat(); + } long dataNodeIldeCheckPeriod = system.getDataNodeIdleCheckPeriod(); - timer.schedule(updateTime(), 0L, TIME_UPDATE_PERIOD); - timer.schedule(processorCheck(), 0L, system.getProcessorCheckPeriod()); - timer.schedule(dataNodeConHeartBeatCheck(dataNodeIldeCheckPeriod), 0L, - dataNodeIldeCheckPeriod); - timer.schedule(dataNodeHeartbeat(), 0L, - system.getDataNodeHeartbeatPeriod()); - timer.schedule(glableTableConsistencyCheck(), 0L, - system.getGlableTableCheckPeriod()); - timer.schedule(catletClassClear(), 30000); - + + heartbeatScheduler.scheduleAtFixedRate(updateTime(), 0L, TIME_UPDATE_PERIOD,TimeUnit.MILLISECONDS); + heartbeatScheduler.scheduleAtFixedRate(processorCheck(), 0L, system.getProcessorCheckPeriod(),TimeUnit.MILLISECONDS); + heartbeatScheduler.scheduleAtFixedRate(dataNodeConHeartBeatCheck(dataNodeIldeCheckPeriod), 0L, dataNodeIldeCheckPeriod,TimeUnit.MILLISECONDS); + heartbeatScheduler.scheduleAtFixedRate(dataNodeHeartbeat(), 0L, system.getDataNodeHeartbeatPeriod(),TimeUnit.MILLISECONDS); + heartbeatScheduler.scheduleAtFixedRate(dataSourceOldConsClear(), 0L, DEFAULT_OLD_CONNECTION_CLEAR_PERIOD, TimeUnit.MILLISECONDS); + scheduler.schedule(catletClassClear(), 30000,TimeUnit.MILLISECONDS); + + if(system.getCheckTableConsistency()==1) { + scheduler.scheduleAtFixedRate(tableStructureCheck(), 0L, system.getCheckTableConsistencyPeriod(), TimeUnit.MILLISECONDS); + } + + if(system.getUseSqlStat()==1) { + scheduler.scheduleAtFixedRate(recycleSqlStat(), 0L, DEFAULT_SQL_STAT_RECYCLE_PERIOD, TimeUnit.MILLISECONDS); + } + + if(system.getUseGlobleTableCheck() == 1){ // 全局表一致性检测是否开启 + scheduler.scheduleAtFixedRate(glableTableConsistencyCheck(), 0L, system.getGlableTableCheckPeriod(), TimeUnit.MILLISECONDS); + } + + 
//定期清理结果集排行榜,控制拒绝策略 + scheduler.scheduleAtFixedRate(resultSetMapClear(),0L, system.getClearBigSqLResultSetMapMs(),TimeUnit.MILLISECONDS); + + +// new Thread(tableStructureCheck()).start(); + + //XA Init recovery Log + LOGGER.info("==============================================="); + LOGGER.info("Perform XA recovery log ..."); + performXARecoveryLog(); + + if(isUseZkSwitch()) { + //首次启动如果发现zk上dnindex为空,则将本地初始化上zk + initZkDnindex(); + } + initRuleData(); + + startup.set(true); + } + + public void initRuleData() { + if(!isUseZk()) return; + InterProcessMutex ruleDataLock =null; + try { + File file = new File(SystemConfig.getHomePath(), "conf" + File.separator + "ruledata"); + String path= ZKUtils.getZKBasePath()+"lock/ruledata.lock"; + ruleDataLock= new InterProcessMutex(ZKUtils.getConnection(), path); + ruleDataLock.acquire(30, TimeUnit.SECONDS); + File[] childFiles= file.listFiles(); + if(childFiles!=null&&childFiles.length>0) { + String basePath = ZKUtils.getZKBasePath() + "ruledata/"; + for (File childFile : childFiles) { + CuratorFramework zk = ZKUtils.getConnection(); + if (zk.checkExists().forPath(basePath + childFile.getName()) == null) { + zk.create().creatingParentsIfNeeded().forPath(basePath + childFile.getName(), Files.toByteArray(childFile)); + } + } + } + + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + try { + if(ruleDataLock!=null) + ruleDataLock.release(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } } - private TimerTask catletClassClear() { - return new TimerTask() { + private void initZkDnindex() { + try { + File file = new File(SystemConfig.getHomePath(), "conf" + File.separator + "dnindex.properties"); + dnindexLock.acquire(30, TimeUnit.SECONDS); + String path = ZKUtils.getZKBasePath() + "bindata/dnindex.properties"; + CuratorFramework zk = ZKUtils.getConnection(); + if (zk.checkExists().forPath(path) == null) { + zk.create().creatingParentsIfNeeded().forPath(path, Files.toByteArray(file)); + } + + 
} catch (Exception e) { + throw new RuntimeException(e); + } finally { + try { + dnindexLock.release(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + } + + public void reloadDnIndex() + { + if(MycatServer.getInstance().getProcessors()==null) return; + // load datanode active index from properties + dnIndexProperties = loadDnIndexProps(); + // init datahost + Map dataHosts = config.getDataHosts(); + LOGGER.info("reInitialize dataHost ..."); + for (PhysicalDBPool node : dataHosts.values()) { + String index = dnIndexProperties.getProperty(node.getHostName(),"0"); + if (!"0".equals(index)) { + LOGGER.info("reinit datahost: " + node.getHostName() + " to use datasource index:" + index); + } + node.switchSource(Integer.parseInt(index),true,"reload dnindex"); + + } + } + + private Runnable catletClassClear() { + return new Runnable() { @Override public void run() { try { @@ -251,7 +594,172 @@ public void run() { }; } + + /** + * 清理 reload @@config_all 后,老的 connection 连接 + * @return + */ + private Runnable dataSourceOldConsClear() { + return new Runnable() { + @Override + public void run() { + timerExecutor.execute(new Runnable() { + @Override + public void run() { + + long sqlTimeout = MycatServer.getInstance().getConfig().getSystem().getSqlExecuteTimeout() * 1000L; + + //根据 lastTime 确认事务的执行, 超过 sqlExecuteTimeout 阀值 close connection + long currentTime = TimeUtil.currentTimeMillis(); + Iterator iter = NIOProcessor.backends_old.iterator(); + while( iter.hasNext() ) { + BackendConnection con = iter.next(); + long lastTime = con.getLastTime(); + if ( currentTime - lastTime > sqlTimeout ) { + con.close("clear old backend connection ..."); + iter.remove(); + } + } + } + }); + }; + }; + } + + /** + * 在bufferpool使用率大于使用率阈值时不清理 + * 在bufferpool使用率小于使用率阈值时清理大结果集清单内容 + * + */ + private Runnable resultSetMapClear() { + return new Runnable() { + @Override + public void run() { + try { + BufferPool bufferPool=getBufferPool(); + long bufferSize=bufferPool.size(); + 
long bufferCapacity=bufferPool.capacity(); + long bufferUsagePercent=(bufferCapacity-bufferSize)*100/bufferCapacity; + if(bufferUsagePercent map =UserStatAnalyzer.getInstance().getUserStatMap(); + Set userSet=config.getUsers().keySet(); + for (String user : userSet) { + UserStat userStat = map.get(user); + if(userStat!=null){ + SqlResultSizeRecorder recorder=userStat.getSqlResultSizeRecorder(); + //System.out.println(recorder.getSqlResultSet().size()); + recorder.clearSqlResultSet(); + } + } + } + } catch (Exception e) { + LOGGER.warn("resultSetMapClear err " + e); + } + }; + }; + } + + private Properties loadDnIndexProps() { + Properties prop = new Properties(); + File file = new File(SystemConfig.getHomePath(), "conf" + File.separator + "dnindex.properties"); + if (!file.exists()) { + return prop; + } + FileInputStream filein = null; + try { + filein = new FileInputStream(file); + prop.load(filein); + } catch (Exception e) { + LOGGER.warn("load DataNodeIndex err:" + e); + } finally { + if (filein != null) { + try { + filein.close(); + } catch (IOException e) { + } + } + } + return prop; + } + + /** + * save cur datanode index to properties file + * + * @param + * @param curIndex + */ + public synchronized void saveDataHostIndex(String dataHost, int curIndex) { + File file = new File(SystemConfig.getHomePath(), "conf" + File.separator + "dnindex.properties"); + FileOutputStream fileOut = null; + try { + String oldIndex = dnIndexProperties.getProperty(dataHost); + String newIndex = String.valueOf(curIndex); + if (newIndex.equals(oldIndex)) { + return; + } + + dnIndexProperties.setProperty(dataHost, newIndex); + LOGGER.info("save DataHost index " + dataHost + " cur index " + curIndex); + + File parent = file.getParentFile(); + if (parent != null && !parent.exists()) { + parent.mkdirs(); + } + + fileOut = new FileOutputStream(file); + dnIndexProperties.store(fileOut, "update"); + + if(isUseZkSwitch()) { + // save to zk + try { + 
dnindexLock.acquire(30,TimeUnit.SECONDS) ; + String path = ZKUtils.getZKBasePath() + "bindata/dnindex.properties"; + CuratorFramework zk = ZKUtils.getConnection(); + if(zk.checkExists().forPath(path)==null) { + zk.create().creatingParentsIfNeeded().forPath(path, Files.toByteArray(file)); + } else{ + byte[] data= zk.getData().forPath(path); + ByteArrayOutputStream out=new ByteArrayOutputStream(); + Properties properties=new Properties(); + properties.load(new ByteArrayInputStream(data)); + if(!String.valueOf(curIndex).equals(properties.getProperty(dataHost))) { + properties.setProperty(dataHost, String.valueOf(curIndex)); + properties.store(out, "update"); + zk.setData().forPath(path, out.toByteArray()); + } + } + + }finally { + dnindexLock.release(); + } + } + } catch (Exception e) { + LOGGER.warn("saveDataNodeIndex err:", e); + } finally { + if (fileOut != null) { + try { + fileOut.close(); + } catch (IOException e) { + } + } + } + + } + + + private boolean isUseZk(){ + String loadZk=ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_FLAG); + return "true".equalsIgnoreCase(loadZk) ; + } + + private boolean isUseZkSwitch() + { + MycatConfig mycatConfig=config; + boolean isUseZkSwitch= mycatConfig.getSystem().isUseZKSwitch(); + String loadZk=ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_FLAG); + return (isUseZkSwitch&&"true".equalsIgnoreCase(loadZk)) ; + } public RouteService getRouterService() { return routerService; @@ -261,10 +769,34 @@ public CacheService getCacheService() { return cacheService; } + public NameableExecutor getBusinessExecutor() { + return businessExecutor; + } + public RouteService getRouterservice() { return routerService; } + public NIOProcessor nextProcessor() { + int i = ++nextProcessor; + if (i >= processors.length) { + i = nextProcessor = 0; + } + return processors[i]; + } + + public NIOProcessor[] getProcessors() { + return processors; + } + + public SocketConnector getConnector() { + return connector; + } + + public SQLRecorder 
getSqlRecorder() { + return sqlRecorder; + } + public long getStartupTime() { return startupTime; } @@ -282,8 +814,8 @@ public void online() { } // 系统时间定时更新任务 - private TimerTask updateTime() { - return new TimerTask() { + private Runnable updateTime() { + return new Runnable() { @Override public void run() { TimeUtil.update(); @@ -292,104 +824,204 @@ public void run() { } // 处理器定时检查任务 - private TimerTask processorCheck() { - return new TimerTask() { + private Runnable processorCheck() { + return new Runnable() { @Override public void run() { timerExecutor.execute(new Runnable() { @Override public void run() { try { - NetSystem.getInstance().checkConnections(); + for (NIOProcessor p : processors) { + p.checkBackendCons(); + } } catch (Exception e) { - LOGGER.warn("checkBackendCons caught err:", e); + LOGGER.warn("checkBackendCons caught err:" + e); } } }); + timerExecutor.execute(new Runnable() { + @Override + public void run() { + try { + for (NIOProcessor p : processors) { + p.checkFrontCons(); + } + } catch (Exception e) { + LOGGER.warn("checkFrontCons caught err:" + e); + } + } + }); } }; } // 数据节点定时连接空闲超时检查任务 - private TimerTask dataNodeConHeartBeatCheck(final long heartPeriod) { - return new TimerTask() { + private Runnable dataNodeConHeartBeatCheck(final long heartPeriod) { + return new Runnable() { @Override public void run() { timerExecutor.execute(new Runnable() { @Override public void run() { - Map nodes = config - .getDataHosts(); + + Map nodes = config.getDataHosts(); for (PhysicalDBPool node : nodes.values()) { node.heartbeatCheck(heartPeriod); } - Map _nodes = config - .getBackupDataHosts(); + + /* + Map _nodes = config.getBackupDataHosts(); if (_nodes != null) { for (PhysicalDBPool node : _nodes.values()) { node.heartbeatCheck(heartPeriod); } - } + }*/ } }); } }; } - // 全局表一致性检查任务 - private TimerTask glableTableConsistencyCheck() { - return new TimerTask() { + // 数据节点定时心跳任务 + private Runnable dataNodeHeartbeat() { + return new Runnable() { @Override 
public void run() { timerExecutor.execute(new Runnable() { @Override public void run() { - GlobalTableUtil.consistencyCheck(); + Map nodes = config.getDataHosts(); + for (PhysicalDBPool node : nodes.values()) { + node.doHeartbeat(); + } } }); } }; } + + //定时清理保存SqlStat中的数据 + private Runnable recycleSqlStat(){ + return new Runnable() { + @Override + public void run() { + Map statMap = UserStatAnalyzer.getInstance().getUserStatMap(); + for (UserStat userStat : statMap.values()) { + userStat.getSqlLastStat().recycle(); + userStat.getSqlRecorder().recycle(); + userStat.getSqlHigh().recycle(); + userStat.getSqlLargeRowStat().recycle(); + } + } + }; + } + + //定时检查不同分片表结构一致性 + private Runnable tableStructureCheck(){ + return new MySQLTableStructureDetector(); + } - // 数据节点定时心跳任务 - private TimerTask dataNodeHeartbeat() { - return new TimerTask() { + // 全局表一致性检查任务 + private Runnable glableTableConsistencyCheck() { + return new Runnable() { @Override public void run() { timerExecutor.execute(new Runnable() { @Override public void run() { - Map nodes = config - .getDataHosts(); - for (PhysicalDBPool node : nodes.values()) { - node.doHeartbeat(); - } + GlobalTableUtil.consistencyCheck(); } }); } }; } + + //XA recovery log check + private void performXARecoveryLog() { + //fetch the recovery log + CoordinatorLogEntry[] coordinatorLogEntries = getCoordinatorLogEntries(); + + for(int i=0; i allCoordinatorLogEntries = fileRepository.getAllCoordinatorLogEntries(); + if(allCoordinatorLogEntries == null){return new CoordinatorLogEntry[0];} + if(allCoordinatorLogEntries.size()==0){return new CoordinatorLogEntry[0];} + return allCoordinatorLogEntries.toArray(new CoordinatorLogEntry[allCoordinatorLogEntries.size()]); + } + + public NameableExecutor getSequenceExecutor() { + return sequenceExecutor; + } + + //huangyiming add + public DirectByteBufferPool getDirectByteBufferPool() { + return (DirectByteBufferPool)bufferPool; + } + + public boolean isAIO() { + return aio; + } + + public 
ListeningExecutorService getListeningExecutorService() { return listeningExecutorService; } - /** - * save cur datanode index to properties file - * - * @param dataNode - * @param curIndex - */ - public synchronized void saveDataHostIndex(String dataHost, int curIndex) { - if(clusterSync==null){ - clusterSync = ConfigFactory.instanceCluster(); - } - boolean isSwitch = clusterSync.switchDataSource(dataHost, curIndex); - if(isSwitch){ - config.setHostIndex(dataHost, curIndex); - }else { - LOGGER.warn("can't switch dataHost"+dataHost +" to curIndex " + curIndex); - throw new ConfigException("can't switch dataHost"+dataHost +" to curIndex " + curIndex); - } + + public static void main(String[] args) throws Exception { + String path = ZKUtils.getZKBasePath() + "bindata"; + CuratorFramework zk = ZKUtils.getConnection(); + if(zk.checkExists().forPath(path)==null); + + byte[] data= zk.getData().forPath(path); + System.out.println(data.length); } + } diff --git a/src/main/java/io/mycat/MycatStartup.java b/src/main/java/io/mycat/MycatStartup.java index 3438dcaec..fbc1ffd90 100644 --- a/src/main/java/io/mycat/MycatStartup.java +++ b/src/main/java/io/mycat/MycatStartup.java @@ -23,44 +23,44 @@ */ package io.mycat; -import io.mycat.server.config.node.SystemConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + import java.text.SimpleDateFormat; import java.util.Date; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.model.SystemConfig; + /** * @author mycat */ public final class MycatStartup { - private static final String dateFormat = "yyyy-MM-dd HH:mm:ss"; - - private static final class Holder { - private static final Logger LOGGER = LoggerFactory - .getLogger(MycatStartup.class); - } + private static final String dateFormat = "yyyy-MM-dd HH:mm:ss"; + private static final Logger LOGGER = LoggerFactory.getLogger(MycatStartup.class); + public static void main(String[] args) 
{ + //use zk ? + ZkConfig.getInstance().initZk(); + try { + String home = SystemConfig.getHomePath(); + if (home == null) { + System.out.println(SystemConfig.SYS_HOME + " is not set."); + System.exit(-1); + } + // init + MycatServer server = MycatServer.getInstance(); + server.beforeStart(); - public static void main(String[] args) { - try { - String home = SystemConfig.getHomePath(); - if (home == null) { - System.out.println(SystemConfig.SYS_HOME + " is not set."); - System.exit(-1); - } - // init - MycatServer server = MycatServer.getInstance(); + // startup + server.startup(); + System.out.println("MyCAT Server startup successfully. see logs in logs/mycat.log"); - // startup - server.startup(); - System.out.println("MyCAT Server startup successfully. see logs in logs/mycat.log"); - while (true) { - Thread.sleep(300 * 1000); - } - } catch (Exception e) { - SimpleDateFormat sdf = new SimpleDateFormat(dateFormat); - Holder.LOGGER.error(sdf.format(new Date()) + " startup error", e); - System.exit(-1); - } - } -} \ No newline at end of file + } catch (Exception e) { + SimpleDateFormat sdf = new SimpleDateFormat(dateFormat); + LOGGER.error(sdf.format(new Date()) + " startup error", e); + System.exit(-1); + } + } +} diff --git a/src/main/java/io/mycat/backend/BackendConnection.java b/src/main/java/io/mycat/backend/BackendConnection.java index 4d64b9e3c..3440a793b 100644 --- a/src/main/java/io/mycat/backend/BackendConnection.java +++ b/src/main/java/io/mycat/backend/BackendConnection.java @@ -1,64 +1,65 @@ -package io.mycat.backend; - -import io.mycat.net.ClosableConnection; -import io.mycat.route.RouteResultsetNode; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.executors.ResponseHandler; - -import java.io.IOException; -import java.io.UnsupportedEncodingException; - -public interface BackendConnection extends ClosableConnection{ - public boolean isModifiedSQLExecuted(); - - public boolean isFromSlaveDB(); - - public String getSchema(); - - 
public void setSchema(String newSchema); - - public long getLastTime(); - - public boolean isClosedOrQuit(); - - public void setAttachment(Object attachment); - - public void quit(); - - public void setLastTime(long currentTimeMillis); - - public void release(); - - public void setResponseHandler(ResponseHandler commandHandler); - - public void commit(); - - public void query(String sql) throws UnsupportedEncodingException; - - public Object getAttachment(); - - // public long getThreadId(); - - public void execute(RouteResultsetNode node, MySQLFrontConnection source, - boolean autocommit) throws IOException; - - public boolean syncAndExcute(); - - public void rollback(); - - public boolean isBorrowed(); - - public void setBorrowed(boolean borrowed); - - public int getTxIsolation(); - - public boolean isAutocommit(); - - public long getId(); - - public void close(String reason); - - public String getCharset(); - - public PhysicalDatasource getPool(); -} +package io.mycat.backend; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; + +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.net.ClosableConnection; +import io.mycat.route.RouteResultsetNode; +import io.mycat.server.ServerConnection; + +public interface BackendConnection extends ClosableConnection { + public boolean isModifiedSQLExecuted(); + + public boolean isFromSlaveDB(); + + public String getSchema(); + + public void setSchema(String newSchema); + + public long getLastTime(); + + public boolean isClosedOrQuit(); + + public void setAttachment(Object attachment); + + public void quit(); + + public void setLastTime(long currentTimeMillis); + + public void release(); + + public boolean setResponseHandler(ResponseHandler commandHandler); + + public void commit(); + + public void query(String sql) throws UnsupportedEncodingException; + + public Object getAttachment(); + + // public long getThreadId(); + + + + public void execute(RouteResultsetNode node, 
ServerConnection source, + boolean autocommit) throws IOException; + + public void recordSql(String host, String schema, String statement); + + public boolean syncAndExcute(); + + public void rollback(); + + public boolean isBorrowed(); + + public void setBorrowed(boolean borrowed); + + public int getTxIsolation(); + + public boolean isAutocommit(); + + public long getId(); + + public void discardClose(String reason); + +} diff --git a/src/main/java/io/mycat/backend/ConMap.java b/src/main/java/io/mycat/backend/ConMap.java index 37a58f512..140e6bdd0 100644 --- a/src/main/java/io/mycat/backend/ConMap.java +++ b/src/main/java/io/mycat/backend/ConMap.java @@ -1,14 +1,19 @@ package io.mycat.backend; -import io.mycat.net.Connection; -import io.mycat.net.NetSystem; - import java.util.Collection; import java.util.Iterator; import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.jdbc.JDBCConnection; +import io.mycat.backend.mysql.nio.MySQLConnection; +import io.mycat.net.NIOProcessor; public class ConMap { + // key -schema private final ConcurrentHashMap items = new ConcurrentHashMap(); @@ -59,56 +64,73 @@ public Collection getAllConQueue() { public int getActiveCountForSchema(String schema, PhysicalDatasource dataSouce) { int total = 0; - for (Connection conn : NetSystem.getInstance().getAllConnectios() - .values()) { - if (conn instanceof BackendConnection) { - BackendConnection theCon = (BackendConnection) conn; - if (theCon.getSchema().equals(schema) - && theCon.getPool() == dataSouce) { - if (theCon.isBorrowed()) { - total++; + for (NIOProcessor processor : MycatServer.getInstance().getProcessors()) { + for (BackendConnection con : processor.getBackends().values()) { + if (con instanceof MySQLConnection) { + MySQLConnection mysqlCon = (MySQLConnection) con; + + if 
(mysqlCon.getSchema().equals(schema) + && mysqlCon.getPool() == dataSouce + && mysqlCon.isBorrowed()) { + total++; } - } - } - } - return total; - } + }else if (con instanceof JDBCConnection) { + JDBCConnection jdbcCon = (JDBCConnection) con; + if (jdbcCon.getSchema().equals(schema) && jdbcCon.getPool() == dataSouce + && jdbcCon.isBorrowed()) { + total++; + } + } + } + } + return total; + } public int getActiveCountForDs(PhysicalDatasource dataSouce) { - int total = 0; - for (Connection conn : NetSystem.getInstance().getAllConnectios() - .values()) { - if (conn instanceof BackendConnection) { - BackendConnection theCon = (BackendConnection) conn; - if (theCon.getPool() == dataSouce) { - if (theCon.isBorrowed()) { - total++; + for (NIOProcessor processor : MycatServer.getInstance().getProcessors()) { + for (BackendConnection con : processor.getBackends().values()) { + if (con instanceof MySQLConnection) { + MySQLConnection mysqlCon = (MySQLConnection) con; + + if (mysqlCon.getPool() == dataSouce + && mysqlCon.isBorrowed() && !mysqlCon.isClosed()) { + total++; } - } - } - } - return total; - } - - public void clearConnections(String reason, PhysicalDatasource dataSouce) { - - Iterator> itor = NetSystem.getInstance() - .getAllConnectios().entrySet().iterator(); - while (itor.hasNext()) { - Entry entry = itor.next(); - Connection con = entry.getValue(); - if (con instanceof BackendConnection) { - if (((BackendConnection) con).getPool() == dataSouce) { - con.close(reason); - itor.remove(); - } - } + } else if (con instanceof JDBCConnection) { + JDBCConnection jdbcCon = (JDBCConnection) con; + if (jdbcCon.getPool() == dataSouce + && jdbcCon.isBorrowed() && !jdbcCon.isClosed()) { + total++; + } + } + } + } + return total; + } + + public void clearConnections(String reason, PhysicalDatasource dataSouce) { + for (NIOProcessor processor : MycatServer.getInstance().getProcessors()) { + ConcurrentMap map = processor.getBackends(); + Iterator> itor = map.entrySet().iterator(); 
+ while (itor.hasNext()) { + Entry entry = itor.next(); + BackendConnection con = entry.getValue(); + if (con instanceof MySQLConnection) { + if (((MySQLConnection) con).getPool() == dataSouce) { + con.close(reason); + itor.remove(); + } + }else if((con instanceof JDBCConnection) + && (((JDBCConnection) con).getPool() == dataSouce)){ + con.close(reason); + itor.remove(); + } + } } - items.clear(); + items.clear(); } - -} +} \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/ConQueue.java b/src/main/java/io/mycat/backend/ConQueue.java index 9072c30ba..38ab556c1 100644 --- a/src/main/java/io/mycat/backend/ConQueue.java +++ b/src/main/java/io/mycat/backend/ConQueue.java @@ -37,10 +37,12 @@ public void incExecuteCount() { this.executeCount++; } - public void removeCon(BackendConnection con) { - if (!autoCommitCons.remove(con)) { - manCommitCons.remove(con); + public boolean removeCon(BackendConnection con) { + boolean removed = autoCommitCons.remove(con); + if (!removed) { + return manCommitCons.remove(con); } + return removed; } public boolean isSameCon(BackendConnection con) { @@ -65,13 +67,13 @@ public ArrayList getIdleConsToClose(int count) { count); while (!manCommitCons.isEmpty() && readyCloseCons.size() < count) { BackendConnection theCon = manCommitCons.poll(); - if (theCon != null) { + if (theCon != null&&!theCon.isBorrowed()) { readyCloseCons.add(theCon); } } while (!autoCommitCons.isEmpty() && readyCloseCons.size() < count) { BackendConnection theCon = autoCommitCons.poll(); - if (theCon != null) { + if (theCon != null&&!theCon.isBorrowed()) { readyCloseCons.add(theCon); } diff --git a/src/main/java/io/mycat/backend/PhysicalDBNode.java b/src/main/java/io/mycat/backend/PhysicalDBNode.java deleted file mode 100644 index 5119fa5f8..000000000 --- a/src/main/java/io/mycat/backend/PhysicalDBNode.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. 
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.backend; - -import io.mycat.route.RouteResultsetNode; -import io.mycat.server.executors.ResponseHandler; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class PhysicalDBNode { - protected static final Logger LOGGER = LoggerFactory - .getLogger(PhysicalDBNode.class); - - protected final String name; - protected final String database; - protected final PhysicalDBPool dbPool; - - public PhysicalDBNode(String hostName, String database, - PhysicalDBPool dbPool) { - this.name = hostName; - this.database = database; - this.dbPool = dbPool; - } - - public String getName() { - return name; - } - - public PhysicalDBPool getDbPool() { - return dbPool; - } - - public String getDatabase() { - return database; - } - - /** - * get connection from the same datasource - * - * @param exitsCon - * @throws Exception - */ - public void getConnectionFromSameSource(String schema,boolean autocommit, - BackendConnection exitsCon, ResponseHandler handler, - Object attachment) throws Exception { - - PhysicalDatasource ds = this.dbPool.findDatasouce(exitsCon); - if (ds == null) { - throw new RuntimeException( - "can't find exits connection,maybe fininshed " + exitsCon); - } else { - ds.getConnection(schema,autocommit, handler, attachment); - } - - } - - private void checkRequest(String schema){ - if (schema != null - && !schema.equals(this.database)) { - throw new RuntimeException( - "invalid param ,connection request db is :" - + schema + " and datanode db is " - + this.database); - } - if (!dbPool.isInitSuccess()) { - dbPool.init(dbPool.activedIndex); - } - } - - public void getConnection(String schema,boolean autoCommit, RouteResultsetNode rrs, - ResponseHandler handler, Object attachment) throws Exception { - checkRequest(schema); - if (dbPool.isInitSuccess()) { - if (rrs.canRunnINReadDB(autoCommit)) { - dbPool.getRWBanlanceCon(schema,autoCommit, handler, attachment, - this.database); - } else { - 
dbPool.getSource().getConnection(schema,autoCommit, handler, attachment); - } - - } else { - throw new IllegalArgumentException("Invalid DataSource:" - + dbPool.getActivedIndex()); - } - } -} \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/PhysicalDBPool.java b/src/main/java/io/mycat/backend/PhysicalDBPool.java deleted file mode 100644 index b3dd5bac8..000000000 --- a/src/main/java/io/mycat/backend/PhysicalDBPool.java +++ /dev/null @@ -1,601 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.backend; - -import io.mycat.MycatServer; -import io.mycat.backend.heartbeat.DBHeartbeat; -import io.mycat.server.Alarms; -import io.mycat.server.config.node.DataHostConfig; -import io.mycat.server.executors.GetConnectionHandler; -import io.mycat.server.executors.ResponseHandler; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.*; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.locks.ReentrantLock; - -public class PhysicalDBPool { - public static final int BALANCE_NONE = 0; - public static final int BALANCE_ALL_BACK = 1; - public static final int BALANCE_ALL = 2; - public static final int BALANCE_ALL_READ = 3; - public static final int WRITE_ONLYONE_NODE = 0; - public static final int WRITE_RANDOM_NODE = 1; - public static final int WRITE_ALL_NODE = 2; - public static final long LONG_TIME = 300000; - public static final int WEIGHT = 0; - - protected static final Logger LOGGER = LoggerFactory - .getLogger(PhysicalDBPool.class); - private final String hostName; - protected PhysicalDatasource[] writeSources; - protected Map readSources; - protected volatile int activedIndex; - protected volatile boolean initSuccess; - protected final ReentrantLock switchLock = new ReentrantLock(); - private final Collection allDs; - private final int banlance; - private final int writeType; - private final Random random = new Random(); - private final Random wnrandom = new Random(); - private String[] schemas; - private final DataHostConfig dataHostConfig; - - public PhysicalDBPool(String name, DataHostConfig conf, - PhysicalDatasource[] writeSources, - Map readSources, int balance, - int writeType) { - this.hostName = name; - this.dataHostConfig = conf; - this.writeSources = writeSources; - this.banlance = balance; - this.writeType = writeType; - Iterator> entryItor = readSources - .entrySet().iterator(); - while (entryItor.hasNext()) { - PhysicalDatasource[] values = 
entryItor.next().getValue(); - if (values.length == 0) { - entryItor.remove(); - } - } - this.readSources = readSources; - this.allDs = this.genAllDataSources(); - LOGGER.info("total resouces of dataHost " + this.hostName + " is :" - + allDs.size()); - setDataSourceProps(); - } - - public int getWriteType() { - return writeType; - } - public int getBalance() { - return banlance; - } - private void setDataSourceProps() { - for (PhysicalDatasource ds : this.allDs) { - ds.setDbPool(this); - } - } - - public PhysicalDatasource findDatasouce(BackendConnection exitsCon) { - - for (PhysicalDatasource ds : this.allDs) { - if (ds.isReadNode() == exitsCon.isFromSlaveDB()) { - if (ds.isMyConnection(exitsCon)) { - return ds; - } - } - } - LOGGER.warn("can't find connection in pool " + this.hostName + " con:" - + exitsCon); - return null; - } - - public String getHostName() { - return hostName; - } - - /** - * all write datanodes - * - * @return - */ - public PhysicalDatasource[] getSources() { - return writeSources; - } - - public PhysicalDatasource getSource() { - switch (writeType) { - case WRITE_ONLYONE_NODE: { - return writeSources[activedIndex]; - } - case WRITE_RANDOM_NODE: { - - int index = Math.abs(wnrandom.nextInt()) % writeSources.length; - PhysicalDatasource result = writeSources[index]; - if (!this.isAlive(result)) { - // find all live nodes - ArrayList alives = new ArrayList( - writeSources.length - 1); - for (int i = 0; i < writeSources.length; i++) { - if (i != index) { - if (this.isAlive(writeSources[i])) { - alives.add(i); - } - } - } - if (alives.isEmpty()) { - result = writeSources[0]; - } else { - // random select one - index = Math.abs(wnrandom.nextInt()) % alives.size(); - result = writeSources[alives.get(index)]; - - } - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("select write source " + result.getName() - + " for dataHost:" + this.getHostName()); - } - return result; - } - default: { - throw new java.lang.IllegalArgumentException("writeType is " 
- + writeType + " ,so can't return one write datasource "); - } - } - - } - - public int getActivedIndex() { - return activedIndex; - } - - public boolean isInitSuccess() { - return initSuccess; - } - - public int next(int i) { - if (checkIndex(i)) { - return (++i == writeSources.length) ? 0 : i; - } else { - return 0; - } - } - - /** - * 鍒囨崲鏁版嵁婧� - */ - public boolean switchSource(int newIndex, boolean isAlarm, String reason) { - if (this.writeType != PhysicalDBPool.WRITE_ONLYONE_NODE - || !checkIndex(newIndex)) { - return false; - } - final ReentrantLock lock = this.switchLock; - lock.lock(); - try { - int current = activedIndex; - if (current != newIndex) { - // switch index - activedIndex = newIndex; - // init again - this.init(activedIndex); - // clear all connections - this.getSources()[current].clearCons("switch datasource"); - // write log - LOGGER.warn(switchMessage(current, newIndex, false, reason)); - return true; - } - } finally { - lock.unlock(); - } - return false; - } - - private String switchMessage(int current, int newIndex, boolean alarm, - String reason) { - StringBuilder s = new StringBuilder(); - if (alarm) { - s.append(Alarms.DATANODE_SWITCH); - } - s.append("[Host=").append(hostName).append(",result=[").append(current) - .append("->"); - s.append(newIndex).append("],reason=").append(reason).append(']'); - return s.toString(); - } - - private int loop(int i) { - return i < writeSources.length ? 
i : (i - writeSources.length); - } - - public void init(int index) { - if (!checkIndex(index)) { - index = 0; - } - int active = -1; - for (int i = 0; i < writeSources.length; i++) { - int j = loop(i + index); - if (initSource(j, writeSources[j])) { - //不切换-1时,如果主写挂了 不允许切换过去 - if(dataHostConfig.getSwitchType()==DataHostConfig.NOT_SWITCH_DS&&j>0) - { - break; - } - active = j; - activedIndex = active; - initSuccess = true; - LOGGER.info(getMessage(active, " init success")); - - if (this.writeType == WRITE_ONLYONE_NODE) { - // only init one write datasource - MycatServer.getInstance().saveDataHostIndex(hostName, activedIndex); - break; - } - } - } - if (!checkIndex(active)) { - initSuccess = false; - StringBuilder s = new StringBuilder(); - s.append(Alarms.DEFAULT).append(hostName).append(" init failure"); - LOGGER.error(s.toString()); - } - } - - private boolean checkIndex(int i) { - return i >= 0 && i < writeSources.length; - } - - private String getMessage(int index, String info) { - return new StringBuilder().append(hostName).append(" index:") - .append(index).append(info).toString(); - } - - private boolean initSource(int index, PhysicalDatasource ds) { - int initSize = ds.getConfig().getMinCon(); - LOGGER.info("init backend myqsl source ,create connections total " - + initSize + " for " + ds.getName() + " index :" + index); - CopyOnWriteArrayList list = new CopyOnWriteArrayList(); - GetConnectionHandler getConHandler = new GetConnectionHandler(list, - initSize); - // long start=System.currentTimeMillis(); - // long timeOut=start+5000*1000L; - - for (int i = 0; i < initSize; i++) { - try { - - ds.getConnection(this.schemas[i % schemas.length], true, - getConHandler, null); - } catch (Exception e) { - LOGGER.warn(getMessage(index, " init connection error."), e); - } - } - long timeOut = System.currentTimeMillis() + 60 * 1000; - - // waiting for finish - while (!getConHandler.finished() - && (System.currentTimeMillis() < timeOut)) { - try { - Thread.sleep(100); - 
- } catch (InterruptedException e) { - LOGGER.error("initError", e); - } - } - LOGGER.info("init result :" + getConHandler.getStatusInfo()); -// for (BackendConnection c : list) { -// c.release(); -// } - return !list.isEmpty(); - } - - public void doHeartbeat() { - if (writeSources == null || writeSources.length == 0) { - return; - } - - for (PhysicalDatasource source : this.allDs) { - if (source != null) { - source.doHeartbeat(); - } else { - StringBuilder s = new StringBuilder(); - s.append(Alarms.DEFAULT).append(hostName).append(" current dataSource is null!"); - LOGGER.error(s.toString()); - } - } - } - - /** - * back physical connection heartbeat check - */ - public void heartbeatCheck(long ildCheckPeriod) { - for (PhysicalDatasource ds : allDs) { - // only readnode or all write node or writetype=WRITE_ONLYONE_NODE - // and current write node will check - if (ds != null - && (ds.getHeartbeat().getStatus() == DBHeartbeat.OK_STATUS) - && (ds.isReadNode() - || (this.writeType != WRITE_ONLYONE_NODE) || (this.writeType == WRITE_ONLYONE_NODE && ds == this - .getSource()))) { - ds.heatBeatCheck(ds.getConfig().getIdleTimeout(), - ildCheckPeriod); - } - } - } - - public void startHeartbeat() { - for (PhysicalDatasource source : this.allDs) { - source.startHeartbeat(); - } - } - - public void stopHeartbeat() { - for (PhysicalDatasource source : this.allDs) { - source.stopHeartbeat(); - } - } - - public void clearDataSources(String reason) { - LOGGER.info("clear datasours of pool " + this.hostName); - for (PhysicalDatasource source : this.allDs) { - LOGGER.info("clear datasoure of pool " + this.hostName + " ds:" - + source.getConfig()); - source.clearCons(reason); - source.stopHeartbeat(); - } - - } - - public Collection genAllDataSources() { - LinkedList allSources = new LinkedList(); - for (PhysicalDatasource ds : writeSources) { - if (ds != null) { - allSources.add(ds); - } - } - for (PhysicalDatasource[] dataSources : this.readSources.values()) { - for 
(PhysicalDatasource ds : dataSources) { - if (ds != null) { - allSources.add(ds); - } - } - } - return allSources; - } - - public Collection getAllDataSources() { - return this.allDs; - } - - /** - * return connection for read balance - * - * @param handler - * @param attachment - * @param database - * @throws Exception - */ - public void getRWBanlanceCon(String schema, boolean autocommit, - ResponseHandler handler, Object attachment, String database) - throws Exception { - PhysicalDatasource theNode = null; - ArrayList okSources = null; - switch (banlance) { - case BALANCE_ALL_BACK: {// all read nodes and the standard by masters - - okSources = getAllActiveRWSources(true, false, checkSlaveSynStatus()); - if (okSources.isEmpty()) { - theNode = this.getSource(); - } else { - theNode = randomSelect(okSources); - } - break; - } - case BALANCE_ALL: { - okSources = getAllActiveRWSources(true, true, checkSlaveSynStatus()); - theNode = randomSelect(okSources); - break; - } - case BALANCE_ALL_READ: { - okSources = getAllActiveRWSources(false, false, checkSlaveSynStatus()); - theNode = randomSelect(okSources); - break; - } - case BALANCE_NONE: - default: - // return default write data source - theNode = this.getSource(); - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("select read source " + theNode.getName() - + " for dataHost:" + this.getHostName()); - } - theNode.getConnection(schema, autocommit, handler, attachment); - } - - private boolean checkSlaveSynStatus() { - return (dataHostConfig.getSlaveThreshold() != -1) - && (dataHostConfig.getSwitchType() == DataHostConfig.SYN_STATUS_SWITCH_DS); - } - - /** - * TODO: modify by zhuam - * - * 随机选择,按权重设置随机概率。 - * 在一个截面上碰撞的概率高,但调用量越大分布越均匀,而且按概率使用权重后也比较均匀,有利于动态调整提供者权重。 - * @param okSources - * @return - */ - public PhysicalDatasource randomSelect(ArrayList okSources) { - - if (okSources.isEmpty()) { - return this.getSource(); - - } else { - - int length = okSources.size(); // 总个数 - int totalWeight = 0; // 总权重 - boolean 
sameWeight = true; // 权重是否都一样 - for (int i = 0; i < length; i++) { - int weight = okSources.get(i).getConfig().getWeight(); - totalWeight += weight; // 累计总权重 - if (sameWeight && i > 0 - && weight != okSources.get(i-1).getConfig().getWeight() ) { // 计算所有权重是否一样 - sameWeight = false; - } - } - - if (totalWeight > 0 && !sameWeight ) { - - // 如果权重不相同且权重大于0则按总权重数随机 - int offset = random.nextInt(totalWeight); - - // 并确定随机值落在哪个片断上 - for (int i = 0; i < length; i++) { - offset -= okSources.get(i).getConfig().getWeight(); - if (offset < 0) { - return okSources.get(i); - } - } - } - - // 如果权重相同或权重为0则均等随机 - return okSources.get( random.nextInt(length) ); - - //int index = Math.abs(random.nextInt()) % okSources.size(); - //return okSources.get(index); - } - } - - private boolean isAlive(PhysicalDatasource theSource) { - return (theSource.getHeartbeat().getStatus() == DBHeartbeat.OK_STATUS); - } - - private boolean canSelectAsReadNode(PhysicalDatasource theSource) { - - if(theSource.getHeartbeat().getSlaveBehindMaster()==null - ||theSource.getHeartbeat().getDbSynStatus()==DBHeartbeat.DB_SYN_ERROR){ - return false; - } - return (theSource.getHeartbeat().getDbSynStatus() == DBHeartbeat.DB_SYN_NORMAL) - && (theSource.getHeartbeat().getSlaveBehindMaster() < this.dataHostConfig - .getSlaveThreshold()); - - } - - /** - * return all backup write sources - * - * @param includeWriteNode if include write nodes - * @param includeCurWriteNode if include current active write node. 
invalid when includeWriteNode is false - * @param filterWithSlaveThreshold - * - * @return - */ - private ArrayList getAllActiveRWSources( - boolean includeWriteNode, - boolean includeCurWriteNode, boolean filterWithSlaveThreshold) { - int curActive = activedIndex; - ArrayList okSources = new ArrayList( - this.allDs.size()); - for (int i = 0; i < this.writeSources.length; i++) { - PhysicalDatasource theSource = writeSources[i]; - if (isAlive(theSource)) {// write node is active - if (includeWriteNode) { - if (i == curActive && includeCurWriteNode == false) { - // not include cur active source - } else if (filterWithSlaveThreshold) { - if (canSelectAsReadNode(theSource)) { - okSources.add(theSource); - } else { - continue; - } - } else { - okSources.add(theSource); - } - } - if (!readSources.isEmpty()) { - // check all slave nodes - PhysicalDatasource[] allSlaves = this.readSources.get(i); - if (allSlaves != null) { - for (PhysicalDatasource slave : allSlaves) { - if (isAlive(slave)) { - if (filterWithSlaveThreshold) { - if (canSelectAsReadNode(slave)) { - okSources.add(slave); - } else { - continue; - } - } else { - okSources.add(slave); - } - } - } - } - } - - } else { - - // TODO : add by zhuam - // 如果写节点不OK, 也要保证临时的读服务正常 - if ( this.dataHostConfig.isTempReadHostAvailable() ) { - - if (!readSources.isEmpty()) { - // check all slave nodes - PhysicalDatasource[] allSlaves = this.readSources.get(i); - if (allSlaves != null) { - for (PhysicalDatasource slave : allSlaves) { - if (isAlive(slave)) { - - if (filterWithSlaveThreshold) { - if (canSelectAsReadNode(slave)) { - okSources.add(slave); - } else { - continue; - } - - } else { - okSources.add(slave); - } - } - } - } - } - } - } - - } - return okSources; - } - - public String[] getSchemas() { - return schemas; - } - - public void setSchemas(String[] mySchemas) { - this.schemas = mySchemas; - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/PhysicalDatasource.java 
b/src/main/java/io/mycat/backend/PhysicalDatasource.java deleted file mode 100644 index 6da2469bc..000000000 --- a/src/main/java/io/mycat/backend/PhysicalDatasource.java +++ /dev/null @@ -1,429 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.backend; - -import io.mycat.backend.heartbeat.DBHeartbeat; -import io.mycat.net.NetSystem; -import io.mycat.server.Alarms; -import io.mycat.server.config.node.DBHostConfig; -import io.mycat.server.config.node.DataHostConfig; -import io.mycat.server.executors.ConnectionHeartBeatHandler; -import io.mycat.server.executors.DelegateResponseHandler; -import io.mycat.server.executors.NewConnectionRespHandler; -import io.mycat.server.executors.ResponseHandler; -import io.mycat.util.TimeUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.ConcurrentLinkedQueue; - -public abstract class PhysicalDatasource { - public static final Logger LOGGER = LoggerFactory - .getLogger(PhysicalDatasource.class); - - private final String name; - private final int size; - private final DBHostConfig config; - private final ConMap conMap = new ConMap(); - private DBHeartbeat heartbeat; - private final boolean readNode; - private volatile long heartbeatRecoveryTime; - private final DataHostConfig hostConfig; - private final ConnectionHeartBeatHandler conHeartBeatHanler = new ConnectionHeartBeatHandler(); - private PhysicalDBPool dbPool; - - public PhysicalDatasource(DBHostConfig config, DataHostConfig hostConfig, - boolean isReadNode) { - this.size = config.getMaxCon(); - this.config = config; - this.name = config.getHostName(); - this.hostConfig = hostConfig; - heartbeat = this.createHeartBeat(); - this.readNode = isReadNode; - } - - public boolean isMyConnection(BackendConnection con) { - return (con.getPool() == this); - } - - public DataHostConfig getHostConfig() { - return hostConfig; - } - - public boolean isReadNode() { - return readNode; - } - - public int getSize() { - return size; - } - - public void setDbPool(PhysicalDBPool dbPool) { - this.dbPool = dbPool; - } - - public 
PhysicalDBPool getDbPool() { - return dbPool; - } - - public abstract DBHeartbeat createHeartBeat(); - - public String getName() { - return name; - } - public int getIndex(){ - int currentIndex = 0; - for(int i=0;i heartBeatCons, ConQueue queue, - ConcurrentLinkedQueue checkLis, - long hearBeatTime, long hearBeatTime2) { - int maxConsInOneCheck = 10; - Iterator checkListItor = checkLis.iterator(); - while (checkListItor.hasNext()) { - BackendConnection con = checkListItor.next(); - if (con.isClosedOrQuit()) { - checkListItor.remove(); - continue; - } - if (validSchema(con.getSchema())) { - if (con.getLastTime() < hearBeatTime - && heartBeatCons.size() < maxConsInOneCheck) { - checkListItor.remove(); - // Heart beat check - con.setBorrowed(true); - heartBeatCons.add(con); - } - } else if (con.getLastTime() < hearBeatTime2) { - // not valid schema conntion should close for idle - // exceed 2*conHeartBeatPeriod - checkListItor.remove(); - con.close(" heart beate idle "); - } - - } - - } - - public void heatBeatCheck(long timeout, long conHeartBeatPeriod) { - int ildeCloseCount = hostConfig.getMinCon() * 3; - int maxConsInOneCheck = 5; - LinkedList heartBeatCons = new LinkedList(); - - long hearBeatTime = TimeUtil.currentTimeMillis() - conHeartBeatPeriod; - long hearBeatTime2 = TimeUtil.currentTimeMillis() - 2 - * conHeartBeatPeriod; - for (ConQueue queue : conMap.getAllConQueue()) { - checkIfNeedHeartBeat(heartBeatCons, queue, - queue.getAutoCommitCons(), hearBeatTime, hearBeatTime2); - if (heartBeatCons.size() < maxConsInOneCheck) { - checkIfNeedHeartBeat(heartBeatCons, queue, - queue.getManCommitCons(), hearBeatTime, hearBeatTime2); - } else if (heartBeatCons.size() >= maxConsInOneCheck) { - break; - } - } - - if (!heartBeatCons.isEmpty()) { - for (BackendConnection con : heartBeatCons) { - conHeartBeatHanler - .doHeartBeat(con, hostConfig.getHeartbeatSQL()); - } - } - - // check if there has timeouted heatbeat cons - conHeartBeatHanler.abandTimeOuttedConns(); - int 
idleCons = getIdleCount(); - int activeCons = this.getActiveCount(); - int createCount = (hostConfig.getMinCon() - idleCons) / 3; - // create if idle too little - if ((createCount > 0) && (idleCons + activeCons < size) - && (idleCons < hostConfig.getMinCon())) { - createByIdleLitte(idleCons, createCount); - } else if (idleCons > hostConfig.getMinCon()) { - closeByIdleMany(idleCons-hostConfig.getMinCon()); - } else { - int activeCount = this.getActiveCount(); - if (activeCount > size) { - StringBuilder s = new StringBuilder(); - s.append(Alarms.DEFAULT).append("DATASOURCE EXCEED [name=") - .append(name).append(",active="); - s.append(activeCount).append(",size=").append(size).append(']'); - LOGGER.warn(s.toString()); - } - } - } - - private void closeByIdleMany(int ildeCloseCount) { - LOGGER.info("too many ilde cons ,close some for datasouce " + name); - List readyCloseCons = new ArrayList( - ildeCloseCount); - for (ConQueue queue : conMap.getAllConQueue()) { - readyCloseCons.addAll(queue.getIdleConsToClose(ildeCloseCount)); - if (readyCloseCons.size() >= ildeCloseCount) { - break; - } - } - - for (BackendConnection idleCon : readyCloseCons) { - if (idleCon.isBorrowed()) { - LOGGER.warn("find idle con is using " + idleCon); - } - idleCon.close("too many idle con"); - } - } - - private void createByIdleLitte(int idleCons, int createCount) { - LOGGER.info("create connections ,because idle connection not enough ,cur is " - + idleCons - + ", minCon is " - + hostConfig.getMinCon() - + " for " - + name); - NewConnectionRespHandler simpleHandler = new NewConnectionRespHandler(); - - final String[] schemas = dbPool.getSchemas(); - for (int i = 0; i < createCount; i++) { - if (this.getActiveCount() + this.getIdleCount() >= size) { - break; - } - try { - // creat new connection - this.createNewConnection(simpleHandler, null, schemas[i - % schemas.length]); - } catch (IOException e) { - LOGGER.warn("create connection err " + e); - } - - } - } - - public int getActiveCount() { 
- return this.conMap.getActiveCountForDs(this); - } - - public void clearCons(String reason) { - this.conMap.clearConnections(reason, this); - } - - public void startHeartbeat() { - heartbeat.start(); - } - - public void stopHeartbeat() { - heartbeat.stop(); - } - - public void doHeartbeat() { - // 未到预定恢复时间,不执行心跳检测。 - if (TimeUtil.currentTimeMillis() < heartbeatRecoveryTime) { - return; - } - if (!heartbeat.isStop()) { - try { - heartbeat.heartbeat(); - } catch (Exception e) { - LOGGER.error(name + " heartbeat error.", e); - } - } - } - - private BackendConnection takeCon(BackendConnection conn, - final ResponseHandler handler, final Object attachment, - String schema) { - - conn.setBorrowed(true); - if (!conn.getSchema().equals(schema)) { - // need do schema syn in before sql send - conn.setSchema(schema); - } - ConQueue queue = conMap.getSchemaConQueue(schema); - queue.incExecuteCount(); - conn.setAttachment(attachment); - conn.setLastTime(System.currentTimeMillis()); // 每次取连接的时候,更新下lasttime,防止在前端连接检查的时候,关闭连接,导致sql执行失败 - handler.connectionAcquired(conn); - return conn; - } - - private void createNewConnection(final ResponseHandler handler, - final Object attachment, final String schema) throws IOException { - // aysn create connection - NetSystem.getInstance().getExecutor().execute(new Runnable() { - public void run() { - try { - createNewConnection(new DelegateResponseHandler(handler) { - @Override - public void connectionError(Throwable e, - BackendConnection conn) { - handler.connectionError(e, conn); - } - - @Override - public void connectionAcquired(BackendConnection conn) { - takeCon(conn, handler, attachment, schema); - } - }, schema); - } catch (IOException e) { - handler.connectionError(e, null); - } - } - }); - } - - public void getConnection(String schema, boolean autocommit, - final ResponseHandler handler, final Object attachment) - throws IOException { - BackendConnection con = this.conMap.tryTakeCon(schema, autocommit); - if (con != null) { - 
takeCon(con, handler, attachment, schema); - return; - } else { - int activeCons = this.getActiveCount();//当前最大活动连接 - if(activeCons+1>size){//下一个连接大于最大连接数 - LOGGER.error("the max activeConnnections size can not be max than maxconnections"); - throw new IOException("the max activeConnnections size can not be max than maxconnections"); - }else{ // create connection - LOGGER.info("no ilde connection in pool,create new connection for " + this.name - + " of schema "+schema); - createNewConnection(handler, attachment, schema); - } - } - - } - - private void returnCon(BackendConnection c) { - c.setAttachment(null); - c.setBorrowed(false); - c.setLastTime(TimeUtil.currentTimeMillis()); - ConQueue queue = this.conMap.getSchemaConQueue(c.getSchema()); - - boolean ok = false; - if (c.isAutocommit()) { - ok = queue.getAutoCommitCons().offer(c); - } else { - ok = queue.getManCommitCons().offer(c); - } - if (!ok) { - - LOGGER.warn("can't return to pool ,so close con " + c); - c.close("can't return to pool "); - } - } - - public void releaseChannel(BackendConnection c) { - returnCon(c); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("release channel " + c); - } - } - - public void connectionClosed(BackendConnection conn) { - ConQueue queue = this.conMap.getSchemaConQueue(conn.getSchema()); - if (queue != null) { - queue.removeCon(conn); - } - - } - - public abstract void createNewConnection(ResponseHandler handler, - String schema) throws IOException; - - public long getHeartbeatRecoveryTime() { - return heartbeatRecoveryTime; - } - - public void setHeartbeatRecoveryTime(long heartbeatRecoveryTime) { - this.heartbeatRecoveryTime = heartbeatRecoveryTime; - } - - public DBHostConfig getConfig() { - return config; - } -} \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/datasource/PhysicalDBNode.java b/src/main/java/io/mycat/backend/datasource/PhysicalDBNode.java new file mode 100644 index 000000000..71deb7ed2 --- /dev/null +++ 
b/src/main/java/io/mycat/backend/datasource/PhysicalDBNode.java @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.backend.datasource; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.BackendConnection; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.route.RouteResultsetNode; + +public class PhysicalDBNode { + protected static final Logger LOGGER = LoggerFactory + .getLogger(PhysicalDBNode.class); + + protected final String name; + protected final String database; + protected final PhysicalDBPool dbPool; + + public PhysicalDBNode(String hostName, String database, + PhysicalDBPool dbPool) { + this.name = hostName; + this.database = database; + this.dbPool = dbPool; + } + + public String getName() { + return name; + } + + public PhysicalDBPool getDbPool() { + return dbPool; + } + + public String getDatabase() { + return database; + } + + /** + * get connection from the same datasource + * + * @param exitsCon + * @throws Exception + */ + public void getConnectionFromSameSource(String schema,boolean autocommit, + BackendConnection exitsCon, ResponseHandler handler, + Object attachment) throws Exception { + + PhysicalDatasource ds = this.dbPool.findDatasouce(exitsCon); + if (ds == null) { + throw new RuntimeException( + "can't find exits connection,maybe fininshed " + exitsCon); + } else { + ds.getConnection(schema,autocommit, handler, attachment); + } + + } + + private void checkRequest(String schema){ + if (schema != null + && !schema.equals(this.database)) { + throw new RuntimeException( + "invalid param ,connection request db is :" + + schema + " and datanode db is " + + this.database); + } + if (!dbPool.isInitSuccess()) { + dbPool.init(dbPool.activedIndex); + } + } + + public void getConnection(String schema,boolean autoCommit, RouteResultsetNode rrs, + ResponseHandler handler, Object attachment) throws Exception { + checkRequest(schema); + if (dbPool.isInitSuccess()) { + LOGGER.debug("rrs.getRunOnSlave() " + rrs.getRunOnSlave()); + if(rrs.getRunOnSlave() != null){ // 带有 
/*db_type=master/slave*/ 注解 + // 强制走 slave + if(rrs.getRunOnSlave()){ + LOGGER.debug("rrs.isHasBlanceFlag() " + rrs.isHasBlanceFlag()); + if (rrs.isHasBlanceFlag()) { // 带有 /*balance*/ 注解(目前好像只支持一个注解...) + dbPool.getReadBanlanceCon(schema,autoCommit,handler, attachment, this.database); + }else{ // 没有 /*balance*/ 注解 + LOGGER.debug("rrs.isHasBlanceFlag()" + rrs.isHasBlanceFlag()); + if(!dbPool.getReadCon(schema, autoCommit, handler, attachment, this.database)){ + LOGGER.warn("Do not have slave connection to use, use master connection instead."); + PhysicalDatasource writeSource=dbPool.getSource(); + //记录写节点写负载值 + writeSource.setWriteCount(); + writeSource.getConnection(schema, + autoCommit, handler, attachment); + rrs.setRunOnSlave(false); + rrs.setCanRunInReadDB(false); + } + } + }else{ // 强制走 master + // 默认获得的是 writeSource,也就是 走master + LOGGER.debug("rrs.getRunOnSlave() " + rrs.getRunOnSlave()); + PhysicalDatasource writeSource=dbPool.getSource(); + //记录写节点写负载值 + writeSource.setReadCount(); + writeSource.getConnection(schema, autoCommit, + handler, attachment); + rrs.setCanRunInReadDB(false); + } + }else{ // 没有 /*db_type=master/slave*/ 注解,按照原来的处理方式 + LOGGER.debug("rrs.getRunOnSlave() " + rrs.getRunOnSlave()); // null + if (rrs.canRunnINReadDB(autoCommit)) { + dbPool.getRWBanlanceCon(schema,autoCommit, handler, attachment, this.database); + } else { + PhysicalDatasource writeSource =dbPool.getSource(); + //记录写节点写负载值 + writeSource.setWriteCount(); + writeSource.getConnection(schema, autoCommit, + handler, attachment); + } + } + + } else { + throw new IllegalArgumentException("Invalid DataSource:" + dbPool.getActivedIndex()); + } + } + +// public void getConnection(String schema,boolean autoCommit, RouteResultsetNode rrs, +// ResponseHandler handler, Object attachment) throws Exception { +// checkRequest(schema); +// if (dbPool.isInitSuccess()) { +// if (rrs.canRunnINReadDB(autoCommit)) { +// dbPool.getRWBanlanceCon(schema,autoCommit, handler, attachment, +// 
this.database); +// } else { +// dbPool.getSource().getConnection(schema,autoCommit, handler, attachment); +// } +// +// } else { +// throw new IllegalArgumentException("Invalid DataSource:" +// + dbPool.getActivedIndex()); +// } +// } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/datasource/PhysicalDBPool.java b/src/main/java/io/mycat/backend/datasource/PhysicalDBPool.java new file mode 100644 index 000000000..56968f2c8 --- /dev/null +++ b/src/main/java/io/mycat/backend/datasource/PhysicalDBPool.java @@ -0,0 +1,721 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.backend.datasource; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.locks.ReentrantLock; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.BackendConnection; +import io.mycat.backend.heartbeat.DBHeartbeat; +import io.mycat.backend.mysql.nio.handler.GetConnectionHandler; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.config.Alarms; +import io.mycat.config.model.DataHostConfig; + +public class PhysicalDBPool { + + protected static final Logger LOGGER = LoggerFactory.getLogger(PhysicalDBPool.class); + + public static final int BALANCE_NONE = 0; + public static final int BALANCE_ALL_BACK = 1; + public static final int BALANCE_ALL = 2; + public static final int BALANCE_ALL_READ = 3; + + public static final int WRITE_ONLYONE_NODE = 0; + public static final int WRITE_RANDOM_NODE = 1; + public static final int WRITE_ALL_NODE = 2; + + public static final long LONG_TIME = 300000; + public static final int WEIGHT = 0; + + private final String hostName; + + protected PhysicalDatasource[] writeSources; + protected Map readSources; + + protected volatile int activedIndex; + protected volatile boolean initSuccess; + + protected final ReentrantLock switchLock = new ReentrantLock(); + private final Collection allDs; + private final int banlance; + private final int writeType; + private final Random random = new Random(); + private final Random wnrandom = new Random(); + private String[] schemas; + private final DataHostConfig dataHostConfig; + private String slaveIDs; + + public PhysicalDBPool(String name, DataHostConfig conf, + PhysicalDatasource[] writeSources, + Map readSources, int balance, + int writeType) { + + this.hostName = name; + this.dataHostConfig = 
conf; + this.writeSources = writeSources; + this.banlance = balance; + this.writeType = writeType; + + Iterator> entryItor = readSources.entrySet().iterator(); + while (entryItor.hasNext()) { + PhysicalDatasource[] values = entryItor.next().getValue(); + if (values.length == 0) { + entryItor.remove(); + } + } + + this.readSources = readSources; + this.allDs = this.genAllDataSources(); + + LOGGER.info("total resouces of dataHost " + this.hostName + " is :" + allDs.size()); + + setDataSourceProps(); + } + + public int getWriteType() { + return writeType; + } + + private void setDataSourceProps() { + for (PhysicalDatasource ds : this.allDs) { + ds.setDbPool(this); + } + } + + public PhysicalDatasource findDatasouce(BackendConnection exitsCon) { + for (PhysicalDatasource ds : this.allDs) { + if ((ds.isReadNode() == exitsCon.isFromSlaveDB()) + && ds.isMyConnection(exitsCon)) { + return ds; + } + } + + LOGGER.warn("can't find connection in pool " + this.hostName + " con:" + exitsCon); + return null; + } + + public String getSlaveIDs() { + return slaveIDs; + } + + public void setSlaveIDs(String slaveIDs) { + this.slaveIDs = slaveIDs; + } + + public String getHostName() { + return hostName; + } + + /** + * all write datanodes + * @return + */ + public PhysicalDatasource[] getSources() { + return writeSources; + } + + public PhysicalDatasource getSource() { + + switch (writeType) { + case WRITE_ONLYONE_NODE: { + return writeSources[activedIndex]; + } + case WRITE_RANDOM_NODE: { + + int index = Math.abs(wnrandom.nextInt(Integer.MAX_VALUE)) % writeSources.length; + PhysicalDatasource result = writeSources[index]; + if (!this.isAlive(result)) { + + // find all live nodes + ArrayList alives = new ArrayList(writeSources.length - 1); + for (int i = 0; i < writeSources.length; i++) { + if (i != index + && this.isAlive(writeSources[i])) { + alives.add(i); + } + } + + if (alives.isEmpty()) { + result = writeSources[0]; + } else { + // random select one + index = 
Math.abs(wnrandom.nextInt(Integer.MAX_VALUE)) % alives.size(); + result = writeSources[alives.get(index)]; + + } + } + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("select write source " + result.getName() + + " for dataHost:" + this.getHostName()); + } + return result; + } + default: { + throw new java.lang.IllegalArgumentException("writeType is " + + writeType + " ,so can't return one write datasource "); + } + } + + } + + public int getActivedIndex() { + return activedIndex; + } + + public boolean isInitSuccess() { + return initSuccess; + } + + public int next(int i) { + if (checkIndex(i)) { + return (++i == writeSources.length) ? 0 : i; + } else { + return 0; + } + } + + public boolean switchSource(int newIndex, boolean isAlarm, String reason) { + if (this.writeType != PhysicalDBPool.WRITE_ONLYONE_NODE || !checkIndex(newIndex)) { + return false; + } + + final ReentrantLock lock = this.switchLock; + lock.lock(); + try { + int current = activedIndex; + if (current != newIndex) { + + // switch index + activedIndex = newIndex; + + // init again + this.init(activedIndex); + + // clear all connections + this.getSources()[current].clearCons("switch datasource"); + + // write log + LOGGER.warn(switchMessage(current, newIndex, false, reason)); + + return true; + } + } finally { + lock.unlock(); + } + return false; + } + + private String switchMessage(int current, int newIndex, boolean alarm, String reason) { + StringBuilder s = new StringBuilder(); + if (alarm) { + s.append(Alarms.DATANODE_SWITCH); + } + s.append("[Host=").append(hostName).append(",result=[").append(current).append("->"); + s.append(newIndex).append("],reason=").append(reason).append(']'); + return s.toString(); + } + + private int loop(int i) { + return i < writeSources.length ? 
i : (i - writeSources.length); + } + + public void init(int index) { + + if (!checkIndex(index)) { + index = 0; + } + + int active = -1; + for (int i = 0; i < writeSources.length; i++) { + int j = loop(i + index); + if ( initSource(j, writeSources[j]) ) { + + //不切换-1时,如果主写挂了 不允许切换过去 + boolean isNotSwitchDs = ( dataHostConfig.getSwitchType() == DataHostConfig.NOT_SWITCH_DS ); + if ( isNotSwitchDs && j > 0 ) { + break; + } + + active = j; + activedIndex = active; + initSuccess = true; + LOGGER.info(getMessage(active, " init success")); + + if (this.writeType == WRITE_ONLYONE_NODE) { + // only init one write datasource + MycatServer.getInstance().saveDataHostIndex(hostName, activedIndex); + break; + } + } + } + + if (!checkIndex(active)) { + initSuccess = false; + StringBuilder s = new StringBuilder(); + s.append(Alarms.DEFAULT).append(hostName).append(" init failure"); + LOGGER.error(s.toString()); + } + } + + private boolean checkIndex(int i) { + return i >= 0 && i < writeSources.length; + } + + private String getMessage(int index, String info) { + return new StringBuilder().append(hostName).append(" index:").append(index).append(info).toString(); + } + + private boolean initSource(int index, PhysicalDatasource ds) { + int initSize = ds.getConfig().getMinCon(); + + LOGGER.info("init backend myqsl source ,create connections total " + initSize + " for " + ds.getName() + " index :" + index); + + CopyOnWriteArrayList list = new CopyOnWriteArrayList(); + GetConnectionHandler getConHandler = new GetConnectionHandler(list, initSize); + // long start = System.currentTimeMillis(); + // long timeOut = start + 5000 * 1000L; + + for (int i = 0; i < initSize; i++) { + try { + ds.getConnection(this.schemas[i % schemas.length], true, getConHandler, null); + } catch (Exception e) { + LOGGER.warn(getMessage(index, " init connection error."), e); + } + } + long timeOut = System.currentTimeMillis() + 60 * 1000; + + // waiting for finish + while (!getConHandler.finished() && 
(System.currentTimeMillis() < timeOut)) { + try { + Thread.sleep(100); + + } catch (InterruptedException e) { + LOGGER.error("initError", e); + } + } + LOGGER.info("init result :" + getConHandler.getStatusInfo()); +// for (BackendConnection c : list) { +// c.release(); +// } + return !list.isEmpty(); + } + + public void doHeartbeat() { + + + if (writeSources == null || writeSources.length == 0) { + return; + } + + for (PhysicalDatasource source : this.allDs) { + + if (source != null) { + source.doHeartbeat(); + } else { + StringBuilder s = new StringBuilder(); + s.append(Alarms.DEFAULT).append(hostName).append(" current dataSource is null!"); + LOGGER.error(s.toString()); + } + } + + } + + /** + * back physical connection heartbeat check + */ + public void heartbeatCheck(long ildCheckPeriod) { + + for (PhysicalDatasource ds : allDs) { + // only readnode or all write node or writetype=WRITE_ONLYONE_NODE + // and current write node will check + if (ds != null + && (ds.getHeartbeat().getStatus() == DBHeartbeat.OK_STATUS) + && (ds.isReadNode() + || (this.writeType != WRITE_ONLYONE_NODE) + || (this.writeType == WRITE_ONLYONE_NODE + && ds == this.getSource()))) { + + ds.heatBeatCheck(ds.getConfig().getIdleTimeout(), ildCheckPeriod); + } + } + } + + public void startHeartbeat() { + for (PhysicalDatasource source : this.allDs) { + source.startHeartbeat(); + } + } + + public void stopHeartbeat() { + for (PhysicalDatasource source : this.allDs) { + source.stopHeartbeat(); + } + } + + /** + * 强制清除 dataSources + * @param reason + */ + public void clearDataSources(String reason) { + LOGGER.info("clear datasours of pool " + this.hostName); + for (PhysicalDatasource source : this.allDs) { + LOGGER.info("clear datasoure of pool " + this.hostName + " ds:" + source.getConfig()); + source.clearCons(reason); + source.stopHeartbeat(); + } + } + + public Collection genAllDataSources() { + + LinkedList allSources = new LinkedList(); + for (PhysicalDatasource ds : writeSources) { + if (ds 
!= null) { + allSources.add(ds); + } + } + + for (PhysicalDatasource[] dataSources : this.readSources.values()) { + for (PhysicalDatasource ds : dataSources) { + if (ds != null) { + allSources.add(ds); + } + } + } + return allSources; + } + + public Collection getAllDataSources() { + return this.allDs; + } + + /** + * return connection for read balance + * + * @param handler + * @param attachment + * @param database + * @throws Exception + */ + public void getRWBanlanceCon(String schema, boolean autocommit, + ResponseHandler handler, Object attachment, String database) throws Exception { + + PhysicalDatasource theNode = null; + ArrayList okSources = null; + switch (banlance) { + case BALANCE_ALL_BACK: { + // all read nodes and the standard by masters + okSources = getAllActiveRWSources(true, false, checkSlaveSynStatus()); + if (okSources.isEmpty()) { + theNode = this.getSource(); + + } else { + theNode = randomSelect(okSources); + } + break; + } + case BALANCE_ALL: { + okSources = getAllActiveRWSources(true, true, checkSlaveSynStatus()); + theNode = randomSelect(okSources); + break; + } + case BALANCE_ALL_READ: { + okSources = getAllActiveRWSources(false, false, checkSlaveSynStatus()); + theNode = randomSelect(okSources); + break; + } + case BALANCE_NONE: + default: + // return default write data source + theNode = this.getSource(); + } + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("select read source " + theNode.getName() + " for dataHost:" + this.getHostName()); + } + //统计节点读操作次数 + theNode.setReadCount(); + theNode.getConnection(schema, autocommit, handler, attachment); + } + + /** + * slave 读负载均衡,也就是 readSource 之间实现负载均衡 + * @param schema + * @param autocommit + * @param handler + * @param attachment + * @param database + * @throws Exception + */ + public void getReadBanlanceCon(String schema, boolean autocommit, ResponseHandler handler, + Object attachment, String database)throws Exception { + PhysicalDatasource theNode = null; + ArrayList okSources = null; 
+ okSources = getAllActiveRWSources(false, false, checkSlaveSynStatus()); + theNode = randomSelect(okSources); + //统计节点读操作次数 + theNode.setReadCount(); + theNode.getConnection(schema, autocommit, handler, attachment); + } + + /** + * 从 writeHost 下面的 readHost中随机获取一个 connection, 用于slave注解 + * @param schema + * @param autocommit + * @param handler + * @param attachment + * @param database + * @return + * @throws Exception + */ + public boolean getReadCon(String schema, boolean autocommit, ResponseHandler handler, + Object attachment, String database)throws Exception { + PhysicalDatasource theNode = null; + + LOGGER.debug("!readSources.isEmpty() " + !readSources.isEmpty()); + if (!readSources.isEmpty()) { + int index = Math.abs(random.nextInt(Integer.MAX_VALUE)) % readSources.size(); + PhysicalDatasource[] allSlaves = this.readSources.get(index); +// System.out.println("allSlaves.length " + allSlaves.length); + if (allSlaves != null) { + index = Math.abs(random.nextInt(Integer.MAX_VALUE)) % readSources.size(); + PhysicalDatasource slave = allSlaves[index]; + + for (int i=0; i okSources) { + + if (okSources.isEmpty()) { + return this.getSource(); + + } else { + + int length = okSources.size(); // 总个数 + int totalWeight = 0; // 总权重 + boolean sameWeight = true; // 权重是否都一样 + for (int i = 0; i < length; i++) { + int weight = okSources.get(i).getConfig().getWeight(); + totalWeight += weight; // 累计总权重 + if (sameWeight && i > 0 + && weight != okSources.get(i-1).getConfig().getWeight() ) { // 计算所有权重是否一样 + sameWeight = false; + } + } + + if (totalWeight > 0 && !sameWeight ) { + + // 如果权重不相同且权重大于0则按总权重数随机 + int offset = random.nextInt(totalWeight); + + // 并确定随机值落在哪个片断上 + for (int i = 0; i < length; i++) { + offset -= okSources.get(i).getConfig().getWeight(); + if (offset < 0) { + return okSources.get(i); + } + } + } + + // 如果权重相同或权重为0则均等随机 + return okSources.get( random.nextInt(length) ); + + //int index = Math.abs(random.nextInt()) % okSources.size(); + //return 
okSources.get(index); + } + } + + // + public int getBalance() { + return banlance; + } + + private boolean isAlive(PhysicalDatasource theSource) { + return (theSource.getHeartbeat().getStatus() == DBHeartbeat.OK_STATUS); + } + + private boolean canSelectAsReadNode(PhysicalDatasource theSource) { + + Integer slaveBehindMaster = theSource.getHeartbeat().getSlaveBehindMaster(); + int dbSynStatus = theSource.getHeartbeat().getDbSynStatus(); + + if ( slaveBehindMaster == null || dbSynStatus == DBHeartbeat.DB_SYN_ERROR) { + return false; + } + boolean isSync = dbSynStatus == DBHeartbeat.DB_SYN_NORMAL; + boolean isNotDelay = slaveBehindMaster < this.dataHostConfig.getSlaveThreshold(); + return isSync && isNotDelay; + } + + /** + * return all backup write sources + * + * @param includeWriteNode if include write nodes + * @param includeCurWriteNode if include current active write node. invalid when includeWriteNode is false + * @param filterWithSlaveThreshold + * + * @return + */ + private ArrayList getAllActiveRWSources( + boolean includeWriteNode, boolean includeCurWriteNode, boolean filterWithSlaveThreshold) { + + int curActive = activedIndex; + ArrayList okSources = new ArrayList(this.allDs.size()); + + for (int i = 0; i < this.writeSources.length; i++) { + PhysicalDatasource theSource = writeSources[i]; + if (isAlive(theSource)) {// write node is active + + if (includeWriteNode) { + boolean isCurWriteNode = ( i == curActive ); + if ( isCurWriteNode && includeCurWriteNode == false) { + // not include cur active source + } else if (filterWithSlaveThreshold && theSource.isSalveOrRead() ) { + boolean selected = canSelectAsReadNode(theSource); + if ( selected ) { + okSources.add(theSource); + } else { + continue; + } + } else { + okSources.add(theSource); + } + } + + if (!readSources.isEmpty()) { + // check all slave nodes + PhysicalDatasource[] allSlaves = this.readSources.get(i); + if (allSlaves != null) { + for (PhysicalDatasource slave : allSlaves) { + if 
(isAlive(slave)) { + if (filterWithSlaveThreshold) { + boolean selected = canSelectAsReadNode(slave); + if ( selected ) { + okSources.add(slave); + } else { + continue; + } + } else { + okSources.add(slave); + } + } + } + } + } + + } else { + + // TODO : add by zhuam + // 如果写节点不OK, 也要保证临时的读服务正常 + if ( this.dataHostConfig.isTempReadHostAvailable() + && !readSources.isEmpty()) { + + // check all slave nodes + PhysicalDatasource[] allSlaves = this.readSources.get(i); + if (allSlaves != null) { + for (PhysicalDatasource slave : allSlaves) { + if (isAlive(slave)) { + + if (filterWithSlaveThreshold) { + if (canSelectAsReadNode(slave)) { + okSources.add(slave); + } else { + continue; + } + + } else { + okSources.add(slave); + } + } + } + } + } + } + + } + return okSources; + } + + public String[] getSchemas() { + return schemas; + } + + public void setSchemas(String[] mySchemas) { + this.schemas = mySchemas; + } +} diff --git a/src/main/java/io/mycat/backend/datasource/PhysicalDatasource.java b/src/main/java/io/mycat/backend/datasource/PhysicalDatasource.java new file mode 100644 index 000000000..6851f0900 --- /dev/null +++ b/src/main/java/io/mycat/backend/datasource/PhysicalDatasource.java @@ -0,0 +1,635 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.backend.datasource; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicLong; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.BackendConnection; +import io.mycat.backend.ConMap; +import io.mycat.backend.ConQueue; +import io.mycat.backend.heartbeat.DBHeartbeat; +import io.mycat.backend.mysql.nio.MySQLConnection; +import io.mycat.backend.mysql.nio.handler.ConnectionHeartBeatHandler; +import io.mycat.backend.mysql.nio.handler.DelegateResponseHandler; +import io.mycat.backend.mysql.nio.handler.NewConnectionRespHandler; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.config.Alarms; +import io.mycat.config.model.DBHostConfig; +import io.mycat.config.model.DataHostConfig; +import io.mycat.util.TimeUtil; + + +public abstract class PhysicalDatasource { + + private static final Logger LOGGER = LoggerFactory.getLogger(PhysicalDatasource.class); + + private final String name; + private final int size; + private final DBHostConfig config; + private final ConMap conMap = new ConMap(); + private DBHeartbeat heartbeat; + private final boolean readNode; + private volatile long heartbeatRecoveryTime; + private final DataHostConfig hostConfig; + private final ConnectionHeartBeatHandler conHeartBeatHanler = new ConnectionHeartBeatHandler(); + private PhysicalDBPool dbPool; + + // 添加DataSource读计数 + private AtomicLong readCount = new 
AtomicLong(0); + + // 添加DataSource写计数 + private AtomicLong writeCount = new AtomicLong(0); + + + /** + * edit by dingw at 2017.06.08 + * @see https://github.com/MyCATApache/Mycat-Server/issues/1524 + * + */ + // 当前活动连接 + //private volatile AtomicInteger activeCount = new AtomicInteger(0); + + // 当前存活的总连接数,为什么不直接使用activeCount,主要是因为连接的创建是异步完成的 + //private volatile AtomicInteger totalConnection = new AtomicInteger(0); + + /** + * 由于在Mycat中,returnCon被多次调用(与takeCon并没有成对调用)导致activeCount、totalConnection容易出现负数 + */ + //private static final String TAKE_CONNECTION_FLAG = "1"; + //private ConcurrentMap takeConnectionContext = new ConcurrentHashMap<>(); + + + + public PhysicalDatasource(DBHostConfig config, DataHostConfig hostConfig, + boolean isReadNode) { + this.size = config.getMaxCon(); + this.config = config; + this.name = config.getHostName(); + this.hostConfig = hostConfig; + heartbeat = this.createHeartBeat(); + this.readNode = isReadNode; + } + + public boolean isMyConnection(BackendConnection con) { + if (con instanceof MySQLConnection) { + return ((MySQLConnection) con).getPool() == this; + } else { + return false; + } + + } + + public long getReadCount() { + return readCount.get(); + } + + public void setReadCount() { + readCount.addAndGet(1); + } + + public long getWriteCount() { + return writeCount.get(); + } + + public void setWriteCount() { + writeCount.addAndGet(1); + } + + public DataHostConfig getHostConfig() { + return hostConfig; + } + + public boolean isReadNode() { + return readNode; + } + + public int getSize() { + return size; + } + + public void setDbPool(PhysicalDBPool dbPool) { + this.dbPool = dbPool; + } + + public PhysicalDBPool getDbPool() { + return dbPool; + } + + public abstract DBHeartbeat createHeartBeat(); + + public String getName() { + return name; + } + + public long getExecuteCount() { + long executeCount = 0; + for (ConQueue queue : conMap.getAllConQueue()) { + executeCount += queue.getExecuteCount(); + + } + return executeCount; + } + 
+ public long getExecuteCountForSchema(String schema) { + return conMap.getSchemaConQueue(schema).getExecuteCount(); + + } + + public int getActiveCountForSchema(String schema) { + return conMap.getActiveCountForSchema(schema, this); + } + + public int getIdleCountForSchema(String schema) { + ConQueue queue = conMap.getSchemaConQueue(schema); + int total = 0; + total += queue.getAutoCommitCons().size() + + queue.getManCommitCons().size(); + return total; + } + + public DBHeartbeat getHeartbeat() { + return heartbeat; + } + + public int getIdleCount() { + int total = 0; + for (ConQueue queue : conMap.getAllConQueue()) { + total += queue.getAutoCommitCons().size() + + queue.getManCommitCons().size(); + } + return total; + } + + /** + * 该方法也不是非常精确,因为该操作也不是一个原子操作,相对getIdleCount高效与准确一些 + * @return + */ +// public int getIdleCountSafe() { +// return getTotalConnectionsSafe() - getActiveCountSafe(); +// } + + /** + * 是否需要继续关闭空闲连接 + * @return + */ +// private boolean needCloseIdleConnection() { +// return getIdleCountSafe() > hostConfig.getMinCon(); +// } + + private boolean validSchema(String schema) { + String theSchema = schema; + return theSchema != null && !"".equals(theSchema) + && !"snyn...".equals(theSchema); + } + + private void checkIfNeedHeartBeat( + LinkedList heartBeatCons, ConQueue queue, + ConcurrentLinkedQueue checkLis, + long hearBeatTime, long hearBeatTime2) { + int maxConsInOneCheck = 10; + Iterator checkListItor = checkLis.iterator(); + while (checkListItor.hasNext()) { + BackendConnection con = checkListItor.next(); + if (con.isClosedOrQuit()) { + checkListItor.remove(); + continue; + } + if (validSchema(con.getSchema())) { + if (con.getLastTime() < hearBeatTime + && heartBeatCons.size() < maxConsInOneCheck) { + if(checkLis.remove(con)) { + //如果移除成功,则放入到心跳连接中,如果移除失败,说明该连接已经被其他线程使用,忽略本次心跳检测 + con.setBorrowed(true); + heartBeatCons.add(con); + } + } + } else if (con.getLastTime() < hearBeatTime2) { + // not valid schema conntion should close for idle + 
// exceed 2*conHeartBeatPeriod + // 同样,这里也需要先移除,避免被业务连接 + if(checkLis.remove(con)) { + con.close(" heart beate idle "); + } + } + + } + + } + + public int getIndex() { + int currentIndex = 0; + for (int i = 0; i < dbPool.getSources().length; i++) { + PhysicalDatasource writeHostDatasource = dbPool.getSources()[i]; + if (writeHostDatasource.getName().equals(getName())) { + currentIndex = i; + break; + } + } + return currentIndex; + } + + public boolean isSalveOrRead() { + int currentIndex = getIndex(); + if (currentIndex != dbPool.activedIndex || this.readNode) { + return true; + } + return false; + } + + public void heatBeatCheck(long timeout, long conHeartBeatPeriod) { +// int ildeCloseCount = hostConfig.getMinCon() * 3; + int maxConsInOneCheck = 5; + LinkedList heartBeatCons = new LinkedList(); + + long hearBeatTime = TimeUtil.currentTimeMillis() - conHeartBeatPeriod; + long hearBeatTime2 = TimeUtil.currentTimeMillis() - 2 + * conHeartBeatPeriod; + for (ConQueue queue : conMap.getAllConQueue()) { + checkIfNeedHeartBeat(heartBeatCons, queue, + queue.getAutoCommitCons(), hearBeatTime, hearBeatTime2); + if (heartBeatCons.size() < maxConsInOneCheck) { + checkIfNeedHeartBeat(heartBeatCons, queue, + queue.getManCommitCons(), hearBeatTime, hearBeatTime2); + } else if (heartBeatCons.size() >= maxConsInOneCheck) { + break; + } + } + + if (!heartBeatCons.isEmpty()) { + for (BackendConnection con : heartBeatCons) { + conHeartBeatHanler + .doHeartBeat(con, hostConfig.getHearbeatSQL()); + } + } + + // check if there has timeouted heatbeat cons + conHeartBeatHanler.abandTimeOuttedConns(); + int idleCons = getIdleCount(); + int activeCons = this.getActiveCount(); + int createCount = (hostConfig.getMinCon() - idleCons) / 3; + // create if idle too little + if ((createCount > 0) && (idleCons + activeCons < size) + && (idleCons < hostConfig.getMinCon())) { + createByIdleLitte(idleCons, createCount); + } else if (idleCons > hostConfig.getMinCon()) { + closeByIdleMany(idleCons - 
hostConfig.getMinCon()); + } else { + int activeCount = this.getActiveCount(); + if (activeCount > size) { + StringBuilder s = new StringBuilder(); + s.append(Alarms.DEFAULT).append("DATASOURCE EXCEED [name=") + .append(name).append(",active="); + s.append(activeCount).append(",size=").append(size).append(']'); + LOGGER.warn(s.toString()); + } + } + } + + /** + * + * @param ildeCloseCount + * 首先,从已创建的连接中选择本次心跳需要关闭的空闲连接数(由当前连接连接数-减去配置的最小连接数。 + * 然后依次关闭这些连接。由于连接空闲心跳检测与业务是同时并发的,在心跳关闭阶段,可能有连接被使用,导致需要关闭的空闲连接数减少. + * + * 所以每次关闭新连接时,先判断当前空闲连接数是否大于配置的最少空闲连接,如果为否,则结束本次关闭空闲连接操作。 + * 该方法修改之前: + * 首先从ConnMap中获取 ildeCloseCount 个连接,然后关闭;在关闭中,可能又有连接被使用,导致可能多关闭一些链接, + * 导致相对频繁的创建新连接和关闭连接 + * + * 该方法修改之后: + * ildeCloseCount 为预期要关闭的连接 + * 使用循环操作,首先在关闭之前,先再一次判断是否需要关闭连接,然后每次从ConnMap中获取一个空闲连接,然后进行关闭 + * edit by dingw at 2017.06.16 + */ + private void closeByIdleMany(int ildeCloseCount) { + LOGGER.info("too many ilde cons ,close some for datasouce " + name); + List readyCloseCons = new ArrayList( + ildeCloseCount); + for (ConQueue queue : conMap.getAllConQueue()) { + readyCloseCons.addAll(queue.getIdleConsToClose(ildeCloseCount)); + if (readyCloseCons.size() >= ildeCloseCount) { + break; + } + } + + for (BackendConnection idleCon : readyCloseCons) { + if (idleCon.isBorrowed()) { + LOGGER.warn("find idle con is using " + idleCon); + } + idleCon.close("too many idle con"); + } + +// LOGGER.info("too many ilde cons ,close some for datasouce " + name); +// +// Iterator conQueueIt = conMap.getAllConQueue().iterator(); +// ConQueue queue = null; +// if(conQueueIt.hasNext()) { +// queue = conQueueIt.next(); +// } +// +// for(int i = 0; i < ildeCloseCount; i ++ ) { +// +// if(!needCloseIdleConnection() || queue == null) { +// break; //如果当时空闲连接数没有超过最小配置连接数,则结束本次连接关闭 +// } +// +// LOGGER.info("cur conns:" + getTotalConnectionsSafe() ); +// +// BackendConnection idleCon = queue.takeIdleCon(false); +// +// while(idleCon == null && conQueueIt.hasNext()) { +// queue = conQueueIt.next(); +// idleCon = 
queue.takeIdleCon(false); +// } +// +// if(idleCon == null) { +// break; +// } +// +// if (idleCon.isBorrowed() ) { +// LOGGER.warn("find idle con is using " + idleCon); +// } +// idleCon.close("too many idle con"); +// +// } + + } + + private void createByIdleLitte(int idleCons, int createCount) { + LOGGER.info("create connections ,because idle connection not enough ,cur is " + + idleCons + + ", minCon is " + + hostConfig.getMinCon() + + " for " + + name); + NewConnectionRespHandler simpleHandler = new NewConnectionRespHandler(); + + final String[] schemas = dbPool.getSchemas(); + for (int i = 0; i < createCount; i++) { + if (this.getActiveCount() + this.getIdleCount() >= size) { + break; + } + try { + // creat new connection + this.createNewConnection(simpleHandler, null, schemas[i + % schemas.length]); + } catch (IOException e) { + LOGGER.warn("create connection err " + e); + } + + } + } + + public int getActiveCount() { + return this.conMap.getActiveCountForDs(this); + } + + + + public void clearCons(String reason) { + this.conMap.clearConnections(reason, this); + } + + public void startHeartbeat() { + heartbeat.start(); + } + + public void stopHeartbeat() { + heartbeat.stop(); + } + + public void doHeartbeat() { + // 未到预定恢复时间,不执行心跳检测。 + if (TimeUtil.currentTimeMillis() < heartbeatRecoveryTime) { + return; + } + + if (!heartbeat.isStop()) { + try { + heartbeat.heartbeat(); + } catch (Exception e) { + LOGGER.error(name + " heartbeat error.", e); + } + } + } + + private BackendConnection takeCon(BackendConnection conn, + final ResponseHandler handler, final Object attachment, + String schema) { + + conn.setBorrowed(true); + +// if(takeConnectionContext.putIfAbsent(conn.getId(), TAKE_CONNECTION_FLAG) == null) { +// incrementActiveCountSafe(); +// } + + + if (!conn.getSchema().equals(schema)) { + // need do schema syn in before sql send + conn.setSchema(schema); + } + ConQueue queue = conMap.getSchemaConQueue(schema); + queue.incExecuteCount(); + 
conn.setAttachment(attachment); + conn.setLastTime(System.currentTimeMillis()); // 每次取连接的时候,更新下lasttime,防止在前端连接检查的时候,关闭连接,导致sql执行失败 + handler.connectionAcquired(conn); + return conn; + } + + private void createNewConnection(final ResponseHandler handler, + final Object attachment, final String schema) throws IOException { + // aysn create connection + MycatServer.getInstance().getBusinessExecutor().execute(new Runnable() { + public void run() { + try { + createNewConnection(new DelegateResponseHandler(handler) { + @Override + public void connectionError(Throwable e, BackendConnection conn) { + //decrementTotalConnectionsSafe(); // 如果创建连接失败,将当前连接数减1 + handler.connectionError(e, conn); + } + + @Override + public void connectionAcquired(BackendConnection conn) { + takeCon(conn, handler, attachment, schema); + } + }, schema); + } catch (IOException e) { + handler.connectionError(e, null); + } + } + }); + } + + public void getConnection(String schema, boolean autocommit, + final ResponseHandler handler, final Object attachment) + throws IOException { + + // 从当前连接map中拿取已建立好的后端连接 + BackendConnection con = this.conMap.tryTakeCon(schema, autocommit); + if (con != null) { + //如果不为空,则绑定对应前端请求的handler + takeCon(con, handler, attachment, schema); + return; + + } else { // this.getActiveCount并不是线程安全的(严格上说该方法获取数量不准确), +// int curTotalConnection = this.totalConnection.get(); +// while(curTotalConnection + 1 <= size) { +// +// if (this.totalConnection.compareAndSet(curTotalConnection, curTotalConnection + 1)) { +// LOGGER.info("no ilde connection in pool,create new connection for " + this.name + " of schema " + schema); +// createNewConnection(handler, attachment, schema); +// return; +// } +// +// curTotalConnection = this.totalConnection.get(); //CAS更新失败,则重新判断当前连接是否超过最大连接数 +// +// } +// +// // 如果后端连接不足,立即失败,故直接抛出连接数超过最大连接异常 +// LOGGER.error("the max activeConnnections size can not be max than maxconnections:" + curTotalConnection); +// throw new IOException("the max 
activeConnnections size can not be max than maxconnections:" + curTotalConnection); + + int activeCons = this.getActiveCount();// 当前最大活动连接 + if (activeCons + 1 > size) {// 下一个连接大于最大连接数 + LOGGER.error("the max activeConnnections size can not be max than maxconnections"); + throw new IOException("the max activeConnnections size can not be max than maxconnections"); + } else { // create connection + LOGGER.info("no ilde connection in pool,create new connection for " + this.name + " of schema " + schema); + createNewConnection(handler, attachment, schema); + } + } + } + + /** + * 是否超过最大连接数 + * @return + */ +// private boolean exceedMaxConnections() { +// return this.totalConnection.get() + 1 > size; +// } +// +// public int decrementActiveCountSafe() { +// return this.activeCount.decrementAndGet(); +// } +// +// public int incrementActiveCountSafe() { +// return this.activeCount.incrementAndGet(); +// } +// +// public int getActiveCountSafe() { +// return this.activeCount.get(); +// } +// +// public int getTotalConnectionsSafe() { +// return this.totalConnection.get(); +// } +// +// public int decrementTotalConnectionsSafe() { +// return this.totalConnection.decrementAndGet(); +// } +// +// public int incrementTotalConnectionSafe() { +// return this.totalConnection.incrementAndGet(); +// } + + private void returnCon(BackendConnection c) { + + c.setAttachment(null); + c.setBorrowed(false); + c.setLastTime(TimeUtil.currentTimeMillis()); + ConQueue queue = this.conMap.getSchemaConQueue(c.getSchema()); + + boolean ok = false; + if (c.isAutocommit()) { + ok = queue.getAutoCommitCons().offer(c); + } else { + ok = queue.getManCommitCons().offer(c); + } + +// if(c.getId() > 0 && takeConnectionContext.remove(c.getId(), TAKE_CONNECTION_FLAG) ) { +// decrementActiveCountSafe(); +// } + + if(!ok) { + LOGGER.warn("can't return to pool ,so close con " + c); + c.close("can't return to pool "); + + } + + } + + public void releaseChannel(BackendConnection c) { + if 
(LOGGER.isDebugEnabled()) { + LOGGER.debug("release channel " + c); + } + // release connection + returnCon(c); + } + + public void connectionClosed(BackendConnection conn) { + ConQueue queue = this.conMap.getSchemaConQueue(conn.getSchema()); + if (queue != null ) { + queue.removeCon(conn); + } + +// decrementTotalConnectionsSafe(); + } + + /** + * 创建新连接 + */ + public abstract void createNewConnection(ResponseHandler handler, String schema) throws IOException; + + /** + * 测试连接,用于初始化及热更新配置检测 + */ + public abstract boolean testConnection(String schema) throws IOException; + + public long getHeartbeatRecoveryTime() { + return heartbeatRecoveryTime; + } + + public void setHeartbeatRecoveryTime(long heartbeatRecoveryTime) { + this.heartbeatRecoveryTime = heartbeatRecoveryTime; + } + + public DBHostConfig getConfig() { + return config; + } + + public boolean isAlive() { + return getHeartbeat().getStatus() == DBHeartbeat.OK_STATUS; + } +} diff --git a/src/main/java/io/mycat/backend/heartbeat/DBHeartbeat.java b/src/main/java/io/mycat/backend/heartbeat/DBHeartbeat.java index cb265c2a1..285517438 100644 --- a/src/main/java/io/mycat/backend/heartbeat/DBHeartbeat.java +++ b/src/main/java/io/mycat/backend/heartbeat/DBHeartbeat.java @@ -23,10 +23,11 @@ */ package io.mycat.backend.heartbeat; -import io.mycat.backend.HeartbeatRecorder; - import java.util.concurrent.atomic.AtomicBoolean; +import io.mycat.statistic.DataSourceSyncRecorder; +import io.mycat.statistic.HeartbeatRecorder; + public abstract class DBHeartbeat { public static final int DB_SYN_ERROR = -1; public static final int DB_SYN_NORMAL = 1; @@ -46,6 +47,7 @@ public abstract class DBHeartbeat { protected int errorCount; protected volatile int status; protected final HeartbeatRecorder recorder = new HeartbeatRecorder(); + protected final DataSourceSyncRecorder asynRecorder = new DataSourceSyncRecorder(); private volatile Integer slaveBehindMaster; private volatile int dbSynStatus = DB_SYN_NORMAL; @@ -124,4 +126,8 @@ 
public boolean isNeedHeartbeat() { return heartbeatSQL != null; } + public DataSourceSyncRecorder getAsynRecorder() { + return this.asynRecorder; + } + } \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyChecker.java b/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyChecker.java index 2756fa1ab..ed8699041 100644 --- a/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyChecker.java +++ b/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyChecker.java @@ -23,7 +23,7 @@ */ package io.mycat.backend.heartbeat; -import io.mycat.backend.MySQLDataSource; +import io.mycat.backend.mysql.nio.MySQLDataSource; import io.mycat.server.interceptor.impl.GlobalTableUtil; import io.mycat.sqlengine.OneRawSQLQueryResultHandler; import io.mycat.sqlengine.SQLJob; @@ -54,9 +54,14 @@ public class MySQLConsistencyChecker{ private String maxSQL; private String tableName; // global table name private long beginTime; - private String columnExistSQL = "select count(*) as "+GlobalTableUtil.INNER_COLUMN - + " from information_schema.columns where column_name='" - + GlobalTableUtil.GLOBAL_TABLE_MYCAT_COLUMN + "' and table_name='"; +// private String columnExistSQL = "select count(*) as "+GlobalTableUtil.INNER_COLUMN +// + " from information_schema.columns where column_name='" +// + GlobalTableUtil.GLOBAL_TABLE_MYCAT_COLUMN + "' and table_name='"; + + // 此处用到了 mysql 多行转一行 group_concat 的用法,主要是为了简化对结果的处理 + // 得到的结果类似于:id,name,_mycat_op_time + private String columnExistSQL = "select group_concat(COLUMN_NAME separator ',') as " + + GlobalTableUtil.INNER_COLUMN +" from information_schema.columns where TABLE_NAME='"; //user' and TABLE_SCHEMA='db1'; private List>> list = new ArrayList<>(); @@ -74,34 +79,44 @@ public MySQLConsistencyChecker(MySQLDataSource source, String tableName) { public void checkRecordCout() { // ["db3","db2","db1"] - this.jobCount.set(0); - beginTime = new Date().getTime(); - String[] physicalSchemas = 
source.getDbPool().getSchemas(); - for(String dbName : physicalSchemas){ - MySQLConsistencyHelper detector = new MySQLConsistencyHelper(this, null); - OneRawSQLQueryResultHandler resultHandler = - new OneRawSQLQueryResultHandler(new String[] {GlobalTableUtil.COUNT_COLUMN}, detector); - SQLJob sqlJob = new SQLJob(this.getCountSQL(), dbName, resultHandler, source); - detector.setSqlJob(sqlJob); - sqlJob.run(); - this.jobCount.incrementAndGet(); - } + lock.lock(); + try{ + this.jobCount.set(0); + beginTime = new Date().getTime(); + String[] physicalSchemas = source.getDbPool().getSchemas(); + for(String dbName : physicalSchemas){ + MySQLConsistencyHelper detector = new MySQLConsistencyHelper(this, null); + OneRawSQLQueryResultHandler resultHandler = + new OneRawSQLQueryResultHandler(new String[] {GlobalTableUtil.COUNT_COLUMN}, detector); + SQLJob sqlJob = new SQLJob(this.getCountSQL(), dbName, resultHandler, source); + detector.setSqlJob(sqlJob); + sqlJob.run(); + this.jobCount.incrementAndGet(); + } + }finally{ + lock.unlock(); + } } public void checkMaxTimeStamp() { // ["db3","db2","db1"] - this.jobCount.set(0); - beginTime = new Date().getTime(); - String[] physicalSchemas = source.getDbPool().getSchemas(); - for(String dbName : physicalSchemas){ - MySQLConsistencyHelper detector = new MySQLConsistencyHelper(this, null); - OneRawSQLQueryResultHandler resultHandler = - new OneRawSQLQueryResultHandler(new String[] {GlobalTableUtil.MAX_COLUMN}, detector); - SQLJob sqlJob = new SQLJob(this.getMaxSQL(), dbName, resultHandler, source); - detector.setSqlJob(sqlJob); - sqlJob.run(); - this.jobCount.incrementAndGet(); - } + lock.lock(); + try{ + this.jobCount.set(0); + beginTime = new Date().getTime(); + String[] physicalSchemas = source.getDbPool().getSchemas(); + for(String dbName : physicalSchemas){ + MySQLConsistencyHelper detector = new MySQLConsistencyHelper(this, null); + OneRawSQLQueryResultHandler resultHandler = + new OneRawSQLQueryResultHandler(new String[] 
{GlobalTableUtil.MAX_COLUMN}, detector); + SQLJob sqlJob = new SQLJob(this.getMaxSQL(), dbName, resultHandler, source); + detector.setSqlJob(sqlJob); + sqlJob.run(); + this.jobCount.incrementAndGet(); + } + }finally{ + lock.unlock(); + } } /** @@ -109,20 +124,25 @@ public void checkMaxTimeStamp() { */ public void checkInnerColumnExist() { // ["db3","db2","db1"] - this.jobCount.set(0); - beginTime = new Date().getTime(); - String[] physicalSchemas = source.getDbPool().getSchemas(); - for(String dbName : physicalSchemas){ - MySQLConsistencyHelper detector = new MySQLConsistencyHelper(this, null, 1); - OneRawSQLQueryResultHandler resultHandler = - new OneRawSQLQueryResultHandler(new String[] {GlobalTableUtil.INNER_COLUMN}, detector); - String db = " and table_schema='" + dbName + "'"; - SQLJob sqlJob = new SQLJob(this.columnExistSQL + db , dbName, resultHandler, source); - detector.setSqlJob(sqlJob);//table_schema='db1' - LOGGER.debug(sqlJob.toString()); - sqlJob.run(); - this.jobCount.incrementAndGet(); - } + lock.lock(); + try{ + this.jobCount.set(0); + beginTime = new Date().getTime(); + String[] physicalSchemas = source.getDbPool().getSchemas(); + for(String dbName : physicalSchemas){ + MySQLConsistencyHelper detector = new MySQLConsistencyHelper(this, null, 1); + OneRawSQLQueryResultHandler resultHandler = + new OneRawSQLQueryResultHandler(new String[] {GlobalTableUtil.INNER_COLUMN}, detector); + String db = " and table_schema='" + dbName + "'"; + SQLJob sqlJob = new SQLJob(this.columnExistSQL + db , dbName, resultHandler, source); + detector.setSqlJob(sqlJob);//table_schema='db1' + LOGGER.debug(sqlJob.toString()); + sqlJob.run(); + this.jobCount.incrementAndGet(); + } + }finally{ + lock.unlock(); + } } public void setResult(SQLQueryResult> result) { diff --git a/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyHelper.java b/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyHelper.java index 21488f9f3..67b6400aa 100644 --- 
a/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyHelper.java +++ b/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyHelper.java @@ -129,14 +129,4 @@ public void setSqlJob(SQLJob sqlJob) { this.sqlJob = sqlJob; } -// if(count == null){ -// LOGGER.warn(heartbeat.getCountSQL() + " execute failed in db: " -// + result.getDataNode() +" during global table consistency heartbeat."); -// } -// if(maxTimestamp == null){ -// LOGGER.warn(heartbeat.getMaxSQL() + " execute failed in db: " -// + result.getDataNode() +" during global table consistency heartbeat."); -// } -// return; -// } } diff --git a/src/main/java/io/mycat/backend/heartbeat/MySQLDetector.java b/src/main/java/io/mycat/backend/heartbeat/MySQLDetector.java index 0306907a0..691ae2ad7 100644 --- a/src/main/java/io/mycat/backend/heartbeat/MySQLDetector.java +++ b/src/main/java/io/mycat/backend/heartbeat/MySQLDetector.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,145 +16,201 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. 
* */ package io.mycat.backend.heartbeat; -import io.mycat.backend.MySQLDataSource; -import io.mycat.backend.PhysicalDBPool; -import io.mycat.backend.PhysicalDatasource; -import io.mycat.server.config.node.DataHostConfig; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.mysql.nio.MySQLDataSource; +import io.mycat.config.model.DataHostConfig; import io.mycat.sqlengine.OneRawSQLQueryResultHandler; import io.mycat.sqlengine.SQLJob; import io.mycat.sqlengine.SQLQueryResult; import io.mycat.sqlengine.SQLQueryResultListener; import io.mycat.util.TimeUtil; -import java.util.Map; -import java.util.concurrent.atomic.AtomicBoolean; - /** * @author mycat */ -public class MySQLDetector implements - SQLQueryResultListener>> { - private MySQLHeartbeat heartbeat; - private long heartbeatTimeout; - private final AtomicBoolean isQuit; - private volatile long lastSendQryTime; - private volatile long lasstReveivedQryTime; - private volatile SQLJob sqlJob; - private static final String[] MYSQL_SLAVE_STAUTS_COLMS = new String[] { - "Seconds_Behind_Master", "Slave_IO_Running", "Slave_SQL_Running" }; - - public MySQLDetector(MySQLHeartbeat heartbeat) { - this.heartbeat = heartbeat; - this.isQuit = new AtomicBoolean(false); - } - - public MySQLHeartbeat getHeartbeat() { - return heartbeat; - } - - public long getHeartbeatTimeout() { - return heartbeatTimeout; - } - - public void setHeartbeatTimeout(long heartbeatTimeout) { - this.heartbeatTimeout = heartbeatTimeout; - } - - public boolean isHeartbeatTimeout() { - return TimeUtil.currentTimeMillis() > Math.max(lastSendQryTime, - lasstReveivedQryTime) + heartbeatTimeout; - } - - public long getLastSendQryTime() { - return lastSendQryTime; - } - - public long getLasstReveivedQryTime() { - return lasstReveivedQryTime; - } - - public void heartbeat() { - lastSendQryTime = 
System.currentTimeMillis(); - MySQLDataSource ds = heartbeat.getSource(); - String databaseName = ds.getDbPool().getSchemas()[0]; - String[] fetchColms={}; - if (heartbeat.getSource().getHostConfig().isShowSlaveSql() ) { - fetchColms=MYSQL_SLAVE_STAUTS_COLMS; - } - OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler( - fetchColms, this); - sqlJob = new SQLJob(heartbeat.getHeartbeatSQL(), databaseName, - resultHandler, ds); - sqlJob.run(); - } - - public void quit() { - if (isQuit.compareAndSet(false, true)) { - close("heart beat quit"); - } - - } - - public boolean isQuit() { - return isQuit.get(); - } - - @Override - public void onResult(SQLQueryResult> result) { - if (result.isSuccess()) { - int balance = heartbeat.getSource().getDbPool().getBalance(); - PhysicalDatasource source = heartbeat.getSource(); +public class MySQLDetector implements SQLQueryResultListener>> { + + private MySQLHeartbeat heartbeat; + + private long heartbeatTimeout; + private final AtomicBoolean isQuit; + private volatile long lastSendQryTime; + private volatile long lasstReveivedQryTime; + private volatile SQLJob sqlJob; + + private static final String[] MYSQL_SLAVE_STAUTS_COLMS = new String[] { + "Seconds_Behind_Master", + "Slave_IO_Running", + "Slave_SQL_Running", + "Slave_IO_State", + "Master_Host", + "Master_User", + "Master_Port", + "Connect_Retry", + "Last_IO_Error"}; + + private static final String[] MYSQL_CLUSTER_STAUTS_COLMS = new String[] { + "Variable_name", + "Value"}; + + public MySQLDetector(MySQLHeartbeat heartbeat) { + this.heartbeat = heartbeat; + this.isQuit = new AtomicBoolean(false); + } + + public MySQLHeartbeat getHeartbeat() { + return heartbeat; + } + + public long getHeartbeatTimeout() { + return heartbeatTimeout; + } + + public void setHeartbeatTimeout(long heartbeatTimeout) { + this.heartbeatTimeout = heartbeatTimeout; + } + + public boolean isHeartbeatTimeout() { + return TimeUtil.currentTimeMillis() > Math.max(lastSendQryTime, + 
lasstReveivedQryTime) + heartbeatTimeout; + } + + public long getLastSendQryTime() { + return lastSendQryTime; + } + + public long getLasstReveivedQryTime() { + return lasstReveivedQryTime; + } + + public void heartbeat() { + lastSendQryTime = System.currentTimeMillis(); + MySQLDataSource ds = heartbeat.getSource(); + String databaseName = ds.getDbPool().getSchemas()[0]; + String[] fetchColms={}; + if (heartbeat.getSource().getHostConfig().isShowSlaveSql() ) { + fetchColms=MYSQL_SLAVE_STAUTS_COLMS; + } + if (heartbeat.getSource().getHostConfig().isShowClusterSql() ) { + fetchColms=MYSQL_CLUSTER_STAUTS_COLMS; + } + OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler( fetchColms, this); + sqlJob = new SQLJob(heartbeat.getHeartbeatSQL(), databaseName, resultHandler, ds); + sqlJob.run(); + } + + public void quit() { + if (isQuit.compareAndSet(false, true)) { + close("heart beat quit"); + } + + } + + public boolean isQuit() { + return isQuit.get(); + } + + @Override + public void onResult(SQLQueryResult> result) { + + if (result.isSuccess()) { + + int balance = heartbeat.getSource().getDbPool().getBalance(); + + PhysicalDatasource source = heartbeat.getSource(); + int switchType = source.getHostConfig().getSwitchType(); Map resultResult = result.getResult(); - if (source.getHostConfig().isShowSlaveSql() - &&(source.getHostConfig().getSwitchType() == DataHostConfig.SYN_STATUS_SWITCH_DS || - PhysicalDBPool.BALANCE_NONE!=balance ) - ) - { - - String Slave_IO_Running =resultResult!=null? 
resultResult.get( - "Slave_IO_Running"):null; - String Slave_SQL_Running = resultResult!=null?resultResult.get( - "Slave_SQL_Running"):null; - if (Slave_IO_Running != null - && Slave_IO_Running.equals(Slave_SQL_Running) - && Slave_SQL_Running.equals("Yes")) { - heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_NORMAL); - String Seconds_Behind_Master = resultResult.get( - "Seconds_Behind_Master"); - if (null != Seconds_Behind_Master - && !"".equals(Seconds_Behind_Master)) { - heartbeat.setSlaveBehindMaster(Integer - .valueOf(Seconds_Behind_Master)); - } - } else if(source.isSalveOrRead()) - { - MySQLHeartbeat.LOGGER - .warn("found MySQL master/slave Replication err !!! " - + heartbeat.getSource().getConfig()); - heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_ERROR); - } - - } - heartbeat.setResult(MySQLHeartbeat.OK_STATUS, this, null); - } else { - heartbeat.setResult(MySQLHeartbeat.ERROR_STATUS, this, null); - } - lasstReveivedQryTime = System.currentTimeMillis(); - } - - public void close(String msg) { - SQLJob curJob = sqlJob; - if (curJob != null && !curJob.isFinished()) { - curJob.teminate(msg); - sqlJob = null; - } - } - -} \ No newline at end of file + + if ( resultResult!=null&& !resultResult.isEmpty() &&switchType == DataHostConfig.SYN_STATUS_SWITCH_DS + && source.getHostConfig().isShowSlaveSql()) { + + String Slave_IO_Running = resultResult != null ? resultResult.get("Slave_IO_Running") : null; + String Slave_SQL_Running = resultResult != null ? 
resultResult.get("Slave_SQL_Running") : null; + + if (Slave_IO_Running != null + && Slave_IO_Running.equals(Slave_SQL_Running) + && Slave_SQL_Running.equals("Yes")) { + + heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_NORMAL); + String Seconds_Behind_Master = resultResult.get( "Seconds_Behind_Master"); + if (null != Seconds_Behind_Master && !"".equals(Seconds_Behind_Master)) { + + int Behind_Master = Integer.parseInt(Seconds_Behind_Master); + if ( Behind_Master > source.getHostConfig().getSlaveThreshold() ) { + MySQLHeartbeat.LOGGER.warn("found MySQL master/slave Replication delay !!! " + + heartbeat.getSource().getConfig() + ", binlog sync time delay: " + Behind_Master + "s" ); + } + heartbeat.setSlaveBehindMaster( Behind_Master ); + } + + } else if( source.isSalveOrRead() ) { + //String Last_IO_Error = resultResult != null ? resultResult.get("Last_IO_Error") : null; + MySQLHeartbeat.LOGGER.warn("found MySQL master/slave Replication err !!! " + + heartbeat.getSource().getConfig() + ", " + resultResult); + heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_ERROR); + } + + heartbeat.getAsynRecorder().set(resultResult, switchType); + heartbeat.setResult(MySQLHeartbeat.OK_STATUS, this, null); + + } else if ( resultResult!=null&& !resultResult.isEmpty() && switchType==DataHostConfig.CLUSTER_STATUS_SWITCH_DS + && source.getHostConfig().isShowClusterSql() ) { + + //String Variable_name = resultResult != null ? resultResult.get("Variable_name") : null; + String wsrep_cluster_status = resultResult != null ? resultResult.get("wsrep_cluster_status") : null;// Primary + String wsrep_connected = resultResult != null ? resultResult.get("wsrep_connected") : null;// ON + String wsrep_ready = resultResult != null ? 
resultResult.get("wsrep_ready") : null;// ON + + if ("ON".equals(wsrep_connected) + && "ON".equals(wsrep_ready) + && "Primary".equals(wsrep_cluster_status)) { + + heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_NORMAL); + heartbeat.setResult(MySQLHeartbeat.OK_STATUS, this, null); + + } else { + MySQLHeartbeat.LOGGER.warn("found MySQL cluster status err !!! " + + heartbeat.getSource().getConfig() + + " wsrep_cluster_status: "+ wsrep_cluster_status + + " wsrep_connected: "+ wsrep_connected + + " wsrep_ready: "+ wsrep_ready + ); + + heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_ERROR); + heartbeat.setResult(MySQLHeartbeat.ERROR_STATUS, this, null); + } + heartbeat.getAsynRecorder().set(resultResult, switchType); + + } else { + heartbeat.setResult(MySQLHeartbeat.OK_STATUS, this, null); + } + //监测数据库同步状态,在 switchType=-1或者1的情况下,也需要收集主从同步状态 + heartbeat.getAsynRecorder().set(resultResult, switchType); + + } else { + heartbeat.setResult(MySQLHeartbeat.ERROR_STATUS, this, null); + } + + lasstReveivedQryTime = System.currentTimeMillis(); + heartbeat.getRecorder().set((lasstReveivedQryTime - lastSendQryTime)); + } + + public void close(String msg) { + SQLJob curJob = sqlJob; + if (curJob != null && !curJob.isFinished()) { + curJob.teminate(msg); + sqlJob = null; + } + } +} diff --git a/src/main/java/io/mycat/backend/heartbeat/MySQLHeartbeat.java b/src/main/java/io/mycat/backend/heartbeat/MySQLHeartbeat.java index bb11de2d2..1bb66a674 100644 --- a/src/main/java/io/mycat/backend/heartbeat/MySQLHeartbeat.java +++ b/src/main/java/io/mycat/backend/heartbeat/MySQLHeartbeat.java @@ -23,25 +23,24 @@ */ package io.mycat.backend.heartbeat; -import io.mycat.backend.MySQLDataSource; -import io.mycat.backend.PhysicalDBPool; -import io.mycat.backend.PhysicalDatasource; -import io.mycat.server.config.node.DataHostConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.text.SimpleDateFormat; import java.util.Date; import java.util.concurrent.locks.ReentrantLock; +import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.mysql.nio.MySQLDataSource; +import io.mycat.config.model.DataHostConfig; + /** * @author mycat */ public class MySQLHeartbeat extends DBHeartbeat { private static final int MAX_RETRY_COUNT = 5; - public static final Logger LOGGER = LoggerFactory - .getLogger(MySQLHeartbeat.class); + public static final Logger LOGGER = LoggerFactory.getLogger(MySQLHeartbeat.class); private final MySQLDataSource source; @@ -55,7 +54,7 @@ public MySQLHeartbeat(MySQLDataSource source) { this.lock = new ReentrantLock(false); this.maxRetryCount = MAX_RETRY_COUNT; this.status = INIT_STATUS; - this.heartbeatSQL = source.getHostConfig().getHeartbeatSQL(); + this.heartbeatSQL = source.getHostConfig().getHearbeatSQL(); } public MySQLDataSource getSource() { @@ -171,9 +170,6 @@ public void setResult(int result, MySQLDetector detector, String msg) { } private void setOk(MySQLDetector detector) { - - recorder.set(detector.getLasstReveivedQryTime() - - detector.getLastSendQryTime()); switch (status) { case DBHeartbeat.TIMEOUT_STATUS: this.status = DBHeartbeat.INIT_STATUS; @@ -199,19 +195,17 @@ private void setError(MySQLDetector detector) { // should continues check error status if (++errorCount < maxRetryCount) { - if (detector != null && !detector.isQuit()) { - heartbeat(); // error count not enough, heart beat again - } - //return; - } else + if (detector != null && !detector.isQuit()) { + heartbeat(); // error count not enough, heart beat again + } + + }else { if (detector != null ) { detector.quit(); } - this.status = ERROR_STATUS; this.errorCount = 0; - } } @@ -243,8 +237,7 @@ private void switchSourceIfNeed(String reason) { synchronized (pool) { // try to see if need switch datasource curDatasourceHB = pool.getSource().getHeartbeat().getStatus(); - if (curDatasourceHB != DBHeartbeat.INIT_STATUS - && 
curDatasourceHB != DBHeartbeat.OK_STATUS) { + if (curDatasourceHB != DBHeartbeat.INIT_STATUS && curDatasourceHB != DBHeartbeat.OK_STATUS) { int curIndex = pool.getActivedIndex(); int nextId = pool.next(curIndex); PhysicalDatasource[] allWriteNodes = pool.getSources(); @@ -257,24 +250,20 @@ private void switchSourceIfNeed(String reason) { int theSourceHBStatus = theSourceHB.getStatus(); if (theSourceHBStatus == DBHeartbeat.OK_STATUS) { if (switchType == DataHostConfig.SYN_STATUS_SWITCH_DS) { - if (Integer.valueOf(0).equals( - theSourceHB.getSlaveBehindMaster())) { - LOGGER.info("try to switch datasource ,slave is synchronized to master " - + theSource.getConfig()); + if (Integer.valueOf(0).equals( theSourceHB.getSlaveBehindMaster())) { + LOGGER.info("try to switch datasource ,slave is synchronized to master " + theSource.getConfig()); pool.switchSource(nextId, true, reason); break; } else { LOGGER.warn("ignored datasource ,slave is not synchronized to master , slave behind master :" - + theSourceHB - .getSlaveBehindMaster() + + theSourceHB.getSlaveBehindMaster() + " " + theSource.getConfig()); } } else { // normal switch - LOGGER.info("try to switch datasource ,not checked slave synchronize status " - + theSource.getConfig()); + LOGGER.info("try to switch datasource ,not checked slave synchronize status " + theSource.getConfig()); pool.switchSource(nextId, true, reason); - break; + break; } } diff --git a/src/main/java/io/mycat/backend/jdbc/JDBCConnection.java b/src/main/java/io/mycat/backend/jdbc/JDBCConnection.java index d2972be7c..11ca234e4 100644 --- a/src/main/java/io/mycat/backend/jdbc/JDBCConnection.java +++ b/src/main/java/io/mycat/backend/jdbc/JDBCConnection.java @@ -1,575 +1,877 @@ -package io.mycat.backend.jdbc; - -import io.mycat.backend.BackendConnection; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.route.RouteResultsetNode; -import io.mycat.server.ErrorCode; -import io.mycat.server.Isolations; -import 
io.mycat.server.MySQLFrontConnection; -import io.mycat.server.executors.ConnectionHeartBeatHandler; -import io.mycat.server.executors.ResponseHandler; -import io.mycat.server.packet.*; -import io.mycat.server.parser.ServerParse; -import io.mycat.server.response.ShowVariables; -import io.mycat.util.ResultSetUtil; -import io.mycat.util.StringUtil; -import io.mycat.util.TimeUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; - -public class JDBCConnection implements BackendConnection { - protected static final Logger LOGGER = LoggerFactory - .getLogger(JDBCConnection.class); - private JDBCDatasource pool; - private volatile String schema; - private volatile String dbType; - private volatile String oldSchema; - private byte packetId; - private int txIsolation; - private volatile boolean running = false; - private volatile boolean borrowed; - private long id = 0; - private String host; - private int port; - private Connection con; - private ResponseHandler respHandler; - private volatile Object attachement; - - boolean headerOutputed = false; - private volatile boolean modifiedSQLExecuted; - private final long startTime; - private long lastTime; - private boolean isSpark = false; - - - public JDBCConnection() { - startTime = System.currentTimeMillis(); - } - - public Connection getCon() { - return con; - } - - public void setCon(Connection con) { - this.con = con; - - } - - @Override - public void close(String reason) { - try { - con.close(); - - } catch (SQLException e) { - } - - } - - public void setId(long id) { - this.id = id; - } - - public JDBCDatasource getPool() { - return pool; - } - - public void setPool(JDBCDatasource pool) { - this.pool = pool; - 
} - - public void setHost(String host) { - this.host = host; - } - - public void setPort(int port) { - this.port = port; - } - - @Override - public boolean isClosed() { - try { - return con == null || con.isClosed(); - } catch (SQLException e) { - return true; - } - } - - @Override - public void idleCheck() { - if (TimeUtil.currentTimeMillis() > lastTime - + pool.getConfig().getIdleTimeout()) { - close(" idle check"); - } - } - - @Override - public long getStartupTime() { - return startTime; - } - - public String getHost() { - return this.host; - } - - public int getPort() { - return this.port; - } - - public int getLocalPort() { - return 0; - } - - public long getNetInBytes() { - - return 0; - } - - public long getNetOutBytes() { - return 0; - } - - @Override - public boolean isModifiedSQLExecuted() { - return modifiedSQLExecuted; - } - - @Override - public boolean isFromSlaveDB() { - return false; - } - - public String getDbType() { - return this.dbType; - } - - public void setDbType(String newDbType) { - this.dbType = newDbType.toUpperCase(); - this.isSpark = dbType.equals("SPARK"); - - } - - @Override - public String getSchema() { - return this.schema; - } - - @Override - public void setSchema(String newSchema) { - this.oldSchema = this.schema; - this.schema = newSchema; - - } - - @Override - public long getLastTime() { - - return lastTime; - } - - @Override - public boolean isClosedOrQuit() { - return this.isClosed(); - } - - @Override - public void setAttachment(Object attachment) { - this.attachement = attachment; - - } - - @Override - public void quit() { - this.close("client quit"); - - } - - @Override - public void setLastTime(long currentTimeMillis) { - this.lastTime = currentTimeMillis; - - } - - @Override - public void release() { - modifiedSQLExecuted = false; - setResponseHandler(null); - pool.releaseChannel(this); - } - - public void setRunning(boolean running) { - this.running = running; - - } - - @Override - public void 
setResponseHandler(ResponseHandler commandHandler) { - respHandler = commandHandler; - } - - @Override - public void commit() { - try { - con.commit(); - - this.respHandler.okResponse(OkPacket.OK, this); - } catch (SQLException e) { - throw new RuntimeException(e); - } - } - - private int convertNativeIsolationToJDBC(int nativeIsolation) { - if (nativeIsolation == Isolations.REPEATED_READ) { - return Connection.TRANSACTION_REPEATABLE_READ; - } else if (nativeIsolation == Isolations.SERIALIZABLE) { - return Connection.TRANSACTION_SERIALIZABLE; - } else { - return nativeIsolation; - } - } - - private void syncIsolation(int nativeIsolation) { - int jdbcIsolation = convertNativeIsolationToJDBC(nativeIsolation); - int srcJdbcIsolation = getTxIsolation(); - if (jdbcIsolation == srcJdbcIsolation) - return; - if ("oracle".equalsIgnoreCase(getDbType()) - && jdbcIsolation != Connection.TRANSACTION_READ_COMMITTED - && jdbcIsolation != Connection.TRANSACTION_SERIALIZABLE) { - // oracle 只支持2个级别 ,且只能更改一次隔离级别,否则会报 ORA-01453 - return; - } - try { - con.setTransactionIsolation(jdbcIsolation); - } catch (SQLException e) { - LOGGER.warn("set txisolation error:", e); - } - } - - private void executeSQL(RouteResultsetNode rrn, MySQLFrontConnection sc, - boolean autocommit) throws IOException { - String orgin = rrn.getStatement(); - // String sql = rrn.getStatement().toLowerCase(); - // LOGGER.info("JDBC SQL:"+orgin+"|"+sc.toString()); - if (!modifiedSQLExecuted && rrn.isModifySQL()) { - modifiedSQLExecuted = true; - } - - try { - - syncIsolation(sc.getTxIsolation()); - if (!this.schema.equals(this.oldSchema)) { - con.setCatalog(schema); - this.oldSchema = schema; - } - if (!this.isSpark) { - con.setAutoCommit(autocommit); - } - int sqlType = rrn.getSqlType(); - - if (sqlType == ServerParse.SELECT || sqlType == ServerParse.SHOW) { - if ((sqlType == ServerParse.SHOW) && (!dbType.equals("MYSQL"))) { - // showCMD(sc, orgin); - // ShowVariables.execute(sc, orgin); - 
ShowVariables.execute(sc); -// } else if ("SELECT CONNECTION_ID()".equalsIgnoreCase(orgin)) { -// // ShowVariables.justReturnValue(sc,String.valueOf(sc.getId())); -// ShowVariables.justReturnValue(sc, -// String.valueOf(sc.getId()), this); - } else - { - ouputResultSet(sc, orgin); - } - } else { - executeddl(sc, orgin); - } - - } catch (SQLException e) { - - String msg = e.getMessage(); - ErrorPacket error = new ErrorPacket(); - error.packetId = ++packetId; - error.errno = e.getErrorCode(); - error.message = msg.getBytes(); - this.respHandler.errorResponse(error.writeToBytes(), this); - } catch (Exception e) { - String msg = e.getMessage(); - ErrorPacket error = new ErrorPacket(); - error.packetId = ++packetId; - error.errno = ErrorCode.ER_UNKNOWN_ERROR; - error.message = msg.getBytes(); - this.respHandler.errorResponse(error.writeToBytes(), this); - } finally { - this.running = false; - } - - } - - - - private void executeddl(MySQLFrontConnection sc, String sql) - throws SQLException { - Statement stmt = null; - try { - stmt = con.createStatement(); - int count = stmt.executeUpdate(sql); - OkPacket okPck = new OkPacket(); - okPck.affectedRows = count; - okPck.insertId = 0; - okPck.packetId = ++packetId; - okPck.message = " OK!".getBytes(); - this.respHandler.okResponse(okPck.writeToBytes(), this); - } finally { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException e) { - - } - } - } - } - - private void ouputResultSet(MySQLFrontConnection sc, String sql) - throws SQLException { - ResultSet rs = null; - Statement stmt = null; - - try { - stmt = con.createStatement(); - rs = stmt.executeQuery(sql); - - List fieldPks = new LinkedList(); - ResultSetUtil.resultSetToFieldPacket(sc.getCharset(), fieldPks, rs, - this.isSpark); - int colunmCount = fieldPks.size(); - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - ResultSetHeaderPacket headerPkg = new ResultSetHeaderPacket(); - headerPkg.fieldCount = fieldPks.size(); - 
headerPkg.packetId = ++packetId; - - headerPkg.write(bufferArray); - - byte[] header =bufferArray.writeToByteArrayAndRecycle(); - - List fields = new ArrayList(fieldPks.size()); - Iterator itor = fieldPks.iterator(); - while (itor.hasNext()) { - bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - FieldPacket curField = itor.next(); - curField.packetId = ++packetId; - curField.write(bufferArray); - byte[] field = bufferArray.writeToByteArrayAndRecycle(); - fields.add(field); - itor.remove(); - } - - bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - EOFPacket eofPckg = new EOFPacket(); - eofPckg.packetId = ++packetId; - eofPckg.write(bufferArray); - byte[] eof = bufferArray.writeToByteArrayAndRecycle(); - this.respHandler.fieldEofResponse(header, fields, eof, this); - - // output row - while (rs.next()) { - bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - RowDataPacket curRow = new RowDataPacket(colunmCount); - for (int i = 0; i < colunmCount; i++) { - int j = i + 1; - curRow.add(StringUtil.encode(rs.getString(j), - sc.getCharset())); - } - curRow.packetId = ++packetId; - curRow.write(bufferArray); - byte[] row =bufferArray.writeToByteArrayAndRecycle(); - this.respHandler.rowResponse(row, this); - } - - // end row - bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - eofPckg = new EOFPacket(); - eofPckg.packetId = ++packetId; - eofPckg.write(bufferArray); - eof = bufferArray.writeToByteArrayAndRecycle(); - this.respHandler.rowEofResponse(eof, this); - } finally { - if (rs != null) { - try { - rs.close(); - } catch (SQLException e) { - - } - } - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException e) { - - } - } - } - } - - @Override - public void query(final String sql) throws UnsupportedEncodingException { - if (respHandler instanceof ConnectionHeartBeatHandler) { - justForHeartbeat(sql); - } else { - throw new UnsupportedEncodingException("unsupported 
yet "); - } - } - - private void justForHeartbeat(String sql) { - - Statement stmt = null; - - try { - stmt = con.createStatement(); - stmt.execute(sql); - if (!isAutocommit()) { // 如果在写库上,如果是事务方式的连接,需要进行手动commit - con.commit(); - } - this.respHandler.okResponse(OkPacket.OK, this); - - } catch (Exception e) { - String msg = e.getMessage(); - ErrorPacket error = new ErrorPacket(); - error.packetId = ++packetId; - error.errno = ErrorCode.ER_UNKNOWN_ERROR; - error.message = msg.getBytes(); - this.respHandler.errorResponse(error.writeToBytes(), this); - } finally { - if (stmt != null) { - try { - stmt.close(); - } catch (SQLException e) { - - } - } - } - } - - @Override - public Object getAttachment() { - return this.attachement; - } - - @Override - public String getCharset() { - return null; - } - - @Override - public void execute(final RouteResultsetNode node, - final MySQLFrontConnection source, final boolean autocommit) - throws IOException { - Runnable runnable = new Runnable() { - @Override - public void run() { - try { - executeSQL(node, source, autocommit); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - }; - - NetSystem.getInstance().getExecutor().execute(runnable); - } - - @Override - public boolean syncAndExcute() { - return true; - } - - @Override - public void rollback() { - try { - con.rollback(); - - this.respHandler.okResponse(OkPacket.OK, this); - } catch (SQLException e) { - throw new RuntimeException(e); - } - } - - public boolean isRunning() { - return this.running; - } - - @Override - public boolean isBorrowed() { - return this.borrowed; - } - - @Override - public void setBorrowed(boolean borrowed) { - this.borrowed = borrowed; - - } - - @Override - public int getTxIsolation() { - if (con != null) { - try { - return con.getTransactionIsolation(); - } catch (SQLException e) { - return 0; - } - } else { - return -1; - } - } - - @Override - public boolean isAutocommit() { - if (con == null) { - return true; - } else { - try { - 
return con.getAutoCommit(); - } catch (SQLException e) { - - } - } - return true; - } - - @Override - public long getId() { - return id; - } - - @Override - public String toString() { - return "JDBCConnection [id=" + id + ",autocommit=" - + this.isAutocommit() + ",pool=" + pool + ", schema=" + schema - + ", dbType=" + dbType + ", oldSchema=" + oldSchema - + ", packetId=" + packetId + ", txIsolation=" + txIsolation - + ", running=" + running + ", borrowed=" + borrowed + ", host=" - + host + ", port=" + port + ", con=" + con + ", respHandler=" - + respHandler + ", attachement=" + attachement - + ", headerOutputed=" + headerOutputed - + ", modifiedSQLExecuted=" + modifiedSQLExecuted - + ", startTime=" + startTime + ", lastTime=" + lastTime - + ", isSpark=" + isSpark+"]"; - } - -} +package io.mycat.backend.jdbc; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.math.BigDecimal; +import java.nio.ByteBuffer; +import java.sql.*; +import java.util.*; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.route.Procedure; +import io.mycat.route.ProcedureParameter; +import io.mycat.util.*; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.BackendConnection; +import io.mycat.backend.mysql.nio.handler.ConnectionHeartBeatHandler; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.config.ErrorCode; +import io.mycat.config.Isolations; +import io.mycat.net.NIOProcessor; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.OkPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.RouteResultsetNode; +import io.mycat.server.ServerConnection; +import io.mycat.server.parser.ServerParse; + +public class JDBCConnection implements BackendConnection { + protected static final Logger LOGGER = 
LoggerFactory + .getLogger(JDBCConnection.class); + private JDBCDatasource pool; + private volatile String schema; + private volatile String dbType; + private volatile String oldSchema; + private byte packetId; + private int txIsolation; + private volatile boolean running = false; + private volatile boolean borrowed; + private long id = 0; + private String host; + private int port; + private Connection con; + private ResponseHandler respHandler; + private volatile Object attachement; + + boolean headerOutputed = false; + private volatile boolean modifiedSQLExecuted; + private final long startTime; + private long lastTime; + private boolean isSpark = false; + + private NIOProcessor processor; + + + + public NIOProcessor getProcessor() { + return processor; + } + + public void setProcessor(NIOProcessor processor) { + this.processor = processor; + } + + public JDBCConnection() { + startTime = System.currentTimeMillis(); + } + + public Connection getCon() { + return con; + } + + public void setCon(Connection con) { + this.con = con; + + } + + @Override + public void close(String reason) { + try { + con.close(); + if(processor!=null){ + processor.removeConnection(this); + } + + } catch (SQLException e) { + } + + } + + public void setId(long id) { + this.id = id; + } + + public JDBCDatasource getPool() { + return pool; + } + + public void setPool(JDBCDatasource pool) { + this.pool = pool; + } + + public void setHost(String host) { + this.host = host; + } + + public void setPort(int port) { + this.port = port; + } + + @Override + public boolean isClosed() { + try { + return con == null || con.isClosed(); + } catch (SQLException e) { + return true; + } + } + + @Override + public void idleCheck() { + if(TimeUtil.currentTimeMillis() > lastTime + pool.getConfig().getIdleTimeout()){ + close(" idle check"); + } + } + + @Override + public long getStartupTime() { + return startTime; + } + + @Override + public String getHost() { + return this.host; + } + + @Override + public int 
getPort() { + return this.port; + } + + @Override + public int getLocalPort() { + return 0; + } + + @Override + public long getNetInBytes() { + + return 0; + } + + @Override + public long getNetOutBytes() { + return 0; + } + + @Override + public boolean isModifiedSQLExecuted() { + return modifiedSQLExecuted; + } + + @Override + public boolean isFromSlaveDB() { + return false; + } + + public String getDbType() { + return this.dbType; + } + + public void setDbType(String newDbType) { + this.dbType = newDbType.toUpperCase(); + this.isSpark = dbType.equals("SPARK"); + + } + + @Override + public String getSchema() { + return this.schema; + } + + @Override + public void setSchema(String newSchema) { + this.oldSchema = this.schema; + this.schema = newSchema; + + } + + @Override + public long getLastTime() { + + return lastTime; + } + + @Override + public boolean isClosedOrQuit() { + return this.isClosed(); + } + + @Override + public void setAttachment(Object attachment) { + this.attachement = attachment; + + } + + @Override + public void quit() { + this.close("client quit"); + + } + + @Override + public void setLastTime(long currentTimeMillis) { + this.lastTime = currentTimeMillis; + + } + + @Override + public void release() { + modifiedSQLExecuted = false; + setResponseHandler(null); + pool.releaseChannel(this); + } + + public void setRunning(boolean running) { + this.running = running; + + } + + @Override + public boolean setResponseHandler(ResponseHandler commandHandler) { + respHandler = commandHandler; + return false; + } + + @Override + public void commit() { + try { + con.commit(); + + this.respHandler.okResponse(OkPacket.OK, this); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + private int convertNativeIsolationToJDBC(int nativeIsolation) + { + if(nativeIsolation== Isolations.REPEATED_READ) + { + return Connection.TRANSACTION_REPEATABLE_READ; + }else + if(nativeIsolation== Isolations.SERIALIZABLE) + { + return 
Connection.TRANSACTION_SERIALIZABLE; + } else + { + return nativeIsolation; + } + } + + + + private void syncIsolation(int nativeIsolation) + { + int jdbcIsolation=convertNativeIsolationToJDBC(nativeIsolation); + int srcJdbcIsolation= getTxIsolation(); + if (jdbcIsolation == srcJdbcIsolation || "oracle".equalsIgnoreCase(getDbType()) + && jdbcIsolation != Connection.TRANSACTION_READ_COMMITTED + && jdbcIsolation != Connection.TRANSACTION_SERIALIZABLE) { + return; + } + try + { + con.setTransactionIsolation(jdbcIsolation); + } catch (SQLException e) + { + LOGGER.warn("set txisolation error:",e); + } + } + private void executeSQL(RouteResultsetNode rrn, ServerConnection sc, + boolean autocommit) throws IOException { + String orgin = rrn.getStatement(); + // String sql = rrn.getStatement().toLowerCase(); + // LOGGER.info("JDBC SQL:"+orgin+"|"+sc.toString()); + if (!modifiedSQLExecuted && rrn.isModifySQL()) { + modifiedSQLExecuted = true; + } + + try { + syncIsolation(sc.getTxIsolation()) ; + if (!this.schema.equals(this.oldSchema)) { + con.setCatalog(schema); + this.oldSchema = schema; + } + if (!this.isSpark) { + con.setAutoCommit(autocommit); + } + int sqlType = rrn.getSqlType(); + if(rrn.isCallStatement()&&"oracle".equalsIgnoreCase(getDbType())) + { + //存储过程暂时只支持oracle + ouputCallStatement(rrn,sc,orgin); + } else + if (sqlType == ServerParse.SELECT || sqlType == ServerParse.SHOW) { + if ((sqlType == ServerParse.SHOW) && (!dbType.equals("MYSQL"))) { + // showCMD(sc, orgin); + //ShowVariables.execute(sc, orgin); + ShowVariables.execute(sc, orgin,this); + } else if ("SELECT CONNECTION_ID()".equalsIgnoreCase(orgin)) { + //ShowVariables.justReturnValue(sc,String.valueOf(sc.getId())); + ShowVariables.justReturnValue(sc,String.valueOf(sc.getId()),this); + } else { + ouputResultSet(sc, orgin); + } + } else { + executeddl(sc, orgin); + } + + } catch (SQLException e) { + + String msg = e.getMessage(); + ErrorPacket error = new ErrorPacket(); + error.packetId = ++packetId; + 
error.errno = e.getErrorCode(); + error.message = msg.getBytes(); + this.respHandler.errorResponse(error.writeToBytes(sc), this); + } + catch (Exception e) { + String msg = e.getMessage(); + ErrorPacket error = new ErrorPacket(); + error.packetId = ++packetId; + error.errno = ErrorCode.ER_UNKNOWN_ERROR; + error.message = ((msg == null) ? e.toString().getBytes() : msg.getBytes()); + String err = null; + if(error.message!=null){ + err = new String(error.message); + } + LOGGER.error("sql execute error, "+ err , e); + this.respHandler.errorResponse(error.writeToBytes(sc), this); + } + finally { + this.running = false; + } + + } + + private FieldPacket getNewFieldPacket(String charset, String fieldName) { + FieldPacket fieldPacket = new FieldPacket(); + fieldPacket.orgName = StringUtil.encode(fieldName, charset); + fieldPacket.name = StringUtil.encode(fieldName, charset); + fieldPacket.length = 20; + fieldPacket.flags = 0; + fieldPacket.decimals = 0; + int javaType = 12; + fieldPacket.type = (byte) (MysqlDefs.javaTypeMysql(javaType) & 0xff); + return fieldPacket; + } + + private void executeddl(ServerConnection sc, String sql) + throws SQLException { + Statement stmt = null; + try { + stmt = con.createStatement(); + int count = stmt.executeUpdate(sql); + OkPacket okPck = new OkPacket(); + okPck.affectedRows = count; + okPck.insertId = 0; + okPck.packetId = ++packetId; + okPck.message = " OK!".getBytes(); + this.respHandler.okResponse(okPck.writeToBytes(sc), this); + } finally { + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException e) { + + } + } + } + } + + + private static int oracleCURSORTypeValue=-10; + static + { + Object cursor = ObjectUtil.getStaticFieldValue("oracle.jdbc.OracleTypes", "CURSOR"); + if(cursor!=null) { + oracleCURSORTypeValue = (int) cursor; + } + } + private void ouputCallStatement(RouteResultsetNode rrn,ServerConnection sc, String sql) + throws SQLException { + + CallableStatement stmt = null; + ResultSet rs = null; + try { + 
Procedure procedure = rrn.getProcedure(); + Collection paramters= procedure.getParamterMap().values(); + String callSql = procedure.toPreCallSql(null); + stmt = con.prepareCall(callSql); + + for (ProcedureParameter paramter : paramters) + { + if((ProcedureParameter.IN.equalsIgnoreCase(paramter.getParameterType()) + ||ProcedureParameter.INOUT.equalsIgnoreCase(paramter.getParameterType()))) + { + Object value= paramter.getValue()!=null ?paramter.getValue():paramter.getName(); + stmt.setObject(paramter.getIndex(),value); + } + + if(ProcedureParameter.OUT.equalsIgnoreCase(paramter.getParameterType()) + ||ProcedureParameter.INOUT.equalsIgnoreCase(paramter.getParameterType()) ) + { + int jdbcType ="oracle".equalsIgnoreCase(getDbType())&& procedure.getListFields().contains(paramter.getName())?oracleCURSORTypeValue: paramter.getJdbcType(); + stmt.registerOutParameter(paramter.getIndex(), jdbcType); + } + } + + boolean hadResults= stmt.execute(); + + ByteBuffer byteBuf = sc.allocate(); + if(procedure.getSelectColumns().size()>0) + { + List fieldPks = new LinkedList(); + for (ProcedureParameter paramter : paramters) + { + if (!procedure.getListFields().contains(paramter.getName())&&(ProcedureParameter.OUT.equalsIgnoreCase(paramter.getParameterType()) + || ProcedureParameter.INOUT.equalsIgnoreCase(paramter.getParameterType())) ) + { + FieldPacket packet = PacketUtil.getField(paramter.getName(), MysqlDefs.javaTypeMysql(paramter.getJdbcType())); + fieldPks.add(packet); + } + } + int colunmCount = fieldPks.size(); + + ResultSetHeaderPacket headerPkg = new ResultSetHeaderPacket(); + headerPkg.fieldCount = fieldPks.size(); + headerPkg.packetId = ++packetId; + + byteBuf = headerPkg.write(byteBuf, sc, true); + byteBuf.flip(); + byte[] header = new byte[byteBuf.limit()]; + byteBuf.get(header); + byteBuf.clear(); + + + List fields = new ArrayList(fieldPks.size()); + Iterator itor = fieldPks.iterator(); + while (itor.hasNext()) { + FieldPacket curField = itor.next(); + 
curField.packetId = ++packetId; + byteBuf = curField.write(byteBuf, sc, false); + byteBuf.flip(); + byte[] field = new byte[byteBuf.limit()]; + byteBuf.get(field); + byteBuf.clear(); + fields.add(field); + itor.remove(); + } + EOFPacket eofPckg = new EOFPacket(); + eofPckg.packetId = ++packetId; + byteBuf = eofPckg.write(byteBuf, sc, false); + byteBuf.flip(); + byte[] eof = new byte[byteBuf.limit()]; + byteBuf.get(eof); + byteBuf.clear(); + this.respHandler.fieldEofResponse(header, fields, eof, this); + RowDataPacket curRow = new RowDataPacket(colunmCount); + for (String name : procedure.getSelectColumns()) + { + ProcedureParameter procedureParameter= procedure.getParamterMap().get(name); + curRow.add(StringUtil.encode(String.valueOf(stmt.getObject(procedureParameter.getIndex())), + sc.getCharset())); + } + + curRow.packetId = ++packetId; + byteBuf = curRow.write(byteBuf, sc, false); + byteBuf.flip(); + byte[] row = new byte[byteBuf.limit()]; + byteBuf.get(row); + byteBuf.clear(); + this.respHandler.rowResponse(row, this); + + eofPckg = new EOFPacket(); + eofPckg.packetId = ++packetId; + if(procedure.isResultList()) + { + eofPckg.status = 42; + } + byteBuf = eofPckg.write(byteBuf, sc, false); + byteBuf.flip(); + eof = new byte[byteBuf.limit()]; + byteBuf.get(eof); + byteBuf.clear(); + this.respHandler.rowEofResponse(eof, this); + } + + + if(procedure.isResultList()) + { + List fieldPks = new LinkedList(); + int listSize=procedure.getListFields().size(); + for (ProcedureParameter paramter : paramters) + { + if (procedure.getListFields().contains(paramter.getName())&&(ProcedureParameter.OUT.equalsIgnoreCase(paramter.getParameterType()) + || ProcedureParameter.INOUT.equalsIgnoreCase(paramter.getParameterType())) ) + { + listSize--; + + Object object = stmt.getObject(paramter.getIndex()); + rs= (ResultSet) object; + if(rs==null) { + continue; + } + ResultSetUtil.resultSetToFieldPacket(sc.getCharset(), fieldPks, rs, + this.isSpark); + + int colunmCount = 
fieldPks.size(); + ResultSetHeaderPacket headerPkg = new ResultSetHeaderPacket(); + headerPkg.fieldCount = fieldPks.size(); + headerPkg.packetId = ++packetId; + + byteBuf = headerPkg.write(byteBuf, sc, true); + byteBuf.flip(); + byte[] header = new byte[byteBuf.limit()]; + byteBuf.get(header); + byteBuf.clear(); + + + List fields = new ArrayList(fieldPks.size()); + Iterator itor = fieldPks.iterator(); + while (itor.hasNext()) { + FieldPacket curField = itor.next(); + curField.packetId = ++packetId; + byteBuf = curField.write(byteBuf, sc, false); + byteBuf.flip(); + byte[] field = new byte[byteBuf.limit()]; + byteBuf.get(field); + byteBuf.clear(); + fields.add(field); + itor.remove(); + } + EOFPacket eofPckg = new EOFPacket(); + eofPckg.packetId = ++packetId; + byteBuf = eofPckg.write(byteBuf, sc, false); + byteBuf.flip(); + byte[] eof = new byte[byteBuf.limit()]; + byteBuf.get(eof); + byteBuf.clear(); + this.respHandler.fieldEofResponse(header, fields, eof, this); + + // output row + while (rs.next()) { + RowDataPacket curRow = new RowDataPacket(colunmCount); + for (int i = 0; i < colunmCount; i++) { + int j = i + 1; + curRow.add(StringUtil.encode(rs.getString(j), + sc.getCharset())); + } + curRow.packetId = ++packetId; + byteBuf = curRow.write(byteBuf, sc, false); + byteBuf.flip(); + byte[] row = new byte[byteBuf.limit()]; + byteBuf.get(row); + byteBuf.clear(); + this.respHandler.rowResponse(row, this); + } + eofPckg = new EOFPacket(); + eofPckg.packetId = ++packetId; + if(listSize!=0) + { + eofPckg.status = 42; + } + byteBuf = eofPckg.write(byteBuf, sc, false); + byteBuf.flip(); + eof = new byte[byteBuf.limit()]; + byteBuf.get(eof); + byteBuf.clear(); + this.respHandler.rowEofResponse(eof, this); + } + } + + } + + + + if(!procedure.isResultSimpleValue()) + { + byte[] OK = new byte[] { 7, 0, 0, 1, 0, 0, 0, 2, 0, 0, + 0 }; + OK[3]=++packetId; + this.respHandler.okResponse(OK,this); + } + sc.recycle(byteBuf); + } finally { + if (rs != null) { + try { + rs.close(); + 
} catch (SQLException e) { + + } + } + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException e) { + + } + } + } + } + + + private void ouputResultSet(ServerConnection sc, String sql) + throws SQLException { + ResultSet rs = null; + Statement stmt = null; + + try { + stmt = con.createStatement(); + rs = stmt.executeQuery(sql); + + List fieldPks = new LinkedList(); + ResultSetUtil.resultSetToFieldPacket(sc.getCharset(), fieldPks, rs, + this.isSpark); + int colunmCount = fieldPks.size(); + ByteBuffer byteBuf = sc.allocate(); + ResultSetHeaderPacket headerPkg = new ResultSetHeaderPacket(); + headerPkg.fieldCount = fieldPks.size(); + headerPkg.packetId = ++packetId; + + byteBuf = headerPkg.write(byteBuf, sc, true); + byteBuf.flip(); + byte[] header = new byte[byteBuf.limit()]; + byteBuf.get(header); + byteBuf.clear(); + List fields = new ArrayList(fieldPks.size()); + Iterator itor = fieldPks.iterator(); + while (itor.hasNext()) { + FieldPacket curField = itor.next(); + curField.packetId = ++packetId; + byteBuf = curField.write(byteBuf, sc, false); + byteBuf.flip(); + byte[] field = new byte[byteBuf.limit()]; + byteBuf.get(field); + byteBuf.clear(); + fields.add(field); + } + EOFPacket eofPckg = new EOFPacket(); + eofPckg.packetId = ++packetId; + byteBuf = eofPckg.write(byteBuf, sc, false); + byteBuf.flip(); + byte[] eof = new byte[byteBuf.limit()]; + byteBuf.get(eof); + byteBuf.clear(); + this.respHandler.fieldEofResponse(header, fields, eof, this); + + // output row + while (rs.next()) { + RowDataPacket curRow = new RowDataPacket(colunmCount); + for (int i = 0; i < colunmCount; i++) { + int j = i + 1; + if(MysqlDefs.isBianry((byte) fieldPks.get(i).type)) { + curRow.add(rs.getBytes(j)); + } else if(fieldPks.get(i).type == MysqlDefs.FIELD_TYPE_DECIMAL || + fieldPks.get(i).type == (MysqlDefs.FIELD_TYPE_NEW_DECIMAL - 256)) { // field type is unsigned byte + // ensure that do not use scientific notation format + BigDecimal val = rs.getBigDecimal(j); + 
curRow.add(StringUtil.encode(val != null ? val.toPlainString() : null, + sc.getCharset())); + } else { + curRow.add(StringUtil.encode(rs.getString(j), + sc.getCharset())); + } + + } + curRow.packetId = ++packetId; + byteBuf = curRow.write(byteBuf, sc, false); + byteBuf.flip(); + byte[] row = new byte[byteBuf.limit()]; + byteBuf.get(row); + byteBuf.clear(); + this.respHandler.rowResponse(row, this); + } + + fieldPks.clear(); + + // end row + eofPckg = new EOFPacket(); + eofPckg.packetId = ++packetId; + byteBuf = eofPckg.write(byteBuf, sc, false); + byteBuf.flip(); + eof = new byte[byteBuf.limit()]; + byteBuf.get(eof); + sc.recycle(byteBuf); + this.respHandler.rowEofResponse(eof, this); + } finally { + if (rs != null) { + try { + rs.close(); + } catch (SQLException e) { + + } + } + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException e) { + + } + } + } + } + + @Override + public void query(final String sql) throws UnsupportedEncodingException { + if(respHandler instanceof ConnectionHeartBeatHandler) + { + justForHeartbeat(sql); + } else + { + throw new UnsupportedEncodingException("unsupported yet "); + } + } + private void justForHeartbeat(String sql) + { + + Statement stmt = null; + + try { + stmt = con.createStatement(); + stmt.execute(sql); + if(!isAutocommit()){ //如果在写库上,如果是事务方式的连接,需要进行手动commit + con.commit(); + } + this.respHandler.okResponse(OkPacket.OK, this); + + } + catch (Exception e) + { + String msg = e.getMessage(); + ErrorPacket error = new ErrorPacket(); + error.packetId = ++packetId; + error.errno = ErrorCode.ER_UNKNOWN_ERROR; + error.message = msg.getBytes(); + this.respHandler.errorResponse(error.writeToBytes(), this); + } + finally { + if (stmt != null) { + try { + stmt.close(); + } catch (SQLException e) { + + } + } + } + } + @Override + public Object getAttachment() { + return this.attachement; + } + + @Override + public String getCharset() { + return null; + } + + @Override + public void execute(final RouteResultsetNode node, + 
final ServerConnection source, final boolean autocommit) + throws IOException { + Runnable runnable = new Runnable() { + @Override + public void run() { + try { + executeSQL(node, source, autocommit); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + }; + + MycatServer.getInstance().getBusinessExecutor().execute(runnable); + } + + @Override + public void recordSql(String host, String schema, String statement) { + + } + + @Override + public boolean syncAndExcute() { + return true; + } + + @Override + public void rollback() { + try { + con.rollback(); + + this.respHandler.okResponse(OkPacket.OK, this); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + public boolean isRunning() { + return this.running; + } + + @Override + public boolean isBorrowed() { + return this.borrowed; + } + + @Override + public void setBorrowed(boolean borrowed) { + this.borrowed = borrowed; + + } + + @Override + public int getTxIsolation() { + if (con != null) { + try { + return con.getTransactionIsolation(); + } catch (SQLException e) { + return 0; + } + } else { + return -1; + } + } + + @Override + public boolean isAutocommit() { + if (con == null) { + return true; + } else { + try { + return con.getAutoCommit(); + } catch (SQLException e) { + + } + } + return true; + } + + @Override + public long getId() { + return id; + } + + @Override + public String toString() { + return "JDBCConnection [id=" + id +",autocommit="+this.isAutocommit()+",pool=" + pool + ", schema=" + schema + ", dbType=" + dbType + ", oldSchema=" + + oldSchema + ", packetId=" + packetId + ", txIsolation=" + txIsolation + ", running=" + running + + ", borrowed=" + borrowed + ", host=" + host + ", port=" + port + ", con=" + con + + ", respHandler=" + respHandler + ", attachement=" + attachement + ", headerOutputed=" + + headerOutputed + ", modifiedSQLExecuted=" + modifiedSQLExecuted + ", startTime=" + startTime + + ", lastTime=" + lastTime + ", isSpark=" + isSpark + ", processor=" 
+ processor + "]"; + } + + @Override + public void discardClose(String reason) { + // TODO Auto-generated method stub + + } + + + +} diff --git a/src/main/java/io/mycat/backend/jdbc/JDBCDatasource.java b/src/main/java/io/mycat/backend/jdbc/JDBCDatasource.java index 4a8ccd91b..597c50ec3 100644 --- a/src/main/java/io/mycat/backend/jdbc/JDBCDatasource.java +++ b/src/main/java/io/mycat/backend/jdbc/JDBCDatasource.java @@ -1,54 +1,48 @@ package io.mycat.backend.jdbc; -import io.mycat.backend.PhysicalDatasource; -import io.mycat.backend.heartbeat.DBHeartbeat; -import io.mycat.net.ConnectIdGenerator; -import io.mycat.server.config.loader.LocalLoader; -import io.mycat.server.config.node.DBHostConfig; -import io.mycat.server.config.node.DataHostConfig; -import io.mycat.server.config.node.JdbcDriver; -import io.mycat.server.executors.ResponseHandler; - import java.io.IOException; import java.sql.Connection; -import java.sql.Driver; import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; -import java.util.Enumeration; -import java.util.Map; +import java.util.List; -import org.apache.commons.lang.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import com.google.common.collect.Lists; + +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.heartbeat.DBHeartbeat; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.config.model.DBHostConfig; +import io.mycat.config.model.DataHostConfig; +import io.mycat.net.NIOConnector; +import io.mycat.net.NIOProcessor; public class JDBCDatasource extends PhysicalDatasource { - public static final Logger logger = LoggerFactory.getLogger(JDBCDatasource.class); - private static Map jdbcDriverConfig = null; - static { // 最多也就3,4个数据库,一次性加载驱动类 - jdbcDriverConfig = LocalLoader.loadJdbcDriverConfig(); - if(jdbcDriverConfig != null && jdbcDriverConfig.size() > 0){ - for(String key : jdbcDriverConfig.keySet()){ - 
JdbcDriver driver = jdbcDriverConfig.get(key); - if(driver != null && StringUtils.isNotBlank(driver.getClassName())){ - try { - Class.forName(driver.getClassName()); - } catch (ClassNotFoundException e) { - logger.error("Class.forName load jdbcDriver for "+key+" error: " + e.getMessage()); - } - }else{ - logger.error(" driver for " + key + " is not exist or className has no value," - + " please check jdbcDriver-config element in mycat.xml."); - } + static { + // 加载可能的驱动 + List drivers = Lists.newArrayList( + "com.mysql.jdbc.Driver", + "io.mycat.backend.jdbc.mongodb.MongoDriver", + "io.mycat.backend.jdbc.sequoiadb.SequoiaDriver", + "oracle.jdbc.OracleDriver", + "com.microsoft.sqlserver.jdbc.SQLServerDriver", + "net.sourceforge.jtds.jdbc.Driver", + "org.apache.hive.jdbc.HiveDriver", + "com.ibm.db2.jcc.DB2Driver", + "org.postgresql.Driver"); + + for (String driver : drivers) { + try { + Class.forName(driver); + } catch (ClassNotFoundException ignored) { } } } - - public JDBCDatasource(DBHostConfig config, DataHostConfig hostConfig, - boolean isReadNode) { + + public JDBCDatasource(DBHostConfig config, DataHostConfig hostConfig, boolean isReadNode) { super(config, hostConfig, isReadNode); - } @Override @@ -57,69 +51,75 @@ public DBHeartbeat createHeartBeat() { } @Override - public void createNewConnection(ResponseHandler handler, String schema) - throws IOException { + public void createNewConnection(ResponseHandler handler,String schema) throws IOException { DBHostConfig cfg = getConfig(); + JDBCConnection c = new JDBCConnection(); + c.setHost(cfg.getIp()); + c.setPort(cfg.getPort()); + c.setPool(this); + c.setSchema(schema); + c.setDbType(cfg.getDbType()); - JDBCConnection c = null; + NIOProcessor processor = (NIOProcessor) MycatServer.getInstance().nextProcessor(); + c.setProcessor(processor); + c.setId(NIOConnector.ID_GENERATOR.getId()); //复用mysql的Backend的ID,需要在process中存储 + + processor.addBackend(c); try { - // TODO: 这里需要实现连继池 Connection con = getConnection(); - c 
= new JDBCConnection(); - c.setHost(cfg.getIp()); - c.setPort(cfg.getPort()); - c.setPool(this); - c.setSchema(schema); - c.setDbType(cfg.getDbType()); - c.setId(ConnectIdGenerator.getINSTNCE().getId()); // 复用mysql的Backend的ID,需要在process中存储 - // c.setIdleTimeout(pool.getConfig().getIdleTimeout()); c.setCon(con); // notify handler handler.connectionAcquired(c); - } catch (Exception e) { handler.connectionError(e, c); } + } + + @Override + public boolean testConnection(String schema) throws IOException { + boolean isConnected = false; + + Connection connection = null; + Statement statement = null; + try { + DBHostConfig cfg = getConfig(); + connection = DriverManager.getConnection(cfg.getUrl(), cfg.getUser(), cfg.getPassword()); + statement = connection.createStatement(); + if (connection != null && statement != null) { + isConnected = true; + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + if (statement != null) { + try { statement.close(); } catch (SQLException e) {} + } + + if (connection != null) { + try { connection.close(); } catch (SQLException e) {} + } + } + return isConnected; } - Connection getConnection() throws SQLException { - DBHostConfig cfg = getConfig(); - Enumeration drivers = DriverManager.getDrivers(); - Driver d = drivers.nextElement(); - d.getClass().getName(); - Connection connection = DriverManager.getConnection(cfg.getUrl(), - cfg.getUser(), cfg.getPassword()); - String initSql = getHostConfig().getConnectionInitSql(); - if (StringUtils.isNotBlank(initSql)) { - try (Statement statement = connection.createStatement()){ + Connection getConnection() throws SQLException { + DBHostConfig cfg = getConfig(); + Connection connection = DriverManager.getConnection(cfg.getUrl(), cfg.getUser(), cfg.getPassword()); + String initSql=getHostConfig().getConnectionInitSql(); + if (initSql != null && !"".equals(initSql)) { + Statement statement = null; + try { + statement = connection.createStatement(); statement.execute(initSql); - } 
catch(SQLException e) { - logger.warn(" getConnection error: " + e.getMessage()); + } finally { + if (statement != null) { + statement.close(); + } } } return connection; - } - - /** - * 根据 dbType 获取 JdbcDriver - * @param dbType mysql - * @return JdbcDriver: {'mysql':'com.mysql.jdbc.Driver'} - */ - public static JdbcDriver getJdbcDriverBydbType(String dbType){ - if(StringUtils.isNotBlank(dbType)){ - return jdbcDriverConfig.get(dbType.toLowerCase()); // 获取对应 dbType 的 JdbcDriver - } - return null; - } - - public static Map getJdbcDriverConfig() { - return jdbcDriverConfig; - } - - public static void setJdbcDriverConfig(Map jdbcDriverConfig) { - JDBCDatasource.jdbcDriverConfig = jdbcDriverConfig; - } - + } + } diff --git a/src/main/java/io/mycat/backend/jdbc/JDBCHeartbeat.java b/src/main/java/io/mycat/backend/jdbc/JDBCHeartbeat.java index 4194c983f..85b2b2524 100644 --- a/src/main/java/io/mycat/backend/jdbc/JDBCHeartbeat.java +++ b/src/main/java/io/mycat/backend/jdbc/JDBCHeartbeat.java @@ -1,33 +1,33 @@ package io.mycat.backend.jdbc; -import io.mycat.backend.HeartbeatRecorder; -import io.mycat.backend.heartbeat.DBHeartbeat; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.sql.Connection; +import java.sql.SQLException; import java.sql.Statement; import java.text.SimpleDateFormat; import java.util.Date; import java.util.concurrent.locks.ReentrantLock; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.heartbeat.DBHeartbeat; +import io.mycat.statistic.HeartbeatRecorder; + public class JDBCHeartbeat extends DBHeartbeat{ private final ReentrantLock lock; private final JDBCDatasource source; private final boolean heartbeatnull; private Long lastSendTime = System.currentTimeMillis(); private Long lastReciveTime = System.currentTimeMillis(); - - - private static final Logger logger = LoggerFactory - .getLogger(JDBCHeartbeat.class); + + + private Logger logger = LoggerFactory.getLogger(this.getClass()); public 
JDBCHeartbeat(JDBCDatasource source) { this.source = source; lock = new ReentrantLock(false); this.status = INIT_STATUS; - this.heartbeatSQL = source.getHostConfig().getHeartbeatSQL().trim(); + this.heartbeatSQL = source.getHostConfig().getHearbeatSQL().trim(); this.heartbeatnull= heartbeatSQL.length()==0; } @@ -88,8 +88,9 @@ public HeartbeatRecorder getRecorder() { public void heartbeat() { - if (isStop.get()) + if (isStop.get()) { return; + } lastSendTime = System.currentTimeMillis(); lock.lock(); try diff --git a/src/main/java/io/mycat/backend/jdbc/ShowVariables.java b/src/main/java/io/mycat/backend/jdbc/ShowVariables.java new file mode 100644 index 000000000..77130c483 --- /dev/null +++ b/src/main/java/io/mycat/backend/jdbc/ShowVariables.java @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.backend.jdbc; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.BackendConnection; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.NonBlockingSession; +import io.mycat.server.ServerConnection; +import io.mycat.util.StringUtil; + +/** + * @author mycat + */ +public final class ShowVariables +{ + private static final Logger LOGGER = LoggerFactory.getLogger(ShowVariables.class); + private static final int FIELD_COUNT = 2; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + private static final Pattern pattern = Pattern.compile("(?:like|=)\\s*'([^']*(?:\\w+)+[^']*)+'",Pattern.CASE_INSENSITIVE); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("VARIABLE_NAME", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("VALUE", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + private static List parseVariable(String sql) + { + List variableList=new ArrayList<>(); + Matcher matcher = pattern.matcher(sql); + while (matcher.find()) + { + variableList.add(matcher.group(1)); + } + return variableList; + } + public static void execute(ServerConnection c, String sql) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // 
write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + + List variableList= parseVariable(sql); + for (String key : variableList) + { + String value= variables.get(key) ; + if(value!=null) + { + RowDataPacket row = getRow(key, value, c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + } + + + + // write lastEof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + public static void justReturnValue(ServerConnection c, String value) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + + + + if(value!=null) + { + + RowDataPacket row = new RowDataPacket(1); + row.add(StringUtil.encode(value, c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + + + // write lastEof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(String name, String value, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(name, charset)); + row.add(StringUtil.encode(value, charset)); + return row; + } + + private static final Map variables = new HashMap(); + static { + variables.put("character_set_client", "utf8"); + variables.put("character_set_connection", "utf8"); + variables.put("character_set_results", "utf8"); + variables.put("character_set_server", "utf8"); + variables.put("init_connect", ""); + 
variables.put("interactive_timeout", "172800"); + variables.put("lower_case_table_names", "1"); + variables.put("max_allowed_packet", "16777216"); + variables.put("net_buffer_length", "16384"); + variables.put("net_write_timeout", "60"); + variables.put("query_cache_size", "0"); + variables.put("query_cache_type", "OFF"); + variables.put("sql_mode", "STRICT_TRANS_TABLES"); + variables.put("system_time_zone", "CST"); + variables.put("time_zone", "SYSTEM"); + variables.put("tx_isolation", "REPEATABLE-READ"); + variables.put("wait_timeout", "172800"); + } + + public static void execute(ServerConnection sc, String orgin, BackendConnection jdbcConnection) { + execute(sc, orgin); + NonBlockingSession session = sc.getSession2(); + session.releaseConnectionIfSafe(jdbcConnection, LOGGER.isDebugEnabled(), false); + } + public static void justReturnValue(ServerConnection sc, String orgin, BackendConnection jdbcConnection) { + justReturnValue(sc, orgin); + NonBlockingSession session = sc.getSession2(); + session.releaseConnectionIfSafe(jdbcConnection, LOGGER.isDebugEnabled(), false); + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/DriverPropertyInfoHelper.java b/src/main/java/io/mycat/backend/jdbc/mongodb/DriverPropertyInfoHelper.java index 483e0ef99..43dcb529f 100644 --- a/src/main/java/io/mycat/backend/jdbc/mongodb/DriverPropertyInfoHelper.java +++ b/src/main/java/io/mycat/backend/jdbc/mongodb/DriverPropertyInfoHelper.java @@ -1,70 +1,70 @@ -package io.mycat.backend.jdbc.mongodb; - -import java.sql.DriverPropertyInfo; -import java.util.ArrayList; - - -public class DriverPropertyInfoHelper{ - - public static final String AUTO_CONNECT_RETRY = "autoConnectRetry"; - - public static final String CONNECTIONS_PER_HOST = "connecionsPerHost"; - - public static final String CONNECT_TIMEOUT = "connectTimeout"; - - public static final String CURSOR_FINALIZER_ENABLED = "cursorFinalizerEnabled"; - - public static final String 
MAX_AUTO_CONNECT_RETRY_TIME = "maxAutoConnectRetryTime"; - - public static final String READ_PREFERENCE = "readPreference"; - - public static final String SOCKET_TIMEOUT = "socketTimeout"; - - public DriverPropertyInfo[] getPropertyInfo() - { - ArrayList propInfos = new ArrayList(); - - addPropInfo( - propInfos, - AUTO_CONNECT_RETRY, - "false", - "If true, the driver will keep trying to connect to the same server in case that the socket " - + "cannot be established. There is maximum amount of time to keep retrying, which is 15s by " - + "default.", null); - - addPropInfo(propInfos, CONNECTIONS_PER_HOST, "10", "The maximum number of connections allowed per " - + "host for this Mongo instance. Those connections will be kept in a pool when idle.", null); - - addPropInfo(propInfos, CONNECT_TIMEOUT, "10000", "The connection timeout in milliseconds. ", null); - - addPropInfo(propInfos, CURSOR_FINALIZER_ENABLED, "true", "Sets whether there is a a finalize " - + "method created that cleans up instances of DBCursor that the client does not close.", - null); - - addPropInfo(propInfos, MAX_AUTO_CONNECT_RETRY_TIME, "0", - "The maximum amount of time in MS to spend retrying to open connection to the same server." 
- + "Default is 0, which means to use the default 15s if autoConnectRetry is on.", null); - - addPropInfo(propInfos, READ_PREFERENCE, "primary", - "represents preferred replica set members to which a query or command can be sent", new String[] { - "primary", "primary preferred", "secondary", "secondary preferred", "nearest" }); - - addPropInfo(propInfos, SOCKET_TIMEOUT, "0", "The socket timeout in milliseconds It is used for " - + "I/O socket read and write operations " - + "Socket.setSoTimeout(int) Default is 0 and means no timeout.", null); - - return propInfos.toArray(new DriverPropertyInfo[propInfos.size()]); - } - - private void addPropInfo(final ArrayList propInfos, final String propName, - final String defaultVal, final String description, final String[] choices) - { - DriverPropertyInfo newProp = new DriverPropertyInfo(propName, defaultVal); - newProp.description = description; - if (choices != null) - { - newProp.choices = choices; - } - propInfos.add(newProp); - } +package io.mycat.backend.jdbc.mongodb; + +import java.sql.DriverPropertyInfo; +import java.util.ArrayList; + + +public class DriverPropertyInfoHelper{ + + public static final String AUTO_CONNECT_RETRY = "autoConnectRetry"; + + public static final String CONNECTIONS_PER_HOST = "connecionsPerHost"; + + public static final String CONNECT_TIMEOUT = "connectTimeout"; + + public static final String CURSOR_FINALIZER_ENABLED = "cursorFinalizerEnabled"; + + public static final String MAX_AUTO_CONNECT_RETRY_TIME = "maxAutoConnectRetryTime"; + + public static final String READ_PREFERENCE = "readPreference"; + + public static final String SOCKET_TIMEOUT = "socketTimeout"; + + public DriverPropertyInfo[] getPropertyInfo() + { + ArrayList propInfos = new ArrayList(); + + addPropInfo( + propInfos, + AUTO_CONNECT_RETRY, + "false", + "If true, the driver will keep trying to connect to the same server in case that the socket " + + "cannot be established. 
There is maximum amount of time to keep retrying, which is 15s by " + + "default.", null); + + addPropInfo(propInfos, CONNECTIONS_PER_HOST, "10", "The maximum number of connections allowed per " + + "host for this Mongo instance. Those connections will be kept in a pool when idle.", null); + + addPropInfo(propInfos, CONNECT_TIMEOUT, "10000", "The connection timeout in milliseconds. ", null); + + addPropInfo(propInfos, CURSOR_FINALIZER_ENABLED, "true", "Sets whether there is a a finalize " + + "method created that cleans up instances of DBCursor that the client does not close.", + null); + + addPropInfo(propInfos, MAX_AUTO_CONNECT_RETRY_TIME, "0", + "The maximum amount of time in MS to spend retrying to open connection to the same server." + + "Default is 0, which means to use the default 15s if autoConnectRetry is on.", null); + + addPropInfo(propInfos, READ_PREFERENCE, "primary", + "represents preferred replica set members to which a query or command can be sent", new String[] { + "primary", "primary preferred", "secondary", "secondary preferred", "nearest" }); + + addPropInfo(propInfos, SOCKET_TIMEOUT, "0", "The socket timeout in milliseconds It is used for " + + "I/O socket read and write operations " + + "Socket.setSoTimeout(int) Default is 0 and means no timeout.", null); + + return propInfos.toArray(new DriverPropertyInfo[propInfos.size()]); + } + + private void addPropInfo(final ArrayList propInfos, final String propName, + final String defaultVal, final String description, final String[] choices) + { + DriverPropertyInfo newProp = new DriverPropertyInfo(propName, defaultVal); + newProp.description = description; + if (choices != null) + { + newProp.choices = choices; + } + propInfos.add(newProp); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoClientPropertyHelper.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoClientPropertyHelper.java new file mode 100644 index 000000000..bea00ae65 --- /dev/null +++ 
b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoClientPropertyHelper.java @@ -0,0 +1,35 @@ +package io.mycat.backend.jdbc.mongodb; + + +import com.google.common.base.Joiner; + +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.Set; + +/** + * @author liuxinsi + * @mail akalxs@gmail.com + */ +public class MongoClientPropertyHelper { + /** + * 格式化pro中的属性为{@link com.mongodb.MongoClientURI}中要求的格式。 + * + * @param pro 配置参数 + * @return 格式化后的字符串 + */ + public static String formatProperties(Properties pro) { + if (pro == null || pro.isEmpty()) { + return null; + } + + Set keys = pro.keySet(); + List props = new ArrayList<>(keys.size()); + for (Object key : keys) { + Object value = pro.get(key); + props.add(key + "=" + value.toString()); + } + return Joiner.on(";").join(props); + } +} diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoConnection.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoConnection.java index a5166e7b7..16590c7ef 100644 --- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoConnection.java +++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoConnection.java @@ -48,7 +48,9 @@ public DB getDB() { if (this._schema!=null) { return this.mc.getDB(this._schema); } - else return null; + else { + return null; + } } @Override @@ -70,10 +72,9 @@ public String nativeSQL(String sql) throws SQLException { } @Override - public void setAutoCommit(boolean autoCommit) throws SQLException { - - if (!autoCommit) - throw new RuntimeException("autoCommit has to be on"); + public void setAutoCommit(boolean autoCommit) throws SQLException { + //if (!autoCommit) + // throw new RuntimeException("autoCommit has to be on"); } @Override diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoData.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoData.java index 5ebf81e8c..86bba46bc 100644 --- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoData.java +++ 
b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoData.java @@ -1,130 +1,130 @@ -package io.mycat.backend.jdbc.mongodb; - -import java.sql.Date; -import java.sql.Time; -import java.sql.Timestamp; -import java.sql.Types; -import java.util.HashMap; - -import com.mongodb.BasicDBList; -import com.mongodb.DBCursor; -import com.mongodb.DBObject; - -public class MongoData { - - private DBCursor cursor; - private long count; - private String table; - private DBObject groupby; - - private HashMap map = new HashMap(); - private boolean type=false; - - public MongoData(){ - this.count=0; - this.cursor=null; - } - - public long getCount() { - return this.count; - } - - - public void setCount(long count) { - this.count=count; - } - - public String getTable() { - return this.table; - } - - public void setTable(String table) { - this.table=table; - } - - public DBObject getGrouyBy() { - return this.groupby; - } - - public BasicDBList getGrouyBys() { - if (this.groupby instanceof BasicDBList) { - return (BasicDBList)this.groupby; - } - else { - return null; - } - } - public void setGrouyBy(DBObject gb) { - this.groupby=gb; - this.type=true; - if (gb instanceof BasicDBList) { - Object gb2=((BasicDBList)gb).get(0); - if (gb2 instanceof DBObject) { - for (String field :((DBObject)gb2).keySet()) { - Object val = ((DBObject)gb2).get(field); - setField(field,getObjectToType(val)); - } - } - } - } - - public static int getObjectToType(Object ob){ - if (ob instanceof Integer) { - return Types.INTEGER; - } - else if (ob instanceof Boolean) { - return Types.BOOLEAN; - } - else if (ob instanceof Byte) { - return Types.BIT; - } - else if (ob instanceof Short) { - return Types.INTEGER; - } - else if (ob instanceof Float) { - return Types.FLOAT; - } - else if (ob instanceof Long) { - return Types.BIGINT; - } - else if (ob instanceof Double) { - return Types.DOUBLE; - } - else if (ob instanceof Date) { - return Types.DATE; - } - else if (ob instanceof Time) { - return Types.TIME; - } - else if 
(ob instanceof Timestamp) { - return Types.TIMESTAMP; - } - else if (ob instanceof String) { - return Types.VARCHAR; - } - else { - return Types.VARCHAR; - } - } - - public void setField(String field,int ftype) { - map.put(field, ftype); - } - - public HashMap getFields() { - return this.map; - } - - public boolean getType() { - return this.type; - } - - public DBCursor getCursor() { - return this.cursor; - } - - public DBCursor setCursor(DBCursor cursor) { - return this.cursor=cursor; - } - -} +package io.mycat.backend.jdbc.mongodb; + +import java.sql.Date; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.HashMap; + +import com.mongodb.DBCursor; +import com.mongodb.DBObject; +import com.mongodb.BasicDBList; + +public class MongoData { + + private DBCursor cursor; + private long count; + private String table; + private DBObject groupby; + + private HashMap map = new HashMap(); + private boolean type=false; + + public MongoData(){ + this.count=0; + this.cursor=null; + } + + public long getCount() { + return this.count; + } + + + public void setCount(long count) { + this.count=count; + } + + public String getTable() { + return this.table; + } + + public void setTable(String table) { + this.table=table; + } + + public DBObject getGrouyBy() { + return this.groupby; + } + + public BasicDBList getGrouyBys() { + if (this.groupby instanceof BasicDBList) { + return (BasicDBList)this.groupby; + } + else { + return null; + } + } + public void setGrouyBy(DBObject gb) { + this.groupby=gb; + this.type=true; + if (gb instanceof BasicDBList) { + Object gb2=((BasicDBList)gb).get(0); + if (gb2 instanceof DBObject) { + for (String field :((DBObject)gb2).keySet()) { + Object val = ((DBObject)gb2).get(field); + setField(field,getObjectToType(val)); + } + } + } + } + + public static int getObjectToType(Object ob){ + if (ob instanceof Integer) { + return Types.INTEGER; + } + else if (ob instanceof Boolean) { + return Types.BOOLEAN; + } + else 
if (ob instanceof Byte) { + return Types.BIT; + } + else if (ob instanceof Short) { + return Types.INTEGER; + } + else if (ob instanceof Float) { + return Types.FLOAT; + } + else if (ob instanceof Long) { + return Types.BIGINT; + } + else if (ob instanceof Double) { + return Types.DOUBLE; + } + else if (ob instanceof Date) { + return Types.DATE; + } + else if (ob instanceof Time) { + return Types.TIME; + } + else if (ob instanceof Timestamp) { + return Types.TIMESTAMP; + } + else if (ob instanceof String) { + return Types.VARCHAR; + } + else { + return Types.VARCHAR; + } + } + + public void setField(String field,int ftype) { + map.put(field, ftype); + } + + public HashMap getFields() { + return this.map; + } + + public boolean getType() { + return this.type; + } + + public DBCursor getCursor() { + return this.cursor; + } + + public DBCursor setCursor(DBCursor cursor) { + return this.cursor=cursor; + } + +} diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoDriver.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoDriver.java index 6b9db38d3..713aa2f04 100644 --- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoDriver.java +++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoDriver.java @@ -63,10 +63,11 @@ private MongoClientURI parseURL(String url, Properties defaults) { //删掉开头的 jdbc: //url = url.replace(URL_JDBC, ""); - + + String options = MongoClientPropertyHelper.formatProperties(defaults); + LOGGER.debug("the options:{}",options); try { - //FIXME 判断defaults中的参数,写入URL中? - return new MongoClientURI(url); + return new MongoClientURI(options == null ? url : url + "?" 
+ options); } catch (Exception e) { LOGGER.error("parseURLError",e); return null; diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoEmbeddedObjectProcessor.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoEmbeddedObjectProcessor.java new file mode 100644 index 000000000..88fa0e92b --- /dev/null +++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoEmbeddedObjectProcessor.java @@ -0,0 +1,297 @@ +package io.mycat.backend.jdbc.mongodb; + +import com.mongodb.BasicDBList; +import com.mongodb.BasicDBObject; +import org.bson.types.ObjectId; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.reflect.Array; +import java.lang.reflect.Field; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * 处理从MongoDB中获取的内嵌对象(Embeedded Object|SubDocument),将MongoDB对象转换为对应的Java对象。 + *
+ * 支持: + *
    + *
  • {@link ObjectId}
  • + *
  • 基本类型
  • + *
  • 枚举
  • + *
  • 内嵌对象
  • + *
  • 内嵌数组
  • + *
+ * eg.
+ * public class A{
+ *   private ObjectId _id;
+ *   private String name;
+ *   private Integer age;
+ *   private B b;
+ *   private Address[] addresses;
+ *   private String[] someCode;
+ *   ...
+ * } + *

+ * 不支持: + *

    + *
  • 第一层的内嵌集合类型
  • + *
+ * eg.
+ * public class A{
+ *   private ObjectId _id;
+ *   private String name;
+ *   private Integer age;
+ *   private B b;
+ *   private List<Address> addresses;
+ *   private Set<String> someCode;
+ *   ...
+ * } + *
+ * 第一次拿不到范型,所以addresses、someCode不支持,直接返回null。B对象里的则没问题。
+ * + * @author liuxinsi + * @mail akalxs@gmail.com + */ +public class MongoEmbeddedObjectProcessor { + private static final Logger LOG = LoggerFactory.getLogger(MongoEmbeddedObjectProcessor.class); + + /** + * 将传入的值value转换成对应的类型type返回。 + * + * @param columnLabel 列名 + * @param value 值 + * @param type 对应的类型 + * @return 转换后的对象 + */ + public static Object valueMapper(String columnLabel, Object value, Class type) { + if (value == null) { + return null; + } + + // mongodb _id field + if (type.isAssignableFrom(ObjectId.class) + && (value instanceof ObjectId || value instanceof String)) { + return new ObjectId(value.toString()); + } + + // enum + if (type.isEnum()) { + return value.toString(); + } + + // embedded collection,内嵌集合 + if ((type.isAssignableFrom(List.class) || type.isAssignableFrom(Set.class)) + && value instanceof BasicDBList) { + // TODO 拿不到范型,list没法转 + LOG.debug("column:[{}],type:[{}]为内嵌列表,无法获取范型类,无法映射.return null.", columnLabel, type); + return null; + } + + // embedded object,内嵌对象 + if (value instanceof BasicDBObject) { + BasicDBObject dbObj = (BasicDBObject) value; + return beanMapper(dbObj, type); + } + + // embedded array,内嵌数组 + if (type.isArray() && value instanceof BasicDBList) { + BasicDBList basicDBList = (BasicDBList) value; + return arrayMapper(basicDBList, type); + } + + LOG.debug("column:[{}],type:[{}] unsupported type yet.return null", columnLabel, type); + return null; + } + + /** + * 加载clazzToMapper下所有field。 + * + * @param clazzToMapper class + * @return filed map,k=field name,v=field + */ + private static Map loadFields(Class clazzToMapper) { + Map fieldMap = new HashMap<>(); + Field[] fields = clazzToMapper.getDeclaredFields(); + for (Field field : fields) { + field.setAccessible(true); + fieldMap.put(field.getName(), field); + } + return fieldMap; + } + + /** + * 获取field字段的范型类。 + * + * @param field field + * @return null 如果没有获取到或异常。 + */ + private static Class getParameterizedClass(Field field) { + Type type = field.getGenericType(); + 
String parameterizedType; + if (type instanceof ParameterizedType) { + ParameterizedType pt = (ParameterizedType) type; + if (pt.getActualTypeArguments() == null || pt.getActualTypeArguments().length == 0) { + return null; + } + parameterizedType = pt.getActualTypeArguments()[0].toString(); + } else { + return null; + } + + Class clazz; + try { + clazz = Class.forName(parameterizedType); + } catch (ClassNotFoundException e) { + LOG.warn("获取field:{}的范型异常。", field.getName(), e); + return null; + } + return clazz; + } + + /** + * 根据字段field类型创建对应的集合类。
+ * 仅支持List、Set。 + * + * @param field field + * @param size 集合初始大小 + * @return 对应集合的实现类 + */ + private static Collection createCollection(Field field, int size) { + Class fieldType = field.getType(); + Collection collection = null; + if (fieldType.isAssignableFrom(List.class)) { + collection = new ArrayList<>(size); + } else if (fieldType.isAssignableFrom(Set.class)) { + collection = new HashSet<>(size); + } + return collection; + } + + /** + * 将mongodb的数据对象dbObj转换成对应类型clazzToMapper的对象。
+ * key=fieldName。 + * + * @param dbObj mongodb数据对象 + * @param clazzToMapper 目标对象类 + * @return 转换后的对象 + */ + private static Object beanMapper(BasicDBObject dbObj, Class clazzToMapper) { + // load all field + Map fieldMap = loadFields(clazzToMapper); + + // 将dbObj中的数据映射到beanMap中,如数据包含BasicDBObject则递归映射为对应的bean + // k=dbObj中的字段名,v=dbObj中对应的值或对象 + Map beanMap = new HashMap<>(); + for (String s : dbObj.keySet()) { + Object o = dbObj.get(s); + // 嵌套对象 + if (o instanceof BasicDBObject) { + Field field = fieldMap.get(s); + o = beanMapper((BasicDBObject) o, field.getType()); + + // 钳套对象列表 + } else if (o instanceof BasicDBList) { + Field field = fieldMap.get(s); + // 获取对应的范型 + Class parameterizedClazz = getParameterizedClass(field); + + BasicDBList basicDBs = (BasicDBList) o; + + Collection collection = createCollection(field, basicDBs.size()); + for (Object basicDbObj : basicDBs) { + // 基本类型 + if (parameterizedClazz.isPrimitive()) { + collection.add(basicDbObj); + } else if (parameterizedClazz.getName().startsWith("java.lang")) { + collection.add(basicDbObj); + } else { + // 对象类型 + collection.add(beanMapper((BasicDBObject) basicDbObj, parameterizedClazz)); + } + } + o = collection; + } + + beanMap.put(s, o); + } + + // create + Object instance; + try { + instance = clazzToMapper.newInstance(); + } catch (InstantiationException | IllegalAccessException e) { + LOG.warn("实例化:[{}]对象异常.", clazzToMapper, e); + return null; + } + + // 赋值 + Set fieldNames = fieldMap.keySet(); + for (String fieldName : fieldNames) { + if (beanMap.containsKey(fieldName)) { + Field field = fieldMap.get(fieldName); + Object value = beanMap.get(fieldName); + + try { + field.set(instance, value); + } catch (IllegalAccessException e) { + // 应该不会报 + LOG.error("为字段:[{}]设置值异常", + fieldName, e); + } + } + } + return instance; + } + + /** + * 将mongodb的数据对象列表basicDBList转换成对应类型arrayClass的数组。
+ * 基本类型直接转换,对象类型使用 {@link #beanMapper(BasicDBObject, Class)}。 + * + * @param basicDBList mongodb数据对象列表 + * @param arrayClass 目标数组对象类 + * @return 转换后的数组对象 + * @see MongoEmbeddedObjectProcessor#beanMapper(BasicDBObject, Class) + */ + private static Object arrayMapper(BasicDBList basicDBList, Class arrayClass) { + // 具体类 + Class clazzToMapper; + try { + clazzToMapper = Class.forName(arrayClass.getName() + .replace("[L", "") + .replace(";", "")); + } catch (ClassNotFoundException e) { + LOG.warn("实例化:[{}]对象异常.", arrayClass, e); + return null; + } + + // 创建对应的数组 + Object array = Array.newInstance(clazzToMapper, basicDBList.size()); + + // 数组赋值 + int i = 0; + for (Object basicDbObj : basicDBList) { + Object value; + // 基本类型 + if (clazzToMapper.isPrimitive()) { + value = basicDbObj; + } else if (clazzToMapper.getName().startsWith("java.lang")) { + value = basicDbObj; + } else { + // 对象类型 + value = beanMapper((BasicDBObject) basicDbObj, clazzToMapper); + } + + Array.set(array, i, value); + i++; + } + return array; + } +} diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoPreparedStatement.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoPreparedStatement.java index 7880c14e4..6bbe17099 100644 --- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoPreparedStatement.java +++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoPreparedStatement.java @@ -1,408 +1,409 @@ -package io.mycat.backend.jdbc.mongodb; - -import java.io.InputStream; -import java.io.Reader; -import java.math.BigDecimal; -import java.net.URL; -import java.sql.Array; -import java.sql.Blob; -import java.sql.Clob; -import java.sql.Date; -import java.sql.NClob; -import java.sql.ParameterMetaData; -import java.sql.PreparedStatement; -import java.sql.Ref; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.RowId; -import java.sql.SQLException; -import java.sql.SQLXML; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.ArrayList; -import 
java.util.Calendar; -import java.util.List; -/** - * 功能详细描述 - * @author sohudo[http://blog.csdn.net/wind520] - * @create 2014年12月19日 下午6:50:23 - * @version 0.0.1 - */ -public class MongoPreparedStatement extends MongoStatement implements - PreparedStatement { - final String _sql; - final MongoSQLParser _mongosql; - List _params = new ArrayList(); - - public MongoPreparedStatement(MongoConnection conn, int type, - int concurrency, int holdability, String sql) - throws MongoSQLException { - super(conn, type, concurrency, holdability); - this._sql = sql; - this._mongosql = new MongoSQLParser(conn.getDB(), sql); - } - - @Override - public ResultSet executeQuery() throws SQLException { - - return null; - } - - @Override - public int executeUpdate() throws SQLException { - - this._mongosql.setParams(this._params); - return this._mongosql.executeUpdate(); - } - - public void setValue(int idx, Object o) { - while (this._params.size() <= idx) - this._params.add(null); - this._params.set(idx, o); - } - - @Override - public void setNull(int parameterIndex, int sqlType) throws SQLException { - - - } - - @Override - public void setBoolean(int parameterIndex, boolean x) throws SQLException { - - setValue(parameterIndex, Boolean.valueOf(x)); - } - - @Override - public void setByte(int parameterIndex, byte x) throws SQLException { - - setValue(parameterIndex, Byte.valueOf(x)); - } - - @Override - public void setShort(int parameterIndex, short x) throws SQLException { - - setValue(parameterIndex, Short.valueOf(x)); - } - - @Override - public void setInt(int parameterIndex, int x) throws SQLException { - - setValue(parameterIndex, Integer.valueOf(x)); - } - - @Override - public void setLong(int parameterIndex, long x) throws SQLException { - - setValue(parameterIndex, Long.valueOf(x)); - } - - @Override - public void setFloat(int parameterIndex, float x) throws SQLException { - - setValue(parameterIndex, Float.valueOf(x)); - } - - @Override - public void setDouble(int 
parameterIndex, double x) throws SQLException { - - setValue(parameterIndex, Double.valueOf(x)); - } - - @Override - public void setBigDecimal(int parameterIndex, BigDecimal x) - throws SQLException { - - setValue(parameterIndex, x); - } - - @Override - public void setString(int parameterIndex, String x) throws SQLException { - - setValue(parameterIndex, x); - } - - @Override - public void setBytes(int parameterIndex, byte[] x) throws SQLException { - - setValue(parameterIndex, x); - } - - @Override - public void setDate(int parameterIndex, Date x) throws SQLException { - - setValue(parameterIndex, x); - } - - @Override - public void setTime(int parameterIndex, Time x) throws SQLException { - - setValue(parameterIndex, x); - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x) - throws SQLException { - - setValue(parameterIndex, x); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, int length) - throws SQLException { - - - } - - @Override - public void setUnicodeStream(int parameterIndex, InputStream x, int length) - throws SQLException { - - - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x, int length) - throws SQLException { - - - } - - @Override - public void clearParameters() throws SQLException { - - - } - - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType) - throws SQLException { - - - } - - @Override - public void setObject(int parameterIndex, Object x) throws SQLException { - - setValue(parameterIndex,x); - } - - @Override - public boolean execute() throws SQLException { - - return false; - } - - @Override - public void addBatch() throws SQLException { - - - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader, int length) - throws SQLException { - - - } - - @Override - public void setRef(int parameterIndex, Ref x) throws SQLException { - - - } - - @Override - public void setBlob(int parameterIndex, Blob x) 
throws SQLException { - - - } - - @Override - public void setClob(int parameterIndex, Clob x) throws SQLException { - - - } - - @Override - public void setArray(int parameterIndex, Array x) throws SQLException { - - - } - - @Override - public ResultSetMetaData getMetaData() throws SQLException { - - return null; - } - - @Override - public void setDate(int parameterIndex, Date x, Calendar cal) - throws SQLException { - - - } - - @Override - public void setTime(int parameterIndex, Time x, Calendar cal) - throws SQLException { - - - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) - throws SQLException { - - - } - - @Override - public void setNull(int parameterIndex, int sqlType, String typeName) - throws SQLException { - - - } - - @Override - public void setURL(int parameterIndex, URL x) throws SQLException { - - - } - - @Override - public ParameterMetaData getParameterMetaData() throws SQLException { - - return null; - } - - @Override - public void setRowId(int parameterIndex, RowId x) throws SQLException { - - - } - - @Override - public void setNString(int parameterIndex, String value) - throws SQLException { - - - } - - @Override - public void setNCharacterStream(int parameterIndex, Reader value, - long length) throws SQLException { - - - } - - @Override - public void setNClob(int parameterIndex, NClob value) throws SQLException { - - - } - - @Override - public void setClob(int parameterIndex, Reader reader, long length) - throws SQLException { - - - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream, long length) - throws SQLException { - - - } - - @Override - public void setNClob(int parameterIndex, Reader reader, long length) - throws SQLException { - - - } - - @Override - public void setSQLXML(int parameterIndex, SQLXML xmlObject) - throws SQLException { - - - } - - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType, - int scaleOrLength) throws SQLException { 
- - - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, long length) - throws SQLException { - - - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x, long length) - throws SQLException { - - - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader, - long length) throws SQLException { - - - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x) - throws SQLException { - - - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x) - throws SQLException { - - - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader) - throws SQLException { - - - } - - @Override - public void setNCharacterStream(int parameterIndex, Reader value) - throws SQLException { - - - } - - @Override - public void setClob(int parameterIndex, Reader reader) throws SQLException { - - - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream) - throws SQLException { - - - } - - @Override - public void setNClob(int parameterIndex, Reader reader) throws SQLException { - - - } - -} +package io.mycat.backend.jdbc.mongodb; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.NClob; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLXML; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +/** + * 功能详细描述 + * @author sohudo[http://blog.csdn.net/wind520] + * @create 2014年12月19日 下午6:50:23 + * @version 0.0.1 + */ +public class MongoPreparedStatement extends MongoStatement implements + PreparedStatement 
{ + final String _sql; + final MongoSQLParser _mongosql; + List _params = new ArrayList(); + + public MongoPreparedStatement(MongoConnection conn, int type, + int concurrency, int holdability, String sql) + throws MongoSQLException { + super(conn, type, concurrency, holdability); + this._sql = sql; + this._mongosql = new MongoSQLParser(conn.getDB(), sql); + } + + @Override + public ResultSet executeQuery() throws SQLException { + + return null; + } + + @Override + public int executeUpdate() throws SQLException { + + this._mongosql.setParams(this._params); + return this._mongosql.executeUpdate(); + } + + public void setValue(int idx, Object o) { + while (this._params.size() <= idx) { + this._params.add(null); + } + this._params.set(idx, o); + } + + @Override + public void setNull(int parameterIndex, int sqlType) throws SQLException { + + + } + + @Override + public void setBoolean(int parameterIndex, boolean x) throws SQLException { + + setValue(parameterIndex, Boolean.valueOf(x)); + } + + @Override + public void setByte(int parameterIndex, byte x) throws SQLException { + + setValue(parameterIndex, Byte.valueOf(x)); + } + + @Override + public void setShort(int parameterIndex, short x) throws SQLException { + + setValue(parameterIndex, Short.valueOf(x)); + } + + @Override + public void setInt(int parameterIndex, int x) throws SQLException { + + setValue(parameterIndex, Integer.valueOf(x)); + } + + @Override + public void setLong(int parameterIndex, long x) throws SQLException { + + setValue(parameterIndex, Long.valueOf(x)); + } + + @Override + public void setFloat(int parameterIndex, float x) throws SQLException { + + setValue(parameterIndex, Float.valueOf(x)); + } + + @Override + public void setDouble(int parameterIndex, double x) throws SQLException { + + setValue(parameterIndex, Double.valueOf(x)); + } + + @Override + public void setBigDecimal(int parameterIndex, BigDecimal x) + throws SQLException { + + setValue(parameterIndex, x); + } + + @Override + public void 
setString(int parameterIndex, String x) throws SQLException { + + setValue(parameterIndex, x); + } + + @Override + public void setBytes(int parameterIndex, byte[] x) throws SQLException { + + setValue(parameterIndex, x); + } + + @Override + public void setDate(int parameterIndex, Date x) throws SQLException { + + setValue(parameterIndex, x); + } + + @Override + public void setTime(int parameterIndex, Time x) throws SQLException { + + setValue(parameterIndex, x); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x) + throws SQLException { + + setValue(parameterIndex, x); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, int length) + throws SQLException { + + + } + + @Override + public void setUnicodeStream(int parameterIndex, InputStream x, int length) + throws SQLException { + + + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, int length) + throws SQLException { + + + } + + @Override + public void clearParameters() throws SQLException { + + + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType) + throws SQLException { + + + } + + @Override + public void setObject(int parameterIndex, Object x) throws SQLException { + + setValue(parameterIndex,x); + } + + @Override + public boolean execute() throws SQLException { + + return false; + } + + @Override + public void addBatch() throws SQLException { + + + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, int length) + throws SQLException { + + + } + + @Override + public void setRef(int parameterIndex, Ref x) throws SQLException { + + + } + + @Override + public void setBlob(int parameterIndex, Blob x) throws SQLException { + + + } + + @Override + public void setClob(int parameterIndex, Clob x) throws SQLException { + + + } + + @Override + public void setArray(int parameterIndex, Array x) throws SQLException { + + + } + + @Override + public ResultSetMetaData 
getMetaData() throws SQLException { + + return null; + } + + @Override + public void setDate(int parameterIndex, Date x, Calendar cal) + throws SQLException { + + + } + + @Override + public void setTime(int parameterIndex, Time x, Calendar cal) + throws SQLException { + + + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) + throws SQLException { + + + } + + @Override + public void setNull(int parameterIndex, int sqlType, String typeName) + throws SQLException { + + + } + + @Override + public void setURL(int parameterIndex, URL x) throws SQLException { + + + } + + @Override + public ParameterMetaData getParameterMetaData() throws SQLException { + + return null; + } + + @Override + public void setRowId(int parameterIndex, RowId x) throws SQLException { + + + } + + @Override + public void setNString(int parameterIndex, String value) + throws SQLException { + + + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value, + long length) throws SQLException { + + + } + + @Override + public void setNClob(int parameterIndex, NClob value) throws SQLException { + + + } + + @Override + public void setClob(int parameterIndex, Reader reader, long length) + throws SQLException { + + + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream, long length) + throws SQLException { + + + } + + @Override + public void setNClob(int parameterIndex, Reader reader, long length) + throws SQLException { + + + } + + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) + throws SQLException { + + + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType, + int scaleOrLength) throws SQLException { + + + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, long length) + throws SQLException { + + + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, long length) + throws SQLException { + + + } + + 
@Override + public void setCharacterStream(int parameterIndex, Reader reader, + long length) throws SQLException { + + + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x) + throws SQLException { + + + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x) + throws SQLException { + + + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader) + throws SQLException { + + + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value) + throws SQLException { + + + } + + @Override + public void setClob(int parameterIndex, Reader reader) throws SQLException { + + + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream) + throws SQLException { + + + } + + @Override + public void setNClob(int parameterIndex, Reader reader) throws SQLException { + + + } + +} diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSet.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSet.java index 8013e2a59..944bfcccc 100644 --- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSet.java +++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSet.java @@ -1,5 +1,9 @@ package io.mycat.backend.jdbc.mongodb; +import com.mongodb.BasicDBList; +import com.mongodb.DBCursor; +import com.mongodb.DBObject; + import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; @@ -26,10 +30,6 @@ //import java.util.HashMap; import java.util.Map; import java.util.Set; - -import com.mongodb.BasicDBList; -import com.mongodb.DBCursor; -import com.mongodb.DBObject; /** * 功能详细描述 * @author sohudo[http://blog.csdn.net/wind520] @@ -101,7 +101,9 @@ public void SetFieldType(boolean isid) throws SQLException { if (isid) { fieldtype= new int[Types.VARCHAR]; } - else fieldtype= new int[this.select.length]; + else { + fieldtype = new int[this.select.length]; + } if (_cur!=null) { for (int i=0;i T getObject(int columnIndex, Class type) throws 
SQLException { - - return null; + Object value = getObject(columnIndex); + return (T) MongoEmbeddedObjectProcessor.valueMapper(getField(columnIndex), value, type); } @Override public T getObject(String columnLabel, Class type) throws SQLException { - - return null; + Object value = getObject(columnLabel); + return (T) MongoEmbeddedObjectProcessor.valueMapper(columnLabel, value, type); } diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSetMetaData.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSetMetaData.java index 52d503115..bacbecd82 100644 --- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSetMetaData.java +++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSetMetaData.java @@ -1,185 +1,189 @@ -package io.mycat.backend.jdbc.mongodb; - -import java.sql.ResultSetMetaData; -import java.sql.SQLException; -import java.sql.Types; -//import java.util.Arrays; -/** - * 功能详细描述 - * @author sohudo[http://blog.csdn.net/wind520] - * @create 2014年12月19日 下午6:50:23 - * @version 0.0.1 - */ - -public class MongoResultSetMetaData implements ResultSetMetaData { - - private String[] keySet ; - private int[] keytype ; - private String _schema; - private String _table; - - /* - public MongoResultSetMetaData(Set keySet,String schema) { - super(); - this.keySet = new String[keySet.size()]; - this.keySet = keySet.toArray(this.keySet); - this._schema = schema; - } - */ - public MongoResultSetMetaData(String[] select,int [] ftype,String schema,String table) { - super(); - this.keySet = select; - this.keytype=ftype; - this._schema = schema; - this._table =table; - } - - @Override - public T unwrap(Class iface) throws SQLException { - - return null; - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - - return false; - } - - @Override - public int getColumnCount() throws SQLException { - if (keySet==null) return 0; - else - return keySet.length; - } - - @Override - public boolean isAutoIncrement(int column) 
throws SQLException { - // 是否为自动编号的字段 - return false; - } - - @Override - public boolean isCaseSensitive(int column) throws SQLException { - //指示列的大小写是否有关系 - return true; - } - - @Override - public boolean isSearchable(int column) throws SQLException { - //指示是否可以在 where 子句中使用指定的列 - return true; - } - - @Override - public boolean isCurrency(int column) throws SQLException { - // 指示指定的列是否是一个哈希代码值 - return false; - } - - @Override - public int isNullable(int column) throws SQLException { - // 指示指定列中的值是否可以为 null。 - return 0; - } - - @Override - public boolean isSigned(int column) throws SQLException { - // 指示指定列中的值是否带正负号 - return false; - } - - @Override - public int getColumnDisplaySize(int column) throws SQLException { - - return 50; - } - - @Override - public String getColumnLabel(int column) throws SQLException { - return keySet[column-1]; - } - - @Override - public String getColumnName(int column) throws SQLException { - return keySet[column-1]; - } - - @Override - public String getSchemaName(int column) throws SQLException { - - return this._schema; - } - - @Override - public int getPrecision(int column) throws SQLException { - //获取指定列的指定列宽 - return 0; - } - - @Override - public int getScale(int column) throws SQLException { - // 检索指定参数的小数点右边的位数。 - return 0; - } - - @Override - public String getTableName(int column) throws SQLException { - - return this._table; - } - - @Override - public String getCatalogName(int column) throws SQLException { - - return this._schema; - } - - @Override - public int getColumnType(int column) throws SQLException { - // 字段类型 - return keytype[column-1];//Types.VARCHAR; - } - - @Override - public String getColumnTypeName(int column) throws SQLException { - // 数据库特定的类型名称 - switch (keytype[column-1]){ - case Types.INTEGER: return "INTEGER"; - case Types.BOOLEAN: return "BOOLEAN"; - case Types.BIT: return "BITT"; - case Types.FLOAT: return "FLOAT"; - case Types.BIGINT: return "BIGINT"; - case Types.DOUBLE: return "DOUBLE"; - case 
Types.DATE: return "DATE"; - case Types.TIME: return "TIME"; - case Types.TIMESTAMP: return "TIMESTAMP"; - default: return "varchar"; - } - } - - @Override - public boolean isReadOnly(int column) throws SQLException { - //指示指定的列是否明确不可写入 - return false; - } - - @Override - public boolean isWritable(int column) throws SQLException { - - return false; - } - - @Override - public boolean isDefinitelyWritable(int column) throws SQLException { - - return false; - } - - @Override - public String getColumnClassName(int column) throws SQLException { - // 如果调用方法 ResultSet.getObject 从列中获取值,则返回构造其实例的 Java 类的完全限定名称 - return "Object"; - } - -} +package io.mycat.backend.jdbc.mongodb; + +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Types; +//import java.util.Arrays; +import java.util.Set; +/** + * 功能详细描述 + * @author sohudo[http://blog.csdn.net/wind520] + * @create 2014年12月19日 下午6:50:23 + * @version 0.0.1 + */ + +public class MongoResultSetMetaData implements ResultSetMetaData { + + private String[] keySet ; + private int[] keytype ; + private String _schema; + private String _table; + + /* + public MongoResultSetMetaData(Set keySet,String schema) { + super(); + this.keySet = new String[keySet.size()]; + this.keySet = keySet.toArray(this.keySet); + this._schema = schema; + } + */ + public MongoResultSetMetaData(String[] select,int [] ftype,String schema,String table) { + super(); + this.keySet = select; + this.keytype=ftype; + this._schema = schema; + this._table =table; + } + + @Override + public T unwrap(Class iface) throws SQLException { + + return null; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + + return false; + } + + @Override + public int getColumnCount() throws SQLException { + if (keySet==null) { + return 0; + } + else { + return keySet.length; + } + } + + @Override + public boolean isAutoIncrement(int column) throws SQLException { + // 是否为自动编号的字段 + return false; + } + + @Override + public 
boolean isCaseSensitive(int column) throws SQLException { + //指示列的大小写是否有关系 + return true; + } + + @Override + public boolean isSearchable(int column) throws SQLException { + //指示是否可以在 where 子句中使用指定的列 + return true; + } + + @Override + public boolean isCurrency(int column) throws SQLException { + // 指示指定的列是否是一个哈希代码值 + return false; + } + + @Override + public int isNullable(int column) throws SQLException { + // 指示指定列中的值是否可以为 null。 + return 0; + } + + @Override + public boolean isSigned(int column) throws SQLException { + // 指示指定列中的值是否带正负号 + return false; + } + + @Override + public int getColumnDisplaySize(int column) throws SQLException { + + return 50; + } + + @Override + public String getColumnLabel(int column) throws SQLException { + return keySet[column-1]; + } + + @Override + public String getColumnName(int column) throws SQLException { + return keySet[column-1]; + } + + @Override + public String getSchemaName(int column) throws SQLException { + + return this._schema; + } + + @Override + public int getPrecision(int column) throws SQLException { + //获取指定列的指定列宽 + return 0; + } + + @Override + public int getScale(int column) throws SQLException { + // 检索指定参数的小数点右边的位数。 + return 0; + } + + @Override + public String getTableName(int column) throws SQLException { + + return this._table; + } + + @Override + public String getCatalogName(int column) throws SQLException { + + return this._schema; + } + + @Override + public int getColumnType(int column) throws SQLException { + // 字段类型 + return keytype[column-1];//Types.VARCHAR; + } + + @Override + public String getColumnTypeName(int column) throws SQLException { + // 数据库特定的类型名称 + switch (keytype[column-1]){ + case Types.INTEGER: return "INTEGER"; + case Types.BOOLEAN: return "BOOLEAN"; + case Types.BIT: return "BITT"; + case Types.FLOAT: return "FLOAT"; + case Types.BIGINT: return "BIGINT"; + case Types.DOUBLE: return "DOUBLE"; + case Types.DATE: return "DATE"; + case Types.TIME: return "TIME"; + case Types.TIMESTAMP: 
return "TIMESTAMP"; + default: return "varchar"; + } + } + + @Override + public boolean isReadOnly(int column) throws SQLException { + //指示指定的列是否明确不可写入 + return false; + } + + @Override + public boolean isWritable(int column) throws SQLException { + + return false; + } + + @Override + public boolean isDefinitelyWritable(int column) throws SQLException { + + return false; + } + + @Override + public String getColumnClassName(int column) throws SQLException { + // 如果调用方法 ResultSet.getObject 从列中获取值,则返回构造其实例的 Java 类的完全限定名称 + return "Object"; + } + +} diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoSQLParser.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoSQLParser.java index dd8544478..3f033c21d 100644 --- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoSQLParser.java +++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoSQLParser.java @@ -1,428 +1,438 @@ -package io.mycat.backend.jdbc.mongodb; - - - -import java.sql.Types; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.alibaba.druid.sql.ast.SQLExpr; -import com.alibaba.druid.sql.ast.SQLOrderBy; -import com.alibaba.druid.sql.ast.SQLOrderingSpecification; -import com.alibaba.druid.sql.ast.SQLStatement; -import com.alibaba.druid.sql.ast.expr.SQLAggregateExpr; -import com.alibaba.druid.sql.ast.expr.SQLAllColumnExpr; -import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr; -import com.alibaba.druid.sql.ast.expr.SQLBooleanExpr; -import com.alibaba.druid.sql.ast.expr.SQLCharExpr; -import com.alibaba.druid.sql.ast.expr.SQLIntegerExpr; -import com.alibaba.druid.sql.ast.expr.SQLNullExpr; -import com.alibaba.druid.sql.ast.expr.SQLNumberExpr; -import com.alibaba.druid.sql.ast.expr.SQLVariantRefExpr; -import com.alibaba.druid.sql.ast.statement.SQLCreateTableStatement; -import com.alibaba.druid.sql.ast.statement.SQLDeleteStatement; -import com.alibaba.druid.sql.ast.statement.SQLDropTableStatement; -import 
com.alibaba.druid.sql.ast.statement.SQLInsertStatement; -import com.alibaba.druid.sql.ast.statement.SQLSelectGroupByClause; -import com.alibaba.druid.sql.ast.statement.SQLSelectItem; -import com.alibaba.druid.sql.ast.statement.SQLSelectOrderByItem; -import com.alibaba.druid.sql.ast.statement.SQLSelectQuery; -import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; -import com.alibaba.druid.sql.ast.statement.SQLTableSource; -import com.alibaba.druid.sql.ast.statement.SQLUpdateSetItem; -import com.alibaba.druid.sql.ast.statement.SQLUpdateStatement; -import com.alibaba.druid.sql.dialect.mysql.ast.expr.MySqlSelectGroupByExpr; -import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; -import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; -import com.mongodb.BasicDBList; -import com.mongodb.BasicDBObject; -import com.mongodb.DB; -import com.mongodb.DBCollection; -import com.mongodb.DBCursor; -import com.mongodb.DBObject; -/** - * 功能详细描述 - * @author sohudo[http://blog.csdn.net/wind520] - * @create 2014年12月19日 下午6:50:23 - * @version 0.0.1 - */ -public class MongoSQLParser { - private static final Logger LOGGER = LoggerFactory.getLogger(MongoSQLParser.class); - private final DB _db; -// private final String _sql; - private final SQLStatement statement; - private List _params; - private int _pos; - public MongoSQLParser(DB db, String sql) throws MongoSQLException - { - this._db = db; - // this._sql = sql; - this.statement = parser(sql); - } - - public SQLStatement parser(String s) throws MongoSQLException - { - s = s.trim(); - try - { - MySqlStatementParser parser = new MySqlStatementParser(s); - return parser.parseStatement(); - } - catch (Exception e) - { - LOGGER.error("MongoSQLParser.parserError", e); - } - throw new MongoSQLException.ErrorSQL(s); - } - - public void setParams(List params) - { - this._pos = 1; - this._params = params; - } - - public MongoData query() throws MongoSQLException{ - if (!(statement instanceof 
SQLSelectStatement)) { - //return null; - throw new IllegalArgumentException("not a query sql statement"); - } - MongoData mongo=new MongoData(); - DBCursor c=null; - SQLSelectStatement selectStmt = (SQLSelectStatement)statement; - SQLSelectQuery sqlSelectQuery =selectStmt.getSelect().getQuery(); - int icount=0; - if(sqlSelectQuery instanceof MySqlSelectQueryBlock) { - MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery(); - - BasicDBObject fields = new BasicDBObject(); - //显示的字段 - for(SQLSelectItem item : mysqlSelectQuery.getSelectList()) { - //System.out.println(item.toString()); - if (!(item.getExpr() instanceof SQLAllColumnExpr)) { - if (item.getExpr() instanceof SQLAggregateExpr) { - SQLAggregateExpr expr =(SQLAggregateExpr)item.getExpr(); - if (expr.getMethodName().equals("COUNT")) { - icount=1; - mongo.setField(getExprFieldName(expr), Types.BIGINT); - } - fields.put(getExprFieldName(expr), Integer.valueOf(1)); - } - else { - fields.put(getFieldName(item), Integer.valueOf(1)); - } - } - - } - - //表名 - SQLTableSource table=mysqlSelectQuery.getFrom(); - DBCollection coll =this._db.getCollection(table.toString()); - mongo.setTable(table.toString()); - - SQLExpr expr=mysqlSelectQuery.getWhere(); - DBObject query = parserWhere(expr); - //System.out.println(query); - SQLSelectGroupByClause groupby=mysqlSelectQuery.getGroupBy(); - BasicDBObject gbkey = new BasicDBObject(); - if (groupby!=null) { - for (SQLExpr gbexpr:groupby.getItems()){ - if (gbexpr instanceof MySqlSelectGroupByExpr) { - SQLExpr gbyexpr=((MySqlSelectGroupByExpr) gbexpr).getExpr(); - gbkey.put(getFieldName2(gbyexpr), Integer.valueOf(1)); - } - } - icount=2; - } - int limitoff=0; - int limitnum=0; - if (mysqlSelectQuery.getLimit()!=null) { - limitoff=getSQLExprToInt(mysqlSelectQuery.getLimit().getOffset()); - limitnum=getSQLExprToInt(mysqlSelectQuery.getLimit().getRowCount()); - } - - if (icount==1) { - mongo.setCount(coll.count(query)); - } - else if 
(icount==2){ - BasicDBObject initial = new BasicDBObject(); - initial.put("num", 0); - String reduce="function (obj, prev) { " - +" prev.num++}"; - mongo.setGrouyBy(coll.group(gbkey, query, initial, reduce)); - } - else { - if ((limitoff>0) || (limitnum>0)) { - c = coll.find(query, fields).skip(limitoff).limit(limitnum); - } - else { - c = coll.find(query, fields); - } - - SQLOrderBy orderby=mysqlSelectQuery.getOrderBy(); - if (orderby != null ){ - BasicDBObject order = new BasicDBObject(); - for (int i = 0; i < orderby.getItems().size(); i++) - { - SQLSelectOrderByItem orderitem = orderby.getItems().get(i); - order.put(orderitem.getExpr().toString(), Integer.valueOf(getSQLExprToAsc(orderitem.getType()))); - } - c.sort(order); - // System.out.println(order); - } - } - mongo.setCursor(c); - } - return mongo; - } - - public int executeUpdate() throws MongoSQLException { - if (statement instanceof SQLInsertStatement) { - return InsertData((SQLInsertStatement)statement); - } - if (statement instanceof SQLUpdateStatement) { - return UpData((SQLUpdateStatement)statement); - } - if (statement instanceof SQLDropTableStatement) { - return dropTable((SQLDropTableStatement)statement); - } - if (statement instanceof SQLDeleteStatement) { - return DeleteDate((SQLDeleteStatement)statement); - } - if (statement instanceof SQLCreateTableStatement) { - return 1; - } - return 1; - - } - private int InsertData(SQLInsertStatement state) { - if (state.getValues().getValues().size() ==0 ){ - throw new RuntimeException("number of columns error"); - } - if (state.getValues().getValues().size() != state.getColumns().size()){ - throw new RuntimeException("number of values and columns have to match"); - } - SQLTableSource table=state.getTableSource(); - BasicDBObject o = new BasicDBObject(); - int i=0; - for(SQLExpr col : state.getColumns()) { - o.put(getFieldName2(col), getExpValue(state.getValues().getValues().get(i))); - i++; - } - DBCollection coll 
=this._db.getCollection(table.toString()); - coll.insert(new DBObject[] { o }); - return 1; - } - private int UpData(SQLUpdateStatement state) { - SQLTableSource table=state.getTableSource(); - DBCollection coll =this._db.getCollection(table.toString()); - - SQLExpr expr=state.getWhere(); - DBObject query = parserWhere(expr); - - BasicDBObject set = new BasicDBObject(); - for(SQLUpdateSetItem col : state.getItems()){ - set.put(getFieldName2(col.getColumn()), getExpValue(col.getValue())); - } - DBObject mod = new BasicDBObject("$set", set); - coll.updateMulti(query, mod); - //System.out.println("changs count:"+coll.getStats().size()); - return 1; - } - private int DeleteDate(SQLDeleteStatement state) { - SQLTableSource table=state.getTableSource(); - DBCollection coll =this._db.getCollection(table.toString()); - - SQLExpr expr=state.getWhere(); - if (expr==null) { - throw new RuntimeException("not where of sql"); - } - DBObject query = parserWhere(expr); - - coll.remove(query); - - return 1; - - } - private int dropTable(SQLDropTableStatement state) { - for (SQLTableSource table : state.getTableSources()){ - DBCollection coll =this._db.getCollection(table.toString()); - coll.drop(); - } - return 1; - - } - - private int getSQLExprToInt(SQLExpr expr){ - if (expr instanceof SQLIntegerExpr){ - return ((SQLIntegerExpr)expr).getNumber().intValue(); - } - return 0; - } - private int getSQLExprToAsc(SQLOrderingSpecification ASC){ - if (ASC==null ) return 1; - if (ASC==SQLOrderingSpecification.DESC){ - return -1; - } - else { - return 1; - } - } - public String remove(String resource,char ch) - { - StringBuffer buffer=new StringBuffer(); - int position=0; - char currentChar; - - while(position")) op="$gt"; - if (expr.getOperator().getName().equals(">=")) op="$gte"; - - if (expr.getOperator().getName().equals("!=")) op="$ne"; - if (expr.getOperator().getName().equals("<>")) op="$ne"; - //xo.put(op, getExpValue(expr.getRight())); - // o.put(exprL.toString(),xo); - 
parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight())); - } - } - } - private void parserWhere(SQLExpr aexpr,BasicDBObject o){ - if(aexpr instanceof SQLBinaryOpExpr){ - SQLBinaryOpExpr expr=(SQLBinaryOpExpr)aexpr; - SQLExpr exprL=expr.getLeft(); - if (!(exprL instanceof SQLBinaryOpExpr)) - { - //opSQLExpr((SQLBinaryOpExpr)aexpr,o); - if (expr.getOperator().getName().equals("=")) { - o.put(exprL.toString(), getExpValue(expr.getRight())); - } - else { - String op=""; - if (expr.getOperator().getName().equals("<")) op="$lt"; - if (expr.getOperator().getName().equals("<=")) op="$lte"; - if (expr.getOperator().getName().equals(">")) op="$gt"; - if (expr.getOperator().getName().equals(">=")) op="$gte"; - - if (expr.getOperator().getName().equals("!=")) op="$ne"; - if (expr.getOperator().getName().equals("<>")) op="$ne"; - - parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight())); - } - - } - else { - if (expr.getOperator().getName().equals("AND")) { - parserWhere(exprL,o); - parserWhere(expr.getRight(),o); - } - else if (expr.getOperator().getName().equals("OR")) { - orWhere(exprL,expr.getRight(),o); - } - else { - throw new RuntimeException("Can't identify the operation of of where"); - } - } - } - - } - - - private void orWhere(SQLExpr exprL,SQLExpr exprR ,BasicDBObject ob){ - BasicDBObject xo = new BasicDBObject(); - BasicDBObject yo = new BasicDBObject(); - parserWhere(exprL,xo); - parserWhere(exprR,yo); - ob.put("$or",new Object[]{xo,yo}); - } -} +package io.mycat.backend.jdbc.mongodb; + + + +import java.sql.Types; +import java.util.List; + + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.mongodb.BasicDBList; +import com.mongodb.BasicDBObject; +import com.mongodb.DB; +import com.mongodb.DBCollection; +import com.mongodb.DBCursor; +import com.mongodb.DBObject; +import com.alibaba.druid.sql.ast.SQLExpr; +import com.alibaba.druid.sql.ast.SQLOrderingSpecification; +import com.alibaba.druid.sql.ast.SQLStatement; + 
+import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import com.alibaba.druid.sql.ast.statement.*; +import com.alibaba.druid.sql.ast.expr.*; +import com.alibaba.druid.sql.ast.*; +/** + * 功能详细描述 + * @author sohudo[http://blog.csdn.net/wind520] + * @create 2014年12月19日 下午6:50:23 + * @version 0.0.1 + */ +public class MongoSQLParser { + private static final Logger LOGGER = LoggerFactory.getLogger(MongoSQLParser.class); + private final DB _db; +// private final String _sql; + private final SQLStatement statement; + private List _params; + private int _pos; + public MongoSQLParser(DB db, String sql) throws MongoSQLException + { + this._db = db; + // this._sql = sql; + this.statement = parser(sql); + } + + public SQLStatement parser(String s) throws MongoSQLException + { + s = s.trim(); + try + { + MySqlStatementParser parser = new MySqlStatementParser(s); + return parser.parseStatement(); + } + catch (Exception e) + { + LOGGER.error("MongoSQLParser.parserError", e); + } + throw new MongoSQLException.ErrorSQL(s); + } + + public void setParams(List params) + { + this._pos = 1; + this._params = params; + } + + public MongoData query() throws MongoSQLException{ + if (!(statement instanceof SQLSelectStatement)) { + //return null; + throw new IllegalArgumentException("not a query sql statement"); + } + MongoData mongo=new MongoData(); + DBCursor c=null; + SQLSelectStatement selectStmt = (SQLSelectStatement)statement; + SQLSelectQuery sqlSelectQuery =selectStmt.getSelect().getQuery(); + int icount=0; + if(sqlSelectQuery instanceof MySqlSelectQueryBlock) { + MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery(); + + BasicDBObject fields = new BasicDBObject(); + //显示的字段 + for(SQLSelectItem item : mysqlSelectQuery.getSelectList()) { + //System.out.println(item.toString()); + if (!(item.getExpr() instanceof SQLAllColumnExpr)) { + if 
(item.getExpr() instanceof SQLAggregateExpr) { + SQLAggregateExpr expr =(SQLAggregateExpr)item.getExpr(); + if (expr.getMethodName().equals("COUNT")) { + icount=1; + mongo.setField(getExprFieldName(expr), Types.BIGINT); + } + fields.put(getExprFieldName(expr), Integer.valueOf(1)); + } + else { + fields.put(getFieldName(item), Integer.valueOf(1)); + } + } + + } + + //表名 + SQLTableSource table=mysqlSelectQuery.getFrom(); + DBCollection coll =this._db.getCollection(table.toString()); + mongo.setTable(table.toString()); + + SQLExpr expr=mysqlSelectQuery.getWhere(); + DBObject query = parserWhere(expr); + //System.out.println(query); + SQLSelectGroupByClause groupby=mysqlSelectQuery.getGroupBy(); + BasicDBObject gbkey = new BasicDBObject(); + if (groupby!=null) { + for (SQLExpr gbexpr:groupby.getItems()){ + if (gbexpr instanceof SQLIdentifierExpr) { + String name=((SQLIdentifierExpr) gbexpr).getName(); + gbkey.put(name, Integer.valueOf(1)); + } + } + icount=2; + } + int limitoff=0; + int limitnum=0; + if (mysqlSelectQuery.getLimit()!=null) { + limitoff=getSQLExprToInt(mysqlSelectQuery.getLimit().getOffset()); + limitnum=getSQLExprToInt(mysqlSelectQuery.getLimit().getRowCount()); + } + + if (icount==1) { + mongo.setCount(coll.count(query)); + } + else if (icount==2){ + BasicDBObject initial = new BasicDBObject(); + initial.put("num", 0); + String reduce="function (obj, prev) { " + +" prev.num++}"; + mongo.setGrouyBy(coll.group(gbkey, query, initial, reduce)); + } + else { + if ((limitoff>0) || (limitnum>0)) { + c = coll.find(query, fields).skip(limitoff).limit(limitnum); + } + else { + c = coll.find(query, fields); + } + + SQLOrderBy orderby=mysqlSelectQuery.getOrderBy(); + if (orderby != null ){ + BasicDBObject order = new BasicDBObject(); + for (int i = 0; i < orderby.getItems().size(); i++) + { + SQLSelectOrderByItem orderitem = orderby.getItems().get(i); + order.put(orderitem.getExpr().toString(), Integer.valueOf(getSQLExprToAsc(orderitem.getType()))); + } + 
c.sort(order); + // System.out.println(order); + } + } + mongo.setCursor(c); + } + return mongo; + } + + public int executeUpdate() throws MongoSQLException { + if (statement instanceof SQLInsertStatement) { + return InsertData((SQLInsertStatement)statement); + } + if (statement instanceof SQLUpdateStatement) { + return UpData((SQLUpdateStatement)statement); + } + if (statement instanceof SQLDropTableStatement) { + return dropTable((SQLDropTableStatement)statement); + } + if (statement instanceof SQLDeleteStatement) { + return DeleteDate((SQLDeleteStatement)statement); + } + if (statement instanceof SQLCreateTableStatement) { + return 1; + } + return 1; + + } + private int InsertData(SQLInsertStatement state) { + if (state.getValues().getValues().size() ==0 ){ + throw new RuntimeException("number of columns error"); + } + if (state.getValues().getValues().size() != state.getColumns().size()){ + throw new RuntimeException("number of values and columns have to match"); + } + SQLTableSource table=state.getTableSource(); + BasicDBObject o = new BasicDBObject(); + int i=0; + for(SQLExpr col : state.getColumns()) { + o.put(getFieldName2(col), getExpValue(state.getValues().getValues().get(i))); + i++; + } + DBCollection coll =this._db.getCollection(table.toString()); + coll.insert(new DBObject[] { o }); + return 1; + } + private int UpData(SQLUpdateStatement state) { + SQLTableSource table=state.getTableSource(); + DBCollection coll =this._db.getCollection(table.toString()); + + SQLExpr expr=state.getWhere(); + DBObject query = parserWhere(expr); + + BasicDBObject set = new BasicDBObject(); + for(SQLUpdateSetItem col : state.getItems()){ + set.put(getFieldName2(col.getColumn()), getExpValue(col.getValue())); + } + DBObject mod = new BasicDBObject("$set", set); + coll.updateMulti(query, mod); + //System.out.println("changs count:"+coll.getStats().size()); + return 1; + } + private int DeleteDate(SQLDeleteStatement state) { + SQLTableSource table=state.getTableSource(); + 
DBCollection coll =this._db.getCollection(table.toString()); + + SQLExpr expr=state.getWhere(); + if (expr==null) { + throw new RuntimeException("not where of sql"); + } + DBObject query = parserWhere(expr); + + coll.remove(query); + + return 1; + + } + private int dropTable(SQLDropTableStatement state) { + for (SQLTableSource table : state.getTableSources()){ + DBCollection coll =this._db.getCollection(table.toString()); + coll.drop(); + } + return 1; + + } + + private int getSQLExprToInt(SQLExpr expr){ + if (expr instanceof SQLIntegerExpr){ + return ((SQLIntegerExpr)expr).getNumber().intValue(); + } + return 0; + } + private int getSQLExprToAsc(SQLOrderingSpecification ASC){ + if (ASC==null ) { + return 1; + } + if (ASC==SQLOrderingSpecification.DESC){ + return -1; + } + else { + return 1; + } + } + public String remove(String resource,char ch) + { + StringBuffer buffer=new StringBuffer(); + int position=0; + char currentChar; + + while(position")) { + op = "$gt"; + } + if (expr.getOperator().getName().equals(">=")) { + op = "$gte"; + } + + if (expr.getOperator().getName().equals("!=")) { + op = "$ne"; + } + if (expr.getOperator().getName().equals("<>")) { + op = "$ne"; + } + //xo.put(op, getExpValue(expr.getRight())); + // o.put(exprL.toString(),xo); + parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight())); + } + } + } + private void parserWhere(SQLExpr aexpr,BasicDBObject o){ + if(aexpr instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr expr=(SQLBinaryOpExpr)aexpr; + SQLExpr exprL=expr.getLeft(); + if (!(exprL instanceof SQLBinaryOpExpr)) + { + //opSQLExpr((SQLBinaryOpExpr)aexpr,o); + if (expr.getOperator().getName().equals("=")) { + o.put(exprL.toString(), getExpValue(expr.getRight())); + } + else { + String op=""; + if (expr.getOperator().getName().equals("<")) { + op = "$lt"; + } + if (expr.getOperator().getName().equals("<=")) { + op = "$lte"; + } + if (expr.getOperator().getName().equals(">")) { + op = "$gt"; + } + if 
(expr.getOperator().getName().equals(">=")) { + op = "$gte"; + } + + if (expr.getOperator().getName().equals("!=")) { + op = "$ne"; + } + if (expr.getOperator().getName().equals("<>")) { + op = "$ne"; + } + + parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight())); + } + + } + else { + if (expr.getOperator().getName().equals("AND")) { + parserWhere(exprL,o); + parserWhere(expr.getRight(),o); + } + else if (expr.getOperator().getName().equals("OR")) { + orWhere(exprL,expr.getRight(),o); + } + else { + throw new RuntimeException("Can't identify the operation of of where"); + } + } + } + + } + + + private void orWhere(SQLExpr exprL,SQLExpr exprR ,BasicDBObject ob){ + BasicDBObject xo = new BasicDBObject(); + BasicDBObject yo = new BasicDBObject(); + parserWhere(exprL,xo); + parserWhere(exprR,yo); + ob.put("$or",new Object[]{xo,yo}); + } +} diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoStatement.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoStatement.java index 48c880159..833af9609 100644 --- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoStatement.java +++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoStatement.java @@ -1,5 +1,7 @@ package io.mycat.backend.jdbc.mongodb; +import com.mongodb.DBCursor; + import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; @@ -28,12 +30,15 @@ public MongoStatement(MongoConnection conn, int type, int concurrency, int holda this._concurrency = concurrency; this._holdability = holdability; - if (this._type != 0) - throw new UnsupportedOperationException("type not supported yet"); - if (this._concurrency != 0) - throw new UnsupportedOperationException("concurrency not supported yet"); - if (this._holdability != 0) - throw new UnsupportedOperationException("holdability not supported yet"); + if (this._type != 0) { + throw new UnsupportedOperationException("type not supported yet"); + } + if (this._concurrency != 0) { + throw new 
UnsupportedOperationException("concurrency not supported yet"); + } + if (this._holdability != 0) { + throw new UnsupportedOperationException("holdability not supported yet"); + } } @Override @@ -52,12 +57,11 @@ public boolean isWrapperFor(Class iface) throws SQLException { public ResultSet executeQuery(String sql) throws SQLException { MongoData mongo= new MongoSQLParser(this._conn.getDB(), sql).query(); - if (this._fetchSize > 0) { + if ((this._fetchSize > 0) + && (mongo.getCursor()!=null)) { //设置每次网络请求的最大记录数 - if (mongo.getCursor()!=null) { mongo.getCursor().batchSize(this._fetchSize); - } - } + } /* if (this._maxRows > 0) { diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/StringUtils.java b/src/main/java/io/mycat/backend/jdbc/mongodb/StringUtils.java index 6b6ea4ccd..fcf4243e8 100644 --- a/src/main/java/io/mycat/backend/jdbc/mongodb/StringUtils.java +++ b/src/main/java/io/mycat/backend/jdbc/mongodb/StringUtils.java @@ -1,16 +1,16 @@ -package io.mycat.backend.jdbc.mongodb; - - -public class StringUtils { - - - public static boolean startsWithIgnoreCase(String searchIn, int startAt, - String searchFor) { - return searchIn.regionMatches(true, startAt, searchFor, 0, searchFor - .length()); - } - - public static boolean startsWithIgnoreCase(String searchIn, String searchFor) { - return startsWithIgnoreCase(searchIn, 0, searchFor); - } +package io.mycat.backend.jdbc.mongodb; + + +public class StringUtils { + + + public static boolean startsWithIgnoreCase(String searchIn, int startAt, + String searchFor) { + return searchIn.regionMatches(true, startAt, searchFor, 0, searchFor + .length()); + } + + public static boolean startsWithIgnoreCase(String searchIn, String searchFor) { + return startsWithIgnoreCase(searchIn, 0, searchFor); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/DriverPropertyInfoHelper.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/DriverPropertyInfoHelper.java index 4948cee89..16d25f395 100644 
--- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/DriverPropertyInfoHelper.java +++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/DriverPropertyInfoHelper.java @@ -1,70 +1,70 @@ -package io.mycat.backend.jdbc.sequoiadb; - -import java.sql.DriverPropertyInfo; -import java.util.ArrayList; - - -public class DriverPropertyInfoHelper{ - - public static final String AUTO_CONNECT_RETRY = "autoConnectRetry"; - - public static final String CONNECTIONS_PER_HOST = "connecionsPerHost"; - - public static final String CONNECT_TIMEOUT = "connectTimeout"; - - public static final String CURSOR_FINALIZER_ENABLED = "cursorFinalizerEnabled"; - - public static final String MAX_AUTO_CONNECT_RETRY_TIME = "maxAutoConnectRetryTime"; - - public static final String READ_PREFERENCE = "readPreference"; - - public static final String SOCKET_TIMEOUT = "socketTimeout"; - - public DriverPropertyInfo[] getPropertyInfo() - { - ArrayList propInfos = new ArrayList(); - - addPropInfo( - propInfos, - AUTO_CONNECT_RETRY, - "false", - "If true, the driver will keep trying to connect to the same server in case that the socket " - + "cannot be established. There is maximum amount of time to keep retrying, which is 15s by " - + "default.", null); - - addPropInfo(propInfos, CONNECTIONS_PER_HOST, "10", "The maximum number of connections allowed per " - + "host for this Mongo instance. Those connections will be kept in a pool when idle.", null); - - addPropInfo(propInfos, CONNECT_TIMEOUT, "10000", "The connection timeout in milliseconds. ", null); - - addPropInfo(propInfos, CURSOR_FINALIZER_ENABLED, "true", "Sets whether there is a a finalize " - + "method created that cleans up instances of DBCursor that the client does not close.", - null); - - addPropInfo(propInfos, MAX_AUTO_CONNECT_RETRY_TIME, "0", - "The maximum amount of time in MS to spend retrying to open connection to the same server." 
- + "Default is 0, which means to use the default 15s if autoConnectRetry is on.", null); - - addPropInfo(propInfos, READ_PREFERENCE, "primary", - "represents preferred replica set members to which a query or command can be sent", new String[] { - "primary", "primary preferred", "secondary", "secondary preferred", "nearest" }); - - addPropInfo(propInfos, SOCKET_TIMEOUT, "0", "The socket timeout in milliseconds It is used for " - + "I/O socket read and write operations " - + "Socket.setSoTimeout(int) Default is 0 and means no timeout.", null); - - return propInfos.toArray(new DriverPropertyInfo[propInfos.size()]); - } - - private void addPropInfo(final ArrayList propInfos, final String propName, - final String defaultVal, final String description, final String[] choices) - { - DriverPropertyInfo newProp = new DriverPropertyInfo(propName, defaultVal); - newProp.description = description; - if (choices != null) - { - newProp.choices = choices; - } - propInfos.add(newProp); - } +package io.mycat.backend.jdbc.sequoiadb; + +import java.sql.DriverPropertyInfo; +import java.util.ArrayList; + + +public class DriverPropertyInfoHelper{ + + public static final String AUTO_CONNECT_RETRY = "autoConnectRetry"; + + public static final String CONNECTIONS_PER_HOST = "connecionsPerHost"; + + public static final String CONNECT_TIMEOUT = "connectTimeout"; + + public static final String CURSOR_FINALIZER_ENABLED = "cursorFinalizerEnabled"; + + public static final String MAX_AUTO_CONNECT_RETRY_TIME = "maxAutoConnectRetryTime"; + + public static final String READ_PREFERENCE = "readPreference"; + + public static final String SOCKET_TIMEOUT = "socketTimeout"; + + public DriverPropertyInfo[] getPropertyInfo() + { + ArrayList propInfos = new ArrayList(); + + addPropInfo( + propInfos, + AUTO_CONNECT_RETRY, + "false", + "If true, the driver will keep trying to connect to the same server in case that the socket " + + "cannot be established. 
There is maximum amount of time to keep retrying, which is 15s by " + + "default.", null); + + addPropInfo(propInfos, CONNECTIONS_PER_HOST, "10", "The maximum number of connections allowed per " + + "host for this Mongo instance. Those connections will be kept in a pool when idle.", null); + + addPropInfo(propInfos, CONNECT_TIMEOUT, "10000", "The connection timeout in milliseconds. ", null); + + addPropInfo(propInfos, CURSOR_FINALIZER_ENABLED, "true", "Sets whether there is a a finalize " + + "method created that cleans up instances of DBCursor that the client does not close.", + null); + + addPropInfo(propInfos, MAX_AUTO_CONNECT_RETRY_TIME, "0", + "The maximum amount of time in MS to spend retrying to open connection to the same server." + + "Default is 0, which means to use the default 15s if autoConnectRetry is on.", null); + + addPropInfo(propInfos, READ_PREFERENCE, "primary", + "represents preferred replica set members to which a query or command can be sent", new String[] { + "primary", "primary preferred", "secondary", "secondary preferred", "nearest" }); + + addPropInfo(propInfos, SOCKET_TIMEOUT, "0", "The socket timeout in milliseconds It is used for " + + "I/O socket read and write operations " + + "Socket.setSoTimeout(int) Default is 0 and means no timeout.", null); + + return propInfos.toArray(new DriverPropertyInfo[propInfos.size()]); + } + + private void addPropInfo(final ArrayList propInfos, final String propName, + final String defaultVal, final String description, final String[] choices) + { + DriverPropertyInfo newProp = new DriverPropertyInfo(propName, defaultVal); + newProp.description = description; + if (choices != null) + { + newProp.choices = choices; + } + propInfos.add(newProp); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaConnection.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaConnection.java index 368b4b261..e99a2be88 100644 --- 
a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaConnection.java +++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaConnection.java @@ -20,8 +20,8 @@ import java.util.Properties; import java.util.concurrent.Executor; -import com.sequoiadb.base.CollectionSpace; import com.sequoiadb.base.Sequoiadb; +import com.sequoiadb.base.CollectionSpace; import com.sequoiadb.exception.BaseException; /** * 功能详细描述 @@ -51,12 +51,16 @@ public SequoiaConnection(String url, String db) throws UnknownHostException { public CollectionSpace getDB() { if (this._schema!=null) { - if (mc.isCollectionSpaceExist(this._schema)) + if (mc.isCollectionSpaceExist(this._schema)) { return this.mc.getCollectionSpace(this._schema); - else + } + else { return this.mc.createCollectionSpace(this._schema); + } + } + else { + return null; } - else return null; } @Override @@ -80,8 +84,9 @@ public String nativeSQL(String sql) throws SQLException { @Override public void setAutoCommit(boolean autoCommit) throws SQLException { - if (!autoCommit) - throw new RuntimeException("autoCommit has to be on"); + if (!autoCommit) { + throw new RuntimeException("autoCommit has to be on"); + } } @Override diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaData.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaData.java index 357620069..3de061c8b 100644 --- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaData.java +++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaData.java @@ -1,131 +1,131 @@ -package io.mycat.backend.jdbc.sequoiadb; - -import java.sql.Date; -import java.sql.Time; -import java.sql.Timestamp; -import java.sql.Types; -import java.util.HashMap; - -import org.bson.BSONObject; -import org.bson.types.BasicBSONList; - -import com.sequoiadb.base.DBCursor; - -public class SequoiaData { - - private DBCursor cursor; - private long count; - private String table; - private BSONObject groupby; - - private HashMap map = new HashMap(); - private boolean type=false; - - 
public SequoiaData(){ - this.count=0; - this.cursor=null; - } - - public long getCount() { - return this.count; - } - - - public void setCount(long count) { - this.count=count; - } - - public String getTable() { - return this.table; - } - - public void setTable(String table) { - this.table=table; - } - - public BSONObject getGrouyBy() { - return this.groupby; - } - - public BasicBSONList getGrouyBys() { - if (this.groupby instanceof BasicBSONList) { - return (BasicBSONList)this.groupby; - } - else { - return null; - } - } - public void setGrouyBy(BSONObject gb) { - this.groupby=gb; - this.type=true; - if (gb instanceof BasicBSONList) { - Object gb2=((BasicBSONList)gb).get(0); - if (gb2 instanceof BSONObject) { - for (String field :((BSONObject)gb2).keySet()) { - Object val = ((BSONObject)gb2).get(field); - setField(field,getObjectToType(val)); - } - } - } - } - - public static int getObjectToType(Object ob){ - if (ob instanceof Integer) { - return Types.INTEGER; - } - else if (ob instanceof Boolean) { - return Types.BOOLEAN; - } - else if (ob instanceof Byte) { - return Types.BIT; - } - else if (ob instanceof Short) { - return Types.INTEGER; - } - else if (ob instanceof Float) { - return Types.FLOAT; - } - else if (ob instanceof Long) { - return Types.BIGINT; - } - else if (ob instanceof Double) { - return Types.DOUBLE; - } - else if (ob instanceof Date) { - return Types.DATE; - } - else if (ob instanceof Time) { - return Types.TIME; - } - else if (ob instanceof Timestamp) { - return Types.TIMESTAMP; - } - else if (ob instanceof String) { - return Types.VARCHAR; - } - else { - return Types.VARCHAR; - } - } - - public void setField(String field,int ftype) { - map.put(field, ftype); - } - - public HashMap getFields() { - return this.map; - } - - public boolean getType() { - return this.type; - } - - public DBCursor getCursor() { - return this.cursor; - } - - public DBCursor setCursor(DBCursor cursor) { - return this.cursor=cursor; - } - -} +package 
io.mycat.backend.jdbc.sequoiadb; + +import java.sql.Date; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.HashMap; + +import org.bson.BSONObject; +import org.bson.BasicBSONObject; +import org.bson.types.BasicBSONList; +import com.sequoiadb.base.DBCursor; + +public class SequoiaData { + + private DBCursor cursor; + private long count; + private String table; + private BSONObject groupby; + + private HashMap map = new HashMap(); + private boolean type=false; + + public SequoiaData(){ + this.count=0; + this.cursor=null; + } + + public long getCount() { + return this.count; + } + + + public void setCount(long count) { + this.count=count; + } + + public String getTable() { + return this.table; + } + + public void setTable(String table) { + this.table=table; + } + + public BSONObject getGrouyBy() { + return this.groupby; + } + + public BasicBSONList getGrouyBys() { + if (this.groupby instanceof BasicBSONList) { + return (BasicBSONList)this.groupby; + } + else { + return null; + } + } + public void setGrouyBy(BSONObject gb) { + this.groupby=gb; + this.type=true; + if (gb instanceof BasicBSONList) { + Object gb2=((BasicBSONList)gb).get(0); + if (gb2 instanceof BSONObject) { + for (String field :((BSONObject)gb2).keySet()) { + Object val = ((BSONObject)gb2).get(field); + setField(field,getObjectToType(val)); + } + } + } + } + + public static int getObjectToType(Object ob){ + if (ob instanceof Integer) { + return Types.INTEGER; + } + else if (ob instanceof Boolean) { + return Types.BOOLEAN; + } + else if (ob instanceof Byte) { + return Types.BIT; + } + else if (ob instanceof Short) { + return Types.INTEGER; + } + else if (ob instanceof Float) { + return Types.FLOAT; + } + else if (ob instanceof Long) { + return Types.BIGINT; + } + else if (ob instanceof Double) { + return Types.DOUBLE; + } + else if (ob instanceof Date) { + return Types.DATE; + } + else if (ob instanceof Time) { + return Types.TIME; + } + else if (ob instanceof 
Timestamp) { + return Types.TIMESTAMP; + } + else if (ob instanceof String) { + return Types.VARCHAR; + } + else { + return Types.VARCHAR; + } + } + + public void setField(String field,int ftype) { + map.put(field, ftype); + } + + public HashMap getFields() { + return this.map; + } + + public boolean getType() { + return this.type; + } + + public DBCursor getCursor() { + return this.cursor; + } + + public DBCursor setCursor(DBCursor cursor) { + return this.cursor=cursor; + } + +} diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaPreparedStatement.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaPreparedStatement.java index 0b0ee4098..25f5a4f93 100644 --- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaPreparedStatement.java +++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaPreparedStatement.java @@ -1,408 +1,409 @@ -package io.mycat.backend.jdbc.sequoiadb; - -import java.io.InputStream; -import java.io.Reader; -import java.math.BigDecimal; -import java.net.URL; -import java.sql.Array; -import java.sql.Blob; -import java.sql.Clob; -import java.sql.Date; -import java.sql.NClob; -import java.sql.ParameterMetaData; -import java.sql.PreparedStatement; -import java.sql.Ref; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.RowId; -import java.sql.SQLException; -import java.sql.SQLXML; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.List; -/** - * 功能详细描述 - * @author sohudo[http://blog.csdn.net/wind520] - * @create 2014年12月19日 下午6:50:23 - * @version 0.0.1 - */ -public class SequoiaPreparedStatement extends SequoiaStatement implements - PreparedStatement { - final String _sql; - final SequoiaSQLParser _mongosql; - List _params = new ArrayList(); - - public SequoiaPreparedStatement(SequoiaConnection conn, int type, - int concurrency, int holdability, String sql) - throws SequoiaSQLException { - super(conn, type, concurrency, 
holdability); - this._sql = sql; - this._mongosql = new SequoiaSQLParser(conn.getDB(), sql); - } - - @Override - public ResultSet executeQuery() throws SQLException { - - return null; - } - - @Override - public int executeUpdate() throws SQLException { - - this._mongosql.setParams(this._params); - return this._mongosql.executeUpdate(); - } - - public void setValue(int idx, Object o) { - while (this._params.size() <= idx) - this._params.add(null); - this._params.set(idx, o); - } - - @Override - public void setNull(int parameterIndex, int sqlType) throws SQLException { - - - } - - @Override - public void setBoolean(int parameterIndex, boolean x) throws SQLException { - - setValue(parameterIndex, Boolean.valueOf(x)); - } - - @Override - public void setByte(int parameterIndex, byte x) throws SQLException { - - setValue(parameterIndex, Byte.valueOf(x)); - } - - @Override - public void setShort(int parameterIndex, short x) throws SQLException { - - setValue(parameterIndex, Short.valueOf(x)); - } - - @Override - public void setInt(int parameterIndex, int x) throws SQLException { - - setValue(parameterIndex, Integer.valueOf(x)); - } - - @Override - public void setLong(int parameterIndex, long x) throws SQLException { - - setValue(parameterIndex, Long.valueOf(x)); - } - - @Override - public void setFloat(int parameterIndex, float x) throws SQLException { - - setValue(parameterIndex, Float.valueOf(x)); - } - - @Override - public void setDouble(int parameterIndex, double x) throws SQLException { - - setValue(parameterIndex, Double.valueOf(x)); - } - - @Override - public void setBigDecimal(int parameterIndex, BigDecimal x) - throws SQLException { - - setValue(parameterIndex, x); - } - - @Override - public void setString(int parameterIndex, String x) throws SQLException { - - setValue(parameterIndex, x); - } - - @Override - public void setBytes(int parameterIndex, byte[] x) throws SQLException { - - setValue(parameterIndex, x); - } - - @Override - public void setDate(int 
parameterIndex, Date x) throws SQLException { - - setValue(parameterIndex, x); - } - - @Override - public void setTime(int parameterIndex, Time x) throws SQLException { - - setValue(parameterIndex, x); - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x) - throws SQLException { - - setValue(parameterIndex, x); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, int length) - throws SQLException { - - - } - - @Override - public void setUnicodeStream(int parameterIndex, InputStream x, int length) - throws SQLException { - - - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x, int length) - throws SQLException { - - - } - - @Override - public void clearParameters() throws SQLException { - - - } - - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType) - throws SQLException { - - - } - - @Override - public void setObject(int parameterIndex, Object x) throws SQLException { - - setValue(parameterIndex,x); - } - - @Override - public boolean execute() throws SQLException { - - return false; - } - - @Override - public void addBatch() throws SQLException { - - - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader, int length) - throws SQLException { - - - } - - @Override - public void setRef(int parameterIndex, Ref x) throws SQLException { - - - } - - @Override - public void setBlob(int parameterIndex, Blob x) throws SQLException { - - - } - - @Override - public void setClob(int parameterIndex, Clob x) throws SQLException { - - - } - - @Override - public void setArray(int parameterIndex, Array x) throws SQLException { - - - } - - @Override - public ResultSetMetaData getMetaData() throws SQLException { - - return null; - } - - @Override - public void setDate(int parameterIndex, Date x, Calendar cal) - throws SQLException { - - - } - - @Override - public void setTime(int parameterIndex, Time x, Calendar cal) - throws SQLException { - 
- - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) - throws SQLException { - - - } - - @Override - public void setNull(int parameterIndex, int sqlType, String typeName) - throws SQLException { - - - } - - @Override - public void setURL(int parameterIndex, URL x) throws SQLException { - - - } - - @Override - public ParameterMetaData getParameterMetaData() throws SQLException { - - return null; - } - - @Override - public void setRowId(int parameterIndex, RowId x) throws SQLException { - - - } - - @Override - public void setNString(int parameterIndex, String value) - throws SQLException { - - - } - - @Override - public void setNCharacterStream(int parameterIndex, Reader value, - long length) throws SQLException { - - - } - - @Override - public void setNClob(int parameterIndex, NClob value) throws SQLException { - - - } - - @Override - public void setClob(int parameterIndex, Reader reader, long length) - throws SQLException { - - - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream, long length) - throws SQLException { - - - } - - @Override - public void setNClob(int parameterIndex, Reader reader, long length) - throws SQLException { - - - } - - @Override - public void setSQLXML(int parameterIndex, SQLXML xmlObject) - throws SQLException { - - - } - - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType, - int scaleOrLength) throws SQLException { - - - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, long length) - throws SQLException { - - - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x, long length) - throws SQLException { - - - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader, - long length) throws SQLException { - - - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x) - throws SQLException { - - - } - - @Override - public void 
setBinaryStream(int parameterIndex, InputStream x) - throws SQLException { - - - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader) - throws SQLException { - - - } - - @Override - public void setNCharacterStream(int parameterIndex, Reader value) - throws SQLException { - - - } - - @Override - public void setClob(int parameterIndex, Reader reader) throws SQLException { - - - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream) - throws SQLException { - - - } - - @Override - public void setNClob(int parameterIndex, Reader reader) throws SQLException { - - - } - -} +package io.mycat.backend.jdbc.sequoiadb; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.NClob; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLXML; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +/** + * 功能详细描述 + * @author sohudo[http://blog.csdn.net/wind520] + * @create 2014年12月19日 下午6:50:23 + * @version 0.0.1 + */ +public class SequoiaPreparedStatement extends SequoiaStatement implements + PreparedStatement { + final String _sql; + final SequoiaSQLParser _mongosql; + List _params = new ArrayList(); + + public SequoiaPreparedStatement(SequoiaConnection conn, int type, + int concurrency, int holdability, String sql) + throws SequoiaSQLException { + super(conn, type, concurrency, holdability); + this._sql = sql; + this._mongosql = new SequoiaSQLParser(conn.getDB(), sql); + } + + @Override + public ResultSet executeQuery() throws SQLException { + + return null; + } + + @Override + public int executeUpdate() throws 
SQLException { + + this._mongosql.setParams(this._params); + return this._mongosql.executeUpdate(); + } + + public void setValue(int idx, Object o) { + while (this._params.size() <= idx) { + this._params.add(null); + } + this._params.set(idx, o); + } + + @Override + public void setNull(int parameterIndex, int sqlType) throws SQLException { + + + } + + @Override + public void setBoolean(int parameterIndex, boolean x) throws SQLException { + + setValue(parameterIndex, Boolean.valueOf(x)); + } + + @Override + public void setByte(int parameterIndex, byte x) throws SQLException { + + setValue(parameterIndex, Byte.valueOf(x)); + } + + @Override + public void setShort(int parameterIndex, short x) throws SQLException { + + setValue(parameterIndex, Short.valueOf(x)); + } + + @Override + public void setInt(int parameterIndex, int x) throws SQLException { + + setValue(parameterIndex, Integer.valueOf(x)); + } + + @Override + public void setLong(int parameterIndex, long x) throws SQLException { + + setValue(parameterIndex, Long.valueOf(x)); + } + + @Override + public void setFloat(int parameterIndex, float x) throws SQLException { + + setValue(parameterIndex, Float.valueOf(x)); + } + + @Override + public void setDouble(int parameterIndex, double x) throws SQLException { + + setValue(parameterIndex, Double.valueOf(x)); + } + + @Override + public void setBigDecimal(int parameterIndex, BigDecimal x) + throws SQLException { + + setValue(parameterIndex, x); + } + + @Override + public void setString(int parameterIndex, String x) throws SQLException { + + setValue(parameterIndex, x); + } + + @Override + public void setBytes(int parameterIndex, byte[] x) throws SQLException { + + setValue(parameterIndex, x); + } + + @Override + public void setDate(int parameterIndex, Date x) throws SQLException { + + setValue(parameterIndex, x); + } + + @Override + public void setTime(int parameterIndex, Time x) throws SQLException { + + setValue(parameterIndex, x); + } + + @Override + public void 
setTimestamp(int parameterIndex, Timestamp x) + throws SQLException { + + setValue(parameterIndex, x); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, int length) + throws SQLException { + + + } + + @Override + public void setUnicodeStream(int parameterIndex, InputStream x, int length) + throws SQLException { + + + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, int length) + throws SQLException { + + + } + + @Override + public void clearParameters() throws SQLException { + + + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType) + throws SQLException { + + + } + + @Override + public void setObject(int parameterIndex, Object x) throws SQLException { + + setValue(parameterIndex,x); + } + + @Override + public boolean execute() throws SQLException { + + return false; + } + + @Override + public void addBatch() throws SQLException { + + + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, int length) + throws SQLException { + + + } + + @Override + public void setRef(int parameterIndex, Ref x) throws SQLException { + + + } + + @Override + public void setBlob(int parameterIndex, Blob x) throws SQLException { + + + } + + @Override + public void setClob(int parameterIndex, Clob x) throws SQLException { + + + } + + @Override + public void setArray(int parameterIndex, Array x) throws SQLException { + + + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + + return null; + } + + @Override + public void setDate(int parameterIndex, Date x, Calendar cal) + throws SQLException { + + + } + + @Override + public void setTime(int parameterIndex, Time x, Calendar cal) + throws SQLException { + + + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) + throws SQLException { + + + } + + @Override + public void setNull(int parameterIndex, int sqlType, String typeName) + throws SQLException 
{ + + + } + + @Override + public void setURL(int parameterIndex, URL x) throws SQLException { + + + } + + @Override + public ParameterMetaData getParameterMetaData() throws SQLException { + + return null; + } + + @Override + public void setRowId(int parameterIndex, RowId x) throws SQLException { + + + } + + @Override + public void setNString(int parameterIndex, String value) + throws SQLException { + + + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value, + long length) throws SQLException { + + + } + + @Override + public void setNClob(int parameterIndex, NClob value) throws SQLException { + + + } + + @Override + public void setClob(int parameterIndex, Reader reader, long length) + throws SQLException { + + + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream, long length) + throws SQLException { + + + } + + @Override + public void setNClob(int parameterIndex, Reader reader, long length) + throws SQLException { + + + } + + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) + throws SQLException { + + + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType, + int scaleOrLength) throws SQLException { + + + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, long length) + throws SQLException { + + + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, long length) + throws SQLException { + + + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, + long length) throws SQLException { + + + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x) + throws SQLException { + + + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x) + throws SQLException { + + + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader) + throws SQLException { + + + } + + @Override + public void 
setNCharacterStream(int parameterIndex, Reader value) + throws SQLException { + + + } + + @Override + public void setClob(int parameterIndex, Reader reader) throws SQLException { + + + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream) + throws SQLException { + + + } + + @Override + public void setNClob(int parameterIndex, Reader reader) throws SQLException { + + + } + +} diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaResultSet.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaResultSet.java index 6501e52a5..8cda05568 100644 --- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaResultSet.java +++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaResultSet.java @@ -1,6 +1,10 @@ package io.mycat.backend.jdbc.sequoiadb; +import com.sequoiadb.base.DBCursor; +import org.bson.BSONObject; +import org.bson.types.BasicBSONList; + import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; @@ -27,11 +31,6 @@ //import java.util.HashMap; import java.util.Map; import java.util.Set; - -import org.bson.BSONObject; -import org.bson.types.BasicBSONList; - -import com.sequoiadb.base.DBCursor; /** * 功能详细描述 * @author sohudo[http://blog.csdn.net/wind520] @@ -103,7 +102,9 @@ public void SetFieldType(boolean isid) throws SQLException { if (isid) { fieldtype= new int[Types.VARCHAR]; } - else fieldtype= new int[this.select.length]; + else { + fieldtype = new int[this.select.length]; + } if (_cur!=null) { for (int i=0;i keySet,String schema) { - super(); - this.keySet = new String[keySet.size()]; - this.keySet = keySet.toArray(this.keySet); - this._schema = schema; - } - */ - public SequoiaResultSetMetaData(String[] select,int [] ftype,String schema,String table) { - super(); - this.keySet = select; - this.keytype=ftype; - this._schema = schema; - this._table =table; - } - - @Override - public T unwrap(Class iface) throws SQLException { - - return null; - } - - @Override - public boolean 
isWrapperFor(Class iface) throws SQLException { - - return false; - } - - @Override - public int getColumnCount() throws SQLException { - if (keySet==null) return 0; - else - return keySet.length; - } - - @Override - public boolean isAutoIncrement(int column) throws SQLException { - // 是否为自动编号的字段 - return false; - } - - @Override - public boolean isCaseSensitive(int column) throws SQLException { - //指示列的大小写是否有关系 - return true; - } - - @Override - public boolean isSearchable(int column) throws SQLException { - //指示是否可以在 where 子句中使用指定的列 - return true; - } - - @Override - public boolean isCurrency(int column) throws SQLException { - // 指示指定的列是否是一个哈希代码值 - return false; - } - - @Override - public int isNullable(int column) throws SQLException { - // 指示指定列中的值是否可以为 null。 - return 0; - } - - @Override - public boolean isSigned(int column) throws SQLException { - // 指示指定列中的值是否带正负号 - return false; - } - - @Override - public int getColumnDisplaySize(int column) throws SQLException { - - return 50; - } - - @Override - public String getColumnLabel(int column) throws SQLException { - return keySet[column-1]; - } - - @Override - public String getColumnName(int column) throws SQLException { - return keySet[column-1]; - } - - @Override - public String getSchemaName(int column) throws SQLException { - - return this._schema; - } - - @Override - public int getPrecision(int column) throws SQLException { - //获取指定列的指定列宽 - return 0; - } - - @Override - public int getScale(int column) throws SQLException { - // 检索指定参数的小数点右边的位数。 - return 0; - } - - @Override - public String getTableName(int column) throws SQLException { - - return this._table; - } - - @Override - public String getCatalogName(int column) throws SQLException { - - return this._schema; - } - - @Override - public int getColumnType(int column) throws SQLException { - // 字段类型 - return keytype[column-1];//Types.VARCHAR; - } - - @Override - public String getColumnTypeName(int column) throws SQLException { - // 数据库特定的类型名称 - switch 
(keytype[column-1]){ - case Types.INTEGER: return "INTEGER"; - case Types.BOOLEAN: return "BOOLEAN"; - case Types.BIT: return "BITT"; - case Types.FLOAT: return "FLOAT"; - case Types.BIGINT: return "BIGINT"; - case Types.DOUBLE: return "DOUBLE"; - case Types.DATE: return "DATE"; - case Types.TIME: return "TIME"; - case Types.TIMESTAMP: return "TIMESTAMP"; - default: return "varchar"; - } - } - - @Override - public boolean isReadOnly(int column) throws SQLException { - //指示指定的列是否明确不可写入 - return false; - } - - @Override - public boolean isWritable(int column) throws SQLException { - - return false; - } - - @Override - public boolean isDefinitelyWritable(int column) throws SQLException { - - return false; - } - - @Override - public String getColumnClassName(int column) throws SQLException { - // 如果调用方法 ResultSet.getObject 从列中获取值,则返回构造其实例的 Java 类的完全限定名称 - return "Object"; - } - -} +package io.mycat.backend.jdbc.sequoiadb; + +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Types; + +/** + * 功能详细描述 + * @author sohudo[http://blog.csdn.net/wind520] + * @create 2014年12月19日 下午6:50:23 + * @version 0.0.1 + */ + +public class SequoiaResultSetMetaData implements ResultSetMetaData { + + private String[] keySet ; + private int[] keytype ; + private String _schema; + private String _table; + + /* + public MongoResultSetMetaData(Set keySet,String schema) { + super(); + this.keySet = new String[keySet.size()]; + this.keySet = keySet.toArray(this.keySet); + this._schema = schema; + } + */ + public SequoiaResultSetMetaData(String[] select,int [] ftype,String schema,String table) { + super(); + this.keySet = select; + this.keytype=ftype; + this._schema = schema; + this._table =table; + } + + @Override + public T unwrap(Class iface) throws SQLException { + + return null; + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + + return false; + } + + @Override + public int getColumnCount() throws SQLException { + if 
(keySet==null) { + return 0; + } + else { + return keySet.length; + } + } + + @Override + public boolean isAutoIncrement(int column) throws SQLException { + // 是否为自动编号的字段 + return false; + } + + @Override + public boolean isCaseSensitive(int column) throws SQLException { + //指示列的大小写是否有关系 + return true; + } + + @Override + public boolean isSearchable(int column) throws SQLException { + //指示是否可以在 where 子句中使用指定的列 + return true; + } + + @Override + public boolean isCurrency(int column) throws SQLException { + // 指示指定的列是否是一个哈希代码值 + return false; + } + + @Override + public int isNullable(int column) throws SQLException { + // 指示指定列中的值是否可以为 null。 + return 0; + } + + @Override + public boolean isSigned(int column) throws SQLException { + // 指示指定列中的值是否带正负号 + return false; + } + + @Override + public int getColumnDisplaySize(int column) throws SQLException { + + return 50; + } + + @Override + public String getColumnLabel(int column) throws SQLException { + return keySet[column-1]; + } + + @Override + public String getColumnName(int column) throws SQLException { + return keySet[column-1]; + } + + @Override + public String getSchemaName(int column) throws SQLException { + + return this._schema; + } + + @Override + public int getPrecision(int column) throws SQLException { + //获取指定列的指定列宽 + return 0; + } + + @Override + public int getScale(int column) throws SQLException { + // 检索指定参数的小数点右边的位数。 + return 0; + } + + @Override + public String getTableName(int column) throws SQLException { + + return this._table; + } + + @Override + public String getCatalogName(int column) throws SQLException { + + return this._schema; + } + + @Override + public int getColumnType(int column) throws SQLException { + // 字段类型 + return keytype[column-1];//Types.VARCHAR; + } + + @Override + public String getColumnTypeName(int column) throws SQLException { + // 数据库特定的类型名称 + switch (keytype[column-1]){ + case Types.INTEGER: return "INTEGER"; + case Types.BOOLEAN: return "BOOLEAN"; + case Types.BIT: return 
"BITT"; + case Types.FLOAT: return "FLOAT"; + case Types.BIGINT: return "BIGINT"; + case Types.DOUBLE: return "DOUBLE"; + case Types.DATE: return "DATE"; + case Types.TIME: return "TIME"; + case Types.TIMESTAMP: return "TIMESTAMP"; + default: return "varchar"; + } + } + + @Override + public boolean isReadOnly(int column) throws SQLException { + //指示指定的列是否明确不可写入 + return false; + } + + @Override + public boolean isWritable(int column) throws SQLException { + + return false; + } + + @Override + public boolean isDefinitelyWritable(int column) throws SQLException { + + return false; + } + + @Override + public String getColumnClassName(int column) throws SQLException { + // 如果调用方法 ResultSet.getObject 从列中获取值,则返回构造其实例的 Java 类的完全限定名称 + return "Object"; + } + +} diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaSQLParser.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaSQLParser.java index c430ac572..17b11c442 100644 --- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaSQLParser.java +++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaSQLParser.java @@ -1,440 +1,451 @@ -package io.mycat.backend.jdbc.sequoiadb; - - - -import java.sql.Types; -import java.util.List; - -import org.bson.BSONObject; -import org.bson.BasicBSONObject; -import org.bson.types.BasicBSONList; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.alibaba.druid.sql.ast.SQLExpr; -import com.alibaba.druid.sql.ast.SQLOrderBy; -import com.alibaba.druid.sql.ast.SQLOrderingSpecification; -import com.alibaba.druid.sql.ast.SQLStatement; -import com.alibaba.druid.sql.ast.expr.SQLAggregateExpr; -import com.alibaba.druid.sql.ast.expr.SQLAllColumnExpr; -import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr; -import com.alibaba.druid.sql.ast.expr.SQLBooleanExpr; -import com.alibaba.druid.sql.ast.expr.SQLCharExpr; -import com.alibaba.druid.sql.ast.expr.SQLIntegerExpr; -import com.alibaba.druid.sql.ast.expr.SQLNullExpr; -import 
com.alibaba.druid.sql.ast.expr.SQLNumberExpr; -import com.alibaba.druid.sql.ast.expr.SQLVariantRefExpr; -import com.alibaba.druid.sql.ast.statement.SQLCreateTableStatement; -import com.alibaba.druid.sql.ast.statement.SQLDeleteStatement; -import com.alibaba.druid.sql.ast.statement.SQLDropTableStatement; -import com.alibaba.druid.sql.ast.statement.SQLInsertStatement; -import com.alibaba.druid.sql.ast.statement.SQLSelectGroupByClause; -import com.alibaba.druid.sql.ast.statement.SQLSelectItem; -import com.alibaba.druid.sql.ast.statement.SQLSelectOrderByItem; -import com.alibaba.druid.sql.ast.statement.SQLSelectQuery; -import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; -import com.alibaba.druid.sql.ast.statement.SQLTableSource; -import com.alibaba.druid.sql.ast.statement.SQLUpdateSetItem; -import com.alibaba.druid.sql.ast.statement.SQLUpdateStatement; -import com.alibaba.druid.sql.dialect.mysql.ast.expr.MySqlSelectGroupByExpr; -import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; -import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; -import com.sequoiadb.base.CollectionSpace; -import com.sequoiadb.base.DBCollection; -import com.sequoiadb.base.DBCursor; -/** - * 功能详细描述 - * @author sohudo[http://blog.csdn.net/wind520] - * @create 2014年12月19日 下午6:50:23 - * @version 0.0.1 - */ -public class SequoiaSQLParser { - private static final Logger LOGGER = LoggerFactory.getLogger(SequoiaSQLParser.class); - private final CollectionSpace _db; -// private final String _sql; - private final SQLStatement statement; - private List _params; - private int _pos; - public SequoiaSQLParser(CollectionSpace db, String sql) throws SequoiaSQLException - { - this._db = db; - // this._sql = sql; - this.statement = parser(sql); - } - - public SQLStatement parser(String s) throws SequoiaSQLException - { - s = s.trim(); - try - { - MySqlStatementParser parser = new MySqlStatementParser(s); - return parser.parseStatement(); - } - catch (Exception 
e) - { - LOGGER.error("MongoSQLParser.parserError", e); - } - throw new SequoiaSQLException.ErrorSQL(s); - } - - public void setParams(List params) - { - this._pos = 1; - this._params = params; - } - - public SequoiaData query() throws SequoiaSQLException{ - if (!(statement instanceof SQLSelectStatement)) { - //return null; - throw new IllegalArgumentException("not a query sql statement"); - } - SequoiaData mongo=new SequoiaData(); - DBCursor c=null; - SQLSelectStatement selectStmt = (SQLSelectStatement)statement; - SQLSelectQuery sqlSelectQuery =selectStmt.getSelect().getQuery(); - int icount=0; - if(sqlSelectQuery instanceof MySqlSelectQueryBlock) { - MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery(); - - BasicBSONObject fields = new BasicBSONObject(); - //显示的字段 - for(SQLSelectItem item : mysqlSelectQuery.getSelectList()) { - //System.out.println(item.toString()); - if (!(item.getExpr() instanceof SQLAllColumnExpr)) { - if (item.getExpr() instanceof SQLAggregateExpr) { - SQLAggregateExpr expr =(SQLAggregateExpr)item.getExpr(); - if (expr.getMethodName().equals("COUNT")) { - icount=1; - mongo.setField(getExprFieldName(expr), Types.BIGINT); - } - fields.put(getExprFieldName(expr), Integer.valueOf(1)); - } - else { - fields.put(getFieldName(item), Integer.valueOf(1)); - } - } - - } - - //表名 - SQLTableSource table=mysqlSelectQuery.getFrom(); - DBCollection coll =this._db.getCollection(table.toString()); - mongo.setTable(table.toString()); - - SQLExpr expr=mysqlSelectQuery.getWhere(); - BSONObject query = parserWhere(expr); - //System.out.println(query); - SQLSelectGroupByClause groupby=mysqlSelectQuery.getGroupBy(); - BasicBSONObject gbkey = new BasicBSONObject(); - if (groupby!=null) { - for (SQLExpr gbexpr:groupby.getItems()){ - if (gbexpr instanceof MySqlSelectGroupByExpr) { - SQLExpr gbyexpr=((MySqlSelectGroupByExpr) gbexpr).getExpr(); - gbkey.put(getFieldName2(gbyexpr), Integer.valueOf(1)); - } - } - icount=2; - } 
- int limitoff=0; - int limitnum=0; - if (mysqlSelectQuery.getLimit()!=null) { - limitoff=getSQLExprToInt(mysqlSelectQuery.getLimit().getOffset()); - limitnum=getSQLExprToInt(mysqlSelectQuery.getLimit().getRowCount()); - } - - SQLOrderBy orderby=mysqlSelectQuery.getOrderBy(); - BasicBSONObject order = new BasicBSONObject(); - if (orderby != null ){ - for (int i = 0; i < orderby.getItems().size(); i++) - { - SQLSelectOrderByItem orderitem = orderby.getItems().get(i); - order.put(orderitem.getExpr().toString(), Integer.valueOf(getSQLExprToAsc(orderitem.getType()))); - } - // c.sort(order); - // System.out.println(order); - } - - if (icount==1) { - mongo.setCount(coll.getCount(query)); - } - else if (icount==2){ - BasicBSONObject initial = new BasicBSONObject(); - initial.put("num", 0); - String reduce="function (obj, prev) { " - +" prev.num++}"; - //mongo.setGrouyBy(coll.group(gbkey, query, initial, reduce)); - } - else { - if ((limitoff>0) || (limitnum>0)) { - c = coll.query(query, fields, order,null, limitoff, limitnum);//.skip(limitoff).limit(limitnum); - } - else { - c = coll.query(query, fields, order,null, 0, -1); - } - - - } - mongo.setCursor(c); - } - return mongo; - } - - public int executeUpdate() throws SequoiaSQLException { - if (statement instanceof SQLInsertStatement) { - return InsertData((SQLInsertStatement)statement); - } - if (statement instanceof SQLUpdateStatement) { - return UpData((SQLUpdateStatement)statement); - } - if (statement instanceof SQLDropTableStatement) { - return dropTable((SQLDropTableStatement)statement); - } - if (statement instanceof SQLDeleteStatement) { - return DeleteDate((SQLDeleteStatement)statement); - } - if (statement instanceof SQLCreateTableStatement) { - return createTable((SQLCreateTableStatement)statement); - } - return 1; - - } - private int InsertData(SQLInsertStatement state) { - if (state.getValues().getValues().size() ==0 ){ - throw new RuntimeException("number of columns error"); - } - if 
(state.getValues().getValues().size() != state.getColumns().size()){ - throw new RuntimeException("number of values and columns have to match"); - } - SQLTableSource table=state.getTableSource(); - BSONObject o = new BasicBSONObject(); - int i=0; - for(SQLExpr col : state.getColumns()) { - o.put(getFieldName2(col), getExpValue(state.getValues().getValues().get(i))); - i++; - } - DBCollection coll =this._db.getCollection(table.toString()); - //coll.insert(new DBObject[] { o }); - coll.insert(o); - return 1; - } - private int UpData(SQLUpdateStatement state) { - SQLTableSource table=state.getTableSource(); - DBCollection coll =this._db.getCollection(table.toString()); - - SQLExpr expr=state.getWhere(); - BSONObject query = parserWhere(expr); - - BasicBSONObject set = new BasicBSONObject(); - for(SQLUpdateSetItem col : state.getItems()){ - set.put(getFieldName2(col.getColumn()), getExpValue(col.getValue())); - } - BSONObject mod = new BasicBSONObject("$set", set); - //coll.updateMulti(query, mod); - coll.update(query, mod, null); - //System.out.println("changs count:"+coll.getStats().size()); - return 1; - } - private int DeleteDate(SQLDeleteStatement state) { - SQLTableSource table=state.getTableSource(); - DBCollection coll =this._db.getCollection(table.toString()); - - SQLExpr expr=state.getWhere(); - if (expr==null) { - throw new RuntimeException("not where of sql"); - } - BSONObject query = parserWhere(expr); - - //coll.remove(query); - coll.delete(query); - return 1; - - } - private int dropTable(SQLDropTableStatement state) { - for (SQLTableSource table : state.getTableSources()){ - //DBCollection coll =this._db.getCollection(table.toString()); - //coll.drop(); - this._db.dropCollection(table.toString()); - } - return 1; - - } - - private int createTable(SQLCreateTableStatement state) { - //for (SQLTableSource table : state.getTableSource()){ - if (!this._db.isCollectionExist(state.getTableSource().toString())) - 
this._db.createCollection(state.getTableSource().toString()); - return 1; - } - - private int getSQLExprToInt(SQLExpr expr){ - if (expr instanceof SQLIntegerExpr){ - return ((SQLIntegerExpr)expr).getNumber().intValue(); - } - return 0; - } - private int getSQLExprToAsc(SQLOrderingSpecification ASC){ - if (ASC==null ) return 1; - if (ASC==SQLOrderingSpecification.DESC){ - return -1; - } - else { - return 1; - } - } - public String remove(String resource,char ch) - { - StringBuffer buffer=new StringBuffer(); - int position=0; - char currentChar; - - while(position")) op="$gt"; - if (expr.getOperator().getName().equals(">=")) op="$gte"; - - if (expr.getOperator().getName().equals("!=")) op="$ne"; - if (expr.getOperator().getName().equals("<>")) op="$ne"; - //xo.put(op, getExpValue(expr.getRight())); - // o.put(exprL.toString(),xo); - parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight())); - } - } - } - private void parserWhere(SQLExpr aexpr,BasicBSONObject o){ - if(aexpr instanceof SQLBinaryOpExpr){ - SQLBinaryOpExpr expr=(SQLBinaryOpExpr)aexpr; - SQLExpr exprL=expr.getLeft(); - if (!(exprL instanceof SQLBinaryOpExpr)) - { - //opSQLExpr((SQLBinaryOpExpr)aexpr,o); - if (expr.getOperator().getName().equals("=")) { - o.put(exprL.toString(), getExpValue(expr.getRight())); - } - else { - String op=""; - if (expr.getOperator().getName().equals("<")) op="$lt"; - if (expr.getOperator().getName().equals("<=")) op="$lte"; - if (expr.getOperator().getName().equals(">")) op="$gt"; - if (expr.getOperator().getName().equals(">=")) op="$gte"; - - if (expr.getOperator().getName().equals("!=")) op="$ne"; - if (expr.getOperator().getName().equals("<>")) op="$ne"; - - parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight())); - } - - } - else { - if (expr.getOperator().getName().equals("AND")) { - parserWhere(exprL,o); - parserWhere(expr.getRight(),o); - } - else if (expr.getOperator().getName().equals("OR")) { - orWhere(exprL,expr.getRight(),o); - } - else { - 
throw new RuntimeException("Can't identify the operation of of where"); - } - } - } - - } - - - private void orWhere(SQLExpr exprL,SQLExpr exprR ,BasicBSONObject ob){ - BasicBSONObject xo = new BasicBSONObject(); - BasicBSONObject yo = new BasicBSONObject(); - parserWhere(exprL,xo); - parserWhere(exprR,yo); - ob.put("$or",new Object[]{xo,yo}); - } -} +package io.mycat.backend.jdbc.sequoiadb; + + + +import java.sql.Types; +import java.util.List; + + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.sequoiadb.base.CollectionSpace; +import com.sequoiadb.base.DBCollection; +import com.sequoiadb.base.DBCursor; + +import org.bson.BSONObject; +import org.bson.BasicBSONObject; +import org.bson.types.BasicBSONList; + +import com.alibaba.druid.sql.ast.SQLExpr; +import com.alibaba.druid.sql.ast.SQLOrderingSpecification; +import com.alibaba.druid.sql.ast.SQLStatement; + +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import com.alibaba.druid.sql.ast.statement.*; +import com.alibaba.druid.sql.ast.expr.*; +import com.alibaba.druid.sql.ast.*; +/** + * 功能详细描述 + * @author sohudo[http://blog.csdn.net/wind520] + * @create 2014年12月19日 下午6:50:23 + * @version 0.0.1 + */ +public class SequoiaSQLParser { + private static final Logger LOGGER = LoggerFactory.getLogger(SequoiaSQLParser.class); + private final CollectionSpace _db; +// private final String _sql; + private final SQLStatement statement; + private List _params; + private int _pos; + public SequoiaSQLParser(CollectionSpace db, String sql) throws SequoiaSQLException + { + this._db = db; + // this._sql = sql; + this.statement = parser(sql); + } + + public SQLStatement parser(String s) throws SequoiaSQLException + { + s = s.trim(); + try + { + MySqlStatementParser parser = new MySqlStatementParser(s); + return parser.parseStatement(); + } + catch (Exception e) + { + 
LOGGER.error("MongoSQLParser.parserError", e); + } + throw new SequoiaSQLException.ErrorSQL(s); + } + + public void setParams(List params) + { + this._pos = 1; + this._params = params; + } + + public SequoiaData query() throws SequoiaSQLException{ + if (!(statement instanceof SQLSelectStatement)) { + //return null; + throw new IllegalArgumentException("not a query sql statement"); + } + SequoiaData mongo=new SequoiaData(); + DBCursor c=null; + SQLSelectStatement selectStmt = (SQLSelectStatement)statement; + SQLSelectQuery sqlSelectQuery =selectStmt.getSelect().getQuery(); + int icount=0; + if(sqlSelectQuery instanceof MySqlSelectQueryBlock) { + MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery(); + + BasicBSONObject fields = new BasicBSONObject(); + //显示的字段 + for(SQLSelectItem item : mysqlSelectQuery.getSelectList()) { + //System.out.println(item.toString()); + if (!(item.getExpr() instanceof SQLAllColumnExpr)) { + if (item.getExpr() instanceof SQLAggregateExpr) { + SQLAggregateExpr expr =(SQLAggregateExpr)item.getExpr(); + if (expr.getMethodName().equals("COUNT")) { + icount=1; + mongo.setField(getExprFieldName(expr), Types.BIGINT); + } + fields.put(getExprFieldName(expr), Integer.valueOf(1)); + } + else { + fields.put(getFieldName(item), Integer.valueOf(1)); + } + } + + } + + //表名 + SQLTableSource table=mysqlSelectQuery.getFrom(); + DBCollection coll =this._db.getCollection(table.toString()); + mongo.setTable(table.toString()); + + SQLExpr expr=mysqlSelectQuery.getWhere(); + BSONObject query = parserWhere(expr); + //System.out.println(query); + SQLSelectGroupByClause groupby=mysqlSelectQuery.getGroupBy(); + BasicBSONObject gbkey = new BasicBSONObject(); + if (groupby!=null) { + for (SQLExpr gbexpr:groupby.getItems()){ + if (gbexpr instanceof SQLIdentifierExpr) { + String name =((SQLIdentifierExpr) gbexpr).getName(); + gbkey.put(name, Integer.valueOf(1)); + } + } + icount=2; + } + int limitoff=0; + int limitnum=0; + if 
(mysqlSelectQuery.getLimit()!=null) { + limitoff=getSQLExprToInt(mysqlSelectQuery.getLimit().getOffset()); + limitnum=getSQLExprToInt(mysqlSelectQuery.getLimit().getRowCount()); + } + + SQLOrderBy orderby=mysqlSelectQuery.getOrderBy(); + BasicBSONObject order = new BasicBSONObject(); + if (orderby != null ){ + for (int i = 0; i < orderby.getItems().size(); i++) + { + SQLSelectOrderByItem orderitem = orderby.getItems().get(i); + order.put(orderitem.getExpr().toString(), Integer.valueOf(getSQLExprToAsc(orderitem.getType()))); + } + // c.sort(order); + // System.out.println(order); + } + + if (icount==1) { + mongo.setCount(coll.getCount(query)); + } + else if (icount==2){ + BasicBSONObject initial = new BasicBSONObject(); + initial.put("num", 0); + String reduce="function (obj, prev) { " + +" prev.num++}"; + //mongo.setGrouyBy(coll.group(gbkey, query, initial, reduce)); + } + else { + if ((limitoff>0) || (limitnum>0)) { + c = coll.query(query, fields, order,null, limitoff, limitnum);//.skip(limitoff).limit(limitnum); + } + else { + c = coll.query(query, fields, order,null, 0, -1); + } + + + } + mongo.setCursor(c); + } + return mongo; + } + + public int executeUpdate() throws SequoiaSQLException { + if (statement instanceof SQLInsertStatement) { + return InsertData((SQLInsertStatement)statement); + } + if (statement instanceof SQLUpdateStatement) { + return UpData((SQLUpdateStatement)statement); + } + if (statement instanceof SQLDropTableStatement) { + return dropTable((SQLDropTableStatement)statement); + } + if (statement instanceof SQLDeleteStatement) { + return DeleteDate((SQLDeleteStatement)statement); + } + if (statement instanceof SQLCreateTableStatement) { + return createTable((SQLCreateTableStatement)statement); + } + return 1; + + } + private int InsertData(SQLInsertStatement state) { + if (state.getValues().getValues().size() ==0 ){ + throw new RuntimeException("number of columns error"); + } + if (state.getValues().getValues().size() != 
state.getColumns().size()){ + throw new RuntimeException("number of values and columns have to match"); + } + SQLTableSource table=state.getTableSource(); + BSONObject o = new BasicBSONObject(); + int i=0; + for(SQLExpr col : state.getColumns()) { + o.put(getFieldName2(col), getExpValue(state.getValues().getValues().get(i))); + i++; + } + DBCollection coll =this._db.getCollection(table.toString()); + //coll.insert(new DBObject[] { o }); + coll.insert(o); + return 1; + } + private int UpData(SQLUpdateStatement state) { + SQLTableSource table=state.getTableSource(); + DBCollection coll =this._db.getCollection(table.toString()); + + SQLExpr expr=state.getWhere(); + BSONObject query = parserWhere(expr); + + BasicBSONObject set = new BasicBSONObject(); + for(SQLUpdateSetItem col : state.getItems()){ + set.put(getFieldName2(col.getColumn()), getExpValue(col.getValue())); + } + BSONObject mod = new BasicBSONObject("$set", set); + //coll.updateMulti(query, mod); + coll.update(query, mod, null); + //System.out.println("changs count:"+coll.getStats().size()); + return 1; + } + private int DeleteDate(SQLDeleteStatement state) { + SQLTableSource table=state.getTableSource(); + DBCollection coll =this._db.getCollection(table.toString()); + + SQLExpr expr=state.getWhere(); + if (expr==null) { + throw new RuntimeException("not where of sql"); + } + BSONObject query = parserWhere(expr); + + //coll.remove(query); + coll.delete(query); + return 1; + + } + private int dropTable(SQLDropTableStatement state) { + for (SQLTableSource table : state.getTableSources()){ + //DBCollection coll =this._db.getCollection(table.toString()); + //coll.drop(); + this._db.dropCollection(table.toString()); + } + return 1; + + } + + private int createTable(SQLCreateTableStatement state) { + //for (SQLTableSource table : state.getTableSource()){ + if (!this._db.isCollectionExist(state.getTableSource().toString())) { + this._db.createCollection(state.getTableSource().toString()); + } + return 1; + } + + 
private int getSQLExprToInt(SQLExpr expr){ + if (expr instanceof SQLIntegerExpr){ + return ((SQLIntegerExpr)expr).getNumber().intValue(); + } + return 0; + } + private int getSQLExprToAsc(SQLOrderingSpecification ASC){ + if (ASC==null ) { + return 1; + } + if (ASC==SQLOrderingSpecification.DESC){ + return -1; + } + else { + return 1; + } + } + public String remove(String resource,char ch) + { + StringBuffer buffer=new StringBuffer(); + int position=0; + char currentChar; + + while(position")) { + op = "$gt"; + } + if (expr.getOperator().getName().equals(">=")) { + op = "$gte"; + } + if (expr.getOperator().getName().equals("!=")) { + op = "$ne"; + } + if (expr.getOperator().getName().equals("<>")) { + op = "$ne"; + } + //xo.put(op, getExpValue(expr.getRight())); + // o.put(exprL.toString(),xo); + parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight())); + } + } + } + private void parserWhere(SQLExpr aexpr,BasicBSONObject o){ + if(aexpr instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr expr=(SQLBinaryOpExpr)aexpr; + SQLExpr exprL=expr.getLeft(); + if (!(exprL instanceof SQLBinaryOpExpr)) + { + //opSQLExpr((SQLBinaryOpExpr)aexpr,o); + if (expr.getOperator().getName().equals("=")) { + o.put(exprL.toString(), getExpValue(expr.getRight())); + } + else { + String op=""; + if (expr.getOperator().getName().equals("<")) { + op = "$lt"; + } + if (expr.getOperator().getName().equals("<=")) { + op = "$lte"; + } + if (expr.getOperator().getName().equals(">")) { + op = "$gt"; + } + if (expr.getOperator().getName().equals(">=")) { + op = "$gte"; + } + if (expr.getOperator().getName().equals("!=")) { + op = "$ne"; + } + if (expr.getOperator().getName().equals("<>")) { + op = "$ne"; + } + + parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight())); + } + + } + else { + if (expr.getOperator().getName().equals("AND")) { + parserWhere(exprL,o); + parserWhere(expr.getRight(),o); + } + else if (expr.getOperator().getName().equals("OR")) { + 
orWhere(exprL,expr.getRight(),o); + } + else { + throw new RuntimeException("Can't identify the operation of of where"); + } + } + } + + } + + + private void orWhere(SQLExpr exprL,SQLExpr exprR ,BasicBSONObject ob){ + BasicBSONObject xo = new BasicBSONObject(); + BasicBSONObject yo = new BasicBSONObject(); + parserWhere(exprL,xo); + parserWhere(exprR,yo); + ob.put("$or",new Object[]{xo,yo}); + } +} diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaStatement.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaStatement.java index ba8641c08..e9f61d88d 100644 --- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaStatement.java +++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaStatement.java @@ -28,12 +28,15 @@ public SequoiaStatement(SequoiaConnection conn, int type, int concurrency, int h this._concurrency = concurrency; this._holdability = holdability; - if (this._type != 0) - throw new UnsupportedOperationException("type not supported yet"); - if (this._concurrency != 0) - throw new UnsupportedOperationException("concurrency not supported yet"); - if (this._holdability != 0) - throw new UnsupportedOperationException("holdability not supported yet"); + if (this._type != 0) { + throw new UnsupportedOperationException("type not supported yet"); + } + if (this._concurrency != 0) { + throw new UnsupportedOperationException("concurrency not supported yet"); + } + if (this._holdability != 0) { + throw new UnsupportedOperationException("holdability not supported yet"); + } } @Override @@ -52,12 +55,12 @@ public boolean isWrapperFor(Class iface) throws SQLException { public ResultSet executeQuery(String sql) throws SQLException { SequoiaData mongo= new SequoiaSQLParser(this._conn.getDB(), sql).query(); - if (this._fetchSize > 0) { - //设置每次网络请求的最大记录数 - if (mongo.getCursor()!=null) { - //mongo.getCursor().batchSize(this._fetchSize); - } - } +// if (this._fetchSize > 0) { +// //设置每次网络请求的最大记录数 +// if (mongo.getCursor()!=null) { +// 
//mongo.getCursor().batchSize(this._fetchSize); +// } +// } /* if (this._maxRows > 0) { diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/StringUtils.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/StringUtils.java index 063117d22..d4d3758f7 100644 --- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/StringUtils.java +++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/StringUtils.java @@ -1,16 +1,16 @@ -package io.mycat.backend.jdbc.sequoiadb; - - -public class StringUtils { - - - public static boolean startsWithIgnoreCase(String searchIn, int startAt, - String searchFor) { - return searchIn.regionMatches(true, startAt, searchFor, 0, searchFor - .length()); - } - - public static boolean startsWithIgnoreCase(String searchIn, String searchFor) { - return startsWithIgnoreCase(searchIn, 0, searchFor); - } +package io.mycat.backend.jdbc.sequoiadb; + + +public class StringUtils { + + + public static boolean startsWithIgnoreCase(String searchIn, int startAt, + String searchFor) { + return searchIn.regionMatches(true, startAt, searchFor, 0, searchFor + .length()); + } + + public static boolean startsWithIgnoreCase(String searchIn, String searchFor) { + return startsWithIgnoreCase(searchIn, 0, searchFor); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/packet/util/BindValue.java b/src/main/java/io/mycat/backend/mysql/BindValue.java similarity index 98% rename from src/main/java/io/mycat/server/packet/util/BindValue.java rename to src/main/java/io/mycat/backend/mysql/BindValue.java index 4d2ec6934..b4c352a3c 100644 --- a/src/main/java/io/mycat/server/packet/util/BindValue.java +++ b/src/main/java/io/mycat/backend/mysql/BindValue.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.packet.util; +package io.mycat.backend.mysql; /** * @author mycat diff --git a/src/main/java/io/mycat/server/packet/util/BindValueUtil.java b/src/main/java/io/mycat/backend/mysql/BindValueUtil.java similarity index 92% rename from src/main/java/io/mycat/server/packet/util/BindValueUtil.java rename to src/main/java/io/mycat/backend/mysql/BindValueUtil.java index 51b737aba..bc93a54f1 100644 --- a/src/main/java/io/mycat/server/packet/util/BindValueUtil.java +++ b/src/main/java/io/mycat/backend/mysql/BindValueUtil.java @@ -21,13 +21,12 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet.util; - -import io.mycat.server.Fields; -import io.mycat.server.packet.MySQLMessage; +package io.mycat.backend.mysql; import java.io.UnsupportedEncodingException; +import io.mycat.config.Fields; + /** * @author mycat */ @@ -68,9 +67,9 @@ public static final void read(MySQLMessage mm, BindValue bv, String charset) thr case Fields.FIELD_TYPE_STRING: case Fields.FIELD_TYPE_VARCHAR: bv.value = mm.readStringWithLength(charset); - if (bv.value == null) { - bv.isNull = true; - } +// if (bv.value == null) { +// bv.isNull = true; +// } break; case Fields.FIELD_TYPE_DECIMAL: case Fields.FIELD_TYPE_NEW_DECIMAL: @@ -79,6 +78,9 @@ public static final void read(MySQLMessage mm, BindValue bv, String charset) thr bv.isNull = true; } break; + case Fields.FIELD_TYPE_BLOB: + bv.isLongData = true; + break; default: throw new IllegalArgumentException("bindValue error,unsupported type:" + bv.type); } diff --git a/src/main/java/io/mycat/server/packet/util/BufferUtil.java b/src/main/java/io/mycat/backend/mysql/BufferUtil.java similarity index 99% rename from src/main/java/io/mycat/server/packet/util/BufferUtil.java rename to src/main/java/io/mycat/backend/mysql/BufferUtil.java index 69a456350..920cbffaf 100644 --- a/src/main/java/io/mycat/server/packet/util/BufferUtil.java +++ b/src/main/java/io/mycat/backend/mysql/BufferUtil.java @@ -21,7 +21,7 @@ * 
https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet.util; +package io.mycat.backend.mysql; import java.nio.ByteBuffer; diff --git a/src/main/java/io/mycat/server/packet/util/ByteUtil.java b/src/main/java/io/mycat/backend/mysql/ByteUtil.java similarity index 97% rename from src/main/java/io/mycat/server/packet/util/ByteUtil.java rename to src/main/java/io/mycat/backend/mysql/ByteUtil.java index db86b7032..48f445db7 100644 --- a/src/main/java/io/mycat/server/packet/util/ByteUtil.java +++ b/src/main/java/io/mycat/backend/mysql/ByteUtil.java @@ -21,9 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet.util; - -import io.mycat.server.packet.MySQLMessage; +package io.mycat.backend.mysql; /** * @author mycat diff --git a/src/main/java/io/mycat/backend/mysql/CharsetUtil.java b/src/main/java/io/mycat/backend/mysql/CharsetUtil.java new file mode 100644 index 000000000..56df40738 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/CharsetUtil.java @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.backend.mysql; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.FileInputStream; +import java.util.*; + +/** + * @author mycat + */ +public class CharsetUtil { + public static final Logger logger = LoggerFactory + .getLogger(CharsetUtil.class); + private static final Map INDEX_TO_CHARSET = new HashMap<>(); + private static final Map CHARSET_TO_INDEX = new HashMap<>(); + static { + + // index_to_charset.properties + INDEX_TO_CHARSET.put(1,"big5"); + INDEX_TO_CHARSET.put(8,"latin1"); + INDEX_TO_CHARSET.put(9,"latin2"); + INDEX_TO_CHARSET.put(14,"cp1251"); + INDEX_TO_CHARSET.put(28,"gbk"); + INDEX_TO_CHARSET.put(24,"gb2312"); + INDEX_TO_CHARSET.put(33,"utf8"); + INDEX_TO_CHARSET.put(45,"utf8mb4"); + + String filePath = Thread.currentThread().getContextClassLoader() + .getResource("").getPath().replaceAll("%20", " ") + + "index_to_charset.properties"; + Properties prop = new Properties(); + try { + prop.load(new FileInputStream(filePath)); + for (Object index : prop.keySet()){ + INDEX_TO_CHARSET.put(Integer.parseInt((String) index), prop.getProperty((String) index)); + } + } catch (Exception e) { + logger.error("error:",e); + } + + // charset --> index + for(Integer key : INDEX_TO_CHARSET.keySet()){ + String charset = INDEX_TO_CHARSET.get(key); + if(charset != null && CHARSET_TO_INDEX.get(charset) == null){ + CHARSET_TO_INDEX.put(charset, key); + } + } + + CHARSET_TO_INDEX.put("iso-8859-1", 14); + CHARSET_TO_INDEX.put("iso_8859_1", 14); + CHARSET_TO_INDEX.put("utf-8", 33); + } + + public static final String getCharset(int index) { + return 
INDEX_TO_CHARSET.get(index); + } + + public static final int getIndex(String charset) { + if (charset == null || charset.length() == 0) { + return 0; + } else { + Integer i = CHARSET_TO_INDEX.get(charset.toLowerCase()); + return (i == null) ? 0 : i; + } + } + + + +} diff --git a/src/main/java/io/mycat/backend/mysql/DataType.java b/src/main/java/io/mycat/backend/mysql/DataType.java new file mode 100644 index 000000000..2acc44793 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/DataType.java @@ -0,0 +1,20 @@ +package io.mycat.backend.mysql; + +/** + * 定义返回的数据类型 + * @author huangyiming + * + */ +public enum DataType { + + STRING("String"),DOUBLE("Double"),FLOAT("Float"),DATE("Date"),INT("Int"); + private String type; + private DataType(String type){ + this.type = type; + } + public String getType() { + return type; + } + + +} diff --git a/src/main/java/io/mycat/backend/mysql/LoadDataUtil.java b/src/main/java/io/mycat/backend/mysql/LoadDataUtil.java new file mode 100644 index 000000000..bbe9cd738 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/LoadDataUtil.java @@ -0,0 +1,106 @@ +package io.mycat.backend.mysql; + +import java.io.*; +import java.util.List; + +import io.mycat.MycatServer; +import io.mycat.backend.BackendConnection; +import io.mycat.net.BackendAIOConnection; +import io.mycat.net.mysql.BinaryPacket; +import io.mycat.net.mysql.CommandPacket; +import io.mycat.net.mysql.MySQLPacket; +import io.mycat.route.RouteResultsetNode; +import io.mycat.sqlengine.mpp.LoadData; + +/** + * Created by nange on 2015/3/31. 
+ */ +public class LoadDataUtil +{ + public static void requestFileDataResponse(byte[] data, BackendConnection conn) + { + + byte packId= data[3]; + BackendAIOConnection backendAIOConnection= (BackendAIOConnection) conn; + RouteResultsetNode rrn= (RouteResultsetNode) conn.getAttachment(); + LoadData loadData= rrn.getLoadData(); + List loadDataData = loadData.getData(); + try + { + if(loadDataData !=null&&loadDataData.size()>0) + { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + for (int i = 0, loadDataDataSize = loadDataData.size(); i < loadDataDataSize; i++) + { + String line = loadDataData.get(i); + + + String s =(i==loadDataDataSize-1)?line: line + loadData.getLineTerminatedBy(); + byte[] bytes = s.getBytes(loadData.getCharset()); + bos.write(bytes); + + + } + + packId= writeToBackConnection(packId,new ByteArrayInputStream(bos.toByteArray()),backendAIOConnection); + + } else + { + //从文件读取 + packId= writeToBackConnection(packId,new BufferedInputStream(new FileInputStream(loadData.getFileName())),backendAIOConnection); + + } + }catch (IOException e) + { + + throw new RuntimeException(e); + } finally + { + //结束必须发空包 + byte[] empty = new byte[] { 0, 0, 0,3 }; + empty[3]=++packId; + backendAIOConnection.write(empty); + } + + + + + } + + public static byte writeToBackConnection(byte packID,InputStream inputStream,BackendAIOConnection backendAIOConnection) throws IOException + { + try + { + int packSize = MycatServer.getInstance().getConfig().getSystem().getBufferPoolChunkSize() - 5; + // int packSize = backendAIOConnection.getMaxPacketSize() / 32; + // int packSize=65530; + byte[] buffer = new byte[packSize]; + int len = -1; + + while ((len = inputStream.read(buffer)) != -1) + { + byte[] temp = null; + if (len == packSize) + { + temp = buffer; + } else + { + temp = new byte[len]; + System.arraycopy(buffer, 0, temp, 0, len); + } + BinaryPacket packet = new BinaryPacket(); + packet.packetId = ++packID; + packet.data = temp; + 
packet.write(backendAIOConnection); + } + + } + finally + { + inputStream.close(); + } + + + return packID; + } +} diff --git a/src/main/java/io/mycat/server/packet/MySQLMessage.java b/src/main/java/io/mycat/backend/mysql/MySQLMessage.java similarity index 99% rename from src/main/java/io/mycat/server/packet/MySQLMessage.java rename to src/main/java/io/mycat/backend/mysql/MySQLMessage.java index 04ee9dbe0..b8f60996c 100644 --- a/src/main/java/io/mycat/server/packet/MySQLMessage.java +++ b/src/main/java/io/mycat/backend/mysql/MySQLMessage.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet; +package io.mycat.backend.mysql; import java.io.UnsupportedEncodingException; import java.math.BigDecimal; @@ -298,9 +298,9 @@ public String readStringWithLength() { public String readStringWithLength(String charset) throws UnsupportedEncodingException { int length = (int) readLength(); - if (length <= 0) { - return null; - } +// if (length <= 0) { +// return null; +// } String s = new String(data, position, length, charset); position += length; return s; diff --git a/src/main/java/io/mycat/server/packet/util/PacketUtil.java b/src/main/java/io/mycat/backend/mysql/PacketUtil.java similarity index 92% rename from src/main/java/io/mycat/server/packet/util/PacketUtil.java rename to src/main/java/io/mycat/backend/mysql/PacketUtil.java index 56788f4a2..076751ede 100644 --- a/src/main/java/io/mycat/server/packet/util/PacketUtil.java +++ b/src/main/java/io/mycat/backend/mysql/PacketUtil.java @@ -21,16 +21,16 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.packet.util; - -import io.mycat.server.ErrorCode; -import io.mycat.server.packet.BinaryPacket; -import io.mycat.server.packet.ErrorPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; +package io.mycat.backend.mysql; import java.io.UnsupportedEncodingException; +import io.mycat.config.ErrorCode; +import io.mycat.net.mysql.BinaryPacket; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; + /** * @author mycat */ diff --git a/src/main/java/io/mycat/server/packet/util/PreparedStatement.java b/src/main/java/io/mycat/backend/mysql/PreparedStatement.java similarity index 61% rename from src/main/java/io/mycat/server/packet/util/PreparedStatement.java rename to src/main/java/io/mycat/backend/mysql/PreparedStatement.java index 90aee80b2..78b5db730 100644 --- a/src/main/java/io/mycat/server/packet/util/PreparedStatement.java +++ b/src/main/java/io/mycat/backend/mysql/PreparedStatement.java @@ -21,10 +21,15 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet.util; +package io.mycat.backend.mysql; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; /** - * @author mycat + * @author mycat, CrazyPig */ public class PreparedStatement { @@ -33,6 +38,14 @@ public class PreparedStatement { private int columnsNumber; private int parametersNumber; private int[] parametersType; + /** + * 存放COM_STMT_SEND_LONG_DATA命令发送过来的字节数据 + *
+     * key : param_id
+     * value : byte data
+     * 
+ */ + private Map longDataMap; public PreparedStatement(long id, String statement, int columnsNumber, int parametersNumber) { this.id = id; @@ -40,6 +53,7 @@ public PreparedStatement(long id, String statement, int columnsNumber, int param this.columnsNumber = columnsNumber; this.parametersNumber = parametersNumber; this.parametersType = new int[parametersNumber]; + this.longDataMap = new HashMap(); } public long getId() { @@ -62,4 +76,32 @@ public int[] getParametersType() { return parametersType; } + public ByteArrayOutputStream getLongData(long paramId) { + return longDataMap.get(paramId); + } + + /** + * COM_STMT_RESET命令将调用该方法进行数据重置 + */ + public void resetLongData() { + for(Long paramId : longDataMap.keySet()) { + longDataMap.get(paramId).reset(); + } + } + + /** + * 追加数据到指定的预处理参数 + * @param paramId + * @param data + * @throws IOException + */ + public void appendLongData(long paramId, byte[] data) throws IOException { + if(getLongData(paramId) == null) { + ByteArrayOutputStream out = new ByteArrayOutputStream(); + out.write(data); + longDataMap.put(paramId, out); + } else { + longDataMap.get(paramId).write(data); + } + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/packet/util/SecurityUtil.java b/src/main/java/io/mycat/backend/mysql/SecurityUtil.java similarity index 97% rename from src/main/java/io/mycat/server/packet/util/SecurityUtil.java rename to src/main/java/io/mycat/backend/mysql/SecurityUtil.java index f2dc7a442..ff7cc4dcf 100644 --- a/src/main/java/io/mycat/server/packet/util/SecurityUtil.java +++ b/src/main/java/io/mycat/backend/mysql/SecurityUtil.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.packet.util; +package io.mycat.backend.mysql; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; @@ -67,7 +67,7 @@ public static final String scramble323(String pass, String seed) { chars[i] = (char) b; } seed1 = ((seed1 * 3) + seed2) % max; - seed2 = (seed1 + seed2 + 33) % max; +// seed2 = (seed1 + seed2 + 33) % max; d = (double) seed1 / (double) max; b = (byte) java.lang.Math.floor(d * 31); for (int i = 0; i < seed.length(); i++) { diff --git a/src/main/java/io/mycat/server/packet/util/StreamUtil.java b/src/main/java/io/mycat/backend/mysql/StreamUtil.java similarity index 99% rename from src/main/java/io/mycat/server/packet/util/StreamUtil.java rename to src/main/java/io/mycat/backend/mysql/StreamUtil.java index 9f5ea2543..5cc2fd923 100644 --- a/src/main/java/io/mycat/server/packet/util/StreamUtil.java +++ b/src/main/java/io/mycat/backend/mysql/StreamUtil.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet.util; +package io.mycat.backend.mysql; import java.io.EOFException; import java.io.IOException; @@ -183,8 +183,9 @@ public static final void writeDouble(OutputStream out, double d) throws IOExcept public static final long readLength(InputStream in) throws IOException { int length = in.read(); - if (length < 0) + if (length < 0) { throw new EOFException(); + } switch (length) { case 251: return NULL_LENGTH; diff --git a/src/main/java/io/mycat/backend/nio/MySQLBackendConnection.java b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnection.java similarity index 63% rename from src/main/java/io/mycat/backend/nio/MySQLBackendConnection.java rename to src/main/java/io/mycat/backend/mysql/nio/MySQLConnection.java index dd776e916..bc24785a9 100644 --- a/src/main/java/io/mycat/backend/nio/MySQLBackendConnection.java +++ b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnection.java @@ -1,58 +1,65 @@ -package io.mycat.backend.nio; +/* + * Copyright (c) 2013, 
OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.backend.mysql.nio; + +import io.mycat.backend.mysql.xa.TxState; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.mycat.MycatServer; -import io.mycat.backend.BackendConnection; -import io.mycat.backend.MySQLDataSource; +import io.mycat.backend.mysql.CharsetUtil; +import io.mycat.backend.mysql.SecurityUtil; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.config.Capabilities; +import io.mycat.config.Isolations; +import io.mycat.net.BackendAIOConnection; +import io.mycat.net.mysql.*; import io.mycat.route.RouteResultsetNode; -import io.mycat.server.Capabilities; -import io.mycat.server.GenalMySQLConnection; -import io.mycat.server.Isolations; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.exception.UnknownTxIsolationException; -import io.mycat.server.executors.ResponseHandler; -import io.mycat.server.packet.CommandPacket; -import io.mycat.server.packet.MySQLPacket; -import io.mycat.server.packet.QuitPacket; -import io.mycat.server.packet.ResultStatus; -import io.mycat.server.packet.util.CharsetUtil; +import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; import io.mycat.util.TimeUtil; +import io.mycat.util.exception.UnknownTxIsolationException; import java.io.UnsupportedEncodingException; -import java.nio.ByteBuffer; -import java.nio.channels.SocketChannel; +import java.nio.channels.NetworkChannel; +import java.security.NoSuchAlgorithmException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -public class MySQLBackendConnection extends GenalMySQLConnection implements - BackendConnection { - - private static final CommandPacket _READ_UNCOMMITTED = new CommandPacket(); - private static final CommandPacket _READ_COMMITTED = new CommandPacket(); - private static final CommandPacket _REPEATED_READ = new CommandPacket(); - private static final CommandPacket _SERIALIZABLE = new CommandPacket(); - 
private static final CommandPacket _AUTOCOMMIT_ON = new CommandPacket(); - private static final CommandPacket _AUTOCOMMIT_OFF = new CommandPacket(); - private static final CommandPacket _COMMIT = new CommandPacket(); - private static final CommandPacket _ROLLBACK = new CommandPacket(); +/** + * @author mycat + */ +public class MySQLConnection extends BackendAIOConnection { + private static final Logger LOGGER = LoggerFactory + .getLogger(MySQLConnection.class); private static final long CLIENT_FLAGS = initClientFlags(); + private volatile long lastTime; + private volatile String schema = null; + private volatile String oldSchema; private volatile boolean borrowed = false; - private volatile long lastTime; private volatile boolean modifiedSQLExecuted = false; - private volatile StatusSync statusSync; - private volatile boolean metaDataSyned = true; - private volatile int xaStatus = 0; private volatile int batchCmdCount = 0; - private MySQLDataSource pool; - private boolean fromSlaveDB; - private long threadId; - private final ResultStatus sqlResultStatus = new ResultStatus(); - private Object attachment; - private volatile ResponseHandler respHandler; - - private final AtomicBoolean isQuit; - private static long initClientFlags() { int flag = 0; flag |= Capabilities.CLIENT_LONG_PASSWORD; @@ -60,10 +67,10 @@ private static long initClientFlags() { flag |= Capabilities.CLIENT_LONG_FLAG; flag |= Capabilities.CLIENT_CONNECT_WITH_DB; // flag |= Capabilities.CLIENT_NO_SCHEMA; - boolean usingCompress = MycatServer.getInstance().getConfig() - .getSystem().getUseCompression() == 1; - if (usingCompress) { - flag |= Capabilities.CLIENT_COMPRESS; + boolean usingCompress=MycatServer.getInstance().getConfig().getSystem().getUseCompression()==1 ; + if(usingCompress) + { + flag |= Capabilities.CLIENT_COMPRESS; } flag |= Capabilities.CLIENT_ODBC; flag |= Capabilities.CLIENT_LOCAL_FILES; @@ -81,6 +88,14 @@ private static long initClientFlags() { return flag; } + private static final 
CommandPacket _READ_UNCOMMITTED = new CommandPacket(); + private static final CommandPacket _READ_COMMITTED = new CommandPacket(); + private static final CommandPacket _REPEATED_READ = new CommandPacket(); + private static final CommandPacket _SERIALIZABLE = new CommandPacket(); + private static final CommandPacket _AUTOCOMMIT_ON = new CommandPacket(); + private static final CommandPacket _AUTOCOMMIT_OFF = new CommandPacket(); + private static final CommandPacket _COMMIT = new CommandPacket(); + private static final CommandPacket _ROLLBACK = new CommandPacket(); static { _READ_UNCOMMITTED.packetId = 0; _READ_UNCOMMITTED.command = MySQLPacket.COM_QUERY; @@ -112,14 +127,33 @@ private static long initClientFlags() { _ROLLBACK.arg = "rollback".getBytes(); } - public MySQLBackendConnection(SocketChannel channel, boolean fromSlaveDB) { + private MySQLDataSource pool; + private boolean fromSlaveDB; + private long threadId; + private HandshakePacket handshake; + private volatile int txIsolation; + private volatile boolean autocommit; + private long clientFlags; + private boolean isAuthenticated; + private String user; + private String password; + private Object attachment; + private ResponseHandler respHandler; + + private final AtomicBoolean isQuit; + private volatile StatusSync statusSync; + private volatile boolean metaDataSyned = true; + private volatile int xaStatus = 0; + + public MySQLConnection(NetworkChannel channel, boolean fromSlaveDB) { super(channel); this.clientFlags = CLIENT_FLAGS; this.lastTime = TimeUtil.currentTimeMillis(); this.isQuit = new AtomicBoolean(false); this.autocommit = true; this.fromSlaveDB = fromSlaveDB; - + // 设为默认值,免得每个初始化好的连接都要去同步一下 + this.txIsolation = MycatServer.getInstance().getConfig().getSystem().getTxIsolation(); } public int getXaStatus() { @@ -130,17 +164,28 @@ public void setXaStatus(int xaStatus) { this.xaStatus = xaStatus; } - // public void onConnectFailed(Throwable t) { - // if (handler instanceof MySQLConnectionHandler) { - 
// MySQLConnectionHandler theHandler = (MySQLConnectionHandler) handler; - // theHandler.connectionError(t); - // } else { - // ((MySQLConnectionAuthenticator) handler).connectionError(this, t); - // } - // } + public void onConnectFailed(Throwable t) { + if (handler instanceof MySQLConnectionHandler) { + MySQLConnectionHandler theHandler = (MySQLConnectionHandler) handler; + theHandler.connectionError(t); + } else { + ((MySQLConnectionAuthenticator) handler).connectionError(this, t); + } + } + + public String getSchema() { + return this.schema; + } - public ResultStatus getSqlResultStatus() { - return sqlResultStatus; + public void setSchema(String newSchema) { + String curSchema = schema; + if (curSchema == null) { + this.schema = newSchema; + this.oldSchema = newSchema; + } else { + this.oldSchema = curSchema; + this.schema = newSchema; + } } public MySQLDataSource getPool() { @@ -151,6 +196,26 @@ public void setPool(MySQLDataSource pool) { this.pool = pool; } + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + public void setPassword(String password) { + this.password = password; + } + + public HandshakePacket getHandshake() { + return handshake; + } + + public void setHandshake(HandshakePacket handshake) { + this.handshake = handshake; + } + public long getThreadId() { return threadId; } @@ -159,6 +224,34 @@ public void setThreadId(long threadId) { this.threadId = threadId; } + public boolean isAuthenticated() { + return isAuthenticated; + } + + public void setAuthenticated(boolean isAuthenticated) { + this.isAuthenticated = isAuthenticated; + } + + public String getPassword() { + return password; + } + + public void authenticate() { + AuthPacket packet = new AuthPacket(); + packet.packetId = 1; + packet.clientFlags = clientFlags; + packet.maxPacketSize = maxPacketSize; + packet.charsetIndex = this.charsetIndex; + packet.user = user; + try { + packet.password = passwd(password, handshake); + } catch 
(NoSuchAlgorithmException e) { + throw new RuntimeException(e.getMessage()); + } + packet.database = schema; + packet.write(this); + } + public boolean isAutocommit() { return autocommit; } @@ -220,10 +313,6 @@ private void getAutocommitCommand(StringBuilder sb, boolean autoCommit) { } } - public ResponseHandler getRespHandler() { - return respHandler; - } - private static class StatusSync { private final String schema; private final Integer charsetIndex; @@ -244,7 +333,7 @@ public StatusSync(boolean xaStarted, String schema, this.synCmdCount = new AtomicInteger(synCount); } - public boolean synAndExecuted(MySQLBackendConnection conn) { + public boolean synAndExecuted(MySQLConnection conn) { int remains = synCmdCount.decrementAndGet(); if (remains == 0) {// syn command finished this.updateConnectionInfo(conn); @@ -256,10 +345,9 @@ public boolean synAndExecuted(MySQLBackendConnection conn) { return false; } - private void updateConnectionInfo(MySQLBackendConnection conn) + private void updateConnectionInfo(MySQLConnection conn) { - conn.xaStatus = (xaStarted == true) ? 
1 : 0; if (schema != null) { conn.schema = schema; conn.oldSchema = conn.schema; @@ -295,12 +383,15 @@ public boolean syncAndExcute() { } - public void execute(RouteResultsetNode rrn, MySQLFrontConnection sc, + public void execute(RouteResultsetNode rrn, ServerConnection sc, boolean autocommit) throws UnsupportedEncodingException { if (!modifiedSQLExecuted && rrn.isModifySQL()) { modifiedSQLExecuted = true; } - String xaTXID = sc.getSession2().getXaTXID(); + String xaTXID = null; + if(sc.getSession2().getXaTXID()!=null){ + xaTXID = sc.getSession2().getXaTXID()+",'"+getSchema()+"'"; + } synAndDoExecute(xaTXID, rrn, sc.getCharsetIndex(), sc.getTxIsolation(), autocommit); } @@ -315,17 +406,24 @@ private void synAndDoExecute(String xaTxID, RouteResultsetNode rrn, // never executed modify sql,so auto commit boolean expectAutocommit = !modifiedSQLExecuted || isFromSlaveDB() || clientAutoCommit; - if (expectAutocommit == false && xaTxID != null && xaStatus == 0) { - clientTxIsoLation = Isolations.SERIALIZABLE; + if (expectAutocommit == false && xaTxID != null && xaStatus == TxState.TX_INITIALIZE_STATE) { + //clientTxIsoLation = Isolations.SERIALIZABLE; xaCmd = "XA START " + xaTxID + ';'; - + this.xaStatus = TxState.TX_STARTED_STATE; } int schemaSyn = conSchema.equals(oldSchema) ? 0 : 1; - int charsetSyn = (this.charsetIndex == clientCharSetIndex) ? 0 : 1; + int charsetSyn = 0; + if (this.charsetIndex != clientCharSetIndex) { + //need to syn the charset of connection. + //set current connection charset to client charset. + //otherwise while sending commend to server the charset will not coincidence. + setCharset(CharsetUtil.getCharset(clientCharSetIndex)); + charsetSyn = 1; + } int txIsoLationSyn = (txIsolation == clientTxIsoLation) ? 0 : 1; int autoCommitSyn = (conAutoComit == expectAutocommit) ? 
0 : 1; - int synCount = schemaSyn + charsetSyn + txIsoLationSyn + autoCommitSyn; - if (synCount == 0) { + int synCount = schemaSyn + charsetSyn + txIsoLationSyn + autoCommitSyn + (xaCmd!=null?1:0); + if (synCount == 0 && this.xaStatus != TxState.TX_STARTED_STATE) { // not need syn connection sendQueryCmd(rrn.getStatement()); return; @@ -334,6 +432,7 @@ private void synAndDoExecute(String xaTxID, RouteResultsetNode rrn, StringBuilder sb = new StringBuilder(); if (schemaSyn == 1) { schemaCmd = getChangeSchemaCommand(conSchema); + // getChangeSchemaCommand(sb, conSchema); } if (charsetSyn == 1) { @@ -362,7 +461,7 @@ private void synAndDoExecute(String xaTxID, RouteResultsetNode rrn, schemaCmd.write(this); } // and our query sql to multi command at last - sb.append(rrn.getStatement()); + sb.append(rrn.getStatement()+";"); // syn and execute others this.sendQueryCmd(sb.toString()); // waiting syn result... @@ -402,8 +501,8 @@ public void setLastTime(long lastTime) { public void quit() { if (isQuit.compareAndSet(false, true) && !isClosed()) { if (isAuthenticated) { - write(QuitPacket.QUIT); - write(ByteBuffer.allocate(10)); + write(writeToBuffer(QuitPacket.QUIT, allocate())); + write(allocate()); } else { close("normal"); } @@ -412,7 +511,7 @@ public void quit() { @Override public void close(String reason) { - if (!isClosed) { + if (!isClosed.get()) { isQuit.set(true); super.close(reason); pool.connectionClosed(this); @@ -471,18 +570,75 @@ public void release() { pool.releaseChannel(this); } - public void setResponseHandler(ResponseHandler queryHandler) { - respHandler = queryHandler; + public boolean setResponseHandler(ResponseHandler queryHandler) { + if (handler instanceof MySQLConnectionHandler) { + ((MySQLConnectionHandler) handler).setResponseHandler(queryHandler); + respHandler = queryHandler; + return true; + } else if (queryHandler != null) { + LOGGER.warn("set not MySQLConnectionHandler " + + queryHandler.getClass().getCanonicalName()); + } + return false; + } 
+ + /** + * 写队列为空,可以继续写数据 + */ + public void writeQueueAvailable() { + if (respHandler != null) { + respHandler.writeQueueAvailable(); + } + } + + /** + * 记录sql执行信息 + */ + public void recordSql(String host, String schema, String stmt) { + // final long now = TimeUtil.currentTimeMillis(); + // if (now > this.lastTime) { + // // long time = now - this.lastTime; + // // SQLRecorder sqlRecorder = this.pool.getSqlRecorder(); + // // if (sqlRecorder.check(time)) { + // // SQLRecord recorder = new SQLRecord(); + // // recorder.host = host; + // // recorder.schema = schema; + // // recorder.statement = stmt; + // // recorder.startTime = lastTime; + // // recorder.executeTime = time; + // // recorder.dataNode = pool.getName(); + // // recorder.dataNodeIndex = pool.getIndex(); + // // sqlRecorder.add(recorder); + // // } + // } + // this.lastTime = now; + } + + private static byte[] passwd(String pass, HandshakePacket hs) + throws NoSuchAlgorithmException { + if (pass == null || pass.length() == 0) { + return null; + } + byte[] passwd = pass.getBytes(); + int sl1 = hs.seed.length; + int sl2 = hs.restOfScrambleBuff.length; + byte[] seed = new byte[sl1 + sl2]; + System.arraycopy(hs.seed, 0, seed, 0, sl1); + System.arraycopy(hs.restOfScrambleBuff, 0, seed, sl1, sl2); + return SecurityUtil.scramble411(passwd, seed); } + @Override public boolean isFromSlaveDB() { return fromSlaveDB; } + @Override public boolean isBorrowed() { return borrowed; } + @Override public void setBorrowed(boolean borrowed) { this.lastTime = TimeUtil.currentTimeMillis(); this.borrowed = borrowed; @@ -491,6 +647,7 @@ public void setBorrowed(boolean borrowed) { @Override public String toString() { return "MySQLConnection [id=" + id + ", lastTime=" + lastTime + + ", user=" + user + ", schema=" + schema + ", old shema=" + oldSchema + ", borrowed=" + borrowed + ", fromSlaveDB=" + fromSlaveDB + ", threadId=" + threadId + ", charset=" + charset @@ -501,6 +658,7 @@ public String toString() { + ", 
modifiedSQLExecuted=" + modifiedSQLExecuted + "]"; } + @Override public boolean isModifiedSQLExecuted() { return modifiedSQLExecuted; } diff --git a/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionAuthenticator.java b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionAuthenticator.java new file mode 100644 index 000000000..be6e13ee8 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionAuthenticator.java @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.backend.mysql.nio; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.CharsetUtil; +import io.mycat.backend.mysql.SecurityUtil; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.config.Capabilities; +import io.mycat.net.ConnectionException; +import io.mycat.net.NIOHandler; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.HandshakePacket; +import io.mycat.net.mysql.OkPacket; +import io.mycat.net.mysql.Reply323Packet; + +/** + * MySQL 验证处理器 + * + * @author mycat + */ +public class MySQLConnectionAuthenticator implements NIOHandler { + private static final Logger LOGGER = LoggerFactory + .getLogger(MySQLConnectionAuthenticator.class); + private final MySQLConnection source; + private final ResponseHandler listener; + + public MySQLConnectionAuthenticator(MySQLConnection source, + ResponseHandler listener) { + this.source = source; + this.listener = listener; + } + + public void connectionError(MySQLConnection source, Throwable e) { + listener.connectionError(e, source); + } + + @Override + public void handle(byte[] data) { + try { + switch (data[4]) { + case OkPacket.FIELD_COUNT: + HandshakePacket packet = source.getHandshake(); + if (packet == null) { + processHandShakePacket(data); + // 发送认证数据包 + source.authenticate(); + break; + } + // 处理认证结果 + source.setHandler(new MySQLConnectionHandler(source)); + source.setAuthenticated(true); + boolean clientCompress = Capabilities.CLIENT_COMPRESS==(Capabilities.CLIENT_COMPRESS & packet.serverCapabilities); + boolean usingCompress= MycatServer.getInstance().getConfig().getSystem().getUseCompression()==1 ; + if(clientCompress&&usingCompress) + { + source.setSupportCompress(true); + } + if (listener != null) { + listener.connectionAcquired(source); + } + break; + case ErrorPacket.FIELD_COUNT: + ErrorPacket err = new ErrorPacket(); + err.read(data); + 
String errMsg = new String(err.message); + LOGGER.warn("can't connect to mysql server ,errmsg:"+errMsg+" "+source); + //source.close(errMsg); + throw new ConnectionException(err.errno, errMsg); + + case EOFPacket.FIELD_COUNT: + auth323(data[3]); + break; + default: + packet = source.getHandshake(); + if (packet == null) { + processHandShakePacket(data); + // 发送认证数据包 + source.authenticate(); + break; + } else { + throw new RuntimeException("Unknown Packet!"); + } + + } + + } catch (RuntimeException e) { + if (listener != null) { + listener.connectionError(e, source); + return; + } + throw e; + } + } + + private void processHandShakePacket(byte[] data) { + // 设置握手数据包 + HandshakePacket packet= new HandshakePacket(); + packet.read(data); + source.setHandshake(packet); + source.setThreadId(packet.threadId); + + // 设置字符集编码 + int charsetIndex = (packet.serverCharsetIndex & 0xff); + String charset = CharsetUtil.getCharset(charsetIndex); + if (charset != null) { + source.setCharset(charset); + } else { + throw new RuntimeException("Unknown charsetIndex:" + charsetIndex); + } + } + + private void auth323(byte packetId) { + // 发送323响应认证数据包 + Reply323Packet r323 = new Reply323Packet(); + r323.packetId = ++packetId; + String pass = source.getPassword(); + if (pass != null && pass.length() > 0) { + byte[] seed = source.getHandshake().seed; + r323.seed = SecurityUtil.scramble323(pass, new String(seed)) + .getBytes(); + } + r323.write(source); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionFactory.java b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionFactory.java new file mode 100644 index 000000000..1c78e56c3 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionFactory.java @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.backend.mysql.nio; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.channels.AsynchronousSocketChannel; +import java.nio.channels.CompletionHandler; +import java.nio.channels.NetworkChannel; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.config.model.DBHostConfig; +import io.mycat.net.NIOConnector; +import io.mycat.net.factory.BackendConnectionFactory; + +/** + * @author mycat + */ +public class MySQLConnectionFactory extends BackendConnectionFactory { + @SuppressWarnings({ "unchecked", "rawtypes" }) + public MySQLConnection make(MySQLDataSource pool, ResponseHandler handler, + String schema) throws IOException { + + DBHostConfig dsc = pool.getConfig(); + NetworkChannel channel = openSocketChannel(MycatServer.getInstance() + .isAIO()); + + MySQLConnection c = new MySQLConnection(channel, pool.isReadNode()); + MycatServer.getInstance().getConfig().setSocketParams(c, false); + c.setHost(dsc.getIp()); + 
c.setPort(dsc.getPort()); + c.setUser(dsc.getUser()); + c.setPassword(dsc.getPassword()); + c.setSchema(schema); + c.setHandler(new MySQLConnectionAuthenticator(c, handler)); + c.setPool(pool); + c.setIdleTimeout(pool.getConfig().getIdleTimeout()); + if (channel instanceof AsynchronousSocketChannel) { + ((AsynchronousSocketChannel) channel).connect( + new InetSocketAddress(dsc.getIp(), dsc.getPort()), c, + (CompletionHandler) MycatServer.getInstance() + .getConnector()); + } else { + ((NIOConnector) MycatServer.getInstance().getConnector()) + .postConnect(c); + + } + return c; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionHandler.java b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionHandler.java new file mode 100644 index 000000000..eeaa68e04 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionHandler.java @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.backend.mysql.nio; + +import java.util.ArrayList; +import java.util.List; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.mysql.ByteUtil; +import io.mycat.backend.mysql.nio.handler.LoadDataResponseHandler; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.net.handler.BackendAsyncHandler; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.OkPacket; +import io.mycat.net.mysql.RequestFilePacket; + +/** + * life cycle: from connection establish to close
+ * + * @author mycat + */ +public class MySQLConnectionHandler extends BackendAsyncHandler { + private static final Logger logger = LoggerFactory + .getLogger(MySQLConnectionHandler.class); + private static final int RESULT_STATUS_INIT = 0; + private static final int RESULT_STATUS_HEADER = 1; + private static final int RESULT_STATUS_FIELD_EOF = 2; + + private final MySQLConnection source; + private volatile int resultStatus; + private volatile byte[] header; + private volatile List fields; + + /** + * life cycle: one SQL execution + */ + private volatile ResponseHandler responseHandler; + + public MySQLConnectionHandler(MySQLConnection source) { + this.source = source; + this.resultStatus = RESULT_STATUS_INIT; + } + + public void connectionError(Throwable e) { + if (responseHandler != null) { + responseHandler.connectionError(e, source); + } + + } + + public MySQLConnection getSource() { + return source; + } + + @Override + public void handle(byte[] data) { + offerData(data, source.getProcessor().getExecutor()); + } + + @Override + protected void offerDataError() { + resultStatus = RESULT_STATUS_INIT; + throw new RuntimeException("offer data error!"); + } + + @Override + protected void handleData(byte[] data) { + switch (resultStatus) { + case RESULT_STATUS_INIT: + switch (data[4]) { + case OkPacket.FIELD_COUNT: + handleOkPacket(data); + break; + case ErrorPacket.FIELD_COUNT: + handleErrorPacket(data); + break; + case RequestFilePacket.FIELD_COUNT: + handleRequestPacket(data); + break; + default: + resultStatus = RESULT_STATUS_HEADER; + header = data; + fields = new ArrayList((int) ByteUtil.readLength(data, + 4)); + } + break; + case RESULT_STATUS_HEADER: + switch (data[4]) { + case ErrorPacket.FIELD_COUNT: + resultStatus = RESULT_STATUS_INIT; + handleErrorPacket(data); + break; + case EOFPacket.FIELD_COUNT: + resultStatus = RESULT_STATUS_FIELD_EOF; + handleFieldEofPacket(data); + break; + default: + fields.add(data); + } + break; + case RESULT_STATUS_FIELD_EOF: + 
switch (data[4]) { + case ErrorPacket.FIELD_COUNT: + resultStatus = RESULT_STATUS_INIT; + handleErrorPacket(data); + break; + case EOFPacket.FIELD_COUNT: + resultStatus = RESULT_STATUS_INIT; + handleRowEofPacket(data); + break; + default: + handleRowPacket(data); + } + break; + default: + throw new RuntimeException("unknown status!"); + } + } + + public void setResponseHandler(ResponseHandler responseHandler) { + // logger.info("set response handler "+responseHandler); + // if (this.responseHandler != null && responseHandler != null) { + // throw new RuntimeException("reset agani!"); + // } + this.responseHandler = responseHandler; + } + + /** + * OK数据包处理 + */ + private void handleOkPacket(byte[] data) { + ResponseHandler respHand = responseHandler; + if (respHand != null) { + respHand.okResponse(data, source); + } + } + + /** + * ERROR数据包处理 + */ + private void handleErrorPacket(byte[] data) { + ResponseHandler respHand = responseHandler; + if (respHand != null) { + respHand.errorResponse(data, source); + } else { + closeNoHandler(); + } + } + + /** + * load data file 请求文件数据包处理 + */ + private void handleRequestPacket(byte[] data) { + ResponseHandler respHand = responseHandler; + if (respHand != null && respHand instanceof LoadDataResponseHandler) { + ((LoadDataResponseHandler) respHand).requestDataResponse(data, + source); + } else { + closeNoHandler(); + } + } + + /** + * 字段数据包结束处理 + */ + private void handleFieldEofPacket(byte[] data) { + ResponseHandler respHand = responseHandler; + if (respHand != null) { + respHand.fieldEofResponse(header, fields, data, source); + } else { + closeNoHandler(); + } + } + + /** + * 行数据包处理 + */ + private void handleRowPacket(byte[] data) { + ResponseHandler respHand = responseHandler; + if (respHand != null) { + respHand.rowResponse(data, source); + } else { + closeNoHandler(); + + } + } + + private void closeNoHandler() { + if (!source.isClosedOrQuit()) { + source.close("no handler"); + logger.warn("no handler bind in this con " + 
this + " client:" + + source); + } + } + + /** + * 行数据包结束处理 + */ + private void handleRowEofPacket(byte[] data) { + if (responseHandler != null) { + responseHandler.rowEofResponse(data, source); + } else { + closeNoHandler(); + } + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/mysql/nio/MySQLDataSource.java b/src/main/java/io/mycat/backend/mysql/nio/MySQLDataSource.java new file mode 100644 index 000000000..d2cbccaba --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/nio/MySQLDataSource.java @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.backend.mysql.nio; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.Socket; +import java.security.NoSuchAlgorithmException; + +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.heartbeat.DBHeartbeat; +import io.mycat.backend.heartbeat.MySQLHeartbeat; +import io.mycat.backend.mysql.SecurityUtil; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.config.Capabilities; +import io.mycat.config.model.DBHostConfig; +import io.mycat.config.model.DataHostConfig; +import io.mycat.net.mysql.AuthPacket; +import io.mycat.net.mysql.BinaryPacket; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.HandshakePacket; +import io.mycat.net.mysql.OkPacket; +import io.mycat.net.mysql.QuitPacket; +import io.mycat.net.mysql.Reply323Packet; + +/** + * @author mycat + */ +public class MySQLDataSource extends PhysicalDatasource { + + private final MySQLConnectionFactory factory; + + public MySQLDataSource(DBHostConfig config, DataHostConfig hostConfig, + boolean isReadNode) { + super(config, hostConfig, isReadNode); + this.factory = new MySQLConnectionFactory(); + + } + + @Override + public void createNewConnection(ResponseHandler handler,String schema) throws IOException { + factory.make(this, handler,schema); + } + + private long getClientFlags() { + int flag = 0; + flag |= Capabilities.CLIENT_LONG_PASSWORD; + flag |= Capabilities.CLIENT_FOUND_ROWS; + flag |= Capabilities.CLIENT_LONG_FLAG; + flag |= Capabilities.CLIENT_CONNECT_WITH_DB; + // flag |= Capabilities.CLIENT_NO_SCHEMA; + // flag |= Capabilities.CLIENT_COMPRESS; + flag |= Capabilities.CLIENT_ODBC; + // flag |= Capabilities.CLIENT_LOCAL_FILES; + flag |= Capabilities.CLIENT_IGNORE_SPACE; + flag |= Capabilities.CLIENT_PROTOCOL_41; + flag |= 
Capabilities.CLIENT_INTERACTIVE; + // flag |= Capabilities.CLIENT_SSL; + flag |= Capabilities.CLIENT_IGNORE_SIGPIPE; + flag |= Capabilities.CLIENT_TRANSACTIONS; + // flag |= Capabilities.CLIENT_RESERVED; + flag |= Capabilities.CLIENT_SECURE_CONNECTION; + // client extension + // flag |= Capabilities.CLIENT_MULTI_STATEMENTS; + // flag |= Capabilities.CLIENT_MULTI_RESULTS; + return flag; + } + + + private byte[] passwd(String pass, HandshakePacket hs) throws NoSuchAlgorithmException { + if (pass == null || pass.length() == 0) { + return null; + } + byte[] passwd = pass.getBytes(); + int sl1 = hs.seed.length; + int sl2 = hs.restOfScrambleBuff.length; + byte[] seed = new byte[sl1 + sl2]; + System.arraycopy(hs.seed, 0, seed, 0, sl1); + System.arraycopy(hs.restOfScrambleBuff, 0, seed, sl1, sl2); + return SecurityUtil.scramble411(passwd, seed); + } + + @Override + public boolean testConnection(String schema) throws IOException { + + boolean isConnected = true; + + Socket socket = null; + InputStream in = null; + OutputStream out = null; + try { + socket = new Socket(this.getConfig().getIp(), this.getConfig().getPort()); + socket.setSoTimeout(1000 * 20); + socket.setReceiveBufferSize( 32768 ); + socket.setSendBufferSize( 32768 ); + socket.setTcpNoDelay(true); + socket.setKeepAlive(true); + + in = new BufferedInputStream(socket.getInputStream(), 32768); + out = new BufferedOutputStream( socket.getOutputStream(), 32768 ); + + /** + * Phase 1: MySQL to client. Send handshake packet. + */ + BinaryPacket bin1 = new BinaryPacket(); + bin1.read(in); + + HandshakePacket handshake = new HandshakePacket(); + handshake.read( bin1 ); + + /** + * Phase 2: client to MySQL. Send auth packet. 
+ */ + AuthPacket authPacket = new AuthPacket(); + authPacket.packetId = 1; + authPacket.clientFlags = getClientFlags(); + authPacket.maxPacketSize = 1024 * 1024 * 16; + authPacket.charsetIndex = handshake.serverCharsetIndex & 0xff; + authPacket.user = this.getConfig().getUser();; + try { + authPacket.password = passwd(this.getConfig().getPassword(), handshake); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e.getMessage()); + } + authPacket.database = schema; + authPacket.write(out); + out.flush(); + + /** + * Phase 3: MySQL to client. send OK/ERROR packet. + */ + BinaryPacket bin2 = new BinaryPacket(); + bin2.read(in); + switch (bin2.data[0]) { + case OkPacket.FIELD_COUNT: + break; + case ErrorPacket.FIELD_COUNT: + ErrorPacket err = new ErrorPacket(); + err.read(bin2); + isConnected = false; + case EOFPacket.FIELD_COUNT: + // 发送323响应认证数据包 + Reply323Packet r323 = new Reply323Packet(); + r323.packetId = ++bin2.packetId; + String passwd = this.getConfig().getPassword(); + if (passwd != null && passwd.length() > 0) { + r323.seed = SecurityUtil.scramble323(passwd, new String(handshake.seed)).getBytes(); + } + r323.write(out); + out.flush(); + break; + } + + } catch (IOException e) { + isConnected = false; + } finally { + try { + if (in != null) { + in.close(); + } + } catch (IOException e) {} + + try { + if (out != null) { + out.write(QuitPacket.QUIT); + out.flush(); + out.close(); + } + } catch (IOException e) {} + + try { + if (socket != null) + socket.close(); + } catch (IOException e) {} + } + + return isConnected; + } + + @Override + public DBHeartbeat createHeartBeat() { + return new MySQLHeartbeat(this); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/executors/CommitNodeHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/CommitNodeHandler.java similarity index 56% rename from src/main/java/io/mycat/server/executors/CommitNodeHandler.java rename to 
src/main/java/io/mycat/backend/mysql/nio/handler/CommitNodeHandler.java index 97859a4b1..ec58ff973 100644 --- a/src/main/java/io/mycat/server/executors/CommitNodeHandler.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/CommitNodeHandler.java @@ -21,22 +21,25 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.executors; +package io.mycat.backend.mysql.nio.handler; + +import java.util.List; + +import io.mycat.backend.mysql.xa.TxState; +import io.mycat.config.ErrorCode; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.mycat.backend.BackendConnection; -import io.mycat.backend.nio.MySQLBackendConnection; +import io.mycat.backend.mysql.nio.MySQLConnection; +import io.mycat.net.mysql.ErrorPacket; import io.mycat.server.NonBlockingSession; -import io.mycat.server.packet.ErrorPacket; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; +import io.mycat.server.ServerConnection; /** * @author mycat */ public class CommitNodeHandler implements ResponseHandler { - public static final Logger LOGGER = LoggerFactory + private static final Logger LOGGER = LoggerFactory .getLogger(CommitNodeHandler.class); private final NonBlockingSession session; @@ -46,19 +49,30 @@ public CommitNodeHandler(NonBlockingSession session) { public void commit(BackendConnection conn) { conn.setResponseHandler(CommitNodeHandler.this); - if (conn instanceof MySQLBackendConnection) { - MySQLBackendConnection mysqlCon = (MySQLBackendConnection) conn; - if (mysqlCon.getXaStatus() == 1) { - String xaTxId = session.getXaTXID(); - String[] cmds = new String[] { "XA END " + xaTxId, - "XA PREPARE " + xaTxId }; - mysqlCon.execBatchCmd(cmds); - } else { - conn.commit(); - } - } else { - conn.commit(); + boolean isClosed=conn.isClosedOrQuit(); + if(isClosed) + { + session.getSource().writeErrMessage(ErrorCode.ER_UNKNOWN_ERROR, + "receive commit,but find backend con is closed or quit"); + LOGGER.error( conn+"receive commit,but fond 
backend con is closed or quit"); } + if(conn instanceof MySQLConnection) + { + MySQLConnection mysqlCon = (MySQLConnection) conn; + if (mysqlCon.getXaStatus() == 1) + { + String xaTxId = session.getXaTXID()+",'"+mysqlCon.getSchema()+"'"; + String[] cmds = new String[]{"XA END " + xaTxId, + "XA PREPARE " + xaTxId}; + mysqlCon.execBatchCmd(cmds); + } else + { + conn.commit(); + } + }else + { + conn.commit(); + } } @Override @@ -69,25 +83,43 @@ public void connectionAcquired(BackendConnection conn) { @Override public void okResponse(byte[] ok, BackendConnection conn) { - if (conn instanceof MySQLBackendConnection) { - MySQLBackendConnection mysqlCon = (MySQLBackendConnection) conn; - switch (mysqlCon.getXaStatus()) { - case 1: - if (mysqlCon.batchCmdFinished()) { - String xaTxId = session.getXaTXID(); - mysqlCon.execCmd("XA COMMIT " + xaTxId); - mysqlCon.setXaStatus(2); + if(conn instanceof MySQLConnection) + { + MySQLConnection mysqlCon = (MySQLConnection) conn; + switch (mysqlCon.getXaStatus()) + { + case TxState.TX_STARTED_STATE: + if (mysqlCon.batchCmdFinished()) + { + String xaTxId = session.getXaTXID()+",'"+mysqlCon.getSchema()+"'"; + mysqlCon.execCmd("XA COMMIT " + xaTxId); + mysqlCon.setXaStatus(TxState.TX_PREPARED_STATE); + } + return; + case TxState.TX_PREPARED_STATE: + { + mysqlCon.setXaStatus(TxState.TX_INITIALIZE_STATE); + break; } - return; - case 2: { - mysqlCon.setXaStatus(0); - break; + default: + // LOGGER.error("Wrong XA status flag!"); } + + /* 1. 事务提交后,xa 事务结束 */ + if(TxState.TX_INITIALIZE_STATE==mysqlCon.getXaStatus()){ + if(session.getXaTXID()!=null){ + session.setXATXEnabled(false); + } } } + + /* 2. 
preAcStates 为true,事务结束后,需要设置为true。preAcStates 为ac上一个状态 */ + if(session.getSource().isPreAcStates()&&!session.getSource().isAutocommit()){ + session.getSource().setAutocommit(true); + } session.clearResources(false); - session.getSource().write(ok); - + ServerConnection source = session.getSource(); + source.write(ok); } @Override @@ -111,8 +143,8 @@ public void rowEofResponse(byte[] eof, BackendConnection conn) { public void fieldEofResponse(byte[] header, List fields, byte[] eof, BackendConnection conn) { LOGGER.error(new StringBuilder().append("unexpected packet for ") - .append(conn).append(" bound by ").append(session.getSource()) - .append(": field's eof").toString()); + .append(conn).append(" bound by ").append(session.getSource()) + .append(": field's eof").toString()); } @Override @@ -122,14 +154,21 @@ public void rowResponse(byte[] row, BackendConnection conn) { .append(": row data packet").toString()); } + @Override + public void writeQueueAvailable() { + + } + @Override public void connectionError(Throwable e, BackendConnection conn) { + } @Override public void connectionClose(BackendConnection conn, String reason) { + } -} \ No newline at end of file +} diff --git a/src/main/java/io/mycat/server/executors/ConnectionHeartBeatHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/ConnectionHeartBeatHandler.java similarity index 95% rename from src/main/java/io/mycat/server/executors/ConnectionHeartBeatHandler.java rename to src/main/java/io/mycat/backend/mysql/nio/handler/ConnectionHeartBeatHandler.java index d0ae02d28..583ac885b 100644 --- a/src/main/java/io/mycat/server/executors/ConnectionHeartBeatHandler.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/ConnectionHeartBeatHandler.java @@ -21,12 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.executors; - -import io.mycat.backend.BackendConnection; -import io.mycat.server.packet.ErrorPacket; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +package io.mycat.backend.mysql.nio.handler; import java.util.Collection; import java.util.Iterator; @@ -36,6 +31,11 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.ReentrantLock; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.BackendConnection; +import io.mycat.net.mysql.ErrorPacket; + /** * heartbeat check for mysql connections * @@ -43,7 +43,7 @@ * */ public class ConnectionHeartBeatHandler implements ResponseHandler { - public static final Logger LOGGER = LoggerFactory + private static final Logger LOGGER = LoggerFactory .getLogger(ConnectionHeartBeatHandler.class); protected final ReentrantLock lock = new ReentrantLock(); private final ConcurrentHashMap allCons = new ConcurrentHashMap(); @@ -153,6 +153,11 @@ private void removeFinished(BackendConnection con) { this.allCons.remove(id); } + @Override + public void writeQueueAvailable() { + + } + @Override public void connectionClose(BackendConnection conn, String reason) { removeFinished(conn); diff --git a/src/main/java/io/mycat/server/executors/DelegateResponseHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/DelegateResponseHandler.java similarity index 95% rename from src/main/java/io/mycat/server/executors/DelegateResponseHandler.java rename to src/main/java/io/mycat/backend/mysql/nio/handler/DelegateResponseHandler.java index edf2709d2..cedd06407 100644 --- a/src/main/java/io/mycat/server/executors/DelegateResponseHandler.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/DelegateResponseHandler.java @@ -21,12 +21,12 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.executors; - -import io.mycat.backend.BackendConnection; +package io.mycat.backend.mysql.nio.handler; import java.util.List; +import io.mycat.backend.BackendConnection; + /** * @author mycat */ @@ -75,7 +75,11 @@ public void rowEofResponse(byte[] eof, BackendConnection conn) { target.rowEofResponse(eof, conn); } - + @Override + public void writeQueueAvailable() { + target.writeQueueAvailable(); + + } @Override public void connectionClose(BackendConnection conn, String reason) { diff --git a/src/main/java/io/mycat/server/executors/FetchStoreNodeOfChildTableHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/FetchStoreNodeOfChildTableHandler.java similarity index 63% rename from src/main/java/io/mycat/server/executors/FetchStoreNodeOfChildTableHandler.java rename to src/main/java/io/mycat/backend/mysql/nio/handler/FetchStoreNodeOfChildTableHandler.java index 20f0550eb..548cb22ab 100644 --- a/src/main/java/io/mycat/server/executors/FetchStoreNodeOfChildTableHandler.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/FetchStoreNodeOfChildTableHandler.java @@ -21,24 +21,26 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.executors; +package io.mycat.backend.mysql.nio.handler; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.ReentrantLock; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.mycat.MycatServer; import io.mycat.backend.BackendConnection; -import io.mycat.backend.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBNode; import io.mycat.cache.CachePool; +import io.mycat.config.MycatConfig; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.RowDataPacket; import io.mycat.route.RouteResultsetNode; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.packet.ErrorPacket; -import io.mycat.server.packet.RowDataPacket; +import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReentrantLock; /** * company where id=(select company_id from customer where id=3); the one which @@ -55,6 +57,67 @@ public class FetchStoreNodeOfChildTableHandler implements ResponseHandler { private volatile String dataNode; private AtomicInteger finished = new AtomicInteger(0); protected final ReentrantLock lock = new ReentrantLock(); + + public String execute(String schema, String sql, List dataNodes, ServerConnection sc) { + + String key = schema + ":" + sql; + CachePool cache = MycatServer.getInstance().getCacheService() + .getCachePool("ER_SQL2PARENTID"); + String result = (String) cache.get(key); + if (result != null) { + return result; + } + this.sql = sql; + int totalCount = dataNodes.size(); + long startTime = System.currentTimeMillis(); + long endTime = startTime + 5 * 60 * 1000L; + MycatConfig conf = MycatServer.getInstance().getConfig(); + + 
LOGGER.debug("find child node with sql:" + sql); + for (String dn : dataNodes) { + if (dataNode != null) { + return dataNode; + } + PhysicalDBNode mysqlDN = conf.getDataNodes().get(dn); + try { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("execute in datanode " + dn); + } + RouteResultsetNode node = new RouteResultsetNode(dn, ServerParse.SELECT, sql); + node.setRunOnSlave(false); // 获取 子表节点,最好走master为好 + + /* + * fix #1370 默认应该先从已经持有的连接中取连接, 否则可能因为事务隔离性看不到当前事务内更新的数据 + * Tips: 通过mysqlDN.getConnection获取到的连接不是当前连接 + * + */ + BackendConnection conn = sc.getSession2().getTarget(node); + if(sc.getSession2().tryExistsCon(conn, node)) { + _execute(conn, node, sc); + } else { + mysqlDN.getConnection(mysqlDN.getDatabase(), sc.isAutocommit(), node, this, node); + } + } catch (Exception e) { + LOGGER.warn("get connection err " + e); + } + } + + while (dataNode == null && System.currentTimeMillis() < endTime) { + try { + Thread.sleep(50); + } catch (InterruptedException e) { + break; + } + if (dataNode != null || finished.get() >= totalCount) { + break; + } + } + if (dataNode != null) { + cache.putIfAbsent(key, dataNode); + } + return dataNode; + + } public String execute(String schema, String sql, ArrayList dataNodes) { String key = schema + ":" + sql; @@ -80,9 +143,14 @@ public String execute(String schema, String sql, ArrayList dataNodes) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("execute in datanode " + dn); } - mysqlDN.getConnection(mysqlDN.getDatabase(), true, - new RouteResultsetNode(dn, ServerParse.SELECT, sql), - this, dn); + RouteResultsetNode node = new RouteResultsetNode(dn, ServerParse.SELECT, sql); + node.setRunOnSlave(false); // 获取 子表节点,最好走master为好 + + mysqlDN.getConnection(mysqlDN.getDatabase(), true, node, this, node); + +// mysqlDN.getConnection(mysqlDN.getDatabase(), true, +// new RouteResultsetNode(dn, ServerParse.SELECT, sql), +// this, dn); } catch (Exception e) { LOGGER.warn("get connection err " + e); } @@ -109,6 +177,15 @@ public String 
execute(String schema, String sql, ArrayList dataNodes) { return dataNode; } + + private void _execute(BackendConnection conn, RouteResultsetNode node, ServerConnection sc) { + conn.setResponseHandler(this); + try { + conn.execute(node, sc, sc.isAutocommit()); + } catch (IOException e) { + connectionError(e, conn); + } + } @Override public void connectionAcquired(BackendConnection conn) { @@ -156,7 +233,7 @@ public void rowResponse(byte[] row, BackendConnection conn) { } if (result == null) { result = getColumn(row); - dataNode = (String) conn.getAttachment(); + dataNode = ((RouteResultsetNode) conn.getAttachment()).getName(); } else { LOGGER.warn("find multi data nodes for child table store, sql is: " + sql); @@ -184,6 +261,11 @@ private void executeException(BackendConnection c, Throwable e) { } + @Override + public void writeQueueAvailable() { + + } + @Override public void connectionClose(BackendConnection conn, String reason) { diff --git a/src/main/java/io/mycat/server/executors/GetConnectionHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/GetConnectionHandler.java similarity index 89% rename from src/main/java/io/mycat/server/executors/GetConnectionHandler.java rename to src/main/java/io/mycat/backend/mysql/nio/handler/GetConnectionHandler.java index e4d0d68d9..d748f2114 100644 --- a/src/main/java/io/mycat/server/executors/GetConnectionHandler.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/GetConnectionHandler.java @@ -21,16 +21,16 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.executors; - -import io.mycat.backend.BackendConnection; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +package io.mycat.backend.mysql.nio.handler; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicInteger; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.BackendConnection; + /** * wuzh * @@ -52,11 +52,10 @@ public GetConnectionHandler( this.total = totalNumber; } - public String getStatusInfo() { - return "finished " + finishedCount.get() + " success " - + successCons.size() + " target count:" + this.total; + public String getStatusInfo() + { + return "finished "+ finishedCount.get()+" success "+successCons.size()+" target count:"+this.total; } - public boolean finished() { return finishedCount.get() >= total; } @@ -72,7 +71,7 @@ public void connectionAcquired(BackendConnection conn) { @Override public void connectionError(Throwable e, BackendConnection conn) { finishedCount.addAndGet(1); - logger.warn("connect error " + conn + e); + logger.warn("connect error " + conn+ e); conn.release(); } @@ -104,6 +103,11 @@ public void rowEofResponse(byte[] eof, BackendConnection conn) { } + @Override + public void writeQueueAvailable() { + + } + @Override public void connectionClose(BackendConnection conn, String reason) { diff --git a/src/main/java/io/mycat/server/executors/KillConnectionHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/KillConnectionHandler.java similarity index 87% rename from src/main/java/io/mycat/server/executors/KillConnectionHandler.java rename to src/main/java/io/mycat/backend/mysql/nio/handler/KillConnectionHandler.java index 955078d8f..0fd1dbd21 100644 --- a/src/main/java/io/mycat/server/executors/KillConnectionHandler.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/KillConnectionHandler.java @@ -21,20 +21,20 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.executors; - -import io.mycat.backend.BackendConnection; -import io.mycat.backend.nio.MySQLBackendConnection; -import io.mycat.server.NonBlockingSession; -import io.mycat.server.packet.CommandPacket; -import io.mycat.server.packet.ErrorPacket; -import io.mycat.server.packet.MySQLPacket; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +package io.mycat.backend.mysql.nio.handler; import java.io.UnsupportedEncodingException; import java.util.List; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.BackendConnection; +import io.mycat.backend.mysql.nio.MySQLConnection; +import io.mycat.net.mysql.CommandPacket; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.MySQLPacket; +import io.mycat.server.NonBlockingSession; + /** * @author mycat */ @@ -42,18 +42,18 @@ public class KillConnectionHandler implements ResponseHandler { private static final Logger LOGGER = LoggerFactory .getLogger(KillConnectionHandler.class); - private final MySQLBackendConnection killee; + private final MySQLConnection killee; private final NonBlockingSession session; public KillConnectionHandler(BackendConnection killee, NonBlockingSession session) { - this.killee = (MySQLBackendConnection) killee; + this.killee = (MySQLConnection) killee; this.session = session; } @Override public void connectionAcquired(BackendConnection conn) { - MySQLBackendConnection mysqlCon = (MySQLBackendConnection) conn; + MySQLConnection mysqlCon = (MySQLConnection) conn; conn.setResponseHandler(this); CommandPacket packet = new CommandPacket(); packet.packetId = 0; @@ -113,6 +113,11 @@ public void fieldEofResponse(byte[] header, List fields, public void rowResponse(byte[] row, BackendConnection conn) { } + @Override + public void writeQueueAvailable() { + + } + @Override public void connectionClose(BackendConnection conn, String reason) { } diff --git a/src/main/java/io/mycat/server/executors/LoadDataResponseHandler.java 
b/src/main/java/io/mycat/backend/mysql/nio/handler/LoadDataResponseHandler.java similarity index 85% rename from src/main/java/io/mycat/server/executors/LoadDataResponseHandler.java rename to src/main/java/io/mycat/backend/mysql/nio/handler/LoadDataResponseHandler.java index b76bc9f91..a621e9a39 100644 --- a/src/main/java/io/mycat/server/executors/LoadDataResponseHandler.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/LoadDataResponseHandler.java @@ -1,4 +1,4 @@ -package io.mycat.server.executors; +package io.mycat.backend.mysql.nio.handler; import io.mycat.backend.BackendConnection; diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/LockTablesHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/LockTablesHandler.java new file mode 100644 index 000000000..cfff92588 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/LockTablesHandler.java @@ -0,0 +1,135 @@ +package io.mycat.backend.mysql.nio.handler; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.locks.ReentrantLock; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.BackendConnection; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.config.MycatConfig; +import io.mycat.net.mysql.OkPacket; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteResultsetNode; +import io.mycat.server.NonBlockingSession; + +/** + * lock tables 语句处理器 + * @author songdabin + * + */ +public class LockTablesHandler extends MultiNodeHandler { + + private static final Logger LOGGER = LoggerFactory.getLogger(LockTablesHandler.class); + + private final RouteResultset rrs; + private final ReentrantLock lock; + private final boolean autocommit; + + public LockTablesHandler(NonBlockingSession session, RouteResultset rrs) { + super(session); + this.rrs = rrs; + this.autocommit = session.getSource().isAutocommit(); + this.lock = new ReentrantLock(); + } + + public 
void execute() throws Exception { + super.reset(this.rrs.getNodes().length); + MycatConfig conf = MycatServer.getInstance().getConfig(); + for (final RouteResultsetNode node : rrs.getNodes()) { + BackendConnection conn = session.getTarget(node); + if (session.tryExistsCon(conn, node)) { + _execute(conn, node); + } else { + // create new connection + PhysicalDBNode dn = conf.getDataNodes().get(node.getName()); + dn.getConnection(dn.getDatabase(), autocommit, node, this, node); + } + } + } + + private void _execute(BackendConnection conn, RouteResultsetNode node) { + if (clearIfSessionClosed(session)) { + return; + } + conn.setResponseHandler(this); + try { + conn.execute(node, session.getSource(), autocommit); + } catch (IOException e) { + connectionError(e, conn); + } + } + + @Override + public void connectionAcquired(BackendConnection conn) { + final RouteResultsetNode node = (RouteResultsetNode) conn.getAttachment(); + session.bindConnection(node, conn); + _execute(conn, node); + } + + @Override + public void okResponse(byte[] data, BackendConnection conn) { + boolean executeResponse = conn.syncAndExcute(); + if (executeResponse) { + if (clearIfSessionClosed(session)) { + return; + } + boolean isEndPack = decrementCountBy(1); + if (isEndPack) { + if (this.isFail() || session.closed()) { + tryErrorFinished(true); + return; + } + OkPacket ok = new OkPacket(); + ok.read(data); + lock.lock(); + try { + ok.packetId = ++ packetId; + ok.serverStatus = session.getSource().isAutocommit() ? 
2:1; + } finally { + lock.unlock(); + } + ok.write(session.getSource()); + } + } + } + + protected String byte2Str(byte[] data) { + StringBuilder sb = new StringBuilder(); + for (byte b : data) { + sb.append(Byte.toString(b)); + } + return sb.toString(); + } + + @Override + public void fieldEofResponse(byte[] header, List fields, byte[] eof, BackendConnection conn) { + LOGGER.error(new StringBuilder().append("unexpected packet for ") + .append(conn).append(" bound by ").append(session.getSource()) + .append(": field's eof").toString()); + } + + @Override + public void rowResponse(byte[] row, BackendConnection conn) { + LOGGER.warn(new StringBuilder().append("unexpected packet for ") + .append(conn).append(" bound by ").append(session.getSource()) + .append(": row data packet").toString()); + } + + @Override + public void rowEofResponse(byte[] eof, BackendConnection conn) { + LOGGER.error(new StringBuilder().append("unexpected packet for ") + .append(conn).append(" bound by ").append(session.getSource()) + .append(": row's eof").toString()); + } + + @Override + public void writeQueueAvailable() { + // TODO Auto-generated method stub + + } + +} diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/MiddlerQueryResultHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/MiddlerQueryResultHandler.java new file mode 100644 index 000000000..e42442de1 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/MiddlerQueryResultHandler.java @@ -0,0 +1,51 @@ +package io.mycat.backend.mysql.nio.handler; + +import java.util.ArrayList; +import java.util.List; + +import com.alibaba.druid.sql.ast.expr.SQLCharExpr; + +import io.mycat.backend.mysql.DataType; + +/** + * 查询中间结果处理器 + * @author huangyiming + * + * @param + */ +public class MiddlerQueryResultHandler implements MiddlerResultHandler { + + List reusult = new ArrayList<>(); + DataType dataType; + Class clazz; + private SecondHandler secondHandler; + + public 
MiddlerQueryResultHandler(SecondHandler secondHandler) { + this.secondHandler = secondHandler; + + + } + //确保只有一个构造函数入口 + private MiddlerQueryResultHandler(){ + + } + + @Override + public List getResult() { + return reusult; + } + @Override + public void add(T t ) { + reusult.add(new SQLCharExpr(t==null?null:t.toString())); + } + + @Override + public String getDataType() { + return dataType.name(); + } + + @Override + public void secondEexcute() { + secondHandler.doExecute(getResult()); + } +} diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/MiddlerResultHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/MiddlerResultHandler.java new file mode 100644 index 000000000..734cec632 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/MiddlerResultHandler.java @@ -0,0 +1,29 @@ +package io.mycat.backend.mysql.nio.handler; + +import java.util.List; + +import com.alibaba.druid.sql.ast.expr.SQLCharExpr; + +/** + * 中间结果处理器 + * @author huangyiming + * + * @param + */ +public interface MiddlerResultHandler { + + + public List getResult(); + + public void add(T t ); + + public String getDataType(); + + public void secondEexcute(); + + + + + + + } diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeCoordinator.java b/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeCoordinator.java new file mode 100644 index 000000000..33e613ea2 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeCoordinator.java @@ -0,0 +1,263 @@ +package io.mycat.backend.mysql.nio.handler; + +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import com.esotericsoftware.minlog.Log; +import io.mycat.backend.mysql.nio.MySQLConnection; +import io.mycat.backend.mysql.xa.CoordinatorLogEntry; +import io.mycat.backend.mysql.xa.ParticipantLogEntry; +import io.mycat.backend.mysql.xa.TxState; +import io.mycat.backend.mysql.xa.recovery.Repository; 
+import io.mycat.backend.mysql.xa.recovery.impl.FileSystemRepository; +import io.mycat.backend.mysql.xa.recovery.impl.InMemoryRepository; +import io.mycat.net.BackendAIOConnection; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.BackendConnection; +import io.mycat.route.RouteResultsetNode; +import io.mycat.server.NonBlockingSession; +import io.mycat.server.sqlcmd.SQLCtrlCommand; + +public class MultiNodeCoordinator implements ResponseHandler { + private static final Logger LOGGER = LoggerFactory + .getLogger(MultiNodeCoordinator.class); + public static final Repository fileRepository = new FileSystemRepository(); + public static final Repository inMemoryRepository = new InMemoryRepository(); + private final AtomicInteger runningCount = new AtomicInteger(0); + private final AtomicInteger faileCount = new AtomicInteger(0); + private volatile int nodeCount; + private final NonBlockingSession session; + private SQLCtrlCommand cmdHandler; + private final AtomicBoolean failed = new AtomicBoolean(false); + + public MultiNodeCoordinator(NonBlockingSession session) { + this.session = session; + } + + /** Multi-nodes 1pc Commit Handle **/ + public void executeBatchNodeCmd(SQLCtrlCommand cmdHandler) { + this.cmdHandler = cmdHandler; + final int initCount = session.getTargetCount(); + runningCount.set(initCount); + nodeCount = initCount; + failed.set(false); + faileCount.set(0); + //recovery nodes log + ParticipantLogEntry[] participantLogEntry = new ParticipantLogEntry[initCount]; + // 执行 + int started = 0; + for (RouteResultsetNode rrn : session.getTargetKeys()) { + if (rrn == null) { + LOGGER.error("null is contained in RoutResultsetNodes, source = " + + session.getSource()); + continue; + } + final BackendConnection conn = session.getTarget(rrn); + if (conn != null) { + conn.setResponseHandler(this); + //process the XA_END XA_PREPARE Command + if(conn instanceof MySQLConnection){ + MySQLConnection mysqlCon = (MySQLConnection) conn; + 
String xaTxId = null; + if(session.getXaTXID()!=null){ + xaTxId = session.getXaTXID() +",'"+ mysqlCon.getSchema()+"'"; + } + if (mysqlCon.getXaStatus() == TxState.TX_STARTED_STATE) + { + //recovery Log + participantLogEntry[started] = new ParticipantLogEntry(xaTxId,conn.getHost(),0,conn.getSchema(),((MySQLConnection) conn).getXaStatus()); + String[] cmds = new String[]{"XA END " + xaTxId, + "XA PREPARE " + xaTxId}; + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Start execute the batch cmd : "+ cmds[0] + ";" + cmds[1]+","+ + "current connection:"+conn.getHost()+":"+conn.getPort()); + } + mysqlCon.execBatchCmd(cmds); + } else + { + //recovery Log + participantLogEntry[started] = new ParticipantLogEntry(xaTxId,conn.getHost(),0,conn.getSchema(),((MySQLConnection) conn).getXaStatus()); + cmdHandler.sendCommand(session, conn); + } + }else{ + cmdHandler.sendCommand(session, conn); + } + ++started; + } + } + + //xa recovery log + if(session.getXaTXID()!=null) { + CoordinatorLogEntry coordinatorLogEntry = new CoordinatorLogEntry(session.getXaTXID(), false, participantLogEntry); + inMemoryRepository.put(session.getXaTXID(), coordinatorLogEntry); + fileRepository.writeCheckpoint(inMemoryRepository.getAllCoordinatorLogEntries()); + } + if (started < nodeCount) { + runningCount.set(started); + LOGGER.warn("some connection failed to execute " + + (nodeCount - started)); + /** + * assumption: only caused by front-end connection close.
+ * Otherwise, packet must be returned to front-end + */ + failed.set(true); + } + } + + private boolean finished() { + int val = runningCount.decrementAndGet(); + return (val == 0); + } + + @Override + public void connectionError(Throwable e, BackendConnection conn) { + } + + @Override + public void connectionAcquired(BackendConnection conn) { + + } + + @Override + public void errorResponse(byte[] err, BackendConnection conn) { + faileCount.incrementAndGet(); + + //replayCommit + if(conn instanceof MySQLConnection) { + MySQLConnection mysqlCon = (MySQLConnection) conn; + String xaTxId = session.getXaTXID(); + if (xaTxId != null) { + xaTxId += ",'"+mysqlCon.getSchema()+"'"; + String cmd = "XA COMMIT " + xaTxId; + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Replay Commit execute the cmd :" + cmd + ",current host:" + + mysqlCon.getHost() + ":" + mysqlCon.getPort()); + } + mysqlCon.execCmd(cmd); + } + } + + //release connection + if (this.cmdHandler.releaseConOnErr()) { + session.releaseConnection(conn); + } else { + session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(),false); + } + if (this.finished()) { + cmdHandler.errorResponse(session, err, this.nodeCount, + this.faileCount.get()); + if (cmdHandler.isAutoClearSessionCons()) { + session.clearResources(session.getSource().isTxInterrupted()); + } + } + + } + + @Override + public void okResponse(byte[] ok, BackendConnection conn) { + //process the XA Transatcion 2pc commit + if(conn instanceof MySQLConnection) + { + MySQLConnection mysqlCon = (MySQLConnection) conn; + switch (mysqlCon.getXaStatus()) + { + case TxState.TX_STARTED_STATE: + //if there have many SQL execute wait the okResponse,will come to here one by one + //should be wait all nodes ready ,then send xa commit to all nodes. 
+ if (mysqlCon.batchCmdFinished()) + { + String xaTxId = session.getXaTXID(); + String cmd = "XA COMMIT " + xaTxId +",'"+mysqlCon.getSchema()+"'"; + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Start execute the cmd :"+cmd+",current host:"+ + mysqlCon.getHost()+":"+mysqlCon.getPort()); + } + //recovery log + CoordinatorLogEntry coordinatorLogEntry = inMemoryRepository.get(xaTxId); + for(int i=0; i fields, + byte[] eof, BackendConnection conn) { + + } + + @Override + public void rowResponse(byte[] row, BackendConnection conn) { + + } + + @Override + public void rowEofResponse(byte[] eof, BackendConnection conn) { + } + + @Override + public void writeQueueAvailable() { + + } + + @Override + public void connectionClose(BackendConnection conn, String reason) { + + } + +} diff --git a/src/main/java/io/mycat/server/executors/MultiNodeHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeHandler.java similarity index 85% rename from src/main/java/io/mycat/server/executors/MultiNodeHandler.java rename to src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeHandler.java index da5fde7dc..52581ad14 100644 --- a/src/main/java/io/mycat/server/executors/MultiNodeHandler.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeHandler.java @@ -1,229 +1,238 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.executors; - -import io.mycat.backend.BackendConnection; -import io.mycat.server.ErrorCode; -import io.mycat.server.NonBlockingSession; -import io.mycat.server.packet.ErrorPacket; -import io.mycat.util.StringUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.ReentrantLock; - -/** - * @author mycat - */ -abstract class MultiNodeHandler implements ResponseHandler, Terminatable { - public static final Logger LOGGER = LoggerFactory - .getLogger(MultiNodeHandler.class); - protected final ReentrantLock lock = new ReentrantLock(); - protected final NonBlockingSession session; - private AtomicBoolean isFailed = new AtomicBoolean(false); - protected volatile String error; - protected byte packetId; - protected final AtomicBoolean errorRepsponsed = new AtomicBoolean(false); - - public MultiNodeHandler(NonBlockingSession session) { - if (session == null) { - throw new IllegalArgumentException("session is null!"); - } - this.session = session; - } - - public void setFail(String errMsg) { - isFailed.set(true); - error = errMsg; - } - - public boolean isFail() { - return isFailed.get(); - } - - private int nodeCount; - - private Runnable terminateCallBack; - - @Override - public void terminate(Runnable terminateCallBack) { - boolean zeroReached = false; - lock.lock(); - try { - if (nodeCount > 0) { - this.terminateCallBack = terminateCallBack; - } else { - 
zeroReached = true; - } - } finally { - lock.unlock(); - } - if (zeroReached) { - terminateCallBack.run(); - } - } - - protected boolean canClose(BackendConnection conn, boolean tryErrorFinish) { - - // realse this connection if safe - session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false); - boolean allFinished = false; - if (tryErrorFinish) { - allFinished = this.decrementCountBy(1); - this.tryErrorFinished(allFinished); - } - - return allFinished; - } - - protected void decrementCountToZero() { - Runnable callback; - lock.lock(); - try { - nodeCount = 0; - callback = this.terminateCallBack; - this.terminateCallBack = null; - } finally { - lock.unlock(); - } - if (callback != null) { - callback.run(); - } - } - - public void connectionError(Throwable e, BackendConnection conn) { - boolean canClose = decrementCountBy(1); - this.tryErrorFinished(canClose); - } - - public void errorResponse(byte[] data, BackendConnection conn) { - session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false); - ErrorPacket err = new ErrorPacket(); - err.read(data); - String errmsg = new String(err.message); - this.setFail(errmsg); - LOGGER.warn("error response from " + conn + " err " + errmsg + " code:" - + err.errno); - - this.tryErrorFinished(this.decrementCountBy(1)); - } - - public boolean clearIfSessionClosed(NonBlockingSession session) { - if (session.closed()) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("session closed ,clear resources " + session); - } - - session.clearResources(true); - this.clearResources(); - return true; - } else { - return false; - } - - } - - protected boolean decrementCountBy(int finished) { - boolean zeroReached = false; - Runnable callback = null; - lock.lock(); - try { - if (zeroReached = --nodeCount == 0) { - callback = this.terminateCallBack; - this.terminateCallBack = null; - } - } finally { - lock.unlock(); - } - if (zeroReached && callback != null) { - callback.run(); - } - return zeroReached; - } - - protected 
void reset(int initCount) { - nodeCount = initCount; - isFailed.set(false); - error = null; - packetId = 0; - } - - protected ErrorPacket createErrPkg(String errmgs) { - ErrorPacket err = new ErrorPacket(); - lock.lock(); - try { - err.packetId = ++packetId; - } finally { - lock.unlock(); - } - err.errno = ErrorCode.ER_UNKNOWN_ERROR; - err.message = StringUtil.encode(errmgs, session.getSource() - .getCharset()); - return err; - } - - protected void tryErrorFinished(boolean allEnd) { - if (allEnd && !session.closed()) { - if (errorRepsponsed.compareAndSet(false, true)) { - createErrPkg(this.error).write(session.getSource()); - } - // clear session resources,release all - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("error all end ,clear session resource "); - } - if (session.getSource().isAutocommit()) { - session.closeAndClearResources(error); - } else { - session.getSource().setTxInterrupt(this.error); - // clear resouces - clearResources(); - } - - } - - } - - public void connectionClose(BackendConnection conn, String reason) { - this.setFail("closed connection:" + reason + " con:" + conn); - boolean finished = false; - lock.lock(); - try { - finished = (this.nodeCount == 0); - - } finally { - lock.unlock(); - } - if (finished == false) { - finished = this.decrementCountBy(1); - } - if (error == null) { - error = "back connection closed "; - } - tryErrorFinished(finished); - } - - public void clearResources() { - } +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.backend.mysql.nio.handler; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantLock; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.BackendConnection; +import io.mycat.config.ErrorCode; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.server.NonBlockingSession; +import io.mycat.util.StringUtil; + +/** + * @author mycat + */ +abstract class MultiNodeHandler implements ResponseHandler, Terminatable { + private static final Logger LOGGER = LoggerFactory + .getLogger(MultiNodeHandler.class); + protected final ReentrantLock lock = new ReentrantLock(); + protected final NonBlockingSession session; + private AtomicBoolean isFailed = new AtomicBoolean(false); + protected volatile String error; + protected byte packetId; + protected final AtomicBoolean errorRepsponsed = new AtomicBoolean(false); + + public MultiNodeHandler(NonBlockingSession session) { + if (session == null) { + throw new IllegalArgumentException("session is null!"); + } + this.session = session; + } + + public void setFail(String errMsg) { + isFailed.set(true); + error = errMsg; + } + + public boolean isFail() { + return isFailed.get(); + } + + private int nodeCount; + + private Runnable terminateCallBack; + + 
@Override + public void terminate(Runnable terminateCallBack) { + boolean zeroReached = false; + lock.lock(); + try { + if (nodeCount > 0) { + this.terminateCallBack = terminateCallBack; + } else { + zeroReached = true; + } + } finally { + lock.unlock(); + } + if (zeroReached) { + terminateCallBack.run(); + } + } + + protected boolean canClose(BackendConnection conn, boolean tryErrorFinish) { + + // realse this connection if safe + session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false); + boolean allFinished = false; + if (tryErrorFinish) { + allFinished = this.decrementCountBy(1); + this.tryErrorFinished(allFinished); + } + + return allFinished; + } + + protected void decrementCountToZero() { + Runnable callback; + lock.lock(); + try { + nodeCount = 0; + callback = this.terminateCallBack; + this.terminateCallBack = null; + } finally { + lock.unlock(); + } + if (callback != null) { + callback.run(); + } + } + + public void connectionError(Throwable e, BackendConnection conn) { + final boolean canClose = decrementCountBy(1); + // 需要把Throwable e的错误信息保存下来(setFail()), 否则会导致响应 + //null信息,结果mysql命令行等客户端查询结果是"Query OK"!! 
+ // @author Uncle-pan + // @since 2016-03-26 + if(canClose){ + setFail("backend connect: "+e); + } + LOGGER.warn("backend connect", e); + this.tryErrorFinished(canClose); + } + + public void errorResponse(byte[] data, BackendConnection conn) { + session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false); + ErrorPacket err = new ErrorPacket(); + err.read(data); + + String errmsg = new String(err.message); + this.setFail(errmsg); + + LOGGER.warn("error response from " + conn + " err " + errmsg + " code:" + err.errno); + + this.tryErrorFinished(this.decrementCountBy(1)); + } + + public boolean clearIfSessionClosed(NonBlockingSession session) { + if (session.closed()) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("session closed ,clear resources " + session); + } + + session.clearResources(true); + this.clearResources(); + return true; + } else { + return false; + } + + } + + protected boolean decrementCountBy(int finished) { + boolean zeroReached = false; + Runnable callback = null; + lock.lock(); + try { + if (zeroReached = --nodeCount == 0) { + callback = this.terminateCallBack; + this.terminateCallBack = null; + } + } finally { + lock.unlock(); + } + if (zeroReached && callback != null) { + callback.run(); + } + return zeroReached; + } + + protected void reset(int initCount) { + nodeCount = initCount; + isFailed.set(false); + error = null; + packetId = 0; + } + + protected ErrorPacket createErrPkg(String errmgs) { + ErrorPacket err = new ErrorPacket(); + lock.lock(); + try { + err.packetId = ++packetId; + } finally { + lock.unlock(); + } + err.errno = ErrorCode.ER_UNKNOWN_ERROR; + err.message = StringUtil.encode(errmgs, session.getSource().getCharset()); + return err; + } + + protected void tryErrorFinished(boolean allEnd) { + if (allEnd && !session.closed()) { + + if (errorRepsponsed.compareAndSet(false, true)) { + createErrPkg(this.error).write(session.getSource()); + } + // clear session resources,release all + if (LOGGER.isDebugEnabled()) { + 
LOGGER.debug("error all end ,clear session resource "); + } + if (session.getSource().isAutocommit()) { + session.closeAndClearResources(error); + } else { + session.getSource().setTxInterrupt(this.error); + // clear resouces + clearResources(); + } + + } + + } + + public void connectionClose(BackendConnection conn, String reason) { + this.setFail("closed connection:" + reason + " con:" + conn); + boolean finished = false; + lock.lock(); + try { + finished = (this.nodeCount == 0); + + } finally { + lock.unlock(); + } + if (finished == false) { + finished = this.decrementCountBy(1); + } + if (error == null) { + error = "back connection closed "; + } + tryErrorFinished(finished); + } + + public void clearResources() { + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeQueryHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeQueryHandler.java new file mode 100644 index 000000000..508ec314b --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeQueryHandler.java @@ -0,0 +1,872 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.backend.mysql.nio.handler; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantLock; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.BackendConnection; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.mysql.LoadDataUtil; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.MycatConfig; +import io.mycat.memory.unsafe.row.UnsafeRow; +import io.mycat.net.mysql.BinaryRowDataPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.OkPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteResultsetNode; +import io.mycat.server.NonBlockingSession; +import io.mycat.server.ServerConnection; +import io.mycat.server.parser.ServerParse; +import io.mycat.sqlengine.mpp.AbstractDataNodeMerge; +import io.mycat.sqlengine.mpp.ColMeta; +import io.mycat.sqlengine.mpp.DataMergeService; +import io.mycat.sqlengine.mpp.DataNodeMergeManager; +import io.mycat.sqlengine.mpp.MergeCol; +import io.mycat.statistic.stat.QueryResult; +import io.mycat.statistic.stat.QueryResultDispatcher; +import io.mycat.util.ResultSetUtil; + +/** + * @author mycat + */ +public class MultiNodeQueryHandler extends MultiNodeHandler 
implements LoadDataResponseHandler { + + private static final Logger LOGGER = LoggerFactory.getLogger(MultiNodeQueryHandler.class); + + private final RouteResultset rrs; + private final NonBlockingSession session; + // private final CommitNodeHandler icHandler; + private final AbstractDataNodeMerge dataMergeSvr; + private final boolean autocommit; + private String priamaryKeyTable = null; + private int primaryKeyIndex = -1; + private int fieldCount = 0; + private final ReentrantLock lock; + private long affectedRows; + private long selectRows; + private long insertId; + private volatile boolean fieldsReturned; + private int okCount; + private final boolean isCallProcedure; + private long startTime; + private long netInBytes; + private long netOutBytes; + private int execCount = 0; + + private boolean prepared; + private List fieldPackets = new ArrayList(); + private int isOffHeapuseOffHeapForMerge = 1; + //huangyiming add 中间处理结果是否处理完毕 + private final AtomicBoolean isMiddleResultDone; + /** + * Limit N,M + */ + private int limitStart; + private int limitSize; + + private int index = 0; + + private int end = 0; + + //huangyiming + private byte[] header = null; + private List fields = null; + + public MultiNodeQueryHandler(int sqlType, RouteResultset rrs, + boolean autocommit, NonBlockingSession session) { + + super(session); + this.isMiddleResultDone = new AtomicBoolean(false); + + if (rrs.getNodes() == null) { + throw new IllegalArgumentException("routeNode is null!"); + } + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("execute mutinode query " + rrs.getStatement()); + } + + this.rrs = rrs; + isOffHeapuseOffHeapForMerge = MycatServer.getInstance(). 
+ getConfig().getSystem().getUseOffHeapForMerge(); + if (ServerParse.SELECT == sqlType && rrs.needMerge()) { + /** + * 使用Off Heap + */ + if(isOffHeapuseOffHeapForMerge == 1){ + dataMergeSvr = new DataNodeMergeManager(this,rrs,isMiddleResultDone); + }else { + dataMergeSvr = new DataMergeService(this,rrs); + } + } else { + dataMergeSvr = null; + } + + isCallProcedure = rrs.isCallStatement(); + this.autocommit = session.getSource().isAutocommit(); + this.session = session; + this.lock = new ReentrantLock(); + // this.icHandler = new CommitNodeHandler(session); + + this.limitStart = rrs.getLimitStart(); + this.limitSize = rrs.getLimitSize(); + this.end = limitStart + rrs.getLimitSize(); + + if (this.limitStart < 0) + this.limitStart = 0; + + if (rrs.getLimitSize() < 0) + end = Integer.MAX_VALUE; + if ((dataMergeSvr != null) + && LOGGER.isDebugEnabled()) { + LOGGER.debug("has data merge logic "); + } + + if ( rrs != null && rrs.getStatement() != null) { + netInBytes += rrs.getStatement().getBytes().length; + } + } + + protected void reset(int initCount) { + super.reset(initCount); + this.okCount = initCount; + this.execCount = 0; + this.netInBytes = 0; + this.netOutBytes = 0; + } + + public NonBlockingSession getSession() { + return session; + } + + public void execute() throws Exception { + final ReentrantLock lock = this.lock; + lock.lock(); + try { + this.reset(rrs.getNodes().length); + this.fieldsReturned = false; + this.affectedRows = 0L; + this.insertId = 0L; + } finally { + lock.unlock(); + } + MycatConfig conf = MycatServer.getInstance().getConfig(); + startTime = System.currentTimeMillis(); + LOGGER.debug("rrs.getRunOnSlave()-" + rrs.getRunOnSlave()); + for (final RouteResultsetNode node : rrs.getNodes()) { + BackendConnection conn = session.getTarget(node); + if (session.tryExistsCon(conn, node)) { + LOGGER.debug("node.getRunOnSlave()-" + node.getRunOnSlave()); + node.setRunOnSlave(rrs.getRunOnSlave()); // 实现 master/slave注解 + 
LOGGER.debug("node.getRunOnSlave()-" + node.getRunOnSlave()); + _execute(conn, node); + } else { + // create new connection + LOGGER.debug("node.getRunOnSlave()1-" + node.getRunOnSlave()); + node.setRunOnSlave(rrs.getRunOnSlave()); // 实现 master/slave注解 + LOGGER.debug("node.getRunOnSlave()2-" + node.getRunOnSlave()); + PhysicalDBNode dn = conf.getDataNodes().get(node.getName()); + dn.getConnection(dn.getDatabase(), autocommit, node, this, node); + // 注意该方法不仅仅是获取连接,获取新连接成功之后,会通过层层回调,最后回调到本类 的connectionAcquired + // 这是通过 上面方法的 this 参数的层层传递完成的。 + // connectionAcquired 进行执行操作: + // session.bindConnection(node, conn); + // _execute(conn, node); + } + + } + } + + private void _execute(BackendConnection conn, RouteResultsetNode node) { + if (clearIfSessionClosed(session)) { + return; + } + conn.setResponseHandler(this); + try { + conn.execute(node, session.getSource(), autocommit); + } catch (IOException e) { + connectionError(e, conn); + } + } + + @Override + public void connectionAcquired(final BackendConnection conn) { + final RouteResultsetNode node = (RouteResultsetNode) conn + .getAttachment(); + session.bindConnection(node, conn); + _execute(conn, node); + } + + private boolean decrementOkCountBy(int finished) { + lock.lock(); + try { + return --okCount == 0; + } finally { + lock.unlock(); + } + } + + @Override + public void okResponse(byte[] data, BackendConnection conn) { + + this.netOutBytes += data.length; + + boolean executeResponse = conn.syncAndExcute(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("received ok response ,executeResponse:" + + executeResponse + " from " + conn); + } + if (executeResponse) { + + ServerConnection source = session.getSource(); + OkPacket ok = new OkPacket(); + ok.read(data); + //存储过程 + boolean isCanClose2Client =(!rrs.isCallStatement()) ||(rrs.isCallStatement() &&!rrs.getProcedure().isResultSimpleValue());; + if(!isCallProcedure) + { + if (clearIfSessionClosed(session)) + { + return; + } else if (canClose(conn, false)) + { + 
return; + } + } + lock.lock(); + try { + // 判断是否是全局表,如果是,执行行数不做累加,以最后一次执行的为准。 + if (!rrs.isGlobalTable()) { + affectedRows += ok.affectedRows; + } else { + affectedRows = ok.affectedRows; + } + if (ok.insertId > 0) { + insertId = (insertId == 0) ? ok.insertId : Math.min( + insertId, ok.insertId); + } + } finally { + lock.unlock(); + } + // 对于存储过程,其比较特殊,查询结果返回EndRow报文以后,还会再返回一个OK报文,才算结束 + boolean isEndPacket = isCallProcedure ? decrementOkCountBy(1): decrementCountBy(1); + if (isEndPacket && isCanClose2Client) { + + if (this.autocommit && !session.getSource().isLocked()) {// clear all connections + session.releaseConnections(false); + } + + if (this.isFail() || session.closed()) { + tryErrorFinished(true); + return; + } + + lock.lock(); + try { + if (rrs.isLoadData()) { + byte lastPackId = source.getLoadDataInfileHandler() + .getLastPackId(); + ok.packetId = ++lastPackId;// OK_PACKET + ok.message = ("Records: " + affectedRows + " Deleted: 0 Skipped: 0 Warnings: 0") + .getBytes();// 此处信息只是为了控制台给人看的 + source.getLoadDataInfileHandler().clear(); + } else { + ok.packetId = ++packetId;// OK_PACKET + } + + ok.affectedRows = affectedRows; + ok.serverStatus = source.isAutocommit() ? 
2 : 1; + if (insertId > 0) { + ok.insertId = insertId; + source.setLastInsertId(insertId); + } + + ok.write(source); + } catch (Exception e) { + handleDataProcessException(e); + } finally { + lock.unlock(); + } + } + + + // add by lian + // 解决sql统计中写操作永远为0 + execCount++; + if (execCount == rrs.getNodes().length) { + source.setExecuteSql(null); //完善show @@connection.sql 监控命令.已经执行完的sql 不再显示 + QueryResult queryResult = new QueryResult(session.getSource().getUser(), + rrs.getSqlType(), rrs.getStatement(), selectRows, netInBytes, netOutBytes, startTime, System.currentTimeMillis(),0); + QueryResultDispatcher.dispatchQuery( queryResult ); + } + } + } + + @Override + public void rowEofResponse(final byte[] eof, BackendConnection conn) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("on row end reseponse " + conn); + } + + this.netOutBytes += eof.length; + MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler(); + + if (errorRepsponsed.get()) { + // the connection has been closed or set to "txInterrupt" properly + //in tryErrorFinished() method! If we close it here, it can + // lead to tx error such as blocking rollback tx for ever. 
+ // @author Uncle-pan + // @since 2016-03-25 + // conn.close(this.error); + return; + } + + final ServerConnection source = session.getSource(); + if (!isCallProcedure) { + if (clearIfSessionClosed(session)) { + return; + } else if (canClose(conn, false)) { + return; + } + } + + if (decrementCountBy(1)) { + if (!rrs.isCallStatement()||(rrs.isCallStatement()&&rrs.getProcedure().isResultSimpleValue())) { + if (this.autocommit && !session.getSource().isLocked()) {// clear all connections + session.releaseConnections(false); + } + + if (this.isFail() || session.closed()) { + tryErrorFinished(true); + return; + } + } + if (dataMergeSvr != null) { + //huangyiming add 数据合并前如果有中间过程则先执行数据合并再执行下一步 + if(session.getMiddlerResultHandler() !=null ){ + isMiddleResultDone.set(true); + } + + try { + dataMergeSvr.outputMergeResult(session, eof); + } catch (Exception e) { + handleDataProcessException(e); + } + + } else { + try { + lock.lock(); + eof[3] = ++packetId; + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("last packet id:" + packetId); + } + if( middlerResultHandler ==null ){ + //middlerResultHandler.secondEexcute(); + source.write(eof); + } + } finally { + lock.unlock(); + + } + } + } + execCount++; + if(middlerResultHandler !=null){ + if (execCount != rrs.getNodes().length) { + + return; + } + /*else{ + middlerResultHandler.secondEexcute(); + }*/ + } + if (execCount == rrs.getNodes().length) { + int resultSize = source.getWriteQueue().size()*MycatServer.getInstance().getConfig().getSystem().getBufferPoolPageSize(); + source.setExecuteSql(null); //完善show @@connection.sql 监控命令.已经执行完的sql 不再显示 + //TODO: add by zhuam + //查询结果派发 + QueryResult queryResult = new QueryResult(session.getSource().getUser(), + rrs.getSqlType(), rrs.getStatement(), selectRows, netInBytes, netOutBytes, startTime, System.currentTimeMillis(),resultSize); + QueryResultDispatcher.dispatchQuery( queryResult ); + + + // add huangyiming 如果是中间过程,必须等数据合并好了再进行下一步语句的拼装 + if(middlerResultHandler !=null ){ + while 
(!this.isMiddleResultDone.compareAndSet(false, true)) { + Thread.yield(); + } + middlerResultHandler.secondEexcute(); + isMiddleResultDone.set(false); + } + } + + } + + /** + * 将汇聚结果集数据真正的发送给Mycat客户端 + * @param source + * @param eof + * @param + */ + public void outputMergeResult(final ServerConnection source, final byte[] eof, Iterator iter,AtomicBoolean isMiddleResultDone) { + + try { + lock.lock(); + ByteBuffer buffer = session.getSource().allocate(); + final RouteResultset rrs = this.dataMergeSvr.getRrs(); + + /** + * 处理limit语句的start 和 end位置,将正确的结果发送给 + * Mycat 客户端 + */ + int start = rrs.getLimitStart(); + int end = start + rrs.getLimitSize(); + int index = 0; + + if (start < 0) + start = 0; + + if (rrs.getLimitSize() < 0) + end = Integer.MAX_VALUE; + + if(prepared) { + while (iter.hasNext()){ + UnsafeRow row = iter.next(); + if(index >= start){ + row.packetId = ++packetId; + BinaryRowDataPacket binRowPacket = new BinaryRowDataPacket(); + binRowPacket.read(fieldPackets, row); + buffer = binRowPacket.write(buffer, source, true); + } + index++; + if(index == end){ + break; + } + } + } else { + while (iter.hasNext()){ + UnsafeRow row = iter.next(); + if(index >= start){ + row.packetId = ++packetId; + buffer = row.write(buffer,source,true); + } + index++; + if(index == end){ + break; + } + } + } + + eof[3] = ++packetId; + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("last packet id:" + packetId); + } + //huangyiming add 中间过程缓存起来,isMiddleResultDone是确保合并部分执行完成后才会执行secondExecute + MiddlerResultHandler middlerResultHandler = source.getSession2().getMiddlerResultHandler(); + if(null != middlerResultHandler){ + if(buffer.position() > 0){ + buffer.flip(); + byte[] data = new byte[buffer.limit()]; + buffer.get(data); + buffer.clear(); + //如果该操作只是一个中间过程则把结果存储起来 + String str = ResultSetUtil.getColumnValAsString(data, fields, 0); + //真的需要数据合并的时候才合并 + if(rrs.isHasAggrColumn()){ + middlerResultHandler.getResult().clear(); + if(str !=null){ + middlerResultHandler.add(str); + 
} + } + } + isMiddleResultDone.set(false); + }else{ + ByteBuffer byteBuffer = source.writeToBuffer(eof, buffer); + + /** + * 真正的开始把Writer Buffer的数据写入到channel 中 + */ + session.getSource().write(byteBuffer); + } + + + } catch (Exception e) { + e.printStackTrace(); + handleDataProcessException(e); + } finally { + lock.unlock(); + dataMergeSvr.clear(); + } + } + public void outputMergeResult(final ServerConnection source, + final byte[] eof, List results) { + try { + lock.lock(); + ByteBuffer buffer = session.getSource().allocate(); + final RouteResultset rrs = this.dataMergeSvr.getRrs(); + + // 处理limit语句 + int start = rrs.getLimitStart(); + int end = start + rrs.getLimitSize(); + + if (start < 0) { + start = 0; + } + + if (rrs.getLimitSize() < 0) { + end = results.size(); + } + +// // 对于不需要排序的语句,返回的数据只有rrs.getLimitSize() +// if (rrs.getOrderByCols() == null) { +// end = results.size(); +// start = 0; +// } + if (end > results.size()) { + end = results.size(); + } + +// for (int i = start; i < end; i++) { +// RowDataPacket row = results.get(i); +// if( prepared ) { +// BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket(); +// binRowDataPk.read(fieldPackets, row); +// binRowDataPk.packetId = ++packetId; +// //binRowDataPk.write(source); +// buffer = binRowDataPk.write(buffer, session.getSource(), true); +// } else { +// row.packetId = ++packetId; +// buffer = row.write(buffer, source, true); +// } +// } + + if(prepared) { + for (int i = start; i < end; i++) { + RowDataPacket row = results.get(i); + BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket(); + binRowDataPk.read(fieldPackets, row); + binRowDataPk.packetId = ++packetId; + //binRowDataPk.write(source); + buffer = binRowDataPk.write(buffer, session.getSource(), true); + } + } else { + for (int i = start; i < end; i++) { + RowDataPacket row = results.get(i); + row.packetId = ++packetId; + buffer = row.write(buffer, source, true); + } + } + + eof[3] = ++packetId; + if (LOGGER.isDebugEnabled()) { + 
LOGGER.debug("last packet id:" + packetId); + } + source.write(source.writeToBuffer(eof, buffer)); + + } catch (Exception e) { + handleDataProcessException(e); + } finally { + lock.unlock(); + dataMergeSvr.clear(); + } + } + + @Override + public void fieldEofResponse(byte[] header, List fields, + byte[] eof, BackendConnection conn) { + + //huangyiming add + this.header = header; + this.fields = fields; + MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler(); + /*if(null !=middlerResultHandler ){ + return; + }*/ + this.netOutBytes += header.length; + this.netOutBytes += eof.length; + for (int i = 0, len = fields.size(); i < len; ++i) { + byte[] field = fields.get(i); + this.netOutBytes += field.length; + } + + ServerConnection source = null; + + if (fieldsReturned) { + return; + } + lock.lock(); + try { + if (fieldsReturned) { + return; + } + fieldsReturned = true; + + boolean needMerg = (dataMergeSvr != null) + && dataMergeSvr.getRrs().needMerge(); + Set shouldRemoveAvgField = new HashSet<>(); + Set shouldRenameAvgField = new HashSet<>(); + if (needMerg) { + Map mergeColsMap = dataMergeSvr.getRrs() + .getMergeCols(); + if (mergeColsMap != null) { + for (Map.Entry entry : mergeColsMap + .entrySet()) { + String key = entry.getKey(); + int mergeType = entry.getValue(); + if (MergeCol.MERGE_AVG == mergeType + && mergeColsMap.containsKey(key + "SUM")) { + shouldRemoveAvgField.add((key + "COUNT") + .toUpperCase()); + shouldRenameAvgField.add((key + "SUM") + .toUpperCase()); + } + } + } + + } + + source = session.getSource(); + ByteBuffer buffer = source.allocate(); + fieldCount = fields.size(); + if (shouldRemoveAvgField.size() > 0) { + ResultSetHeaderPacket packet = new ResultSetHeaderPacket(); + packet.packetId = ++packetId; + packet.fieldCount = fieldCount - shouldRemoveAvgField.size(); + buffer = packet.write(buffer, source, true); + } else { + + header[3] = ++packetId; + buffer = source.writeToBuffer(header, buffer); + } + + String primaryKey 
= null; + if (rrs.hasPrimaryKeyToCache()) { + String[] items = rrs.getPrimaryKeyItems(); + priamaryKeyTable = items[0]; + primaryKey = items[1]; + } + + Map columToIndx = new HashMap( + fieldCount); + + for (int i = 0, len = fieldCount; i < len; ++i) { + boolean shouldSkip = false; + byte[] field = fields.get(i); + if (needMerg) { + FieldPacket fieldPkg = new FieldPacket(); + fieldPkg.read(field); + fieldPackets.add(fieldPkg); + String fieldName = new String(fieldPkg.name).toUpperCase(); + if (columToIndx != null + && !columToIndx.containsKey(fieldName)) { + if (shouldRemoveAvgField.contains(fieldName)) { + shouldSkip = true; + fieldPackets.remove(fieldPackets.size() - 1); + } + if (shouldRenameAvgField.contains(fieldName)) { + String newFieldName = fieldName.substring(0, + fieldName.length() - 3); + fieldPkg.name = newFieldName.getBytes(); + fieldPkg.packetId = ++packetId; + shouldSkip = true; + // 处理AVG字段位数和精度, AVG位数 = SUM位数 - 14 + fieldPkg.length = fieldPkg.length - 14; + // AVG精度 = SUM精度 + 4 + fieldPkg.decimals = (byte) (fieldPkg.decimals + 4); + buffer = fieldPkg.write(buffer, source, false); + + // 还原精度 + fieldPkg.decimals = (byte) (fieldPkg.decimals - 4); + } + + ColMeta colMeta = new ColMeta(i, fieldPkg.type); + colMeta.decimals = fieldPkg.decimals; + columToIndx.put(fieldName, colMeta); + } + } else { + FieldPacket fieldPkg = new FieldPacket(); + fieldPkg.read(field); + fieldPackets.add(fieldPkg); + fieldCount = fields.size(); + if (primaryKey != null && primaryKeyIndex == -1) { + // find primary key index + String fieldName = new String(fieldPkg.name); + if (primaryKey.equalsIgnoreCase(fieldName)) { + primaryKeyIndex = i; + } + } } + if (!shouldSkip) { + field[3] = ++packetId; + buffer = source.writeToBuffer(field, buffer); + } + } + eof[3] = ++packetId; + buffer = source.writeToBuffer(eof, buffer); + + if(null == middlerResultHandler ){ + //session.getSource().write(row); + source.write(buffer); + } + + if (dataMergeSvr != null) { + 
dataMergeSvr.onRowMetaData(columToIndx, fieldCount); + + } + } catch (Exception e) { + handleDataProcessException(e); + } finally { + lock.unlock(); + } + } + + public void handleDataProcessException(Exception e) { + if (!errorRepsponsed.get()) { + this.error = e.toString(); + LOGGER.warn("caught exception ", e); + setFail(e.toString()); + this.tryErrorFinished(true); + } + } + + @Override + public void rowResponse(final byte[] row, final BackendConnection conn) { + + if (errorRepsponsed.get()) { + // the connection has been closed or set to "txInterrupt" properly + //in tryErrorFinished() method! If we close it here, it can + // lead to tx error such as blocking rollback tx for ever. + // @author Uncle-pan + // @since 2016-03-25 + //conn.close(error); + return; + } + + + lock.lock(); + try { + + this.selectRows++; + + RouteResultsetNode rNode = (RouteResultsetNode) conn.getAttachment(); + String dataNode = rNode.getName(); + if (dataMergeSvr != null) { + // even through discarding the all rest data, we can't + //close the connection for tx control such as rollback or commit. + // So the "isClosedByDiscard" variable is unnecessary. + // @author Uncle-pan + // @since 2016-03-25 + dataMergeSvr.onNewRecord(dataNode, row); + + MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler(); + if(null != middlerResultHandler ){ + if(middlerResultHandler instanceof MiddlerQueryResultHandler){ + byte[] rv = ResultSetUtil.getColumnVal(row, fields, 0); + String rowValue = rv==null? 
"":new String(rv); + middlerResultHandler.add(rowValue); + } + } + } else { + row[3] = ++packetId; + RowDataPacket rowDataPkg =null; + // cache primaryKey-> dataNode + if (primaryKeyIndex != -1) { + rowDataPkg = new RowDataPacket(fieldCount); + rowDataPkg.read(row); + String primaryKey = new String(rowDataPkg.fieldValues.get(primaryKeyIndex)); + LayerCachePool pool = MycatServer.getInstance().getRouterservice().getTableId2DataNodeCache(); + pool.putIfAbsent(priamaryKeyTable, primaryKey, dataNode); + } + if( prepared ) { + if(rowDataPkg==null) { + rowDataPkg = new RowDataPacket(fieldCount); + rowDataPkg.read(row); + } + BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket(); + binRowDataPk.read(fieldPackets, rowDataPkg); + binRowDataPk.write(session.getSource()); + } else { + //add huangyiming + MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler(); + if(null == middlerResultHandler ){ + session.getSource().write(row); + }else{ + + if(middlerResultHandler instanceof MiddlerQueryResultHandler){ + String rowValue = ResultSetUtil.getColumnValAsString(row, fields, 0); + middlerResultHandler.add(rowValue); + } + + } + } + } + + } catch (Exception e) { + handleDataProcessException(e); + } finally { + lock.unlock(); + } + } + + @Override + public void clearResources() { + if (dataMergeSvr != null) { + dataMergeSvr.clear(); + } + } + + @Override + public void writeQueueAvailable() { + } + + @Override + public void requestDataResponse(byte[] data, BackendConnection conn) { + LoadDataUtil.requestFileDataResponse(data, conn); + } + + public boolean isPrepared() { + return prepared; + } + + public void setPrepared(boolean prepared) { + this.prepared = prepared; + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/executors/NewConnectionRespHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/NewConnectionRespHandler.java similarity index 78% rename from 
src/main/java/io/mycat/server/executors/NewConnectionRespHandler.java rename to src/main/java/io/mycat/backend/mysql/nio/handler/NewConnectionRespHandler.java index d5689246a..db5e3fe7a 100644 --- a/src/main/java/io/mycat/server/executors/NewConnectionRespHandler.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/NewConnectionRespHandler.java @@ -21,65 +21,73 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.executors; - -import io.mycat.backend.BackendConnection; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +package io.mycat.backend.mysql.nio.handler; import java.util.List; -public class NewConnectionRespHandler implements ResponseHandler { +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.BackendConnection; + +public class NewConnectionRespHandler implements ResponseHandler{ private static final Logger LOGGER = LoggerFactory .getLogger(NewConnectionRespHandler.class); - @Override public void connectionError(Throwable e, BackendConnection conn) { - LOGGER.warn(conn + " connectionError " + e); - + LOGGER.warn(conn+" connectionError "+e); + } @Override public void connectionAcquired(BackendConnection conn) { // - conn.release(); - LOGGER.info("connectionAcquired " + conn); - + LOGGER.info("connectionAcquired "+conn); + + conn.release(); // NewConnectionRespHandler ��Ϊ��������ڿ����������������ã���Ҫ�½����ӣ������½����ӵ�ʱ�� + } @Override public void errorResponse(byte[] err, BackendConnection conn) { LOGGER.warn("caught error resp: " + conn + " " + new String(err)); + conn.release(); } @Override public void okResponse(byte[] ok, BackendConnection conn) { - LOGGER.info("okResponse: " + conn); - + LOGGER.info("okResponse: " + conn ); + conn.release(); } @Override public void fieldEofResponse(byte[] header, List fields, byte[] eof, BackendConnection conn) { - LOGGER.info("fieldEofResponse: " + conn); - + LOGGER.info("fieldEofResponse: " + conn ); + } @Override public void rowResponse(byte[] 
row, BackendConnection conn) { - LOGGER.info("rowResponse: " + conn); - + LOGGER.info("rowResponse: " + conn ); + } @Override public void rowEofResponse(byte[] eof, BackendConnection conn) { - LOGGER.info("rowEofResponse: " + conn); + LOGGER.info("rowEofResponse: " + conn ); + conn.release(); + } + @Override + public void writeQueueAvailable() { + + } @Override public void connectionClose(BackendConnection conn, String reason) { - + + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/executors/ResponseHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/ResponseHandler.java similarity index 94% rename from src/main/java/io/mycat/server/executors/ResponseHandler.java rename to src/main/java/io/mycat/backend/mysql/nio/handler/ResponseHandler.java index e5b714479..9a113fa78 100644 --- a/src/main/java/io/mycat/server/executors/ResponseHandler.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/ResponseHandler.java @@ -21,12 +21,12 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.executors; - -import io.mycat.backend.BackendConnection; +package io.mycat.backend.mysql.nio.handler; import java.util.List; +import io.mycat.backend.BackendConnection; + /** * @author mycat * @author mycat @@ -72,9 +72,16 @@ void fieldEofResponse(byte[] header, List fields, byte[] eof, */ void rowEofResponse(byte[] eof, BackendConnection conn); + /** + * 写队列为空,可以写数据了 + * + */ + void writeQueueAvailable(); + /** * on connetion close event */ void connectionClose(BackendConnection conn, String reason); + } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/executors/RollbackNodeHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/RollbackNodeHandler.java similarity index 72% rename from src/main/java/io/mycat/server/executors/RollbackNodeHandler.java rename to src/main/java/io/mycat/backend/mysql/nio/handler/RollbackNodeHandler.java index 923b17f3b..1c6463d77 100644 --- a/src/main/java/io/mycat/server/executors/RollbackNodeHandler.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/RollbackNodeHandler.java @@ -21,21 +21,23 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.executors; +package io.mycat.backend.mysql.nio.handler; + +import java.util.List; + +import io.mycat.backend.mysql.nio.MySQLConnection; +import io.mycat.config.ErrorCode; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.mycat.backend.BackendConnection; import io.mycat.route.RouteResultsetNode; import io.mycat.server.NonBlockingSession; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; /** * @author mycat */ public class RollbackNodeHandler extends MultiNodeHandler { - public static final Logger LOGGER = LoggerFactory + private static final Logger LOGGER = LoggerFactory .getLogger(RollbackNodeHandler.class); public RollbackNodeHandler(NonBlockingSession session) { @@ -59,15 +61,20 @@ public void rollback() { int started = 0; for (final RouteResultsetNode node : session.getTargetKeys()) { if (node == null) { - try { LOGGER.error("null is contained in RoutResultsetNodes, source = " + session.getSource()); - } catch (Exception e) { - } continue; } final BackendConnection conn = session.getTarget(node); + if (conn != null) { + boolean isClosed=conn.isClosedOrQuit(); + if(isClosed) + { + session.getSource().writeErrMessage(ErrorCode.ER_UNKNOWN_ERROR, + "receive rollback,but find backend con is closed or quit"); + LOGGER.error( conn+"receive rollback,but fond backend con is closed or quit"); + } if (LOGGER.isDebugEnabled()) { LOGGER.debug("rollback job run for " + conn); } @@ -75,7 +82,18 @@ public void rollback() { return; } conn.setResponseHandler(RollbackNodeHandler.this); - conn.rollback(); + + //support the XA rollback + if(session.getXaTXID()!=null && conn instanceof MySQLConnection) { + MySQLConnection mysqlCon = (MySQLConnection) conn; + String xaTxId = session.getXaTXID() +",'"+ mysqlCon.getSchema()+"'"; + //exeBatch cmd issue : the 2nd package can not receive the response + mysqlCon.execCmd("XA END " + xaTxId + ";"); + mysqlCon.execCmd("XA ROLLBACK " + xaTxId + ";"); + }else { + 
conn.rollback(); + } + ++started; } @@ -98,6 +116,16 @@ public void okResponse(byte[] ok, BackendConnection conn) { if (this.isFail() || session.closed()) { tryErrorFinished(true); } else { + /* 1. 事务结束后,xa事务结束 */ + if(session.getXaTXID()!=null){ + session.setXATXEnabled(false); + } + + /* 2. preAcStates 为true,事务结束后,需要设置为true。preAcStates 为ac上一个状态 */ + if(session.getSource().isPreAcStates()&&!session.getSource().isAutocommit()){ + session.getSource().setAutocommit(true); + } + session.getSource().write(ok); } } @@ -130,4 +158,9 @@ public void rowResponse(byte[] row, BackendConnection conn) { .append(": field's eof").toString()); } -} \ No newline at end of file + @Override + public void writeQueueAvailable() { + + } + +} diff --git a/src/main/java/io/mycat/server/executors/RollbackReleaseHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/RollbackReleaseHandler.java similarity index 89% rename from src/main/java/io/mycat/server/executors/RollbackReleaseHandler.java rename to src/main/java/io/mycat/backend/mysql/nio/handler/RollbackReleaseHandler.java index fe5d21ad6..477412721 100644 --- a/src/main/java/io/mycat/server/executors/RollbackReleaseHandler.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/RollbackReleaseHandler.java @@ -21,14 +21,14 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.executors; - -import io.mycat.backend.BackendConnection; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +package io.mycat.backend.mysql.nio.handler; import java.util.List; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.BackendConnection; + /** * @author mycat */ @@ -56,7 +56,7 @@ public void errorResponse(byte[] err, BackendConnection conn) { @Override public void okResponse(byte[] ok, BackendConnection conn) { - logger.debug("autocomit is false,but no commit or rollback ,so mycat rollbacked backend conn "+conn); + logger.debug("autocomit is false,but no commit or rollback ,so mycat rollbacked backend conn "+conn); conn.release(); } @@ -74,6 +74,11 @@ public void rowEofResponse(byte[] eof, BackendConnection conn) { } + @Override + public void writeQueueAvailable() { + + } + @Override public void connectionClose(BackendConnection conn, String reason) { diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/SecondHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/SecondHandler.java new file mode 100644 index 000000000..4aa7b8419 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/SecondHandler.java @@ -0,0 +1,13 @@ +package io.mycat.backend.mysql.nio.handler; + +import java.util.List; + +/** + * 查询分解后的第二部处理 + * @author huangyiming + * + */ +public interface SecondHandler { + + public void doExecute(List params); +} diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/SecondQueryHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/SecondQueryHandler.java new file mode 100644 index 000000000..1dd701fd7 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/SecondQueryHandler.java @@ -0,0 +1,19 @@ +package io.mycat.backend.mysql.nio.handler; + +import java.util.List; + +public class SecondQueryHandler implements SecondHandler { + + public MiddlerResultHandler middlerResultHandler; + public 
SecondQueryHandler(MiddlerResultHandler middlerResultHandler){ + this.middlerResultHandler = middlerResultHandler; + } + + @Override + public void doExecute(List params) { + // TODO Auto-generated method stub + + } + + +} diff --git a/src/main/java/io/mycat/server/executors/SimpleLogHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/SimpleLogHandler.java similarity index 80% rename from src/main/java/io/mycat/server/executors/SimpleLogHandler.java rename to src/main/java/io/mycat/backend/mysql/nio/handler/SimpleLogHandler.java index ce9dd5d72..5b46714af 100644 --- a/src/main/java/io/mycat/server/executors/SimpleLogHandler.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/SimpleLogHandler.java @@ -21,28 +21,27 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.executors; - -import io.mycat.backend.BackendConnection; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +package io.mycat.backend.mysql.nio.handler; import java.util.List; -public class SimpleLogHandler implements ResponseHandler { +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.BackendConnection; + +public class SimpleLogHandler implements ResponseHandler{ private static final Logger LOGGER = LoggerFactory .getLogger(SimpleLogHandler.class); - @Override public void connectionError(Throwable e, BackendConnection conn) { - LOGGER.warn(conn + " connectionError " + e); - + LOGGER.warn(conn+" connectionError "+e); + } @Override public void connectionAcquired(BackendConnection conn) { - LOGGER.info("connectionAcquired " + conn); - + LOGGER.info("connectionAcquired "+conn); + } @Override @@ -52,32 +51,39 @@ public void errorResponse(byte[] err, BackendConnection conn) { @Override public void okResponse(byte[] ok, BackendConnection conn) { - LOGGER.info("okResponse: " + conn); - + LOGGER.info("okResponse: " + conn ); + } @Override public void fieldEofResponse(byte[] header, List fields, byte[] eof, BackendConnection conn) { 
- LOGGER.info("fieldEofResponse: " + conn); - + LOGGER.info("fieldEofResponse: " + conn ); + } @Override public void rowResponse(byte[] row, BackendConnection conn) { - LOGGER.info("rowResponse: " + conn); - + LOGGER.info("rowResponse: " + conn ); + } @Override public void rowEofResponse(byte[] eof, BackendConnection conn) { - LOGGER.info("rowEofResponse: " + conn); + LOGGER.info("rowEofResponse: " + conn ); + + } + @Override + public void writeQueueAvailable() { + + } @Override public void connectionClose(BackendConnection conn, String reason) { - + + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/SingleNodeHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/SingleNodeHandler.java new file mode 100644 index 000000000..fa105fc3e --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/SingleNodeHandler.java @@ -0,0 +1,531 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.backend.mysql.nio.handler; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import com.google.common.base.Strings; + +import io.mycat.MycatServer; +import io.mycat.backend.BackendConnection; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.mysql.LoadDataUtil; +import io.mycat.config.ErrorCode; +import io.mycat.config.MycatConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.net.mysql.BinaryRowDataPacket; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.OkPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteResultsetNode; +import io.mycat.server.NonBlockingSession; +import io.mycat.server.ServerConnection; +import io.mycat.server.parser.ServerParse; +import io.mycat.server.parser.ServerParseShow; +import io.mycat.server.response.ShowFullTables; +import io.mycat.server.response.ShowTables; +import io.mycat.statistic.stat.QueryResult; +import io.mycat.statistic.stat.QueryResultDispatcher; +import io.mycat.util.ResultSetUtil; +import io.mycat.util.StringUtil; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; +/** + * @author mycat + */ +public class SingleNodeHandler implements ResponseHandler, Terminatable, LoadDataResponseHandler { + + private static final Logger LOGGER = LoggerFactory.getLogger(SingleNodeHandler.class); + + private final RouteResultsetNode node; + private final RouteResultset rrs; + private final NonBlockingSession session; + + // only one thread access at one time no need lock + private volatile byte packetId; + private volatile ByteBuffer buffer; + private volatile boolean isRunning; + private Runnable terminateCallBack; + private long startTime; + private long 
netInBytes; + private long netOutBytes; + private long selectRows; + private long affectedRows; + + private boolean prepared; + private int fieldCount; + private List fieldPackets = new ArrayList(); + + private volatile boolean isDefaultNodeShowTable; + private volatile boolean isDefaultNodeShowFullTable; + private Set shardingTablesSet; + private byte[] header = null; + private List fields = null; + public SingleNodeHandler(RouteResultset rrs, NonBlockingSession session) { + this.rrs = rrs; + this.node = rrs.getNodes()[0]; + + if (node == null) { + throw new IllegalArgumentException("routeNode is null!"); + } + + if (session == null) { + throw new IllegalArgumentException("session is null!"); + } + + this.session = session; + ServerConnection source = session.getSource(); + String schema = source.getSchema(); + if (schema != null && ServerParse.SHOW == rrs.getSqlType()) { + SchemaConfig schemaConfig = MycatServer.getInstance().getConfig().getSchemas().get(schema); + int type = ServerParseShow.tableCheck(rrs.getStatement(), 0); + isDefaultNodeShowTable = (ServerParseShow.TABLES == type && !Strings.isNullOrEmpty(schemaConfig.getDataNode())); + isDefaultNodeShowFullTable = (ServerParseShow.FULLTABLES == type && !Strings.isNullOrEmpty(schemaConfig.getDataNode())); + if (isDefaultNodeShowTable) { + shardingTablesSet = ShowTables.getTableSet(source, rrs.getStatement()); + + } else if (isDefaultNodeShowFullTable) { + shardingTablesSet = ShowFullTables.getTableSet(source, rrs.getStatement()); + } + } + + if ( rrs != null && rrs.getStatement() != null) { + netInBytes += rrs.getStatement().getBytes().length; + } + + } + + @Override + public void terminate(Runnable callback) { + boolean zeroReached = false; + + if (isRunning) { + terminateCallBack = callback; + } else { + zeroReached = true; + } + + if (zeroReached) { + callback.run(); + } + } + + private void endRunning() { + Runnable callback = null; + if (isRunning) { + isRunning = false; + callback = terminateCallBack; + 
terminateCallBack = null; + } + + if (callback != null) { + callback.run(); + } + } + + private void recycleResources() { + + ByteBuffer buf = buffer; + if (buf != null) { + session.getSource().recycle(buffer); + buffer = null; + } + } + + public void execute() throws Exception { + startTime=System.currentTimeMillis(); + ServerConnection sc = session.getSource(); + this.isRunning = true; + this.packetId = 0; + final BackendConnection conn = session.getTarget(node); + LOGGER.debug("rrs.getRunOnSlave() " + rrs.getRunOnSlave()); + node.setRunOnSlave(rrs.getRunOnSlave()); // 实现 master/slave注解 + LOGGER.debug("node.getRunOnSlave() " + node.getRunOnSlave()); + + if (session.tryExistsCon(conn, node)) { + _execute(conn); + } else { + // create new connection + + MycatConfig conf = MycatServer.getInstance().getConfig(); + + LOGGER.debug("node.getRunOnSlave() " + node.getRunOnSlave()); + node.setRunOnSlave(rrs.getRunOnSlave()); // 实现 master/slave注解 + LOGGER.debug("node.getRunOnSlave() " + node.getRunOnSlave()); + + PhysicalDBNode dn = conf.getDataNodes().get(node.getName()); + dn.getConnection(dn.getDatabase(), sc.isAutocommit(), node, this, node); + } + + } + + @Override + public void connectionAcquired(final BackendConnection conn) { + session.bindConnection(node, conn); + _execute(conn); + + } + + private void _execute(BackendConnection conn) { + if (session.closed()) { + endRunning(); + session.clearResources(true); + return; + } + conn.setResponseHandler(this); + try { + conn.execute(node, session.getSource(), session.getSource() + .isAutocommit()); + } catch (Exception e1) { + executeException(conn, e1); + return; + } + } + + private void executeException(BackendConnection c, Exception e) { + ErrorPacket err = new ErrorPacket(); + err.packetId = ++packetId; + err.errno = ErrorCode.ERR_FOUND_EXCEPION; + err.message = StringUtil.encode(e.toString(), session.getSource().getCharset()); + + this.backConnectionErr(err, c); + } + + @Override + public void 
connectionError(Throwable e, BackendConnection conn) { + + endRunning(); + ErrorPacket err = new ErrorPacket(); + err.packetId = ++packetId; + err.errno = ErrorCode.ER_NEW_ABORTING_CONNECTION; + err.message = StringUtil.encode(e.getMessage(), session.getSource().getCharset()); + + ServerConnection source = session.getSource(); + source.write(err.write(allocBuffer(), source, true)); + } + + @Override + public void errorResponse(byte[] data, BackendConnection conn) { + ErrorPacket err = new ErrorPacket(); + err.read(data); + err.packetId = ++packetId; + backConnectionErr(err, conn); + } + + private void backConnectionErr(ErrorPacket errPkg, BackendConnection conn) { + endRunning(); + + ServerConnection source = session.getSource(); + String errUser = source.getUser(); + String errHost = source.getHost(); + int errPort = source.getLocalPort(); + + String errmgs = " errno:" + errPkg.errno + " " + new String(errPkg.message); + LOGGER.warn("execute sql err :" + errmgs + " con:" + conn + + " frontend host:" + errHost + "/" + errPort + "/" + errUser); + + session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false); + + source.setTxInterrupt(errmgs); + + /** + * TODO: 修复全版本BUG + * + * BUG复现: + * 1、MysqlClient: SELECT 9223372036854775807 + 1; + * 2、MyCatServer: ERROR 1690 (22003): BIGINT value is out of range in '(9223372036854775807 + 1)' + * 3、MysqlClient: ERROR 2013 (HY000): Lost connection to MySQL server during query + * + * Fixed后 + * 1、MysqlClient: SELECT 9223372036854775807 + 1; + * 2、MyCatServer: ERROR 1690 (22003): BIGINT value is out of range in '(9223372036854775807 + 1)' + * 3、MysqlClient: ERROR 1690 (22003): BIGINT value is out of range in '(9223372036854775807 + 1)' + * + */ + // 由于 pakcetId != 1 造成的问题 + errPkg.packetId = 1; + errPkg.write(source); + + recycleResources(); + } + + + /** + * insert/update/delete + * + * okResponse():读取data字节数组,组成一个OKPacket,并调用ok.write(source)将结果写入前端连接FrontendConnection的写缓冲队列writeQueue中, + * 
真正发送给应用是由对应的NIOSocketWR从写队列中读取ByteBuffer并返回的 + */ + @Override + public void okResponse(byte[] data, BackendConnection conn) { + // + this.netOutBytes += data.length; + + boolean executeResponse = conn.syncAndExcute(); + if (executeResponse) { + ServerConnection source = session.getSource(); + OkPacket ok = new OkPacket(); + ok.read(data); + boolean isCanClose2Client =(!rrs.isCallStatement()) ||(rrs.isCallStatement() &&!rrs.getProcedure().isResultSimpleValue()); + if (rrs.isLoadData()) { + byte lastPackId = source.getLoadDataInfileHandler().getLastPackId(); + ok.packetId = ++lastPackId;// OK_PACKET + source.getLoadDataInfileHandler().clear(); + + } else if (isCanClose2Client) { + ok.packetId = ++packetId;// OK_PACKET + } + + + if (isCanClose2Client) { + session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false); + endRunning(); + } + ok.serverStatus = source.isAutocommit() ? 2 : 1; + recycleResources(); + + if (isCanClose2Client) { + source.setLastInsertId(ok.insertId); + ok.write(source); + } + + this.affectedRows = ok.affectedRows; + + source.setExecuteSql(null); + // add by lian + // 解决sql统计中写操作永远为0 + QueryResult queryResult = new QueryResult(session.getSource().getUser(), + rrs.getSqlType(), rrs.getStatement(), affectedRows, netInBytes, netOutBytes, startTime, System.currentTimeMillis(),0); + QueryResultDispatcher.dispatchQuery( queryResult ); + } + } + + + /** + * select + * + * 行结束标志返回时触发,将EOF标志写入缓冲区,最后调用source.write(buffer)将缓冲区放入前端连接的写缓冲队列中,等待NIOSocketWR将其发送给应用 + */ + @Override + public void rowEofResponse(byte[] eof, BackendConnection conn) { + + this.netOutBytes += eof.length; + + ServerConnection source = session.getSource(); + conn.recordSql(source.getHost(), source.getSchema(), node.getStatement()); + // 判断是调用存储过程的话不能在这里释放链接 + if (!rrs.isCallStatement()||(rrs.isCallStatement()&&rrs.getProcedure().isResultSimpleValue())) + { + session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false); + endRunning(); + } + + eof[3] = ++packetId; + 
buffer = source.writeToBuffer(eof, allocBuffer()); + int resultSize = source.getWriteQueue().size()*MycatServer.getInstance().getConfig().getSystem().getBufferPoolPageSize(); + resultSize=resultSize+buffer.position(); + MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler(); + + if(middlerResultHandler !=null ){ + middlerResultHandler.secondEexcute(); + } else{ + source.write(buffer); + } + source.setExecuteSql(null); + //TODO: add by zhuam + //查询结果派发 + QueryResult queryResult = new QueryResult(session.getSource().getUser(), + rrs.getSqlType(), rrs.getStatement(), affectedRows, netInBytes, netOutBytes, startTime, System.currentTimeMillis(),resultSize); + QueryResultDispatcher.dispatchQuery( queryResult ); + + } + + /** + * lazy create ByteBuffer only when needed + * + * @return + */ + private ByteBuffer allocBuffer() { + if (buffer == null) { + buffer = session.getSource().allocate(); + } + return buffer; + } + + /** + * select + * + * 元数据返回时触发,将header和元数据内容依次写入缓冲区中 + */ + @Override + public void fieldEofResponse(byte[] header, List fields, + byte[] eof, BackendConnection conn) { + this.header = header; + this.fields = fields; + MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler(); + if(null !=middlerResultHandler ){ + return; + } + this.netOutBytes += header.length; + for (int i = 0, len = fields.size(); i < len; ++i) { + byte[] field = fields.get(i); + this.netOutBytes += field.length; + } + + header[3] = ++packetId; + ServerConnection source = session.getSource(); + buffer = source.writeToBuffer(header, allocBuffer()); + for (int i = 0, len = fields.size(); i < len; ++i) { + byte[] field = fields.get(i); + field[3] = ++packetId; + + // 保存field信息 + FieldPacket fieldPk = new FieldPacket(); + fieldPk.read(field); + fieldPackets.add(fieldPk); + + buffer = source.writeToBuffer(field, buffer); + } + + fieldCount = fieldPackets.size(); + + eof[3] = ++packetId; + buffer = source.writeToBuffer(eof, buffer); + + if 
(isDefaultNodeShowTable) { + + for (String name : shardingTablesSet) { + RowDataPacket row = new RowDataPacket(1); + row.add(StringUtil.encode(name.toLowerCase(), source.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, source, true); + } + + } else if (isDefaultNodeShowFullTable) { + + for (String name : shardingTablesSet) { + RowDataPacket row = new RowDataPacket(1); + row.add(StringUtil.encode(name.toLowerCase(), source.getCharset())); + row.add(StringUtil.encode("BASE TABLE", source.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, source, true); + } + } + } + + /** + * select + * + * 行数据返回时触发,将行数据写入缓冲区中 + */ + @Override + public void rowResponse(byte[] row, BackendConnection conn) { + + this.netOutBytes += row.length; + this.selectRows++; + + if (isDefaultNodeShowTable || isDefaultNodeShowFullTable) { + RowDataPacket rowDataPacket = new RowDataPacket(1); + rowDataPacket.read(row); + String table = StringUtil.decode(rowDataPacket.fieldValues.get(0), session.getSource().getCharset()); + if (shardingTablesSet.contains(table.toUpperCase())) { + return; + } + } + row[3] = ++packetId; + + if ( prepared ) { + RowDataPacket rowDataPk = new RowDataPacket(fieldCount); + rowDataPk.read(row); + BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket(); + binRowDataPk.read(fieldPackets, rowDataPk); + binRowDataPk.packetId = rowDataPk.packetId; +// binRowDataPk.write(session.getSource()); + /* + * [fix bug] : 这里不能直接将包写到前端连接, + * 因为在fieldEofResponse()方法结束后buffer还没写出, + * 所以这里应该将包数据顺序写入buffer(如果buffer满了就写出),然后再将buffer写出 + */ + buffer = binRowDataPk.write(buffer, session.getSource(), true); + } else { + + MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler(); + if(null ==middlerResultHandler ){ + buffer = session.getSource().writeToBuffer(row, allocBuffer()); + }else{ + if(middlerResultHandler instanceof MiddlerQueryResultHandler){ + byte[] rv = ResultSetUtil.getColumnVal(row, fields, 0); + String 
rowValue = rv==null?"":new String(rv); + middlerResultHandler.add(rowValue); + } + } + + } + + } + + @Override + public void writeQueueAvailable() { + + } + + @Override + public void connectionClose(BackendConnection conn, String reason) { + ErrorPacket err = new ErrorPacket(); + err.packetId = ++packetId; + err.errno = ErrorCode.ER_ERROR_ON_CLOSE; + err.message = StringUtil.encode(reason, session.getSource() + .getCharset()); + this.backConnectionErr(err, conn); + + } + + public void clearResources() { + + } + + @Override + public void requestDataResponse(byte[] data, BackendConnection conn) { + LoadDataUtil.requestFileDataResponse(data, conn); + } + + public boolean isPrepared() { + return prepared; + } + + public void setPrepared(boolean prepared) { + this.prepared = prepared; + } + + @Override + public String toString() { + return "SingleNodeHandler [node=" + node + ", packetId=" + packetId + "]"; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/executors/Terminatable.java b/src/main/java/io/mycat/backend/mysql/nio/handler/Terminatable.java similarity index 96% rename from src/main/java/io/mycat/server/executors/Terminatable.java rename to src/main/java/io/mycat/backend/mysql/nio/handler/Terminatable.java index 8085885ce..87188ce46 100644 --- a/src/main/java/io/mycat/server/executors/Terminatable.java +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/Terminatable.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.executors; +package io.mycat.backend.mysql.nio.handler; /** * @author mycat diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/UnLockTablesHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/UnLockTablesHandler.java new file mode 100644 index 000000000..24cbfbc66 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/nio/handler/UnLockTablesHandler.java @@ -0,0 +1,138 @@ +package io.mycat.backend.mysql.nio.handler; + +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.backend.BackendConnection; +import io.mycat.net.mysql.OkPacket; +import io.mycat.route.RouteResultsetNode; +import io.mycat.server.NonBlockingSession; +import io.mycat.server.parser.ServerParse; + +/** + * unlock tables 语句处理器 + * @author songdabin + * + */ +public class UnLockTablesHandler extends MultiNodeHandler implements ResponseHandler { + + private static final Logger LOGGER = LoggerFactory.getLogger(UnLockTablesHandler.class); + + private final NonBlockingSession session; + private final boolean autocommit; + private final String srcStatement; + + public UnLockTablesHandler(NonBlockingSession session, boolean autocommit, String sql) { + super(session); + this.session = session; + this.autocommit = autocommit; + this.srcStatement = sql; + } + + public void execute() { + Map lockedConns = session.getTargetMap(); + Set dnSet = lockedConns.keySet(); + this.reset(lockedConns.size()); + // 客户端直接发送unlock tables命令,由于之前未发送lock tables语句,无法获取后端绑定的连接,此时直接返回OK包 + if (lockedConns.size() == 0) { + LOGGER.warn("find no locked backend connection!"+session.getSource()); + OkPacket ok = new OkPacket(); + ok.packetId = ++ packetId; + ok.packetLength = 7; // unlock table 命令返回MySQL协议包长度为7 + ok.serverStatus = session.getSource().isAutocommit() ? 
2:1; + ok.write(session.getSource()); + return; + } + for (RouteResultsetNode dataNode : dnSet) { + RouteResultsetNode node = new RouteResultsetNode(dataNode.getName(), ServerParse.UNLOCK, srcStatement); + BackendConnection conn = lockedConns.get(dataNode); + if (clearIfSessionClosed(session)) { + return; + } + conn.setResponseHandler(this); + try { + conn.execute(node, session.getSource(), autocommit); + } catch (Exception e) { + connectionError(e, conn); + } + } + } + + @Override + public void connectionError(Throwable e, BackendConnection conn) { + super.connectionError(e, conn); + } + + @Override + public void connectionAcquired(BackendConnection conn) { + LOGGER.error("unexpected invocation: connectionAcquired from unlock tables"); + } + + @Override + public void errorResponse(byte[] err, BackendConnection conn) { + super.errorResponse(err, conn); + } + + @Override + public void okResponse(byte[] data, BackendConnection conn) { + boolean executeResponse = conn.syncAndExcute(); + if (executeResponse) { + boolean isEndPack = decrementCountBy(1); + session.releaseConnection(conn); + if (isEndPack) { + if (this.isFail() || session.closed()) { + tryErrorFinished(true); + return; + } + OkPacket ok = new OkPacket(); + ok.read(data); + lock.lock(); + try { + ok.packetId = ++ packetId; + ok.serverStatus = session.getSource().isAutocommit() ? 
2:1; + } finally { + lock.unlock(); + } + ok.write(session.getSource()); + } + } + } + + @Override + public void fieldEofResponse(byte[] header, List fields, byte[] eof, BackendConnection conn) { + LOGGER.error(new StringBuilder().append("unexpected packet for ") + .append(conn).append(" bound by ").append(session.getSource()) + .append(": field's eof").toString()); + } + + @Override + public void rowResponse(byte[] row, BackendConnection conn) { + LOGGER.warn(new StringBuilder().append("unexpected packet for ") + .append(conn).append(" bound by ").append(session.getSource()) + .append(": row data packet").toString()); + } + + @Override + public void rowEofResponse(byte[] eof, BackendConnection conn) { + LOGGER.error(new StringBuilder().append("unexpected packet for ") + .append(conn).append(" bound by ").append(session.getSource()) + .append(": row's eof").toString()); + } + + @Override + public void writeQueueAvailable() { + // TODO Auto-generated method stub + + } + + @Override + public void connectionClose(BackendConnection conn, String reason) { + // TODO Auto-generated method stub + + } + +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/CoordinatorLogEntry.java b/src/main/java/io/mycat/backend/mysql/xa/CoordinatorLogEntry.java new file mode 100644 index 000000000..62c76c7a8 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/CoordinatorLogEntry.java @@ -0,0 +1,41 @@ +package io.mycat.backend.mysql.xa; + +import java.io.Serializable; + +/** + * Created by zhangchao on 2016/10/17. 
+ */ +public class CoordinatorLogEntry implements Serializable { + + private static final long serialVersionUID = -919666492191340531L; + + public final String id; + +// public final boolean wasCommitted; + + public final ParticipantLogEntry[] participants; + + + public CoordinatorLogEntry(String coordinatorId, + ParticipantLogEntry[] participantDetails) { + this(coordinatorId, false, participantDetails, null); + } + + public CoordinatorLogEntry(String coordinatorId, boolean wasCommitted, + ParticipantLogEntry[] participants) { + this.id = coordinatorId; +// this.wasCommitted = wasCommitted; + this.participants = participants; + } + + public CoordinatorLogEntry(String coordinatorId, boolean wasCommitted, + ParticipantLogEntry[] participants, String superiorCoordinatorId) { + this.id = coordinatorId; +// this.wasCommitted = wasCommitted; + this.participants = participants; + } + + + + +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/Deserializer.java b/src/main/java/io/mycat/backend/mysql/xa/Deserializer.java new file mode 100644 index 000000000..812f511f9 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/Deserializer.java @@ -0,0 +1,108 @@ +package io.mycat.backend.mysql.xa; + +import io.mycat.backend.mysql.xa.recovery.DeserialisationException; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Created by zhangchao on 2016/10/17. 
+ */ +public class Deserializer { + + private static final String JSON_ARRAY_END = "]"; + + private static final String JSON_ARRAY_START = "["; + + private static final String OBJECT_START= "{"; + + private static final String OBJECT_END= "}"; + + List tokenize(String content) { + List result = new ArrayList(); + int endObject = content.indexOf(OBJECT_END); + while(endObject >0){ + String object = content.substring(0,endObject+1); + result.add(object); + content = content.substring(endObject+1); + endObject = content.indexOf(OBJECT_END); + } + return result; + } + + String extractArrayPart(String content) { + if(!content.contains(JSON_ARRAY_START) && !content.contains(JSON_ARRAY_END)) { + //no array... + return ""; + } + //else + int start=content.indexOf(JSON_ARRAY_START); + int end=content.indexOf(JSON_ARRAY_END); + + return content.substring(start+1, end); + } + public CoordinatorLogEntry fromJSON(String coordinatorLogEntryStr) throws DeserialisationException { + try { + String jsonContent = coordinatorLogEntryStr.trim(); + validateJSONContent(jsonContent); + Map header = extractHeader(jsonContent); + String coordinatorId = header.get("id"); + String arrayContent = extractArrayPart(jsonContent); + List elements = tokenize(arrayContent); + + ParticipantLogEntry[] participantLogEntries = new ParticipantLogEntry[elements.size()]; + + for (int i = 0; i < participantLogEntries.length; i++) { + participantLogEntries[i]=recreateParticipantLogEntry(coordinatorId,elements.get(i)); + } + + + CoordinatorLogEntry actual = new CoordinatorLogEntry(header.get("id"),Boolean.valueOf(header.get("wasCommitted")), participantLogEntries,header.get("superiorCoordinatorId")); + return actual; + } catch (Exception unexpectedEOF) { + throw new DeserialisationException(coordinatorLogEntryStr); + } + } + + private void validateJSONContent(String coordinatorLogEntryStr) + throws DeserialisationException { + if (!coordinatorLogEntryStr.startsWith(OBJECT_START)){ + throw new 
DeserialisationException(coordinatorLogEntryStr); + } + if (!coordinatorLogEntryStr.endsWith(OBJECT_END)){ + throw new DeserialisationException(coordinatorLogEntryStr); + } + } + + private Map extractHeader(String coordinatorLogEntryStr) { + Map header = new HashMap(2); + String[] attributes = coordinatorLogEntryStr.split(","); + for (String attribute : attributes) { + String[] pair = attribute.split(":"); + header.put(pair[0].replaceAll("\\{", "").replace("\"", ""), pair[1].replace("\"", "")); + } + return header; + } + + ParticipantLogEntry recreateParticipantLogEntry(String coordinatorId, + String participantLogEntry) { + participantLogEntry = participantLogEntry.replaceAll("\\{", "").replaceAll("\\}", ""); + + Map content = new HashMap(5); + String[] attributes = participantLogEntry.split(","); + for (String attribute : attributes) { + String[] pair = attribute.split(":"); + if(pair.length>1){ + content.put(pair[0].replace("\"", ""), pair[1].replace("\"", "")); + } + + } + + ParticipantLogEntry actual = new ParticipantLogEntry(coordinatorId, + content.get("uri"), Long.valueOf(content.get("expires")), content.get("resourceName"), Integer.parseInt(content.get("state"))); + return actual; + } + +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/LogFileLock.java b/src/main/java/io/mycat/backend/mysql/xa/LogFileLock.java new file mode 100644 index 000000000..c625f4a59 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/LogFileLock.java @@ -0,0 +1,77 @@ +package io.mycat.backend.mysql.xa; + +import io.mycat.backend.mysql.xa.recovery.LogException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.channels.FileLock; +import java.nio.channels.OverlappingFileLockException; + +/** + * Created by zhangchao on 2016/10/17. 
+ */ +public class LogFileLock { + public static final Logger logger = LoggerFactory + .getLogger(LogFileLock.class); + private static final String FILE_SEPARATOR = String.valueOf(File.separatorChar); + private File lockfileToPreventDoubleStartup_; + private FileOutputStream lockfilestream_ = null; + private FileLock lock_ = null; + + private String dir; + + private String fileName; + + public LogFileLock(String dir, String fileName) { + if(!dir.endsWith(FILE_SEPARATOR)) { + dir += FILE_SEPARATOR; + } + this.dir = dir; + this.fileName = fileName; + } + + public void acquireLock() throws LogException { + try { + File parent = new File(dir); + if(!parent.exists()) { + parent.mkdir(); + } + lockfileToPreventDoubleStartup_ = new File(dir, fileName + ".lck"); + lockfilestream_ = new FileOutputStream(lockfileToPreventDoubleStartup_); + lock_ = lockfilestream_.getChannel().tryLock(); + lockfileToPreventDoubleStartup_.deleteOnExit(); + } catch (OverlappingFileLockException failedToGetLock) { + // happens on windows + lock_ = null; + } catch (IOException failedToGetLock) { + // happens on windows + lock_ = null; + } + if (lock_ == null) { + logger.error("ERROR: the specified log seems to be in use already: " + fileName + " in " + dir + ". Make sure that no other instance is running, or kill any pending process if needed."); + throw new LogException("Log already in use? 
" + fileName + " in "+ dir); + } + } + + public void releaseLock() { + try { + if (lock_ != null) { + lock_.release(); + } + if (lockfilestream_ != null) + lockfilestream_.close(); + } catch (IOException e) { + logger.warn("Error releasing file lock: " + e.getMessage()); + } finally { + lock_ = null; + } + + if (lockfileToPreventDoubleStartup_ != null) { + lockfileToPreventDoubleStartup_.delete(); + lockfileToPreventDoubleStartup_ = null; + } + } +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/ParticipantLogEntry.java b/src/main/java/io/mycat/backend/mysql/xa/ParticipantLogEntry.java new file mode 100644 index 000000000..54bd77bd5 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/ParticipantLogEntry.java @@ -0,0 +1,76 @@ +package io.mycat.backend.mysql.xa; + +import java.io.Serializable; + +/** + * Created by zhangchao on 2016/10/17. + */ +public class ParticipantLogEntry implements Serializable { + + private static final long serialVersionUID = 1728296701394899871L; + + /** + * The ID of the global transaction as known by the transaction core. + */ + + public String coordinatorId; + + /** + * Identifies the participant within the global transaction. + */ + + public String uri; + + /** + * When does this participant expire (expressed in millis since Jan 1, 1970)? + */ + + public long expires; + + /** + * Best-known state of the participant. + */ + public int txState; + + /** + * For diagnostic purposes, null if not relevant. 
+ */ + public String resourceName; + + public ParticipantLogEntry(String coordinatorId, String uri, + long expires, String resourceName, int txState) { + this.coordinatorId = coordinatorId; + this.uri = uri; + this.expires = expires; + this.resourceName = resourceName; + this.txState = txState; + } + + + + @Override + public boolean equals(Object other) { + boolean ret = false; + if (other instanceof ParticipantLogEntry) { + ParticipantLogEntry o = (ParticipantLogEntry) other; + if (o.coordinatorId.equals(coordinatorId) && o.uri.equals(uri)) ret = true; + } + return ret; + } + + @Override + public int hashCode() { + return coordinatorId.hashCode(); + } + + + + @Override + public String toString() { + return "ParticipantLogEntry [id=" + coordinatorId + + ", uri=" + uri + ", expires=" + expires + + ", state=" + txState + ", resourceName=" + resourceName + "]"; + } + + +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/Serializer.java b/src/main/java/io/mycat/backend/mysql/xa/Serializer.java new file mode 100644 index 000000000..c16fad701 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/Serializer.java @@ -0,0 +1,54 @@ +package io.mycat.backend.mysql.xa; + +/** + * Created by zhangchao on 2016/10/17. 
+ */ +public class Serializer { + private static final String PROPERTY_SEPARATOR = ","; + private static final String QUOTE = "\""; + private static final String END_ARRAY = "]"; + private static final String START_ARRAY = "["; + private static final String START_OBJECT = "{"; + private static final String END_OBJECT = "}"; + private static final String LINE_SEPARATOR = System.getProperty("line.separator"); + + public String toJSON(CoordinatorLogEntry coordinatorLogEntry) { + StringBuilder strBuilder = new StringBuilder(600); + strBuilder.append(START_OBJECT); + strBuilder.append(QUOTE).append("id").append(QUOTE).append(":").append(QUOTE).append(coordinatorLogEntry.id).append(QUOTE); + strBuilder.append(PROPERTY_SEPARATOR); + //strBuilder.append(QUOTE).append("wasCommitted").append(QUOTE).append(":").append(coordinatorLogEntry.wasCommitted); + //strBuilder.append(PROPERTY_SEPARATOR); + + String prefix = ""; + if(coordinatorLogEntry.participants.length>0){ + strBuilder.append(QUOTE).append("participants").append(QUOTE); + strBuilder.append(":"); + strBuilder.append(START_ARRAY); + + for(ParticipantLogEntry participantLogEntry :coordinatorLogEntry.participants){ + if(participantLogEntry==null){continue;} + strBuilder.append(prefix); + prefix = PROPERTY_SEPARATOR; + strBuilder.append(START_OBJECT); + strBuilder.append(QUOTE).append("uri").append(QUOTE).append(":").append(QUOTE).append(participantLogEntry.uri).append(QUOTE); + strBuilder.append(PROPERTY_SEPARATOR); + strBuilder.append(QUOTE).append("state").append(QUOTE).append(":").append(QUOTE).append(participantLogEntry.txState).append(QUOTE); + strBuilder.append(PROPERTY_SEPARATOR); + strBuilder.append(QUOTE).append("expires").append(QUOTE).append(":").append(participantLogEntry.expires); + if (participantLogEntry.resourceName!=null) { + strBuilder.append(PROPERTY_SEPARATOR); + 
strBuilder.append(QUOTE).append("resourceName").append(QUOTE).append(":").append(QUOTE).append(participantLogEntry.resourceName).append(QUOTE); + } + strBuilder.append(END_OBJECT); + } +// for (ParticipantLogEntry participantLogEntry : coordinatorLogEntry.participants) { +// +// } + strBuilder.append(END_ARRAY); + } + strBuilder.append(END_OBJECT); + strBuilder.append(LINE_SEPARATOR); + return strBuilder.toString(); + } +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/TxState.java b/src/main/java/io/mycat/backend/mysql/xa/TxState.java new file mode 100644 index 000000000..fa84e048e --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/TxState.java @@ -0,0 +1,17 @@ +package io.mycat.backend.mysql.xa; + +/** + * Created by zhangchao on 2016/10/13. + */ +public class TxState { + /** XA INIT STATUS **/ + public static final int TX_INITIALIZE_STATE = 0; + /** XA STARTED STATUS **/ + public static final int TX_STARTED_STATE = 1; + /** XA is prepared **/ + public static final int TX_PREPARED_STATE = 2; + /** XA is commited **/ + public static final int TX_COMMITED_STATE = 3; + /** XA is rollbacked **/ + public static final int TX_ROLLBACKED_STATE = 4; +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/VersionedFile.java b/src/main/java/io/mycat/backend/mysql/xa/VersionedFile.java new file mode 100644 index 000000000..efd4414e0 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/VersionedFile.java @@ -0,0 +1,231 @@ +package io.mycat.backend.mysql.xa; + +import java.io.*; +import java.nio.channels.FileChannel; + +/** + * Created by zhangchao on 2016/10/17. 
+ */ +public class VersionedFile { + + private static final String FILE_SEPARATOR = String.valueOf(File.separatorChar); + private String baseDir; + private String suffix; + private String baseName; + + //state attributes below + + private long version; + private FileInputStream inputStream; + + private RandomAccessFile randomAccessFile; + + + /** + * Creates a new instance based on the given name parameters. + * The actual complete name(s) of the physical file(s) will be based on a version number + * inserted in between, to identify versions. + * + * @param baseDir The base folder. + * @param baseName The base name for of the file path/name. + * @param suffix The suffix to append to the complete file name. + */ + public VersionedFile ( String baseDir , String baseName , String suffix ) + { + + if(!baseDir.endsWith(FILE_SEPARATOR)) { + baseDir += FILE_SEPARATOR; + } + this.baseDir = baseDir; + this.suffix = suffix; + this.baseName = baseName; + resetVersion(); + } + + private void resetVersion() + { + this.version = extractLastValidVersionNumberFromFileNames(); + } + + private long extractLastValidVersionNumberFromFileNames() { + long version = -1; + File cd = new File ( getBaseDir() ); + String[] names = cd.list ( new FilenameFilter() { + public boolean accept ( File dir , String name ) + { + return (name.startsWith ( getBaseName() ) && name + .endsWith ( getSuffix() )); + } + } ); + if ( names!= null ) { + for ( int i = 0; i < names.length; i++ ) { + long sfx = extractVersion ( names[i] ); + if ( version < 0 || sfx < version ) + version = sfx; + } + } + + return version; + } + + private long extractVersion ( String name ) + { + long ret = 0; + int lastpos = name.lastIndexOf ( '.' 
); + int startpos = getBaseName().length (); + String suffix = name.substring ( startpos, lastpos ); + try { + + ret = Long.valueOf( suffix ); + } catch ( NumberFormatException e ) { + IllegalArgumentException err = new IllegalArgumentException ( "Error extracting version from file: " + name+" in " + getBaseDir() ); + err.initCause ( e ); + throw err; + } + return ret; + } + + private String getBackupVersionFileName() + { + return getBaseUrl() + (version - 1) + getSuffix(); + } + + public String getCurrentVersionFileName() + { + return getBaseUrl() + version + getSuffix(); + } + + public String getBaseUrl() + { + return baseDir + baseName; + } + + public String getBaseDir() + { + return this.baseDir; + } + + public String getBaseName() + { + return this.baseName; + } + + public String getSuffix() + { + return this.suffix; + } + + /** + * Opens the last valid version for reading. + * + * @return A stream to read the last valid contents + * of the file: either the backup version (if present) + * or the current (and only) version if no backup is found. + * + * @throws IllegalStateException If a newer version was opened for writing. + * @throws FileNotFoundException If no last version was found. + */ + public FileInputStream openLastValidVersionForReading() + throws IllegalStateException, FileNotFoundException + { + if ( randomAccessFile != null ) throw new IllegalStateException ( "Already started writing." ); + inputStream = new FileInputStream ( getCurrentVersionFileName() ); + return inputStream; + } + + /** + * Opens a new version for writing to. Note that + * this new version is tentative and cannot be read + * by {@link #openLastValidVersionForReading()} until + * {@link #discardBackupVersion()} is called. + * + * @return A stream for writing to. + * @throws IllegalStateException If called more than once + * without a close in between. + * @throws IOException If the file cannot be opened for writing. 
+ */ + public FileOutputStream openNewVersionForWriting() throws IOException + { + openNewVersionForNioWriting(); + return new FileOutputStream(randomAccessFile.getFD()); + } + + /** + * Opens a new version for writing to. Note that + * this new version is tentative and cannot be read + * by {@link #openLastValidVersionForReading()} until + * {@link #discardBackupVersion()} is called. + * + * @return A file for writing to. + * @throws IOException + * + * @throws IllegalStateException If called more than once + * without a close in between. + * @throws FileNotFoundException If the file cannot be opened for writing. + * @throws IOException + */ + public FileChannel openNewVersionForNioWriting() throws FileNotFoundException + { + if ( randomAccessFile != null ) throw new IllegalStateException ( "Already writing a new version." ); + //version++; + randomAccessFile = new RandomAccessFile(getCurrentVersionFileName(), "rw"); + return randomAccessFile.getChannel(); + } + /** + * Discards the backup version (if any). + * After calling this method, the newer version + * produced after calling {@link #openNewVersionForWriting()} + * becomes valid for reading next time when + * {@link #openLastValidVersionForReading()} is called. + * + * Note: it is the caller's responsibility to make sure that + * all new data has been flushed to disk before calling this method! + * + * @throws IllegalStateException If {@link #openNewVersionForWriting()} has not been called yet. + * @throws IOException If the previous version exists but could no be deleted. + */ + public void discardBackupVersion() throws IllegalStateException, IOException + { + if ( randomAccessFile == null ) throw new IllegalStateException ( "No new version yet!" 
); + String fileName = getBackupVersionFileName(); + + File temp = new File ( fileName ); + if ( temp.exists() && !temp.delete() ) throw new IOException ( "Failed to delete backup version: " + fileName ); + + } + + /** + * Closes any open resources and resets the file for reading again. + * @throws IOException If the output stream could not be closed. + */ + + public void close() throws IOException + { + resetVersion(); + if ( inputStream != null ) { + try { + inputStream.close(); + } catch (IOException e) { + //don't care and won't happen: closing an input stream + //does nothing says the JDK javadoc! + } finally { + inputStream = null; + } + } + if ( randomAccessFile != null ) { + try { + if ( randomAccessFile.getFD().valid() ) randomAccessFile.close(); + } finally { + randomAccessFile = null; + } + } + } + + public long getSize() + { + long res = -1; + File f = new File ( getCurrentVersionFileName() ); + res = f.length(); + return res; + } +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/XARollbackCallback.java b/src/main/java/io/mycat/backend/mysql/xa/XARollbackCallback.java new file mode 100644 index 000000000..fcca9d7c7 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/XARollbackCallback.java @@ -0,0 +1,23 @@ +package io.mycat.backend.mysql.xa; + +import io.mycat.sqlengine.SQLQueryResult; +import io.mycat.sqlengine.SQLQueryResultListener; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Map; + +/** + * Created by zhangchao on 2016/10/18. 
+ */ +public class XARollbackCallback implements SQLQueryResultListener>> { + + private static final Logger LOGGER = LoggerFactory.getLogger(XARollbackCallback.class); + + public void onResult(SQLQueryResult> result) { + + LOGGER.debug("[CALLBACK][XA ROLLBACK] when Mycat start"); + + + } +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/DeserialisationException.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/DeserialisationException.java new file mode 100644 index 000000000..d0c614d80 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/DeserialisationException.java @@ -0,0 +1,12 @@ +package io.mycat.backend.mysql.xa.recovery; + +/** + * Created by zhangchao on 2016/10/17. + */ +public class DeserialisationException extends Exception{ + private static final long serialVersionUID = -3835526236269555460L; + + public DeserialisationException(String content) { + super(content); + } +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/LogException.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/LogException.java new file mode 100644 index 000000000..5feb8355c --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/LogException.java @@ -0,0 +1,20 @@ +package io.mycat.backend.mysql.xa.recovery; + +/** + * Created by zhangchao on 2016/10/13. 
+ */ +public class LogException extends Exception{ + private static final long serialVersionUID = 3259337218182873867L; + + public LogException() { + super(); + } + + public LogException(String message) { + super(message); + } + + public LogException(Throwable cause) { + super(cause); + } +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/LogReadException.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/LogReadException.java new file mode 100644 index 000000000..28f6f2b42 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/LogReadException.java @@ -0,0 +1,22 @@ +package io.mycat.backend.mysql.xa.recovery; + +/** + * Created by zhangchao on 2016/10/17. + */ +public class LogReadException extends LogException{ + + private static final long serialVersionUID = -4835268355879075429L; + + public LogReadException() { + super(); + } + + public LogReadException(Throwable cause) { + super(cause); + } + + public LogReadException(String message) { + super(message); + } + +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/LogWriteException.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/LogWriteException.java new file mode 100644 index 000000000..1c9d284d4 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/LogWriteException.java @@ -0,0 +1,17 @@ +package io.mycat.backend.mysql.xa.recovery; + +/** + * Created by zhangchao on 2016/10/17. 
+ */ +public class LogWriteException extends LogException{ + + private static final long serialVersionUID = 5648208124041649641L; + + public LogWriteException() { + super(); + } + public LogWriteException(Throwable cause) { + super(cause); + } + +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/Repository.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/Repository.java new file mode 100644 index 000000000..d6a50ff9e --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/Repository.java @@ -0,0 +1,26 @@ +package io.mycat.backend.mysql.xa.recovery; + +import io.mycat.backend.mysql.xa.CoordinatorLogEntry; + +import java.util.Collection; + +/** + * Created by zhangchao on 2016/10/13. + */ +public interface Repository { + + void init() ; + + void put(String id, CoordinatorLogEntry coordinatorLogEntry); + + CoordinatorLogEntry get(String coordinatorId); + + Collection findAllCommittingCoordinatorLogEntries() ; + + Collection getAllCoordinatorLogEntries() ; + + void writeCheckpoint(Collection checkpointContent) ; + + void close(); + +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/impl/FileSystemRepository.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/impl/FileSystemRepository.java new file mode 100644 index 000000000..9e2febdb0 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/impl/FileSystemRepository.java @@ -0,0 +1,233 @@ +package io.mycat.backend.mysql.xa.recovery.impl; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.xa.*; +import io.mycat.backend.mysql.xa.recovery.*; +import io.mycat.config.MycatConfig; +import io.mycat.config.model.SystemConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.*; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * Created by zhangchao on 2016/10/13. 
+ */ +public class FileSystemRepository implements Repository{ + public static final Logger logger = LoggerFactory + .getLogger(FileSystemRepository.class); + private VersionedFile file; + private FileChannel rwChannel = null; + + public FileSystemRepository() { + init(); + } + + @Override + public void init(){ +// ConfigProperties configProperties = Configuration.getConfigProperties(); +// String baseDir = configProperties.getLogBaseDir(); +// String baseName = configProperties.getLogBaseName(); + MycatConfig mycatconfig = MycatServer.getInstance().getConfig(); + SystemConfig systemConfig = mycatconfig.getSystem(); + + String baseDir =systemConfig.getXARecoveryLogBaseDir(); + String baseName = systemConfig.getXARecoveryLogBaseName(); + + logger.debug("baseDir " + baseDir); + logger.debug("baseName " + baseName); + + //Judge whether exist the basedir + createBaseDir(baseDir); + + file = new VersionedFile(baseDir, baseName, ".log"); + + } + + private Serializer serializer = new Serializer(); + + @Override + public void put(String id, CoordinatorLogEntry coordinatorLogEntry) { + + try { + initChannelIfNecessary(); + write(coordinatorLogEntry, true); + } catch (IOException e) { + logger.error(e.getMessage(),e); + } + } + + private synchronized void initChannelIfNecessary() + throws FileNotFoundException { + if (rwChannel == null) { + rwChannel = file.openNewVersionForNioWriting(); + } + } + + private void write(CoordinatorLogEntry coordinatorLogEntry, + boolean flushImmediately) throws IOException { + String str = serializer.toJSON(coordinatorLogEntry); + byte[] buffer = str.getBytes(); + ByteBuffer buff = ByteBuffer.wrap(buffer); + writeToFile(buff, flushImmediately); + } + + private synchronized void writeToFile(ByteBuffer buff, boolean force) + throws IOException { + rwChannel.write(buff); + rwChannel.force(force); + } + + @Override + public CoordinatorLogEntry get(String coordinatorId) { + throw new UnsupportedOperationException(); + } + + @Override + public 
Collection findAllCommittingCoordinatorLogEntries() { + throw new UnsupportedOperationException(); + } + + @Override + public Collection getAllCoordinatorLogEntries() { + FileInputStream fis = null; + try { + fis = file.openLastValidVersionForReading(); + } catch (FileNotFoundException firstStart) { + // the file could not be opened for reading; + // merely return the default empty vector + } + if (fis != null) { + return readFromInputStream(fis); + } + //else + return Collections.emptyList(); + } + + public static Collection readFromInputStream( + InputStream in) { + Map coordinatorLogEntries = new HashMap(); + BufferedReader br = null; + try { + InputStreamReader isr = new InputStreamReader(in); + br = new BufferedReader(isr); + coordinatorLogEntries = readContent(br); + } catch (Exception e) { + logger.error("Error in recover", e); + } finally { + closeSilently(br); + } + return coordinatorLogEntries.values(); + } + + static Map readContent(BufferedReader br) + throws IOException { + + Map coordinatorLogEntries = new HashMap(); + try { + String line; + while ((line = br.readLine()) != null) { + CoordinatorLogEntry coordinatorLogEntry = deserialize(line); + coordinatorLogEntries.put(coordinatorLogEntry.id, + coordinatorLogEntry); + } + + } catch (EOFException unexpectedEOF) { + logger.info( + "Unexpected EOF - logfile not closed properly last time?", + unexpectedEOF); + // merely return what was read so far... + } catch (StreamCorruptedException unexpectedEOF) { + logger.info( + "Unexpected EOF - logfile not closed properly last time?", + unexpectedEOF); + // merely return what was read so far... + } catch (ObjectStreamException unexpectedEOF) { + logger.info( + "Unexpected EOF - logfile not closed properly last time?", + unexpectedEOF); + // merely return what was read so far... + } catch (DeserialisationException unexpectedEOF) { + logger.info("Unexpected EOF - logfile not closed properly last time? 
" + + unexpectedEOF); + } + return coordinatorLogEntries; + } + + private static void closeSilently(BufferedReader fis) { + try { + if (fis != null) + fis.close(); + } catch (IOException io) { + logger.warn("Fail to close logfile after reading - ignoring"); + } + } + + private static Deserializer deserializer = new Deserializer(); + + private static CoordinatorLogEntry deserialize(String line) + throws DeserialisationException { + return deserializer.fromJSON(line); + } + + @Override + public void close() { + try { + closeOutput(); + } catch (Exception e) { + logger.warn("Error closing file - ignoring", e); + } + + } + + protected void closeOutput() throws IllegalStateException { + try { + if (file != null) { + file.close(); + } + } catch (IOException e) { + throw new IllegalStateException("Error closing previous output", e); + } + } + + @Override + public synchronized void writeCheckpoint( + Collection checkpointContent) + { + + try { + closeOutput(); + + rwChannel = file.openNewVersionForNioWriting(); + for (CoordinatorLogEntry coordinatorLogEntry : checkpointContent) { + write(coordinatorLogEntry, false); + } + rwChannel.force(false); + file.discardBackupVersion(); + } catch (FileNotFoundException firstStart) { + // the file could not be opened for reading; + // merely return the default empty vector + } catch (Exception e) { + logger.error("Failed to write checkpoint", e); + } + + } + + /** + * create the log base dir + * @param baseDir + */ + public void createBaseDir(String baseDir){ + File baseDirFolder = new File (baseDir); + if (!baseDirFolder.exists()){ + baseDirFolder.mkdirs(); + } + } + +} diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/impl/InMemoryRepository.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/impl/InMemoryRepository.java new file mode 100644 index 000000000..937a14482 --- /dev/null +++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/impl/InMemoryRepository.java @@ -0,0 +1,76 @@ +package 
io.mycat.backend.mysql.xa.recovery.impl; + +import io.mycat.backend.mysql.xa.CoordinatorLogEntry; +import io.mycat.backend.mysql.xa.TxState; +import io.mycat.backend.mysql.xa.recovery.Repository; + +import java.util.Collection; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Created by zhangchao on 2016/10/18. + */ +public class InMemoryRepository implements Repository { + + private Map storage = new ConcurrentHashMap(); + + + private boolean closed = true; + @Override + public void init() { + closed=false; + } + + @Override + public synchronized void put(String id, CoordinatorLogEntry coordinatorLogEntry) { + storage.put(id, coordinatorLogEntry); + } + + @Override + public synchronized CoordinatorLogEntry get(String coordinatorId) { + return storage.get(coordinatorId); + } + + @Override + public synchronized Collection findAllCommittingCoordinatorLogEntries() { +// Set res = new HashSet(); +// Collection allCoordinatorLogEntry = storage.values(); +// for (CoordinatorLogEntry coordinatorLogEntry : allCoordinatorLogEntry) { +// if(coordinatorLogEntry.getResultingState() == TxState.TX_PREPARED_STATE){ +// res.add(coordinatorLogEntry); +// } +// } +// return res; + return null; + } + + @Override + public void close() { + storage.clear(); + closed=true; + } + + @Override + public Collection getAllCoordinatorLogEntries() { + return storage.values(); + } + + @Override + public void writeCheckpoint( + Collection checkpointContent) { + storage.clear(); + for (CoordinatorLogEntry coordinatorLogEntry : checkpointContent) { + storage.put(coordinatorLogEntry.id, coordinatorLogEntry); + } + + } + + + + public boolean isClosed() { + return closed; + } +} diff --git a/src/main/java/io/mycat/backend/nio/MySQLBackendConnectionFactory.java b/src/main/java/io/mycat/backend/nio/MySQLBackendConnectionFactory.java deleted file mode 100644 index 0a477bd3e..000000000 --- 
a/src/main/java/io/mycat/backend/nio/MySQLBackendConnectionFactory.java +++ /dev/null @@ -1,37 +0,0 @@ -package io.mycat.backend.nio; - -import io.mycat.backend.MySQLDataSource; -import io.mycat.net.NetSystem; -import io.mycat.server.config.node.DBHostConfig; -import io.mycat.server.executors.ResponseHandler; - -import java.io.IOException; -import java.nio.channels.SocketChannel; - -public class MySQLBackendConnectionFactory { - private final MySQLBackendConnectionHandler nioHandler = new MySQLBackendConnectionHandler(); - - public MySQLBackendConnection make(MySQLDataSource pool, - ResponseHandler handler, String schema) throws IOException { - - DBHostConfig dsc = pool.getConfig(); - SocketChannel channel = SocketChannel.open(); - channel.configureBlocking(false); - - MySQLBackendConnection c = new MySQLBackendConnection(channel, - pool.isReadNode()); - NetSystem.getInstance().setSocketParams(c, false); - // 设置NIOHandler - c.setHandler(nioHandler); - c.setHost(dsc.getIp()); - c.setPort(dsc.getPort()); - c.setUser(dsc.getUser()); - c.setPassword(dsc.getPassword()); - c.setSchema(schema); - c.setPool(pool); - c.setResponseHandler(handler); - c.setIdleTimeout(pool.getConfig().getIdleTimeout()); - NetSystem.getInstance().getConnector().postConnect(c); - return c; - } -} diff --git a/src/main/java/io/mycat/backend/nio/MySQLBackendConnectionHandler.java b/src/main/java/io/mycat/backend/nio/MySQLBackendConnectionHandler.java deleted file mode 100644 index 08e9bf855..000000000 --- a/src/main/java/io/mycat/backend/nio/MySQLBackendConnectionHandler.java +++ /dev/null @@ -1,333 +0,0 @@ -package io.mycat.backend.nio; - -import io.mycat.MycatServer; -import io.mycat.net.Connection; -import io.mycat.net.ConnectionException; -import io.mycat.net.NIOHandler; -import io.mycat.server.Capabilities; -import io.mycat.server.executors.LoadDataResponseHandler; -import io.mycat.server.executors.ResponseHandler; -import io.mycat.server.packet.*; -import 
io.mycat.server.packet.util.ByteUtil; -import io.mycat.server.packet.util.CharsetUtil; -import io.mycat.server.packet.util.SecurityUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; - -public class MySQLBackendConnectionHandler implements - NIOHandler { - - private static final Logger LOGGER = LoggerFactory - .getLogger(MySQLBackendConnectionHandler.class); - private static final int RESULT_STATUS_INIT = 0; - private static final int RESULT_STATUS_HEADER = 1; - private static final int RESULT_STATUS_FIELD_EOF = 2; - - @Override - public void onConnected(MySQLBackendConnection con) throws IOException { - - //con.asynRead(); - } - - @Override - public void handle(MySQLBackendConnection con, ByteBuffer buf, int start, - int readedLength) { - switch (con.getState()) { - case connecting: { - doConnecting(con, buf, start, readedLength); - return; - } - case connected: { - try { - doHandleBusinessMsg(con, buf, start, readedLength); - } catch (Exception e) { - LOGGER.warn("caught err of con "+con, e); - } - return; - } - - default: - LOGGER.warn("not handled connecton state err " + con.getState() - + " for con " + con); - break; - - } - - } - - @Override - public void onConnectFailed(MySQLBackendConnection source, Throwable e) { - ResponseHandler respHand = source.getRespHandler(); - if (respHand != null) { - respHand.connectionError(e, source); - } - - } - - private void handleLogin(MySQLBackendConnection source, byte[] data) { - try { - switch (data[4]) { - case OkPacket.FIELD_COUNT: - HandshakePacket packet = source.getHandshake(); - if (packet == null) { - processHandShakePacket(source, data); - // 发送认证数据包 - source.authenticate(); - break; - } - // 处理认证结果 - source.setAuthenticated(true); - source.setState(Connection.State.connected); - boolean clientCompress = Capabilities.CLIENT_COMPRESS == (Capabilities.CLIENT_COMPRESS & packet.serverCapabilities); - boolean 
usingCompress = MycatServer.getInstance().getConfig() - .getSystem().getUseCompression() == 1; - - if (clientCompress && usingCompress) { - source.setSupportCompress(true); - } - - if (source.getRespHandler() != null) { - source.getRespHandler().connectionAcquired(source); - } - - break; - case ErrorPacket.FIELD_COUNT: - ErrorPacket err = new ErrorPacket(); - err.read(data); - String errMsg = new String(err.message); - LOGGER.warn("can't connect to mysql server ,errmsg:" + errMsg - + " " + source); - // source.close(errMsg); - throw new ConnectionException(err.errno, errMsg); - - case EOFPacket.FIELD_COUNT: - auth323(source, data[3]); - break; - default: - packet = source.getHandshake(); - if (packet == null) { - processHandShakePacket(source, data); - // 发送认证数据包 - source.authenticate(); - break; - } else { - throw new RuntimeException("Unknown Packet!"); - } - - } - - } catch (RuntimeException e) { - if (source.getRespHandler() != null) { - source.getRespHandler().connectionError(e, source); - return; - } - throw e; - } - } - - @Override - public void onClosed(MySQLBackendConnection source, String reason) { - ResponseHandler respHand = source.getRespHandler(); - if (respHand != null) { - respHand.connectionClose(source, reason); - } - - } - - private void doConnecting(MySQLBackendConnection con, ByteBuffer buf, - int start, int readedLength) { - byte[] data = new byte[readedLength]; - buf.position(start); - buf.get(data, 0, readedLength); - handleLogin(con, data); - } - - public void doHandleBusinessMsg(final MySQLBackendConnection source, - final ByteBuffer buf, final int start, final int readedLength) { - byte[] data = new byte[readedLength]; - buf.position(start); - buf.get(data, 0, readedLength); - handleData(source, data); - } - - public void connectionError(Throwable e) { - - } - - protected void handleData(final MySQLBackendConnection source, byte[] data) { - ResultStatus resultStatus = source.getSqlResultStatus(); - switch (resultStatus.getResultStatus()) 
{ - case RESULT_STATUS_INIT: - switch (data[4]) { - case OkPacket.FIELD_COUNT: - handleOkPacket(source, data); - break; - case ErrorPacket.FIELD_COUNT: - handleErrorPacket(source, data); - break; - case RequestFilePacket.FIELD_COUNT: - handleRequestPacket(source, data); - break; - default: - resultStatus.setResultStatus(RESULT_STATUS_HEADER); - resultStatus.setHeader(data); - resultStatus.setFields(new ArrayList((int) ByteUtil - .readLength(data, 4))); - } - break; - case RESULT_STATUS_HEADER: - switch (data[4]) { - case ErrorPacket.FIELD_COUNT: - resultStatus.setResultStatus(RESULT_STATUS_INIT); - handleErrorPacket(source, data); - break; - case EOFPacket.FIELD_COUNT: - resultStatus.setResultStatus(RESULT_STATUS_FIELD_EOF); - handleFieldEofPacket(source, data); - break; - default: - resultStatus.getFields().add(data); - } - break; - case RESULT_STATUS_FIELD_EOF: - switch (data[4]) { - case ErrorPacket.FIELD_COUNT: - resultStatus.setResultStatus(RESULT_STATUS_INIT); - handleErrorPacket(source, data); - break; - case EOFPacket.FIELD_COUNT: - resultStatus.setResultStatus(RESULT_STATUS_INIT); - handleRowEofPacket(source, data); - break; - default: - handleRowPacket(source, data); - } - break; - default: - throw new RuntimeException("unknown status!"); - } - } - - /** - * OK数据包处理 - */ - private void handleOkPacket(MySQLBackendConnection source, byte[] data) { - ResponseHandler respHand = source.getRespHandler(); - if (respHand != null) { - respHand.okResponse(data, source); - }else { - closeNoHandler(source); - } - } - - /** - * ERROR数据包处理 - */ - private void handleErrorPacket(MySQLBackendConnection source, byte[] data) { - ResponseHandler respHand = source.getRespHandler(); - if (respHand != null) { - respHand.errorResponse(data, source); - } else { - closeNoHandler(source); - } - } - - /** - * load data file 请求文件数据包处理 - */ - private void handleRequestPacket(MySQLBackendConnection source, byte[] data) { - ResponseHandler respHand = source.getRespHandler(); - if 
(respHand != null && respHand instanceof LoadDataResponseHandler) { - ((LoadDataResponseHandler) respHand).requestDataResponse(data, - source); - } else { - closeNoHandler(source); - } - } - - /** - * 字段数据包结束处理 - */ - private void handleFieldEofPacket(final MySQLBackendConnection source, - byte[] data) { - ResponseHandler respHand = source.getRespHandler(); - if (respHand != null) { - respHand.fieldEofResponse(source.getSqlResultStatus().getHeader(), - source.getSqlResultStatus().getFields(), data, source); - } else { - closeNoHandler(source); - } - } - - /** - * 行数据包处理 - */ - private void handleRowPacket(final MySQLBackendConnection source, - byte[] data) { - ResponseHandler respHand = source.getRespHandler(); - if (respHand != null) { - respHand.rowResponse(data, source); - } else { - closeNoHandler(source); - - } - } - - private void closeNoHandler(final MySQLBackendConnection source) { - if (!source.isClosedOrQuit()) { - source.close("no handler"); - LOGGER.warn("no handler bind in this con " + this + " client:" - + source); - } - } - - /** - * 行数据包结束处理 - */ - private void handleRowEofPacket(final MySQLBackendConnection source, - byte[] data) { - ResponseHandler responseHandler = source.getRespHandler(); - if (responseHandler != null) { - responseHandler.rowEofResponse(data, source); - } else { - closeNoHandler(source); - } - } - - private void processHandShakePacket(MySQLBackendConnection source, - byte[] data) { - // 设置握手数据包 - HandshakePacket packet = new HandshakePacket(); - packet.read(data); - source.setHandshake(packet); - source.setThreadId(packet.threadId); - - // 设置字符集编码 - int charsetIndex = (packet.serverCharsetIndex & 0xff); - String charset = CharsetUtil.getCharset(charsetIndex); - if (charset != null) { - source.setCharset(charset); - } else { - LOGGER.warn("Unknown charsetIndex:" + charsetIndex); - throw new RuntimeException("Unknown charsetIndex:" + charsetIndex); - } - } - - private void auth323(MySQLBackendConnection source, byte packetId) { - 
// 发送323响应认证数据包 - Reply323Packet r323 = new Reply323Packet(); - r323.packetId = ++packetId; - String pass = source.getPassword(); - if (pass != null && pass.length() > 0) { - byte[] seed = source.getHandshake().seed; - r323.seed = SecurityUtil.scramble323(pass, new String(seed)) - .getBytes(); - } - r323.write(source); - } - -} diff --git a/src/main/java/io/mycat/backend/postgresql/PostgreSQLBackendConnection.java b/src/main/java/io/mycat/backend/postgresql/PostgreSQLBackendConnection.java index 4c25bf2f9..becfd1609 100644 --- a/src/main/java/io/mycat/backend/postgresql/PostgreSQLBackendConnection.java +++ b/src/main/java/io/mycat/backend/postgresql/PostgreSQLBackendConnection.java @@ -3,25 +3,26 @@ import java.io.IOException; import java.io.UnsupportedEncodingException; import java.nio.ByteBuffer; +import java.nio.channels.NetworkChannel; import java.nio.channels.SocketChannel; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import io.mycat.backend.BackendConnection; -import io.mycat.backend.PhysicalDatasource; +import io.mycat.backend.jdbc.ShowVariables; +import io.mycat.backend.mysql.CharsetUtil; +import io.mycat.backend.mysql.nio.MySQLConnectionHandler; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; import io.mycat.backend.postgresql.packet.Query; import io.mycat.backend.postgresql.packet.Terminate; +import io.mycat.backend.postgresql.utils.PIOUtils; +import io.mycat.backend.postgresql.utils.PacketUtils; import io.mycat.backend.postgresql.utils.PgSqlApaterUtils; -import io.mycat.net.Connection; -import io.mycat.net.NetSystem; +import io.mycat.config.Isolations; +import io.mycat.net.BackendAIOConnection; import io.mycat.route.RouteResultsetNode; -import io.mycat.server.Isolations; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.exception.UnknownTxIsolationException; -import io.mycat.server.executors.ResponseHandler; -import io.mycat.server.packet.util.CharsetUtil; +import 
io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; -import io.mycat.util.TimeUtil; +import io.mycat.util.exception.UnknownTxIsolationException; /************************************************************* * PostgreSQL Native Connection impl @@ -29,139 +30,251 @@ * @author Coollf * */ -public class PostgreSQLBackendConnection extends Connection implements - BackendConnection { +public class PostgreSQLBackendConnection extends BackendAIOConnection { + + public static enum BackendConnectionState { + closed, connected, connecting + } + + private static class StatusSync { + private final Boolean autocommit; + private final Integer charsetIndex; + private final String schema; + private final AtomicInteger synCmdCount; + private final Integer txtIsolation; + private final boolean xaStarted; + + public StatusSync(boolean xaStarted, String schema, Integer charsetIndex, Integer txtIsolation, + Boolean autocommit, int synCount) { + super(); + this.xaStarted = xaStarted; + this.schema = schema; + this.charsetIndex = charsetIndex; + this.txtIsolation = txtIsolation; + this.autocommit = autocommit; + this.synCmdCount = new AtomicInteger(synCount); + } + + public boolean synAndExecuted(PostgreSQLBackendConnection conn) { + int remains = synCmdCount.decrementAndGet(); + if (remains == 0) {// syn command finished + this.updateConnectionInfo(conn); + conn.metaDataSyned = true; + return false; + } else if (remains < 0) { + return true; + } + return false; + } + + private void updateConnectionInfo(PostgreSQLBackendConnection conn) + + { + conn.xaStatus = (xaStarted) ? 
1 : 0; + if (schema != null) { + conn.schema = schema; + conn.oldSchema = conn.schema; + } + if (charsetIndex != null) { + conn.setCharset(CharsetUtil.getCharset(charsetIndex)); + } + if (txtIsolation != null) { + conn.txIsolation = txtIsolation; + } + if (autocommit != null) { + conn.autocommit = autocommit; + } + } + + } + private static final Query _COMMIT = new Query("commit"); + private static final Query _ROLLBACK = new Query("rollback"); + private static void getCharsetCommand(StringBuilder sb, int clientCharIndex) { + sb.append("SET names '").append(CharsetUtil.getCharset(clientCharIndex).toUpperCase()).append("';"); + } + /** - * 来自子接口 + * 获取 更改事物级别sql + * + * @param + * @param txIsolation */ - private boolean fromSlaveDB; + private static void getTxIsolationCommand(StringBuilder sb, int txIsolation) { + switch (txIsolation) { + case Isolations.READ_UNCOMMITTED: + sb.append("SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;"); + return; + case Isolations.READ_COMMITTED: + sb.append("SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;"); + return; + case Isolations.REPEATED_READ: + sb.append("SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;"); + return; + case Isolations.SERIALIZABLE: + sb.append("SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;"); + return; + default: + throw new UnknownTxIsolationException("txIsolation:" + txIsolation); + } + } - /*** - * 用户名 - */ - private String user; + private Object attachment; + + private volatile boolean autocommit=true; + + private volatile boolean borrowed; + + protected volatile String charset = "utf8"; - /** - * 密码 - */ - private String password; /*** - * 对应数据库空间 + * 当前事物ID */ - private String schema; + private volatile String currentXaTxId; /** - * 数据源配置 + * 来自子接口 */ - private PostgreSQLDataSource pool; - private Object attachment; - protected volatile String charset = "utf8"; - private volatile boolean autocommit; + private volatile boolean fromSlaveDB; /**** * PG是否在事物中 */ private 
volatile boolean inTransaction = false; - /*** - * 响应handler - */ - private volatile ResponseHandler responseHandler; - private boolean borrowed; - private volatile int txIsolation; - private volatile boolean modifiedSQLExecuted = false; - private long lastTime; - private AtomicBoolean isQuit; + private AtomicBoolean isQuit = new AtomicBoolean(false); - // PostgreSQL服务端密码 - private int serverSecretKey; - - protected volatile int charsetIndex; - - private int xaStatus; - - private String oldSchema; - - // 已经认证通过 - private boolean isAuthenticated; + private volatile long lastTime; /** * 元数据同步 */ private volatile boolean metaDataSyned = true; + private volatile boolean modifiedSQLExecuted = false; + private volatile String oldSchema; + + /** + * 密码 + */ + private volatile String password; + + /** + * 数据源配置 + */ + private PostgreSQLDataSource pool; + /*** + * 响应handler + */ + private volatile ResponseHandler responseHandler; + /*** + * 对应数据库空间 + */ + private volatile String schema; + // PostgreSQL服务端密码 + private volatile int serverSecretKey; + private volatile BackendConnectionState state = BackendConnectionState.connecting; private volatile StatusSync statusSync; + private volatile int txIsolation; + /*** - * 当前事物ID + * 用户名 */ - private volatile String currentXaTxId; - private long currentTimeMillis; + private volatile String user; - public PostgreSQLBackendConnection(SocketChannel channel, - boolean fromSlaveDB) { + private volatile int xaStatus; + + public PostgreSQLBackendConnection(NetworkChannel channel, boolean fromSlaveDB) { super(channel); this.fromSlaveDB = fromSlaveDB; - this.lastTime = TimeUtil.currentTimeMillis(); - this.isQuit = new AtomicBoolean(false); - this.autocommit = true; } @Override - public boolean isFromSlaveDB() { - return fromSlaveDB; + public void commit() { + ByteBuffer buf = this.allocate(); + _COMMIT.write(buf); + this.write(buf); } @Override - public String getSchema() { - return schema; + public void execute(RouteResultsetNode rrn, 
ServerConnection sc, boolean autocommit) throws IOException { + int sqlType = rrn.getSqlType(); + String orgin = rrn.getStatement(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("{}查询任务。。。。{}", id, rrn.getStatement()); + LOGGER.debug(orgin); + } + + //FIX BUG https://github.com/MyCATApache/Mycat-Server/issues/1185 + if (sqlType == ServerParse.SELECT || sqlType == ServerParse.SHOW) { + if (sqlType == ServerParse.SHOW) { + //此处进行部分SHOW 语法适配 + String _newSql = PgSqlApaterUtils.apater(orgin); + if(_newSql.trim().substring(0,4).equalsIgnoreCase("show")){//未能适配成功 + ShowVariables.execute(sc, orgin, this); + return; + } + } else if ("SELECT CONNECTION_ID()".equalsIgnoreCase(orgin)) { + ShowVariables.justReturnValue(sc, String.valueOf(sc.getId()), this); + return; + } + } + + if (!modifiedSQLExecuted && rrn.isModifySQL()) { + modifiedSQLExecuted = true; + } + String xaTXID = null; + if(sc.getSession2().getXaTXID()!=null){ + xaTXID = sc.getSession2().getXaTXID() +",'"+getSchema()+"'"; + } + synAndDoExecute(xaTXID, rrn, sc.getCharsetIndex(), sc.getTxIsolation(), autocommit); } @Override - public void setSchema(String newSchema) { - String curSchema = schema; - if (curSchema == null) { - this.schema = newSchema; - this.oldSchema = newSchema; + public Object getAttachment() { + return attachment; + } + + private void getAutocommitCommand(StringBuilder sb, boolean autoCommit) { + if (autoCommit) { + sb.append(/*"SET autocommit=1;"*/"");//Fix bug 由于 PG9.0 开始不支持此选项,默认是为自动提交逻辑。 } else { - this.oldSchema = curSchema; - this.schema = newSchema; + sb.append("begin transaction;"); } } @Override - public void setAttachment(Object attachment) { - this.attachment = attachment; + public long getLastTime() { + return lastTime; } - @Override - public void setLastTime(long currentTimeMillis) { - this.currentTimeMillis = currentTimeMillis; + public String getPassword() { + return password; } - @Override - public void setResponseHandler(ResponseHandler queryHandler) { - this.responseHandler 
= queryHandler; + public PostgreSQLDataSource getPool() { + return pool; } - @Override - public Object getAttachment() { - return attachment; + public ResponseHandler getResponseHandler() { + return responseHandler; } @Override - public boolean isBorrowed() { - return borrowed; + public String getSchema() { + return this.schema; } - @Override - public void setBorrowed(boolean borrowed) { - this.lastTime = TimeUtil.currentTimeMillis(); - this.borrowed = borrowed; + public int getServerSecretKey() { + return serverSecretKey; + } + + public BackendConnectionState getState() { + return state; } @Override @@ -169,89 +282,80 @@ public int getTxIsolation() { return txIsolation; } + public String getUser() { + return user; + } + @Override public boolean isAutocommit() { return autocommit; } @Override - public String getCharset() { - return charset; + public boolean isBorrowed() { + return borrowed; } @Override - public PhysicalDatasource getPool() { - return pool; - } - - /** - * @return the user - */ - public String getUser() { - return user; + public boolean isClosedOrQuit() { + return isClosed() || isQuit.get(); } - /** - * @param user - * the user to set - */ - public void setUser(String user) { - this.user = user; + @Override + public boolean isFromSlaveDB() { + return fromSlaveDB; } - /** - * @return the password - */ - public String getPassword() { - return password; + public boolean isInTransaction() { + return inTransaction; } - /** - * @param password - * the password to set - */ - public void setPassword(String password) { - this.password = password; + @Override + public boolean isModifiedSQLExecuted() { + return modifiedSQLExecuted; } - /** - * @param fromSlaveDB - * the fromSlaveDB to set - */ - public void setFromSlaveDB(boolean fromSlaveDB) { - this.fromSlaveDB = fromSlaveDB; - } + @Override + public void onConnectFailed(Throwable t) { + if (handler instanceof MySQLConnectionHandler) { - /** - * @param pool - * the pool to set - */ - public void 
setPool(PostgreSQLDataSource pool) { - this.pool = pool; + } } @Override - public boolean isModifiedSQLExecuted() { - return modifiedSQLExecuted; + public void onConnectfinish() { + LOGGER.debug("连接后台真正完成"); + try { + SocketChannel chan = (SocketChannel) this.channel; + ByteBuffer buf = PacketUtils.makeStartUpPacket(user, schema); + buf.flip(); + chan.write(buf); + } catch (Exception e) { + LOGGER.error("Connected PostgreSQL Send StartUpPacket ERROR", e); + throw new RuntimeException(e); + } } - @Override - public long getLastTime() { - return lastTime; + protected final int getPacketLength(ByteBuffer buffer, int offset) { + // Pg 协议获取包长度的方法和mysql 不一样 + return PIOUtils.redInteger4(buffer, offset + 1) + 1; } + /********** + * 此查询用于心跳检查和获取连接后的健康检查 + */ @Override - public boolean isClosedOrQuit() { - return isClosed() || isQuit.get(); + public void query(String query) throws UnsupportedEncodingException { + RouteResultsetNode rrn = new RouteResultsetNode("default", ServerParse.SELECT, query); + synAndDoExecute(null, rrn, this.charsetIndex, this.txIsolation, true); } @Override public void quit() { if (isQuit.compareAndSet(false, true) && !isClosed()) { - if (isAuthenticated) {// 断开 与PostgreSQL连接 + if (state == BackendConnectionState.connected) {// 断开 与PostgreSQL连接 Terminate terminate = new Terminate(); - ByteBuffer buf = NetSystem.getInstance().getBufferPool() - .allocate(); + ByteBuffer buf = this.allocate(); terminate.write(buf); write(buf); } else { @@ -260,13 +364,21 @@ public void quit() { } } + /******* + * 记录sql执行信息 + */ + @Override + public void recordSql(String host, String schema, String statement) { + LOGGER.debug(String.format("executed sql: host=%s,schema=%s,statement=%s", host, schema, statement)); + } + @Override public void release() { - if (metaDataSyned == false) {// indicate connection not normalfinished - // ,and - // we can't know it's syn status ,so - // close - // it + if (!metaDataSyned) {/* + * indicate connection not normalfinished ,and + * we 
can't know it's syn status ,so close it + */ + LOGGER.warn("can't sure connection syn result,so close it " + this); this.responseHandler = null; this.close("syn status unkown "); @@ -281,35 +393,78 @@ public void release() { } @Override - public void query(String query) throws UnsupportedEncodingException { - RouteResultsetNode rrn = new RouteResultsetNode("default", - ServerParse.SELECT, query); - synAndDoExecute(null, rrn, this.charsetIndex, this.txIsolation, true); + public void rollback() { + ByteBuffer buf = this.allocate(); + _ROLLBACK.write(buf); + this.write(buf); } @Override - public void execute(RouteResultsetNode rrn, MySQLFrontConnection sc, - boolean autocommit) throws IOException { - if (!modifiedSQLExecuted && rrn.isModifySQL()) { - modifiedSQLExecuted = true; + public void setAttachment(Object attachment) { + this.attachment = attachment; + } + + @Override + public void setBorrowed(boolean borrowed) { + this.borrowed = borrowed; + } + + public void setInTransaction(boolean inTransaction) { + this.inTransaction = inTransaction; + } + + @Override + public void setLastTime(long currentTimeMillis) { + this.lastTime = currentTimeMillis; + } + + public void setPassword(String password) { + this.password = password; + } + + public void setPool(PostgreSQLDataSource pool) { + this.pool = pool; + } + + @Override + public boolean setResponseHandler(ResponseHandler commandHandler) { + this.responseHandler = commandHandler; + return true; + } + + @Override + public void setSchema(String newSchema) { + String curSchema = schema; + if (curSchema == null) { + this.schema = newSchema; + this.oldSchema = newSchema; + } else { + this.oldSchema = curSchema; + this.schema = newSchema; } - String xaTXID = sc.getSession2().getXaTXID(); - synAndDoExecute(xaTXID, rrn,sc.getCharsetIndex(), - sc.getTxIsolation(), autocommit); + } + + public void setServerSecretKey(int serverSecretKey) { + this.serverSecretKey = serverSecretKey; + } + public void 
setState(BackendConnectionState state) { + this.state = state; } - private void synAndDoExecute(String xaTxID, RouteResultsetNode rrn, - int clientCharSetIndex, int clientTxIsoLation, + public void setUser(String user) { + this.user = user; + } + + private void synAndDoExecute(String xaTxID, RouteResultsetNode rrn, int clientCharSetIndex, int clientTxIsoLation, boolean clientAutoCommit) { String xaCmd = null; boolean conAutoComit = this.autocommit; String conSchema = this.schema; // never executed modify sql,so auto commit - boolean expectAutocommit = !modifiedSQLExecuted || isFromSlaveDB() - || clientAutoCommit; - if (expectAutocommit == false && xaTxID != null && xaStatus == 0) { + boolean expectAutocommit = !modifiedSQLExecuted || isFromSlaveDB() || clientAutoCommit; + if (!expectAutocommit && xaTxID != null && xaStatus == 0) { clientTxIsoLation = Isolations.SERIALIZABLE; xaCmd = "XA START " + xaTxID + ';'; currentXaTxId = xaTxID; @@ -319,16 +474,16 @@ private void synAndDoExecute(String xaTxID, RouteResultsetNode rrn, int txIsoLationSyn = (txIsolation == clientTxIsoLation) ? 0 : 1; int autoCommitSyn = (conAutoComit == expectAutocommit) ? 0 : 1; int synCount = schemaSyn + charsetSyn + txIsoLationSyn + autoCommitSyn; - - if(synCount == 0){ + + if (synCount == 0) { String sql = rrn.getStatement(); Query query = new Query(PgSqlApaterUtils.apater(sql)); - ByteBuffer buf = NetSystem.getInstance().getBufferPool().allocate(); + ByteBuffer buf = this.allocate();// XXX 此处处理问题 query.write(buf); this.write(buf); return; } - + // TODO COOLLF 此处大锅待实现. 
相关 事物, 切换 库,自动提交等功能实现 StringBuilder sb = new StringBuilder(); if (charsetSyn == 1) { @@ -344,202 +499,52 @@ private void synAndDoExecute(String xaTxID, RouteResultsetNode rrn, sb.append(xaCmd); } if (LOGGER.isDebugEnabled()) { - LOGGER.debug("con need syn ,total syn cmd " + synCount - + " commands " + sb.toString() + "schema change:" + LOGGER.debug("con need syn ,total syn cmd " + synCount + " commands " + sb.toString() + "schema change:" + ("" != null) + " con:" + this); } - + metaDataSyned = false; - statusSync = new StatusSync(xaCmd != null, conSchema, - clientCharSetIndex, clientTxIsoLation, expectAutocommit, + statusSync = new StatusSync(xaCmd != null, conSchema, clientCharSetIndex, clientTxIsoLation, expectAutocommit, synCount); String sql = sb.append(PgSqlApaterUtils.apater(rrn.getStatement())).toString(); - System.err.println("con="+ this.hashCode() + ":SQL:"+sql); + if(LOGGER.isDebugEnabled()){ + LOGGER.debug("con={}, SQL={}", this, sql); + } Query query = new Query(sql); - ByteBuffer buf = NetSystem.getInstance().getBufferPool().allocate(); + ByteBuffer buf = allocate();// 申请ByetBuffer query.write(buf); this.write(buf); metaDataSyned = true; } - - /** - * 获取 更改事物级别sql - * @param - * @param txIsolation - */ - private static void getTxIsolationCommand(StringBuilder sb, int txIsolation) { - switch (txIsolation) { - case Isolations.READ_UNCOMMITTED: - sb.append("SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;"); - return; - case Isolations.READ_COMMITTED: - sb.append("SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;"); - return; - case Isolations.REPEATED_READ: - sb.append("SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;"); - return; - case Isolations.SERIALIZABLE: - sb.append("SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;"); - return; - default: - throw new UnknownTxIsolationException("txIsolation:" + txIsolation); - } - } - - private void getAutocommitCommand(StringBuilder sb, boolean autoCommit) { - if (autoCommit) { - 
sb.append("SET autocommit=1;"); - } else { - sb.append("begin transaction;"); - } - } - - private static void getCharsetCommand(StringBuilder sb, int clientCharIndex) { - sb.append("SET names '").append(CharsetUtil.getCharset(clientCharIndex).toUpperCase()).append("';"); - } - - @Override - public void commit() { - ByteBuffer buf = NetSystem.getInstance().getBufferPool().allocate(); - _COMMIT.write(buf); - this.write(buf); + public void close(String reason) { + if (!isClosed.get()) { + isQuit.set(true); + super.close(reason); + pool.connectionClosed(this); + if (this.responseHandler != null) { + this.responseHandler.connectionClose(this, reason); + responseHandler = null; + } + } } @Override public boolean syncAndExcute() { StatusSync sync = this.statusSync; - if (sync == null) { - return true; - } else { + if (sync != null) { boolean executed = sync.synAndExecuted(this); if (executed) { statusSync = null; } return executed; } + return true; } @Override - public void rollback() { - ByteBuffer buf = NetSystem.getInstance().getBufferPool().allocate(); - _ROLLBACK.write(buf); - this.write(buf); - } - - @SuppressWarnings("unchecked") - @Override - public void onReadData(int got) throws IOException { - ByteBuffer buf = getReadBuffer(); - if (buf != null) { - this.handler.handle(this, buf, 0, got); - buf.clear();// 使用完成后清理 - } else { - System.err.println("getReadBuffer()为空"); - } - } - - public void setServerSecretKey(int serverSecretKey) { - this.serverSecretKey = serverSecretKey; - } - - /** - * @return the serverSecretKey - */ - public int getServerSecretKey() { - return serverSecretKey; - } - - /** - * @return the responseHandler - */ - public ResponseHandler getResponseHandler() { - return responseHandler; - } - - private static class StatusSync { - private final String schema; - private final Integer charsetIndex; - private final Integer txtIsolation; - private final Boolean autocommit; - private final AtomicInteger synCmdCount; - private final boolean xaStarted; - 
- public StatusSync(boolean xaStarted, String schema, - Integer charsetIndex, Integer txtIsolation, Boolean autocommit, - int synCount) { - super(); - this.xaStarted = xaStarted; - this.schema = schema; - this.charsetIndex = charsetIndex; - this.txtIsolation = txtIsolation; - this.autocommit = autocommit; - this.synCmdCount = new AtomicInteger(synCount); - } - - public boolean synAndExecuted(PostgreSQLBackendConnection conn) { - int remains = synCmdCount.decrementAndGet(); - if (remains == 0) {// syn command finished - this.updateConnectionInfo(conn); - conn.metaDataSyned = true; - return false; - } else if (remains < 0) { - return true; - } - return false; - } - - private void updateConnectionInfo(PostgreSQLBackendConnection conn) - - { - conn.xaStatus = (xaStarted == true) ? 1 : 0; - if (schema != null) { - conn.schema = schema; - conn.oldSchema = conn.schema; - } - if (charsetIndex != null) { - conn.setCharset(CharsetUtil.getCharset(charsetIndex)); - } - if (txtIsolation != null) { - conn.txIsolation = txtIsolation; - } - if (autocommit != null) { - conn.autocommit = autocommit; - } - } - - } - - public boolean setCharset(String charset) { - if ( charset != null ) { - charset = charset.replace("'", ""); - } - - int ci = CharsetUtil.getIndex(charset); - if (ci > 0) { - this.charset = charset.equalsIgnoreCase("utf8mb4") ? 
"utf8" - : charset; - this.charsetIndex = ci; - return true; - } else { - return false; - } + public String toString() { + return "PostgreSQLBackendConnection [id=" + id + ", host=" + host + ", port=" + port + ", localPort=" + + localPort + "]"; } - - /** - * @return the inTransaction - */ - public boolean isInTransaction() { - return inTransaction; - } - - /** - * @param inTransaction - * the inTransaction to set - */ - public void setInTransaction(boolean inTransaction) { - - this.inTransaction = inTransaction; - } - } diff --git a/src/main/java/io/mycat/backend/postgresql/PostgreSQLBackendConnectionFactory.java b/src/main/java/io/mycat/backend/postgresql/PostgreSQLBackendConnectionFactory.java index 5a6e1727a..f9efe140e 100644 --- a/src/main/java/io/mycat/backend/postgresql/PostgreSQLBackendConnectionFactory.java +++ b/src/main/java/io/mycat/backend/postgresql/PostgreSQLBackendConnectionFactory.java @@ -1,27 +1,33 @@ package io.mycat.backend.postgresql; -import io.mycat.net.NetSystem; -import io.mycat.server.config.node.DBHostConfig; -import io.mycat.server.executors.ResponseHandler; +import io.mycat.MycatServer; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.config.model.DBHostConfig; +import io.mycat.net.NIOConnector; +import io.mycat.net.factory.BackendConnectionFactory; import java.io.IOException; -import java.nio.channels.SocketChannel; +import java.net.InetSocketAddress; +import java.nio.channels.AsynchronousSocketChannel; +import java.nio.channels.CompletionHandler; +import java.nio.channels.NetworkChannel; -public class PostgreSQLBackendConnectionFactory { - PostgreSQLBackendConnectionHandler nioHandler = new PostgreSQLBackendConnectionHandler(); +public class PostgreSQLBackendConnectionFactory extends + BackendConnectionFactory { + @SuppressWarnings({ "unchecked", "rawtypes" }) public PostgreSQLBackendConnection make(PostgreSQLDataSource pool, - ResponseHandler handler, String schema) throws IOException { + ResponseHandler 
handler, final String schema) throws IOException { - DBHostConfig dsc = pool.getConfig(); - SocketChannel channel = SocketChannel.open(); - channel.configureBlocking(false); + final DBHostConfig dsc = pool.getConfig(); + NetworkChannel channel = this.openSocketChannel(MycatServer + .getInstance().isAIO()); - PostgreSQLBackendConnection c = new PostgreSQLBackendConnection( + final PostgreSQLBackendConnection c = new PostgreSQLBackendConnection( channel, pool.isReadNode()); - NetSystem.getInstance().setSocketParams(c, false); + MycatServer.getInstance().getConfig().setSocketParams(c, false); // 设置NIOHandler - c.setHandler(nioHandler); + c.setHandler(new PostgreSQLBackendConnectionHandler(c)); c.setHost(dsc.getIp()); c.setPort(dsc.getPort()); c.setUser(dsc.getUser()); @@ -30,8 +36,18 @@ public PostgreSQLBackendConnection make(PostgreSQLDataSource pool, c.setPool(pool); c.setResponseHandler(handler); c.setIdleTimeout(pool.getConfig().getIdleTimeout()); - NetSystem.getInstance().getConnector().postConnect(c); + if (channel instanceof AsynchronousSocketChannel) { + ((AsynchronousSocketChannel) channel).connect( + new InetSocketAddress(dsc.getIp(), dsc.getPort()), c, + (CompletionHandler) MycatServer.getInstance() + .getConnector()); + } else { + ((NIOConnector) MycatServer.getInstance().getConnector()) + .postConnect(c); + + } return c; } - + + } diff --git a/src/main/java/io/mycat/backend/postgresql/PostgreSQLBackendConnectionHandler.java b/src/main/java/io/mycat/backend/postgresql/PostgreSQLBackendConnectionHandler.java index 6672c7021..28f219366 100644 --- a/src/main/java/io/mycat/backend/postgresql/PostgreSQLBackendConnectionHandler.java +++ b/src/main/java/io/mycat/backend/postgresql/PostgreSQLBackendConnectionHandler.java @@ -1,5 +1,8 @@ package io.mycat.backend.postgresql; +import io.mycat.MycatServer; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.backend.postgresql.PostgreSQLBackendConnection.BackendConnectionState; import 
io.mycat.backend.postgresql.packet.AuthenticationPacket; import io.mycat.backend.postgresql.packet.AuthenticationPacket.AuthType; import io.mycat.backend.postgresql.packet.BackendKeyData; @@ -19,18 +22,15 @@ import io.mycat.backend.postgresql.packet.RowDescription; import io.mycat.backend.postgresql.utils.PacketUtils; import io.mycat.backend.postgresql.utils.PgPacketApaterUtils; -import io.mycat.net.BufferArray; -import io.mycat.net.Connection.State; -import io.mycat.net.NIOHandler; -import io.mycat.net.NetSystem; -import io.mycat.server.ErrorCode; -import io.mycat.server.executors.ResponseHandler; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.ErrorPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.OkPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; +import io.mycat.buffer.BufferArray; +import io.mycat.config.ErrorCode; +import io.mycat.net.handler.BackendAsyncHandler; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.OkPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; import java.io.IOException; import java.nio.ByteBuffer; @@ -43,51 +43,108 @@ import com.alibaba.fastjson.JSON; -public class PostgreSQLBackendConnectionHandler implements NIOHandler { +public class PostgreSQLBackendConnectionHandler extends BackendAsyncHandler { + static class SelectResponse { + private List dataRows = new ArrayList<>(); - private static final Logger LOGGER = LoggerFactory.getLogger(PostgreSQLBackendConnectionHandler.class); - private byte packetId; + private RowDescription description; - @Override - public void onConnected(PostgreSQLBackendConnection con) throws IOException { - } + public SelectResponse(RowDescription description) { + this.description = description; + } - @Override - public void 
onConnectFailed(PostgreSQLBackendConnection source, Throwable e) { - ResponseHandler respHand = source.getResponseHandler(); - if (respHand != null) { - respHand.connectionError(e, source); + public void addDataRow(DataRow packet) { + this.dataRows.add(packet); } - } - @Override - public void onClosed(PostgreSQLBackendConnection source, String reason) { - ResponseHandler respHand = source.getResponseHandler(); - if (respHand != null) { - respHand.connectionClose(source, reason); + public List getDataRows() { + return dataRows; } - } - @Override - public void handle(PostgreSQLBackendConnection con, ByteBuffer buf, int start, int readedLength) { - switch (con.getState()) { - case connecting: { - doConnecting(con, buf, start, readedLength); - return; + public RowDescription getDescription() { + return description; } - case connected: { - try { - doHandleBusinessMsg(con, buf, start, readedLength); - } catch (Exception e) { - LOGGER.warn("caught err of con " + con, e); - } - return; + + public void setDataRows(List dataRows) { + this.dataRows = dataRows; } - default: - LOGGER.warn("not handled connecton state err " + con.getState() + " for con " + con); - break; + } + + private static final Logger LOGGER = LoggerFactory + .getLogger(PostgreSQLBackendConnection.class); + private static final int RESULT_STATUS_INIT = 0; + + private byte packetId = 1; + + /***** + * 每个后台响应有唯一的连接 + */ + private final PostgreSQLBackendConnection source; + + /** + * 响应数据 + */ + private volatile SelectResponse response = null; + + /** + * 响应状态 + */ + private int resultStatus; + + public PostgreSQLBackendConnectionHandler(PostgreSQLBackendConnection source) { + this.source = source; + } + /*** + * 进行连接处理 + * + * @param con + * @param buf + * @param start + * @param readedLength + */ + private void doConnecting(PostgreSQLBackendConnection con, ByteBuffer buf, + int start, int readedLength) { + try { + List packets = PacketUtils.parsePacket(buf, 0, + readedLength); + 
LOGGER.debug(JSON.toJSONString(packets)); + if (!packets.isEmpty() + && packets.get(0) instanceof AuthenticationPacket) { + // pg认证信息 + AuthenticationPacket packet = (AuthenticationPacket) packets + .get(0); + AuthType aut = packet.getAuthType(); + if (aut != AuthType.Ok) { + PasswordMessage pak = new PasswordMessage( + con.getUser(), con.getPassword(), aut, + ((AuthenticationPacket) packet).getSalt()); + + ByteBuffer buffer = con.allocate(); //allocate(pak.getLength() + 1); + pak.write(buffer); + + con.write(buffer); + } else {// 登入成功了.... + + for (int i = 1; i < packets.size(); i++) { + PostgreSQLPacket _p = packets.get(i); + if (_p instanceof BackendKeyData) { + con.setServerSecretKey(((BackendKeyData) _p) + .getSecretKey()); + } + } + LOGGER.debug("SUCCESS Connected TO PostgreSQL , con id is {}",con.getId()); + con.setState(BackendConnectionState.connected); + con.getResponseHandler().connectionAcquired(con);// 连接已经可以用来 + + } + + + } + + } catch (IOException e) { + LOGGER.error("error",e); } } @@ -99,36 +156,42 @@ public void handle(PostgreSQLBackendConnection con, ByteBuffer buf, int start, i * @param start * @param readedLength */ - private void doHandleBusinessMsg(PostgreSQLBackendConnection con, ByteBuffer buf, int start, int readedLength) { + private void doHandleBusinessMsg(PostgreSQLBackendConnection con, + ByteBuffer buf, int start, int readedLength) { try { - List packets = PacketUtils.parsePacket(buf, 0, readedLength); - if(packets== null || packets.isEmpty()){ - throw new RuntimeException("数据包解析出错"); + List packets = PacketUtils.parsePacket(buf, 0, + readedLength); + if (packets == null || packets.isEmpty()) { + return ; + //throw new RuntimeException("数据包解析出错"); } - SelectResponse response = null; - for(PostgreSQLPacket packet: packets){ - if(packet instanceof ErrorResponse){ - doProcessErrorResponse(con,(ErrorResponse)packet); - }else if(packet instanceof RowDescription){ + + for (PostgreSQLPacket packet : packets) { + if (packet instanceof 
ErrorResponse) { + doProcessErrorResponse(con, (ErrorResponse) packet); + } else if (packet instanceof RowDescription) { response = new SelectResponse((RowDescription) packet); - }else if(packet instanceof DataRow){ - response.addDataRow((DataRow)packet); - }else if(packet instanceof ParameterStatus){ - doProcessParameterStatus(con,(ParameterStatus)packet); - }else if(packet instanceof CommandComplete){ - doProcessCommandComplete(con, (CommandComplete) packet,response); - }else if(packet instanceof NoticeResponse){ - doProcessNoticeResponse(con, (NoticeResponse)packet); - }else if(packet instanceof ReadyForQuery){ + } else if (packet instanceof DataRow) { + response.addDataRow((DataRow) packet); + } else if (packet instanceof ParameterStatus) { + doProcessParameterStatus(con, (ParameterStatus) packet); + } else if (packet instanceof CommandComplete) { + doProcessCommandComplete(con, (CommandComplete) packet, + response); + } else if (packet instanceof NoticeResponse) { + doProcessNoticeResponse(con, (NoticeResponse) packet); + } else if (packet instanceof ReadyForQuery) { doProcessReadyForQuery(con, (ReadyForQuery) packet); - }else if(packet instanceof NotificationResponse){ - doProcessNotificationResponse(con,(NotificationResponse)packet); - }else if(packet instanceof CopyInResponse){ - doProcessCopyInResponse(con,(CopyInResponse)packet); - }else if(packet instanceof CopyOutResponse){ - doProcessCopyOutResponse(con,(CopyOutResponse)packet); - }else if(packet instanceof EmptyQueryResponse){ - doProcessEmptyQueryResponse(con,(EmptyQueryResponse)packet); + } else if (packet instanceof NotificationResponse) { + doProcessNotificationResponse(con, + (NotificationResponse) packet); + } else if (packet instanceof CopyInResponse) { + doProcessCopyInResponse(con, (CopyInResponse) packet); + } else if (packet instanceof CopyOutResponse) { + doProcessCopyOutResponse(con, (CopyOutResponse) packet); + } else if (packet instanceof EmptyQueryResponse) { + 
doProcessEmptyQueryResponse(con, + (EmptyQueryResponse) packet); } } } catch (Exception e) { @@ -141,109 +204,31 @@ private void doHandleBusinessMsg(PostgreSQLBackendConnection con, ByteBuffer buf if (respHand != null) { respHand.errorResponse(err.writeToBytes(), con); } else { - System.err.println("respHand 不为空"); + LOGGER.error("{},respHand 为空",this); } } } - private void doProcessEmptyQueryResponse(PostgreSQLBackendConnection con,EmptyQueryResponse packet) { - // TODO(现阶段无空白sql) - } - private void doProcessCopyOutResponse(PostgreSQLBackendConnection con,CopyOutResponse packet) { - // TODO(复制数据暂时不需要) - } - - private void doProcessCopyInResponse(PostgreSQLBackendConnection con,CopyInResponse packet) { - // TODO(复制数据暂时不需要) - } - - private void doProcessNotificationResponse(PostgreSQLBackendConnection con,NotificationResponse notificationResponse) { - // TODO(后台参数改变通知) - } - - private void doProcessParameterStatus(PostgreSQLBackendConnection con,ParameterStatus parameterStatus) { - // TODO(设置参数响应) - } - - /** - * 后台已经完成了. 
- * - * @param con - * @param packet - */ - private void doProcessReadyForQuery(PostgreSQLBackendConnection con, ReadyForQuery readyForQuery) { - if(con.isInTransaction() != (readyForQuery.getState() == TransactionState.IN)){//设置连接的后台事物状态 - con.setInTransaction((readyForQuery.getState() == TransactionState.IN)); - } - } - - /****** - * 执行成功但是又警告信息 - * - * @param con - * @param packet - */ - private void doProcessNoticeResponse(PostgreSQLBackendConnection con, NoticeResponse noticeResponse) { - //TODO (通知提醒信息) - } - - /*** - * 处理查询出错数据包 - * - * @param con - * @param errMg - */ - private void doProcessErrorResponse(PostgreSQLBackendConnection con, ErrorResponse errorResponse) { - LOGGER.debug("查询出错了!"); - ErrorPacket err = new ErrorPacket(); - err.packetId = ++packetId; - err.message = errorResponse.getErrMsg().trim().replaceAll("\0", " ").getBytes(); - err.errno = ErrorCode.ER_UNKNOWN_ERROR; - con.getResponseHandler().errorResponse(err.writeToBytes(), con); - - } - - /*** - * 数据操作语言 - * - * @param con - * @param commandComplete + /*************** + * 处理简单查询结果 ,每一个查询都是一件 CommandComplete 为结束 + * @param con PostgreSQL 后端连接 * @param response - */ - private void doProcessCommandComplete(PostgreSQLBackendConnection con, CommandComplete commandComplete, SelectResponse response) { - if(commandComplete.isSelectComplete()){ - if(response == null){ - throw new RuntimeException("the select proess err ,the SelectResponse is empty"); - } - doProcessBusinessQuery(con, response ,commandComplete); - }else{ - OkPacket okPck = new OkPacket(); - okPck.affectedRows = 0; - okPck.insertId = 0; - okPck.packetId = ++packetId; - okPck.message = commandComplete.getCommandResponse().trim().getBytes(); - con.getResponseHandler().okResponse(okPck.writeToBytes(), con); - } - } - - - - /***** - * 处理简单查询 - * - * @param con - * @param packets - */ - private void doProcessBusinessQuery(PostgreSQLBackendConnection con, SelectResponse response,CommandComplete commandComplete) { + * @param 
commandComplete + */ + private void doProcessBusinessQuery(PostgreSQLBackendConnection con, + SelectResponse response, CommandComplete commandComplete) { RowDescription rowHd = response.getDescription(); - List fieldPks = PgPacketApaterUtils.rowDescConvertFieldPacket(rowHd); + List fieldPks = PgPacketApaterUtils + .rowDescConvertFieldPacket(rowHd); List rowDatas = new ArrayList<>(); for (DataRow dataRow : response.getDataRows()) { - rowDatas.add(PgPacketApaterUtils.rowDataConvertRowDataPacket(dataRow)); + rowDatas.add(PgPacketApaterUtils + .rowDataConvertRowDataPacket(dataRow)); } - BufferArray bufferArray = NetSystem.getInstance().getBufferPool().allocateArray(); + BufferArray bufferArray = MycatServer.getInstance().getBufferPool() + .allocateArray(); ResultSetHeaderPacket headerPkg = new ResultSetHeaderPacket(); headerPkg.fieldCount = fieldPks.size(); headerPkg.packetId = ++packetId; @@ -254,7 +239,8 @@ private void doProcessBusinessQuery(PostgreSQLBackendConnection con, SelectRespo List fields = new ArrayList(fieldPks.size()); Iterator itor = fieldPks.iterator(); while (itor.hasNext()) { - bufferArray = NetSystem.getInstance().getBufferPool().allocateArray(); + bufferArray = MycatServer.getInstance().getBufferPool() + .allocateArray(); FieldPacket curField = itor.next(); curField.packetId = ++packetId; curField.write(bufferArray); @@ -263,7 +249,7 @@ private void doProcessBusinessQuery(PostgreSQLBackendConnection con, SelectRespo itor.remove(); } - bufferArray = NetSystem.getInstance().getBufferPool().allocateArray(); + bufferArray = MycatServer.getInstance().getBufferPool().allocateArray(); EOFPacket eofPckg = new EOFPacket(); eofPckg.packetId = ++packetId; eofPckg.write(bufferArray); @@ -275,7 +261,8 @@ private void doProcessBusinessQuery(PostgreSQLBackendConnection con, SelectRespo } // output row for (RowDataPacket curRow : rowDatas) { - bufferArray = NetSystem.getInstance().getBufferPool().allocateArray(); + bufferArray = 
MycatServer.getInstance().getBufferPool() + .allocateArray(); curRow.packetId = ++packetId; curRow.write(bufferArray); byte[] row = bufferArray.writeToByteArrayAndRecycle(); @@ -283,7 +270,7 @@ private void doProcessBusinessQuery(PostgreSQLBackendConnection con, SelectRespo } // end row - bufferArray = NetSystem.getInstance().getBufferPool().allocateArray(); + bufferArray = MycatServer.getInstance().getBufferPool().allocateArray(); eofPckg = new EOFPacket(); eofPckg.packetId = ++packetId; eofPckg.write(bufferArray); @@ -295,74 +282,144 @@ private void doProcessBusinessQuery(PostgreSQLBackendConnection con, SelectRespo } } + private void doProcessCommandComplete(PostgreSQLBackendConnection con, + CommandComplete commandComplete, SelectResponse response) { + if (commandComplete.isSelectComplete()) { + if (response == null) { + throw new RuntimeException( + "the select proess err ,the SelectResponse is empty"); + } + doProcessBusinessQuery(con, response, commandComplete); + } else { + OkPacket okPck = new OkPacket(); + + okPck.affectedRows =commandComplete.getAffectedRows(); + okPck.insertId =commandComplete.getInsertId(); + okPck.packetId = ++packetId; + okPck.message = commandComplete.getCommandResponse().getBytes(); + con.getResponseHandler().okResponse(okPck.writeToBytes(), con); + } + } + + private void doProcessCopyInResponse(PostgreSQLBackendConnection con, + CopyInResponse packet) { + // TODO(复制数据暂时不需要) + } + + private void doProcessCopyOutResponse(PostgreSQLBackendConnection con, + CopyOutResponse packet) { + // TODO(复制数据暂时不需要) + } + + private void doProcessEmptyQueryResponse(PostgreSQLBackendConnection con, + EmptyQueryResponse packet) { + // TODO(现阶段无空白sql) + } + /*** - * 进行连接处理 + * 处理查询出错数据包 * * @param con - * @param buf - * @param start - * @param readedLength + * @param errorResponse */ - private void doConnecting(PostgreSQLBackendConnection con, ByteBuffer buf, int start, int readedLength) { - try { - List packets = PacketUtils.parsePacket(buf, 0, 
readedLength); - if (!packets.isEmpty()) { - if (packets.get(0) instanceof AuthenticationPacket) {// pg认证信息 - AuthenticationPacket packet = (AuthenticationPacket) packets.get(0); - AuthType aut = packet.getAuthType(); - if (aut != AuthType.Ok) { - PasswordMessage pak = new PasswordMessage(con.getUser(), con.getPassword(), aut, - ((AuthenticationPacket) packet).getSalt()); - ByteBuffer buffer = ByteBuffer.allocate(pak.getLength() + 1); - pak.write(buffer); - con.write(buffer); - } else {// 登入成功了.... + private void doProcessErrorResponse(PostgreSQLBackendConnection con, + ErrorResponse errorResponse) { + LOGGER.debug("查询出错了!"); + ErrorPacket err = new ErrorPacket(); + err.packetId = ++packetId; + err.message = errorResponse.getErrMsg().trim().replaceAll("\0", " ") + .getBytes(); + err.errno = ErrorCode.ER_UNKNOWN_ERROR; + con.getResponseHandler().errorResponse(err.writeToBytes(), con); - for (int i = 1; i < packets.size(); i++) { - PostgreSQLPacket _p = packets.get(i); - if (_p instanceof BackendKeyData) { - con.setServerSecretKey(((BackendKeyData) _p).getSecretKey()); - } - } - con.setState(State.connected); - con.getResponseHandler().connectionAcquired(con);// 连接已经可以用来 - } - LOGGER.debug(JSON.toJSONString(packets)); - } - } + } - } catch (IOException e) { - e.printStackTrace(); - } + /****** + * 执行成功但是又警告信息 + * + * @param con + * @param noticeResponse + */ + private void doProcessNoticeResponse(PostgreSQLBackendConnection con, + NoticeResponse noticeResponse) { + // TODO (通知提醒信息) } - - static class SelectResponse{ - private RowDescription description; - - private List dataRows = new ArrayList<>(); - public List getDataRows() { - return dataRows; - } + private void doProcessNotificationResponse(PostgreSQLBackendConnection con, + NotificationResponse notificationResponse) { + // TODO(后台参数改变通知) + } - public void addDataRow(DataRow packet) { - this.dataRows.add(packet); - } + private void doProcessParameterStatus(PostgreSQLBackendConnection con, + ParameterStatus 
parameterStatus) { + // TODO(设置参数响应) + } - public void setDataRows(List dataRows) { - this.dataRows = dataRows; - } - public RowDescription getDescription() { - return description; + /**** + * PostgreSQL 已经处理完成一个任务等等下一个任务 + * @param con + * @param readyForQuery + */ + private void doProcessReadyForQuery(PostgreSQLBackendConnection con, + ReadyForQuery readyForQuery) { + if (con.isInTransaction() != (readyForQuery.getState() == TransactionState.IN)) {// 设置连接的后台事物状态 + con.setInTransaction((readyForQuery.getState() == TransactionState.IN)); } - - - public SelectResponse(RowDescription description) { - this.description = description; + } + + @Override + public void handle(byte[] data) { + offerData(data, source.getProcessor().getExecutor()); + } + + /* + * 真正处理 数据库发过来的数据 + * + * + * + * @see io.mycat.net.handler.BackendAsyncHandler#handleData(byte[]) + */ + @Override + protected void handleData(byte[] data) { + ByteBuffer theBuf = null; + try { + theBuf = source.allocate(); + theBuf.put(data); + switch (source.getState()) { + case connecting: { + doConnecting(source, theBuf, 0, data.length); + return; + } + case connected: { + try { + doHandleBusinessMsg(source, theBuf , 0, + data.length); + } catch (Exception e) { + LOGGER.warn("caught err of con " + source, e); + } + return; + } + + default: + LOGGER.warn("not handled connecton state err " + + source.getState() + " for con " + source); + break; + + } + } catch (Exception e) { + LOGGER.error("读取数据包出错",e); + }finally{ + if(theBuf!=null){ + source.recycle(theBuf); + } } - - + } + + @Override + protected void offerDataError() { + resultStatus = RESULT_STATUS_INIT; + throw new RuntimeException("offer data error!"); } } diff --git a/src/main/java/io/mycat/backend/postgresql/PostgreSQLDataSource.java b/src/main/java/io/mycat/backend/postgresql/PostgreSQLDataSource.java index f1aa4e13f..9c1033881 100644 --- a/src/main/java/io/mycat/backend/postgresql/PostgreSQLDataSource.java +++ 
b/src/main/java/io/mycat/backend/postgresql/PostgreSQLDataSource.java @@ -1,18 +1,24 @@ package io.mycat.backend.postgresql; -import java.io.IOException; - -import io.mycat.backend.PhysicalDatasource; +import io.mycat.backend.datasource.PhysicalDatasource; import io.mycat.backend.heartbeat.DBHeartbeat; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; import io.mycat.backend.postgresql.heartbeat.PostgreSQLHeartbeat; -import io.mycat.server.config.node.DBHostConfig; -import io.mycat.server.config.node.DataHostConfig; -import io.mycat.server.executors.ResponseHandler; +import io.mycat.config.model.DBHostConfig; +import io.mycat.config.model.DataHostConfig; + +import java.io.IOException; +/******************* + * PostgreSQL 后端数据源实现 + * @author Coollf + * + */ public class PostgreSQLDataSource extends PhysicalDatasource { private final PostgreSQLBackendConnectionFactory factory; - - public PostgreSQLDataSource(DBHostConfig config, DataHostConfig hostConfig, boolean isReadNode) { + + public PostgreSQLDataSource(DBHostConfig config, DataHostConfig hostConfig, + boolean isReadNode) { super(config, hostConfig, isReadNode); this.factory = new PostgreSQLBackendConnectionFactory(); } @@ -23,8 +29,15 @@ public DBHeartbeat createHeartBeat() { } @Override - public void createNewConnection(ResponseHandler handler, String schema) throws IOException { - factory.make(this, handler,schema); + public void createNewConnection(ResponseHandler handler, String schema) + throws IOException { + factory.make(this, handler, schema); + } + + @Override + public boolean testConnection(String schema) throws IOException { + // TODO Auto-generated method stub + return true; } } diff --git a/src/main/java/io/mycat/backend/postgresql/heartbeat/PostgreSQLDetector.java b/src/main/java/io/mycat/backend/postgresql/heartbeat/PostgreSQLDetector.java index 5300f30d0..7617e1b26 100644 --- a/src/main/java/io/mycat/backend/postgresql/heartbeat/PostgreSQLDetector.java +++ 
b/src/main/java/io/mycat/backend/postgresql/heartbeat/PostgreSQLDetector.java @@ -1,24 +1,25 @@ package io.mycat.backend.postgresql.heartbeat; -import java.util.Map; -import java.util.concurrent.atomic.AtomicBoolean; - -import io.mycat.backend.PhysicalDBPool; -import io.mycat.backend.PhysicalDatasource; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; import io.mycat.backend.heartbeat.DBHeartbeat; import io.mycat.backend.heartbeat.MySQLHeartbeat; import io.mycat.backend.postgresql.PostgreSQLDataSource; -import io.mycat.server.config.node.DataHostConfig; +import io.mycat.config.model.DataHostConfig; import io.mycat.sqlengine.OneRawSQLQueryResultHandler; import io.mycat.sqlengine.SQLJob; import io.mycat.sqlengine.SQLQueryResult; import io.mycat.sqlengine.SQLQueryResultListener; import io.mycat.util.TimeUtil; -public class PostgreSQLDetector implements SQLQueryResultListener>> { +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +public class PostgreSQLDetector implements + SQLQueryResultListener>> { - private static final String[] MYSQL_SLAVE_STAUTS_COLMS = new String[] { "Seconds_Behind_Master", "Slave_IO_Running", - "Slave_SQL_Running" }; + private static final String[] MYSQL_SLAVE_STAUTS_COLMS = new String[] { + "Seconds_Behind_Master", "Slave_IO_Running", "Slave_SQL_Running" }; private PostgreSQLHeartbeat heartbeat; @@ -39,45 +40,41 @@ public PostgreSQLDetector(PostgreSQLHeartbeat heartbeat) { @Override public void onResult(SQLQueryResult> result) { - if (result.isSuccess()) { - int balance = heartbeat.getSource().getDbPool().getBalance(); - PhysicalDatasource source = heartbeat.getSource(); - Map resultResult = result.getResult(); - if (source.getHostConfig().isShowSlaveSql() - &&(source.getHostConfig().getSwitchType() == DataHostConfig.SYN_STATUS_SWITCH_DS || - PhysicalDBPool.BALANCE_NONE!=balance ) - ) - { - - String Slave_IO_Running =resultResult!=null? 
resultResult.get( - "Slave_IO_Running"):null; - String Slave_SQL_Running = resultResult!=null?resultResult.get( - "Slave_SQL_Running"):null; - if (Slave_IO_Running != null - && Slave_IO_Running.equals(Slave_SQL_Running) - && Slave_SQL_Running.equals("Yes")) { - heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_NORMAL); - String Seconds_Behind_Master = resultResult.get( - "Seconds_Behind_Master"); - if (null != Seconds_Behind_Master - && !"".equals(Seconds_Behind_Master)) { - heartbeat.setSlaveBehindMaster(Integer - .valueOf(Seconds_Behind_Master)); - } - } else if(source.isSalveOrRead()) - { - MySQLHeartbeat.LOGGER - .warn("found MySQL master/slave Replication err !!! " - + heartbeat.getSource().getConfig()); - heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_ERROR); - } - - } - heartbeat.setResult(PostgreSQLHeartbeat.OK_STATUS, this, null); - } else { - heartbeat.setResult(PostgreSQLHeartbeat.ERROR_STATUS, this, null); - } - lasstReveivedQryTime = System.currentTimeMillis(); + if (result.isSuccess()) { + int balance = heartbeat.getSource().getDbPool().getBalance(); + PhysicalDatasource source = heartbeat.getSource(); + Map resultResult = result.getResult(); + if (source.getHostConfig().isShowSlaveSql() + && (source.getHostConfig().getSwitchType() == DataHostConfig.SYN_STATUS_SWITCH_DS || PhysicalDBPool.BALANCE_NONE != balance)) { + + String Slave_IO_Running = resultResult != null ? resultResult + .get("Slave_IO_Running") : null; + String Slave_SQL_Running = resultResult != null ? 
resultResult + .get("Slave_SQL_Running") : null; + if (Slave_IO_Running != null + && Slave_IO_Running.equals(Slave_SQL_Running) + && Slave_SQL_Running.equals("Yes")) { + heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_NORMAL); + String Seconds_Behind_Master = resultResult + .get("Seconds_Behind_Master"); + if (null != Seconds_Behind_Master + && !"".equals(Seconds_Behind_Master)) { + heartbeat.setSlaveBehindMaster(Integer + .valueOf(Seconds_Behind_Master)); + } + } else if (source.isSalveOrRead()) { + MySQLHeartbeat.LOGGER + .warn("found MySQL master/slave Replication err !!! " + + heartbeat.getSource().getConfig()); + heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_ERROR); + } + + } + heartbeat.setResult(PostgreSQLHeartbeat.OK_STATUS, this, null); + } else { + heartbeat.setResult(PostgreSQLHeartbeat.ERROR_STATUS, this, null); + } + lasstReveivedQryTime = System.currentTimeMillis(); } public PostgreSQLHeartbeat getHeartbeat() { @@ -96,8 +93,10 @@ public void heartbeat() { if (heartbeat.getSource().getHostConfig().isShowSlaveSql()) { fetchColms = MYSQL_SLAVE_STAUTS_COLMS; } - OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler(fetchColms, this); - sqlJob = new SQLJob(heartbeat.getHeartbeatSQL(), databaseName, resultHandler, ds); + OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler( + fetchColms, this); + sqlJob = new SQLJob(heartbeat.getHeartbeatSQL(), databaseName, + resultHandler, ds); sqlJob.run(); } @@ -108,19 +107,19 @@ public void close(String msg) { sqlJob = null; } } - - public boolean isHeartbeatTimeout() { - return TimeUtil.currentTimeMillis() > Math.max(lastSendQryTime, - lasstReveivedQryTime) + heartbeatTimeout; - } - - public long getLastSendQryTime() { - return lastSendQryTime; - } - - public long getLasstReveivedQryTime() { - return lasstReveivedQryTime; - } + + public boolean isHeartbeatTimeout() { + return TimeUtil.currentTimeMillis() > Math.max(lastSendQryTime, + lasstReveivedQryTime) + heartbeatTimeout; + } 
+ + public long getLastSendQryTime() { + return lastSendQryTime; + } + + public long getLasstReveivedQryTime() { + return lasstReveivedQryTime; + } public void quit() { } diff --git a/src/main/java/io/mycat/backend/postgresql/heartbeat/PostgreSQLHeartbeat.java b/src/main/java/io/mycat/backend/postgresql/heartbeat/PostgreSQLHeartbeat.java index ba0616f18..805f880c7 100644 --- a/src/main/java/io/mycat/backend/postgresql/heartbeat/PostgreSQLHeartbeat.java +++ b/src/main/java/io/mycat/backend/postgresql/heartbeat/PostgreSQLHeartbeat.java @@ -1,261 +1,252 @@ -package io.mycat.backend.postgresql.heartbeat; - -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.concurrent.locks.ReentrantLock; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import io.mycat.backend.PhysicalDBPool; -import io.mycat.backend.PhysicalDatasource; -import io.mycat.backend.heartbeat.DBHeartbeat; -import io.mycat.backend.postgresql.PostgreSQLDataSource; -import io.mycat.server.config.node.DataHostConfig; - -public class PostgreSQLHeartbeat extends DBHeartbeat { - - private static final int MAX_RETRY_COUNT = 5; - - public static final Logger LOGGER = LoggerFactory.getLogger(PostgreSQLHeartbeat.class); - - private PostgreSQLDataSource source; - - private ReentrantLock lock; - - private int maxRetryCount; - - private PostgreSQLDetector detector; - - - public PostgreSQLHeartbeat(PostgreSQLDataSource source) { - this.source = source; - this.lock = new ReentrantLock(false); - this.maxRetryCount = MAX_RETRY_COUNT; - this.status = INIT_STATUS; - this.heartbeatSQL = source.getHostConfig().getHeartbeatSQL(); - } - - @Override - public void start() { - final ReentrantLock lock = this.lock; - lock.lock(); - try { - isStop.compareAndSet(true, false); - super.status = DBHeartbeat.OK_STATUS; - } finally { - lock.unlock(); - } - } - - @Override - public void stop() { - final ReentrantLock lock = this.lock; - lock.lock(); - try { - if (isStop.compareAndSet(false, true)) { - 
if (isChecking.get()) { - // nothing - } else { - PostgreSQLDetector detector = this.detector; - if (detector != null) { - detector.quit(); - isChecking.set(false); - } - } - } - } finally { - lock.unlock(); - } - } - - @Override - public String getLastActiveTime() { - PostgreSQLDetector detector = this.detector; - if (detector == null) { - return null; - } - long t = detector.getLasstReveivedQryTime(); - SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); - return sdf.format(new Date(t)); - } - - @Override - public long getTimeout() { - PostgreSQLDetector detector = this.detector; - if (detector == null) { - return -1L; - } - return detector.getHeartbeatTimeout(); - } - - @Override - public void heartbeat() { - final ReentrantLock lock = this.lock; - lock.lock(); - try { - if (isChecking.compareAndSet(false, true)) { - PostgreSQLDetector detector = this.detector; - if (detector == null || detector.isQuit()) { - try { - detector = new PostgreSQLDetector(this); - detector.heartbeat(); - } catch (Exception e) { - LOGGER.warn(source.getConfig().toString(), e); - setResult(ERROR_STATUS, detector, null); - return; - } - this.detector = detector; - } else { - detector.heartbeat(); - } - } else { - PostgreSQLDetector detector = this.detector; - if (detector != null) { - if (detector.isQuit()) { - isChecking.compareAndSet(true, false); - } else if (detector.isHeartbeatTimeout()) { - setResult(TIMEOUT_STATUS, detector, null); - } - } - } - } finally { - lock.unlock(); - } - } - - public PostgreSQLDataSource getSource() { - return source; - } - - public void setResult(int result, PostgreSQLDetector detector, Object attr) { - this.isChecking.set(false); - switch (result) { - case OK_STATUS: - setOk(detector); - break; - case ERROR_STATUS: - setError(detector); - break; - case TIMEOUT_STATUS: - setTimeout(detector); - break; - } - if (this.status != OK_STATUS) { - switchSourceIfNeed("heartbeat error"); - } - } - - private void switchSourceIfNeed(String reason) 
{ - int switchType = source.getHostConfig().getSwitchType(); - if (switchType == DataHostConfig.NOT_SWITCH_DS) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("not switch datasource ,for switchType is " - + DataHostConfig.NOT_SWITCH_DS); - return; - } - return; - } - PhysicalDBPool pool = this.source.getDbPool(); - int curDatasourceHB = pool.getSource().getHeartbeat().getStatus(); - // read node can't switch ,only write node can switch - if (pool.getWriteType() == PhysicalDBPool.WRITE_ONLYONE_NODE - && !source.isReadNode() - && curDatasourceHB != DBHeartbeat.OK_STATUS - && pool.getSources().length > 1) { - synchronized (pool) { - // try to see if need switch datasource - curDatasourceHB = pool.getSource().getHeartbeat().getStatus(); - if (curDatasourceHB != DBHeartbeat.INIT_STATUS - && curDatasourceHB != DBHeartbeat.OK_STATUS) { - int curIndex = pool.getActivedIndex(); - int nextId = pool.next(curIndex); - PhysicalDatasource[] allWriteNodes = pool.getSources(); - while (true) { - if (nextId == curIndex) { - break; - } - PhysicalDatasource theSource = allWriteNodes[nextId]; - DBHeartbeat theSourceHB = theSource.getHeartbeat(); - int theSourceHBStatus = theSourceHB.getStatus(); - if (theSourceHBStatus == DBHeartbeat.OK_STATUS) { - if (switchType == DataHostConfig.SYN_STATUS_SWITCH_DS) { - if (Integer.valueOf(0).equals( - theSourceHB.getSlaveBehindMaster())) { - LOGGER.info("try to switch datasource ,slave is synchronized to master " - + theSource.getConfig()); - pool.switchSource(nextId, true, reason); - break; - } else { - LOGGER.warn("ignored datasource ,slave is not synchronized to master , slave behind master :" - + theSourceHB - .getSlaveBehindMaster() - + " " + theSource.getConfig()); - } - } else { - // normal switch - LOGGER.info("try to switch datasource ,not checked slave synchronize status " - + theSource.getConfig()); - pool.switchSource(nextId, true, reason); - break; - } - - } - nextId = pool.next(nextId); - } - - } - } - } - } - - private void 
setTimeout(PostgreSQLDetector detector) { - this.isChecking.set(false); - status = DBHeartbeat.TIMEOUT_STATUS; - } - - private void setError(PostgreSQLDetector detector) { - // should continues check error status - if (++errorCount < maxRetryCount) { - - if (detector != null && !detector.isQuit()) { - heartbeat(); // error count not enough, heart beat again - } - //return; - } else - { - if (detector != null ) { - detector.quit(); - } - - this.status = ERROR_STATUS; - this.errorCount = 0; - - } - } - - private void setOk(PostgreSQLDetector detector) { - recorder.set(detector.getLasstReveivedQryTime() - - detector.getLastSendQryTime()); - switch (status) { - case DBHeartbeat.TIMEOUT_STATUS: - this.status = DBHeartbeat.INIT_STATUS; - this.errorCount = 0; - if (isStop.get()) { - detector.quit(); - } else { - heartbeat();// timeout, heart beat again - } - break; - case DBHeartbeat.OK_STATUS: - break; - default: - this.status = OK_STATUS; - this.errorCount = 0; - } - if (isStop.get()) { - detector.quit(); - } - } - -} +package io.mycat.backend.postgresql.heartbeat; + +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.concurrent.locks.ReentrantLock; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.heartbeat.DBHeartbeat; +import io.mycat.backend.postgresql.PostgreSQLDataSource; +import io.mycat.config.model.DataHostConfig; + +public class PostgreSQLHeartbeat extends DBHeartbeat { + + private static final int MAX_RETRY_COUNT = 5; + + public static final Logger LOGGER = LoggerFactory.getLogger(PostgreSQLHeartbeat.class); + + private PostgreSQLDataSource source; + + private ReentrantLock lock; + + private int maxRetryCount; + + private PostgreSQLDetector detector; + + public PostgreSQLHeartbeat(PostgreSQLDataSource source) { + this.source = source; + this.lock = new ReentrantLock(false); + 
this.maxRetryCount = MAX_RETRY_COUNT; + this.status = INIT_STATUS; + this.heartbeatSQL = source.getHostConfig().getHearbeatSQL(); + } + + @Override + public void start() { + final ReentrantLock lock = this.lock; + lock.lock(); + try { + isStop.compareAndSet(true, false); + super.status = DBHeartbeat.OK_STATUS; + } finally { + lock.unlock(); + } + } + + @Override + public void stop() { + final ReentrantLock lock = this.lock; + lock.lock(); + try { + if (isStop.compareAndSet(false, true)) { + if (isChecking.get()) { + // nothing + } else { + PostgreSQLDetector detector = this.detector; + if (detector != null) { + detector.quit(); + isChecking.set(false); + } + } + } + } finally { + lock.unlock(); + } + } + + @Override + public String getLastActiveTime() { + PostgreSQLDetector detector = this.detector; + if (detector == null) { + return null; + } + long t = detector.getLasstReveivedQryTime(); + SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + return sdf.format(new Date(t)); + } + + @Override + public long getTimeout() { + PostgreSQLDetector detector = this.detector; + if (detector == null) { + return -1L; + } + return detector.getHeartbeatTimeout(); + } + + @Override + public void heartbeat() { + final ReentrantLock lock = this.lock; + lock.lock(); + try { + if (isChecking.compareAndSet(false, true)) { + PostgreSQLDetector detector = this.detector; + if (detector == null || detector.isQuit()) { + try { + detector = new PostgreSQLDetector(this); + detector.heartbeat(); + } catch (Exception e) { + LOGGER.warn(source.getConfig().toString(), e); + setResult(ERROR_STATUS, detector, null); + return; + } + this.detector = detector; + } else { + detector.heartbeat(); + } + } else { + PostgreSQLDetector detector = this.detector; + if (detector != null) { + if (detector.isQuit()) { + isChecking.compareAndSet(true, false); + } else if (detector.isHeartbeatTimeout()) { + setResult(TIMEOUT_STATUS, detector, null); + } + } + } + } finally { + lock.unlock(); + } 
+ } + + public PostgreSQLDataSource getSource() { + return source; + } + + public void setResult(int result, PostgreSQLDetector detector, Object attr) { + this.isChecking.set(false); + switch (result) { + case OK_STATUS: + setOk(detector); + break; + case ERROR_STATUS: + setError(detector); + break; + case TIMEOUT_STATUS: + setTimeout(detector); + break; + } + if (this.status != OK_STATUS) { + switchSourceIfNeed("heartbeat error"); + } + } + + private void switchSourceIfNeed(String reason) { + int switchType = source.getHostConfig().getSwitchType(); + if (switchType == DataHostConfig.NOT_SWITCH_DS) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("not switch datasource ,for switchType is " + DataHostConfig.NOT_SWITCH_DS); + return; + } + return; + } + PhysicalDBPool pool = this.source.getDbPool(); + int curDatasourceHB = pool.getSource().getHeartbeat().getStatus(); + // read node can't switch ,only write node can switch + if (pool.getWriteType() == PhysicalDBPool.WRITE_ONLYONE_NODE && !source.isReadNode() + && curDatasourceHB != DBHeartbeat.OK_STATUS && pool.getSources().length > 1) { + synchronized (pool) { + // try to see if need switch datasource + curDatasourceHB = pool.getSource().getHeartbeat().getStatus(); + if (curDatasourceHB != DBHeartbeat.INIT_STATUS && curDatasourceHB != DBHeartbeat.OK_STATUS) { + int curIndex = pool.getActivedIndex(); + int nextId = pool.next(curIndex); + PhysicalDatasource[] allWriteNodes = pool.getSources(); + while (true) { + if (nextId == curIndex) { + break; + } + PhysicalDatasource theSource = allWriteNodes[nextId]; + DBHeartbeat theSourceHB = theSource.getHeartbeat(); + int theSourceHBStatus = theSourceHB.getStatus(); + if (theSourceHBStatus == DBHeartbeat.OK_STATUS) { + if (switchType == DataHostConfig.SYN_STATUS_SWITCH_DS) { + if (Integer.valueOf(0).equals(theSourceHB.getSlaveBehindMaster())) { + LOGGER.info("try to switch datasource ,slave is synchronized to master " + + theSource.getConfig()); + pool.switchSource(nextId, 
true, reason); + break; + } else { + LOGGER.warn( + "ignored datasource ,slave is not synchronized to master , slave behind master :" + + theSourceHB.getSlaveBehindMaster() + " " + theSource.getConfig()); + } + } else { + // normal switch + LOGGER.info("try to switch datasource ,not checked slave synchronize status " + + theSource.getConfig()); + pool.switchSource(nextId, true, reason); + break; + } + + } + nextId = pool.next(nextId); + } + + } + } + } + } + + private void setTimeout(PostgreSQLDetector detector) { + this.isChecking.set(false); + status = DBHeartbeat.TIMEOUT_STATUS; + } + + private void setError(PostgreSQLDetector detector) { + // should continues check error status + if (++errorCount < maxRetryCount) { + + if (detector != null && !detector.isQuit()) { + heartbeat(); // error count not enough, heart beat again + } + // return; + } else { + if (detector != null) { + detector.quit(); + } + + this.status = ERROR_STATUS; + this.errorCount = 0; + + } + } + + private void setOk(PostgreSQLDetector detector) { + recorder.set(detector.getLasstReveivedQryTime() - detector.getLastSendQryTime()); + switch (status) { + case DBHeartbeat.TIMEOUT_STATUS: + this.status = DBHeartbeat.INIT_STATUS; + this.errorCount = 0; + if (isStop.get()) { + detector.quit(); + } else { + heartbeat();// timeout, heart beat again + } + break; + case DBHeartbeat.OK_STATUS: + break; + default: + this.status = OK_STATUS; + this.errorCount = 0; + } + if (isStop.get()) { + detector.quit(); + } + } + +} diff --git a/src/main/java/io/mycat/backend/postgresql/packet/AuthenticationPacket.java b/src/main/java/io/mycat/backend/postgresql/packet/AuthenticationPacket.java index c420de28a..ec545cdf3 100644 --- a/src/main/java/io/mycat/backend/postgresql/packet/AuthenticationPacket.java +++ b/src/main/java/io/mycat/backend/postgresql/packet/AuthenticationPacket.java @@ -1,93 +1,93 @@ -package io.mycat.backend.postgresql.packet; - -import java.nio.ByteBuffer; - -import 
io.mycat.backend.postgresql.utils.PIOUtils; - -public class AuthenticationPacket extends PostgreSQLPacket { - public static enum AuthType { - Ok(0), KerberosV5(2), CleartextPassword(3), CryptPassword(4), MD5Password(5), SCMCredential(6); - - private int value; - - AuthType(int v) { - this.value = v; - } - - public int getValue() { - return value; - } - - public static AuthType valueOf(int v) { - if (v == Ok.value) { - return Ok; - } - if (v == KerberosV5.value) { - return KerberosV5; - } - if (v == CleartextPassword.value) { - return CleartextPassword; - } - if (v == MD5Password.value) { - return MD5Password; - } - if (v == SCMCredential.value) { - return SCMCredential; - } - - return null; - } - } - - /*** - * 标记 - */ - private char marker = PacketMarker.B_Auth.getValue(); - - /**** - * 数据包长度 - */ - private int length; - - /*** - * 盐粒 - */ - private byte[] salt; - - private AuthType authType; - - public AuthType getAuthType() { - return authType; - } - - @Override - public int getLength() { - return length; - } - - @Override - public char getMarker() { - return marker; - } - - public byte[] getSalt() { - return salt; - } - - public void setSalt(byte[] salt) { - this.salt = salt; - } - - public static AuthenticationPacket parse(ByteBuffer buffer, int offset){ - if (buffer.get(offset) != PacketMarker.B_Auth.getValue()) { - throw new IllegalArgumentException("this packetData not is AuthenticationPacket"); - } - AuthenticationPacket packet = new AuthenticationPacket(); - packet.length = PIOUtils.redInteger4(buffer, offset + 1); - packet.authType = AuthType.valueOf(PIOUtils.redInteger4(buffer, offset + 1 + 4)); - if (packet.authType == AuthType.MD5Password) { - packet.salt = PIOUtils.redByteArray(buffer, offset + 1 + 4 + 4, 4); - } - return packet; - } -} +package io.mycat.backend.postgresql.packet; + +import java.nio.ByteBuffer; + +import io.mycat.backend.postgresql.utils.PIOUtils; + +public class AuthenticationPacket extends PostgreSQLPacket { + public static enum 
AuthType { + Ok(0), KerberosV5(2), CleartextPassword(3), CryptPassword(4), MD5Password(5), SCMCredential(6); + + private int value; + + AuthType(int v) { + this.value = v; + } + + public int getValue() { + return value; + } + + public static AuthType valueOf(int v) { + if (v == Ok.value) { + return Ok; + } + if (v == KerberosV5.value) { + return KerberosV5; + } + if (v == CleartextPassword.value) { + return CleartextPassword; + } + if (v == MD5Password.value) { + return MD5Password; + } + if (v == SCMCredential.value) { + return SCMCredential; + } + + return null; + } + } + + /*** + * 标记 + */ + private char marker = PacketMarker.B_Auth.getValue(); + + /**** + * 数据包长度 + */ + private int length; + + /*** + * 盐粒 + */ + private byte[] salt; + + private AuthType authType; + + public AuthType getAuthType() { + return authType; + } + + @Override + public int getLength() { + return length; + } + + @Override + public char getMarker() { + return marker; + } + + public byte[] getSalt() { + return salt; + } + + public void setSalt(byte[] salt) { + this.salt = salt; + } + + public static AuthenticationPacket parse(ByteBuffer buffer, int offset){ + if (buffer.get(offset) != PacketMarker.B_Auth.getValue()) { + throw new IllegalArgumentException("this packetData not is AuthenticationPacket"); + } + AuthenticationPacket packet = new AuthenticationPacket(); + packet.length = PIOUtils.redInteger4(buffer, offset + 1); + packet.authType = AuthType.valueOf(PIOUtils.redInteger4(buffer, offset + 1 + 4)); + if (packet.authType == AuthType.MD5Password) { + packet.salt = PIOUtils.redByteArray(buffer, offset + 1 + 4 + 4, 4); + } + return packet; + } +} diff --git a/src/main/java/io/mycat/backend/postgresql/packet/BackendKeyData.java b/src/main/java/io/mycat/backend/postgresql/packet/BackendKeyData.java index 996ce2f03..d2b2b8ba6 100644 --- a/src/main/java/io/mycat/backend/postgresql/packet/BackendKeyData.java +++ b/src/main/java/io/mycat/backend/postgresql/packet/BackendKeyData.java @@ -1,77 
+1,77 @@ -package io.mycat.backend.postgresql.packet; - -import java.nio.ByteBuffer; - -/** - * 后端数据包信息 - * - * @author Coollf - * - */ - -// BackendKeyData (B) -// Byte1('K') -// 标识该消息是一个取消键字数据。 如果前端希望能够在稍后发出 CancelRequest 消息, 那么它必须保存这个值。 -// -// Int32(12) -// 以字节记的消息内容的长度,包括长度本身。 -// -// Int32 -// 后端的进程号(PID)。 -// -// Int32 -// 此后端的密钥(secret key )。 -public class BackendKeyData extends PostgreSQLPacket { - /** - * 长度 - */ - private int length; - - /*** - * 进程ID - */ - private int pid; - - /*** - * 此后端的密钥(secret key ) - */ - private int secretKey; - - public int getPid() { - return pid; - } - - public int getSecretKey() { - return secretKey; - } - - @Override - public int getLength() { - return length; - } - - @Override - public char getMarker() { - return PacketMarker.B_BackendKey.getValue(); - } - - /*** - * 解析数据包 - * - * @param buffer - * @param offset - * @return - * @throws IllegalArgumentException - */ - public static BackendKeyData parse(ByteBuffer buffer, int offset) { - if (buffer.get(offset) != PacketMarker.B_BackendKey.getValue()) { - throw new IllegalArgumentException("this packet not is BackendKeyData"); - } - BackendKeyData pac = new BackendKeyData(); - pac.length = buffer.getInt(offset + 1); - pac.pid = buffer.getInt(offset + 1 + 4); - pac.secretKey = buffer.getInt(offset + 1 + 4 + 4); - return pac; - } - -} +package io.mycat.backend.postgresql.packet; + +import java.nio.ByteBuffer; + +/** + * 后端数据包信息 + * + * @author Coollf + * + */ + +// BackendKeyData (B) +// Byte1('K') +// 标识该消息是一个取消键字数据。 如果前端希望能够在稍后发出 CancelRequest 消息, 那么它必须保存这个值。 +// +// Int32(12) +// 以字节记的消息内容的长度,包括长度本身。 +// +// Int32 +// 后端的进程号(PID)。 +// +// Int32 +// 此后端的密钥(secret key )。 +public class BackendKeyData extends PostgreSQLPacket { + /** + * 长度 + */ + private int length; + + /*** + * 进程ID + */ + private int pid; + + /*** + * 此后端的密钥(secret key ) + */ + private int secretKey; + + public int getPid() { + return pid; + } + + public int getSecretKey() { + return secretKey; + } + + 
@Override + public int getLength() { + return length; + } + + @Override + public char getMarker() { + return PacketMarker.B_BackendKey.getValue(); + } + + /*** + * 解析数据包 + * + * @param buffer + * @param offset + * @return + * @throws IllegalArgumentException + */ + public static BackendKeyData parse(ByteBuffer buffer, int offset) { + if (buffer.get(offset) != PacketMarker.B_BackendKey.getValue()) { + throw new IllegalArgumentException("this packet not is BackendKeyData"); + } + BackendKeyData pac = new BackendKeyData(); + pac.length = buffer.getInt(offset + 1); + pac.pid = buffer.getInt(offset + 1 + 4); + pac.secretKey = buffer.getInt(offset + 1 + 4 + 4); + return pac; + } + +} diff --git a/src/main/java/io/mycat/backend/postgresql/packet/BindComplete.java b/src/main/java/io/mycat/backend/postgresql/packet/BindComplete.java index 63fb93d3e..2ef71eb40 100644 --- a/src/main/java/io/mycat/backend/postgresql/packet/BindComplete.java +++ b/src/main/java/io/mycat/backend/postgresql/packet/BindComplete.java @@ -1,9 +1,9 @@ package io.mycat.backend.postgresql.packet; -import io.mycat.backend.postgresql.utils.PIOUtils; - import java.nio.ByteBuffer; +import io.mycat.backend.postgresql.utils.PIOUtils; + // BindComplete (B) // Byte1('2') // 标识消息为一个绑定结束标识符。 diff --git a/src/main/java/io/mycat/backend/postgresql/packet/CancelRequest.java b/src/main/java/io/mycat/backend/postgresql/packet/CancelRequest.java index 6d0ff3b1c..cb37c2fc0 100644 --- a/src/main/java/io/mycat/backend/postgresql/packet/CancelRequest.java +++ b/src/main/java/io/mycat/backend/postgresql/packet/CancelRequest.java @@ -1,9 +1,9 @@ package io.mycat.backend.postgresql.packet; -import io.mycat.backend.postgresql.utils.PIOUtils; - import java.nio.ByteBuffer; +import io.mycat.backend.postgresql.utils.PIOUtils; + // CancelRequest (F) // Int32(16) // 以字节计的消息长度。包括长度本身。 diff --git a/src/main/java/io/mycat/backend/postgresql/packet/CommandComplete.java 
b/src/main/java/io/mycat/backend/postgresql/packet/CommandComplete.java index 508af1641..a55bab60e 100644 --- a/src/main/java/io/mycat/backend/postgresql/packet/CommandComplete.java +++ b/src/main/java/io/mycat/backend/postgresql/packet/CommandComplete.java @@ -1,94 +1,138 @@ -package io.mycat.backend.postgresql.packet; - -import io.mycat.backend.postgresql.packet.PostgreSQLPacket.PacketMarker; -import io.mycat.backend.postgresql.utils.PIOUtils; - -import java.nio.ByteBuffer; - -// CommandComplete (B) -// Byte1('C') -// 标识此消息是一个命令结束响应。 -// -// Int32 -// 以字节记的消息内容的长度,包括长度本身。 -// -// String -// 命令标记。它通常是一个单字,标识那个命令完成。 -// -// 对于INSERT命令,标记是INSERT oidrows, 这里的rows是插入的行数。oid 在row为 1 并且目标表有 OID 的时候是插入行的对象 ID; 否则oid就是 0。 -// -// 对于DELETE 命令,标记是 DELETE rows, 这里的 rows 是删除的行数。 -// -// 对于 UPDATE 命令,标记是 UPDATE rows 这里的 rows 是更新的行数。 -// -// 对于 MOVE 命令,标记是 MOVE rows,这里的 rows 是游标未知改变的行数。 -// -// 对于 FETCH 命令,标记是 FETCH rows,这里的 rows 是从游标中检索出来的行数。 -public class CommandComplete extends PostgreSQLPacket { - - private int length; - - /** - * 命令 - */ - private String commandResponse; - - @Override - public int getLength() { - return length; - } - - public boolean isDDLComplete() { - return commandResponse != null && (commandResponse.startsWith("INSERT") || commandResponse.startsWith("DELETE") - || commandResponse.startsWith("UPDATE")); - } - - public boolean isTranComplete(){ - return commandResponse != null && (commandResponse.startsWith("ROLLBACK") || commandResponse.startsWith("COMMIT")); - } - - public boolean isSelectComplete() { - return commandResponse != null && (commandResponse.startsWith("SELECT")); - } - - public int getRows() { - if(!isDDLComplete()){ - return 0; - } - if (commandResponse != null) { - String[] s = commandResponse.split(" +"); - if (s.length == 0) { - return 0; - } - try { - return Integer.valueOf(s[s.length - 1].trim()); - } catch (Exception e) { - e.printStackTrace(); - System.out.println(commandResponse); - } - } - return 0; - } - - @Override - public 
char getMarker() { - return PacketMarker.B_CommandComplete.getValue(); - } - - public static CommandComplete parse(ByteBuffer buffer, int offset) { - if (buffer.get(offset) != PacketMarker.B_CommandComplete.getValue()) { - throw new IllegalArgumentException("this packetData not is CommandComplete"); - } - CommandComplete packet = new CommandComplete(); - packet.length = PIOUtils.redInteger4(buffer, offset + 1); - packet.commandResponse = new String(PIOUtils.redByteArray(buffer, offset + 1 + 4, packet.length - 4), UTF8); - return packet; - - } - - public String getCommandResponse() { - return commandResponse; - } - - -} +package io.mycat.backend.postgresql.packet; + +import java.nio.ByteBuffer; + +import io.mycat.backend.postgresql.utils.PIOUtils; + +// CommandComplete (B) +// Byte1('C') +// 标识此消息是一个命令结束响应。 +// +// Int32 +// 以字节记的消息内容的长度,包括长度本身。 +// +// String +// 命令标记。它通常是一个单字,标识那个命令完成。 +// +// 对于INSERT命令,标记是INSERT oid rows, 这里的rows是插入的行数。oid 在row为 1 并且目标表有 OID 的时候是插入行的对象 ID; 否则oid就是 0。 +// +// 对于DELETE 命令,标记是 DELETE rows, 这里的 rows 是删除的行数。 +// +// 对于 UPDATE 命令,标记是 UPDATE rows 这里的 rows 是更新的行数。 +// +// 对于 MOVE 命令,标记是 MOVE rows,这里的 rows 是游标未知改变的行数。 +// +// 对于 FETCH 命令,标记是 FETCH rows,这里的 rows 是从游标中检索出来的行数。 +public class CommandComplete extends PostgreSQLPacket { + + private int length; + + /** + * 命令 + */ + private String commandResponse; + + // 存储状态。 + + public int getAffectedRows() { + return affectedRows; + } + + public void setAffectedRows(int affectedRows) { + this.affectedRows = affectedRows; + } + + public int getInsertId() { + return insertId; + } + + public void setInsertId(int insertId) { + this.insertId = insertId; + } + + // 修改影响条数 + private int affectedRows = 0; + + // 插入ID + private int insertId = 0; + + @Override + public int getLength() { + return length; + } + + public boolean isDDLComplete() { + return commandResponse != null && (commandResponse.startsWith("INSERT") || commandResponse.startsWith("DELETE") + || commandResponse.startsWith("UPDATE")); + 
} + + public boolean isTranComplete() { + return commandResponse != null + && (commandResponse.startsWith("ROLLBACK") || commandResponse.startsWith("COMMIT")); + } + + public boolean isSelectComplete() { + return commandResponse != null && (commandResponse.startsWith("SELECT")); + } + + public int getRows() { + if (!isDDLComplete()) { + return 0; + } + if (commandResponse != null) { + String[] s = commandResponse.split(" +"); + if (s.length == 0) { + return 0; + } + try { + return Integer.valueOf(s[s.length - 1].trim()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + return 0; + } + + @Override + public char getMarker() { + return PacketMarker.B_CommandComplete.getValue(); + } + + public static CommandComplete parse(ByteBuffer buffer, int offset) { + if (buffer.get(offset) != PacketMarker.B_CommandComplete.getValue()) { + throw new IllegalArgumentException("this packetData not is CommandComplete"); + } + CommandComplete packet = new CommandComplete(); + packet.length = PIOUtils.redInteger4(buffer, offset + 1); + packet.commandResponse = new String(PIOUtils.redByteArray(buffer, offset + 1 + 4, packet.length - 4), UTF8) + .trim(); + if (packet.commandResponse.startsWith("INSERT")) { + String vs[] = packet.commandResponse.replace("INSERT", "").trim().split(" +"); + packet.insertId = parseInt(vs[0]); + + packet.affectedRows =parseInt(vs[1]); + } else if (packet.commandResponse.startsWith("UPDATE")) { + packet.affectedRows = parseInt(packet.commandResponse.replace("UPDATE", "").trim()); + }else if(packet.commandResponse.startsWith("DELETE")){ + packet.affectedRows = parseInt(packet.commandResponse.replace("DELETE", "").trim()); + } + return packet; + + } + + + + private static int parseInt(String value) { + try{ + return Integer.parseInt(value); + }catch (Exception e) { + e.printStackTrace(); + } + return 0; + } + + public String getCommandResponse() { + return commandResponse; + } + +} diff --git 
a/src/main/java/io/mycat/backend/postgresql/packet/CopyInResponse.java b/src/main/java/io/mycat/backend/postgresql/packet/CopyInResponse.java index 64057f311..34a7ab3c1 100644 --- a/src/main/java/io/mycat/backend/postgresql/packet/CopyInResponse.java +++ b/src/main/java/io/mycat/backend/postgresql/packet/CopyInResponse.java @@ -1,7 +1,5 @@ package io.mycat.backend.postgresql.packet; -import io.mycat.backend.postgresql.utils.PIOUtils; - import java.nio.ByteBuffer; // CopyInResponse (B) // Byte1('G') @@ -19,6 +17,8 @@ // Int16[N] // 每个字段将要用的格式代码,目前每个都必须是零(文本)或者一(二进制)。 如果全部拷贝格式都是文本的,那么所有的都必须是零。 +import io.mycat.backend.postgresql.utils.PIOUtils; + /*** * 拷贝数据开始 * diff --git a/src/main/java/io/mycat/backend/postgresql/packet/CopyOutResponse.java b/src/main/java/io/mycat/backend/postgresql/packet/CopyOutResponse.java index 76346cf15..04f46d8d6 100644 --- a/src/main/java/io/mycat/backend/postgresql/packet/CopyOutResponse.java +++ b/src/main/java/io/mycat/backend/postgresql/packet/CopyOutResponse.java @@ -1,9 +1,9 @@ package io.mycat.backend.postgresql.packet; -import io.mycat.backend.postgresql.utils.PIOUtils; - import java.nio.ByteBuffer; +import io.mycat.backend.postgresql.utils.PIOUtils; + // CopyOutResponse (B) // Byte1('H') // 标识这条消息是一条 Start Copy Out (开始拷贝进出)响应消息。 这条消息后面将跟着一条拷贝出数据消息。 diff --git a/src/main/java/io/mycat/backend/postgresql/packet/DataRow.java b/src/main/java/io/mycat/backend/postgresql/packet/DataRow.java index d46bf0134..8f8cb2bc0 100644 --- a/src/main/java/io/mycat/backend/postgresql/packet/DataRow.java +++ b/src/main/java/io/mycat/backend/postgresql/packet/DataRow.java @@ -1,9 +1,9 @@ package io.mycat.backend.postgresql.packet; -import io.mycat.backend.postgresql.utils.PIOUtils; - import java.nio.ByteBuffer; +import io.mycat.backend.postgresql.utils.PIOUtils; + // DataRow (B) // Byte1('D') // 标识这个消息是一个数据行。 diff --git a/src/main/java/io/mycat/backend/postgresql/packet/EmptyQueryResponse.java 
b/src/main/java/io/mycat/backend/postgresql/packet/EmptyQueryResponse.java index 952d09b58..f8a559e9e 100644 --- a/src/main/java/io/mycat/backend/postgresql/packet/EmptyQueryResponse.java +++ b/src/main/java/io/mycat/backend/postgresql/packet/EmptyQueryResponse.java @@ -1,9 +1,9 @@ package io.mycat.backend.postgresql.packet; -import io.mycat.backend.postgresql.utils.PIOUtils; - import java.nio.ByteBuffer; +import io.mycat.backend.postgresql.utils.PIOUtils; + // EmptyQueryResponse (B) // Byte1('I') diff --git a/src/main/java/io/mycat/backend/postgresql/packet/ErrorResponse.java b/src/main/java/io/mycat/backend/postgresql/packet/ErrorResponse.java index 7341f7358..67012d9c1 100644 --- a/src/main/java/io/mycat/backend/postgresql/packet/ErrorResponse.java +++ b/src/main/java/io/mycat/backend/postgresql/packet/ErrorResponse.java @@ -1,69 +1,69 @@ -package io.mycat.backend.postgresql.packet; - -import java.io.UnsupportedEncodingException; -import java.nio.ByteBuffer; - -//ErrorResponse (B) -//Byte1('E') -//标识消息是一条错误。 -// -//Int32 -//以字节记的消息内容的长度,包括长度本身。 -// -//消息体由一个或多个标识出来的字段组成,后面跟着一个字节零作为终止符。 字段可以以任何顺序出现。对于每个字段都有下面的东西: -// -//Byte1 -//一个标识字段类型的代码;如果为零,这就是消息终止符并且不会跟着有字串。 目前定义的字段类型在 Section 43.5 列出。 因为将来可能增加更多的字段类型,所以前端应该不声不响地忽略不认识类型的字段。 -// -//String -//字段值。 - -public class ErrorResponse extends PostgreSQLPacket { - /********* - * 解析错误包 - * - * @param buffer - * @param offset - * @return - * @throws UnsupportedEncodingException - * @throws IllegalAccessException - */ - public static ErrorResponse parse(ByteBuffer buffer, int offset) - throws IllegalArgumentException { - if ((char) buffer.get(offset) != PacketMarker.B_Error.getValue()) { - throw new IllegalArgumentException("this packet not is ErrorResponse"); - } - ErrorResponse err = new ErrorResponse(); - err.length = buffer.getInt(offset + 1); - err.mark = buffer.get(offset + 1 + 4); - if (err.mark != 0) { - byte[] str = new byte[err.length - (4+4)]; - for(int i =0;i params; // 协议参数 - - @Override - public int 
getLength() { - return 0; - } - - @Override - @Deprecated - public char getMarker() { - return marker; - } -} +package io.mycat.backend.postgresql.packet; + +import java.util.List; + +public class StartupMessage extends PostgreSQLPacket { + private char marker = PacketMarker.F_StartupMessage.getValue(); //标准 + public int major; // 协议版本 + + public List params; // 协议参数 + + @Override + public int getLength() { + return 0; + } + + @Override + @Deprecated + public char getMarker() { + return marker; + } +} diff --git a/src/main/java/io/mycat/backend/postgresql/packet/Terminate.java b/src/main/java/io/mycat/backend/postgresql/packet/Terminate.java index b3702bdad..c2fcbaee5 100644 --- a/src/main/java/io/mycat/backend/postgresql/packet/Terminate.java +++ b/src/main/java/io/mycat/backend/postgresql/packet/Terminate.java @@ -1,7 +1,5 @@ package io.mycat.backend.postgresql.packet; -import io.mycat.backend.postgresql.utils.PIOUtils; - import java.nio.ByteBuffer; // // Terminate (F) @@ -11,6 +9,8 @@ // Int32(4) // 以字节记的消息内容的长度,包括长度自身。 +import io.mycat.backend.postgresql.utils.PIOUtils; + /*** * 终止命令 * diff --git a/src/main/java/io/mycat/backend/postgresql/utils/MD5Digest.java b/src/main/java/io/mycat/backend/postgresql/utils/MD5Digest.java index bff7ac758..20baeacc0 100644 --- a/src/main/java/io/mycat/backend/postgresql/utils/MD5Digest.java +++ b/src/main/java/io/mycat/backend/postgresql/utils/MD5Digest.java @@ -14,7 +14,7 @@ * @author Jeremy Wohl */ -import java.security.*; +import java.security.MessageDigest; public class MD5Digest { diff --git a/src/main/java/io/mycat/backend/postgresql/utils/PIOUtils.java b/src/main/java/io/mycat/backend/postgresql/utils/PIOUtils.java index e5e147582..265ed7e6c 100644 --- a/src/main/java/io/mycat/backend/postgresql/utils/PIOUtils.java +++ b/src/main/java/io/mycat/backend/postgresql/utils/PIOUtils.java @@ -57,10 +57,11 @@ public static short redInteger2(ByteBuffer buffer, int offset) { */ public static void SendInteger2(int val, ByteBuffer 
buffer) throws IOException { - if (val < Short.MIN_VALUE || val > Short.MAX_VALUE) + if (val < Short.MIN_VALUE || val > Short.MAX_VALUE) { throw new IOException( "Tried to send an out-of-range integer as a 2-byte value: " + val); + } byte[] _int2buf = new byte[2]; _int2buf[0] = (byte) (val >>> 8); @@ -99,11 +100,10 @@ public static void SendString(String string, ByteBuffer buffer) { public static String redString(ByteBuffer buffer, int offset, Charset charset) throws IOException { ByteArrayOutputStream out =new ByteArrayOutputStream(); for(int i=offset ;i< buffer.limit();i++){ - out.write(new byte[]{buffer.get(i)}); if(((char)buffer.get(i)) == '\0'){ - //System.out.println(i - offset); break; } + out.write(new byte[]{buffer.get(i)}); } return new String(out.toByteArray(),charset); } diff --git a/src/main/java/io/mycat/backend/postgresql/utils/PacketUtils.java b/src/main/java/io/mycat/backend/postgresql/utils/PacketUtils.java index 00ee118f3..7ce4e483a 100644 --- a/src/main/java/io/mycat/backend/postgresql/utils/PacketUtils.java +++ b/src/main/java/io/mycat/backend/postgresql/utils/PacketUtils.java @@ -84,7 +84,7 @@ public static List parsePacket(ByteBuffer buffer,int offset,in } @Deprecated - public static List parsePacket(byte[] bytes, int offset, + private static List parsePacket(byte[] bytes, int offset, int readLength) throws IOException { List pgs = new ArrayList<>(); while (offset < readLength) { @@ -182,8 +182,9 @@ public static ByteBuffer makeStartUpPacket(String user, String database) String[][] params = paramList.toArray(new String[0][]); StringBuilder details = new StringBuilder(); for (int i = 0; i < params.length; ++i) { - if (i != 0) + if (i != 0) { details.append(", "); + } details.append(params[i][0]); details.append("="); details.append(params[i][1]); @@ -215,7 +216,7 @@ public static ByteBuffer makeStartUpPacket(String user, String database) PIOUtils.Send(encodedParam, buffer); PIOUtils.SendChar(0, buffer); } - PIOUtils.Send(new byte[] { 0 }, 
buffer); + PIOUtils.Send(new byte[] { 0 }, buffer); return buffer; } diff --git a/src/main/java/io/mycat/backend/postgresql/utils/PgPacketApaterUtils.java b/src/main/java/io/mycat/backend/postgresql/utils/PgPacketApaterUtils.java index df654b8e5..0bbf0c218 100644 --- a/src/main/java/io/mycat/backend/postgresql/utils/PgPacketApaterUtils.java +++ b/src/main/java/io/mycat/backend/postgresql/utils/PgPacketApaterUtils.java @@ -1,67 +1,76 @@ -package io.mycat.backend.postgresql.utils; - -import io.mycat.backend.postgresql.packet.DataRow; -import io.mycat.backend.postgresql.packet.DataRow.DataColumn; -import io.mycat.backend.postgresql.packet.PostgreSQLPacket.DateType; -import io.mycat.backend.postgresql.packet.RowDescription; -import io.mycat.backend.postgresql.packet.RowDescription.ColumnDescription; -import io.mycat.server.Fields; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.RowDataPacket; - -import java.nio.charset.Charset; -import java.util.ArrayList; -import java.util.List; - - -/********* - * 数据包适配 - * @author Coollf - * - */ -public class PgPacketApaterUtils { - private static final Charset UTF8 = Charset.forName("utf-8"); - - /** - * 列标示转换成Mysql的数据 - * @param description - * @return - */ - public static List rowDescConvertFieldPacket(RowDescription description){ - List fieldPks = new ArrayList(description.getColumnNumber()); - for(ColumnDescription c: description.getColumns()){ - FieldPacket fieldPk = new FieldPacket(); - fieldPk.name = c.getColumnName().getBytes(UTF8); - fieldPk.type = convertFieldType(c.getColumnType()); - fieldPks.add(fieldPk); - } - //TODO 等待实现 - return fieldPks; - } - - /*** - * 将pg的sql类型转换成 - * @param columnType - * @return - */ - private static int convertFieldType(DateType columnType) { - if(columnType == DateType.timestamp_){ - return Fields.FIELD_TYPE_TIMESTAMP; - } - - return Fields.FIELD_TYPE_VARCHAR; - } - - /*** - * 行数据转换成mysql的数据 - * @param dataRow - * @return - */ - public static RowDataPacket 
rowDataConvertRowDataPacket(DataRow dataRow){ - RowDataPacket curRow = new RowDataPacket(dataRow.getColumnNumber()); - for(DataColumn c: dataRow.getColumns()){ - curRow.add(c.getData()); - } - return curRow; - } -} +package io.mycat.backend.postgresql.utils; + +import io.mycat.backend.postgresql.packet.DataRow; +import io.mycat.backend.postgresql.packet.DataRow.DataColumn; +import io.mycat.backend.postgresql.packet.PostgreSQLPacket.DateType; +import io.mycat.backend.postgresql.packet.RowDescription; +import io.mycat.backend.postgresql.packet.RowDescription.ColumnDescription; +import io.mycat.config.Fields; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.RowDataPacket; + +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.List; + + +/********* + * 数据包适配 + * @author Coollf + * + */ +public class PgPacketApaterUtils { + private static final Charset UTF8 = Charset.forName("utf-8"); + + /** + * 列标示转换成Mysql的数据 + * @param description + * @return + */ + public static List rowDescConvertFieldPacket(RowDescription description){ + List fieldPks = new ArrayList(description.getColumnNumber()); + for(ColumnDescription c: description.getColumns()){ + FieldPacket fieldPk = new FieldPacket(); + fieldPk.name = c.getColumnName().trim().getBytes(UTF8); + fieldPk.type = convertFieldType(c.getColumnType()); + fieldPks.add(fieldPk); + } + //TODO 等待实现 + return fieldPks; + } + + /*** + * 将pg的sql类型转换成 + * @param columnType + * @return + */ + private static int convertFieldType(DateType columnType) { + if(columnType == DateType.timestamp_){ + return Fields.FIELD_TYPE_TIMESTAMP; + } + if(columnType == DateType.int2_ || columnType == DateType.int4_ || columnType == DateType.int8_ ){ + return Fields.FIELD_TYPE_INT24; + } + if(columnType == DateType.decimal_){ + return Fields.FIELD_TYPE_NEW_DECIMAL; + } + if(columnType == DateType.UNKNOWN){ + + } + return Fields.FIELD_TYPE_VARCHAR; + } + + + /*** + * 行数据转换成mysql的数据 + * @param dataRow + * @return + 
*/ + public static RowDataPacket rowDataConvertRowDataPacket(DataRow dataRow){ + RowDataPacket curRow = new RowDataPacket(dataRow.getColumnNumber()); + for(DataColumn c: dataRow.getColumns()){ + curRow.add(c.getData()); + } + return curRow; + } +} diff --git a/src/main/java/io/mycat/backend/postgresql/utils/PgSqlApaterUtils.java b/src/main/java/io/mycat/backend/postgresql/utils/PgSqlApaterUtils.java index d3a2769f2..3dcfd6f22 100644 --- a/src/main/java/io/mycat/backend/postgresql/utils/PgSqlApaterUtils.java +++ b/src/main/java/io/mycat/backend/postgresql/utils/PgSqlApaterUtils.java @@ -1,25 +1,150 @@ -package io.mycat.backend.postgresql.utils; - -import java.util.HashMap; -import java.util.Map; - - - -public class PgSqlApaterUtils { - public static String apater(String sql){ - if(stream.get(sql.toUpperCase())!=null){ - return stream.get(sql.toUpperCase()); - } - return sql; - } - - public static Map stream = new HashMap<>(); - - - static{ - stream.put("SELECT @@CHARACTER_SET_DATABASE, @@COLLATION_DATABASE".toUpperCase(), "SELECT 'utf8' as \"@@character_set_database\", 'utf8_general_ci' as \"@@collation_database\""); - stream.put("SHOW STATUS", "SELECT 'Aborted_clients' as \"Variable\" , 0 as \"Value\" where 1=2 "); - stream.put("SHOW FULL TABLES WHERE Table_type != 'VIEW'".toUpperCase(), "select tablename as \"Tables_In_\",'BASE TABLE' as \"Table_Type\" from pg_tables where schemaname ='public'"); - // stream.put("SHOW TABLE STATUS LIKE 'company'".toUpperCase(), "select 1 where 1=2"); - } -} +package io.mycat.backend.postgresql.utils; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + + + +public class PgSqlApaterUtils { + /** + * 查询表结构 + */ + private static final String SHOW_TABLE_STATUS_SQL_PREFIX ="SHOW TABLE STATUS LIKE"; + + /******* + * 展示建表语句 + */ + private static final String SHOW_CREATE_TABLE_SQL_PREFIX = "SHOW CREATE TABLE"; + + /** + * 表机构信息 ,包含主键 + */ + private static final String 
SHOW_COLUMNS_SQL_PREFIX ="SHOW COLUMNS FROM"; + + + + public static String apater(String sql){ + sql = sql.replaceAll("`","\""); + final String SQL = sql.toUpperCase().replaceAll("`","\""); + String _mapperSql = stream.get(SQL); + if(_mapperSql!=null){ + return _mapperSql; + } + if (SQL.startsWith(SHOW_TABLE_STATUS_SQL_PREFIX)){ + return doApaterTableStatusSql(SQL); + } + + if(SQL.startsWith(SHOW_CREATE_TABLE_SQL_PREFIX)){ + return doApaterCreateTabelSql(SQL); + } + + if(SQL.startsWith(SHOW_COLUMNS_SQL_PREFIX)){ + return doApaterColumnsSql(SQL); + } + + if(SQL.indexOf("LIMIT")!=-1 && SQL.indexOf("OFFSET") == -1){//非pgsql 分页语句 + return doApaterPagingSql(SQL,sql); + } + + return sql; + } + + + /******* + * 获取列信息SQL 语句 + * @param sql + * @return + */ + private static String doApaterColumnsSql(String sql) { + return "SELECT '' as \"Field\" ,'' as \"Type\" ,''as \"Null\" ,'' as \"Key\" ,'' as \"Default\" , '' as \"Extra\" from pg_namespace where 1=2"; + } + + + private static String doApaterPagingSql(final String SQL, String sql) { + int index = SQL.indexOf("LIMIT"); + String pagingPart = sql.substring(index); + String selectPart = sql.substring(0, index); + String[] pk = pagingPart.split("(\\s+)|(,)"); + List slices = new ArrayList(); + for (String token : pk) { + if (token.trim().length() > 0) { + slices.add(token); + } + } + + if (slices.size() == 3) { + return selectPart + + String.format("%s %s offset %s", slices.get(0), + slices.get(2), slices.get(1)); + } + if (slices.size() == 2) { + return selectPart + + String.format(" %s %s offset 0 ", slices.get(0), + slices.get(1)); + } + + return sql;// 无法处理分页sql原样返回 + } + + private static String doApaterCreateTabelSql(String sql) { + return "select '' as Table ,'' as \"Create Table\" from pg_namespace where 1=2"; + } + + + /******** + * 进行表结构语句适配 + * @param sql + * @return + */ + private static String doApaterTableStatusSql(String sql) { + String tableName =sql.substring(SHOW_TABLE_STATUS_SQL_PREFIX.length()); + 
StringBuilder sb = new StringBuilder(); + sb.append("SELECT").append(" "); + sb.append(" attname AS NAME,").append(" "); + sb.append(" 'InnoDB' AS Engine,").append(" "); + sb.append(" 10 AS VERSION,").append(" "); + sb.append(" 'Compact' AS Row_format,").append(" "); + sb.append(" 0 AS ROWS,").append(" "); + sb.append(" 10000 AS Avg_row_length,").append(" "); + sb.append(" 10000 AS Data_length,").append(" "); + sb.append(" 0 AS Max_data_length,").append(" "); + sb.append(" 0 AS Index_length,").append(" "); + sb.append(" 0 AS Data_free,").append(" "); + sb.append(" NULL AS Auto_increment,").append(" "); + sb.append(" NULL AS Create_time,").append(" "); + sb.append(" NULL AS Update_time,").append(" "); + sb.append(" NULL AS Check_time,").append(" "); + sb.append(" 'utf8_general_ci' AS COLLATION,").append(" "); + sb.append(" NULL AS Checksum,").append(" "); + sb.append(" '' AS Create_options,").append(" "); + sb.append(" '' AS COMMENT").append(" "); + sb.append("FROM").append(" "); + sb.append(" pg_attribute").append(" "); + sb.append("INNER JOIN pg_class ON pg_attribute.attrelid = pg_class.oid").append(" "); + sb.append("INNER JOIN pg_type ON pg_attribute.atttypid = pg_type.oid").append(" "); + sb.append("LEFT OUTER JOIN pg_attrdef ON pg_attrdef.adrelid = pg_class.oid").append(" "); + sb.append("AND pg_attrdef.adnum = pg_attribute.attnum").append(" "); + sb.append("LEFT OUTER JOIN pg_description ON pg_description.objoid = pg_class.oid").append(" "); + sb.append("AND pg_description.objsubid = pg_attribute.attnum").append(" "); + sb.append("WHERE").append(" "); + sb.append(" pg_attribute.attnum > 0").append(" "); + sb.append("AND attisdropped <> 't'").append(" "); + sb.append("AND pg_class.relname =").append(tableName).append(" "); + sb.append("ORDER BY").append(" "); + sb.append(" pg_attribute.attnum").append(" "); + return sb.toString(); + } + + + public static Map stream = new HashMap<>(); + + + static{ + stream.put("SELECT @@CHARACTER_SET_DATABASE, 
@@COLLATION_DATABASE".toUpperCase(), "SELECT 'utf8' as \"@@character_set_database\", 'utf8_general_ci' as \"@@collation_database\""); + stream.put("SHOW STATUS", "SELECT 'Aborted_clients' as \"Variable\" , 0 as \"Value\" where 1=2 "); + stream.put("SHOW FULL TABLES WHERE Table_type != 'VIEW'".toUpperCase(), "select tablename as \"Tables_In_\",'BASE TABLE' as \"Table_Type\" from pg_tables where schemaname ='public'"); + stream.put("SHOW ENGINES","SELECT DISTINCT 'InnoDB' as Engine ,\t'DEFAULT' as Support , \t'Supports transactions,row-level locking and foreign keys' as \"Comment\"\t,'YES' as \"Transactions\" ,\t'YES' as \"XA\",'YES' as \"Savepoints\" from pg_tablespace\n"); + } +} diff --git a/src/main/java/io/mycat/net/BufferArray.java b/src/main/java/io/mycat/buffer/BufferArray.java similarity index 91% rename from src/main/java/io/mycat/net/BufferArray.java rename to src/main/java/io/mycat/buffer/BufferArray.java index 8b6070270..8345d3b88 100644 --- a/src/main/java/io/mycat/net/BufferArray.java +++ b/src/main/java/io/mycat/buffer/BufferArray.java @@ -1,4 +1,4 @@ -package io.mycat.net; +package io.mycat.buffer; import io.mycat.util.ByteBufferUtil; @@ -10,12 +10,12 @@ /** * used for large data write ,composed by buffer array, when a large MySQL * package write ,shoud use this object to write data - * + * + * use DirectByteBuffer for alloc buffer * @author wuzhih - * + * @author zagnix */ public class BufferArray { - private final BufferPool bufferPool; private ByteBuffer curWritingBlock; private List writedBlockLst = Collections.emptyList(); @@ -23,7 +23,7 @@ public class BufferArray { public BufferArray(BufferPool bufferPool) { super(); this.bufferPool = bufferPool; - curWritingBlock = bufferPool.allocate(); + curWritingBlock = bufferPool.allocate(bufferPool.getChunkSize()); } public ByteBuffer checkWriteBuffer(int capacity) { @@ -76,10 +76,9 @@ public ByteBuffer write(byte[] src) { offset += writeable; remains -= writeable; addtoBlock(curWritingBlock); - 
curWritingBlock = bufferPool.allocate(); + curWritingBlock = bufferPool.allocate(bufferPool.getChunkSize()); continue; } - } return curWritingBlock; } @@ -113,15 +112,14 @@ public byte[] writeToByteArrayAndRecycle() { ByteBufferUtil.arrayCopy(tBuf,0,all,offset,tBuf.remaining()); offset+=tBuf.remaining(); - NetSystem.getInstance().getBufferPool().recycle(tBuf); + bufferPool.recycle(tBuf); } } ByteBuffer tBuf = bufferArray.getCurWritingBlock(); if(tBuf.hasRemaining()) { ByteBufferUtil.arrayCopy(tBuf,0,all,offset,tBuf.remaining()); - - NetSystem.getInstance().getBufferPool().recycle(tBuf); + bufferPool.recycle(tBuf); // offset += curBuf.remaining(); } return all; diff --git a/src/main/java/io/mycat/buffer/BufferPool.java b/src/main/java/io/mycat/buffer/BufferPool.java new file mode 100644 index 000000000..e8715a372 --- /dev/null +++ b/src/main/java/io/mycat/buffer/BufferPool.java @@ -0,0 +1,23 @@ +package io.mycat.buffer; + +import java.nio.ByteBuffer; +import java.util.concurrent.ConcurrentHashMap; + +/** + * 缓冲池 + * + * @author Hash Zhang + * @version 1.0 + * @time 12:19 2016/5/23 + */ +public interface BufferPool { + public ByteBuffer allocate(int size); + public void recycle(ByteBuffer theBuf); + public long capacity(); + public long size(); + public int getConReadBuferChunk(); + public int getSharedOptsCount(); + public int getChunkSize(); + public ConcurrentHashMap getNetDirectMemoryUsage(); + public BufferArray allocateArray(); +} diff --git a/src/main/java/io/mycat/buffer/ByteBufferArena.java b/src/main/java/io/mycat/buffer/ByteBufferArena.java new file mode 100644 index 000000000..87c74dabc --- /dev/null +++ b/src/main/java/io/mycat/buffer/ByteBufferArena.java @@ -0,0 +1,214 @@ +package io.mycat.buffer; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; +import 
java.util.concurrent.atomic.AtomicLong; + +/** + * 仿照Netty的思路,针对MyCat内存缓冲策略优化 + * ByteBufferArena维护着锁还有所有list + * + * @author Hash Zhang + * @version 1.0 + * @time 17:19 2016/5/17 + * @see @https://github.com/netty/netty + */ +public class ByteBufferArena implements BufferPool { + private static final Logger LOGGER = LoggerFactory.getLogger(ByteBufferChunkList.class); + private final ByteBufferChunkList q[]; + + private final AtomicInteger chunkCount = new AtomicInteger(0); + private final AtomicInteger failCount = new AtomicInteger(0); + + private static final int FAIL_THRESHOLD = 1000; + private final int pageSize; + private final int chunkSize; + + private final AtomicLong capacity; + private final AtomicLong size; + + private final ConcurrentHashMap sharedOptsCount; + + /** + * 记录对线程ID->该线程的所使用Direct Buffer的size + */ + private final ConcurrentHashMap memoryUsage; + private final int conReadBuferChunk; + + public ByteBufferArena(int chunkSize, int pageSize, int chunkCount, int conReadBuferChunk) { + try { + this.chunkSize = chunkSize; + this.pageSize = pageSize; + this.chunkCount.set(chunkCount); + this.conReadBuferChunk = conReadBuferChunk; + + q = new ByteBufferChunkList[6]; + q[5] = new ByteBufferChunkList(100, Integer.MAX_VALUE, chunkSize, pageSize, 0); + q[4] = new ByteBufferChunkList(75, 100, chunkSize, pageSize, 0); + q[3] = new ByteBufferChunkList(50, 100, chunkSize, pageSize, 0); + q[2] = new ByteBufferChunkList(25, 75, chunkSize, pageSize, 0); + q[1] = new ByteBufferChunkList(1, 50, chunkSize, pageSize, 0); + q[0] = new ByteBufferChunkList(Integer.MIN_VALUE, 25, chunkSize, pageSize, chunkCount); + + q[0].nextList = q[1]; + q[1].nextList = q[2]; + q[2].nextList = q[3]; + q[3].nextList = q[4]; + q[4].nextList = q[5]; + q[5].nextList = null; + + q[5].prevList = q[4]; + q[4].prevList = q[3]; + q[3].prevList = q[2]; + q[2].prevList = q[1]; + q[1].prevList = q[0]; + q[0].prevList = null; + + capacity = new AtomicLong(6 * chunkCount * chunkSize); + size = new 
AtomicLong(6 * chunkCount * chunkSize); + sharedOptsCount = new ConcurrentHashMap<>(); + memoryUsage = new ConcurrentHashMap<>(); + } finally { + } + } + + @Override + public ByteBuffer allocate(int reqCapacity) { + try { + ByteBuffer byteBuffer = null; + int i = 0, count = 0; + while (byteBuffer == null) { + if (i > 5) { + i = 0; + count = failCount.incrementAndGet(); + if (count > FAIL_THRESHOLD) { + try { + expand(); + } finally { + } + } + } + byteBuffer = q[i].allocate(reqCapacity); + i++; + } +// if (count > 0) { +// System.out.println("count: " + count); +// System.out.println(failCount.get()); +// } +// printList(); + capacity.addAndGet(-reqCapacity); + final Thread thread = Thread.currentThread(); + final long threadId = thread.getId(); + + if (memoryUsage.containsKey(threadId)){ + memoryUsage.put(threadId,memoryUsage.get(thread.getId())+reqCapacity); + }else { + memoryUsage.put(threadId, (long) reqCapacity); + } + if (sharedOptsCount.contains(thread)) { + int currentCount = sharedOptsCount.get(thread); + currentCount++; + sharedOptsCount.put(thread,currentCount); + } else{ + sharedOptsCount.put(thread,0); + } + return byteBuffer; + } finally { + } + } + + private void expand() { + LOGGER.warn("Current Buffer Size is not enough! 
Expanding Byte buffer!"); + ByteBufferChunk byteBufferChunk = new ByteBufferChunk(pageSize, chunkSize); + q[0].byteBufferChunks.add(byteBufferChunk); + failCount.set(0); + } + + @Override + public void recycle(ByteBuffer byteBuffer) { + final long size = byteBuffer != null?byteBuffer.capacity():0; + try { + int i; + for (i = 0; i < 6; i++) { + if (q[i].free(byteBuffer)) { + break; + } + } + if (i > 5) { + LOGGER.warn("This ByteBuffer is not maintained in ByteBufferArena!"); + return; + } + final Thread thread = Thread.currentThread(); + final long threadId = thread.getId(); + + if (memoryUsage.containsKey(threadId)){ + memoryUsage.put(threadId,memoryUsage.get(thread.getId())-size); + } + if (sharedOptsCount.contains(thread)) { + int currentCount = sharedOptsCount.get(thread); + currentCount--; + sharedOptsCount.put(thread,currentCount); + } else{ + sharedOptsCount.put(thread,0); + } + capacity.addAndGet(byteBuffer.capacity()); + return; + } finally { + } + } + + private void printList() { + for (int i = 0; i < 6; i++) { + System.out.println(i + ":" + q[i].byteBufferChunks.toString()); + } + } + + @Override + public long capacity() { + return capacity.get(); + } + + @Override + public long size() { + return size.get(); + } + + @Override + public int getConReadBuferChunk() { + return conReadBuferChunk; + } + + @Override + public int getSharedOptsCount() { + final Set integers = (Set) sharedOptsCount.values(); + int count = 0; + for(int i : integers){ + count += i; + } + return count; + } + + /** + * 这里pageSize就是DirectByteBuffer的chunksize + * @return + */ + @Override + public int getChunkSize() { + return pageSize; + } + + @Override + public ConcurrentHashMap getNetDirectMemoryUsage() { + return memoryUsage; + } + + @Override + public BufferArray allocateArray() { + return new BufferArray(this); + } +} diff --git a/src/main/java/io/mycat/buffer/ByteBufferChunk.java b/src/main/java/io/mycat/buffer/ByteBufferChunk.java new file mode 100644 index 000000000..d75ad30ac --- 
/dev/null +++ b/src/main/java/io/mycat/buffer/ByteBufferChunk.java @@ -0,0 +1,267 @@ +package io.mycat.buffer; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import sun.nio.ch.DirectBuffer; + +import java.nio.ByteBuffer; + +/** + * 仿照Netty的思路,针对MyCat内存缓冲策略优化 + * Chunk由Page组成,是一块连续内存,由memoryMap和depthMap定义成一种平衡二叉树的管理结构 + * + * @author Hash Zhang + * @version 1.0 + * @time 17:19 2016/5/17 + * @see @https://github.com/netty/netty + */ +public class ByteBufferChunk implements Comparable{ + private static final Logger LOGGER = LoggerFactory.getLogger(ByteBufferChunk.class); + private final byte[] memoryMap; + private final byte[] depthMap; + private final ByteBuffer buf; + + //in bytes + private final int pageSize; + //in bytes + private final int chunkSize; + private final int chunkPageSize; + private final int maxOrder; + private final byte unusable; + private final int log2PageSize; + final long bufAddress; + + private int freeBytes; + + ByteBufferChunk prev; + ByteBufferChunk next; + ByteBufferChunkList parent; + + public ByteBufferChunk(int pageSize, int chunkSize) { + + this.pageSize = pageSize; + this.chunkSize = chunkSize; + this.chunkPageSize = chunkSize / pageSize; + this.maxOrder = log2(this.chunkPageSize) + 1; + this.unusable = (byte) this.maxOrder; + this.freeBytes = chunkSize; + this.buf = ByteBuffer.allocateDirect(chunkSize); + this.bufAddress = ((DirectBuffer) buf).address(); + + this.depthMap = new byte[(1 << this.maxOrder)]; + this.memoryMap = new byte[this.depthMap.length]; + + this.log2PageSize = log2(pageSize); + + int memoryMapIndex = 1; + for (int d = 0; d < maxOrder; ++d) { // move down the tree one level at a time + int depth = 1 << d; + for (int p = 0; p < depth; ++p) { + // in each level traverse left to right and set value to the depth of subtree + memoryMap[memoryMapIndex] = (byte) d; + depthMap[memoryMapIndex] = (byte) d; + memoryMapIndex++; + } + } + } + + public boolean isInThisChunk(ByteBuffer byteBuffer) { + long address = 
((DirectBuffer) byteBuffer).address(); + return (address >= bufAddress) && (address < bufAddress + chunkSize); + } + + public int usage() { + final int freeBytes = this.freeBytes; + if (freeBytes == 0) { + return 100; + } + + int freePercentage = (int) (freeBytes * 100L / chunkSize); + if (freePercentage == 0) { + return 99; + } + return 100 - freePercentage; + } + + public synchronized ByteBuffer allocateRun(int normCapacity) { + if(normCapacity > chunkSize){ + LOGGER.warn("try to acquire a buffer with larger size than chunkSize!"); + return null; + } + int d = this.maxOrder - 2 - (log2(normCapacity) - this.log2PageSize); + if (d > this.maxOrder - 1) { + d = maxOrder - 1; + } + int id = allocateNode(d); + if (id < 0) { + return null; + } + freeBytes -= runLength(id); + + int start = calculateStart(id); + int end = start + runLength(id); + + buf.limit(end); + buf.position(start); + +// printMemoryMap(); + + return buf.slice(); + } + + + private int calculateStart(int id) { + int count = 0; + for (int i = 1; i < depthMap.length; i++) { + if (depthMap[i] < depthMap[id]) { + continue; + } else if (depthMap[i] == depthMap[id]) { + if (i == id) { + break; + } else { + count += runLength(i); + } + } else { + break; + } + } + return count; + } + + private int runLength(int id) { + // represents the size in #bytes supported by node 'id' in the tree + return 1 << log2(chunkSize) - depthMap[id]; + } + + private int allocateNode(int d) { + int id = 1; + int initial = -(1 << d); // has last d bits = 0 and rest all = 1 + byte val = memoryMap[id]; + if (val > d) { // unusable + return -1; + } + + while (val < d || (id & initial) == 0) { // id & initial == 1 << d for all ids at depth d, for < d it is 0 + id <<= 1; + val = memoryMap[id]; + if (val > d) { + id ^= 1; + val = memoryMap[id]; + } + } + byte value = memoryMap[id]; + assert value == d && (id & initial) == 1 << d : String.format("val = %d, id & initial = %d, d = %d", + value, id & initial, d); + memoryMap[id] = unusable; 
// mark as unusable + updateParentsAlloc(id); + return id; + } + + private void updateParentsAlloc(int id) { + while (id > 1) { + int parentId = id >>> 1; + byte val1 = memoryMap[id]; + byte val2 = memoryMap[id ^ 1]; + byte val = val1 < val2 ? val1 : val2; + memoryMap[parentId] = val; + id = parentId; + } + } + + public synchronized void freeByteBuffer(ByteBuffer byteBuffer) { + long address = ((DirectBuffer) byteBuffer).address(); + int relativeAddress = (int) (address - bufAddress); + int length = byteBuffer.capacity(); + + int depth = maxOrder - 1 - log2(length / pageSize); + int count = 0; + int i; + for (i = 0; i < depthMap.length; i++) { + if (depthMap[i] == depth) { + if (count == relativeAddress) { + break; + } + count += length; + } + if (depthMap[i] > depth) { + break; + } + } + free(i); + } + + private void free(int handle) { + if (memoryMap[handle] != depthMap[handle]) { + freeBytes += runLength(handle); + memoryMap[handle] = depthMap[handle]; + updateParentsFree(handle); + } + } + + private void updateParentsFree(int id) { + int logChild = depthMap[id] + 1; + while (id > 1) { + int parentId = id >>> 1; + byte val1 = memoryMap[id]; + byte val2 = memoryMap[id ^ 1]; + logChild -= 1; // in first iteration equals log, subsequently reduce 1 from logChild as we traverse up + + if (val1 == logChild && val2 == logChild) { + memoryMap[parentId] = (byte) (logChild - 1); + } else { + byte val = val1 < val2 ? 
val1 : val2; + memoryMap[parentId] = val; + } + + id = parentId; + } + } + + private static int log2(int chunkSize) { + if (chunkSize <= 0) { + LOGGER.warn("invalid parameter!"); + throw new IllegalArgumentException(); + } + return Integer.SIZE - 1 - Integer.numberOfLeadingZeros(chunkSize); + } + + private void printMemoryMap() { + int l = 1; + for (int i = 0; i < this.maxOrder; i++) { + int j = (int) Math.pow(2, i); + for (int k = 0; k < j; k++) { + System.out.print(this.memoryMap[l] + "|"); + l++; + } + System.out.println(); + } + System.out.println(); + } + + public static void main(String[] args) { + + int pageSize = 256; + int chunkSize = 1024 * 1024 * 64; + ByteBufferChunk byteBufferChunk = new ByteBufferChunk(pageSize, chunkSize); + int chunkCount = 8; + int allocTimes = 102400; + long start = System.currentTimeMillis(); + for (int i = 0; i < allocTimes; i++) { +// System.out.println("allocate "+i); +// long start=System.nanoTime(); + int size = 256; + ByteBuffer byteBufer = byteBufferChunk.allocateRun(size); +// System.out.println("alloc "+size+" usage "+(System.nanoTime()-start)); +// start=System.nanoTime(); +// byteBufferArena.recycle(byteBufer); +// System.out.println("recycle usage "+(System.nanoTime()-start)); + } + long used = (System.currentTimeMillis() - start); + System.out.println("total used time " + used + " avg speed " + allocTimes / used); + } + + @Override + public int compareTo(Object o) { + return -1; + } +} diff --git a/src/main/java/io/mycat/buffer/ByteBufferChunkList.java b/src/main/java/io/mycat/buffer/ByteBufferChunkList.java new file mode 100644 index 000000000..8f02c1163 --- /dev/null +++ b/src/main/java/io/mycat/buffer/ByteBufferChunkList.java @@ -0,0 +1,98 @@ +package io.mycat.buffer; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentSkipListSet; + +/** + * 
仿照Netty的思路,针对MyCat内存缓冲策略优化 + * ChunkList维护着一个指向一串Chunk的头结点,访问策略由minUsage,maxUsage决定 + * + * @author Hash Zhang + * @version 1.0 + * @time 17:19 2016/5/17 + * @see @https://github.com/netty/netty + */ +public class ByteBufferChunkList { + private static final Logger LOGGER = LoggerFactory.getLogger(ByteBufferChunkList.class); + private final int minUsage; + private final int maxUsage; + + Set byteBufferChunks; + ByteBufferChunkList prevList; + ByteBufferChunkList nextList; + + public ByteBufferChunkList(int minUsage, int maxUsage, int chunkSize, int pageSize, int numOfChunks) { + this.minUsage = minUsage; + this.maxUsage = maxUsage; + byteBufferChunks = new ConcurrentSkipListSet<>(); + for (int i = 0; i < numOfChunks; i++) { + ByteBufferChunk chunk = new ByteBufferChunk(pageSize, chunkSize); + byteBufferChunks.add(chunk); + } + } + + public ByteBufferChunk getIndex(ByteBuffer buffer) { + for(ByteBufferChunk byteBufferChunk : byteBufferChunks){ + if (byteBufferChunk.isInThisChunk(buffer)) { + return byteBufferChunk; + } + } + return null; + } + + ByteBuffer allocate(int reqCapacity) { + for (ByteBufferChunk cur : byteBufferChunks) { + ByteBuffer buf = cur.allocateRun(reqCapacity); + if (buf == null) { + continue; + } else { + final int usage = cur.usage(); + if (usage >= maxUsage) { + ByteBufferChunkList next = nextList; + ByteBufferChunkList current = this; + while (next != null) { + current.byteBufferChunks.remove(cur); + next.byteBufferChunks.add(cur); + if (next.maxUsage > usage) { + break; + } + current = next; + next = next.nextList; + } + } + return buf; + } + } + return null; + } + + boolean free(ByteBuffer buffer) { + ByteBufferChunk cur = getIndex(buffer); + if (cur == null) { + LOGGER.info("not in this list!"); + return false; + } + cur.freeByteBuffer(buffer); + final int usage = cur.usage(); + if (usage < minUsage) { + ByteBufferChunkList prev = prevList; + ByteBufferChunkList current = this; + while (prev != null) { + 
current.byteBufferChunks.remove(cur); + prev.byteBufferChunks.add(cur); + if (prev.minUsage < usage) { + break; + } + current = prev; + prev = prev.prevList; + } + } + return true; + } +} diff --git a/src/main/java/io/mycat/buffer/ByteBufferPage.java b/src/main/java/io/mycat/buffer/ByteBufferPage.java new file mode 100644 index 000000000..65a229db7 --- /dev/null +++ b/src/main/java/io/mycat/buffer/ByteBufferPage.java @@ -0,0 +1,102 @@ +package io.mycat.buffer; + +import java.nio.ByteBuffer; +import java.util.BitSet; +import java.util.concurrent.atomic.AtomicBoolean; + +/* + * 用来保存一个一个ByteBuffer为底层存储的内存页 + */ +@SuppressWarnings("restriction") +public class ByteBufferPage { + + private final ByteBuffer buf; + private final int chunkSize; + private final int chunkCount; + private final BitSet chunkAllocateTrack; + private final AtomicBoolean allocLockStatus = new AtomicBoolean(false); + private final long startAddress; + + public ByteBufferPage(ByteBuffer buf, int chunkSize) { + super(); + this.chunkSize = chunkSize; + chunkCount = buf.capacity() / chunkSize; + chunkAllocateTrack = new BitSet(chunkCount); + this.buf = buf; + startAddress = ((sun.nio.ch.DirectBuffer) buf).address(); + } + + public ByteBuffer allocatChunk(int theChunkCount) { + if (!allocLockStatus.compareAndSet(false, true)) { + return null; + } + int startChunk = -1; + int contiueCount = 0; + try { + for (int i = 0; i < chunkCount; i++) { + if (chunkAllocateTrack.get(i) == false) { + if (startChunk == -1) { + startChunk = i; + contiueCount = 1; + if (theChunkCount == 1) { + break; + } + } else { + if (++contiueCount == theChunkCount) { + break; + } + } + } else { + startChunk = -1; + contiueCount = 0; + } + } + if (contiueCount == theChunkCount) { + int offStart = startChunk * chunkSize; + int offEnd = offStart + theChunkCount * chunkSize; + buf.limit(offEnd); + buf.position(offStart); + + ByteBuffer newBuf = buf.slice(); + //sun.nio.ch.DirectBuffer theBuf = (DirectBuffer) newBuf; + 
//System.out.println("offAddress " + (theBuf.address() - startAddress)); + markChunksUsed(startChunk, theChunkCount); + return newBuf; + } else { + //System.out.println("contiueCount " + contiueCount + " theChunkCount " + theChunkCount); + return null; + } + } finally { + allocLockStatus.set(false); + } + } + + private void markChunksUsed(int startChunk, int theChunkCount) { + for (int i = 0; i < theChunkCount; i++) { + chunkAllocateTrack.set(startChunk + i); + } + } + + private void markChunksUnused(int startChunk, int theChunkCount) { + for (int i = 0; i < theChunkCount; i++) { + chunkAllocateTrack.clear(startChunk + i); + } + } + + public boolean recycleBuffer(ByteBuffer parent, int startChunk, int chunkCount) { + + if (parent == this.buf) { + + while (!this.allocLockStatus.compareAndSet(false, true)) { + Thread.yield(); + } + try { + markChunksUnused(startChunk,chunkCount); + } finally { + allocLockStatus.set(false); + } + return true; + } + return false; + } +} diff --git a/src/main/java/io/mycat/buffer/DirectByteBufferPool.java b/src/main/java/io/mycat/buffer/DirectByteBufferPool.java new file mode 100644 index 000000000..bcba629e7 --- /dev/null +++ b/src/main/java/io/mycat/buffer/DirectByteBufferPool.java @@ -0,0 +1,179 @@ +package io.mycat.buffer; + +import java.nio.ByteBuffer; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import sun.nio.ch.DirectBuffer; + +/** + * DirectByteBuffer池,可以分配任意指定大小的DirectByteBuffer,用完需要归还 + * @author wuzhih + * @author zagnix + */ +@SuppressWarnings("restriction") +public class DirectByteBufferPool implements BufferPool{ + private static final Logger LOGGER = LoggerFactory.getLogger(DirectByteBufferPool.class); + public static final String LOCAL_BUF_THREAD_PREX = "$_"; + 
private ByteBufferPage[] allPages; + private final int chunkSize; + // private int prevAllocatedPage = 0; + //private AtomicInteger prevAllocatedPage; + private AtomicLong prevAllocatedPage; + private final int pageSize; + private final short pageCount; + private final int conReadBuferChunk ; + + /** + * 记录对线程ID->该线程的所使用Direct Buffer的size + */ + private final ConcurrentHashMap memoryUsage; + + public DirectByteBufferPool(int pageSize, short chunkSize, short pageCount,int conReadBuferChunk) { + allPages = new ByteBufferPage[pageCount]; + this.chunkSize = chunkSize; + this.pageSize = pageSize; + this.pageCount = pageCount; + this.conReadBuferChunk = conReadBuferChunk; + //prevAllocatedPage = new AtomicInteger(0); + prevAllocatedPage = new AtomicLong(0); + for (int i = 0; i < pageCount; i++) { + allPages[i] = new ByteBufferPage(ByteBuffer.allocateDirect(pageSize), chunkSize); + } + memoryUsage = new ConcurrentHashMap<>(); + } + + public BufferArray allocateArray() { + return new BufferArray(this); + } + /** + * TODO 当页不够时,考虑扩展内存池的页的数量........... + * @param buffer + * @return + */ + public ByteBuffer expandBuffer(ByteBuffer buffer){ + int oldCapacity = buffer.capacity(); + int newCapacity = oldCapacity << 1; + ByteBuffer newBuffer = allocate(newCapacity); + if(newBuffer != null){ + int newPosition = buffer.position(); + buffer.flip(); + newBuffer.put(buffer); + newBuffer.position(newPosition); + recycle(buffer); + return newBuffer; + } + return null; + } + + public ByteBuffer allocate(int size) { + final int theChunkCount = size / chunkSize + (size % chunkSize == 0 ? 
0 : 1); + int selectedPage = (int)(prevAllocatedPage.incrementAndGet() % allPages.length); + ByteBuffer byteBuf = allocateBuffer(theChunkCount, 0, selectedPage); + if (byteBuf == null) { + byteBuf = allocateBuffer(theChunkCount, selectedPage, allPages.length); + } + final long threadId = Thread.currentThread().getId(); + + if(byteBuf !=null){ + if (memoryUsage.containsKey(threadId)){ + memoryUsage.put(threadId,memoryUsage.get(threadId)+byteBuf.capacity()); + }else { + memoryUsage.put(threadId,(long)byteBuf.capacity()); + } + } + + if(byteBuf==null){ + return ByteBuffer.allocate(size); + } + return byteBuf; + } + + public void recycle(ByteBuffer theBuf) { + if(theBuf !=null && (!(theBuf instanceof DirectBuffer) )){ + theBuf.clear(); + return; + } + + final long size = theBuf.capacity(); + + boolean recycled = false; + DirectBuffer thisNavBuf = (DirectBuffer) theBuf; + int chunkCount = theBuf.capacity() / chunkSize; + DirectBuffer parentBuf = (DirectBuffer) thisNavBuf.attachment(); + int startChunk = (int) ((thisNavBuf.address() - parentBuf.address()) / chunkSize); + for (int i = 0; i < allPages.length; i++) { + if ((recycled = allPages[i].recycleBuffer((ByteBuffer) parentBuf, startChunk, + chunkCount) == true)) { + break; + } + } + final long threadId = Thread.currentThread().getId(); + + if (memoryUsage.containsKey(threadId)) { + memoryUsage.put(threadId, memoryUsage.get(threadId) - size); + } + if (recycled == false) { + LOGGER.warn("warning ,not recycled buffer " + theBuf); + } + + } + + private ByteBuffer allocateBuffer(int theChunkCount, int startPage, int endPage) { + for (int i = startPage; i < endPage; i++) { + ByteBuffer buffer = allPages[i].allocatChunk(theChunkCount); + if (buffer != null) { + prevAllocatedPage.getAndSet(i); + return buffer; + } + } + return null; + } + + public int getChunkSize() { + return chunkSize; + } + + @Override + public ConcurrentHashMap getNetDirectMemoryUsage() { + return memoryUsage; + } + + public int getPageSize() { + return 
pageSize; + } + + public short getPageCount() { + return pageCount; + } + + public long capacity() { + return (long) pageSize * pageCount; + } + + public long size(){ + return (long) pageSize * chunkSize * pageCount; + } + + //TODO + public int getSharedOptsCount(){ + return 0; + } + + + + public ByteBufferPage[] getAllPages() { + return allPages; + } + + public int getConReadBuferChunk() { + return conReadBuferChunk; + } + +} diff --git a/src/main/java/io/mycat/buffer/MyCatMemoryAllocator.java b/src/main/java/io/mycat/buffer/MyCatMemoryAllocator.java new file mode 100644 index 000000000..aa404734c --- /dev/null +++ b/src/main/java/io/mycat/buffer/MyCatMemoryAllocator.java @@ -0,0 +1,212 @@ +package io.mycat.buffer; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.util.internal.PlatformDependent; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; +import java.util.concurrent.*; + +/** + * Netty Direct Memory 分配器,为mycat提供内存池管理功能 + * + * @author zagnix + * @create 2017-01-18 11:01 + */ + +public class MyCatMemoryAllocator implements ByteBufAllocator { + + private static final Logger LOGGER = LoggerFactory.getLogger(MyCatMemoryAllocator.class); + public final ConcurrentHashMap recycleMaps = new ConcurrentHashMap<>(); + + private final static MyCatMemoryAllocator INSTANCE = + new MyCatMemoryAllocator(Runtime.getRuntime().availableProcessors()*2); + + /** netty memory pool alloctor*/ + private final PooledByteBufAllocator alloc; + /**arena 的数量,一般设置cpu cores*2 */ + private final int numberOfArenas; + + /** ChunkSize 大小 = pageSize << maxOrder */ + private final int chunkSize; + + /**页大小*/ + private final int pageSize; + + /** + * numberOfArenas 设置为处理器cores*2 + * @param numberOfArenas + */ + public MyCatMemoryAllocator(int numberOfArenas){ 
+ this.numberOfArenas = numberOfArenas; + if (!PlatformDependent.hasUnsafe()) { + LOGGER.warn("Using direct memory, but sun.misc.Unsafe not available."); + } + boolean preferDirect = true; + + this.pageSize = 8192*2; + int maxOrder = 11; + this.chunkSize = pageSize << maxOrder; + int numDirectArenas = numberOfArenas; + int numHeapArenas = 0; + + /** for 4.1.x*/ + this.alloc = new PooledByteBufAllocator( + preferDirect, + numHeapArenas, + numDirectArenas, + pageSize, + maxOrder, + 512, + 256, + 64, + true); + + + /**for 5.0.x + this.alloc = new PooledByteBufAllocator(preferDirect);**/ + } + + public static MyCatMemoryAllocator getINSTANCE() { + return INSTANCE; + } + + /** + * @return alloc + */ + public PooledByteBufAllocator getAlloc() { + return alloc; + } + + /** + * Returns the number of arenas. + * + * @return Number of arenas. + */ + public int getNumberOfArenas() { + return numberOfArenas; + } + + /** + * Returns the chunk size. + * + * @return Chunk size. + */ + public int getChunkSize() { + return chunkSize; + } + + /** + * page Size + * @return page Size + */ + public int getPageSize() { + return pageSize; + } + + + @Override + public ByteBuf buffer() { + return alloc.buffer(); + } + + @Override + public ByteBuf buffer(int initialCapacity) { + return alloc.buffer(initialCapacity); + } + + @Override + public ByteBuf buffer(int initialCapacity, int maxCapacity) { + return alloc.buffer(initialCapacity, maxCapacity); + } + + @Override + public ByteBuf ioBuffer() { + return alloc.ioBuffer(); + } + + @Override + public ByteBuf ioBuffer(int initialCapacity) { + return alloc.ioBuffer(initialCapacity); + } + + @Override + public ByteBuf ioBuffer(int initialCapacity, int maxCapacity) { + return alloc.ioBuffer(initialCapacity, maxCapacity); + } + + @Override + public ByteBuf heapBuffer() { + throw new UnsupportedOperationException("Heap buffer"); + } + + @Override + public ByteBuf heapBuffer(int initialCapacity) { + throw new UnsupportedOperationException("Heap 
buffer"); + } + + @Override + public ByteBuf heapBuffer(int initialCapacity, int maxCapacity) { + throw new UnsupportedOperationException("Heap buffer"); + } + + @Override + public ByteBuf directBuffer() { + return alloc.directBuffer(); + } + + @Override + public ByteBuf directBuffer(int initialCapacity) { + return alloc.directBuffer(initialCapacity); + } + + @Override + public ByteBuf directBuffer(int initialCapacity, int maxCapacity) { + return alloc.directBuffer(initialCapacity, maxCapacity); + } + + @Override + public CompositeByteBuf compositeBuffer() { + return alloc.compositeBuffer(); + } + + @Override + public CompositeByteBuf compositeBuffer(int maxNumComponents) { + return alloc.compositeBuffer(maxNumComponents); + } + + @Override + public CompositeByteBuf compositeHeapBuffer() { + throw new UnsupportedOperationException("Heap buffer"); + } + + @Override + public CompositeByteBuf compositeHeapBuffer(int maxNumComponents) { + throw new UnsupportedOperationException("Heap buffer"); + } + + @Override + public CompositeByteBuf compositeDirectBuffer() { + return alloc.compositeDirectBuffer(); + } + + @Override + public CompositeByteBuf compositeDirectBuffer(int maxNumComponents) { + return alloc.compositeDirectBuffer(maxNumComponents); + } + + @Override + public boolean isDirectBufferPooled() { + return alloc.isDirectBufferPooled(); + } + + @Override + public int calculateNewCapacity(int i, int i1) { + return 0; + } +} diff --git a/src/main/java/io/mycat/buffer/NettyBufferPool.java b/src/main/java/io/mycat/buffer/NettyBufferPool.java new file mode 100644 index 000000000..5f75b2f84 --- /dev/null +++ b/src/main/java/io/mycat/buffer/NettyBufferPool.java @@ -0,0 +1,129 @@ +package io.mycat.buffer; + + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.PoolArenaMetric; +import io.netty.buffer.PoolChunkListMetric; +import io.netty.buffer.PoolChunkMetric; +import io.netty.util.internal.PlatformDependent; + +import java.nio.ByteBuffer; +import 
java.util.Iterator; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; + +/** + * 封装netty pooled Direct Memory 接口,为mycat提供内存分配功能 + * 由于Mycat目前使用ByteBuffer,而Netty分配的是ByteBuf,为了管理ByteBuf + * 在MyCatMemoryAllocator中定义recycleMaps ByteBuffer(address) -->ByteBuf + * 的映射关系,通过address来回收ByteBuf. + * + * @author zagnix + * @create 2017-04-13 + */ + +public class NettyBufferPool implements BufferPool { + + + MyCatMemoryAllocator allocator; + private int chunkSize = 0; + + public NettyBufferPool(int chunkSize) { + allocator = MyCatMemoryAllocator.getINSTANCE(); + this.chunkSize = chunkSize; + } + + @Override + public ByteBuffer allocate(int size) { + ByteBuf byteBuf = allocator.directBuffer(size); + ByteBuffer byteBuffer = byteBuf.nioBuffer(0, size); + allocator.recycleMaps.put(PlatformDependent.directBufferAddress(byteBuffer), byteBuf); + return byteBuffer; + } + + @Override + public void recycle(ByteBuffer byteBuffer) { + ByteBuf byteBuf = + allocator.recycleMaps.get(PlatformDependent.directBufferAddress(byteBuffer)); + + if (byteBuf != null) { + byteBuf.release(); + allocator.recycleMaps.remove(PlatformDependent.directBufferAddress(byteBuffer)); + } + + } + + /** + * return memory allocator + * + * @return + */ + public MyCatMemoryAllocator getAllocator() { + return allocator; + } + + /** + * TODO + * 下面函数需要将netty相关内存信息导出处理,然后实现 + * 计算逻辑就是, + * 1.先计算PoolChunk分配的页,表示已经消耗的内存, + * 2.然后计算小于一页情况,记录小于一页内存使用情况, + * 上面二者合起来就是整个netty 使用的内存, + * 已经分配了,但是没有使用的内存的情况 + */ + + @Override + public long capacity() { + return size(); + } + + @Override + public long size() { + + List list = allocator.getAlloc().directArenas(); + long chunkSizeBytes = allocator.getChunkSize(); + int chunkCount = 0; + + synchronized (this) { + /**PoolArenas*/ + for (PoolArenaMetric pool : list) { + List pcks = pool.chunkLists(); + /**针对PoolChunkList*/ + for (PoolChunkListMetric pck : pcks) { + Iterator it = pck.iterator(); + while (it.hasNext()) { + PoolChunkMetric p = it.next(); + 
chunkCount++; + } + } + } + } + + return chunkCount * chunkSizeBytes; + } + + @Override + public int getConReadBuferChunk() { + return 0; + } + + @Override + public int getSharedOptsCount() { + return 0; + } + + @Override + public int getChunkSize() { + return chunkSize; + } + + @Override + public ConcurrentHashMap getNetDirectMemoryUsage() { + return null; + } + + @Override + public BufferArray allocateArray() { + return new BufferArray(this); + } +} diff --git a/src/main/java/io/mycat/cache/CacheService.java b/src/main/java/io/mycat/cache/CacheService.java index e3ff26ddf..e591df7c5 100644 --- a/src/main/java/io/mycat/cache/CacheService.java +++ b/src/main/java/io/mycat/cache/CacheService.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,30 +16,28 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. 
* */ package io.mycat.cache; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.Properties; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + /** * cache service for other component default using memory cache encache - * + * * @author wuzhih - * + * */ public class CacheService { - private static final Logger logger = LoggerFactory - .getLogger(CacheService.class); + private static final Logger logger = LoggerFactory.getLogger(CacheService.class); private final Map poolFactorys = new HashMap(); private final Map allPools = new HashMap(); @@ -66,7 +64,7 @@ public Map getAllCachePools() private void init() throws Exception { Properties props = new Properties(); props.load(CacheService.class - .getResourceAsStream("/caches.properties")); + .getResourceAsStream("/cacheservice.properties")); final String poolFactoryPref = "factory."; final String poolKeyPref = "pool."; final String layedPoolKeyPref = "layedpool."; @@ -87,8 +85,8 @@ private void init() throws Exception { + value); } String type = valueItems[0]; - int size = Integer.valueOf(valueItems[1]); - int timeOut = Integer.valueOf(valueItems[2]); + int size = Integer.parseInt(valueItems[1]); + int timeOut = Integer.parseInt(valueItems[2]); createPool(cacheName, type, size, timeOut); } else if (key.startsWith(layedPoolKeyPref)) { String cacheName = key.substring(layedPoolKeyPref.length()); @@ -169,7 +167,7 @@ private CachePoolFactory getCacheFact(String type) { /** * get cache pool by name ,caller should cache result - * + * * @param poolName * @return CachePool */ diff --git a/src/main/java/io/mycat/cache/DefaultLayedCachePool.java b/src/main/java/io/mycat/cache/DefaultLayedCachePool.java index bf302ba70..ff2fa77ba 100644 --- a/src/main/java/io/mycat/cache/DefaultLayedCachePool.java +++ b/src/main/java/io/mycat/cache/DefaultLayedCachePool.java @@ -23,13 +23,12 @@ */ package io.mycat.cache; -import 
org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.HashMap; import java.util.Map; import java.util.concurrent.locks.ReentrantLock; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + public class DefaultLayedCachePool implements LayerCachePool { private static final Logger LOGGER = LoggerFactory .getLogger(DefaultLayedCachePool.class); diff --git a/src/main/java/io/mycat/cache/impl/EnchachePool.java b/src/main/java/io/mycat/cache/impl/EnchachePool.java index 810775cb6..c9f9ea95d 100644 --- a/src/main/java/io/mycat/cache/impl/EnchachePool.java +++ b/src/main/java/io/mycat/cache/impl/EnchachePool.java @@ -23,22 +23,22 @@ */ package io.mycat.cache.impl; -import io.mycat.cache.CachePool; -import io.mycat.cache.CacheStatic; import net.sf.ehcache.Cache; import net.sf.ehcache.Element; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.cache.CachePool; +import io.mycat.cache.CacheStatic; /** - * encache based cache pool + * ehcache based cache pool * * @author wuzhih * */ public class EnchachePool implements CachePool { - private static final Logger LOGGER = LoggerFactory - .getLogger(EnchachePool.class); + private static final Logger LOGGER = LoggerFactory.getLogger(EnchachePool.class); private final Cache enCache; private final CacheStatic cacheStati = new CacheStatic(); private final String name; diff --git a/src/main/java/io/mycat/cache/impl/LevelDBCachePooFactory.java b/src/main/java/io/mycat/cache/impl/LevelDBCachePooFactory.java index 9e71a761d..09e3cba96 100644 --- a/src/main/java/io/mycat/cache/impl/LevelDBCachePooFactory.java +++ b/src/main/java/io/mycat/cache/impl/LevelDBCachePooFactory.java @@ -1,33 +1,34 @@ -package io.mycat.cache.impl; - - -import static org.iq80.leveldb.impl.Iq80DBFactory.factory; -import io.mycat.cache.CachePool; -import io.mycat.cache.CachePoolFactory; - -import java.io.File; - -import org.iq80.leveldb.DB; -import 
org.iq80.leveldb.Options; - -public class LevelDBCachePooFactory extends CachePoolFactory { - - @Override - public CachePool createCachePool(String poolName, int cacheSize, - int expireSeconds) { - Options options = new Options(); - options.cacheSize(cacheSize * 1048576);//cacheSize M 大小 - options.createIfMissing(true); - DB db =null; - try { - db=factory.open(new File("leveldb\\"+poolName), options); - // Use the db in here.... - } catch (Exception e) { - // Make sure you close the db to shutdown the - // database and avoid resource leaks. - // db.close(); - } - return new LevelDBPool(poolName,db,cacheSize); - } - -} +package io.mycat.cache.impl; + + +import java.io.File; + +import static org.iq80.leveldb.impl.Iq80DBFactory.factory; + +import org.iq80.leveldb.DB; +import org.iq80.leveldb.Options; + +import io.mycat.cache.CachePool; +import io.mycat.cache.CachePoolFactory; + +public class LevelDBCachePooFactory extends CachePoolFactory { + + @Override + public CachePool createCachePool(String poolName, int cacheSize, + int expireSeconds) { + Options options = new Options(); + options.cacheSize(cacheSize * 1048576);//cacheSize M 大小 + options.createIfMissing(true); + DB db =null; + try { + db=factory.open(new File("leveldb\\"+poolName), options); + // Use the db in here.... + } catch (Exception e) { + // Make sure you close the db to shutdown the + // database and avoid resource leaks. 
+ // db.close(); + } + return new LevelDBPool(poolName,db,cacheSize); + } + +} diff --git a/src/main/java/io/mycat/cache/impl/LevelDBPool.java b/src/main/java/io/mycat/cache/impl/LevelDBPool.java index 584c0fd91..ac7161976 100644 --- a/src/main/java/io/mycat/cache/impl/LevelDBPool.java +++ b/src/main/java/io/mycat/cache/impl/LevelDBPool.java @@ -1,127 +1,131 @@ -package io.mycat.cache.impl; - - -import io.mycat.cache.CachePool; -import io.mycat.cache.CacheStatic; -import org.iq80.leveldb.DB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.*; - - -public class LevelDBPool implements CachePool { - private static final Logger LOGGER = LoggerFactory.getLogger(LevelDBPool.class); - private final DB cache; - private final CacheStatic cacheStati = new CacheStatic(); - private final String name; - private final long maxSize; - - public LevelDBPool(String name,DB db,long maxSize) { - this.cache = db; - this.name=name; - this.maxSize=maxSize; - cacheStati.setMaxSize(maxSize); - } - @Override - public void putIfAbsent(Object key, Object value) { - - cache.put(toByteArray(key),toByteArray(value)); - cacheStati.incPutTimes(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(name+" add leveldb cache ,key:" + key + " value:" + value); - } - } - - @Override - public Object get(Object key) { - - Object ob= toObject(cache.get(toByteArray(key))); - if (ob != null) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(name+" hit cache ,key:" + key); - } - cacheStati.incHitTimes(); - return ob; - } else { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(name+" miss cache ,key:" + key); - } - cacheStati.incAccessTimes(); - return null; - } - } - - @Override - public void clearCache() { - LOGGER.info("clear cache "+name); - //cache.delete(key); - cacheStati.reset(); - //cacheStati.setMemorySize(cache.g); - - } - - @Override - public CacheStatic getCacheStatic() { - - /* - int i=0; - try { - // DBIterator iterator = cache.iterator(); - 
for(cache.iterator().seekToFirst(); cache.iterator().hasNext(); cache.iterator().next()) { - i++; - } - cache.iterator().close(); - } catch (Exception e) { - // Make sure you close the iterator to avoid resource leaks. - } - //long[] sizes = cache.getApproximateSizes(new Range(bytes("TESTDB"), bytes("TESTDC"))); - */ - //cacheStati.setItemSize(cache.getSize());//sizes[0]);//需要修改leveldb的代码 - cacheStati.setItemSize(cacheStati.getPutTimes()); - return cacheStati; - } - - @Override - public long getMaxSize() { - - return maxSize; - } - - public byte[] toByteArray (Object obj) { - byte[] bytes = null; - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - try { - ObjectOutputStream oos = new ObjectOutputStream(bos); - oos.writeObject(obj); - oos.flush(); - bytes = bos.toByteArray (); - oos.close(); - bos.close(); - } catch (IOException ex) { - LOGGER.error("toByteArrayError", ex); - } - return bytes; - } - - - public Object toObject (byte[] bytes) { - Object obj = null; - if ((bytes==null) || (bytes.length<=0)) { - return obj; - } - try { - ByteArrayInputStream bis = new ByteArrayInputStream (bytes); - ObjectInputStream ois = new ObjectInputStream (bis); - obj = ois.readObject(); - ois.close(); - bis.close(); - } catch (IOException ex) { - LOGGER.error("toObjectError", ex); - } catch (ClassNotFoundException ex) { - LOGGER.error("toObjectError", ex); - } - return obj; - } - -} +package io.mycat.cache.impl; + + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.iq80.leveldb.DB; + +import io.mycat.cache.CachePool; +import io.mycat.cache.CacheStatic; + + +public class LevelDBPool implements CachePool { + private static final Logger LOGGER = LoggerFactory.getLogger(LevelDBPool.class); + private final DB cache; + private final CacheStatic cacheStati = new CacheStatic(); + 
private final String name; + private final long maxSize; + + public LevelDBPool(String name,DB db,long maxSize) { + this.cache = db; + this.name=name; + this.maxSize=maxSize; + cacheStati.setMaxSize(maxSize); + } + @Override + public void putIfAbsent(Object key, Object value) { + + cache.put(toByteArray(key),toByteArray(value)); + cacheStati.incPutTimes(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(name+" add leveldb cache ,key:" + key + " value:" + value); + } + } + + @Override + public Object get(Object key) { + + Object ob= toObject(cache.get(toByteArray(key))); + if (ob != null) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(name+" hit cache ,key:" + key); + } + cacheStati.incHitTimes(); + return ob; + } else { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(name+" miss cache ,key:" + key); + } + cacheStati.incAccessTimes(); + return null; + } + } + + @Override + public void clearCache() { + LOGGER.info("clear cache "+name); + //cache.delete(key); + cacheStati.reset(); + //cacheStati.setMemorySize(cache.g); + + } + + @Override + public CacheStatic getCacheStatic() { + + /* + int i=0; + try { + // DBIterator iterator = cache.iterator(); + for(cache.iterator().seekToFirst(); cache.iterator().hasNext(); cache.iterator().next()) { + i++; + } + cache.iterator().close(); + } catch (Exception e) { + // Make sure you close the iterator to avoid resource leaks. 
+ } + //long[] sizes = cache.getApproximateSizes(new Range(bytes("TESTDB"), bytes("TESTDC"))); + */ + //cacheStati.setItemSize(cache.getSize());//sizes[0]);//需要修改leveldb的代码 + cacheStati.setItemSize(cacheStati.getPutTimes()); + return cacheStati; + } + + @Override + public long getMaxSize() { + + return maxSize; + } + + public byte[] toByteArray (Object obj) { + byte[] bytes = null; + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + try { + ObjectOutputStream oos = new ObjectOutputStream(bos); + oos.writeObject(obj); + oos.flush(); + bytes = bos.toByteArray (); + oos.close(); + bos.close(); + } catch (IOException ex) { + LOGGER.error("toByteArrayError", ex); + } + return bytes; + } + + + public Object toObject (byte[] bytes) { + Object obj = null; + if ((bytes==null) || (bytes.length<=0)) { + return obj; + } + try { + ByteArrayInputStream bis = new ByteArrayInputStream (bytes); + ObjectInputStream ois = new ObjectInputStream (bis); + obj = ois.readObject(); + ois.close(); + bis.close(); + } catch (IOException ex) { + LOGGER.error("toObjectError", ex); + } catch (ClassNotFoundException ex) { + LOGGER.error("toObjectError", ex); + } + return obj; + } + +} diff --git a/src/main/java/io/mycat/cache/impl/MapDBCachePooFactory.java b/src/main/java/io/mycat/cache/impl/MapDBCachePooFactory.java index 72837ff7c..820ed24e1 100644 --- a/src/main/java/io/mycat/cache/impl/MapDBCachePooFactory.java +++ b/src/main/java/io/mycat/cache/impl/MapDBCachePooFactory.java @@ -23,15 +23,15 @@ */ package io.mycat.cache.impl; -import io.mycat.cache.CachePool; -import io.mycat.cache.CachePoolFactory; - import java.util.concurrent.TimeUnit; import org.mapdb.DB; import org.mapdb.DBMaker; import org.mapdb.HTreeMap; +import io.mycat.cache.CachePool; +import io.mycat.cache.CachePoolFactory; + public class MapDBCachePooFactory extends CachePoolFactory { private DB db = DBMaker.newMemoryDirectDB().cacheSize(1000).cacheLRUEnable().make(); diff --git 
a/src/main/java/io/mycat/cache/impl/MapDBCachePool.java b/src/main/java/io/mycat/cache/impl/MapDBCachePool.java index 4bc613a5a..c12844b3c 100644 --- a/src/main/java/io/mycat/cache/impl/MapDBCachePool.java +++ b/src/main/java/io/mycat/cache/impl/MapDBCachePool.java @@ -23,11 +23,11 @@ */ package io.mycat.cache.impl; +import org.mapdb.HTreeMap; + import io.mycat.cache.CachePool; import io.mycat.cache.CacheStatic; -import org.mapdb.HTreeMap; - public class MapDBCachePool implements CachePool { private final HTreeMap htreeMap; diff --git a/src/main/java/io/mycat/cache/index/Shard.java b/src/main/java/io/mycat/cache/index/Shard.java new file mode 100644 index 000000000..a58e1e1c5 --- /dev/null +++ b/src/main/java/io/mycat/cache/index/Shard.java @@ -0,0 +1,119 @@ +package io.mycat.cache.index; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.ArrayList; +import java.util.List; +import java.util.SortedMap; +import java.util.TreeMap; + +/** + * 分布式索引一致性 + * + * @author Hash Zhang + * @version 1.0 + * @time 00:15:03 2016/5/23 + */ +public class Shard { // S类封装了机器节点的信息 ,如name、password、ip、port等 + + private TreeMap nodes; // 虚拟节点 + private List shards; // 真实机器节点 + private static final int NODE_NUM = 100; // 每个机器节点关联的虚拟节点个数 + + public Shard(List shards) { + super(); + this.shards = shards; + init(); + } + + private void init() { // 初始化一致性hash环 + nodes = new TreeMap(); + for (int i = 0; i != shards.size(); ++i) { // 每个真实机器节点都需要关联虚拟节点 + final S shardInfo = shards.get(i); + + for (int n = 0; n < NODE_NUM; n++) { + // 一个真实机器节点关联NODE_NUM个虚拟节点 + nodes.put(hash("SHARD-" + i + "-NODE-" + n), shardInfo); + } + } + } + + public S getShardInfo(String key) { + SortedMap tail = nodes.tailMap(hash(key)); // 沿环的顺时针找到一个虚拟节点 + if (tail.size() == 0) { + return nodes.get(nodes.firstKey()); + } + return tail.get(tail.firstKey()); // 返回该虚拟节点对应的真实机器节点的信息 + } + + /** + * MurMurHash算法,是非加密HASH算法,性能很高, + * 比传统的CRC32,MD5,SHA-1(这两个算法都是加密HASH算法,复杂度本身就很高,带来的性能上的损害也不可避免) + * 
等HASH算法要快很多,而且据说这个算法的碰撞率很低. + * http://murmurhash.googlepages.com/ + */ + private Long hash(String key) { + + ByteBuffer buf = ByteBuffer.wrap(key.getBytes()); + int seed = 0x1234ABCD; + + ByteOrder byteOrder = buf.order(); + buf.order(ByteOrder.LITTLE_ENDIAN); + + long m = 0xc6a4a7935bd1e995L; + int r = 47; + + long h = seed ^ (buf.remaining() * m); + + long k; + while (buf.remaining() >= 8) { + k = buf.getLong(); + + k *= m; + k ^= k >>> r; + k *= m; + + h ^= k; + h *= m; + } + + if (buf.remaining() > 0) { + ByteBuffer finish = ByteBuffer.allocate(8).order( + ByteOrder.LITTLE_ENDIAN); + // for big-endian version, do this first: + // finish.position(8-buf.remaining()); + finish.put(buf).rewind(); + h ^= finish.getLong(); + h *= m; + } + + h ^= h >>> r; + h *= m; + h ^= h >>> r; + + buf.order(byteOrder); + return h; + } + + public static void main(String[] args) { + List stringList = new ArrayList<>(); + stringList.add("host1"); + stringList.add("host2"); + stringList.add("host3"); + stringList.add("host4"); + stringList.add("host5"); + Shard stringShard = new Shard<>(stringList); + for (int i = 0; i < 10; i++) { + System.out.println(i+":"+stringShard.getShardInfo(""+i)); + } + stringList = new ArrayList<>(); + stringList.add("host1"); + stringList.add("host2"); + stringList.add("host3"); + stringList.add("host4"); + stringShard = new Shard<>(stringList); + for (int i = 0; i < 10; i++) { + System.out.println(i+":"+stringShard.getShardInfo(""+i)); + } + } +} diff --git a/src/main/java/io/mycat/sqlengine/Catlet.java b/src/main/java/io/mycat/catlets/Catlet.java similarity index 66% rename from src/main/java/io/mycat/sqlengine/Catlet.java rename to src/main/java/io/mycat/catlets/Catlet.java index 323d98254..c6c245d6e 100644 --- a/src/main/java/io/mycat/sqlengine/Catlet.java +++ b/src/main/java/io/mycat/catlets/Catlet.java @@ -1,9 +1,10 @@ -package io.mycat.sqlengine; +package io.mycat.catlets; import io.mycat.cache.LayerCachePool; -import 
io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.server.ServerConnection; +import io.mycat.sqlengine.EngineCtx; /** * mycat catlet ,used to execute sql and return result to client,some like * database's procedure. @@ -20,7 +21,7 @@ public interface Catlet { void processSQL(String sql, EngineCtx ctx); void route(SystemConfig sysConfig, SchemaConfig schema, - int sqlType, String realSQL, String charset, MySQLFrontConnection sc, + int sqlType, String realSQL, String charset, ServerConnection sc, LayerCachePool cachePool) ; //void setRoute(RouteResultset rrs); } diff --git a/src/main/java/io/mycat/sqlengine/sharejoin/JoinParser.java b/src/main/java/io/mycat/catlets/JoinParser.java similarity index 72% rename from src/main/java/io/mycat/sqlengine/sharejoin/JoinParser.java rename to src/main/java/io/mycat/catlets/JoinParser.java index aa26105f4..6499c8866 100644 --- a/src/main/java/io/mycat/sqlengine/sharejoin/JoinParser.java +++ b/src/main/java/io/mycat/catlets/JoinParser.java @@ -1,308 +1,364 @@ -package io.mycat.sqlengine.sharejoin; - - -import com.alibaba.druid.sql.ast.SQLExpr; -import com.alibaba.druid.sql.ast.SQLOrderBy; -import com.alibaba.druid.sql.ast.SQLOrderingSpecification; -import com.alibaba.druid.sql.ast.expr.*; -import com.alibaba.druid.sql.ast.statement.SQLJoinTableSource; -import com.alibaba.druid.sql.ast.statement.SQLJoinTableSource.JoinType; -import com.alibaba.druid.sql.ast.statement.SQLSelectItem; -import com.alibaba.druid.sql.ast.statement.SQLSelectOrderByItem; -import com.alibaba.druid.sql.ast.statement.SQLTableSource; -import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; - -/** - * 功能详细描述:分片join,解析join语句 - * @author 
sohudo[http://blog.csdn.net/wind520] - * @create 2015年01月25日 - * @version 0.0.1 - */ - - -public class JoinParser { - - public static final Logger LOGGER = LoggerFactory - .getLogger(JoinParser.class); - - private MySqlSelectQueryBlock mysqlQuery; - private String stmt=""; - private String joinType; - private String masterTable; - private TableFilter tableFilter; - - //private LinkedHashMap fieldAliasMap = new LinkedHashMap(); - - public JoinParser(MySqlSelectQueryBlock selectQuery,String stmt) { - this.mysqlQuery=selectQuery; - this.stmt=stmt; - } - - public void parser(){ - masterTable=""; - - SQLTableSource table=mysqlQuery.getFrom(); - parserTable(table,tableFilter,false); - - parserFields(mysqlQuery.getSelectList()); - parserMaserTable(); - - parserWhere(mysqlQuery.getWhere(),""); - // getJoinField(); - parserOrderBy(mysqlQuery.getOrderBy()); - parserLimit(); - // LOGGER.info("field "+fieldAliasMap); - // LOGGER.info("master "+masterTable); - // LOGGER.info("join Lkey "+getJoinLkey()); - // LOGGER.info("join Rkey "+getJoinRkey()); - LOGGER.info("SQL: "+this.stmt); - } - - private void parserTable(SQLTableSource table,TableFilter tFilter,boolean isOutJoin){ - if(table instanceof SQLJoinTableSource){ - SQLJoinTableSource table1=(SQLJoinTableSource)table; - joinType=table1.getJoinType().toString(); - if ((table1.getJoinType()==JoinType.COMMA)||(table1.getJoinType()==JoinType.JOIN)||(table1.getJoinType()==JoinType.INNER_JOIN) - ||(table1.getJoinType()==JoinType.LEFT_OUTER_JOIN)) { - tFilter=setTableFilter(tFilter,getTableFilter(table1.getLeft(),isOutJoin)); - if (tableFilter==null){ - tableFilter=tFilter; - } - } - //parserTable(table1.getLeft()); //SQLExprTableSource - parserTable(table1.getRight(),tFilter,true); - - SQLExpr expr=table1.getCondition();//SQLBinaryOpExpr - parserJoinKey(expr); - } - else { - tFilter=setTableFilter(tFilter,getTableFilter(table,isOutJoin)); - // LOGGER.info("table "+table.toString() +" Alias:"+table.getAlias()+" 
Hints:"+table.getHints()); - } - } - private TableFilter setTableFilter(TableFilter tFilter,TableFilter newFilter){ - if (tFilter==null) { - tFilter=newFilter; - return tFilter; - } - else { - tFilter.setTableJoin(newFilter); - return tFilter.getTableJoin(); - } - } - private TableFilter getTableFilter(SQLTableSource table,boolean isOutJoin){ - String key ; - String value = table.toString().trim(); - if (table.getAlias()==null) { - key=value; - } - else { - key = table.getAlias().trim(); - } - return new TableFilter(value,key,isOutJoin); - } - - private void parserJoinKey(SQLExpr expr){ - if (expr==null) return; - parserWhere(expr,""); - } - - private String getExprFieldName(SQLAggregateExpr expr){ - String field=""; - for (SQLExpr item :expr.getArguments()){ - field+=item.toString(); - } - return expr.getMethodName()+"("+field+")"; - } - - private String getFieldName(SQLSelectItem item){ - if (item.getExpr() instanceof SQLPropertyExpr) { - return item.getExpr().toString();//字段别名 - } - else - return item.toString(); - } - - private void parserFields(List mysqlSelectList){ - //显示的字段 - String key=""; - String value =""; - for(SQLSelectItem item : mysqlSelectList) { - if (item.getExpr() instanceof SQLAllColumnExpr) { - //*解析 - setField(item.toString(), item.toString()); - } - else { - if (item.getExpr() instanceof SQLAggregateExpr) { - SQLAggregateExpr expr =(SQLAggregateExpr)item.getExpr(); - key = getExprFieldName(expr); - //value=expr. 
- } - else { - key=getFieldName(item); - value=item.getAlias(); - } - setField(key, value); - } - } - } - private void setField(String key,String value){ - //fieldAliasMap.put(key, value); - if (tableFilter!=null){ - tableFilter.addField(key, value); - } - } - - //判断并获得主表 - private void parserMaserTable(){ - if (tableFilter!=null){ - masterTable=tableFilter.getTableAlia(); - } - } - - private boolean checkJoinField(String value){ - if (value==null){ - return false; - } - else { - int i=value.indexOf('.'); - return i>0; - } - } - - - private void parserWhere(SQLExpr aexpr,String Operator){ - if (aexpr==null) return; - if (aexpr instanceof SQLBinaryOpExpr){ - SQLBinaryOpExpr expr=(SQLBinaryOpExpr)aexpr; - SQLExpr exprL=expr.getLeft(); - if (!(exprL instanceof SQLBinaryOpExpr)) - { - opSQLExpr((SQLBinaryOpExpr)aexpr,Operator); - } - else { - // if (expr.getOperator().getName().equals("AND")) { - if (expr.getOperator()==SQLBinaryOperator.BooleanAnd) { - //parserWhere(exprL); - //parserWhere(expr.getRight()); - andorWhere(exprL,expr.getOperator().getName(),expr.getRight()); - } - else if (expr.getOperator()==SQLBinaryOperator.BooleanOr){//.getName().equals("OR")) { - andorWhere(exprL,expr.getOperator().getName(),expr.getRight()); - } - else { - throw new RuntimeException("Can't identify the operation of of where"); - } - } - } - } - - private void andorWhere(SQLExpr exprL,String Operator,SQLExpr exprR ){ - parserWhere(exprL,""); - parserWhere(exprR,Operator); - } - - private void opSQLExpr(SQLBinaryOpExpr expr,String Operator) { - if (expr==null) return; - SQLExpr exprL=expr.getLeft(); - if (!(exprL instanceof SQLBinaryOpExpr)) - { - String field=exprL.toString(); - String value=getExpValue(expr.getRight()).toString(); - if (expr.getOperator()==SQLBinaryOperator.Equality) { - if (checkJoinField(value)) { - //joinLkey=field; - //joinRkey=value; - tableFilter.setJoinKey(field,value); - } - else - tableFilter.addWhere(field, value, expr.getOperator().getName(),Operator); - 
} - else - tableFilter.addWhere(field, value, expr.getOperator().getName(),Operator); - } - } - - private Object getExpValue(SQLExpr expr){ - if (expr instanceof SQLIntegerExpr){ - return ((SQLIntegerExpr)expr).getNumber().intValue(); - } - if (expr instanceof SQLNumberExpr){ - return ((SQLNumberExpr)expr).getNumber().doubleValue(); - } - if (expr instanceof SQLCharExpr){ - String va=((SQLCharExpr)expr).toString(); - return va;//remove(va,'\''); - } - if (expr instanceof SQLBooleanExpr){ - return ((SQLBooleanExpr)expr).getValue(); - } - if (expr instanceof SQLNullExpr){ - return null; - } - - return expr; - } - - private void parserOrderBy(SQLOrderBy orderby) - { - if (orderby != null ){ - for (int i = 0; i < orderby.getItems().size(); i++) - { - SQLSelectOrderByItem orderitem = orderby.getItems().get(i); - tableFilter.addOrders(orderitem.getExpr().toString(), getSQLExprToAsc(orderitem.getType())); - } - } - } - private void parserLimit(){ - int limitoff=0; - int limitnum=0; - if (this.mysqlQuery.getLimit()!=null) { - limitoff=getSQLExprToInt(this.mysqlQuery.getLimit().getOffset()); - limitnum=getSQLExprToInt(this.mysqlQuery.getLimit().getRowCount()); - tableFilter.addLimit(limitoff,limitnum); - } - } - - private int getSQLExprToInt(SQLExpr expr){ - if (expr instanceof SQLIntegerExpr){ - return ((SQLIntegerExpr)expr).getNumber().intValue(); - } - return 0; - } - - private String getSQLExprToAsc(SQLOrderingSpecification ASC){ - if (ASC==null ) return " ASC "; - if (ASC==SQLOrderingSpecification.DESC){ - return " DESC "; - } - else { - return " ASC "; - } - } - - public String getChildSQL(){ - //String sql="select "+joinRkey+","+sql+" from "+mtable+" where "+joinRkey+" in "; - String sql=tableFilter.getTableJoin().getSQL(); - return sql; - } - - public String getSql(){ - stmt=tableFilter.getSQL(); - return stmt; - } - - public String getJoinType(){ - return joinType; - } - public String getJoinLkey(){ - return tableFilter.getJoinKey(true); - } - public String 
getJoinRkey(){ - return tableFilter.getJoinKey(false); - } -} +package io.mycat.catlets; + + +import java.util.LinkedHashMap; +import java.util.List; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import com.alibaba.druid.sql.SQLUtils; +import com.alibaba.druid.sql.ast.SQLExpr; +import com.alibaba.druid.sql.ast.SQLOrderBy; +import com.alibaba.druid.sql.ast.SQLOrderingSpecification; +import com.alibaba.druid.sql.ast.expr.SQLAggregateExpr; +import com.alibaba.druid.sql.ast.expr.SQLAllColumnExpr; +import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr; +import com.alibaba.druid.sql.ast.expr.SQLBinaryOperator; +import com.alibaba.druid.sql.ast.expr.SQLBooleanExpr; +import com.alibaba.druid.sql.ast.expr.SQLCharExpr; +import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.druid.sql.ast.expr.SQLInListExpr; +import com.alibaba.druid.sql.ast.expr.SQLIntegerExpr; +import com.alibaba.druid.sql.ast.expr.SQLMethodInvokeExpr; +import com.alibaba.druid.sql.ast.expr.SQLNullExpr; +import com.alibaba.druid.sql.ast.expr.SQLNumberExpr; +import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr; +import com.alibaba.druid.sql.ast.statement.SQLJoinTableSource; +import com.alibaba.druid.sql.ast.statement.SQLJoinTableSource.JoinType; +import com.alibaba.druid.sql.ast.statement.SQLSelectItem; +import com.alibaba.druid.sql.ast.statement.SQLSelectOrderByItem; +import com.alibaba.druid.sql.ast.statement.SQLTableSource; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; + +/** + * 功能详细描述:分片join,解析join语句 + * @author sohudo[http://blog.csdn.net/wind520] + * @create 2015年01月25日 + * @version 0.0.1 + */ + + +public class JoinParser { + + protected static final Logger LOGGER = LoggerFactory.getLogger(JoinParser.class); + + private MySqlSelectQueryBlock mysqlQuery; + private String stmt=""; + private String joinType; + private String masterTable; + private TableFilter tableFilter; + + //private LinkedHashMap fieldAliasMap = new 
LinkedHashMap(); + + public JoinParser(MySqlSelectQueryBlock selectQuery,String stmt) { + this.mysqlQuery=selectQuery; + this.stmt=stmt; + } + + public void parser(){ + masterTable=""; + + SQLTableSource table=mysqlQuery.getFrom(); + parserTable(table,tableFilter,false); + + parserFields(mysqlQuery.getSelectList()); + parserMasterTable(); + + parserWhere(mysqlQuery.getWhere(),""); + // getJoinField(); + parserOrderBy(mysqlQuery.getOrderBy()); + parserLimit(); + // LOGGER.info("field "+fieldAliasMap); + // LOGGER.info("master "+masterTable); + // LOGGER.info("join Lkey "+getJoinLkey()); + // LOGGER.info("join Rkey "+getJoinRkey()); + LOGGER.info("SQL: "+this.stmt); + } + + private void parserTable(SQLTableSource table,TableFilter tFilter,boolean isOutJoin){ + if(table instanceof SQLJoinTableSource){ + SQLJoinTableSource table1=(SQLJoinTableSource)table; + joinType=table1.getJoinType().toString(); + if ((table1.getJoinType()==JoinType.COMMA)||(table1.getJoinType()==JoinType.JOIN)||(table1.getJoinType()==JoinType.INNER_JOIN) + ||(table1.getJoinType()==JoinType.LEFT_OUTER_JOIN)) { + tFilter=setTableFilter(tFilter,getTableFilter(table1.getLeft(),isOutJoin)); + if (tableFilter==null){ + tableFilter=tFilter; + } + } + //parserTable(table1.getLeft()); //SQLExprTableSource + parserTable(table1.getRight(),tFilter,true); + + SQLExpr expr=table1.getCondition();//SQLBinaryOpExpr + parserJoinKey(expr); + } + else { + tFilter=setTableFilter(tFilter,getTableFilter(table,isOutJoin)); + LOGGER.info("table "+table.toString() +" Alias:"+table.getAlias()+" Hints:"+table.getHints()); + } + } + private TableFilter setTableFilter(TableFilter tFilter,TableFilter newFilter){ + if (tFilter==null) { + tFilter=newFilter; + return tFilter; + } + else { + tFilter.setTableJoin(newFilter); + return tFilter.getTableJoin(); + } + } + private TableFilter getTableFilter(SQLTableSource table,boolean isOutJoin){ + String key ; + String value = table.toString().trim(); + if (table.getAlias()==null) { + 
key=value; + } + else { + key = table.getAlias().trim(); + } + return new TableFilter(value,key,isOutJoin); + } + + private void parserJoinKey(SQLExpr expr){ + if (expr==null) { + return; + } + parserWhere(expr,""); + } + + private String getExprFieldName(SQLAggregateExpr expr){ + StringBuilder field = new StringBuilder(); + for (SQLExpr item :expr.getArguments()){ + field.append(item.toString()); + } + return expr.getMethodName()+"("+field.toString()+")"; + } + + private String getFieldName(SQLSelectItem item){ + if (item.getExpr() instanceof SQLPropertyExpr) { + return item.getExpr().toString();//字段别名 + } + else { + return item.toString(); + } + } + + private String getMethodInvokeFieldName(SQLSelectItem item){ + SQLMethodInvokeExpr invoke = (SQLMethodInvokeExpr)item.getExpr(); + List itemExprs = invoke.getParameters(); + for(SQLExpr itemExpr:itemExprs){ + if (itemExpr instanceof SQLPropertyExpr) { + return itemExpr.toString();//字段别名 + } + } + return item.toString(); + } + + + private void parserFields(List mysqlSelectList){ + //显示的字段 + String key=""; + String value =""; + String exprfield = ""; + for(SQLSelectItem item : mysqlSelectList) { + if (item.getExpr() instanceof SQLAllColumnExpr) { + //*解析 + setField(item.toString(), item.toString()); + } + else { + if (item.getExpr() instanceof SQLAggregateExpr) { + SQLAggregateExpr expr =(SQLAggregateExpr)item.getExpr(); + key = getExprFieldName(expr); + setField(key, value); + }else if(item.getExpr() instanceof SQLMethodInvokeExpr){ + key = getMethodInvokeFieldName(item); + exprfield=getFieldName(item); +// value=item.getAlias(); + setField(key, value,exprfield); + }else { + key=getFieldName(item); + value=item.getAlias(); + setField(key, value); + } + + } + } + } + private void setField(String key,String value){ + //fieldAliasMap.put(key, value); + if (tableFilter!=null){ + tableFilter.addField(key, value); + } + } + + private void setField(String key,String value,String expr){ + //fieldAliasMap.put(key, value); + 
if (tableFilter!=null){ + tableFilter.addField(key, value,expr); + } + } + + + //判断并获得主表 + private void parserMasterTable(){ + if (tableFilter!=null){ + masterTable=tableFilter.getTableAlia(); + } + } + + private boolean checkJoinField(String value){ + if (value==null){ + return false; + } + else { + int i=value.indexOf('.'); + return i>0; + } + } + + + private void parserWhere(SQLExpr aexpr,String Operator){ + if (aexpr==null) { + return; + } + if (aexpr instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr expr=(SQLBinaryOpExpr)aexpr; + SQLExpr exprL=expr.getLeft(); + if (!(exprL instanceof SQLBinaryOpExpr)) + { + opSQLExpr((SQLBinaryOpExpr)aexpr,Operator); + } + else { + // if (expr.getOperator().getName().equals("AND")) { + if (expr.getOperator()==SQLBinaryOperator.BooleanAnd) { + //parserWhere(exprL); + //parserWhere(expr.getRight()); + andorWhere(exprL,expr.getOperator().getName(),expr.getRight()); + } + else if (expr.getOperator()==SQLBinaryOperator.BooleanOr){//.getName().equals("OR")) { + andorWhere(exprL,expr.getOperator().getName(),expr.getRight()); + } + else { + throw new RuntimeException("Can't identify the operation of of where"); + } + } + }else if(aexpr instanceof SQLInListExpr){ + SQLInListExpr expr = (SQLInListExpr)aexpr; + SQLExpr exprL = expr.getExpr(); + String field=exprL.toString(); + tableFilter.addWhere(field, SQLUtils.toMySqlString(expr), Operator); + } + + } + + private void andorWhere(SQLExpr exprL,String Operator,SQLExpr exprR ){ + parserWhere(exprL,""); + parserWhere(exprR,Operator); + } + + private void opSQLExpr(SQLBinaryOpExpr expr,String Operator) { + if (expr==null) { + return; + } + SQLExpr exprL=expr.getLeft(); + if (!(exprL instanceof SQLBinaryOpExpr)) + { + String field=exprL.toString(); + String value=getExpValue(expr.getRight()).toString(); + if (expr.getOperator()==SQLBinaryOperator.Equality) { + if (checkJoinField(value)) { + //joinLkey=field; + //joinRkey=value; + tableFilter.setJoinKey(field,value); + } + else { + 
tableFilter.addWhere(field, value, expr.getOperator().getName(), Operator); + } + } + else { + tableFilter.addWhere(field, value, expr.getOperator().getName(), Operator); + } + } + } + + private Object getExpValue(SQLExpr expr){ + if (expr instanceof SQLIntegerExpr){ + return ((SQLIntegerExpr)expr).getNumber().intValue(); + } + if (expr instanceof SQLNumberExpr){ + return ((SQLNumberExpr)expr).getNumber().doubleValue(); + } + if (expr instanceof SQLCharExpr){ + String va=((SQLCharExpr)expr).toString(); + return va;//remove(va,'\''); + } + if (expr instanceof SQLBooleanExpr){ + return ((SQLBooleanExpr)expr).getValue(); + } + if (expr instanceof SQLNullExpr){ + return null; + } + + return expr; + } + + private void parserOrderBy(SQLOrderBy orderby) + { + if (orderby != null ){ + for (int i = 0; i < orderby.getItems().size(); i++) + { + SQLSelectOrderByItem orderitem = orderby.getItems().get(i); + tableFilter.addOrders(orderitem.getExpr().toString(), getSQLExprToAsc(orderitem.getType())); + } + } + } + private void parserLimit(){ + int limitoff=0; + int limitnum=0; + if (this.mysqlQuery.getLimit()!=null) { + limitoff=getSQLExprToInt(this.mysqlQuery.getLimit().getOffset()); + limitnum=getSQLExprToInt(this.mysqlQuery.getLimit().getRowCount()); + tableFilter.addLimit(limitoff,limitnum); + } + } + + private int getSQLExprToInt(SQLExpr expr){ + if (expr instanceof SQLIntegerExpr){ + return ((SQLIntegerExpr)expr).getNumber().intValue(); + } + return 0; + } + + private String getSQLExprToAsc(SQLOrderingSpecification ASC){ + if (ASC==null ) { + return " ASC "; + } + if (ASC==SQLOrderingSpecification.DESC){ + return " DESC "; + } + else { + return " ASC "; + } + } + + public String getChildSQL(){ + //String sql="select "+joinRkey+","+sql+" from "+mtable+" where "+joinRkey+" in "; + String sql=tableFilter.getTableJoin().getSQL(); + return sql; + } + + public String getSql(){ + stmt=tableFilter.getSQL(); + return stmt; + } + + public String getJoinType(){ + return joinType; + } 
+ public String getJoinLkey(){ + return tableFilter.getJoinKey(true); + } + public String getJoinRkey(){ + return tableFilter.getJoinKey(false); + } +} diff --git a/src/main/java/demo/catlets/ShareJoin.java b/src/main/java/io/mycat/catlets/ShareJoin.java similarity index 68% rename from src/main/java/demo/catlets/ShareJoin.java rename to src/main/java/io/mycat/catlets/ShareJoin.java index aaba0219d..0fcbd5dc7 100644 --- a/src/main/java/demo/catlets/ShareJoin.java +++ b/src/main/java/io/mycat/catlets/ShareJoin.java @@ -1,21 +1,28 @@ -package demo.catlets; +package io.mycat.catlets; +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.statement.SQLSelectQuery; +import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import io.mycat.backend.mysql.nio.handler.MiddlerQueryResultHandler; +import io.mycat.backend.mysql.nio.handler.MiddlerResultHandler; import io.mycat.cache.LayerCachePool; +import io.mycat.config.ErrorCode; +import io.mycat.config.Fields; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.RowDataPacket; import io.mycat.route.RouteResultset; import io.mycat.route.RouteResultsetNode; import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.ErrorCode; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.RowDataPacket; +import io.mycat.server.NonBlockingSession; +import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; import io.mycat.sqlengine.AllJobFinishedListener; -import io.mycat.sqlengine.Catlet; import io.mycat.sqlengine.EngineCtx; import 
io.mycat.sqlengine.SQLJobHandler; -import io.mycat.sqlengine.sharejoin.JoinParser; import io.mycat.util.ByteUtil; import io.mycat.util.ResultSetUtil; @@ -23,15 +30,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - -//import org.opencloudb.route.RouteStrategy; -//import org.opencloudb.route.impl.DruidMysqlRouteStrategy; -//import org.opencloudb.parser.druid.DruidParser; -import com.alibaba.druid.sql.ast.SQLStatement; -import com.alibaba.druid.sql.ast.statement.SQLSelectQuery; -import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; -import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; -import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; /** * 功能详细描述:分片join * @author sohudo[http://blog.csdn.net/wind520] @@ -57,19 +55,23 @@ public class ShareJoin implements Catlet { private int sendField=0; private boolean childRoute=false; private boolean jointTableIsData=false; - + // join 字段的类型,一般情况都是int, long; 增加该字段为了支持非int,long类型的(一般为varchar)joinkey的sharejoin + // 参见:io.mycat.server.packet.FieldPacket 属性: public int type; + // 参见:http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition + private int joinKeyType = Fields.FIELD_TYPE_LONG; // 默认 join 字段为int型 + //重新路由使用 private SystemConfig sysConfig; private SchemaConfig schema; private int sqltype; private String charset; - private MySQLFrontConnection sc; + private ServerConnection sc; private LayerCachePool cachePool; public void setRoute(RouteResultset rrs){ this.rrs =rrs; } - public void route(SystemConfig sysConfig, SchemaConfig schema,int sqlType, String realSQL, String charset, MySQLFrontConnection sc, LayerCachePool cachePool) { + public void route(SystemConfig sysConfig, SchemaConfig schema,int sqlType, String realSQL, String charset, ServerConnection sc, LayerCachePool cachePool) { int rs = ServerParse.parse(realSQL); this.sqltype = rs & 0xff; this.sysConfig=sysConfig; @@ -143,7 
+145,10 @@ public void processSQL(String sql, EngineCtx ctx) { this.ctx=ctx; String[] dataNodes =getDataNodes(); maxjob=dataNodes.length; - ShareDBJoinHandler joinHandler = new ShareDBJoinHandler(this,joinParser.getJoinLkey()); + + + //huangyiming + ShareDBJoinHandler joinHandler = new ShareDBJoinHandler(this,joinParser.getJoinLkey(),sc.getSession2()); ctx.executeNativeSQLSequnceJob(dataNodes, ssql, joinHandler); EngineCtx.LOGGER.info("Catlet exec:"+getDataNode(getDataNodes())+" sql:" +ssql); @@ -153,7 +158,15 @@ public void onAllJobFinished(EngineCtx ctx) { if (!jointTableIsData) { ctx.writeHeader(fields); } - ctx.writeEof(); + + MiddlerResultHandler middlerResultHandler = sc.getSession2().getMiddlerResultHandler(); + + if( middlerResultHandler !=null ){ + //sc.getSession2().setCanClose(false); + middlerResultHandler.secondEexcute(); + } else{ + ctx.writeEof(); + } EngineCtx.LOGGER.info("发送数据OK"); } }); @@ -195,9 +208,17 @@ private void createQryJob(int batchSize) { String svalue=""; for(Map.Entry e: ids.entrySet() ){ theId=e.getKey(); - batchRows.put(theId, rows.remove(theId)); + byte[] rowbyte = rows.remove(theId); + if(rowbyte!=null){ + batchRows.put(theId, rowbyte); + } if (!svalue.equals(e.getValue())){ - sb.append(e.getValue()).append(','); + if(joinKeyType == Fields.FIELD_TYPE_VAR_STRING + || joinKeyType == Fields.FIELD_TYPE_STRING){ // joinkey 为varchar + sb.append("'").append(e.getValue()).append("'").append(','); // ('digdeep','yuanfang') + }else{ // 默认joinkey为int/long + sb.append(e.getValue()).append(','); // (1,2,3) + } } svalue=e.getValue(); if (count++ > batchSize) { @@ -223,14 +244,18 @@ private void createQryJob(int batchSize) { getRoute(sql); //childRoute=true; //} - ctx.executeNativeSQLParallJob(getDataNodes(),sql, new ShareRowOutPutDataHandler(this,fields,joinindex,joinParser.getJoinRkey(), batchRows)); + ctx.executeNativeSQLParallJob(getDataNodes(),sql, new ShareRowOutPutDataHandler(this,fields,joinindex,joinParser.getJoinRkey(), 
batchRows,ctx.getSession())); EngineCtx.LOGGER.info("SQLParallJob:"+getDataNode(getDataNodes())+" sql:" + sql); } public void writeHeader(String dataNode,List afields, List bfields) { sendField++; - if (sendField==1){ - ctx.writeHeader(afields, bfields); - setAllFields(afields, bfields); + if (sendField==1){ + //huangyiming add 只是中间过程数据不能发送给客户端 + MiddlerResultHandler middlerResultHandler = sc.getSession2().getMiddlerResultHandler(); + if(middlerResultHandler ==null ){ + ctx.writeHeader(afields, bfields); + } + setAllFields(afields, bfields); // EngineCtx.LOGGER.info("发送字段2:" + dataNode); } @@ -253,12 +278,13 @@ public void writeRow(RowDataPacket rowDataPkg){ ctx.writeRow(rowDataPkg); } - public static int getFieldIndex(List fields,String fkey){ + public int getFieldIndex(List fields,String fkey){ int i=0; for (byte[] field :fields) { FieldPacket fieldPacket = new FieldPacket(); fieldPacket.read(field); - if (ByteUtil.getString(fieldPacket.name).equals(fkey)){ + if (ByteUtil.getString(fieldPacket.orgName).equals(fkey)){ + joinKeyType = fieldPacket.type; return i; } i++; @@ -271,11 +297,12 @@ class ShareDBJoinHandler implements SQLJobHandler { private List fields; private final ShareJoin ctx; private String joinkey; - - public ShareDBJoinHandler(ShareJoin ctx,String joinField) { + private NonBlockingSession session; + public ShareDBJoinHandler(ShareJoin ctx,String joinField,NonBlockingSession session) { super(); this.ctx = ctx; this.joinkey=joinField; + this.session = session; //EngineCtx.LOGGER.info("二次查询:" +" sql:" + querySQL+"/"+joinkey); } @@ -318,8 +345,12 @@ public boolean onRowData(String dataNode, byte[] rowData) { } @Override - public void finished(String dataNode, boolean failed) { - ctx.endJobInput(dataNode,failed); + public void finished(String dataNode, boolean failed, String errorMsg) { + if(failed){ + session.getSource().writeErrMessage(ErrorCode.ER_UNKNOWN_ERROR, errorMsg); + }else{ + ctx.endJobInput(dataNode,failed); + } } } @@ -332,13 +363,16 @@ 
class ShareRowOutPutDataHandler implements SQLJobHandler { private int joinL;//A表(左边)关联字段的位置 private int joinR;//B表(右边)关联字段的位置 private String joinRkey;//B表(右边)关联字段 - public ShareRowOutPutDataHandler(ShareJoin ctx,List afields,int joini,String joinField,Map arows) { + public NonBlockingSession session; + + public ShareRowOutPutDataHandler(ShareJoin ctx,List afields,int joini,String joinField,Map arows,NonBlockingSession session) { super(); this.afields = afields; this.ctx = ctx; this.arows = arows; this.joinL =joini; this.joinRkey= joinField; + this.session = session; //EngineCtx.LOGGER.info("二次查询:" +arows.size()+ " afields:"+FenDBJoinHandler.getFieldNames(afields)); } @@ -346,46 +380,80 @@ public ShareRowOutPutDataHandler(ShareJoin ctx,List afields,int joini,St public void onHeader(String dataNode, byte[] header, List bfields) { this.bfields=bfields; joinR=this.ctx.getFieldIndex(bfields,joinRkey); - ctx.writeHeader(dataNode,afields, bfields); - } - + MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler(); + + if( middlerResultHandler ==null ){ + ctx.writeHeader(dataNode,afields, bfields); + + } + } + //不是主键,获取join左边的的记录 - private byte[] getRow(String value,int index){ - for(Map.Entry e: arows.entrySet() ){ - String key=e.getKey(); - RowDataPacket rowDataPkg = ResultSetUtil.parseRowData(e.getValue(), afields); - String id = ByteUtil.getString(rowDataPkg.fieldValues.get(index)); - if (id.equals(value)){ - return arows.remove(key); - } - } - return null; + private byte[] getRow(Map batchRowsCopy,String value,int index){ + for(Map.Entry e: batchRowsCopy.entrySet() ){ + String key=e.getKey(); + RowDataPacket rowDataPkg = ResultSetUtil.parseRowData(e.getValue(), afields); + byte[] columnValue = rowDataPkg.fieldValues.get(index); + if(columnValue == null ) + continue; + + String id = ByteUtil.getString(columnValue); + if (id.equals(value)){ + return batchRowsCopy.remove(key); + } + } + return null; } + @Override public boolean onRowData(String 
dataNode, byte[] rowData) { RowDataPacket rowDataPkgold = ResultSetUtil.parseRowData(rowData, bfields); + //拷贝一份batchRows + Map batchRowsCopy = new ConcurrentHashMap(); + batchRowsCopy.putAll(arows); // 获取Id字段, - String id = ByteUtil.getString(rowDataPkgold.fieldValues.get(joinR)); + String id = ByteUtil.getString(rowDataPkgold.fieldValues.get(joinR)); // 查找ID对应的A表的记录 - byte[] arow = getRow(id,joinL);//arows.remove(id); + byte[] arow = getRow(batchRowsCopy,id,joinL);//arows.remove(id); +// byte[] arow = getRow(id,joinL);//arows.remove(id); while (arow!=null) { - RowDataPacket rowDataPkg = ResultSetUtil.parseRowData(arow,afields );//ctx.getAllFields()); - for (int i=1;i0){ + String rowValue = new String(columnData); + middlerResultHandler.add(rowValue); + } + //} + } + + } + + arow = getRow(batchRowsCopy,id,joinL); +// arow = getRow(id,joinL); } return false; } @Override - public void finished(String dataNode, boolean failed) { - // EngineCtx.LOGGER.info("完成2:" + dataNode+" failed:"+failed); + public void finished(String dataNode, boolean failed, String errorMsg) { + if(failed){ + session.getSource().writeErrMessage(ErrorCode.ER_UNKNOWN_ERROR, errorMsg); + } } -} +} \ No newline at end of file diff --git a/src/main/java/io/mycat/sqlengine/sharejoin/TableFilter.java b/src/main/java/io/mycat/catlets/TableFilter.java similarity index 72% rename from src/main/java/io/mycat/sqlengine/sharejoin/TableFilter.java rename to src/main/java/io/mycat/catlets/TableFilter.java index 9be0911ec..75c0f2195 100644 --- a/src/main/java/io/mycat/sqlengine/sharejoin/TableFilter.java +++ b/src/main/java/io/mycat/catlets/TableFilter.java @@ -1,315 +1,367 @@ -package io.mycat.sqlengine.sharejoin; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Map.Entry; - -/** - * 功能详细描述:分片join,单独的语句 - * @author sohudo[http://blog.csdn.net/wind520] - * @create 2015年02月01日 - * @version 0.0.1 - 
*/ - -public class TableFilter { - public static final Logger LOGGER = LoggerFactory - .getLogger(TableFilter.class); - - private LinkedHashMap fieldAliasMap = new LinkedHashMap(); - private String tName; - private String tAlia; - private String where=""; - private String order=""; - - private String parenTable="";//左连接的join的表 - private String joinParentkey="";//左连接的join字段 - private String joinKey=""; //join字段 - - private TableFilter join; - private TableFilter parent; - - private int offset=0; - private int rowCount=0; - - private boolean outJoin; - private boolean allField; - public TableFilter(String taName,String taAlia,boolean outJoin) { - this.tName=taName; - this.tAlia=taAlia; - this.outJoin=outJoin; - this.where=""; - } - - private String getTablefrom(String key){ - if (key==null){ - return ""; - } - else { - int i=key.indexOf('.'); - if (i==-1){ - return key; - } - else - return key.substring(0, i); - } - - } - private String getFieldfrom(String key){ - if (key==null){ - return ""; - } - else { - int i=key.indexOf('.'); - if (i==-1){ - return key; - } - else - return key.substring(i+1); - } - } - - public void addField(String fieldName,String fieldAlia){ - String atable=getTablefrom(fieldName); - String afield=getFieldfrom(fieldName); - boolean allfield=afield.equals("*")?true:false; - if (atable.equals("*")) { - fieldAliasMap.put(afield, null); - setAllField(allfield); - if (join!=null) { - join.addField(fieldName,null); - join.setAllField(allfield); - } - } - else { - if (atable.equals(tAlia)) { - fieldAliasMap.put(afield, fieldAlia); - setAllField(allfield); - } - else { - if (join!=null) { - join.addField(fieldName,fieldAlia); - join.setAllField(allfield); - } - } - } - } - - public void addWhere(String fieldName,String value,String Operator,String and){ - String atable=getTablefrom(fieldName); - String afield=getFieldfrom(fieldName); - if (atable.equals(tAlia)) { - where=unionsql(where,afield+Operator+value,and); - } - else { - if (join!=null) { - 
join.addWhere(fieldName,value,Operator,and); - } - } - } - - private String unionsql(String key,String value,String Operator){ - if (key.trim().equals("")){ - key=value; - } - else { - key+=" "+Operator+" "+value; - } - return key; - } - - public void addOrders(String fieldName,String des){ - String atable=getTablefrom(fieldName); - String afield=getFieldfrom(fieldName); - if (atable.equals(tAlia)) { - order=unionsql(order,afield+" "+des,","); - } - else { - if (join!=null) { - join.addOrders(fieldName,des); - } - } - } - public void addLimit(int offset,int rowCount){ - this.offset=offset; - this.rowCount=rowCount; - } - public void setJoinKey(String fieldName,String value){ - if (parent==null){ - if (join!=null) { - join.setJoinKey(fieldName,value); - } - } - else { - int i=joinLkey(fieldName,value); - if (i==1){ - joinParentkey=getFieldfrom(value); - parenTable =getTablefrom(value); - joinKey=getFieldfrom(fieldName); - } - else { - if (i==2){ - joinParentkey=getFieldfrom(fieldName); - parenTable =getTablefrom(fieldName); - joinKey=getFieldfrom(value); - } - else { - if (join!=null) { - join.setJoinKey(fieldName,value); - } - } - } - } - } - - private String getChildJoinKey(boolean left){ - if (join!=null){ - if (left) { - return join.joinParentkey; - } - else { - return join.joinKey; - } - } - else { - return ""; - } - } - public String getJoinKey(boolean left){ - return getChildJoinKey(left); - } - private int joinLkey(String fieldName,String value){ - String key1=getTablefrom(fieldName); - String key2=getTablefrom(value); - if (key1.equals(tAlia) ) { - return 1; - } - - if (key2.equals(tAlia) ) { - return 2; - } - /* - String bAlia=""; - if (join!=null){ - bAlia=join.getTableAlia(); - } - if (key1.equals(tAlia)&& key2.equals(bAlia) ) { - return 1; - } - - if (key2.equals(tAlia)&& key1.equals(bAlia) ) { - return 2; - } - */ - return 0; - } - - public String getTableName(){ - return tName; - } - public void setTableName(String value){ - tName=value; - } - - 
public String getTableAlia(){ - return tAlia; - } - public void setTableAlia(String value){ - tAlia=value; - } - - public boolean getOutJoin(){ - return outJoin; - } - public void setOutJoin(boolean value){ - outJoin=value; - } - - - public boolean getAllField(){ - return allField; - } - public void setAllField(boolean value){ - allField=value; - } - - public TableFilter getTableJoin(){ - return join; - } - public void setTableJoin(TableFilter value){ - join=value; - join.setParent(this); - } - public TableFilter getParent() { - return parent; - } - - public void setParent(TableFilter parent) { - this.parent = parent; - } - - private String unionField(String field,String key,String Operator){ - if (key.trim().equals("")){ - key=field; - } - else { - key=field+Operator+" "+key; - } - return key; - } - - public String getSQL(){ - String sql=""; - Iterator> iter = fieldAliasMap.entrySet().iterator(); - while (iter.hasNext()) { - Map.Entry entry = (Map.Entry) iter.next(); - String key = entry.getKey(); - String val = entry.getValue(); - if (val==null) { - sql=unionsql(sql,getFieldfrom(key),","); - } - else - sql=unionsql(sql,getFieldfrom(key)+" as "+val,","); - } - if (parent==null){ - sql="select "+sql+" from "+tName; - if (!(where.trim().equals(""))){ - sql+=" where "+where.trim(); - } - } - else { - if (allField) { - sql="select "+sql+" from "+tName; - } - else { - sql=unionField("select "+joinKey,sql,","); - sql=sql+" from "+tName; - //sql="select "+joinKey+","+sql+" from "+tName; - } - if (!(where.trim().equals(""))){ - sql+=" where "+where.trim()+" and ("+joinKey+" in %s )"; - } - else { - sql+=" where "+joinKey+" in %s "; - } - } - - if (!(order.trim().equals(""))){ - sql+=" order by "+order.trim(); - } - if (parent==null){ - if ((rowCount>0)&& (offset>0)){ - sql+=" limit"+offset+","+rowCount; - } - else { - if (rowCount>0){ - sql+=" limit "+rowCount; - } - } - } - return sql; - } -} +package io.mycat.catlets; + +import java.util.Iterator; +import 
java.util.LinkedHashMap; +import java.util.Map; +import java.util.Map.Entry; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import com.alibaba.druid.sql.SQLUtils; +/** + * 功能详细描述:分片join,单独的语句 + * @author sohudo[http://blog.csdn.net/wind520] + * @create 2015年02月01日 + * @version 0.0.1 + */ + +public class TableFilter { + protected static final Logger LOGGER = LoggerFactory.getLogger(TableFilter.class); + + private LinkedHashMap fieldAliasMap = new LinkedHashMap(); + private String tName; + private String tAlia; + private String where=""; + private String order=""; + + private String parenTable="";//左连接的join的表 + private String joinParentkey="";//左连接的join字段 + private String joinKey=""; //join字段 + + private TableFilter join; + private TableFilter parent; + + private int offset=0; + private int rowCount=0; + + private boolean outJoin; + private boolean allField; + public TableFilter(String taName,String taAlia,boolean outJoin) { + this.tName=taName; + this.tAlia=taAlia; + this.outJoin=outJoin; + this.where=""; + } + + private String getTablefrom(String key){ + if (key==null){ + return ""; + } + else { + int i=key.indexOf('.'); + if (i==-1){ + return key; + } + else { + return key.substring(0, i); + } + } + + } + private String getFieldfrom(String key){ + if (key==null){ + return ""; + } + else { + int i=key.indexOf('.'); + if (i==-1){ + return key; + } + else { + return key.substring(i + 1); + } + } + } + + public void addField(String fieldName,String fieldAlia){ + String atable=getTablefrom(fieldName); + String afield=getFieldfrom(fieldName); + boolean allfield=afield.equals("*")?true:false; + if (atable.equals("*")) { + fieldAliasMap.put(afield, null); + setAllField(allfield); + if (join!=null) { + join.addField(fieldName,null); + join.setAllField(allfield); + } + } + else { + if (atable.equals(tAlia)) { + fieldAliasMap.put(afield, fieldAlia); + setAllField(allfield); + } + else { + if (join!=null) { + join.addField(fieldName,fieldAlia); + 
join.setAllField(allfield); + } + } + } + } + + public void addField(String fieldName,String fieldAlia,String expr){ + String atable=getTablefrom(fieldName); + String afield=getFieldfrom(fieldName); + boolean allfield=afield.equals("*")?true:false; + if (atable.equals("*")) { + fieldAliasMap.put(afield, null); + setAllField(allfield); + if (join!=null) { + join.addField(fieldName,null); + join.setAllField(allfield); + } + } + else { + if (atable.equals(tAlia)) { + expr = expr.replace(fieldName, afield); + fieldAliasMap.put(expr, fieldAlia); + setAllField(allfield); + } + else { + if (join!=null) { + join.addField(fieldName,fieldAlia,expr); + join.setAllField(allfield); + } + } + } + } + + + public void addWhere(String fieldName,String value,String Operator,String and){ + String atable=getTablefrom(fieldName); + String afield=getFieldfrom(fieldName); + if (atable.equals(tAlia)) { + where=unionsql(where,afield+Operator+value,and); + } + else { + if (join!=null) { + join.addWhere(fieldName,value,Operator,and); + } + } + } + + public void addWhere(String fieldName,String condition,String and){ + String atable=getTablefrom(fieldName); + String afield=getFieldfrom(fieldName); + condition = condition.replace(fieldName, afield); + if (atable.equals(tAlia)) { + where=unionsql(where,condition,and); + } + else { + if (join!=null) { + join.addWhere(fieldName,condition,and); + } + } + } + + + private String unionsql(String key,String value,String Operator){ + if (key.trim().equals("")){ + key=value; + } + else { + key+=" "+Operator+" "+value; + } + return key; + } + + public void addOrders(String fieldName,String des){ + String atable=getTablefrom(fieldName); + String afield=getFieldfrom(fieldName); + if (atable.equals(tAlia)) { + order=unionsql(order,afield+" "+des,","); + } + else { + if (join!=null) { + join.addOrders(fieldName,des); + } + } + } + public void addLimit(int offset,int rowCount){ + this.offset=offset; + this.rowCount=rowCount; + } + public void 
setJoinKey(String fieldName,String value){ + if (parent==null){ + if (join!=null) { + join.setJoinKey(fieldName,value); + } + } + else { + int i=joinLkey(fieldName,value); + if (i==1){ + joinParentkey=getFieldfrom(value); + parenTable =getTablefrom(value); + joinKey=getFieldfrom(fieldName); + } + else { + if (i==2){ + joinParentkey=getFieldfrom(fieldName); + parenTable =getTablefrom(fieldName); + joinKey=getFieldfrom(value); + } + else { + if (join!=null) { + join.setJoinKey(fieldName,value); + } + } + } + } + } + + private String getChildJoinKey(boolean left){ + if (join!=null){ + if (left) { + return join.joinParentkey; + } + else { + return join.joinKey; + } + } + else { + return ""; + } + } + public String getJoinKey(boolean left){ + return getChildJoinKey(left); + } + private int joinLkey(String fieldName,String value){ + String key1=getTablefrom(fieldName); + String key2=getTablefrom(value); + if (key1.equals(tAlia) ) { + return 1; + } + + if (key2.equals(tAlia) ) { + return 2; + } + /* + String bAlia=""; + if (join!=null){ + bAlia=join.getTableAlia(); + } + if (key1.equals(tAlia)&& key2.equals(bAlia) ) { + return 1; + } + + if (key2.equals(tAlia)&& key1.equals(bAlia) ) { + return 2; + } + */ + return 0; + } + + public String getTableName(){ + return tName; + } + public void setTableName(String value){ + tName=value; + } + + public String getTableAlia(){ + return tAlia; + } + public void setTableAlia(String value){ + tAlia=value; + } + + public boolean getOutJoin(){ + return outJoin; + } + public void setOutJoin(boolean value){ + outJoin=value; + } + + + public boolean getAllField(){ + return allField; + } + public void setAllField(boolean value){ + allField=value; + } + + public TableFilter getTableJoin(){ + return join; + } + public void setTableJoin(TableFilter value){ + join=value; + join.setParent(this); + } + public TableFilter getParent() { + return parent; + } + + public void setParent(TableFilter parent) { + this.parent = parent; + } + + private 
String unionField(String field,String key,String Operator){ + if (key.trim().equals("")){ + key=field; + } + else { + key=field+Operator+" "+key; + } + return key; + } + + public String getSQL(){ + String sql=""; + Iterator> iter = fieldAliasMap.entrySet().iterator(); + while (iter.hasNext()) { + Map.Entry entry = (Map.Entry) iter.next(); + String key = entry.getKey(); + String val = entry.getValue(); + if (val==null) { + sql=unionsql(sql,getFieldfrom(key),","); + } + else { + sql = unionsql(sql, getFieldfrom(key) + " as " + val, ","); + } + } + if (parent==null){ // on/where 等于号左边的表 + String parentJoinKey = getJoinKey(true); + // fix sharejoin bug: + // (AbstractConnection.java:458) -close connection,reason:program err:java.lang.IndexOutOfBoundsException: + // 原因是左表的select列没有包含 join 列,在获取结果时报上面的错误 + if(sql != null && parentJoinKey != null && + sql.toUpperCase().indexOf(parentJoinKey.trim().toUpperCase()) == -1){ + sql += ", " + parentJoinKey; + } + sql="select "+sql+" from "+tName; + if (!(where.trim().equals(""))){ + sql+=" where "+where.trim(); + } + } + else { // on/where 等于号右边边的表 + if (allField) { + sql="select "+sql+" from "+tName; + } + else { + sql=unionField("select "+joinKey,sql,","); + sql=sql+" from "+tName; + //sql="select "+joinKey+","+sql+" from "+tName; + } + if (!(where.trim().equals(""))){ + sql+=" where "+where.trim()+" and ("+joinKey+" in %s )"; + } + else { + sql+=" where "+joinKey+" in %s "; + } + } + + if (!(order.trim().equals(""))){ + sql+=" order by "+order.trim(); + } + if (parent==null){ + if ((rowCount>0)&& (offset>0)){ + sql+=" limit"+offset+","+rowCount; + } + else { + if (rowCount>0){ + sql+=" limit "+rowCount; + } + } + } + return sql; + } +} diff --git a/src/main/java/io/mycat/server/Alarms.java b/src/main/java/io/mycat/config/Alarms.java similarity index 87% rename from src/main/java/io/mycat/server/Alarms.java rename to src/main/java/io/mycat/config/Alarms.java index b71573ca5..f954db8b6 100644 --- 
a/src/main/java/io/mycat/server/Alarms.java +++ b/src/main/java/io/mycat/config/Alarms.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server; +package io.mycat.config; /** * Mycat报警关键词定义 @@ -38,7 +38,10 @@ public interface Alarms { /** 数据节点的数据源发生切换 **/ public static final String DATANODE_SWITCH = "#!DN_SWITCH#"; - /** 隔离区非法用户访问 **/ - public static final String QUARANTINE_ATTACK = "#!QT_ATTACK#"; + /** 防火墙非法用户访问 **/ + public static final String FIREWALL_ATTACK = "#!QT_ATTACK#"; + + /** 非法DML **/ + public static final String DML_ATTACK = "#!DML_ATTACK#"; } diff --git a/src/main/java/io/mycat/server/Capabilities.java b/src/main/java/io/mycat/config/Capabilities.java similarity index 97% rename from src/main/java/io/mycat/server/Capabilities.java rename to src/main/java/io/mycat/config/Capabilities.java index 675fbb786..4a0e4ec76 100644 --- a/src/main/java/io/mycat/server/Capabilities.java +++ b/src/main/java/io/mycat/config/Capabilities.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server; +package io.mycat.config; /** * 处理能力标识定义 @@ -108,5 +108,7 @@ public interface Capabilities { // 通知服务器客户端可以处理由多语句或者存储过程执行生成的多结果集。 // 当打开CLIENT_MULTI_STATEMENTS时,这个标志自动的被打开。 public static final int CLIENT_MULTI_RESULTS = 131072; + + public static final int CLIENT_PLUGIN_AUTH = 0x00080000; // 524288 } \ No newline at end of file diff --git a/src/main/java/io/mycat/config/ConfigInitializer.java b/src/main/java/io/mycat/config/ConfigInitializer.java new file mode 100644 index 000000000..24b9cce1a --- /dev/null +++ b/src/main/java/io/mycat/config/ConfigInitializer.java @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import org.apache.log4j.Logger; + +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.jdbc.JDBCDatasource; +import io.mycat.backend.mysql.nio.MySQLDataSource; +import io.mycat.backend.postgresql.PostgreSQLDataSource; +import io.mycat.config.loader.ConfigLoader; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLConfigLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.DBHostConfig; +import io.mycat.config.model.DataHostConfig; +import io.mycat.config.model.DataNodeConfig; +import io.mycat.config.model.FirewallConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.UserConfig; +import 
io.mycat.config.util.ConfigException; +import io.mycat.route.sequence.handler.DistributedSequenceHandler; +import io.mycat.route.sequence.handler.IncrSequenceMySQLHandler; +import io.mycat.route.sequence.handler.IncrSequenceTimeHandler; +import io.mycat.route.sequence.handler.IncrSequenceZKHandler; + +/** + * @author mycat + */ +public class ConfigInitializer { + + private static final Logger LOGGER = Logger.getLogger( ConfigInitializer.class ); + + private volatile SystemConfig system; + private volatile MycatCluster cluster; + private volatile FirewallConfig firewall; + private volatile Map users; + private volatile Map schemas; + private volatile Map dataNodes; + private volatile Map dataHosts; + + public ConfigInitializer(boolean loadDataHost) { + + //读取rule.xml和schema.xml + SchemaLoader schemaLoader = new XMLSchemaLoader(); + + //读取server.xml + XMLConfigLoader configLoader = new XMLConfigLoader(schemaLoader); + + schemaLoader = null; + + //加载配置 + this.system = configLoader.getSystemConfig(); + this.users = configLoader.getUserConfigs(); + this.schemas = configLoader.getSchemaConfigs(); + + //是否重新加载DataHost和对应的DataNode + if (loadDataHost) { + this.dataHosts = initDataHosts(configLoader); + this.dataNodes = initDataNodes(configLoader); + } + + //权限管理 + this.firewall = configLoader.getFirewallConfig(); + this.cluster = initCobarCluster(configLoader); + + //不同类型的全局序列处理器的配置加载 + if (system.getSequnceHandlerType() == SystemConfig.SEQUENCEHANDLER_MYSQLDB) { + IncrSequenceMySQLHandler.getInstance().load(); + } + + if (system.getSequnceHandlerType() == SystemConfig.SEQUENCEHANDLER_LOCAL_TIME) { + IncrSequenceTimeHandler.getInstance().load(); + } + + if (system.getSequnceHandlerType() == SystemConfig.SEQUENCEHANDLER_ZK_DISTRIBUTED) { + DistributedSequenceHandler.getInstance(system).load(); + } + + if (system.getSequnceHandlerType() == SystemConfig.SEQUENCEHANDLER_ZK_GLOBAL_INCREMENT) { + IncrSequenceZKHandler.getInstance().load(); + } + + /** + * 配置文件初始化, 自检 + */ + 
this.selfChecking0(); + } + + private void selfChecking0() throws ConfigException { + + // 检查user与schema配置对应以及schema配置不为空 + if (users == null || users.isEmpty()) { + throw new ConfigException("SelfCheck### user all node is empty!"); + + } else { + + for (UserConfig uc : users.values()) { + if (uc == null) { + throw new ConfigException("SelfCheck### users node within the item is empty!"); + } + + Set authSchemas = uc.getSchemas(); + if (authSchemas == null) { + throw new ConfigException("SelfCheck### user " + uc.getName() + "refered schemas is empty!"); + } + + for (String schema : authSchemas) { + if ( !schemas.containsKey(schema) ) { + String errMsg = "SelfCheck### schema " + schema + " refered by user " + uc.getName() + " is not exist!"; + throw new ConfigException(errMsg); + } + } + } + } + + + // schema 配置检测 + for (SchemaConfig sc : schemas.values()) { + if (null == sc) { + throw new ConfigException("SelfCheck### schema all node is empty!"); + + } else { + // check dataNode / dataHost 节点 + if ( this.dataNodes != null && this.dataHosts != null ) { + Set dataNodeNames = sc.getAllDataNodes(); + for(String dataNodeName: dataNodeNames) { + + PhysicalDBNode node = this.dataNodes.get(dataNodeName); + if ( node == null ) { + throw new ConfigException("SelfCheck### schema dbnode is empty!"); + } + } + } + } + } + + } + + public void testConnection() { + + // 实际链路的连接测试 + if ( this.dataNodes != null && this.dataHosts != null ) { + + Map map = new HashMap(); + + for(PhysicalDBNode dataNode: dataNodes.values() ) { + + String database = dataNode.getDatabase(); + PhysicalDBPool pool = dataNode.getDbPool(); + + for (PhysicalDatasource ds : pool.getAllDataSources()) { + String key = ds.getName() + "_" + database; + if ( map.get( key ) == null ) { + map.put( key, false ); + + boolean isConnected = false; + try { + isConnected = ds.testConnection( database ); + map.put( key, isConnected ); + } catch (IOException e) { + LOGGER.warn("test conn error:", e); + } + } + } + } + + // + 
boolean isConnectivity = true; + for (Map.Entry entry : map.entrySet()) { + String key = entry.getKey(); + Boolean value = entry.getValue(); + if ( !value && isConnectivity ) { + LOGGER.warn("SelfCheck### test " + key + " database connection failed "); + isConnectivity = false; + + } else { + LOGGER.info("SelfCheck### test " + key + " database connection success "); + } + } + + if ( !isConnectivity ) { + throw new ConfigException("SelfCheck### there are some datasource connection failed, pls check!"); + } + + } + + } + + public SystemConfig getSystem() { + return system; + } + + public MycatCluster getCluster() { + return cluster; + } + + public FirewallConfig getFirewall() { + return firewall; + } + + public Map getUsers() { + return users; + } + + public Map getSchemas() { + return schemas; + } + + public Map getDataNodes() { + return dataNodes; + } + + public Map getDataHosts() { + return this.dataHosts; + } + + private MycatCluster initCobarCluster(ConfigLoader configLoader) { + return new MycatCluster(configLoader.getClusterConfig()); + } + + private Map initDataHosts(ConfigLoader configLoader) { + Map nodeConfs = configLoader.getDataHosts(); + boolean isBooster="booster".equalsIgnoreCase(ZkConfig.getInstance().getValue(ZkParamCfg.MYCAT_SERVER_TYPE) ) ; + //根据DataHost建立PhysicalDBPool,其实就是实际数据库连接池,每个DataHost对应一个PhysicalDBPool + Map nodes = new HashMap( + nodeConfs.size()); + for (DataHostConfig conf : nodeConfs.values()) { + if(isBooster){ + conf.setMinCon(2); + } + //建立PhysicalDBPool + PhysicalDBPool pool = getPhysicalDBPool(conf, configLoader); + nodes.put(pool.getHostName(), pool); + } + return nodes; + } + + private PhysicalDatasource[] createDataSource(DataHostConfig conf, + String hostName, String dbType, String dbDriver, + DBHostConfig[] nodes, boolean isRead) { + PhysicalDatasource[] dataSources = new PhysicalDatasource[nodes.length]; + if (dbType.equals("mysql") && dbDriver.equals("native")) { + for (int i = 0; i < nodes.length; i++) { + 
//设置最大idle时间,默认为30分钟 + nodes[i].setIdleTimeout(system.getIdleTimeout()); + MySQLDataSource ds = new MySQLDataSource(nodes[i], conf, isRead); + dataSources[i] = ds; + } + + } else if (dbDriver.equals("jdbc")) { + for (int i = 0; i < nodes.length; i++) { + nodes[i].setIdleTimeout(system.getIdleTimeout()); + JDBCDatasource ds = new JDBCDatasource(nodes[i], conf, isRead); + dataSources[i] = ds; + } + } else if ("postgresql".equalsIgnoreCase(dbType) && dbDriver.equalsIgnoreCase("native")){ + for (int i = 0; i < nodes.length; i++) { + nodes[i].setIdleTimeout(system.getIdleTimeout()); + PostgreSQLDataSource ds = new PostgreSQLDataSource(nodes[i], conf, isRead); + dataSources[i] = ds; + } + } else{ + throw new ConfigException("not supported yet !" + hostName); + } + return dataSources; + } + + private PhysicalDBPool getPhysicalDBPool(DataHostConfig conf, + ConfigLoader configLoader) { + String name = conf.getName(); + //数据库类型,我们这里只讨论MySQL + String dbType = conf.getDbType(); + //连接数据库驱动,我们这里只讨论MyCat自己实现的native + String dbDriver = conf.getDbDriver(); + //针对所有写节点创建PhysicalDatasource + PhysicalDatasource[] writeSources = createDataSource(conf, name, + dbType, dbDriver, conf.getWriteHosts(), false); + Map readHostsMap = conf.getReadHosts(); + Map readSourcesMap = new HashMap( + readHostsMap.size()); + //对于每个读节点建立key为writeHost下标value为readHost的PhysicalDatasource[]的哈希表 + for (Map.Entry entry : readHostsMap.entrySet()) { + PhysicalDatasource[] readSources = createDataSource(conf, name, + dbType, dbDriver, entry.getValue(), true); + readSourcesMap.put(entry.getKey(), readSources); + } + PhysicalDBPool pool = new PhysicalDBPool(conf.getName(), conf, + writeSources, readSourcesMap, conf.getBalance(), + conf.getWriteType()); + pool.setSlaveIDs(conf.getSlaveIDs()); + return pool; + } + + private Map initDataNodes(ConfigLoader configLoader) { + Map nodeConfs = configLoader.getDataNodes(); + Map nodes = new HashMap( + nodeConfs.size()); + for (DataNodeConfig conf : nodeConfs.values()) { + 
PhysicalDBPool pool = this.dataHosts.get(conf.getDataHost()); + if (pool == null) { + throw new ConfigException("dataHost not exists " + + conf.getDataHost()); + + } + PhysicalDBNode dataNode = new PhysicalDBNode(conf.getName(), + conf.getDatabase(), pool); + nodes.put(dataNode.getName(), dataNode); + } + return nodes; + } + +} diff --git a/src/main/java/io/mycat/server/ErrorCode.java b/src/main/java/io/mycat/config/ErrorCode.java similarity index 99% rename from src/main/java/io/mycat/server/ErrorCode.java rename to src/main/java/io/mycat/config/ErrorCode.java index b36b0f3a2..2bd3f3cd8 100644 --- a/src/main/java/io/mycat/server/ErrorCode.java +++ b/src/main/java/io/mycat/config/ErrorCode.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server; +package io.mycat.config; /** * @author mycat diff --git a/src/main/java/io/mycat/server/Fields.java b/src/main/java/io/mycat/config/Fields.java similarity index 99% rename from src/main/java/io/mycat/server/Fields.java rename to src/main/java/io/mycat/config/Fields.java index 80abc475a..3a73b0c34 100644 --- a/src/main/java/io/mycat/server/Fields.java +++ b/src/main/java/io/mycat/config/Fields.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server; +package io.mycat.config; /** * 字段类型及标识定义 diff --git a/src/main/java/io/mycat/server/Isolations.java b/src/main/java/io/mycat/config/Isolations.java similarity index 98% rename from src/main/java/io/mycat/server/Isolations.java rename to src/main/java/io/mycat/config/Isolations.java index 40de53ae1..f5205823d 100644 --- a/src/main/java/io/mycat/server/Isolations.java +++ b/src/main/java/io/mycat/config/Isolations.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server; +package io.mycat.config; /** * 事务隔离级别定义 diff --git a/src/main/java/io/mycat/server/config/cluster/MycatClusterConfig.java b/src/main/java/io/mycat/config/MycatCluster.java similarity index 69% rename from src/main/java/io/mycat/server/config/cluster/MycatClusterConfig.java rename to src/main/java/io/mycat/config/MycatCluster.java index 3bbf47de2..9b1f47173 100644 --- a/src/main/java/io/mycat/server/config/cluster/MycatClusterConfig.java +++ b/src/main/java/io/mycat/config/MycatCluster.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,28 +16,36 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.config.cluster; - +package io.mycat.config; +import java.util.HashMap; import java.util.List; import java.util.Map; +import io.mycat.config.model.ClusterConfig; +import io.mycat.config.model.MycatNodeConfig; + /** * @author mycat */ -public final class MycatClusterConfig { +public final class MycatCluster { private final Map nodes; private final Map> groups; - public MycatClusterConfig( Map nodes,Map> groups) { - this.nodes = nodes; - this.groups = groups; + public MycatCluster(ClusterConfig clusterConf) { + this.nodes = new HashMap(clusterConf.getNodes().size()); + this.groups = clusterConf.getGroups(); + for (MycatNodeConfig conf : clusterConf.getNodes().values()) { + String name = conf.getName(); + MycatNode node = new MycatNode(conf); + this.nodes.put(name, node); + } } public Map getNodes() { diff --git a/src/main/java/io/mycat/config/MycatConfig.java b/src/main/java/io/mycat/config/MycatConfig.java new file mode 100644 index 000000000..908331b59 --- /dev/null +++ b/src/main/java/io/mycat/config/MycatConfig.java @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config; + +import java.io.IOException; +import java.net.StandardSocketOptions; +import java.nio.channels.NetworkChannel; +import java.util.ArrayList; +import java.util.Map; +import java.util.concurrent.locks.ReentrantLock; + +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.config.model.FirewallConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.UserConfig; +import io.mycat.net.AbstractConnection; +import io.mycat.util.TimeUtil; + +/** + * @author mycat + */ +public class MycatConfig { + + private static final int RELOAD = 1; + private static final int ROLLBACK = 2; + private static final int RELOAD_ALL = 3; + + private volatile SystemConfig system; + private volatile MycatCluster cluster; + private volatile MycatCluster _cluster; + private volatile FirewallConfig firewall; + private volatile FirewallConfig _firewall; + private volatile Map users; + private volatile Map _users; + private volatile Map schemas; + private volatile Map _schemas; + private volatile Map dataNodes; + private volatile Map _dataNodes; + private volatile Map dataHosts; + private volatile Map _dataHosts; + private long reloadTime; + private long rollbackTime; + private int status; + private final ReentrantLock lock; + + public MycatConfig() { + + //读取schema.xml,rule.xml和server.xml + ConfigInitializer confInit = new ConfigInitializer(true); + this.system = confInit.getSystem(); + this.users = confInit.getUsers(); + this.schemas = confInit.getSchemas(); + this.dataHosts = 
confInit.getDataHosts(); + + this.dataNodes = confInit.getDataNodes(); + for (PhysicalDBPool dbPool : dataHosts.values()) { + dbPool.setSchemas(getDataNodeSchemasOfDataHost(dbPool.getHostName())); + } + + this.firewall = confInit.getFirewall(); + this.cluster = confInit.getCluster(); + + //初始化重加载配置时间 + this.reloadTime = TimeUtil.currentTimeMillis(); + this.rollbackTime = -1L; + this.status = RELOAD; + + //配置加载锁 + this.lock = new ReentrantLock(); + } + + public SystemConfig getSystem() { + return system; + } + + public void setSocketParams(AbstractConnection con, boolean isFrontChannel) + throws IOException { + + int sorcvbuf = 0; + int sosndbuf = 0; + int soNoDelay = 0; + if ( isFrontChannel ) { + sorcvbuf = system.getFrontsocketsorcvbuf(); + sosndbuf = system.getFrontsocketsosndbuf(); + soNoDelay = system.getFrontSocketNoDelay(); + } else { + sorcvbuf = system.getBacksocketsorcvbuf(); + sosndbuf = system.getBacksocketsosndbuf(); + soNoDelay = system.getBackSocketNoDelay(); + } + + NetworkChannel channel = con.getChannel(); + channel.setOption(StandardSocketOptions.SO_RCVBUF, sorcvbuf); + channel.setOption(StandardSocketOptions.SO_SNDBUF, sosndbuf); + channel.setOption(StandardSocketOptions.TCP_NODELAY, soNoDelay == 1); + channel.setOption(StandardSocketOptions.SO_REUSEADDR, true); + channel.setOption(StandardSocketOptions.SO_KEEPALIVE, true); + + con.setMaxPacketSize(system.getMaxPacketSize()); + con.setPacketHeaderSize(system.getPacketHeaderSize()); + con.setIdleTimeout(system.getIdleTimeout()); + con.setCharset(system.getCharset()); + + } + + public Map getUsers() { + return users; + } + + public Map getBackupUsers() { + return _users; + } + + public Map getSchemas() { + return schemas; + } + + public Map getBackupSchemas() { + return _schemas; + } + + public Map getDataNodes() { + return dataNodes; + } + + public void setDataNodes( Map map) { + this.dataNodes = map; + } + + public String[] getDataNodeSchemasOfDataHost(String dataHost) { + ArrayList schemas = 
new ArrayList(30); + for (PhysicalDBNode dn: dataNodes.values()) { + if (dn.getDbPool().getHostName().equals(dataHost)) { + schemas.add(dn.getDatabase()); + } + } + return schemas.toArray(new String[schemas.size()]); + } + + public Map getBackupDataNodes() { + return _dataNodes; + } + + public Map getDataHosts() { + return dataHosts; + } + + public Map getBackupDataHosts() { + return _dataHosts; + } + + public MycatCluster getCluster() { + return cluster; + } + + public MycatCluster getBackupCluster() { + return _cluster; + } + + public FirewallConfig getFirewall() { + return firewall; + } + + public FirewallConfig getBackupFirewall() { + return _firewall; + } + + public ReentrantLock getLock() { + return lock; + } + + public long getReloadTime() { + return reloadTime; + } + + public long getRollbackTime() { + return rollbackTime; + } + + public void reload( + Map newUsers, + Map newSchemas, + Map newDataNodes, + Map newDataHosts, + MycatCluster newCluster, + FirewallConfig newFirewall, + boolean reloadAll) { + + apply(newUsers, newSchemas, newDataNodes, newDataHosts, newCluster, newFirewall, reloadAll); + this.reloadTime = TimeUtil.currentTimeMillis(); + this.status = reloadAll?RELOAD_ALL:RELOAD; + } + + public boolean canRollback() { + if (_users == null || _schemas == null || _dataNodes == null + || _dataHosts == null || _cluster == null + || _firewall == null || status == ROLLBACK) { + return false; + } else { + return true; + } + } + + public void rollback( + Map users, + Map schemas, + Map dataNodes, + Map dataHosts, + MycatCluster cluster, + FirewallConfig firewall) { + + apply(users, schemas, dataNodes, dataHosts, cluster, firewall, status==RELOAD_ALL); + this.rollbackTime = TimeUtil.currentTimeMillis(); + this.status = ROLLBACK; + } + + private void apply(Map newUsers, + Map newSchemas, + Map newDataNodes, + Map newDataHosts, + MycatCluster newCluster, + FirewallConfig newFirewall, + boolean isLoadAll) { + + final ReentrantLock lock = this.lock; + 
lock.lock(); + try { + + // old 处理 + // 1、停止老的数据源心跳 + // 2、备份老的数据源配置 + //-------------------------------------------- + if (isLoadAll) { + Map oldDataHosts = this.dataHosts; + if (oldDataHosts != null) { + for (PhysicalDBPool oldDbPool : oldDataHosts.values()) { + if (oldDbPool != null) { + oldDbPool.stopHeartbeat(); + } + } + } + this._dataNodes = this.dataNodes; + this._dataHosts = this.dataHosts; + } + + this._users = this.users; + this._schemas = this.schemas; + this._cluster = this.cluster; + this._firewall = this.firewall; + + // new 处理 + // 1、启动新的数据源心跳 + // 2、执行新的配置 + //--------------------------------------------------- + if (isLoadAll) { + if (newDataHosts != null) { + for (PhysicalDBPool newDbPool : newDataHosts.values()) { + if ( newDbPool != null) { + newDbPool.startHeartbeat(); + } + } + } + this.dataNodes = newDataNodes; + this.dataHosts = newDataHosts; + } + this.users = newUsers; + this.schemas = newSchemas; + this.cluster = newCluster; + this.firewall = newFirewall; + + } finally { + lock.unlock(); + } + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/config/cluster/MycatNode.java b/src/main/java/io/mycat/config/MycatNode.java similarity index 84% rename from src/main/java/io/mycat/server/config/cluster/MycatNode.java rename to src/main/java/io/mycat/config/MycatNode.java index 878298e48..41482ae6f 100644 --- a/src/main/java/io/mycat/server/config/cluster/MycatNode.java +++ b/src/main/java/io/mycat/config/MycatNode.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,22 +16,22 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.config.cluster; +package io.mycat.config; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import io.mycat.config.model.MycatNodeConfig; /** * @author mycat */ public class MycatNode { - private static final Logger LOGGER = LoggerFactory - .getLogger(MycatNode.class); + private static final Logger LOGGER = LoggerFactory.getLogger(MycatNode.class); private final String name; private final MycatNodeConfig config; diff --git a/src/main/java/io/mycat/config/MycatPrivileges.java b/src/main/java/io/mycat/config/MycatPrivileges.java new file mode 100644 index 000000000..08aa050fe --- /dev/null +++ b/src/main/java/io/mycat/config/MycatPrivileges.java @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; + +import io.mycat.config.loader.xml.XMLServerLoader; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.statement.SQLDeleteStatement; +import com.alibaba.druid.sql.ast.statement.SQLInsertStatement; +import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; +import com.alibaba.druid.sql.ast.statement.SQLShowTablesStatement; +import com.alibaba.druid.sql.ast.statement.SQLUpdateStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlReplaceStatement; +import com.alibaba.druid.sql.parser.SQLStatementParser; +import com.alibaba.druid.sql.visitor.SchemaStatVisitor; +import com.alibaba.druid.wall.WallCheckResult; +import com.alibaba.druid.wall.WallProvider; + +import io.mycat.MycatServer; +import io.mycat.config.model.FirewallConfig; +import io.mycat.config.model.UserConfig; +import io.mycat.config.model.UserPrivilegesConfig; +import io.mycat.net.handler.FrontendPrivileges; +import io.mycat.route.parser.druid.MycatSchemaStatVisitor; +import io.mycat.route.parser.druid.MycatStatementParser; + +/** + * @author mycat + */ +public class MycatPrivileges implements FrontendPrivileges { + /** + * 无需每次建立连接都new实例。 + */ + private static MycatPrivileges instance = new MycatPrivileges(); + + private static final Logger ALARM = 
LoggerFactory.getLogger("alarm"); + + private static boolean check = false; + private final static ThreadLocal contextLocal = new ThreadLocal(); + + public static MycatPrivileges instance() { + return instance; + } + + private MycatPrivileges() { + super(); + } + + @Override + public boolean schemaExists(String schema) { + MycatConfig conf = MycatServer.getInstance().getConfig(); + return conf.getSchemas().containsKey(schema); + } + + @Override + public boolean userExists(String user, String host) { + //检查用户及白名单 + return checkFirewallWhiteHostPolicy(user, host); + } + + @Override + public String getPassword(String user) { + MycatConfig conf = MycatServer.getInstance().getConfig(); + if (user != null && user.equals(conf.getSystem().getClusterHeartbeatUser())) { + return conf.getSystem().getClusterHeartbeatPass(); + } else { + UserConfig uc = conf.getUsers().get(user); + if (uc != null) { + return uc.getPassword(); + } else { + return null; + } + } + } + + @Override + public Set getUserSchemas(String user) { + MycatConfig conf = MycatServer.getInstance().getConfig(); + + UserConfig uc = conf.getUsers().get(user); + if (uc != null) { + return uc.getSchemas(); + } else { + return null; + } + + } + + @Override + public Boolean isReadOnly(String user) { + MycatConfig conf = MycatServer.getInstance().getConfig(); + + UserConfig uc = conf.getUsers().get(user); + if (uc != null) { + return uc.isReadOnly(); + } else { + return null; + } + } + + @Override + public int getBenchmark(String user) { + MycatConfig conf = MycatServer.getInstance().getConfig(); + UserConfig uc = conf.getUsers().get(user); + if (uc != null) { + return uc.getBenchmark(); + } else { + return 0; + } + } + + /** + * 防火墙白名单处理,根据防火墙配置,判断目前主机是否可以通过某用户登陆 + * 白名单配置请参考: + * @see XMLServerLoader + * @see FirewallConfig + * + * @modification 修改增加网段白名单识别配置 + * @date 2016/12/8 + * @modifiedBy Hash Zhang + */ + @Override + public boolean checkFirewallWhiteHostPolicy(String user, String host) { + + MycatConfig 
mycatConfig = MycatServer.getInstance().getConfig(); + FirewallConfig firewallConfig = mycatConfig.getFirewall(); + + //防火墙 白名单处理 + boolean isPassed = false; + + Map> whitehost = firewallConfig.getWhitehost(); + Map> whitehostMask = firewallConfig.getWhitehostMask(); + if ((whitehost == null || whitehost.size() == 0)&&(whitehostMask == null || whitehostMask.size() == 0)) { + Map users = mycatConfig.getUsers(); + isPassed = users.containsKey(user); + + } else { + List list = whitehost.get(host); + Set patterns = whitehostMask.keySet(); + if(patterns != null && patterns.size() > 0){ + for(Pattern pattern : patterns) { + if(pattern.matcher(host).find()){ + isPassed = true; + break; + } + } + } + if (list != null) { + for (UserConfig userConfig : list) { + if (userConfig.getName().equals(user)) { + isPassed = true; + break; + } + } + } + } + + if ( !isPassed ) { + ALARM.error(new StringBuilder().append(Alarms.FIREWALL_ATTACK).append("[host=").append(host) + .append(",user=").append(user).append(']').toString()); + return false; + } + return true; + } + + + /** + * @see https://github.com/alibaba/druid/wiki/%E9%85%8D%E7%BD%AE-wallfilter + */ + @Override + public boolean checkFirewallSQLPolicy(String user, String sql) { + + boolean isPassed = true; + + if( contextLocal.get() == null ){ + FirewallConfig firewallConfig = MycatServer.getInstance().getConfig().getFirewall(); + if ( firewallConfig != null) { + if ( firewallConfig.isCheck() ) { + contextLocal.set(firewallConfig.getProvider()); + check = true; + } + } + } + + if( check ){ + WallCheckResult result = contextLocal.get().check(sql); + + // 修复 druid 防火墙在处理SHOW FULL TABLES WHERE Table_type != 'VIEW' 的时候存在的 BUG + // 此代码有问题,由于Druid WallCheck 对同一条SQL语句只做一次解析,下面代码会导致第二次拦截失效 + // 并且 目前已经提供 ShowFullTables 来处理show full tables 命令,故对代码进行修改 +// List stmts = result.getStatementList(); +// if ( !stmts.isEmpty() && !( stmts.get(0) instanceof SQLShowTablesStatement) ) { +// if ( !result.getViolations().isEmpty()) { +// isPassed = 
false; +// ALARM.warn("Firewall to intercept the '" + user + "' unsafe SQL , errMsg:" +// + result.getViolations().get(0).getMessage() + +// " \r\n " + sql); +// } +// } + + if ( !result.getViolations().isEmpty()) { + isPassed = false; + ALARM.warn("Firewall to intercept the '" + user + "' unsafe SQL , errMsg:" + + result.getViolations().get(0).getMessage() + + " \r\n " + sql); + } + + + } + return isPassed; + } + + // 审计SQL权限 + @Override + public boolean checkDmlPrivilege(String user, String schema, String sql) { + + if ( schema == null ) { + return true; + } + + boolean isPassed = false; + + MycatConfig conf = MycatServer.getInstance().getConfig(); + UserConfig userConfig = conf.getUsers().get(user); + if (userConfig != null) { + + UserPrivilegesConfig userPrivilege = userConfig.getPrivilegesConfig(); + if ( userPrivilege != null && userPrivilege.isCheck() ) { + + UserPrivilegesConfig.SchemaPrivilege schemaPrivilege = userPrivilege.getSchemaPrivilege( schema ); + if ( schemaPrivilege != null ) { + + String tableName = null; + int index = -1; + + //TODO 此处待优化,寻找更优SQL 解析器 + + //修复bug + // https://github.com/alibaba/druid/issues/1309 + //com.alibaba.druid.sql.parser.ParserException: syntax error, error in :'begin',expect END, actual EOF begin + if ( sql != null && sql.length() == 5 && sql.equalsIgnoreCase("begin") ) { + return true; + } + + SQLStatementParser parser = new MycatStatementParser(sql); + SQLStatement stmt = parser.parseStatement(); + + if (stmt instanceof MySqlReplaceStatement || stmt instanceof SQLInsertStatement ) { + index = 0; + } else if (stmt instanceof SQLUpdateStatement ) { + index = 1; + } else if (stmt instanceof SQLSelectStatement ) { + index = 2; + } else if (stmt instanceof SQLDeleteStatement ) { + index = 3; + } + + if ( index > -1) { + + SchemaStatVisitor schemaStatVisitor = new MycatSchemaStatVisitor(); + stmt.accept(schemaStatVisitor); + String key = schemaStatVisitor.getCurrentTable(); + if ( key != null ) { + + if (key.contains("`")) 
{ + key = key.replaceAll("`", ""); + } + + int dotIndex = key.indexOf("."); + if (dotIndex > 0) { + tableName = key.substring(dotIndex + 1); + } else { + tableName = key; + } + + //获取table 权限, 此处不需要检测空值, 无设置则自动继承父级权限 + UserPrivilegesConfig.TablePrivilege tablePrivilege = schemaPrivilege.getTablePrivilege( tableName ); + if ( tablePrivilege.getDml()[index] > 0 ) { + isPassed = true; + } + + } else { + //skip + isPassed = true; + } + + + } else { + //skip + isPassed = true; + } + + } else { + //skip + isPassed = true; + } + + } else { + //skip + isPassed = true; + } + + } else { + //skip + isPassed = true; + } + + if( !isPassed ) { + ALARM.error(new StringBuilder().append(Alarms.DML_ATTACK ).append("[sql=").append( sql ) + .append(",user=").append(user).append(']').toString()); + } + + return isPassed; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/config/Versions.java b/src/main/java/io/mycat/config/Versions.java new file mode 100644 index 000000000..4d7215da4 --- /dev/null +++ b/src/main/java/io/mycat/config/Versions.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config; + +/** + * @author mycat + */ +public abstract class Versions { + + /**协议版本**/ + public static final byte PROTOCOL_VERSION = 10; + + /**服务器版本**/ + public static byte[] SERVER_VERSION = "5.6.29-mycat-1.6.5-BETA-20170424174212".getBytes(); + + public static void setServerVersion(String version) { + byte[] mysqlVersionPart = version.getBytes(); + int startIndex; + for (startIndex = 0; startIndex < SERVER_VERSION.length; startIndex++) { + if (SERVER_VERSION[startIndex] == '-') + break; + } + + // 生成mycat version信息 + byte[] newMycatVersion = new byte[mysqlVersionPart.length + SERVER_VERSION.length - startIndex]; + System.arraycopy(mysqlVersionPart, 0, newMycatVersion, 0, mysqlVersionPart.length); + System.arraycopy(SERVER_VERSION, startIndex, newMycatVersion, mysqlVersionPart.length, + SERVER_VERSION.length - startIndex); + SERVER_VERSION = newMycatVersion; + } +} diff --git a/src/main/java/io/mycat/config/Versions.template b/src/main/java/io/mycat/config/Versions.template new file mode 100644 index 000000000..a9828eff8 --- /dev/null +++ b/src/main/java/io/mycat/config/Versions.template @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config; + +/** + * @author mycat + */ +public abstract class Versions { + + /**协议版本**/ + public static final byte PROTOCOL_VERSION = 10; + + /**服务器版本**/ + public static byte[] SERVER_VERSION = "@server-version@".getBytes(); + + public static void setServerVersion(String version) { + byte[] mysqlVersionPart = version.getBytes(); + int startIndex; + for (startIndex = 0; startIndex < SERVER_VERSION.length; startIndex++) { + if (SERVER_VERSION[startIndex] == '-') + break; + } + + // 重新拼接mycat version字节数组 + byte[] newMycatVersion = new byte[mysqlVersionPart.length + SERVER_VERSION.length - startIndex]; + System.arraycopy(mysqlVersionPart, 0, newMycatVersion, 0, mysqlVersionPart.length); + System.arraycopy(SERVER_VERSION, startIndex, newMycatVersion, mysqlVersionPart.length, + SERVER_VERSION.length - startIndex); + SERVER_VERSION = newMycatVersion; + } +} diff --git a/src/main/java/io/mycat/server/classloader/DynaClassLoader.java b/src/main/java/io/mycat/config/classloader/DynaClassLoader.java similarity index 91% rename from src/main/java/io/mycat/server/classloader/DynaClassLoader.java rename to src/main/java/io/mycat/config/classloader/DynaClassLoader.java index 213919f9e..2f39b9ef5 100644 --- a/src/main/java/io/mycat/server/classloader/DynaClassLoader.java +++ 
b/src/main/java/io/mycat/config/classloader/DynaClassLoader.java @@ -1,13 +1,16 @@ -package io.mycat.server.classloader; +package io.mycat.config.classloader; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.*; +import java.io.BufferedInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + /** * used for mycat's catlet class loader ,catlet's class file is stored in * Mycat_home/catlet dir @@ -16,8 +19,7 @@ * */ public class DynaClassLoader { - private static final Logger LOGGER = LoggerFactory - .getLogger("DynaClassLoader"); + private static final Logger LOGGER = LoggerFactory.getLogger("DynaClassLoader"); /** key- class full name */ private static Map loadedDynaClassMap = new ConcurrentHashMap(); private final String extClassHome; @@ -119,7 +121,7 @@ public MyDynaClassLoader(ClassLoader parentLoader) { */ public Class loadClass(String name) throws ClassNotFoundException { if (name.startsWith("java") || name.startsWith("sun") - || name.startsWith("org.opencloudb")) { + || name.startsWith("io.mycat")) { return super.loadClass(name); } DynaClass dynaClass = loadedDynaClassMap.get(name); @@ -163,10 +165,11 @@ public Class loadClass(String name) throws ClassNotFoundException { private DynaClass searchFile(String classpath, String fileName) throws Exception { DynaClass dynCls = null; String path = fileName.replace('.', File.separatorChar) + ".class"; + System.out.println("class " + classpath + " file " + path); File f = new File(classpath, path); if (f.isFile()) { String theName = f.getPath(); - LOGGER.info("found " + theName); + System.out.println("found " + theName); dynCls = new DynaClass(f.getPath()); dynCls.lastModified = f.lastModified(); @@ -175,7 +178,7 @@ private DynaClass searchFile(String classpath, 
String fileName) throws Exception else { path = fileName.replace('.', File.separatorChar) + ".jar"; //classpath="D:\\code\\mycat\\Mycat-Server\\catlet\\"; - LOGGER.info("jar " + classpath + " file " + path); + System.out.println("jar " + classpath + " file " + path); f = new File(classpath, path); if (f.isFile()) { try { diff --git a/src/main/java/io/mycat/server/classloader/JarLoader.java b/src/main/java/io/mycat/config/classloader/JarLoader.java similarity index 91% rename from src/main/java/io/mycat/server/classloader/JarLoader.java rename to src/main/java/io/mycat/config/classloader/JarLoader.java index 9ad9e1216..45b743339 100644 --- a/src/main/java/io/mycat/server/classloader/JarLoader.java +++ b/src/main/java/io/mycat/config/classloader/JarLoader.java @@ -1,160 +1,154 @@ -package io.mycat.server.classloader; - - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.URL; -import java.net.URLClassLoader; -import java.util.ArrayList; -import java.util.Enumeration; -import java.util.jar.JarEntry; -import java.util.jar.JarFile; -import java.util.jar.Manifest; - -public class JarLoader { - /** Unpack a jar file into a directory. 
*/ - public static void unJar(File jarFile, File toDir) throws IOException { - JarFile jar = new JarFile(jarFile); - try { - Enumeration entries = jar.entries(); - while (entries.hasMoreElements()) { - JarEntry entry = (JarEntry)entries.nextElement(); - if (!entry.isDirectory()) { - InputStream in = jar.getInputStream(entry); - try { - File file = new File(toDir, entry.getName()); - if (!file.getParentFile().mkdirs()) { - if (!file.getParentFile().isDirectory()) { - throw new IOException("Mkdirs failed to create " + - file.getParentFile().toString()); - } - } - OutputStream out = new FileOutputStream(file); - try { - byte[] buffer = new byte[8192]; - int i; - while ((i = in.read(buffer)) != -1) { - out.write(buffer, 0, i); - } - } finally { - out.close(); - } - } finally { - in.close(); - } - } - } - } finally { - jar.close(); - } - } - - public static Class loadJar(String fileName,String mainJavaclass) throws Exception { - - File file = new File(fileName); - String mainClassName = null; - - JarFile jarFile; - try { - jarFile = new JarFile(fileName); - } catch(IOException io) { - throw new IOException("Error opening jar: " + fileName); - } - - Manifest manifest = jarFile.getManifest(); - if (manifest != null) { - mainClassName = manifest.getMainAttributes().getValue("Main-Class"); - } - jarFile.close(); - - if (mainClassName == null) { - mainClassName = mainJavaclass; - } - mainClassName = mainClassName.replaceAll("/", "."); - - File tmpDir = new File(System.getProperty("java.io.tmpdir")); - tmpDir.mkdirs(); - if (!tmpDir.isDirectory()) { - System.out.println("Mkdirs failed to create " + tmpDir); - } - final File workDir = File.createTempFile("unjar", "", tmpDir); - workDir.delete(); - workDir.mkdirs(); - if (!workDir.isDirectory()) { - System.out.println("Mkdirs failed to create " + workDir); - } - - Runtime.getRuntime().addShutdownHook(new Thread() { - public void run() { - try { - fullyDelete(workDir); - } catch (IOException e) { - } - } - }); - - unJar(file, 
workDir); - - ArrayList classPath = new ArrayList(); - classPath.add(new File(workDir+"/").toURL()); - classPath.add(file.toURL()); - classPath.add(new File(workDir, "classes/").toURL()); - File[] libs = new File(workDir, "lib").listFiles(); - if (libs != null) { - for (int i = 0; i < libs.length; i++) { - classPath.add(libs[i].toURL()); - } - } - - ClassLoader loader = new URLClassLoader(classPath.toArray(new URL[0])); - - Thread.currentThread().setContextClassLoader(loader); - Class mainClass = Class.forName(mainClassName, true, loader); - return mainClass; - } - - public static boolean fullyDelete(File dir) throws IOException { - if (!fullyDeleteContents(dir)) { - return false; - } - return dir.delete(); - } - - /** - * Delete the contents of a directory, not the directory itself. If - * we return false, the directory may be partially-deleted. - */ - public static boolean fullyDeleteContents(File dir) throws IOException { - boolean deletionSucceeded = true; - File contents[] = dir.listFiles(); - if (contents != null) { - for (int i = 0; i < contents.length; i++) { - if (contents[i].isFile()) { - if (!contents[i].delete()) { - deletionSucceeded = false; - continue; // continue deletion of other files/dirs under dir - } - } else { - //try deleting the directory - // this might be a symlink - boolean b = false; - b = contents[i].delete(); - if (b){ - //this was indeed a symlink or an empty directory - continue; - } - // if not an empty directory or symlink let - // fullydelete handle it. - if (!fullyDelete(contents[i])) { - deletionSucceeded = false; - continue; // continue deletion of other files/dirs under dir - } - } - } - } - return deletionSucceeded; - } -} +package io.mycat.config.classloader; + + +import java.util.jar.*; +import java.lang.reflect.*; +import java.net.URL; +import java.net.URLClassLoader; +import java.io.*; +import java.util.*; + +public class JarLoader { + /** Unpack a jar file into a directory. 
*/ + public static void unJar(File jarFile, File toDir) throws IOException { + JarFile jar = new JarFile(jarFile); + try { + Enumeration entries = jar.entries(); + while (entries.hasMoreElements()) { + JarEntry entry = (JarEntry)entries.nextElement(); + if (!entry.isDirectory()) { + InputStream in = jar.getInputStream(entry); + try { + File file = new File(toDir, entry.getName()); + if (!file.getParentFile().mkdirs() && !file.getParentFile().isDirectory()) { + + throw new IOException("Mkdirs failed to create " + + file.getParentFile().toString()); + + } + OutputStream out = new FileOutputStream(file); + try { + byte[] buffer = new byte[8192]; + int i; + while ((i = in.read(buffer)) != -1) { + out.write(buffer, 0, i); + } + } finally { + out.close(); + } + } finally { + in.close(); + } + } + } + } finally { + jar.close(); + } + } + + public static Class loadJar(String fileName,String mainJavaclass) throws Exception { + + File file = new File(fileName); + String mainClassName = null; + + JarFile jarFile; + try { + jarFile = new JarFile(fileName); + } catch(IOException io) { + throw new IOException("Error opening jar: " + fileName); + } + + Manifest manifest = jarFile.getManifest(); + if (manifest != null) { + mainClassName = manifest.getMainAttributes().getValue("Main-Class"); + } + jarFile.close(); + + if (mainClassName == null) { + mainClassName = mainJavaclass; + } + mainClassName = mainClassName.replaceAll("/", "."); + + File tmpDir = new File(System.getProperty("java.io.tmpdir")); + tmpDir.mkdirs(); + if (!tmpDir.isDirectory()) { + System.out.println("Mkdirs failed to create " + tmpDir); + } + final File workDir = File.createTempFile("unjar", "", tmpDir); + workDir.delete(); + workDir.mkdirs(); + if (!workDir.isDirectory()) { + System.out.println("Mkdirs failed to create " + workDir); + } + + Runtime.getRuntime().addShutdownHook(new Thread() { + public void run() { + try { + fullyDelete(workDir); + } catch (IOException e) { + } + } + }); + + unJar(file, 
workDir); + + ArrayList classPath = new ArrayList(); + classPath.add(new File(workDir+"/").toURL()); + classPath.add(file.toURL()); + classPath.add(new File(workDir, "classes/").toURL()); + File[] libs = new File(workDir, "lib").listFiles(); + if (libs != null) { + for (int i = 0; i < libs.length; i++) { + classPath.add(libs[i].toURL()); + } + } + + ClassLoader loader = new URLClassLoader(classPath.toArray(new URL[0])); + + Thread.currentThread().setContextClassLoader(loader); + Class mainClass = Class.forName(mainClassName, true, loader); + return mainClass; + } + + public static boolean fullyDelete(File dir) throws IOException { + if (!fullyDeleteContents(dir)) { + return false; + } + return dir.delete(); + } + + /** + * Delete the contents of a directory, not the directory itself. If + * we return false, the directory may be partially-deleted. + */ + public static boolean fullyDeleteContents(File dir) throws IOException { + boolean deletionSucceeded = true; + File contents[] = dir.listFiles(); + if (contents != null) { + for (int i = 0; i < contents.length; i++) { + if (contents[i].isFile()) { + if (!contents[i].delete()) { + deletionSucceeded = false; + continue; // continue deletion of other files/dirs under dir + } + } else { + //try deleting the directory + // this might be a symlink + boolean b = false; + b = contents[i].delete(); + if (b){ + //this was indeed a symlink or an empty directory + continue; + } + // if not an empty directory or symlink let + // fullydelete handle it. 
+ if (!fullyDelete(contents[i])) { + deletionSucceeded = false; + continue; // continue deletion of other files/dirs under dir + } + } + } + } + return deletionSucceeded; + } +} diff --git a/src/main/java/io/mycat/config/loader/ConfigLoader.java b/src/main/java/io/mycat/config/loader/ConfigLoader.java new file mode 100644 index 000000000..6da490802 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/ConfigLoader.java @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.config.loader; + +import java.util.Map; + +import io.mycat.config.model.ClusterConfig; +import io.mycat.config.model.DataHostConfig; +import io.mycat.config.model.DataNodeConfig; +import io.mycat.config.model.FirewallConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.UserConfig; + +/** + * @author mycat + */ +public interface ConfigLoader { + SchemaConfig getSchemaConfig(String schema); + + Map getSchemaConfigs(); + + Map getDataNodes(); + + Map getDataHosts(); + + SystemConfig getSystemConfig(); + + UserConfig getUserConfig(String user); + + Map getUserConfigs(); + + FirewallConfig getFirewallConfig(); + + ClusterConfig getClusterConfig(); +} \ No newline at end of file diff --git a/src/main/java/io/mycat/config/loader/SchemaLoader.java b/src/main/java/io/mycat/config/loader/SchemaLoader.java new file mode 100644 index 000000000..40a09d1fb --- /dev/null +++ b/src/main/java/io/mycat/config/loader/SchemaLoader.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config.loader; + +import java.util.Map; + +import io.mycat.config.model.DataHostConfig; +import io.mycat.config.model.DataNodeConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.rule.TableRuleConfig; + +/** + * @author mycat + */ +public interface SchemaLoader { + + Map getTableRules(); + + Map getDataHosts(); + + Map getDataNodes(); + + Map getSchemas(); + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/config/loader/console/ZookeeperPath.java b/src/main/java/io/mycat/config/loader/console/ZookeeperPath.java new file mode 100644 index 000000000..2a758b758 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/console/ZookeeperPath.java @@ -0,0 +1,218 @@ +package io.mycat.config.loader.console; + +/** + * 专门用来操作zookeeper路径的文件信息 +* 源文件名:ZkPath.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public enum ZookeeperPath { + + /** + * zk的路径分隔符 + * @字段说明 ZK_SEPARATOR + */ + ZK_SEPARATOR("/"), + + /** + * 最基础的mycat节点 + * @字段说明 FLOW_ZK_PATH_LINE + */ + FLOW_ZK_PATH_BASE("mycat"), + + /** + * 在当前在线的节点 + * @字段说明 FLOW_ZK_PATH_LINE + */ + FLOW_ZK_PATH_LINE("line"), + + /** + * schema父路径 + * @字段说明 FOW_ZK_PATH_SCHEMA + */ + FOW_ZK_PATH_SCHEMA("schema"), + + /** + * 配制schema信息 + * @字段说明 FLOW_ZK_PATH_SCHEMA + */ + FLOW_ZK_PATH_SCHEMA_SCHEMA("schema"), + + /** + * 对应数据库信息 + * @字段说明 FLOW_ZK_PATH_SCHEMA_DATANODE + */ + FLOW_ZK_PATH_SCHEMA_DATANODE("dataNode"), + + /** + * 数据库信息dataHost + * @字段说明 FLOW_ZK_PATH_SCHEMA_DATANODE + */ + FLOW_ZK_PATH_SCHEMA_DATAHOST("dataHost"), + + /** + * 路由信息 + * @字段说明 FLOW_ZK_PATH_SCHEMA_DATANODE + */ + FLOW_ZK_PATH_RULE("rules"), + + /** + * 路由信息 + * @字段说明 FLOW_ZK_PATH_SCHEMA_DATANODE + */ + FLOW_ZK_PATH_RULE_TABLERULE("tableRule"), + + /** + * 路由信息 + * @字段说明 FLOW_ZK_PATH_SCHEMA_DATANODE + */ + FLOW_ZK_PATH_RULE_FUNCTION("function"), + + /** + * 服务端配制路径 + * @字段说明 FLOW_ZK_PATH_SERVER + */ + FLOW_ZK_PATH_SERVER("server"), + + /** + * 默认配制信息 + * @字段说明 FLOW_ZK_PATH_SERVER_DEFAULT + */ + FLOW_ZK_PATH_SERVER_DEFAULT("default"), + + /** + * 针对集群的配制信息 + * @字段说明 FLOW_ZK_PATH_SERVER_DEFAULT + */ + FLOW_ZK_PATH_SERVER_CLUSTER("cluster"), + + /** + * 配制的用户信息 + * @字段说明 FLOW_ZK_PATH_SERVER_DEFAULT + */ + FLOW_ZK_PATH_SERVER_USER("user"), + + /** + * 配制的防火墙信息,如黑白名单信息 + * @字段说明 FLOW_ZK_PATH_SERVER_DEFAULT + */ + FLOW_ZK_PATH_SERVER_FIREWALL("firewall"), + + /** + * 表的权限信息 + * @字段说明 FLOW_ZK_PATH_SERVER_AUTH + */ + FLOW_ZK_PATH_SERVER_AUTH("auth"), + + /** + * 序列信息 + * @字段说明 FLOW_ZK_PATH_SERVER_AUTH + */ + FLOW_ZK_PATH_SEQUENCE("sequences"), + + /** + * 序列信息中公共配制信息 + * @字段说明 FLOW_ZK_PATH_SERVER_AUTH + */ + FLOW_ZK_PATH_SEQUENCE_COMMON("common"), + + /** + * 用来存放序列值的信息 + * @字段说明 FLOW_ZK_PATH_SERVER_AUTH + */ + FLOW_ZK_PATH_SEQUENCE_INSTANCE("instance"), + + /** + * 用来存放序列值的 + * @字段说明 FLOW_ZK_PATH_SERVER_AUTH + */ + 
FLOW_ZK_PATH_SEQUENCE_LEADER("leader"), + + /** + * 递增序列号 + * @字段说明 FLOW_ZK_PATH_SERVER_AUTH + */ + FLOW_ZK_PATH_SEQUENCE_INCREMENT_SEQ("incr_sequence"), + + /** + * 序列信息中需要单独节点配制的信息 + * @字段说明 FLOW_ZK_PATH_SERVER_AUTH + */ + FLOW_ZK_PATH_SEQUENCE_CLUSTER("cluster"), + + /** + * 缓存信息 + * @字段说明 FLOW_ZK_PATH_CACHE + */ + FLOW_ZK_PATH_CACHE("cache"), + + /** + * 配制切换及状态目录信息 + * @字段说明 FLOW_ZK_PATH_BINDATA + */ + FLOW_ZK_PATH_BINDATA("bindata"), + + + /** + * 配制切换及状态目录信息 + * @字段说明 FLOW_ZK_PATH_RULEDATA + */ + FLOW_ZK_PATH_RULEATA("ruledata"), + /** + * dnindex切换信息 + * @字段说明 FLOW_ZK_PATH_CACHE + */ + FLOW_ZK_PATH_BINDATA_DNINDEX("dnindex"), + + /** + * 迁移的信息 + * @字段说明 FLOW_ZK_PATH_CACHE + */ + FLOW_ZK_PATH_BINDATA_MOVE("move"), + + /** + * 节点单独的配制信息 + * @字段说明 FLOW_ZK_PATH_NODE + */ + FLOW_ZK_PATH_NODE("node"), + + /**zk写入本地的路径信息 + * @字段说明 ZK_LOCAL_WRITE_PATH + */ + ZK_LOCAL_WRITE_PATH("./"), + // /**zk写入本地的路径信息 + // * @字段说明 ZK_LOCAL_WRITE_PATH + // */ + // ZK_LOCAL_WRITE_PATH("zkdownload/"), + + /** + * zk本地配制目录信息 + * @字段说明 ZK_LOCAL_WRITE_PATH + */ + ZK_LOCAL_CFG_PATH("/zkconf/"), + + ; + /** + * 配制的key的信息 + * @字段说明 key + */ + private String key; + + private ZookeeperPath(String key) { + this.key = key; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + +} diff --git a/src/main/java/io/mycat/config/loader/xml/XMLConfigLoader.java b/src/main/java/io/mycat/config/loader/xml/XMLConfigLoader.java new file mode 100644 index 000000000..cc8fd64d8 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/xml/XMLConfigLoader.java @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config.loader.xml; + +import java.util.Map; + +import io.mycat.config.loader.ConfigLoader; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.model.ClusterConfig; +import io.mycat.config.model.DataHostConfig; +import io.mycat.config.model.DataNodeConfig; +import io.mycat.config.model.FirewallConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.UserConfig; + +/** + * @author mycat + */ +public class XMLConfigLoader implements ConfigLoader { + + /** unmodifiable */ + private final Map dataHosts; + /** unmodifiable */ + private final Map dataNodes; + /** unmodifiable */ + private final Map schemas; + private final SystemConfig system; + /** unmodifiable */ + private final Map users; + private final FirewallConfig firewall; + private final ClusterConfig cluster; + + public XMLConfigLoader(SchemaLoader schemaLoader) { + XMLServerLoader serverLoader = new XMLServerLoader(); + this.system = serverLoader.getSystem(); + this.users = serverLoader.getUsers(); + this.firewall = serverLoader.getFirewall(); + this.cluster = 
serverLoader.getCluster(); + this.dataHosts = schemaLoader.getDataHosts(); + this.dataNodes = schemaLoader.getDataNodes(); + this.schemas = schemaLoader.getSchemas(); + schemaLoader = null; + } + + @Override + public ClusterConfig getClusterConfig() { + return cluster; + } + + @Override + public FirewallConfig getFirewallConfig() { + return firewall; + } + + @Override + public UserConfig getUserConfig(String user) { + return users.get(user); + } + + @Override + public Map getUserConfigs() { + return users; + } + + @Override + public SystemConfig getSystemConfig() { + return system; + } + @Override + public Map getSchemaConfigs() { + return schemas; + } + + @Override + public Map getDataNodes() { + return dataNodes; + } + + @Override + public Map getDataHosts() { + return dataHosts; + } + + @Override + public SchemaConfig getSchemaConfig(String schema) { + return schemas.get(schema); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/config/loader/xml/XMLRuleLoader.java b/src/main/java/io/mycat/config/loader/xml/XMLRuleLoader.java new file mode 100644 index 000000000..084fcff1d --- /dev/null +++ b/src/main/java/io/mycat/config/loader/xml/XMLRuleLoader.java @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config.loader.xml; + +import java.io.IOException; +import java.io.InputStream; +import java.lang.reflect.InvocationTargetException; +import java.sql.SQLSyntaxErrorException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.w3c.dom.Element; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; + +import io.mycat.config.model.rule.RuleConfig; +import io.mycat.config.model.rule.TableRuleConfig; +import io.mycat.config.util.ConfigException; +import io.mycat.config.util.ConfigUtil; +import io.mycat.config.util.ParameterMapping; +import io.mycat.route.function.AbstractPartitionAlgorithm; +import io.mycat.util.SplitUtil; + +/** + * @author mycat + */ +@SuppressWarnings("unchecked") +public class XMLRuleLoader { + private final static String DEFAULT_DTD = "/rule.dtd"; + private final static String DEFAULT_XML = "/rule.xml"; + + private final Map tableRules; + // private final Set rules; + private final Map functions; + + public XMLRuleLoader(String ruleFile) { + // this.rules = new HashSet(); + //rule名 -> rule + this.tableRules = new HashMap(); + //function名 -> 具体分片算法 + this.functions = new HashMap(); + load(DEFAULT_DTD, ruleFile == null ? DEFAULT_XML : ruleFile); + } + + public XMLRuleLoader() { + this(null); + } + + public Map getTableRules() { + return (Map) (tableRules.isEmpty() ? 
Collections + .emptyMap() : tableRules); + } + + + + + private void load(String dtdFile, String xmlFile) { + InputStream dtd = null; + InputStream xml = null; + try { + dtd = XMLRuleLoader.class.getResourceAsStream(dtdFile); + xml = XMLRuleLoader.class.getResourceAsStream(xmlFile); + //读取出语意树 + Element root = ConfigUtil.getDocument(dtd, xml) + .getDocumentElement(); + //加载Function + loadFunctions(root); + //加载TableRule + loadTableRules(root); + } catch (ConfigException e) { + throw e; + } catch (Exception e) { + throw new ConfigException(e); + } finally { + if (dtd != null) { + try { + dtd.close(); + } catch (IOException e) { + } + } + if (xml != null) { + try { + xml.close(); + } catch (IOException e) { + } + } + } + } + + /** + * tableRule标签结构: + * + * + * create_date + * partbymonth + * + * + * @param root + * @throws SQLSyntaxErrorException + */ + private void loadTableRules(Element root) throws SQLSyntaxErrorException { + //获取每个tableRule标签 + NodeList list = root.getElementsByTagName("tableRule"); + for (int i = 0, n = list.getLength(); i < n; ++i) { + Node node = list.item(i); + if (node instanceof Element) { + Element e = (Element) node; + //先判断是否重复 + String name = e.getAttribute("name"); + if (tableRules.containsKey(name)) { + throw new ConfigException("table rule " + name + + " duplicated!"); + } + //获取rule标签 + NodeList ruleNodes = e.getElementsByTagName("rule"); + int length = ruleNodes.getLength(); + if (length > 1) { + throw new ConfigException("only one rule can defined :" + + name); + } + //目前只处理第一个,未来可能有多列复合逻辑需求 + //RuleConfig是保存着rule与function对应关系的对象 + RuleConfig rule = loadRule((Element) ruleNodes.item(0)); + String funName = rule.getFunctionName(); + //判断function是否存在,获取function + AbstractPartitionAlgorithm func = functions.get(funName); + if (func == null) { + throw new ConfigException("can't find function of name :" + + funName); + } + rule.setRuleAlgorithm(func); + //保存到tableRules + tableRules.put(name, new TableRuleConfig(name, rule)); + } + } + 
} + + private RuleConfig loadRule(Element element) throws SQLSyntaxErrorException { + //读取columns + Element columnsEle = ConfigUtil.loadElement(element, "columns"); + String column = columnsEle.getTextContent(); + String[] columns = SplitUtil.split(column, ',', true); + if (columns.length > 1) { + throw new ConfigException("table rule coulmns has multi values:" + + columnsEle.getTextContent()); + } + //读取algorithm + Element algorithmEle = ConfigUtil.loadElement(element, "algorithm"); + String algorithm = algorithmEle.getTextContent(); + return new RuleConfig(column.toUpperCase(), algorithm); + } + + /** + * function标签结构: + * + * yyyy-MM-dd + * 2015-01-01 + * + * @param root + * @throws ClassNotFoundException + * @throws InstantiationException + * @throws IllegalAccessException + * @throws InvocationTargetException + */ + private void loadFunctions(Element root) throws ClassNotFoundException, + InstantiationException, IllegalAccessException, + InvocationTargetException { + NodeList list = root.getElementsByTagName("function"); + for (int i = 0, n = list.getLength(); i < n; ++i) { + Node node = list.item(i); + if (node instanceof Element) { + Element e = (Element) node; + //获取name标签 + String name = e.getAttribute("name"); + //如果Map已有,则function重复 + if (functions.containsKey(name)) { + throw new ConfigException("rule function " + name + + " duplicated!"); + } + //获取class标签 + String clazz = e.getAttribute("class"); + //根据class利用反射新建分片算法 + AbstractPartitionAlgorithm function = createFunction(name, clazz); + //根据读取参数配置分片算法 + ParameterMapping.mapping(function, ConfigUtil.loadElements(e)); + //每个AbstractPartitionAlgorithm可能会实现init来初始化 + function.init(); + //放入functions map + functions.put(name, function); + } + } + } + + private AbstractPartitionAlgorithm createFunction(String name, String clazz) + throws ClassNotFoundException, InstantiationException, + IllegalAccessException, InvocationTargetException { + Class clz = Class.forName(clazz); + 
//判断是否继承AbstractPartitionAlgorithm + if (!AbstractPartitionAlgorithm.class.isAssignableFrom(clz)) { + throw new IllegalArgumentException("rule function must implements " + + AbstractPartitionAlgorithm.class.getName() + ", name=" + name); + } + return (AbstractPartitionAlgorithm) clz.newInstance(); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/config/loader/xml/XMLSchemaLoader.java b/src/main/java/io/mycat/config/loader/xml/XMLSchemaLoader.java new file mode 100644 index 000000000..65fda5329 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/xml/XMLSchemaLoader.java @@ -0,0 +1,820 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.config.loader.xml; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.text.SimpleDateFormat; +import java.util.*; + +import io.mycat.config.model.rule.RuleConfig; +import io.mycat.route.function.TableRuleAware; +import io.mycat.util.ObjectUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.w3c.dom.Element; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; + +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.model.DBHostConfig; +import io.mycat.config.model.DataHostConfig; +import io.mycat.config.model.DataNodeConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.config.model.TableConfigMap; +import io.mycat.config.model.rule.TableRuleConfig; +import io.mycat.config.util.ConfigException; +import io.mycat.config.util.ConfigUtil; +import io.mycat.route.function.AbstractPartitionAlgorithm; +import io.mycat.util.DecryptUtil; +import io.mycat.util.SplitUtil; + +/** + * @author mycat + */ +@SuppressWarnings("unchecked") +public class XMLSchemaLoader implements SchemaLoader { + + private static final Logger LOGGER = LoggerFactory.getLogger(XMLSchemaLoader.class); + + private final static String DEFAULT_DTD = "/schema.dtd"; + private final static String DEFAULT_XML = "/schema.xml"; + + private final Map tableRules; + private final Map dataHosts; + private final Map dataNodes; + private final Map schemas; + + public XMLSchemaLoader(String schemaFile, String ruleFile) { + //先读取rule.xml + XMLRuleLoader ruleLoader = new XMLRuleLoader(ruleFile); + //将tableRules拿出,用于这里加载Schema做rule有效判断,以及之后的分片路由计算 + this.tableRules = ruleLoader.getTableRules(); + //释放ruleLoader + ruleLoader = null; + this.dataHosts = new HashMap(); + this.dataNodes = new HashMap(); + this.schemas = new HashMap(); + //读取加载schema配置 + this.load(DEFAULT_DTD, schemaFile == null ? 
DEFAULT_XML : schemaFile); + } + + public XMLSchemaLoader() { + this(null, null); + } + + @Override + public Map getTableRules() { + return tableRules; + } + + @Override + public Map getDataHosts() { + return (Map) (dataHosts.isEmpty() ? Collections.emptyMap() : dataHosts); + } + + @Override + public Map getDataNodes() { + return (Map) (dataNodes.isEmpty() ? Collections.emptyMap() : dataNodes); + } + + @Override + public Map getSchemas() { + return (Map) (schemas.isEmpty() ? Collections.emptyMap() : schemas); + } + + private void load(String dtdFile, String xmlFile) { + InputStream dtd = null; + InputStream xml = null; + try { + dtd = XMLSchemaLoader.class.getResourceAsStream(dtdFile); + xml = XMLSchemaLoader.class.getResourceAsStream(xmlFile); + Element root = ConfigUtil.getDocument(dtd, xml).getDocumentElement(); + //先加载所有的DataHost + loadDataHosts(root); + //再加载所有的DataNode + loadDataNodes(root); + //最后加载所有的Schema + loadSchemas(root); + } catch (ConfigException e) { + throw e; + } catch (Exception e) { + throw new ConfigException(e); + } finally { + + if (dtd != null) { + try { + dtd.close(); + } catch (IOException e) { + } + } + + if (xml != null) { + try { + xml.close(); + } catch (IOException e) { + } + } + } + } + + private void loadSchemas(Element root) { + NodeList list = root.getElementsByTagName("schema"); + for (int i = 0, n = list.getLength(); i < n; i++) { + Element schemaElement = (Element) list.item(i); + //读取各个属性 + String name = schemaElement.getAttribute("name"); + String dataNode = schemaElement.getAttribute("dataNode"); + String checkSQLSchemaStr = schemaElement.getAttribute("checkSQLschema"); + String sqlMaxLimitStr = schemaElement.getAttribute("sqlMaxLimit"); + int sqlMaxLimit = -1; + //读取sql返回结果集限制 + if (sqlMaxLimitStr != null && !sqlMaxLimitStr.isEmpty()) { + sqlMaxLimit = Integer.parseInt(sqlMaxLimitStr); + } + + // check dataNode already exists or not,看schema标签中是否有datanode + String defaultDbType = null; + //校验检查并添加dataNode + if (dataNode != 
null && !dataNode.isEmpty()) { + List dataNodeLst = new ArrayList(1); + dataNodeLst.add(dataNode); + checkDataNodeExists(dataNodeLst); + String dataHost = dataNodes.get(dataNode).getDataHost(); + defaultDbType = dataHosts.get(dataHost).getDbType(); + } else { + dataNode = null; + } + //加载schema下所有tables + Map tables = loadTables(schemaElement); + //判断schema是否重复 + if (schemas.containsKey(name)) { + throw new ConfigException("schema " + name + " duplicated!"); + } + + // 设置了table的不需要设置dataNode属性,没有设置table的必须设置dataNode属性 + if (dataNode == null && tables.size() == 0) { + throw new ConfigException( + "schema " + name + " didn't config tables,so you must set dataNode property!"); + } + + SchemaConfig schemaConfig = new SchemaConfig(name, dataNode, + tables, sqlMaxLimit, "true".equalsIgnoreCase(checkSQLSchemaStr)); + + //设定DB类型,这对之后的sql语句路由解析有帮助 + if (defaultDbType != null) { + schemaConfig.setDefaultDataNodeDbType(defaultDbType); + if (!"mysql".equalsIgnoreCase(defaultDbType)) { + schemaConfig.setNeedSupportMultiDBType(true); + } + } + + // 判断是否有不是mysql的数据库类型,方便解析判断是否启用多数据库分页语法解析 + for (TableConfig tableConfig : tables.values()) { + if (isHasMultiDbType(tableConfig)) { + schemaConfig.setNeedSupportMultiDBType(true); + break; + } + } + //记录每种dataNode的DB类型 + Map dataNodeDbTypeMap = new HashMap<>(); + for (String dataNodeName : dataNodes.keySet()) { + DataNodeConfig dataNodeConfig = dataNodes.get(dataNodeName); + String dataHost = dataNodeConfig.getDataHost(); + DataHostConfig dataHostConfig = dataHosts.get(dataHost); + if (dataHostConfig != null) { + String dbType = dataHostConfig.getDbType(); + dataNodeDbTypeMap.put(dataNodeName, dbType); + } + } + schemaConfig.setDataNodeDbTypeMap(dataNodeDbTypeMap); + schemas.put(name, schemaConfig); + } + } + + + /** + * 处理动态日期表, 支持 YYYYMM、YYYYMMDD 两种格式 + * + * YYYYMM格式: yyyymm,2015,01,60 + * YYYYMMDD格式: yyyymmdd,2015,01,10,50 + * + * @param tableNameElement + * @param tableNameSuffixElement + * @return + */ + private String 
doTableNameSuffix(String tableNameElement, String tableNameSuffixElement) { + + String newTableName = tableNameElement; + + String[] params = tableNameSuffixElement.split(","); + String suffixFormat = params[0].toUpperCase(); + if ( suffixFormat.equals("YYYYMM") ) { + + //读取参数 + int yyyy = Integer.parseInt( params[1] ); + int mm = Integer.parseInt( params[2] ); + int mmEndIdx = Integer.parseInt( params[3] ); + + //日期处理 + SimpleDateFormat yyyyMMSDF = new SimpleDateFormat("yyyyMM"); + + Calendar cal = Calendar.getInstance(); + cal.set(Calendar.YEAR, yyyy ); + cal.set(Calendar.MONTH, mm - 1 ); + cal.set(Calendar.DATE, 0 ); + + //表名改写 + StringBuffer tableNameBuffer = new StringBuffer(); + for(int mmIdx = 0; mmIdx <= mmEndIdx; mmIdx++) { + tableNameBuffer.append( tableNameElement ); + tableNameBuffer.append( yyyyMMSDF.format(cal.getTime()) ); + cal.add(Calendar.MONTH, 1); + + if ( mmIdx != mmEndIdx) { + tableNameBuffer.append(","); + } + } + newTableName = tableNameBuffer.toString(); + + } else if ( suffixFormat.equals("YYYYMMDD") ) { + + //读取参数 + int yyyy = Integer.parseInt( params[1] ); + int mm = Integer.parseInt( params[2] ); + int dd = Integer.parseInt( params[3] ); + int ddEndIdx = Integer.parseInt( params[4] ); + + //日期处理 + SimpleDateFormat yyyyMMddSDF = new SimpleDateFormat("yyyyMMdd"); + + Calendar cal = Calendar.getInstance(); + cal.set(Calendar.YEAR, yyyy ); + cal.set(Calendar.MONTH, mm - 1 ); + cal.set(Calendar.DATE, dd ); + + //表名改写 + StringBuffer tableNameBuffer = new StringBuffer(); + for(int ddIdx = 0; ddIdx <= ddEndIdx; ddIdx++) { + tableNameBuffer.append( tableNameElement ); + tableNameBuffer.append( yyyyMMddSDF.format(cal.getTime()) ); + + cal.add(Calendar.DATE, 1); + + if ( ddIdx != ddEndIdx) { + tableNameBuffer.append(","); + } + } + newTableName = tableNameBuffer.toString(); + } + return newTableName; + } + + + private Map loadTables(Element node) { + + // Map tables = new HashMap(); + + // 支持表名中包含引号[`] BEN GONG + Map tables = new TableConfigMap(); 
+ NodeList nodeList = node.getElementsByTagName("table"); + for (int i = 0; i < nodeList.getLength(); i++) { + Element tableElement = (Element) nodeList.item(i); + String tableNameElement = tableElement.getAttribute("name").toUpperCase(); + + //TODO:路由, 增加对动态日期表的支持 + String tableNameSuffixElement = tableElement.getAttribute("nameSuffix").toUpperCase(); + if ( !"".equals( tableNameSuffixElement ) ) { + + if( tableNameElement.split(",").length > 1 ) { + throw new ConfigException("nameSuffix " + tableNameSuffixElement + ", require name parameter cannot multiple breaks!"); + } + //前缀用来标明日期格式 + tableNameElement = doTableNameSuffix(tableNameElement, tableNameSuffixElement); + } + //记录主键,用于之后路由分析,以及启用自增长主键 + String[] tableNames = tableNameElement.split(","); + String primaryKey = tableElement.hasAttribute("primaryKey") ? tableElement.getAttribute("primaryKey").toUpperCase() : null; + //记录是否主键自增,默认不是,(启用全局sequence handler) + boolean autoIncrement = false; + if (tableElement.hasAttribute("autoIncrement")) { + autoIncrement = Boolean.parseBoolean(tableElement.getAttribute("autoIncrement")); + } + //记录是否需要加返回结果集限制,默认需要加 + boolean needAddLimit = true; + if (tableElement.hasAttribute("needAddLimit")) { + needAddLimit = Boolean.parseBoolean(tableElement.getAttribute("needAddLimit")); + } + //记录type,是否为global + String tableTypeStr = tableElement.hasAttribute("type") ? 
tableElement.getAttribute("type") : null; + int tableType = TableConfig.TYPE_GLOBAL_DEFAULT; + if ("global".equalsIgnoreCase(tableTypeStr)) { + tableType = TableConfig.TYPE_GLOBAL_TABLE; + } + //记录dataNode,就是分布在哪些dataNode上 + String dataNode = tableElement.getAttribute("dataNode"); + TableRuleConfig tableRule = null; + if (tableElement.hasAttribute("rule")) { + String ruleName = tableElement.getAttribute("rule"); + tableRule = tableRules.get(ruleName); + if (tableRule == null) { + throw new ConfigException("rule " + ruleName + " is not found!"); + } + } + + boolean ruleRequired = false; + //记录是否绑定有分片规则 + if (tableElement.hasAttribute("ruleRequired")) { + ruleRequired = Boolean.parseBoolean(tableElement.getAttribute("ruleRequired")); + } + + if (tableNames == null) { + throw new ConfigException("table name is not found!"); + } + //distribute函数,重新编排dataNode + String distPrex = "distribute("; + boolean distTableDns = dataNode.startsWith(distPrex); + if (distTableDns) { + dataNode = dataNode.substring(distPrex.length(), dataNode.length() - 1); + } + //分表功能 + String subTables = tableElement.getAttribute("subTables"); + + for (int j = 0; j < tableNames.length; j++) { + + String tableName = tableNames[j]; + TableRuleConfig tableRuleConfig=tableRule ; + if(tableRuleConfig!=null) { + //对于实现TableRuleAware的function进行特殊处理 根据每个表新建个实例 + RuleConfig rule= tableRuleConfig.getRule(); + if(rule.getRuleAlgorithm() instanceof TableRuleAware) { + tableRuleConfig = (TableRuleConfig) ObjectUtil.copyObject(tableRuleConfig); + tableRules.remove(tableRuleConfig.getName()) ; + String newRuleName = tableRuleConfig.getName() + "_" + tableName; + tableRuleConfig. 
setName(newRuleName); + TableRuleAware tableRuleAware= (TableRuleAware) tableRuleConfig.getRule().getRuleAlgorithm(); + tableRuleAware.setRuleName(newRuleName); + tableRuleAware.setTableName(tableName); + tableRuleConfig.getRule().getRuleAlgorithm().init(); + tableRules.put(newRuleName,tableRuleConfig); + } + } + + TableConfig table = new TableConfig(tableName, primaryKey, + autoIncrement, needAddLimit, tableType, dataNode, + getDbType(dataNode), + (tableRuleConfig != null) ? tableRuleConfig.getRule() : null, + ruleRequired, null, false, null, null,subTables); + + checkDataNodeExists(table.getDataNodes()); + // 检查分片表分片规则配置是否合法 + if(table.getRule() != null) { + checkRuleSuitTable(table); + } + + if (distTableDns) { + distributeDataNodes(table.getDataNodes()); + } + //检查去重 + if (tables.containsKey(table.getName())) { + throw new ConfigException("table " + tableName + " duplicated!"); + } + //放入map + tables.put(table.getName(), table); + } + //只有tableName配置的是单个表(没有逗号)的时候才能有子表 + if (tableNames.length == 1) { + TableConfig table = tables.get(tableNames[0]); + // process child tables + processChildTables(tables, table, dataNode, tableElement); + } + } + return tables; + } + + /** + * distribute datanodes in multi hosts,means ,dn1 (host1),dn100 + * (host2),dn300(host3),dn2(host1),dn101(host2),dn301(host3)...etc + * 将每个host上的datanode按照host重新排列。比如上面的例子host1拥有dn1,dn2,host2拥有dn100,dn101,host3拥有dn300,dn301, + * 按照host重新排列: 0->dn1 (host1),1->dn100(host2),2->dn300(host3),3->dn2(host1),4->dn101(host2),5->dn301(host3) + * + * @param theDataNodes + */ + private void distributeDataNodes(ArrayList theDataNodes) { + Map> newDataNodeMap = new HashMap>(dataHosts.size()); + for (String dn : theDataNodes) { + DataNodeConfig dnConf = dataNodes.get(dn); + String host = dnConf.getDataHost(); + ArrayList hostDns = newDataNodeMap.get(host); + hostDns = (hostDns == null) ? 
new ArrayList() : hostDns; + hostDns.add(dn); + newDataNodeMap.put(host, hostDns); + } + + ArrayList result = new ArrayList(theDataNodes.size()); + boolean hasData = true; + while (hasData) { + hasData = false; + for (ArrayList dns : newDataNodeMap.values()) { + if (!dns.isEmpty()) { + result.add(dns.remove(0)); + hasData = true; + } + } + } + theDataNodes.clear(); + theDataNodes.addAll(result); + } + + private Set getDbType(String dataNode) { + Set dbTypes = new HashSet<>(); + String[] dataNodeArr = SplitUtil.split(dataNode, ',', '$', '-'); + for (String node : dataNodeArr) { + DataNodeConfig datanode = dataNodes.get(node); + DataHostConfig datahost = dataHosts.get(datanode.getDataHost()); + dbTypes.add(datahost.getDbType()); + } + + return dbTypes; + } + + private Set getDataNodeDbTypeMap(String dataNode) { + Set dbTypes = new HashSet<>(); + String[] dataNodeArr = SplitUtil.split(dataNode, ',', '$', '-'); + for (String node : dataNodeArr) { + DataNodeConfig datanode = dataNodes.get(node); + DataHostConfig datahost = dataHosts.get(datanode.getDataHost()); + dbTypes.add(datahost.getDbType()); + } + return dbTypes; + } + + private boolean isHasMultiDbType(TableConfig table) { + Set dbTypes = table.getDbTypes(); + for (String dbType : dbTypes) { + if (!"mysql".equalsIgnoreCase(dbType)) { + return true; + } + } + return false; + } + + private void processChildTables(Map tables, + TableConfig parentTable, String dataNodes, Element tableNode) { + + // parse child tables + NodeList childNodeList = tableNode.getChildNodes(); + for (int j = 0; j < childNodeList.getLength(); j++) { + Node theNode = childNodeList.item(j); + if (!theNode.getNodeName().equals("childTable")) { + continue; + } + Element childTbElement = (Element) theNode; + //读取子表信息 + String cdTbName = childTbElement.getAttribute("name").toUpperCase(); + String primaryKey = childTbElement.hasAttribute("primaryKey") ? 
childTbElement.getAttribute("primaryKey").toUpperCase() : null; + + boolean autoIncrement = false; + if (childTbElement.hasAttribute("autoIncrement")) { + autoIncrement = Boolean.parseBoolean(childTbElement.getAttribute("autoIncrement")); + } + boolean needAddLimit = true; + if (childTbElement.hasAttribute("needAddLimit")) { + needAddLimit = Boolean.parseBoolean(childTbElement.getAttribute("needAddLimit")); + } + String subTables = childTbElement.getAttribute("subTables"); + //子表join键,和对应的parent的键,父子表通过这个关联 + String joinKey = childTbElement.getAttribute("joinKey").toUpperCase(); + String parentKey = childTbElement.getAttribute("parentKey").toUpperCase(); + TableConfig table = new TableConfig(cdTbName, primaryKey, + autoIncrement, needAddLimit, + TableConfig.TYPE_GLOBAL_DEFAULT, dataNodes, + getDbType(dataNodes), null, false, parentTable, true, + joinKey, parentKey, subTables); + + if (tables.containsKey(table.getName())) { + throw new ConfigException("table " + table.getName() + " duplicated!"); + } + tables.put(table.getName(), table); + //对于子表的子表,递归处理 + processChildTables(tables, table, dataNodes, childTbElement); + } + } + + private void checkDataNodeExists(Collection nodes) { + if (nodes == null || nodes.size() < 1) { + return; + } + for (String node : nodes) { + if (!dataNodes.containsKey(node)) { + throw new ConfigException("dataNode '" + node + "' is not found!"); + } + } + } + + /** + * 检查分片表分片规则配置, 目前主要检查分片表分片算法定义与分片dataNode是否匹配
+ * 例如分片表定义如下:
+ * {@code + *
+ * } + *
+ * 分片算法如下:
+ * {@code + * + + 3 + + * } + *
+ * shard table datanode(2) < function count(3) 此时检测为不匹配 + */ + private void checkRuleSuitTable(TableConfig tableConf) { + AbstractPartitionAlgorithm function = tableConf.getRule().getRuleAlgorithm(); + int suitValue = function.suitableFor(tableConf); + switch(suitValue) { + case -1: + // 少节点,给提示并抛异常 + throw new ConfigException("Illegal table conf : table [ " + tableConf.getName() + " ] rule function [ " + + tableConf.getRule().getFunctionName() + " ] partition size : " + tableConf.getRule().getRuleAlgorithm().getPartitionNum() + " > table datanode size : " + + tableConf.getDataNodes().size() + ", please make sure table datanode size = function partition size"); + case 0: + // table datanode size == rule function partition size + break; + case 1: + // 有些节点是多余的,给出warn log + LOGGER.warn("table conf : table [ {} ] rule function [ {} ] partition size : {} < table datanode size : {} , this cause some datanode to be redundant", + new String[]{ + tableConf.getName(), + tableConf.getRule().getFunctionName(), + String.valueOf(tableConf.getRule().getRuleAlgorithm().getPartitionNum()), + String.valueOf(tableConf.getDataNodes().size()) + }); + break; + } + } + + private void loadDataNodes(Element root) { + //读取DataNode分支 + NodeList list = root.getElementsByTagName("dataNode"); + for (int i = 0, n = list.getLength(); i < n; i++) { + Element element = (Element) list.item(i); + String dnNamePre = element.getAttribute("name"); + + String databaseStr = element.getAttribute("database"); + String host = element.getAttribute("dataHost"); + //字符串不为空 + if (empty(dnNamePre) || empty(databaseStr) || empty(host)) { + throw new ConfigException("dataNode " + dnNamePre + " define error ,attribute can't be empty"); + } + //dnNames(name),databases(database),hostStrings(dataHost)都可以配置多个,以',', '$', '-'区分,但是需要保证database的个数*dataHost的个数=name的个数 + //多个dataHost与多个database如果写在一个标签,则每个dataHost拥有所有database + //例如: + //则为:localhost1拥有dn1$0-75,localhost2也拥有dn1$0-75(对应db$76-151) + String[] dnNames = 
io.mycat.util.SplitUtil.split(dnNamePre, ',', '$', '-'); + String[] databases = io.mycat.util.SplitUtil.split(databaseStr, ',', '$', '-'); + String[] hostStrings = io.mycat.util.SplitUtil.split(host, ',', '$', '-'); + + if (dnNames.length > 1 && dnNames.length != databases.length * hostStrings.length) { + throw new ConfigException("dataNode " + dnNamePre + + " define error ,dnNames.length must be=databases.length*hostStrings.length"); + } + if (dnNames.length > 1) { + + List mhdList = mergerHostDatabase(hostStrings, databases); + for (int k = 0; k < dnNames.length; k++) { + String[] hd = mhdList.get(k); + String dnName = dnNames[k]; + String databaseName = hd[1]; + String hostName = hd[0]; + createDataNode(dnName, databaseName, hostName); + } + + } else { + createDataNode(dnNamePre, databaseStr, host); + } + + } + } + + /** + * 匹配DataHost和Database,每个DataHost拥有每个Database名字 + * @param hostStrings + * @param databases + * @return + */ + private List mergerHostDatabase(String[] hostStrings, String[] databases) { + List mhdList = new ArrayList<>(); + for (int i = 0; i < hostStrings.length; i++) { + String hostString = hostStrings[i]; + for (int i1 = 0; i1 < databases.length; i1++) { + String database = databases[i1]; + String[] hd = new String[2]; + hd[0] = hostString; + hd[1] = database; + mhdList.add(hd); + } + } + return mhdList; + } + + private void createDataNode(String dnName, String database, String host) { + + DataNodeConfig conf = new DataNodeConfig(dnName, database, host); + if (dataNodes.containsKey(conf.getName())) { + throw new ConfigException("dataNode " + conf.getName() + " duplicated!"); + } + + if (!dataHosts.containsKey(host)) { + throw new ConfigException("dataNode " + dnName + " reference dataHost:" + host + " not exists!"); + } + + dataHosts.get(host).addDataNode(conf.getName()); + dataNodes.put(conf.getName(), conf); + } + + private boolean empty(String dnName) { + return dnName == null || dnName.length() == 0; + } + + private DBHostConfig 
createDBHostConf(String dataHost, Element node, + String dbType, String dbDriver, int maxCon, int minCon, String filters, long logTime) { + + String nodeHost = node.getAttribute("host"); + String nodeUrl = node.getAttribute("url"); + String user = node.getAttribute("user"); + String password = node.getAttribute("password"); + String usingDecrypt = node.getAttribute("usingDecrypt"); + String passwordEncryty= DecryptUtil.DBHostDecrypt(usingDecrypt, nodeHost, user, password); + + String weightStr = node.getAttribute("weight"); + int weight = "".equals(weightStr) ? PhysicalDBPool.WEIGHT : Integer.parseInt(weightStr) ; + + String ip = null; + int port = 0; + if (empty(nodeHost) || empty(nodeUrl) || empty(user)) { + throw new ConfigException( + "dataHost " + + dataHost + + " define error,some attributes of this element is empty: " + + nodeHost); + } + if ("native".equalsIgnoreCase(dbDriver)) { + int colonIndex = nodeUrl.indexOf(':'); + ip = nodeUrl.substring(0, colonIndex).trim(); + port = Integer.parseInt(nodeUrl.substring(colonIndex + 1).trim()); + } else { + URI url; + try { + url = new URI(nodeUrl.substring(5)); + } catch (Exception e) { + throw new ConfigException("invalid jdbc url " + nodeUrl + " of " + dataHost); + } + ip = url.getHost(); + port = url.getPort(); + } + + DBHostConfig conf = new DBHostConfig(nodeHost, ip, port, nodeUrl, user, passwordEncryty,password); + conf.setDbType(dbType); + conf.setMaxCon(maxCon); + conf.setMinCon(minCon); + conf.setFilters(filters); + conf.setLogTime(logTime); + conf.setWeight(weight); //新增权重 + return conf; + } + + private void loadDataHosts(Element root) { + NodeList list = root.getElementsByTagName("dataHost"); + for (int i = 0, n = list.getLength(); i < n; ++i) { + + Element element = (Element) list.item(i); + String name = element.getAttribute("name"); + //判断是否重复 + if (dataHosts.containsKey(name)) { + throw new ConfigException("dataHost name " + name + "duplicated!"); + } + //读取最大连接数 + int maxCon = 
Integer.parseInt(element.getAttribute("maxCon")); + //读取最小连接数 + int minCon = Integer.parseInt(element.getAttribute("minCon")); + /** + * 读取负载均衡配置 + * 1. balance="0", 不开启分离机制,所有读操作都发送到当前可用的 writeHost 上。 + * 2. balance="1",全部的 readHost 和 stand by writeHost 参不 select 的负载均衡 + * 3. balance="2",所有读操作都随机的在 writeHost、readhost 上分发。 + * 4. balance="3",所有读请求随机的分发到 wiriterHost 对应的 readhost 执行,writerHost 不负担读压力 + */ + int balance = Integer.parseInt(element.getAttribute("balance")); + /** + * 读取切换类型 + * -1 表示不自动切换 + * 1 默认值,自动切换 + * 2 基于MySQL主从同步的状态决定是否切换 + * 心跳询句为 show slave status + * 3 基于 MySQL galary cluster 的切换机制 + */ + String switchTypeStr = element.getAttribute("switchType"); + int switchType = switchTypeStr.equals("") ? -1 : Integer.parseInt(switchTypeStr); + //读取从延迟界限 + String slaveThresholdStr = element.getAttribute("slaveThreshold"); + int slaveThreshold = slaveThresholdStr.equals("") ? -1 : Integer.parseInt(slaveThresholdStr); + + //如果 tempReadHostAvailable 设置大于 0 则表示写主机如果挂掉, 临时的读服务依然可用 + String tempReadHostAvailableStr = element.getAttribute("tempReadHostAvailable"); + boolean tempReadHostAvailable = !tempReadHostAvailableStr.equals("") && Integer.parseInt(tempReadHostAvailableStr) > 0; + /** + * 读取 写类型 + * 这里只支持 0 - 所有写操作仅配置的第一个 writeHost + */ + String writeTypStr = element.getAttribute("writeType"); + int writeType = "".equals(writeTypStr) ? PhysicalDBPool.WRITE_ONLYONE_NODE : Integer.parseInt(writeTypStr); + + + String dbDriver = element.getAttribute("dbDriver"); + String dbType = element.getAttribute("dbType"); + String filters = element.getAttribute("filters"); + String logTimeStr = element.getAttribute("logTime"); + String slaveIDs = element.getAttribute("slaveIDs"); + long logTime = "".equals(logTimeStr) ? 
PhysicalDBPool.LONG_TIME : Long.parseLong(logTimeStr) ; + //读取心跳语句 + String heartbeatSQL = element.getElementsByTagName("heartbeat").item(0).getTextContent(); + //读取 初始化sql配置,用于oracle + NodeList connectionInitSqlList = element.getElementsByTagName("connectionInitSql"); + String initConSQL = null; + if (connectionInitSqlList.getLength() > 0) { + initConSQL = connectionInitSqlList.item(0).getTextContent(); + } + //读取writeHost + NodeList writeNodes = element.getElementsByTagName("writeHost"); + DBHostConfig[] writeDbConfs = new DBHostConfig[writeNodes.getLength()]; + Map readHostsMap = new HashMap(2); + Set writeHostNameSet = new HashSet(writeNodes.getLength()); + for (int w = 0; w < writeDbConfs.length; w++) { + Element writeNode = (Element) writeNodes.item(w); + writeDbConfs[w] = createDBHostConf(name, writeNode, dbType, dbDriver, maxCon, minCon,filters,logTime); + if(writeHostNameSet.contains(writeDbConfs[w].getHostName())) { + throw new ConfigException("writeHost " + writeDbConfs[w].getHostName() + " duplicated!"); + } else { + writeHostNameSet.add(writeDbConfs[w].getHostName()); + } + NodeList readNodes = writeNode.getElementsByTagName("readHost"); + //读取对应的每一个readHost + if (readNodes.getLength() != 0) { + DBHostConfig[] readDbConfs = new DBHostConfig[readNodes.getLength()]; + Set readHostNameSet = new HashSet(readNodes.getLength()); + for (int r = 0; r < readDbConfs.length; r++) { + Element readNode = (Element) readNodes.item(r); + readDbConfs[r] = createDBHostConf(name, readNode, dbType, dbDriver, maxCon, minCon,filters, logTime); + if(readHostNameSet.contains(readDbConfs[r].getHostName())) { + throw new ConfigException("readHost " + readDbConfs[r].getHostName() + " duplicated!"); + } else { + readHostNameSet.add(readDbConfs[r].getHostName()); + } + } + readHostsMap.put(w, readDbConfs); + } + } + + DataHostConfig hostConf = new DataHostConfig(name, dbType, dbDriver, + writeDbConfs, readHostsMap, switchType, slaveThreshold, tempReadHostAvailable); + + 
hostConf.setMaxCon(maxCon); + hostConf.setMinCon(minCon); + hostConf.setBalance(balance); + hostConf.setWriteType(writeType); + hostConf.setHearbeatSQL(heartbeatSQL); + hostConf.setConnectionInitSql(initConSQL); + hostConf.setFilters(filters); + hostConf.setLogTime(logTime); + hostConf.setSlaveIDs(slaveIDs); + dataHosts.put(hostConf.getName(), hostConf); + } + } + +} diff --git a/src/main/java/io/mycat/config/loader/xml/XMLServerLoader.java b/src/main/java/io/mycat/config/loader/xml/XMLServerLoader.java new file mode 100644 index 000000000..e2b926214 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/xml/XMLServerLoader.java @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.config.loader.xml; + +import java.io.IOException; +import java.io.InputStream; +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.regex.Pattern; + +import io.mycat.config.Versions; +import org.w3c.dom.Element; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; + +import com.alibaba.druid.wall.WallConfig; + +import io.mycat.config.model.ClusterConfig; +import io.mycat.config.model.FirewallConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.UserConfig; +import io.mycat.config.model.UserPrivilegesConfig; +import io.mycat.config.util.ConfigException; +import io.mycat.config.util.ConfigUtil; +import io.mycat.config.util.ParameterMapping; +import io.mycat.util.DecryptUtil; +import io.mycat.util.SplitUtil; + +/** + * @author mycat + */ +@SuppressWarnings("unchecked") +public class XMLServerLoader { + private final SystemConfig system; + private final Map users; + private final FirewallConfig firewall; + private ClusterConfig cluster; + + public XMLServerLoader() { + this.system = new SystemConfig(); + this.users = new HashMap(); + this.firewall = new FirewallConfig(); + this.load(); + } + + public SystemConfig getSystem() { + return system; + } + + public Map getUsers() { + return (Map) (users.isEmpty() ? 
Collections.emptyMap() : Collections.unmodifiableMap(users)); + } + + public FirewallConfig getFirewall() { + return firewall; + } + + public ClusterConfig getCluster() { + return cluster; + } + + private void load() { + //读取server.xml配置 + InputStream dtd = null; + InputStream xml = null; + try { + dtd = XMLServerLoader.class.getResourceAsStream("/server.dtd"); + xml = XMLServerLoader.class.getResourceAsStream("/server.xml"); + Element root = ConfigUtil.getDocument(dtd, xml).getDocumentElement(); + + //加载System标签 + loadSystem(root); + + //加载User标签 + loadUsers(root); + + //加载集群配置 + this.cluster = new ClusterConfig(root, system.getServerPort()); + + //加载全局SQL防火墙 + loadFirewall(root); + } catch (ConfigException e) { + throw e; + } catch (Exception e) { + throw new ConfigException(e); + } finally { + if (dtd != null) { + try { + dtd.close(); + } catch (IOException e) { + } + } + if (xml != null) { + try { + xml.close(); + } catch (IOException e) { + } + } + } + } + + /** + * 初始载入配置获取防火墙配置,配置防火墙方法之一,一共有两处,另一处: + * @see FirewallConfig + * + * @modification 修改增加网段白名单 + * @date 2016/12/8 + * @modifiedBy Hash Zhang + */ + private void loadFirewall(Element root) throws IllegalAccessException, InvocationTargetException { + NodeList list = root.getElementsByTagName("host"); + Map> whitehost = new HashMap<>(); + Map> whitehostMask = new HashMap<>(); + + for (int i = 0, n = list.getLength(); i < n; i++) { + Node node = list.item(i); + if (node instanceof Element) { + Element e = (Element) node; + String host = e.getAttribute("host").trim(); + String userStr = e.getAttribute("user").trim(); + if (this.firewall.existsHost(host)) { + throw new ConfigException("host duplicated : " + host); + } + String []users = userStr.split(","); + List userConfigs = new ArrayList(); + for(String user : users){ + UserConfig uc = this.users.get(user); + if (null == uc) { + throw new ConfigException("[user: " + user + "] doesn't exist in [host: " + host + "]"); + } + if (uc.getSchemas() == null || 
uc.getSchemas().size() == 0) { + throw new ConfigException("[host: " + host + "] contains one root privileges user: " + user); + } + userConfigs.add(uc); + } + if(host.contains("*")||host.contains("%")){ + whitehostMask.put(FirewallConfig.getMaskPattern(host),userConfigs); + }else{ + whitehost.put(host, userConfigs); + } + } + } + + firewall.setWhitehost(whitehost); + firewall.setWhitehostMask(whitehostMask); + + WallConfig wallConfig = new WallConfig(); + NodeList blacklist = root.getElementsByTagName("blacklist"); + for (int i = 0, n = blacklist.getLength(); i < n; i++) { + Node node = blacklist.item(i); + if (node instanceof Element) { + Element e = (Element) node; + String check = e.getAttribute("check"); + if (null != check) { + firewall.setCheck(Boolean.parseBoolean(check)); + } + + Map props = ConfigUtil.loadElements((Element) node); + ParameterMapping.mapping(wallConfig, props); + } + } + firewall.setWallConfig(wallConfig); + firewall.init(); + + } + + private void loadUsers(Element root) { + NodeList list = root.getElementsByTagName("user"); + for (int i = 0, n = list.getLength(); i < n; i++) { + Node node = list.item(i); + if (node instanceof Element) { + Element e = (Element) node; + String name = e.getAttribute("name"); + //huangyiming add + String defaultAccount = e.getAttribute("defaultAccount"); + + UserConfig user = new UserConfig(); + Map props = ConfigUtil.loadElements(e); + String password = (String)props.get("password"); + String usingDecrypt = (String)props.get("usingDecrypt"); + String passwordDecrypt = DecryptUtil.mycatDecrypt(usingDecrypt,name,password); + user.setName(name); + user.setDefaultAccount(Boolean.parseBoolean(defaultAccount)); + user.setPassword(passwordDecrypt); + user.setEncryptPassword(password); + + String benchmark = (String) props.get("benchmark"); + if(null != benchmark) { + user.setBenchmark( Integer.parseInt(benchmark) ); + } + + String readOnly = (String) props.get("readOnly"); + if (null != readOnly) { + 
user.setReadOnly(Boolean.parseBoolean(readOnly)); + } + + + String schemas = (String) props.get("schemas"); + if (schemas != null) { + String[] strArray = SplitUtil.split(schemas, ',', true); + user.setSchemas(new HashSet(Arrays.asList(strArray))); + } + + //加载用户 DML 权限 + loadPrivileges(user, e); + + if (users.containsKey(name)) { + throw new ConfigException("user " + name + " duplicated!"); + } + users.put(name, user); + } + } + } + + private void loadPrivileges(UserConfig userConfig, Element node) { + + UserPrivilegesConfig privilegesConfig = new UserPrivilegesConfig(); + + NodeList privilegesNodes = node.getElementsByTagName("privileges"); + int privilegesNodesLength = privilegesNodes.getLength(); + for (int i = 0; i < privilegesNodesLength; ++i) { + Element privilegesNode = (Element) privilegesNodes.item(i); + String check = privilegesNode.getAttribute("check"); + if (null != check) { + privilegesConfig.setCheck(Boolean.valueOf(check)); + } + + + NodeList schemaNodes = privilegesNode.getElementsByTagName("schema"); + int schemaNodeLength = schemaNodes.getLength(); + + for (int j = 0; j < schemaNodeLength; j++ ) { + Element schemaNode = (Element) schemaNodes.item(j); + String name1 = schemaNode.getAttribute("name"); + String dml1 = schemaNode.getAttribute("dml"); + + int[] dml1Array = new int[ dml1.length() ]; + for(int offset1 = 0; offset1 < dml1.length(); offset1++ ) { + dml1Array[offset1] = Character.getNumericValue( dml1.charAt( offset1 ) ); + } + + UserPrivilegesConfig.SchemaPrivilege schemaPrivilege = new UserPrivilegesConfig.SchemaPrivilege(); + schemaPrivilege.setName( name1 ); + schemaPrivilege.setDml( dml1Array ); + + NodeList tableNodes = schemaNode.getElementsByTagName("table"); + int tableNodeLength = tableNodes.getLength(); + for (int z = 0; z < tableNodeLength; z++) { + + UserPrivilegesConfig.TablePrivilege tablePrivilege = new UserPrivilegesConfig.TablePrivilege(); + + Element tableNode = (Element) tableNodes.item(z); + String name2 = 
tableNode.getAttribute("name"); + String dml2 = tableNode.getAttribute("dml"); + + int[] dml2Array = new int[ dml2.length() ]; + for(int offset2 = 0; offset2 < dml2.length(); offset2++ ) { + dml2Array[offset2] = Character.getNumericValue( dml2.charAt( offset2 ) ); + } + + tablePrivilege.setName( name2 ); + tablePrivilege.setDml( dml2Array ); + + schemaPrivilege.addTablePrivilege(name2, tablePrivilege); + } + + privilegesConfig.addSchemaPrivilege(name1, schemaPrivilege); + } + } + + userConfig.setPrivilegesConfig(privilegesConfig); + } + + private void loadSystem(Element root) throws IllegalAccessException, InvocationTargetException { + NodeList list = root.getElementsByTagName("system"); + for (int i = 0, n = list.getLength(); i < n; i++) { + Node node = list.item(i); + if (node instanceof Element) { + Map props = ConfigUtil.loadElements((Element) node); + ParameterMapping.mapping(system, props); + } + } + + if (system.getFakeMySQLVersion() != null) { + boolean validVersion = false; + String majorMySQLVersion = system.getFakeMySQLVersion(); + /* + * 注意!!! 
目前MySQL官方主版本号仍然是5.x, 以后万一前面的大版本号变成2位数字, + * 比如 10.x...,下面获取主版本的代码要做修改 + */ + majorMySQLVersion = majorMySQLVersion.substring(0, majorMySQLVersion.indexOf(".", 2)); + for (String ver : SystemConfig.MySQLVersions) { + // 这里只是比较mysql前面的大版本号 + if (majorMySQLVersion.equals(ver)) { + validVersion = true; + } + } + + if (validVersion) { + Versions.setServerVersion(system.getFakeMySQLVersion()); + } else { + throw new ConfigException("The specified MySQL Version (" + system.getFakeMySQLVersion() + + ") is not valid."); + } + } + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/comm/NotiflyService.java b/src/main/java/io/mycat/config/loader/zkprocess/comm/NotiflyService.java new file mode 100644 index 000000000..e34f0eda1 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/comm/NotiflyService.java @@ -0,0 +1,19 @@ +package io.mycat.config.loader.zkprocess.comm; + +/** + * 通过接口 + * @author liujun + * + * @date 2015年2月4日 + * @vsersion 0.0.1 + */ +public interface NotiflyService { + + /** + * 进行通知接口 + * @throws Exception 异常操作 + * @return true 通知更新成功,false ,更新失败 + */ + public boolean notiflyProcess() throws Exception; + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/comm/ZkConfig.java b/src/main/java/io/mycat/config/loader/zkprocess/comm/ZkConfig.java new file mode 100644 index 000000000..13a25f568 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/comm/ZkConfig.java @@ -0,0 +1,135 @@ +package io.mycat.config.loader.zkprocess.comm; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Properties; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Strings; + +import io.mycat.config.loader.zkprocess.zktoxml.ZktoXmlMain; + + +/** + * 进行zk的配制信息 +* 源文件名:ZkConfig.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class ZkConfig { + /** + * 日志信息 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(ZkConfig.class); + + private static final String ZK_CONFIG_FILE_NAME = "/myid.properties"; + + private ZkConfig() { + } + + /** + * 实例对象信息 + * @字段说明 ZKCFGINSTANCE + */ + private static ZkConfig ZKCFGINSTANCE = new ZkConfig(); + + + /** + * myid的属性文件信息 + * @字段说明 ZKPROPERTIES + */ + private static Properties ZKPROPERTIES = null; + + static { + ZKPROPERTIES = LoadMyidPropersites(); + } + + + public String getZkURL() + { + return ZKPROPERTIES==null?null:ZKPROPERTIES.getProperty(ZkParamCfg.ZK_CFG_URL.getKey()) ; + } + public void initZk() + { + try { + if (Boolean.parseBoolean(ZKPROPERTIES.getProperty(ZkParamCfg.ZK_CFG_FLAG.getKey()))) { + ZktoXmlMain.loadZktoFile(); + } + } catch (Exception e) { + LOGGER.error("error:",e); + } + } + + /** + * 获得实例对象信息 + * 方法描述 + * @return + * @创建日期 2016年9月15日 + */ + public static ZkConfig getInstance() { + + return ZKCFGINSTANCE; + } + + /** + * 获取myid属性文件中的属性值 + * 方法描述 + * @param param 参数信息 + * @return + * @创建日期 2016年9月15日 + */ + public String getValue(ZkParamCfg param) { + if (null != param) { + return ZKPROPERTIES.getProperty(param.getKey()); + } + + return null; + } + + /** + * 加载myid配制文件信息 + * 方法描述 + * @return + * @创建日期 2016年9月15日 + */ + private static Properties LoadMyidPropersites() { + Properties pros = new Properties(); + + try (InputStream configIS = ZkConfig.class.getResourceAsStream(ZK_CONFIG_FILE_NAME)) { + if (configIS == null) { + return null; + } + + pros.load(configIS); + } catch (IOException e) { + LOGGER.error("ZkConfig LoadMyidPropersites error:", e); + throw new RuntimeException("can't find myid properties file : " + ZK_CONFIG_FILE_NAME); + } + + // validate + String zkURL = pros.getProperty(ZkParamCfg.ZK_CFG_URL.getKey()); + String myid = pros.getProperty(ZkParamCfg.ZK_CFG_MYID.getKey()); + + String clusterId = pros.getProperty(ZkParamCfg.ZK_CFG_CLUSTERID.getKey()); + + if 
(Strings.isNullOrEmpty(clusterId) ||Strings.isNullOrEmpty(zkURL) || Strings.isNullOrEmpty(myid)) { + throw new RuntimeException("clusterId and zkURL and myid must not be null or empty!"); + } + return pros; + + } + + public static void main(String[] args) { + String zk = ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_CLUSTERID); + System.out.println(zk); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/comm/ZkParamCfg.java b/src/main/java/io/mycat/config/loader/zkprocess/comm/ZkParamCfg.java new file mode 100644 index 000000000..ec07894ed --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/comm/ZkParamCfg.java @@ -0,0 +1,75 @@ +package io.mycat.config.loader.zkprocess.comm; + +/** + * 当前zk的配制参数信息 +* 源文件名:ZkParamCfg.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月17日 +* 修改作者:liujun +* 修改日期:2016年9月17日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public enum ZkParamCfg { + + /** + * zk是否启用标识 + * @字段说明 ZK_CFG_OPEN + */ + ZK_CFG_FLAG("loadZk"), + + /** + * zk配制的url地址信息 + * @字段说明 ZK_CFG_URL + */ + ZK_CFG_URL("zkURL"), + + /** + * 集群的id + * @字段说明 ZK_CFG_CLUSTERID + */ + ZK_CFG_CLUSTERID("clusterId"), + + ZK_CFG_CLUSTER_SIZE("clusterSize"), + + /** + * 当前mycat节点的id + * @字段说明 zk_CFG_MYID + */ + ZK_CFG_MYID("myid"), + + + MYCAT_SERVER_TYPE("type"), + + MYCAT_BOOSTER_DATAHOSTS("boosterDataHosts"), + + /** + * 集群中所有节点的名称信息 + * @字段说明 ZK_CFG_CLUSTER_NODES + */ + ZK_CFG_CLUSTER_NODES("clusterNodes"), + + /** + * 集群中所有节点的名称信息的分隔符 + * @字段说明 ZK_CFG_CLUSTER_NODES + */ + ZK_CFG_CLUSTER_NODES_SEPARATE(","), + + ; + + private ZkParamCfg(String key) { + this.key = key; + } + + private String key; + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/comm/ZookeeperProcessListen.java b/src/main/java/io/mycat/config/loader/zkprocess/comm/ZookeeperProcessListen.java new file mode 100644 index 
000000000..0f1cf911a --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/comm/ZookeeperProcessListen.java @@ -0,0 +1,185 @@ +package io.mycat.config.loader.zkprocess.comm; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.console.ZkNofiflyCfg; + +/** + * 进行zookeeper操作的监控器器父类信息 + * + * @author liujun + * + * @date 2015年2月4日 + * @vsersion 0.0.1 + */ +public class ZookeeperProcessListen { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger lOG = LoggerFactory.getLogger(ZookeeperProcessListen.class); + + /** + * 所有更新缓存操作的集合 + */ + private Map listenCache = new HashMap(); + + /** + * 监控的路径信息 + * @字段说明 watchPath + */ + private Map> watchPathMap = new HashMap<>(); + + /** + * 监控路径对应的缓存key的对应表 + * @字段说明 watchToListen + */ + private Map watchToListenMap = new HashMap<>(); + + /** + * 基本路径信息 + * @字段说明 basePath + */ + private String basePath; + + public String getBasePath() { + return basePath; + } + + public void setBasePath(String basePath) { + this.basePath = basePath; + } + + /** + * 添加缓存更新操作 + * + * @param key + * @param cacheNotiflySercie + */ + public void addListen(String key, NotiflyService cacheNotiflySercie) { + listenCache.put(key, cacheNotiflySercie); + } + + /** + * 专门针对zk设置的监控路径 + * 方法描述 + * @param key + * @param path + * @param cacheNotiflySercie + * @创建日期 2016年9月19日 + */ + public void watchPath(String key, String path) { + Set watchPaths = watchPathMap.get(key); + + if (null == watchPaths) { + watchPaths = new HashSet<>(); + } + + watchPaths.add(path); + watchPathMap.put(key, watchPaths); + } + + /** + * 进行监控路径的转换 + * 方法描述 + * @创建日期 2016年9月20日 + */ + public void watchToParse() { + if (null != watchPathMap && !watchPathMap.isEmpty()) { + for (Entry> watchPathEntry : 
watchPathMap.entrySet()) { + for (String path : watchPathEntry.getValue()) { + watchToListenMap.put(watchPathEntry.getKey() + ZookeeperPath.ZK_SEPARATOR.getKey() + path, + watchPathEntry.getKey()); + } + } + } + } + + /** + * 返回路径集合 + * 方法描述 + * @return + * @创建日期 2016年9月19日 + */ + public Set getWatchPath() { + + if (watchToListenMap.isEmpty()) { + this.watchToParse(); + } + + return watchToListenMap.keySet(); + } + + /** + * 进行缓存更新通知 + * + * @param key + * 缓存模块的key + * @return true 当前缓存模块数据更新成功,false,当前缓存数据更新失败 + */ + public boolean notifly(String key) { + boolean result = false; + + if (null != key && !"".equals(key)) { + + // 进行配制加载所有 + if (ZkNofiflyCfg.ZK_NOTIFLY_LOAD_ALL.getKey().equals(key)) { + this.notiflyAll(); + } + // 如果是具体的单独更新,则进行单业务的业务刷新 + else { + String watchListen = watchToListenMap.get(key); + + if (null != watchListen) { + // 取得具体的业务监听信息 + NotiflyService cacheService = listenCache.get(watchListen); + + if (null != cacheService) { + try { + result = cacheService.notiflyProcess(); + } catch (Exception e) { + lOG.error("ZookeeperProcessListen notifly key :" + key + " error:Exception info:", e); + } + } + } + } + } + + return result; + } + + /** + * 进行通知所有缓存进行更新操作 + */ + private void notiflyAll() { + + Iterator> notiflyIter = listenCache.entrySet().iterator(); + + Entry item = null; + + while (notiflyIter.hasNext()) { + item = notiflyIter.next(); + + // 进行缓存更新通知操作 + if (null != item.getValue()) { + try { + item.getValue().notiflyProcess(); + } catch (Exception e) { + lOG.error("ZookeeperProcessListen notiflyAll key :" + item.getKey() + ";value " + item.getValue() + + ";error:Exception info:", e); + } + } + } + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/console/ParseParamEnum.java b/src/main/java/io/mycat/config/loader/zkprocess/console/ParseParamEnum.java new file mode 100644 index 000000000..cc898518f --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/console/ParseParamEnum.java @@ -0,0 +1,42 @@ +package 
io.mycat.config.loader.zkprocess.console; + +/** + * 转换的流程参数配制信息 +* 源文件名:ParseParamEnum.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月18日 +* 修改作者:liujun +* 修改日期:2016年9月18日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public enum ParseParamEnum { + + /** + * mapfile配制的参数名 + * @字段说明 ZK_PATH_RULE_MAPFILE_NAME + */ + ZK_PATH_RULE_MAPFILE_NAME("mapFile"), + + ; + + /** + * 配制的key的信息 + * @字段说明 key + */ + private String key; + + private ParseParamEnum(String key) { + this.key = key; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/console/ZkNofiflyCfg.java b/src/main/java/io/mycat/config/loader/zkprocess/console/ZkNofiflyCfg.java new file mode 100644 index 000000000..bf0812698 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/console/ZkNofiflyCfg.java @@ -0,0 +1,42 @@ +package io.mycat.config.loader.zkprocess.console; + +/** + * 进行zk通知的参数配制信息 +* 源文件名:ZkNofiflyCfg.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public enum ZkNofiflyCfg { + + /** + * 通知更新所有节点的信息 + * @字段说明 ZK_NOTIFLY_LOAD_ALL + */ + ZK_NOTIFLY_LOAD_ALL("all"), + + ; + + /** + * 配制的key的信息 + * @字段说明 key + */ + private String key; + + private ZkNofiflyCfg(String key) { + this.key = key; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/Named.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/Named.java new file mode 100644 index 000000000..49e7506be --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/Named.java @@ -0,0 +1,14 @@ +package io.mycat.config.loader.zkprocess.entity; + +/** + * presentation a object have a filed name. 
+ */ +public interface Named { + /** + * 获得属性的名称 + * 方法描述 + * @return + * @创建日期 2016年9月15日 + */ + String getName(); +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/Propertied.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/Propertied.java new file mode 100644 index 000000000..f6c3f61cd --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/Propertied.java @@ -0,0 +1,8 @@ +package io.mycat.config.loader.zkprocess.entity; + +/** + * Created by lion on 12/8/15. + */ +public interface Propertied { + void addProperty(Property property); +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/Property.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/Property.java new file mode 100644 index 000000000..2461cfb06 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/Property.java @@ -0,0 +1,70 @@ +package io.mycat.config.loader.zkprocess.entity; + +import java.util.Objects; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlType; +import javax.xml.bind.annotation.XmlValue; + +/** + * 键值对信息 +* 源文件名:Property.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月16日 +* 修改作者:liujun +* 修改日期:2016年9月16日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlType(name = "Property") +public class Property implements Named { + + @XmlValue + protected String value; + @XmlAttribute(name = "name") + protected String name; + + public String getValue() { + return value; + } + + public Property setValue(String value) { + this.value = value; + return this; + } + + public String getName() { + return name; + } + + public Property setName(String value) { + this.name = value; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Property property = (Property) o; + return value.equals(property.value) && name.equals(property.name); + } + + @Override + public int hashCode() { + return Objects.hash(value, name); + } + + @Override + public String toString() { + return "Property{" + "value='" + value + '\'' + ", name='" + name + '\'' + '}'; + } +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/Rules.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/Rules.java new file mode 100644 index 000000000..c49fc8f9a --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/Rules.java @@ -0,0 +1,63 @@ +package io.mycat.config.loader.zkprocess.entity; + +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import io.mycat.config.loader.zkprocess.entity.rule.function.Function; +import io.mycat.config.loader.zkprocess.entity.rule.tablerule.TableRule; + +@XmlAccessorType(XmlAccessType.FIELD) +@XmlRootElement(namespace = "http://io.mycat/", name = "rule") +public class Rules { + + /** + * 表的路由配制信息 + * @字段说明 tableRule + */ + protected List tableRule; + + /** + * 指定的方法信息 + * @字段说明 function + */ + protected List function; + + public List getTableRule() { + if (this.tableRule == 
null) { + tableRule = new ArrayList<>(); + } + return tableRule; + } + + public void setTableRule(List tableRule) { + this.tableRule = tableRule; + } + + public List getFunction() { + if (this.function == null) { + function = new ArrayList<>(); + } + return function; + } + + public void setFunction(List function) { + this.function = function; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("Rules [tableRule="); + builder.append(tableRule); + builder.append(", function="); + builder.append(function); + builder.append("]"); + return builder.toString(); + } + + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/Schemas.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/Schemas.java new file mode 100644 index 000000000..9f0792ca2 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/Schemas.java @@ -0,0 +1,81 @@ +package io.mycat.config.loader.zkprocess.entity; + +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import io.mycat.config.loader.zkprocess.entity.schema.datahost.DataHost; +import io.mycat.config.loader.zkprocess.entity.schema.datanode.DataNode; +import io.mycat.config.loader.zkprocess.entity.schema.schema.Schema; + +@XmlAccessorType(XmlAccessType.FIELD) +@XmlRootElement(namespace = "http://io.mycat/", name = "schema") +public class Schemas { + /** + * 配制的逻辑表信息 + * @字段说明 schema + */ + private List schema; + + /** + * 配制的表对应的数据库信息 + * @字段说明 dataNode + */ + private List dataNode; + + /** + * 用于指定数据信息 + * @字段说明 dataHost + */ + private List dataHost; + + public List getSchema() { + if (this.schema == null) { + schema = new ArrayList<>(); + } + return schema; + } + + public void setSchema(List schema) { + this.schema = schema; + } + + public List getDataNode() { + if (this.dataNode == 
null) { + dataNode = new ArrayList<>(); + } + return dataNode; + } + + public void setDataNode(List dataNode) { + this.dataNode = dataNode; + } + + public List getDataHost() { + if (this.dataHost == null) { + dataHost = new ArrayList<>(); + } + return dataHost; + } + + public void setDataHost(List dataHost) { + this.dataHost = dataHost; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("Schemas [schema="); + builder.append(schema); + builder.append(", dataNode="); + builder.append(dataNode); + builder.append(", dataHost="); + builder.append(dataHost); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/Server.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/Server.java new file mode 100644 index 000000000..318e4472f --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/Server.java @@ -0,0 +1,50 @@ +package io.mycat.config.loader.zkprocess.entity; + +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +import io.mycat.config.loader.zkprocess.entity.server.System; +import io.mycat.config.loader.zkprocess.entity.server.user.User; + +@XmlAccessorType(XmlAccessType.FIELD) +@XmlRootElement(namespace = "http://io.mycat/", name = "server") +public class Server { + + @XmlElement(required = true) + protected System system; + + @XmlElement(required = true) + protected List user; + + public System getSystem() { + return system; + } + + public void setSystem(System value) { + this.system = value; + } + + public List getUser() { + return user; + } + + public void setUser(List user) { + this.user = user; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("Server [system="); + 
builder.append(system); + builder.append(", user="); + builder.append(user); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/cache/CacheInfo.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/cache/CacheInfo.java new file mode 100644 index 000000000..3a9bc7a61 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/cache/CacheInfo.java @@ -0,0 +1,168 @@ +package io.mycat.config.loader.zkprocess.entity.cache; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlRootElement; + +/** + * 缓存配制信息 +* 源文件名:CacheInfo.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月19日 +* 修改作者:liujun +* 修改日期:2016年9月19日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +@XmlAccessorType(XmlAccessType.NONE) +@XmlRootElement(name = "defaultCache") +public class CacheInfo { + + /** + * maxElementsInMemory:在内存中最大的对象数量 + * @字段说明 maxEntriesLocalHeap + */ + @XmlAttribute + private int maxElementsInMemory; + + /** + * eternal:设置元素是否永久的,如果为永久,则timeout忽略 + * @字段说明 maxBytesLocalDisk + */ + @XmlAttribute + private boolean eternal; + + /** + * overflowToDisk:是否当memory中的数量达到限制后,保存到Disk + * @字段说明 updateCheck + */ + @XmlAttribute + private boolean overflowToDisk; + + /** + * diskSpoolBufferSizeMB:这个参数设置DiskStore(磁盘缓存)的缓存区大小。默认是30MB。每个Cache都应该有自己的一个缓冲区。 + * @字段说明 diskSpoolBufferSizeMB + */ + @XmlAttribute + private int diskSpoolBufferSizeMB; + + /** + * maxElementsOnDisk:硬盘最大缓存个数。 + * @字段说明 maxElementsOnDisk + */ + @XmlAttribute + private int maxElementsOnDisk; + + /** + * diskPersistent:是否缓存虚拟机重启期数据 + * @字段说明 diskPersistent + */ + @XmlAttribute + private boolean diskPersistent; + + /** + * diskExpiryThreadIntervalSeconds:磁盘失效线程运行时间间隔,默认是120秒。 + * @字段说明 diskExpiryThreadIntervalSeconds + */ + @XmlAttribute + private int 
diskExpiryThreadIntervalSeconds; + + /** + * memoryStoreEvictionPolicy:当达到maxElementsInMemory限制时, + * Ehcache将会根据指定的策略去清理内存。默认策略是LRU(最近最少使用)。 + * 你可以设置为FIFO(先进先出)或是LFU(较少使用)。 + * @字段说明 memoryStoreEvictionPolicy + */ + @XmlAttribute + private String memoryStoreEvictionPolicy; + + public int getMaxElementsInMemory() { + return maxElementsInMemory; + } + + public void setMaxElementsInMemory(int maxElementsInMemory) { + this.maxElementsInMemory = maxElementsInMemory; + } + + public boolean isEternal() { + return eternal; + } + + public void setEternal(boolean eternal) { + this.eternal = eternal; + } + + public boolean isOverflowToDisk() { + return overflowToDisk; + } + + public void setOverflowToDisk(boolean overflowToDisk) { + this.overflowToDisk = overflowToDisk; + } + + public int getDiskSpoolBufferSizeMB() { + return diskSpoolBufferSizeMB; + } + + public void setDiskSpoolBufferSizeMB(int diskSpoolBufferSizeMB) { + this.diskSpoolBufferSizeMB = diskSpoolBufferSizeMB; + } + + public int getMaxElementsOnDisk() { + return maxElementsOnDisk; + } + + public void setMaxElementsOnDisk(int maxElementsOnDisk) { + this.maxElementsOnDisk = maxElementsOnDisk; + } + + public boolean isDiskPersistent() { + return diskPersistent; + } + + public void setDiskPersistent(boolean diskPersistent) { + this.diskPersistent = diskPersistent; + } + + public int getDiskExpiryThreadIntervalSeconds() { + return diskExpiryThreadIntervalSeconds; + } + + public void setDiskExpiryThreadIntervalSeconds(int diskExpiryThreadIntervalSeconds) { + this.diskExpiryThreadIntervalSeconds = diskExpiryThreadIntervalSeconds; + } + + public String getMemoryStoreEvictionPolicy() { + return memoryStoreEvictionPolicy; + } + + public void setMemoryStoreEvictionPolicy(String memoryStoreEvictionPolicy) { + this.memoryStoreEvictionPolicy = memoryStoreEvictionPolicy; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("CacheInfo [maxElementsInMemory="); + 
builder.append(maxElementsInMemory); + builder.append(", eternal="); + builder.append(eternal); + builder.append(", overflowToDisk="); + builder.append(overflowToDisk); + builder.append(", diskSpoolBufferSizeMB="); + builder.append(diskSpoolBufferSizeMB); + builder.append(", maxElementsOnDisk="); + builder.append(maxElementsOnDisk); + builder.append(", diskPersistent="); + builder.append(diskPersistent); + builder.append(", diskExpiryThreadIntervalSeconds="); + builder.append(diskExpiryThreadIntervalSeconds); + builder.append(", memoryStoreEvictionPolicy="); + builder.append(memoryStoreEvictionPolicy); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/cache/Ehcache.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/cache/Ehcache.java new file mode 100644 index 000000000..09b185415 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/cache/Ehcache.java @@ -0,0 +1,97 @@ +package io.mycat.config.loader.zkprocess.entity.cache; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +/** + * ehcache配制信息 +* 源文件名:Ehcache.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月19日 +* 修改作者:liujun +* 修改日期:2016年9月19日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlRootElement(name = "ehcache") +public class Ehcache { + + /** + * + * @字段说明 maxEntriesLocalHeap + */ + @XmlAttribute + private int maxEntriesLocalHeap; + + /** + * @字段说明 maxBytesLocalDisk + */ + @XmlAttribute + private String maxBytesLocalDisk; + + /** + * @字段说明 updateCheck + */ + @XmlAttribute + private boolean updateCheck; + + /** + * 缓存信息 + * @字段说明 defaultCache + */ + @XmlElement + private CacheInfo defaultCache; + + public int getMaxEntriesLocalHeap() { + return maxEntriesLocalHeap; + } + + public void setMaxEntriesLocalHeap(int maxEntriesLocalHeap) { + this.maxEntriesLocalHeap = maxEntriesLocalHeap; + } + + public String getMaxBytesLocalDisk() { + return maxBytesLocalDisk; + } + + public void setMaxBytesLocalDisk(String maxBytesLocalDisk) { + this.maxBytesLocalDisk = maxBytesLocalDisk; + } + + public boolean isUpdateCheck() { + return updateCheck; + } + + public void setUpdateCheck(boolean updateCheck) { + this.updateCheck = updateCheck; + } + + public CacheInfo getDefaultCache() { + return defaultCache; + } + + public void setDefaultCache(CacheInfo defaultCache) { + this.defaultCache = defaultCache; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("Ehcache [maxEntriesLocalHeap="); + builder.append(maxEntriesLocalHeap); + builder.append(", maxBytesLocalDisk="); + builder.append(maxBytesLocalDisk); + builder.append(", updateCheck="); + builder.append(updateCheck); + builder.append(", defaultCache="); + builder.append(defaultCache); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/package-info.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/package-info.java new file mode 100644 index 000000000..b4612832f --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/package-info.java @@ -0,0 +1,6 @@ +@XmlSchema(xmlns = @XmlNs(prefix = "mycat", 
namespaceURI = "http://io.mycat/") , elementFormDefault = javax.xml.bind.annotation.XmlNsForm.QUALIFIED) + +package io.mycat.config.loader.zkprocess.entity; + +import javax.xml.bind.annotation.XmlNs; +import javax.xml.bind.annotation.XmlSchema; diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/rule/function/Function.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/rule/function/Function.java new file mode 100644 index 000000000..a2be0fe66 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/rule/function/Function.java @@ -0,0 +1,87 @@ +package io.mycat.config.loader.zkprocess.entity.rule.function; + +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlType; + +import io.mycat.config.loader.zkprocess.entity.Named; +import io.mycat.config.loader.zkprocess.entity.Propertied; +import io.mycat.config.loader.zkprocess.entity.Property; + +/** + * + * * 3 + * +* 源文件名:Function.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月18日 +* 修改作者:liujun +* 修改日期:2016年9月18日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlType(name = "function") +public class Function implements Propertied, Named { + + + @XmlAttribute(required = true) + protected String name; + + @XmlAttribute(required = true, name = "class") + protected String clazz; + + protected List property; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getClazz() { + return clazz; + } + + public void setClazz(String clazz) { + this.clazz = clazz; + } + + public List getProperty() { + if (this.property == null) { + property = new ArrayList<>(); + } + return property; + } + + public void setProperty(List property) { + this.property = property; + } + + @Override + public void addProperty(Property property) { + this.getProperty().add(property); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("Function [name="); + builder.append(name); + builder.append(", clazz="); + builder.append(clazz); + builder.append(", property="); + builder.append(property); + builder.append("]"); + return builder.toString(); + } + + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/rule/tablerule/Rule.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/rule/tablerule/Rule.java new file mode 100644 index 000000000..df5346d3c --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/rule/tablerule/Rule.java @@ -0,0 +1,58 @@ +package io.mycat.config.loader.zkprocess.entity.rule.tablerule; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlType; + + +/** + * * + * * *id + * * *func1 + * * +* 源文件名:Rule.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月18日 +* 修改作者:liujun +* 修改日期:2016年9月18日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlType(name = "rule", propOrder = { "columns", "algorithm" }) +public class Rule { + + protected String columns; + protected String algorithm; + + public String getColumns() { + return columns; + } + + public Rule setColumns(String columns) { + this.columns = columns; + return this; + } + + public String getAlgorithm() { + return algorithm; + } + + public Rule setAlgorithm(String algorithm) { + this.algorithm = algorithm; + return this; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("Rule [columns="); + builder.append(columns); + builder.append(", algorithm="); + builder.append(algorithm); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/rule/tablerule/TableRule.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/rule/tablerule/TableRule.java new file mode 100644 index 000000000..1a1286d89 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/rule/tablerule/TableRule.java @@ -0,0 +1,66 @@ +package io.mycat.config.loader.zkprocess.entity.rule.tablerule; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlType; + +import io.mycat.config.loader.zkprocess.entity.Named; + +/** + * * + * * * + * * * *id + * * * *func1 + * * + * +* 源文件名:TableRule.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月18日 +* 修改作者:liujun +* 修改日期:2016年9月18日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlType(name = "tableRule") +public class TableRule implements Named { + + @XmlElement(required = true, name = "rule") + protected Rule rule; + + @XmlAttribute(required = true) + protected String name; + + public Rule getRule() { + return rule; + } + + public TableRule setRule(Rule rule) { + this.rule = rule; + return this; + } + + public String getName() { + return name; + } + + public TableRule setName(String name) { + this.name = name; + return this; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("TableRule [rule="); + builder.append(rule); + builder.append(", name="); + builder.append(name); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/datahost/DataHost.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/datahost/DataHost.java new file mode 100644 index 000000000..258365ada --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/datahost/DataHost.java @@ -0,0 +1,195 @@ +package io.mycat.config.loader.zkprocess.entity.schema.datahost; + +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlType; + +import io.mycat.config.loader.zkprocess.entity.Named; + +/** + * + * +* 源文件名:DataHost.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlType(name = "dataHost") +public class DataHost implements Named { + + @XmlAttribute(required = true) + protected Integer balance; + @XmlAttribute(required = true) + protected Integer maxCon; + @XmlAttribute(required = true) + protected Integer minCon; + @XmlAttribute(required = true) + protected String name; + @XmlAttribute + protected Integer writeType; + @XmlAttribute + protected Integer switchType; + @XmlAttribute + protected Integer slaveThreshold; + @XmlAttribute(required = true) + protected String dbType; + @XmlAttribute(required = true) + protected String dbDriver; + + @XmlAttribute() + protected String slaveIDs; + + protected String heartbeat; + protected String connectionInitSql; + + protected List writeHost; + + public String getHeartbeat() { + return heartbeat; + } + + public void setHeartbeat(String heartbeat) { + this.heartbeat = heartbeat; + } + + public String getConnectionInitSql() { + return connectionInitSql; + } + + public void setConnectionInitSql(String connectionInitSql) { + this.connectionInitSql = connectionInitSql; + } + + public List getWriteHost() { + if (this.writeHost == null) { + writeHost = new ArrayList<>(); + } + return writeHost; + } + + public String getSlaveIDs() { + return slaveIDs; + } + + public void setSlaveIDs(String slaveIDs) { + this.slaveIDs = slaveIDs; + } + + public void setWriteHost(List writeHost) { + this.writeHost = writeHost; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public Integer getMaxCon() { + return maxCon; + } + + public void setMaxCon(Integer maxCon) { + this.maxCon = maxCon; + } + + public Integer getMinCon() { + return minCon; + } + + public void setMinCon(Integer minCon) { + this.minCon = minCon; + } + + public Integer getBalance() { + return balance; + } + + public void setBalance(Integer balance) { + this.balance = balance; + } + + public String getDbType() { + return dbType; + } 
+ + public void setDbType(String dbType) { + this.dbType = dbType; + } + + public String getDbDriver() { + return dbDriver; + } + + public void setDbDriver(String dbDriver) { + this.dbDriver = dbDriver; + } + + public Integer getWriteType() { + return writeType; + } + + public void setWriteType(Integer writeType) { + this.writeType = writeType; + } + + public Integer getSwitchType() { + return switchType; + } + + public void setSwitchType(Integer switchType) { + this.switchType = switchType; + } + + public Integer getSlaveThreshold() { + return slaveThreshold; + } + + public void setSlaveThreshold(Integer slaveThreshold) { + this.slaveThreshold = slaveThreshold; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("DataHost [balance="); + builder.append(balance); + builder.append(", maxCon="); + builder.append(maxCon); + builder.append(", minCon="); + builder.append(minCon); + builder.append(", name="); + builder.append(name); + builder.append(", writeType="); + builder.append(writeType); + builder.append(", switchType="); + builder.append(switchType); + builder.append(", slaveThreshold="); + builder.append(slaveThreshold); + builder.append(", dbType="); + builder.append(dbType); + builder.append(", dbDriver="); + builder.append(dbDriver); + builder.append(", heartbeat="); + builder.append(heartbeat); + builder.append(", connectionInitSql="); + builder.append(connectionInitSql); + builder.append(", writeHost="); + builder.append(writeHost); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/datahost/ReadHost.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/datahost/ReadHost.java new file mode 100644 index 000000000..31feb4fdb --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/datahost/ReadHost.java @@ -0,0 +1,52 @@ +package 
io.mycat.config.loader.zkprocess.entity.schema.datahost; + +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlTransient; +import javax.xml.bind.annotation.XmlType; + +/** + * +* 源文件名:ReadHost.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlType(name = "readHost") +public class ReadHost extends WriteHost { + + @XmlAttribute + protected String weight; + + public String getWeight() { + return weight; + } + + public void setWeight(String weight) { + this.weight = weight; + } + + @XmlTransient + @Override + public List getReadHost() { + return super.getReadHost(); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("ReadHost [weight="); + builder.append(weight); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/datahost/WriteHost.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/datahost/WriteHost.java new file mode 100644 index 000000000..21cca8110 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/datahost/WriteHost.java @@ -0,0 +1,109 @@ +package io.mycat.config.loader.zkprocess.entity.schema.datahost; + +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlType; + +/** + * +* 源文件名:WriteHost.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlType(name = "writeHost") +public class WriteHost { + + @XmlAttribute(required = true) + protected String host; + @XmlAttribute(required = true) + protected String url; + @XmlAttribute(required = true) + protected String password; + @XmlAttribute(required = true) + protected String user; + @XmlAttribute + protected Boolean usingDecrypt; + + private List readHost; + + public String getHost() { + return host; + } + + public void setHost(String host) { + this.host = host; + } + + public String getUrl() { + return url; + } + + public void setUrl(String url) { + this.url = url; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + public Boolean isUsingDecrypt() { + return usingDecrypt; + } + + public void setUsingDecrypt(Boolean usingDecrypt) { + this.usingDecrypt = usingDecrypt; + } + + public List getReadHost() { + if (this.readHost == null) { + readHost = new ArrayList<>(); + } + return readHost; + } + + public void setReadHost(List readHost) { + this.readHost = readHost; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("WriteHost [host="); + builder.append(host); + builder.append(", url="); + builder.append(url); + builder.append(", password="); + builder.append(password); + builder.append(", user="); + builder.append(user); + builder.append(", usingDecrypt="); + builder.append(usingDecrypt); + builder.append(", readHost="); + builder.append(readHost); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/datanode/DataNode.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/datanode/DataNode.java new file mode 100644 index 000000000..956ebcfca --- /dev/null +++ 
b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/datanode/DataNode.java @@ -0,0 +1,71 @@ +package io.mycat.config.loader.zkprocess.entity.schema.datanode; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlType; + +import io.mycat.config.loader.zkprocess.entity.Named; + +/** + * +* 源文件名:DataNode.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlType(name = "dataNode") +public class DataNode implements Named { + + @XmlAttribute(required = true) + private String name; + + @XmlAttribute(required = true) + private String dataHost; + + @XmlAttribute(required = true) + private String database; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDataHost() { + return dataHost; + } + + public void setDataHost(String dataHost) { + this.dataHost = dataHost; + } + + public String getDatabase() { + return database; + } + + public void setDatabase(String database) { + this.database = database; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("DataNode [name="); + builder.append(name); + builder.append(", dataHost="); + builder.append(dataHost); + builder.append(", database="); + builder.append(database); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/schema/ChildTable.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/schema/ChildTable.java new file mode 100644 index 000000000..2e3c38ffc --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/schema/ChildTable.java @@ -0,0 +1,113 @@ +package 
io.mycat.config.loader.zkprocess.entity.schema.schema; + +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlType; + +import io.mycat.config.loader.zkprocess.entity.Named; + +/** + * + * + * 配制子表信息 +* 源文件名:ChildTable.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlType(name = "childTable") +public class ChildTable implements Named { + + @XmlAttribute(required = true) + protected String name; + @XmlAttribute(required = true) + protected String joinKey; + @XmlAttribute(required = true) + protected String parentKey; + @XmlAttribute + protected String primaryKey; + @XmlAttribute + protected Boolean autoIncrement; + + protected List childTable; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getJoinKey() { + return joinKey; + } + + public void setJoinKey(String joinKey) { + this.joinKey = joinKey; + } + + public String getParentKey() { + return parentKey; + } + + public void setParentKey(String parentKey) { + this.parentKey = parentKey; + } + + public String getPrimaryKey() { + return primaryKey; + } + + public void setPrimaryKey(String primaryKey) { + this.primaryKey = primaryKey; + } + + public Boolean isAutoIncrement() { + return autoIncrement; + } + + public void setAutoIncrement(Boolean autoIncrement) { + this.autoIncrement = autoIncrement; + } + + public List getChildTable() { + if (this.childTable == null) { + childTable = new ArrayList<>(); + } + return childTable; + } + + public void setChildTable(List childTable) { + this.childTable = childTable; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + 
builder.append("ChildTable [name="); + builder.append(name); + builder.append(", joinKey="); + builder.append(joinKey); + builder.append(", parentKey="); + builder.append(parentKey); + builder.append(", primaryKey="); + builder.append(primaryKey); + builder.append(", autoIncrement="); + builder.append(autoIncrement); + builder.append(", childTable="); + builder.append(childTable); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/schema/Schema.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/schema/Schema.java new file mode 100644 index 000000000..2d09ba2ab --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/schema/Schema.java @@ -0,0 +1,130 @@ +package io.mycat.config.loader.zkprocess.entity.schema.schema; + +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlType; + +import io.mycat.config.loader.zkprocess.entity.Named; + +/** + * + * *
+ * * + * + * +* 源文件名:Schema.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlType(name = "schema") +public class Schema implements Named { + + /** + * schema的名称 + * @字段说明 name + */ + @XmlAttribute(required = true) + protected String name; + + /** + * 当诠值讴置为 true 时, + * 如果我们执行询句**select * from TESTDB.travelrecord; + * **则MyCat会把询句修改为**select * from travelrecord;** + * @字段说明 checkSQLschema + */ + @XmlAttribute + protected Boolean checkSQLschema; + + /** + * 当诠值设置为某个数值时。每条执行癿SQL询句,如果没有加上limit询句,MyCat也会自劢癿加上所对应癿 + * @字段说明 sqlMaxLimit + */ + @XmlAttribute + protected Integer sqlMaxLimit; + + /** + * 诠属性用二绊定逡辑库刡某个具体癿database上, + * 1.3版本如果配置了dataNode,则不可以配置分片表, + * 1.4可以配置默讣分片,叧雹要配置需要分片的表即可 + * @字段说明 dataNode + */ + @XmlAttribute + protected String dataNode; + + /** + * 配制表信息 + * @字段说明 table + */ + protected List
table; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public Boolean isCheckSQLschema() { + return checkSQLschema; + } + + public void setCheckSQLschema(Boolean checkSQLschema) { + this.checkSQLschema = checkSQLschema; + } + + public Integer getSqlMaxLimit() { + return sqlMaxLimit; + } + + public void setSqlMaxLimit(Integer sqlMaxLimit) { + this.sqlMaxLimit = sqlMaxLimit; + } + + public String getDataNode() { + return dataNode; + } + + public void setDataNode(String dataNode) { + this.dataNode = dataNode; + } + + public List
getTable() { + if (this.table == null) { + table = new ArrayList<>(); + } + return table; + } + + public void setTable(List
table) { + this.table = table; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("Schema [name="); + builder.append(name); + builder.append(", checkSQLschema="); + builder.append(checkSQLschema); + builder.append(", sqlMaxLimit="); + builder.append(sqlMaxLimit); + builder.append(", dataNode="); + builder.append(dataNode); + builder.append(", table="); + builder.append(table); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/schema/Table.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/schema/Table.java new file mode 100644 index 000000000..855425723 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/schema/schema/Table.java @@ -0,0 +1,164 @@ +package io.mycat.config.loader.zkprocess.entity.schema.schema; + +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlType; + +import io.mycat.config.loader.zkprocess.entity.Named; + +/** + *
+ * 用于具体的表信息 +* 源文件名:Table.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlType(name = "table") +public class Table implements Named { + + @XmlAttribute(required = true) + protected String name; + @XmlAttribute + protected String nameSuffix; + @XmlAttribute(required = true) + protected String dataNode; + @XmlAttribute + protected String rule; + @XmlAttribute + protected Boolean ruleRequired; + @XmlAttribute + protected String primaryKey; + @XmlAttribute + protected Boolean autoIncrement; + @XmlAttribute + protected Boolean needAddLimit; + @XmlAttribute + protected String type; + + /** + * 子节点信息 + * @字段说明 childTable + */ + protected List childTable; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDataNode() { + return dataNode; + } + + public void setDataNode(String dataNode) { + this.dataNode = dataNode; + } + + public String getRule() { + return rule; + } + + public void setRule(String rule) { + this.rule = rule; + } + + public List getChildTable() { + if (this.childTable == null) { + childTable = new ArrayList<>(); + } + return childTable; + } + + public void setChildTable(List childTable) { + this.childTable = childTable; + } + + public String getNameSuffix() { + return nameSuffix; + } + + public void setNameSuffix(String nameSuffix) { + this.nameSuffix = nameSuffix; + } + + public Boolean isRuleRequired() { + return ruleRequired; + } + + public void setRuleRequired(Boolean ruleRequired) { + this.ruleRequired = ruleRequired; + } + + public String getPrimaryKey() { + return primaryKey; + } + + public void setPrimaryKey(String primaryKey) { + this.primaryKey = primaryKey; + } + + public Boolean isAutoIncrement() { + return autoIncrement; + } + + public void setAutoIncrement(Boolean autoIncrement) { + this.autoIncrement = 
autoIncrement; + } + + public Boolean isNeedAddLimit() { + return needAddLimit; + } + + public void setNeedAddLimit(Boolean needAddLimit) { + this.needAddLimit = needAddLimit; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("Table [name="); + builder.append(name); + builder.append(", nameSuffix="); + builder.append(nameSuffix); + builder.append(", dataNode="); + builder.append(dataNode); + builder.append(", rule="); + builder.append(rule); + builder.append(", ruleRequired="); + builder.append(ruleRequired); + builder.append(", primaryKey="); + builder.append(primaryKey); + builder.append(", autoIncrement="); + builder.append(autoIncrement); + builder.append(", needAddLimit="); + builder.append(needAddLimit); + builder.append(", type="); + builder.append(type); + builder.append(", childTable="); + builder.append(childTable); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/server/System.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/server/System.java new file mode 100644 index 000000000..659c67e32 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/server/System.java @@ -0,0 +1,77 @@ +package io.mycat.config.loader.zkprocess.entity.server; + +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlType; + +import io.mycat.config.loader.zkprocess.entity.Propertied; +import io.mycat.config.loader.zkprocess.entity.Property; + +/** + * 系统信息 +* 源文件名:System.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月16日 +* 修改作者:liujun +* 修改日期:2016年9月16日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +@XmlAccessorType(XmlAccessType.FIELD) +@XmlType(name = "system") +public class System implements Propertied { + + protected List property; + + public List getProperty() { + if (this.property == null) { + property = new ArrayList<>(); + } + return property; + } + + public void setProperty(List property) { + this.property = property; + } + + @Override + public void addProperty(Property property) { + this.getProperty().add(property); + } + + /** + * 设置最新的方法值 + * 方法描述 + * @param newSet + * @创建日期 2016年9月17日 + */ + public void setNewValue(System newSet) { + if (null != newSet) { + List valuePro = newSet.getProperty(); + // 最新设置的属性值 + for (Property netsetProper : valuePro) { + // 当前已经设置的属性值 + for (Property property : this.getProperty()) { + // 如果新设置的属性名称与当前的已经存在的名称相同,则设置为新值 + if (netsetProper.getName().equals(property.getName())) { + property.setValue(netsetProper.getValue()); + } + } + } + } + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("System [property="); + builder.append(property); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/entity/server/user/User.java b/src/main/java/io/mycat/config/loader/zkprocess/entity/server/user/User.java new file mode 100644 index 000000000..9b3ae6a2a --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/entity/server/user/User.java @@ -0,0 +1,53 @@ +package io.mycat.config.loader.zkprocess.entity.server.user; + +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlType; + +import io.mycat.config.loader.zkprocess.entity.Named; +import io.mycat.config.loader.zkprocess.entity.Propertied; +import io.mycat.config.loader.zkprocess.entity.Property; + +@XmlAccessorType(XmlAccessType.FIELD) +@XmlType(name = 
"user") +public class User implements Propertied, Named { + + @XmlAttribute(required = true) + protected String name; + + protected List property; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public List getProperty() { + if (this.property == null) { + property = new ArrayList<>(); + } + return property; + } + + public void setProperty(List property) { + this.property = property; + } + + @Override + public void addProperty(Property property) { + this.getProperty().add(property); + } + + @Override + public String toString() { + return "User{" + "name='" + name + '\'' + ", property=" + property + '}'; + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/JsonProcessBase.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/JsonProcessBase.java new file mode 100644 index 000000000..4f2db97ea --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/JsonProcessBase.java @@ -0,0 +1,125 @@ +package io.mycat.config.loader.zkprocess.parse; + +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.List; + +import com.google.gson.Gson; +import com.google.gson.reflect.TypeToken; + +import io.mycat.config.loader.zkprocess.entity.Schemas; +import io.mycat.config.loader.zkprocess.entity.schema.datanode.DataNode; + +/** + * json数据与实体类的类的信息 +* 源文件名:XmlProcessBase.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class JsonProcessBase { + + /** + * 进行消息转换的类的信息 + * @字段说明 gson + */ + private Gson gson = new Gson(); + + /** + * 进行json字符串化 + * 方法描述 + * @param obj + * @return + * @创建日期 2016年9月17日 + */ + public String toJsonFromBean(Object obj) { + if (null != obj) { + return gson.toJson(obj); + } + + return null; + } + + /** + * 将json字符串至类,根据指定的类型信息,一般用于集合的转换 + * 方法描述 + * @param json + * @param typeSchema + * @return + * @创建日期 2016年9月17日 + */ + public T toBeanformJson(String json, Type typeSchema) { + T result = this.gson.fromJson(json, typeSchema); + + return result; + } + + /** + * 将json字符串至类,根据指定的类型信息,用于转换单对象实体 + * 方法描述 + * @param + * @param json + * @param typeSchema + * @return + * @创建日期 2016年9月17日 + */ + public T toBeanformJson(String json, Class classinfo) { + T result = this.gson.fromJson(json, classinfo); + + return result; + } + + public static void main(String[] args) { + + DataNode datanode = new DataNode(); + + datanode.setDatabase("db1"); + datanode.setDataHost("os1"); + datanode.setName("dn1"); + + JsonProcessBase jsonParse = new JsonProcessBase(); + + String jsonStr = jsonParse.toJsonFromBean(datanode); + + System.out.println("单对象当前的json:" + jsonStr); + + // 转换实体 + DataNode node = jsonParse.toBeanformJson(jsonStr, DataNode.class); + + System.out.println("单对象:" + node); + + List listNode = new ArrayList<>(); + + listNode.add(datanode); + listNode.add(datanode); + + String listJson = jsonParse.toJsonFromBean(listNode); + + System.out.println("当前集合的json:" + listJson); + + // 转换为集合的bean + Type parseType = new TypeToken>() { + }.getType(); + List list = jsonParse.toBeanformJson(listJson, parseType); + + System.out.println("集合对象:" + list); + + // 复杂对象的转换 + Schemas schema = new Schemas(); + schema.setDataNode(listNode); + + String jsonMultStr = jsonParse.toJsonFromBean(schema); + + System.out.println("复杂单对象当前的json:" + jsonMultStr); + + // 转换实体 + Schemas nodeMult = jsonParse.toBeanformJson(jsonMultStr, Schemas.class); + + System.out.println("复杂单对象:" + 
nodeMult); + + } +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/ParseJsonServiceInf.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/ParseJsonServiceInf.java new file mode 100644 index 000000000..1940e0ef5 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/ParseJsonServiceInf.java @@ -0,0 +1,34 @@ +package io.mycat.config.loader.zkprocess.parse; + +/** + * json转化服务 +* 源文件名:JsonParseServiceInf.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月16日 +* 修改作者:liujun +* 修改日期:2016年9月16日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public interface ParseJsonServiceInf { + + /** + * 将对象T转换为json字符串 + * 方法描述 + * @param data + * @return + * @创建日期 2016年9月16日 + */ + public String parseBeanToJson(T t); + + /** + * 将json字符串转换为javabean对象 + * 方法描述 + * @param json + * @return + * @创建日期 2016年9月16日 + */ + public T parseJsonToBean(String json); + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/ParseXmlServiceInf.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/ParseXmlServiceInf.java new file mode 100644 index 000000000..eb3325c95 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/ParseXmlServiceInf.java @@ -0,0 +1,34 @@ +package io.mycat.config.loader.zkprocess.parse; + +/** + *xml转化服务 +* 源文件名:JsonParseServiceInf.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月16日 +* 修改作者:liujun +* 修改日期:2016年9月16日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public interface ParseXmlServiceInf { + + /** + * 将对象T写入xml文件 + * 方法描述 + * @param data + * @return + * @创建日期 2016年9月16日 + */ + public void parseToXmlWrite(T data, String outputPath, String dataName); + + /** + * 将指定的xml转换为javabean对象 + * 方法描述 + * @param path xml文件路径信息 + * @return + * @创建日期 2016年9月16日 + */ + public T parseXmlToBean(String path); + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/XmlProcessBase.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/XmlProcessBase.java new file mode 100644 index 000000000..8920a4430 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/XmlProcessBase.java @@ -0,0 +1,202 @@ +package io.mycat.config.loader.zkprocess.parse; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import javax.xml.bind.Marshaller; +import javax.xml.bind.Unmarshaller; +import javax.xml.stream.XMLInputFactory; +import javax.xml.stream.XMLStreamException; +import javax.xml.stream.XMLStreamReader; +import javax.xml.transform.stream.StreamSource; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * xml文件操作转换的类的信息 +* 源文件名:XmlProcessBase.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class XmlProcessBase { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger lOG = LoggerFactory.getLogger(XmlProcessBase.class); + + /** + * 转换对象 + * @字段说明 jaxContext + */ + private JAXBContext jaxContext; + + /** + * 反序列化xml文件的对象 + * @字段说明 unmarshaller + */ + private Unmarshaller unmarshaller; + + /** + * 转换的实体对象的class信息 + * @字段说明 parseXmlClass + */ + @SuppressWarnings("rawtypes") + public List parseXmlClass = new ArrayList(); + + /** + * 添加转换的class信息 + * 方法描述 + * @param parseClass + * @创建日期 2016年9月15日 + */ + @SuppressWarnings("rawtypes") + public void addParseClass(Class parseClass) { + this.parseXmlClass.add(parseClass); + } + + /** + * 进行jaxb对象的初始化 + * 方法描述 + * @throws JAXBException + * @创建日期 2016年9月15日 + */ + @SuppressWarnings("rawtypes") + public void initJaxbClass() throws JAXBException { + + // 将集合转换为数组 + Class[] classArray = new Class[parseXmlClass.size()]; + parseXmlClass.toArray(classArray); + + try { + this.jaxContext = JAXBContext.newInstance(classArray, Collections. 
emptyMap()); + } catch (JAXBException e) { + lOG.error("ZookeeperProcessListen initJaxbClass error:Exception info:", e); + throw e; + } + + // 创建解反序化对象 + unmarshaller = jaxContext.createUnmarshaller(); + } + + /** + * 默认将bean序列化为xml对象信息并写入文件 + * 方法描述 + * @param user 用户对象 + * @param inputPath + * @param name 当前的转换xml的dtd文件的信息 + * @创建日期 2016年9月15日 + */ + public void baseParseAndWriteToXml(Object user, String inputPath, String name) throws IOException { + try { + Marshaller marshaller = this.jaxContext.createMarshaller(); + marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE); + marshaller.setProperty(Marshaller.JAXB_FRAGMENT, Boolean.TRUE); + + if (null != name) { + marshaller.setProperty("com.sun.xml.internal.bind.xmlHeaders", + String.format("", name)); + } + + Path path = Paths.get(inputPath); + + OutputStream out = Files.newOutputStream(path, StandardOpenOption.CREATE, + StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE); + + marshaller.marshal(user, out); + + } catch (JAXBException e) { + lOG.error("ZookeeperProcessListen parseToXml error:Exception info:", e); + } catch (IOException e) { + lOG.error("ZookeeperProcessListen parseToXml error:Exception info:", e); + } + } + + /** + * 默认将bean序列化为xml对象信息并写入文件 + * 方法描述 + * @param user 用户对象 + * @param inputPath + * @param name 当前的转换xml的dtd文件的信息 + * @创建日期 2016年9月15日 + */ + @SuppressWarnings("restriction") + public void baseParseAndWriteToXml(Object user, String inputPath, String name, Map map) + throws IOException { + try { + Marshaller marshaller = this.jaxContext.createMarshaller(); + marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE); + marshaller.setProperty(Marshaller.JAXB_FRAGMENT, Boolean.TRUE); + + if (null != name) { + marshaller.setProperty("com.sun.xml.internal.bind.xmlHeaders", + String.format("", name)); + } + + if (null != map && !map.isEmpty()) { + for (Entry entry : map.entrySet()) { + marshaller.setProperty(entry.getKey(), entry.getValue()); + } + } + + 
Path path = Paths.get(inputPath); + + OutputStream out = Files.newOutputStream(path, StandardOpenOption.CREATE, + StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.WRITE); + + marshaller.marshal(user, out); + + } catch (JAXBException e) { + lOG.error("ZookeeperProcessListen parseToXml error:Exception info:", e); + } catch (IOException e) { + lOG.error("ZookeeperProcessListen parseToXml error:Exception info:", e); + } + } + + /** + * 默认转换将指定的xml转化为 + * 方法描述 + * @param inputStream + * @param fileName + * @return + * @throws JAXBException + * @throws XMLStreamException + * @创建日期 2016年9月16日 + */ + public Object baseParseXmlToBean(String fileName) throws JAXBException, XMLStreamException { + // 搜索当前转化的文件 + InputStream inputStream = XmlProcessBase.class.getResourceAsStream(fileName); + + // 如果能够搜索到文件 + if (inputStream != null) { + // 进行文件反序列化信息 + XMLInputFactory xif = XMLInputFactory.newFactory(); + xif.setProperty(XMLInputFactory.SUPPORT_DTD, false); + XMLStreamReader xmlRead = xif.createXMLStreamReader(new StreamSource(inputStream)); + + return unmarshaller.unmarshal(xmlRead); + } + + return null; + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/cache/json/EhcacheJsonParse.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/cache/json/EhcacheJsonParse.java new file mode 100644 index 000000000..4b00add9a --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/cache/json/EhcacheJsonParse.java @@ -0,0 +1,30 @@ +package io.mycat.config.loader.zkprocess.parse.entryparse.cache.json; + +import io.mycat.config.loader.zkprocess.entity.cache.Ehcache; +import io.mycat.config.loader.zkprocess.parse.JsonProcessBase; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; + +/** + * 进行Function节点的转换 +* 源文件名:FunctionJsonParse.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月17日 +* 修改作者:liujun +* 修改日期:2016年9月17日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class EhcacheJsonParse extends JsonProcessBase implements ParseJsonServiceInf { + + @Override + public String parseBeanToJson(Ehcache t) { + return this.toJsonFromBean(t); + } + + @Override + public Ehcache parseJsonToBean(String json) { + return this.toBeanformJson(json, Ehcache.class); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/cache/xml/EhcacheParseXmlImpl.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/cache/xml/EhcacheParseXmlImpl.java new file mode 100644 index 000000000..e07076cec --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/cache/xml/EhcacheParseXmlImpl.java @@ -0,0 +1,89 @@ +package io.mycat.config.loader.zkprocess.parse.entryparse.cache.xml; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import javax.xml.bind.JAXBException; +import javax.xml.bind.Marshaller; +import javax.xml.stream.XMLStreamException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.config.loader.zkprocess.entity.cache.Ehcache; +import io.mycat.config.loader.zkprocess.parse.ParseXmlServiceInf; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; + +/** + * rule.xml与javabean之间的转化 +* 源文件名:SchemasParseXmlImpl.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月16日 +* 修改作者:liujun +* 修改日期:2016年9月16日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class EhcacheParseXmlImpl implements ParseXmlServiceInf { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger lOG = LoggerFactory.getLogger(EhcacheParseXmlImpl.class); + + /** + * 基本的转换类的信息 + * @字段说明 parseBean + */ + private XmlProcessBase parseBean; + + /** + * 转换的类的信息 + * 构造方法 + * @param parseBase + */ + public EhcacheParseXmlImpl(XmlProcessBase parseBase) { + + this.parseBean = parseBase; + // 添加xml的转换的实体类信息 + parseBean.addParseClass(Ehcache.class); + } + + @Override + public Ehcache parseXmlToBean(String path) { + + Ehcache schema = null; + + try { + schema = (Ehcache) this.parseBean.baseParseXmlToBean(path); + } catch (JAXBException e) { + e.printStackTrace(); + lOG.error("EhcacheParseXmlImpl parseXmlToBean JAXBException", e); + } catch (XMLStreamException e) { + e.printStackTrace(); + lOG.error("EhcacheParseXmlImpl parseXmlToBean XMLStreamException", e); + } + + return schema; + } + + @Override + public void parseToXmlWrite(Ehcache data, String outputFile, String dataName) { + try { + // 设置 + Map paramMap = new HashMap<>(); + paramMap.put(Marshaller.JAXB_NO_NAMESPACE_SCHEMA_LOCATION, "ehcache.xsd"); + + this.parseBean.baseParseAndWriteToXml(data, outputFile, dataName, paramMap); + } catch (IOException e) { + e.printStackTrace(); + lOG.error("EhcacheParseXmlImpl parseToXmlWrite IOException", e); + } + } + +} + + diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/rule/json/FunctionJsonParse.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/rule/json/FunctionJsonParse.java new file mode 100644 index 000000000..840f746ff --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/rule/json/FunctionJsonParse.java @@ -0,0 +1,40 @@ +package io.mycat.config.loader.zkprocess.parse.entryparse.rule.json; + +import java.lang.reflect.Type; +import java.util.List; + +import com.google.gson.reflect.TypeToken; + +import 
io.mycat.config.loader.zkprocess.entity.rule.function.Function; +import io.mycat.config.loader.zkprocess.parse.JsonProcessBase; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; + +/** + * 进行Function节点的转换 +* 源文件名:FunctionJsonParse.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月17日 +* 修改作者:liujun +* 修改日期:2016年9月17日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public class FunctionJsonParse extends JsonProcessBase implements ParseJsonServiceInf> { + + @Override + public String parseBeanToJson(List t) { + return this.toJsonFromBean(t); + } + + @Override + public List parseJsonToBean(String json) { + + // 转换为集合的bean + Type parseType = new TypeToken>() { + }.getType(); + + return this.toBeanformJson(json, parseType); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/rule/json/TableRuleJsonParse.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/rule/json/TableRuleJsonParse.java new file mode 100644 index 000000000..4a38d77dc --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/rule/json/TableRuleJsonParse.java @@ -0,0 +1,40 @@ +package io.mycat.config.loader.zkprocess.parse.entryparse.rule.json; + +import java.lang.reflect.Type; +import java.util.List; + +import com.google.gson.reflect.TypeToken; + +import io.mycat.config.loader.zkprocess.entity.rule.tablerule.TableRule; +import io.mycat.config.loader.zkprocess.parse.JsonProcessBase; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; + +/** + * 进行TableRule节点的转换 +* 源文件名:TableRuleJsonParse.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月17日 +* 修改作者:liujun +* 修改日期:2016年9月17日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class TableRuleJsonParse extends JsonProcessBase implements ParseJsonServiceInf> { + + @Override + public String parseBeanToJson(List t) { + return this.toJsonFromBean(t); + } + + @Override + public List parseJsonToBean(String json) { + + // 转换为集合的bean + Type parseType = new TypeToken>() { + }.getType(); + + return this.toBeanformJson(json, parseType); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/rule/xml/RuleParseXmlImpl.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/rule/xml/RuleParseXmlImpl.java new file mode 100644 index 000000000..c01746d27 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/rule/xml/RuleParseXmlImpl.java @@ -0,0 +1,80 @@ +package io.mycat.config.loader.zkprocess.parse.entryparse.rule.xml; + +import java.io.IOException; + +import javax.xml.bind.JAXBException; +import javax.xml.stream.XMLStreamException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.config.loader.zkprocess.entity.Rules; +import io.mycat.config.loader.zkprocess.parse.ParseXmlServiceInf; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; + +/** + * rule.xml与javabean之间的转化 +* 源文件名:SchemasParseXmlImpl.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月16日 +* 修改作者:liujun +* 修改日期:2016年9月16日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class RuleParseXmlImpl implements ParseXmlServiceInf { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger lOG = LoggerFactory.getLogger(RuleParseXmlImpl.class); + + /** + * 基本的转换类的信息 + * @字段说明 parseBean + */ + private XmlProcessBase parseBean; + + /** + * 转换的类的信息 + * 构造方法 + * @param parseBase + */ + public RuleParseXmlImpl(XmlProcessBase parseBase) { + + this.parseBean = parseBase; + // 添加xml的转换的实体类信息 + parseBean.addParseClass(Rules.class); + } + + @Override + public Rules parseXmlToBean(String path) { + + Rules schema = null; + + try { + schema = (Rules) this.parseBean.baseParseXmlToBean(path); + } catch (JAXBException e) { + e.printStackTrace(); + lOG.error("RulesParseXmlImpl parseXmlToBean JAXBException", e); + } catch (XMLStreamException e) { + e.printStackTrace(); + lOG.error("RulesParseXmlImpl parseXmlToBean XMLStreamException", e); + } + + return schema; + } + + @Override + public void parseToXmlWrite(Rules data, String outputFile, String dataName) { + try { + this.parseBean.baseParseAndWriteToXml(data, outputFile, dataName); + } catch (IOException e) { + e.printStackTrace(); + lOG.error("RulesParseXmlImpl parseToXmlWrite IOException", e); + } + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/schema/json/DataHostJsonParse.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/schema/json/DataHostJsonParse.java new file mode 100644 index 000000000..966ff0fe7 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/schema/json/DataHostJsonParse.java @@ -0,0 +1,40 @@ +package io.mycat.config.loader.zkprocess.parse.entryparse.schema.json; + +import java.lang.reflect.Type; +import java.util.List; + +import com.google.gson.reflect.TypeToken; + +import io.mycat.config.loader.zkprocess.entity.schema.datahost.DataHost; +import io.mycat.config.loader.zkprocess.parse.JsonProcessBase; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; + +/** + * 
进行datahost节点的转换 +* 源文件名:DataHostJsonParse.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月17日 +* 修改作者:liujun +* 修改日期:2016年9月17日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public class DataHostJsonParse extends JsonProcessBase implements ParseJsonServiceInf> { + + @Override + public String parseBeanToJson(List t) { + return this.toJsonFromBean(t); + } + + @Override + public List parseJsonToBean(String json) { + + // 转换为集合的bean + Type parseType = new TypeToken>() { + }.getType(); + + return this.toBeanformJson(json, parseType); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/schema/json/DataNodeJsonParse.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/schema/json/DataNodeJsonParse.java new file mode 100644 index 000000000..f8919bbef --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/schema/json/DataNodeJsonParse.java @@ -0,0 +1,39 @@ +package io.mycat.config.loader.zkprocess.parse.entryparse.schema.json; + +import java.lang.reflect.Type; +import java.util.List; + +import com.google.gson.reflect.TypeToken; + +import io.mycat.config.loader.zkprocess.entity.schema.datanode.DataNode; +import io.mycat.config.loader.zkprocess.parse.JsonProcessBase; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; + +/** + * 进行将datanode数据与json的转化 +* 源文件名:DataNodeJsonParse.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月17日 +* 修改作者:liujun +* 修改日期:2016年9月17日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class DataNodeJsonParse extends JsonProcessBase implements ParseJsonServiceInf> { + + @Override + public String parseBeanToJson(List t) { + return this.toJsonFromBean(t); + } + + @Override + public List parseJsonToBean(String json) { + // 转换为集合的bean + Type parseType = new TypeToken>() { + }.getType(); + + return this.toBeanformJson(json, parseType); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/schema/json/SchemaJsonParse.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/schema/json/SchemaJsonParse.java new file mode 100644 index 000000000..da3e8c46c --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/schema/json/SchemaJsonParse.java @@ -0,0 +1,39 @@ +package io.mycat.config.loader.zkprocess.parse.entryparse.schema.json; + +import java.lang.reflect.Type; +import java.util.List; + +import com.google.gson.reflect.TypeToken; + +import io.mycat.config.loader.zkprocess.entity.schema.schema.Schema; +import io.mycat.config.loader.zkprocess.parse.JsonProcessBase; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; + +/** + * 进行schema部分的转换 +* 源文件名:SchemaJsonParse.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月17日 +* 修改作者:liujun +* 修改日期:2016年9月17日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class SchemaJsonParse extends JsonProcessBase implements ParseJsonServiceInf> { + + @Override + public String parseBeanToJson(List t) { + return this.toJsonFromBean(t); + } + + @Override + public List parseJsonToBean(String json) { + // 转换为集合的bean + Type parseType = new TypeToken>() { + }.getType(); + + return this.toBeanformJson(json, parseType); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/schema/xml/SchemasParseXmlImpl.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/schema/xml/SchemasParseXmlImpl.java new file mode 100644 index 000000000..f6ef5ccd5 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/schema/xml/SchemasParseXmlImpl.java @@ -0,0 +1,80 @@ +package io.mycat.config.loader.zkprocess.parse.entryparse.schema.xml; + +import java.io.IOException; + +import javax.xml.bind.JAXBException; +import javax.xml.stream.XMLStreamException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.config.loader.zkprocess.entity.Schemas; +import io.mycat.config.loader.zkprocess.parse.ParseXmlServiceInf; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; + +/** + * schema.xml与javabean之间的转化 +* 源文件名:SchemasParseXmlImpl.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月16日 +* 修改作者:liujun +* 修改日期:2016年9月16日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class SchemasParseXmlImpl implements ParseXmlServiceInf { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger lOG = LoggerFactory.getLogger(SchemasParseXmlImpl.class); + + /** + * 基本的转换类的信息 + * @字段说明 parseBean + */ + private XmlProcessBase parseBean; + + /** + * 转换的类的信息 + * 构造方法 + * @param parseBase + */ + public SchemasParseXmlImpl(XmlProcessBase parseBase) { + + this.parseBean = parseBase; + // 添加xml的转换的实体类信息 + parseBean.addParseClass(Schemas.class); + } + + @Override + public Schemas parseXmlToBean(String path) { + + Schemas schema = null; + + try { + schema = (Schemas) this.parseBean.baseParseXmlToBean(path); + } catch (JAXBException e) { + e.printStackTrace(); + lOG.error("SchemasParseXmlImpl parseXmlToBean JAXBException", e); + } catch (XMLStreamException e) { + e.printStackTrace(); + lOG.error("SchemasParseXmlImpl parseXmlToBean XMLStreamException", e); + } + + return schema; + } + + @Override + public void parseToXmlWrite(Schemas data, String outputFile, String dataName) { + try { + this.parseBean.baseParseAndWriteToXml(data, outputFile, dataName); + } catch (IOException e) { + e.printStackTrace(); + lOG.error("SchemasParseXmlImpl parseToXmlWrite IOException", e); + } + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/server/json/SystemJsonParse.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/server/json/SystemJsonParse.java new file mode 100644 index 000000000..da4e6caf1 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/server/json/SystemJsonParse.java @@ -0,0 +1,31 @@ +package io.mycat.config.loader.zkprocess.parse.entryparse.server.json; + +import io.mycat.config.loader.zkprocess.entity.server.System; +import io.mycat.config.loader.zkprocess.parse.JsonProcessBase; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; + +/** + * 进行datahost节点的转换 +* 源文件名:DataHostJsonParse.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月17日 +* 
修改作者:liujun +* 修改日期:2016年9月17日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public class SystemJsonParse extends JsonProcessBase implements ParseJsonServiceInf { + + @Override + public String parseBeanToJson(System t) { + return this.toJsonFromBean(t); + } + + @Override + public System parseJsonToBean(String json) { + + return this.toBeanformJson(json, System.class); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/server/json/UserJsonParse.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/server/json/UserJsonParse.java new file mode 100644 index 000000000..53e815ef5 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/server/json/UserJsonParse.java @@ -0,0 +1,40 @@ +package io.mycat.config.loader.zkprocess.parse.entryparse.server.json; + +import java.lang.reflect.Type; +import java.util.List; + +import com.google.gson.reflect.TypeToken; + +import io.mycat.config.loader.zkprocess.entity.server.user.User; +import io.mycat.config.loader.zkprocess.parse.JsonProcessBase; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; + +/** + * 进行datahost节点的转换 +* 源文件名:DataHostJsonParse.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月17日 +* 修改作者:liujun +* 修改日期:2016年9月17日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class UserJsonParse extends JsonProcessBase implements ParseJsonServiceInf> { + + @Override + public String parseBeanToJson(List t) { + return this.toJsonFromBean(t); + } + + @Override + public List parseJsonToBean(String json) { + + // 转换为集合的bean + Type parseType = new TypeToken>() { + }.getType(); + + return this.toBeanformJson(json, parseType); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/server/xml/ServerParseXmlImpl.java b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/server/xml/ServerParseXmlImpl.java new file mode 100644 index 000000000..ce5c0da1f --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/parse/entryparse/server/xml/ServerParseXmlImpl.java @@ -0,0 +1,80 @@ +package io.mycat.config.loader.zkprocess.parse.entryparse.server.xml; + +import java.io.IOException; + +import javax.xml.bind.JAXBException; +import javax.xml.stream.XMLStreamException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.config.loader.zkprocess.entity.Server; +import io.mycat.config.loader.zkprocess.parse.ParseXmlServiceInf; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; + +/** + * schema.xml与javabean之间的转化 +* 源文件名:ServerParseXmlImpl.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月16日 +* 修改作者:liujun +* 修改日期:2016年9月16日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class ServerParseXmlImpl implements ParseXmlServiceInf { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger lOG = LoggerFactory.getLogger(ServerParseXmlImpl.class); + + /** + * 基本的转换类的信息 + * @字段说明 parseBean + */ + private XmlProcessBase parseBean; + + /** + * 转换的类的信息 + * 构造方法 + * @param parseBase + */ + public ServerParseXmlImpl(XmlProcessBase parseBase) { + + this.parseBean = parseBase; + // 添加xml的转换的实体类信息 + parseBean.addParseClass(Server.class); + } + + @Override + public Server parseXmlToBean(String path) { + + Server server = null; + + try { + server = (Server) this.parseBean.baseParseXmlToBean(path); + } catch (JAXBException e) { + e.printStackTrace(); + lOG.error("ServerParseXmlImpl parseXmlToBean JAXBException", e); + } catch (XMLStreamException e) { + e.printStackTrace(); + lOG.error("ServerParseXmlImpl parseXmlToBean XMLStreamException", e); + } + + return server; + } + + @Override + public void parseToXmlWrite(Server data, String outputFile, String dataName) { + try { + this.parseBean.baseParseAndWriteToXml(data, outputFile, dataName); + } catch (IOException e) { + e.printStackTrace(); + lOG.error("ServerParseXmlImpl parseToXmlWrite IOException", e); + } + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/BindataToZK.java b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/BindataToZK.java new file mode 100644 index 000000000..215064d14 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/BindataToZK.java @@ -0,0 +1,38 @@ +package io.mycat.config.loader.zkprocess.xmltozk; + +import com.google.common.io.Files; +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.zktoxml.listen.RuleszkToxmlLoader; +import io.mycat.config.model.SystemConfig; +import io.mycat.util.ZKUtils; +import org.apache.curator.framework.CuratorFramework; + +import java.io.File; +import java.nio.file.Paths; + +/** + * Created by magicdoom on 2016/10/26. 
+ * only for test + */ +public class BindataToZK { + public static void main(String[] args) { + File file = new File(SystemConfig.getHomePath()+ "/conf","ruledata" ); + if(file.exists()&&file.isDirectory()) + { + File[] binFiles=file.listFiles(); + for (File binFile : binFiles) { + + String path= ZKUtils.getZKBasePath()+"ruledata/"+binFile.getName(); + CuratorFramework zk= ZKUtils.getConnection(); + try { + zk.create().creatingParentsIfNeeded().forPath(path) ; + zk.setData().forPath(path, Files.toByteArray(binFile)) ; + } catch (Exception e) { + e.printStackTrace(); + } + + } + + } + } +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/XmltoZkMain.java b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/XmltoZkMain.java new file mode 100644 index 000000000..13eec695a --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/XmltoZkMain.java @@ -0,0 +1,88 @@ +package io.mycat.config.loader.zkprocess.xmltozk; + +import javax.xml.bind.JAXBException; + +import com.alibaba.fastjson.JSON; +import io.mycat.config.loader.zkprocess.zookeeper.ClusterInfo; +import org.apache.curator.framework.CuratorFramework; + +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.config.loader.zkprocess.comm.ZookeeperProcessListen; +import io.mycat.config.loader.zkprocess.console.ZkNofiflyCfg; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.xmltozk.listen.EcachesxmlTozkLoader; +import io.mycat.config.loader.zkprocess.xmltozk.listen.OthermsgTozkLoader; +import io.mycat.config.loader.zkprocess.xmltozk.listen.RulesxmlTozkLoader; +import io.mycat.config.loader.zkprocess.xmltozk.listen.SchemasxmlTozkLoader; +import io.mycat.config.loader.zkprocess.xmltozk.listen.SequenceTozkLoader; +import io.mycat.config.loader.zkprocess.xmltozk.listen.ServerxmlTozkLoader; +import 
io.mycat.util.ZKUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class XmltoZkMain { + private static final Logger LOGGER = LoggerFactory.getLogger(XmltoZkMain.class); + public static void main(String[] args) throws JAXBException, InterruptedException { + // 加载zk总服务 + ZookeeperProcessListen zkListen = new ZookeeperProcessListen(); + + // 得到集群名称 + String custerName = ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_CLUSTERID); + // 得到基本路径 + String basePath = ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_BASE.getKey(); + basePath = basePath + ZookeeperPath.ZK_SEPARATOR.getKey() + custerName; + zkListen.setBasePath(basePath); + + // 获得zk的连接信息 + CuratorFramework zkConn = buildConnection(ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_URL)); + + // 获得公共的xml转换器对象 + XmlProcessBase xmlProcess = new XmlProcessBase(); + + // 进行xmltozk的schema文件的操作 + new SchemasxmlTozkLoader(zkListen, zkConn, xmlProcess); + + // 进行xmltozk的server文件的操作 + new ServerxmlTozkLoader(zkListen, zkConn, xmlProcess); + + // 进行rule文件到zk的操作 + new RulesxmlTozkLoader(zkListen, zkConn, xmlProcess); + + // 进行序列信息入zk中 + new SequenceTozkLoader(zkListen, zkConn, xmlProcess); + + // 缓存配制信息 + new EcachesxmlTozkLoader(zkListen, zkConn, xmlProcess); + + // 将其他信息加载的zk中 + new OthermsgTozkLoader(zkListen, zkConn, xmlProcess); + + // 初始化xml转换操作 + xmlProcess.initJaxbClass(); + + + // 加载通知进程 + zkListen.notifly(ZkNofiflyCfg.ZK_NOTIFLY_LOAD_ALL.getKey()); + + + + String clusterNodes= ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_CLUSTER_NODES); + String clusterSize= ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_CLUSTER_SIZE); + ClusterInfo info=new ClusterInfo(); + info.setClusterNodes(clusterNodes); + info.setClusterSize(Integer.parseInt(clusterSize)); + try { + zkConn.setData().forPath(basePath, JSON.toJSONBytes(info)); + } catch (Exception e) { + LOGGER.error("error",e); + } + + } + + private static CuratorFramework buildConnection(String url) { + + return 
ZKUtils.getConnection(); + } +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/EcachesxmlTozkLoader.java b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/EcachesxmlTozkLoader.java new file mode 100644 index 000000000..5f25b6021 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/EcachesxmlTozkLoader.java @@ -0,0 +1,165 @@ +package io.mycat.config.loader.zkprocess.xmltozk.listen; + +import static com.google.common.base.Preconditions.checkNotNull; + +import java.io.IOException; +import java.io.InputStream; + +import org.apache.curator.framework.CuratorFramework; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.alibaba.fastjson.util.IOUtils; + +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.NotiflyService; +import io.mycat.config.loader.zkprocess.comm.ZookeeperProcessListen; +import io.mycat.config.loader.zkprocess.entity.cache.Ehcache; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; +import io.mycat.config.loader.zkprocess.parse.ParseXmlServiceInf; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.parse.entryparse.cache.json.EhcacheJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.cache.xml.EhcacheParseXmlImpl; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkMultLoader; + +/** + * 进行从ecache.xml加载到zk中加载 +* 源文件名:SchemasLoader.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class EcachesxmlTozkLoader extends ZkMultLoader implements NotiflyService { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(EcachesxmlTozkLoader.class); + + /** + * 当前文件中的zkpath信息 + * @字段说明 currZkPath + */ + private final String currZkPath; + + /** + * Ehcache文件的路径信息 + * @字段说明 SCHEMA_PATH + */ + private static final String EHCACHE_PATH = ZookeeperPath.ZK_LOCAL_CFG_PATH.getKey() + "ehcache.xml"; + + /** + * 缓存文件名称 + * @字段说明 CACHESERVER_NAME + */ + private static final String CACHESERVER_NAME = "cacheservice.properties"; + + /** + * 缓存的xml文件配制信息 + * @字段说明 EHCACHE_NAME + */ + private static final String EHCACHE_NAME = "ehcache.xml"; + + /** + * ehcache的xml的转换信息 + * @字段说明 parseEhcacheXMl + */ + private final ParseXmlServiceInf parseEcacheXMl; + + /** + * 表的路由信息 + * @字段说明 parseJsonService + */ + private ParseJsonServiceInf parseJsonEhcacheService = new EhcacheJsonParse(); + + public EcachesxmlTozkLoader(ZookeeperProcessListen zookeeperListen, CuratorFramework curator, + XmlProcessBase xmlParseBase) { + + this.setCurator(curator); + + // 获得当前集群的名称 + String schemaPath = zookeeperListen.getBasePath(); + schemaPath = schemaPath + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_CACHE.getKey(); + + currZkPath = schemaPath; + // 将当前自己注册为事件接收对象 + zookeeperListen.addListen(schemaPath, this); + + // 生成xml与类的转换信息 + parseEcacheXMl = new EhcacheParseXmlImpl(xmlParseBase); + } + + @Override + public boolean notiflyProcess() throws Exception { + // 1,读取本地的xml文件 + Ehcache Ehcache = this.parseEcacheXMl.parseXmlToBean(EHCACHE_PATH); + LOGGER.info("EhcachexmlTozkLoader notiflyProcessxml to zk Ehcache Object :" + Ehcache); + // 将实体信息写入至zk中 + this.xmlTozkEhcacheJson(currZkPath, Ehcache); + + LOGGER.info("EhcachexmlTozkLoader notiflyProcess xml to zk is success"); + + return true; + } + + /** + * 将xml文件的信息写入到zk中 + * 方法描述 + * @param basePath 基本路径 + * @param schema schema文件的信息 + * @throws Exception 异常信息 + * @创建日期 
2016年9月17日 + */ + private void xmlTozkEhcacheJson(String basePath, Ehcache ehcache) throws Exception { + // ehcache节点信息 + String ehcacheFile = ZookeeperPath.ZK_SEPARATOR.getKey() + EHCACHE_NAME; + String ehcacheJson = this.parseJsonEhcacheService.parseBeanToJson(ehcache); + this.checkAndwriteString(basePath, ehcacheFile, ehcacheJson); + + // 读取文件信息 + String cacheServicePath = ZookeeperPath.ZK_SEPARATOR.getKey() + CACHESERVER_NAME; + String serviceValue = this.readSeqFile(CACHESERVER_NAME); + this.checkAndwriteString(basePath, cacheServicePath, serviceValue); + } + + /** + * 读取 mapFile文件的信息 + * 方法描述 + * @param name 名称信息 + * @return + * @创建日期 2016年9月18日 + */ + private String readSeqFile(String name) { + + StringBuilder mapFileStr = new StringBuilder(); + + String path = ZookeeperPath.ZK_LOCAL_CFG_PATH.getKey() + name; + // 加载数据 + InputStream input = EcachesxmlTozkLoader.class.getResourceAsStream(path); + + checkNotNull(input, "read SeqFile file curr Path :" + path + " is null! must is not null"); + + byte[] buffers = new byte[256]; + + try { + int readIndex = -1; + + while ((readIndex = input.read(buffers)) != -1) { + mapFileStr.append(new String(buffers, 0, readIndex)); + } + } catch (IOException e) { + e.printStackTrace(); + LOGGER.error("EhcachexmlTozkLoader readMapFile IOException", e); + } finally { + IOUtils.close(input); + } + + return mapFileStr.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/OthermsgTozkLoader.java b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/OthermsgTozkLoader.java new file mode 100644 index 000000000..bf93186e4 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/OthermsgTozkLoader.java @@ -0,0 +1,85 @@ +package io.mycat.config.loader.zkprocess.xmltozk.listen; + +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.utils.ZKPaths; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import 
io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.NotiflyService; +import io.mycat.config.loader.zkprocess.comm.ZookeeperProcessListen; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkMultLoader; + +/** + * 其他一些信息加载到zk中 +* 源文件名:SchemasLoader.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public class OthermsgTozkLoader extends ZkMultLoader implements NotiflyService { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(OthermsgTozkLoader.class); + + /** + * 当前文件中的zkpath信息 + * @字段说明 currZkPath + */ + private final String currZkPath; + + public OthermsgTozkLoader(ZookeeperProcessListen zookeeperListen, CuratorFramework curator, + XmlProcessBase xmlParseBase) { + + this.setCurator(curator); + + // 获得当前集群的名称 + String schemaPath = zookeeperListen.getBasePath(); + + currZkPath = schemaPath; + // 将当前自己注册为事件接收对象 + zookeeperListen.addListen(schemaPath, this); + + } + + @Override + public boolean notiflyProcess() throws Exception { + // 添加line目录,用作集群中节点,在线的基本目录信息 + String line = currZkPath + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_LINE.getKey(); + ZKPaths.mkdirs(this.getCurator().getZookeeperClient().getZooKeeper(), line); + LOGGER.info("OthermsgTozkLoader zookeeper mkdir " + line + " success"); + + // 添加序列目录信息 + String seqLine = currZkPath + ZookeeperPath.ZK_SEPARATOR.getKey() + + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE.getKey(); + seqLine = seqLine + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_INSTANCE.getKey(); + ZKPaths.mkdirs(this.getCurator().getZookeeperClient().getZooKeeper(), seqLine); + + String seqLeader = currZkPath + ZookeeperPath.ZK_SEPARATOR.getKey() + + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE.getKey(); + seqLeader = seqLeader + 
ZookeeperPath.ZK_SEPARATOR.getKey() + + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_LEADER.getKey(); + ZKPaths.mkdirs(this.getCurator().getZookeeperClient().getZooKeeper(), seqLeader); + + String incrSeq = currZkPath + ZookeeperPath.ZK_SEPARATOR.getKey() + + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE.getKey(); + incrSeq = incrSeq + ZookeeperPath.ZK_SEPARATOR.getKey() + + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_INCREMENT_SEQ.getKey(); + ZKPaths.mkdirs(this.getCurator().getZookeeperClient().getZooKeeper(), incrSeq); + + LOGGER.info("OthermsgTozkLoader zookeeper mkdir " + seqLine + " success"); + LOGGER.info("OthermsgTozkLoader zookeeper mkdir " + seqLeader + " success"); + LOGGER.info("OthermsgTozkLoader zookeeper mkdir " + incrSeq + " success"); + + return true; + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/RulesxmlTozkLoader.java b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/RulesxmlTozkLoader.java new file mode 100644 index 000000000..abfa933bb --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/RulesxmlTozkLoader.java @@ -0,0 +1,202 @@ +package io.mycat.config.loader.zkprocess.xmltozk.listen; + +import static com.google.common.base.Preconditions.checkNotNull; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; + +import org.apache.curator.framework.CuratorFramework; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.alibaba.fastjson.util.IOUtils; + +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.NotiflyService; +import io.mycat.config.loader.zkprocess.comm.ZookeeperProcessListen; +import io.mycat.config.loader.zkprocess.console.ParseParamEnum; +import io.mycat.config.loader.zkprocess.entity.Property; +import io.mycat.config.loader.zkprocess.entity.Rules; +import io.mycat.config.loader.zkprocess.entity.rule.function.Function; +import 
io.mycat.config.loader.zkprocess.entity.rule.tablerule.TableRule; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; +import io.mycat.config.loader.zkprocess.parse.ParseXmlServiceInf; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.parse.entryparse.rule.json.FunctionJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.rule.json.TableRuleJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.rule.xml.RuleParseXmlImpl; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkMultLoader; + +/** + * 进行从rule.xml加载到zk中加载 +* 源文件名:SchemasLoader.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public class RulesxmlTozkLoader extends ZkMultLoader implements NotiflyService { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(RulesxmlTozkLoader.class); + + /** + * 当前文件中的zkpath信息 + * @字段说明 currZkPath + */ + private final String currZkPath; + + /** + * Rules文件的路径信息 + * @字段说明 SCHEMA_PATH + */ + private static final String RULE_PATH = ZookeeperPath.ZK_LOCAL_CFG_PATH.getKey() + "rule.xml"; + + /** + * Rules的xml的转换信息 + * @字段说明 parseRulesXMl + */ + private ParseXmlServiceInf parseRulesXMl; + + /** + * 表的路由信息 + * @字段说明 parseJsonService + */ + private ParseJsonServiceInf> parseJsonTableRuleService = new TableRuleJsonParse(); + + /** + * 表对应的字段信息 + * @字段说明 parseJsonFunctionService + */ + private ParseJsonServiceInf> parseJsonFunctionService = new FunctionJsonParse(); + + public RulesxmlTozkLoader(ZookeeperProcessListen zookeeperListen, CuratorFramework curator, + XmlProcessBase xmlParseBase) { + + this.setCurator(curator); + + // 获得当前集群的名称 + String schemaPath = zookeeperListen.getBasePath(); + schemaPath = schemaPath + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_RULE.getKey(); + + currZkPath = 
schemaPath; + // 将当前自己注册为事件接收对象 + zookeeperListen.addListen(schemaPath, this); + + // 生成xml与类的转换信息 + parseRulesXMl = new RuleParseXmlImpl(xmlParseBase); + } + + @Override + public boolean notiflyProcess() throws Exception { + // 1,读取本地的xml文件 + Rules Rules = this.parseRulesXMl.parseXmlToBean(RULE_PATH); + LOGGER.info("RulesxmlTozkLoader notiflyProcessxml to zk Rules Object :" + Rules); + // 将实体信息写入至zk中 + this.xmlTozkRulesJson(currZkPath, Rules); + + LOGGER.info("RulesxmlTozkLoader notiflyProcess xml to zk is success"); + + return true; + } + + /** + * 将xml文件的信息写入到zk中 + * 方法描述 + * @param basePath 基本路径 + * @param schema schema文件的信息 + * @throws Exception 异常信息 + * @创建日期 2016年9月17日 + */ + private void xmlTozkRulesJson(String basePath, Rules Rules) throws Exception { + // tablerune节点信息 + String tableRulePath = ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_RULE_TABLERULE.getKey(); + String tableRuleJson = this.parseJsonTableRuleService.parseBeanToJson(Rules.getTableRule()); + this.checkAndwriteString(basePath, tableRulePath, tableRuleJson); + + // 读取mapFile文件,并加入到function中 + this.readMapFileAddFunction(Rules.getFunction()); + + // 方法设置信息 + String functionPath = ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_RULE_FUNCTION.getKey(); + String functionJson = this.parseJsonFunctionService.parseBeanToJson(Rules.getFunction()); + this.checkAndwriteString(basePath, functionPath, functionJson); + } + + /** + * 读取序列配制文件便利店 + * 方法描述 + * @param functionList + * @创建日期 2016年9月18日 + */ + private void readMapFileAddFunction(List functionList) { + + List tempData = new ArrayList<>(); + + for (Function function : functionList) { + List proList = function.getProperty(); + if (null != proList && !proList.isEmpty()) { + // 进行数据遍历 + for (Property property : proList) { + // 如果为mapfile,则需要去读取数据信息,并存到json中 + if (ParseParamEnum.ZK_PATH_RULE_MAPFILE_NAME.getKey().equals(property.getName())) { + Property mapFilePro = new Property(); + 
mapFilePro.setName(property.getValue()); + // 加载属性的值信息 + mapFilePro.setValue(this.readMapFile(property.getValue())); + tempData.add(mapFilePro); + } + } + // 将数据添加的集合中 + proList.addAll(tempData); + // 清空,以进行下一次的添加 + tempData.clear(); + } + } + } + + /** + * 读取 mapFile文件的信息 + * 方法描述 + * @param name 名称信息 + * @return + * @创建日期 2016年9月18日 + */ + private String readMapFile(String name) { + + StringBuilder mapFileStr = new StringBuilder(); + + String path = ZookeeperPath.ZK_LOCAL_CFG_PATH.getKey() + name; + // 加载数据 + InputStream input = RulesxmlTozkLoader.class.getResourceAsStream(path); + + checkNotNull(input, "read Map file curr Path :" + path + " is null! must is not null"); + + byte[] buffers = new byte[256]; + + try { + int readIndex = -1; + + while ((readIndex = input.read(buffers)) != -1) { + mapFileStr.append(new String(buffers, 0, readIndex)); + } + } catch (IOException e) { + e.printStackTrace(); + LOGGER.error("RulesxmlTozkLoader readMapFile IOException", e); + + } finally { + IOUtils.close(input); + } + + return mapFileStr.toString(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/SchemasxmlTozkLoader.java b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/SchemasxmlTozkLoader.java new file mode 100644 index 000000000..95653af17 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/SchemasxmlTozkLoader.java @@ -0,0 +1,144 @@ +package io.mycat.config.loader.zkprocess.xmltozk.listen; + +import java.util.List; + +import org.apache.curator.framework.CuratorFramework; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.ZookeeperProcessListen; +import io.mycat.config.loader.zkprocess.entity.Schemas; +import io.mycat.config.loader.zkprocess.entity.schema.datahost.DataHost; +import io.mycat.config.loader.zkprocess.entity.schema.datanode.DataNode; +import 
io.mycat.config.loader.zkprocess.entity.schema.schema.Schema; +import io.mycat.config.loader.zkprocess.comm.NotiflyService; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; +import io.mycat.config.loader.zkprocess.parse.ParseXmlServiceInf; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.parse.entryparse.schema.json.DataHostJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.schema.json.DataNodeJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.schema.json.SchemaJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.schema.xml.SchemasParseXmlImpl; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkMultLoader; + +/** + * 进行从xml加载到zk中加载 +* 源文件名:SchemasLoader.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public class SchemasxmlTozkLoader extends ZkMultLoader implements NotiflyService { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(SchemasxmlTozkLoader.class); + + /** + * 当前文件中的zkpath信息 + * @字段说明 currZkPath + */ + private final String currZkPath; + + /** + * schema文件的路径信息 + * @字段说明 SCHEMA_PATH + */ + private static final String SCHEMA_PATH = ZookeeperPath.ZK_LOCAL_CFG_PATH.getKey() + "schema.xml"; + + /** + * schema类与xml转换服务 + * @字段说明 parseSchemaService + */ + private ParseXmlServiceInf parseSchemaXmlService; + + /** + * 进行将schema + * @字段说明 parseJsonSchema + */ + private ParseJsonServiceInf> parseJsonSchema = new SchemaJsonParse(); + + /** + * 进行将dataNode + * @字段说明 parseJsonSchema + */ + private ParseJsonServiceInf> parseJsonDataNode = new DataNodeJsonParse(); + + /** + * 进行将dataNode + * @字段说明 parseJsonSchema + */ + private ParseJsonServiceInf> parseJsonDataHost = new DataHostJsonParse(); + + public SchemasxmlTozkLoader(ZookeeperProcessListen zookeeperListen, 
CuratorFramework curator, + XmlProcessBase xmlParseBase) { + + this.setCurator(curator); + + // 获得当前集群的名称 + String schemaPath = zookeeperListen.getBasePath(); + schemaPath = schemaPath + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FOW_ZK_PATH_SCHEMA.getKey(); + + currZkPath = schemaPath; + // 将当前自己注册为事件接收对象 + zookeeperListen.addListen(schemaPath, this); + + // 生成xml与类的转换信息 + this.parseSchemaXmlService = new SchemasParseXmlImpl(xmlParseBase); + } + + @Override + public boolean notiflyProcess() throws Exception { + // 1,读取本地的xml文件 + Schemas schema = this.parseSchemaXmlService.parseXmlToBean(SCHEMA_PATH); + + LOGGER.info("SchemasxmlTozkLoader notiflyProcessxml to zk schema Object :" + schema); + + // 将实体信息写入至zk中 + this.xmlTozkSchemasJson(currZkPath, schema); + + LOGGER.info("SchemasxmlTozkLoader notiflyProcess xml to zk is success"); + + return true; + } + + /** + * 将xml文件的信息写入到zk中 + * 方法描述 + * @param basePath 基本路径 + * @param schema schema文件的信息 + * @throws Exception 异常信息 + * @创建日期 2016年9月17日 + */ + private void xmlTozkSchemasJson(String basePath, Schemas schema) throws Exception { + + // 设置schema目录的值 + String schemaStr = ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SCHEMA_SCHEMA.getKey(); + + String schemaValueStr = this.parseJsonSchema.parseBeanToJson(schema.getSchema()); + + this.checkAndwriteString(basePath, schemaStr, schemaValueStr); + // 设置datanode + String dataNodeStr = ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SCHEMA_DATANODE.getKey(); + + String dataNodeValueStr = this.parseJsonDataNode.parseBeanToJson(schema.getDataNode()); + + this.checkAndwriteString(basePath, dataNodeStr, dataNodeValueStr); + + // 设置dataHost + String dataHostStr = ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SCHEMA_DATAHOST.getKey(); + + String dataHostValueStr = this.parseJsonDataHost.parseBeanToJson(schema.getDataHost()); + + this.checkAndwriteString(basePath, dataHostStr, dataHostValueStr); + + } + +} diff --git 
a/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/SequenceTozkLoader.java b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/SequenceTozkLoader.java new file mode 100644 index 000000000..12a44302e --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/SequenceTozkLoader.java @@ -0,0 +1,199 @@ +package io.mycat.config.loader.zkprocess.xmltozk.listen; + +import java.io.IOException; +import java.io.InputStream; + +import org.apache.curator.framework.CuratorFramework; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.alibaba.fastjson.util.IOUtils; + +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.NotiflyService; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.config.loader.zkprocess.comm.ZookeeperProcessListen; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkMultLoader; + +/** + * 进行从sequence加载到zk中加载 +* 源文件名:SchemasLoader.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class SequenceTozkLoader extends ZkMultLoader implements NotiflyService { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(SequenceTozkLoader.class); + + /** + * 当前文件中的zkpath信息 + * @字段说明 currZkPath + */ + private final String currZkPath; + + /** + * 后缀名 + * @字段说明 PROPERTIES_SUFFIX + */ + private static final String PROPERTIES_SUFFIX = ".properties"; + + /** + * 序列配制信息 + * @字段说明 PROPERTIES_SEQUENCE_CONF + */ + private static final String PROPERTIES_SEQUENCE_CONF = "sequence_conf"; + + /** + * db序列配制信息 + * @字段说明 PROPERTIES_SEQUENCE_CONF + */ + private static final String PROPERTIES_SEQUENCE_DB_CONF = "sequence_db_conf"; + + /** + * 分布式的序列配制 + * @字段说明 PROPERTIES_SEQUENCE_CONF + */ + private static final String PROPERTIES_SEQUENCE_DISTRIBUTED_CONF = "sequence_distributed_conf"; + + /** + * 时间的序列配制 + * @字段说明 PROPERTIES_SEQUENCE_CONF + */ + private static final String PROPERTIES_SEQUENCE_TIME_CONF = "sequence_time_conf"; + + public SequenceTozkLoader(ZookeeperProcessListen zookeeperListen, CuratorFramework curator, + XmlProcessBase xmlParseBase) { + + this.setCurator(curator); + + // 获得当前集群的名称 + String schemaPath = zookeeperListen.getBasePath(); + schemaPath = schemaPath + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE.getKey(); + + currZkPath = schemaPath; + // 将当前自己注册为事件接收对象 + zookeeperListen.addListen(schemaPath, this); + + } + + @Override + public boolean notiflyProcess() throws Exception { + + // 将zk序列配配制信息入zk + this.sequenceTozk(currZkPath, PROPERTIES_SEQUENCE_CONF); + + LOGGER.info("SequenceTozkLoader notiflyProcess sequence_conf to zk success"); + + // 将zk的db方式信息入zk + this.sequenceTozk(currZkPath, PROPERTIES_SEQUENCE_DB_CONF); + + LOGGER.info("SequenceTozkLoader notiflyProcess sequence_db_conf to zk success"); + + // 将zk的分布式信息入zk + this.sequenceTozk(currZkPath, PROPERTIES_SEQUENCE_DISTRIBUTED_CONF); + + LOGGER.info("SequenceTozkLoader notiflyProcess sequence_distributed_conf 
to zk success"); + + // 将时间序列入zk + this.sequenceTozk(currZkPath, PROPERTIES_SEQUENCE_TIME_CONF); + + LOGGER.info("SequenceTozkLoader notiflyProcess sequence_time_conf to zk success"); + + LOGGER.info("SequenceTozkLoader notiflyProcess xml to zk is success"); + + return true; + } + + /** + * 将xml文件的信息写入到zk中 + * 方法描述 + * @param basePath 基本路径 + * @param schema schema文件的信息 + * @throws Exception 异常信息 + * @创建日期 2016年9月17日 + */ + private void sequenceTozk(String basePath, String name) throws Exception { + // 读取当前节的信息 + String commPath = ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_COMMON.getKey() + + ZookeeperPath.ZK_SEPARATOR.getKey(); + + String readFile = name + PROPERTIES_SUFFIX; + // 读取公共节点的信息 + String commSequence = this.readSequenceCfg(readFile); + String sequenceZkPath = commPath + readFile; + this.checkAndwriteString(basePath, sequenceZkPath, commSequence); + + // 集群中特有的节点的配制信息 + String culsterPath = ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_CLUSTER.getKey() + + ZookeeperPath.ZK_SEPARATOR.getKey(); + + String[] clusters = ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_CLUSTER_NODES) + .split(ZkParamCfg.ZK_CFG_CLUSTER_NODES_SEPARATE.getKey()); + + if (null != clusters) { + String nodeName = null; + for (String clusterName : clusters) { + nodeName = name + "-" + clusterName + PROPERTIES_SUFFIX; + // 读取当前集群中特有的节点的信息 + String clusterSequence = this.readSequenceCfg(nodeName); + + // 如果配制了特定节点的信息,则将往上入zk中 + if (null != clusterSequence) { + String seqclusterZkPath = culsterPath + nodeName; + this.checkAndwriteString(basePath, seqclusterZkPath, clusterSequence); + } + } + + } + } + + /** + * 读取 sequence配制文件的信息 + * 方法描述 + * @param name 名称信息 + * @return + * @创建日期 2016年9月18日 + */ + private String readSequenceCfg(String name) { + + String path = ZookeeperPath.ZK_LOCAL_CFG_PATH.getKey() + name; + // 加载数据 + InputStream input = SequenceTozkLoader.class.getResourceAsStream(path); + + if (null != input) { + + 
StringBuilder mapFileStr = new StringBuilder(); + + byte[] buffers = new byte[256]; + + try { + int readIndex = -1; + + while ((readIndex = input.read(buffers)) != -1) { + mapFileStr.append(new String(buffers, 0, readIndex)); + } + } catch (IOException e) { + e.printStackTrace(); + LOGGER.error("SequenceTozkLoader readMapFile IOException", e); + + } finally { + IOUtils.close(input); + } + + return mapFileStr.toString(); + } + return null; + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/ServerxmlTozkLoader.java b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/ServerxmlTozkLoader.java new file mode 100644 index 000000000..df0c04028 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/xmltozk/listen/ServerxmlTozkLoader.java @@ -0,0 +1,225 @@ +package io.mycat.config.loader.zkprocess.xmltozk.listen; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +import org.apache.curator.framework.CuratorFramework; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.alibaba.fastjson.util.IOUtils; + +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.NotiflyService; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.config.loader.zkprocess.comm.ZookeeperProcessListen; +import io.mycat.config.loader.zkprocess.entity.Server; +import io.mycat.config.loader.zkprocess.entity.server.System; +import io.mycat.config.loader.zkprocess.entity.server.user.User; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; +import io.mycat.config.loader.zkprocess.parse.ParseXmlServiceInf; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.parse.entryparse.server.json.SystemJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.server.json.UserJsonParse; +import 
io.mycat.config.loader.zkprocess.parse.entryparse.server.xml.ServerParseXmlImpl; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkMultLoader; + +/** + * 进行从server.xml加载到zk中加载 +* 源文件名:SchemasLoader.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public class ServerxmlTozkLoader extends ZkMultLoader implements NotiflyService { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(ServerxmlTozkLoader.class); + + /** + * 当前文件中的zkpath信息 + * @字段说明 currZkPath + */ + private final String currZkPath; + + /** + * server文件的路径信息 + * @字段说明 SCHEMA_PATH + */ + private static final String SERVER_PATH = ZookeeperPath.ZK_LOCAL_CFG_PATH.getKey() + "server.xml"; + + /** + * index_to_charset文件的路径信息 + * @字段说明 SCHEMA_PATH + */ + private static final String INDEX_TOCHARSET_PATH = "index_to_charset.properties"; + + /** + * server的xml的转换信息 + * @字段说明 parseServerXMl + */ + private ParseXmlServiceInf parseServerXMl; + + /** + * system信息 + * @字段说明 parseJsonSchema + */ + private ParseJsonServiceInf parseJsonSystem = new SystemJsonParse(); + + /** + * system信息 + * @字段说明 parseJsonSchema + */ + private ParseJsonServiceInf> parseJsonUser = new UserJsonParse(); + + public ServerxmlTozkLoader(ZookeeperProcessListen zookeeperListen, CuratorFramework curator, + XmlProcessBase xmlParseBase) { + + this.setCurator(curator); + + // 获得当前集群的名称 + String schemaPath = zookeeperListen.getBasePath(); + schemaPath = schemaPath + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SERVER.getKey(); + + currZkPath = schemaPath; + // 将当前自己注册为事件接收对象 + zookeeperListen.addListen(schemaPath, this); + + // 生成xml与类的转换信息 + parseServerXMl = new ServerParseXmlImpl(xmlParseBase); + } + + @Override + public boolean notiflyProcess() throws Exception { + // 1,读取本地的xml文件 + Server server = this.parseServerXMl.parseXmlToBean(SERVER_PATH); + 
LOGGER.info("ServerxmlTozkLoader notiflyProcessxml to zk server Object :" + server); + // 将实体信息写入至zk中 + this.xmlTozkServerJson(currZkPath, server); + + // 2,读取集群中的节点信息 + this.writeClusterNode(currZkPath); + + // 读取properties + String charSetValue = readProperties(INDEX_TOCHARSET_PATH); + // 将文件上传 + this.checkAndwriteString(currZkPath, INDEX_TOCHARSET_PATH, charSetValue); + + LOGGER.info("ServerxmlTozkLoader notiflyProcess xml to zk is success"); + + return true; + } + + /** + * 写入集群节点的信息 + * 方法描述 + * @throws Exception + * @创建日期 2016年9月17日 + */ + private void writeClusterNode(String basePath) throws Exception { + // 1,读取集群节点信息 + String[] zkNodes = ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_CLUSTER_NODES) + .split(ZkParamCfg.ZK_CFG_CLUSTER_NODES_SEPARATE.getKey()); + + if (null != zkNodes && zkNodes.length > 0) { + for (String node : zkNodes) { + String nodePath = ZookeeperPath.ZK_LOCAL_CFG_PATH.getKey() + "server-" + node + ".xml"; + // 将当前的xml文件写入到zk中 + Server serverNode = this.parseServerXMl.parseXmlToBean(nodePath); + + LOGGER.info("ServerxmlTozkLoader writeClusterNode to zk server Object :" + serverNode); + + // 如果当前不存在此配制文件则不写入 + if (null != serverNode) { + // 以集群的节点的名称写入 + this.xmlTozkClusterNodeJson(basePath, node, serverNode); + + LOGGER.info("ServerxmlTozkLoader writeClusterNode xml to zk is success"); + } + } + } + } + + /** + * 将xml文件的信息写入到zk中 + * 方法描述 + * @param basePath 基本路径 + * @param schema schema文件的信息 + * @throws Exception 异常信息 + * @创建日期 2016年9月17日 + */ + private void xmlTozkServerJson(String basePath, Server server) throws Exception { + // 设置默认的节点信息 + String defaultSystem = ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SERVER_DEFAULT.getKey(); + String defaultSystemValue = this.parseJsonSystem.parseBeanToJson(server.getSystem()); + this.checkAndwriteString(basePath, defaultSystem, defaultSystemValue); + + // 设置用户信息 + String userStr = ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SERVER_USER.getKey(); + 
String userValueStr = this.parseJsonUser.parseBeanToJson(server.getUser()); + this.checkAndwriteString(basePath, userStr, userValueStr); + } + + /** + * 将xml文件的信息写入到zk中 + * 方法描述 + * @param basePath 基本路径 + * @param schema schema文件的信息 + * @throws Exception 异常信息 + * @创建日期 2016年9月17日 + */ + private void xmlTozkClusterNodeJson(String basePath, String node, Server server) throws Exception { + // 设置集群中的节点信息 + basePath = basePath + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SERVER_CLUSTER.getKey(); + String clusterSystemValue = this.parseJsonSystem.parseBeanToJson(server.getSystem()); + this.checkAndwriteString(basePath, node, clusterSystemValue); + } + + /** + * 读取 properties配制文件的信息 + * 方法描述 + * @param name 名称信息 + * @return + * @创建日期 2016年9月18日 + */ + private String readProperties(String name) { + + String path = ZookeeperPath.ZK_LOCAL_CFG_PATH.getKey() + name; + // 加载数据 + InputStream input = SequenceTozkLoader.class.getResourceAsStream(path); + + if (null != input) { + + StringBuilder mapFileStr = new StringBuilder(); + + byte[] buffers = new byte[256]; + + try { + int readIndex = -1; + + while ((readIndex = input.read(buffers)) != -1) { + mapFileStr.append(new String(buffers, 0, readIndex)); + } + } catch (IOException e) { + e.printStackTrace(); + LOGGER.error("SequenceTozkLoader readMapFile IOException", e); + + } finally { + IOUtils.close(input); + } + + return mapFileStr.toString(); + } + return null; + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/ZktoXmlMain.java b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/ZktoXmlMain.java new file mode 100644 index 000000000..cdd25823f --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/ZktoXmlMain.java @@ -0,0 +1,206 @@ +package io.mycat.config.loader.zkprocess.zktoxml; + +import java.util.Set; + +import io.mycat.config.loader.zkprocess.zktoxml.command.CommandPathListener; +import io.mycat.config.loader.zkprocess.zktoxml.listen.*; +import 
org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.recipes.cache.NodeCache; +import org.apache.curator.framework.recipes.cache.NodeCacheListener; +import org.apache.curator.framework.recipes.cache.PathChildrenCache; +import org.apache.curator.utils.ZKPaths; +import org.apache.zookeeper.CreateMode; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.config.loader.zkprocess.comm.ZookeeperProcessListen; +import io.mycat.config.loader.zkprocess.console.ZkNofiflyCfg; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkMultLoader; +import io.mycat.manager.handler.ZKHandler; +import io.mycat.migrate.MigrateTaskWatch; +import io.mycat.util.ZKUtils; + +/** + * 将xk的信息转换为xml文件的操作 +* 源文件名:ZktoxmlMain.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月20日 +* 修改作者:liujun +* 修改日期:2016年9月20日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class ZktoXmlMain { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(ZkMultLoader.class); + + /** + * 加载zk监听服务 + */ + public static final ZookeeperProcessListen ZKLISTENER = new ZookeeperProcessListen(); + + public static void main(String[] args) throws Exception { + loadZktoFile(); + System.out.println(Long.MAX_VALUE); + } + + /** + * 将zk数据放到到本地 + * 方法描述 + * @throws Exception + * @创建日期 2016年9月21日 + */ + public static void loadZktoFile() throws Exception { + + // 得到集群名称 + String custerName = ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_CLUSTERID); + // 得到基本路径 + String basePath = ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_BASE.getKey(); + basePath = basePath + ZookeeperPath.ZK_SEPARATOR.getKey() + custerName; + ZKLISTENER.setBasePath(basePath); + + // 获得zk的连接信息 + CuratorFramework zkConn = buildConnection(ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_URL)); + + // 获得公共的xml转换器对象 + XmlProcessBase xmlProcess = new XmlProcessBase(); + + // 加载以接收者 + new SchemaszkToxmlLoader(ZKLISTENER, zkConn, xmlProcess); + + // server加载 + new ServerzkToxmlLoader(ZKLISTENER, zkConn, xmlProcess); + + // rule文件加载 + // new RuleszkToxmlLoader(zkListen, zkConn, xmlProcess); + ZKUtils.addChildPathCache(ZKUtils.getZKBasePath() + "rules", new RuleFunctionCacheListener()); + // 将序列配制信息加载 + new SequenceTopropertiesLoader(ZKLISTENER, zkConn, xmlProcess); + + // 进行ehcache转换 + new EcacheszkToxmlLoader(ZKLISTENER, zkConn, xmlProcess); + + // 将bindata目录的数据进行转换到本地文件 + ZKUtils.addChildPathCache(ZKUtils.getZKBasePath() + "bindata", new BinDataPathChildrenCacheListener()); + + // ruledata + ZKUtils.addChildPathCache(ZKUtils.getZKBasePath() + "ruledata", new RuleDataPathChildrenCacheListener()); + + // 初始化xml转换操作 + xmlProcess.initJaxbClass(); + + // 通知所有人 + ZKLISTENER.notifly(ZkNofiflyCfg.ZK_NOTIFLY_LOAD_ALL.getKey()); + + // 加载watch + loadZkWatch(ZKLISTENER.getWatchPath(), zkConn, ZKLISTENER); + + // 创建临时节点 + 
createTempNode(ZKUtils.getZKBasePath() + "line", ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_MYID), + zkConn, ZkConfig.getInstance().getValue(ZkParamCfg.MYCAT_SERVER_TYPE)); + + // 接收zk发送过来的命令 + runCommandWatch(zkConn, ZKUtils.getZKBasePath() + ZKHandler.ZK_NODE_PATH); + + MigrateTaskWatch.start(); + } + + private static void loadZkWatch(Set setPaths, final CuratorFramework zkConn, + final ZookeeperProcessListen zkListen) throws Exception { + + if (null != setPaths && !setPaths.isEmpty()) { + for (String path : setPaths) { + // 进行本地节点的监控操作 + NodeCache node = runWatch(zkConn, path, zkListen); + node.start(); + + LOGGER.info("ZktoxmlMain loadZkWatch path:" + path + " regist success"); + } + } + } + + /** + * 进行命令的监听操作 + * 方法描述 + * @param zkConn zk的连接信息 + * @param path 路径信息 + * @param ZKLISTENER 监控路径信息 + * @throws Exception + * @创建日期 2016年9月20日 + */ + @SuppressWarnings("resource") + private static void runCommandWatch(final CuratorFramework zkConn, final String path) throws Exception { + + PathChildrenCache children = new PathChildrenCache(zkConn, path, true); + + CommandPathListener commandListener = new CommandPathListener(); + + // 移除原来的监听再进行添加 + children.getListenable().addListener(commandListener); + + children.start(); + } + + /** + * 创建临时节点测试 + * 方法描述 + * @param parent + * @param node + * @param zkConn + * @throws Exception + * @创建日期 2016年9月20日 + */ + private static void createTempNode(String parent, String node, final CuratorFramework zkConn, String type) + throws Exception { + + String path = ZKPaths.makePath(parent, node); + + zkConn.create().withMode(CreateMode.EPHEMERAL).inBackground().forPath(path, type.getBytes("UTF-8")); + + } + + /** + * 进行zk的watch操作 + * 方法描述 + * @param zkConn zk的连接信息 + * @param path 路径信息 + * @param zkListen 监控路径信息 + * @throws Exception + * @创建日期 2016年9月20日 + */ + private static NodeCache runWatch(final CuratorFramework zkConn, final String path, + final ZookeeperProcessListen zkListen) throws Exception { + final NodeCache cache 
= new NodeCache(zkConn, path); + + NodeCacheListener listen = new NodeCacheListener() { + @Override + public void nodeChanged() throws Exception { + LOGGER.info("ZktoxmlMain runWatch process path event start "); + LOGGER.info("NodeCache changed, path is: " + cache.getCurrentData().getPath()); + String notPath = cache.getCurrentData().getPath(); + // 进行通知更新 + zkListen.notifly(notPath); + LOGGER.info("ZktoxmlMain runWatch process path event over"); + } + }; + + // 添加监听 + cache.getListenable().addListener(listen); + + return cache; + } + + private static CuratorFramework buildConnection(String url) { + + return ZKUtils.getConnection(); + } +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/command/CommandPathListener.java b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/command/CommandPathListener.java new file mode 100644 index 000000000..1708b9dd0 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/command/CommandPathListener.java @@ -0,0 +1,105 @@ +package io.mycat.config.loader.zkprocess.zktoxml.command; + +import java.util.concurrent.Callable; +import java.util.concurrent.locks.ReentrantLock; + +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; + +import io.mycat.MycatServer; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.config.loader.zkprocess.console.ZkNofiflyCfg; +import io.mycat.config.loader.zkprocess.zktoxml.ZktoXmlMain; +import io.mycat.manager.handler.ZKHandler; +import io.mycat.manager.response.ReloadConfig; +import 
io.mycat.net.NIOProcessor; +import io.mycat.util.ZKUtils; + +/** + * zk命令监听器 + * @author kk + * @date 2017年1月18日 + * @version 0.0.1 + */ +public class CommandPathListener implements PathChildrenCacheListener { + + private static final Logger LOGGER = LoggerFactory.getLogger(CommandPathListener.class); + + @Override + public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception { + switch (event.getType()) { + case CHILD_ADDED: + // 在发生节点添加的时候,则执行接收命令并执行 + // 1,首先检查 + String path = event.getData().getPath(); + String basePath = ZKUtils.getZKBasePath() + ZKHandler.ZK_NODE_PATH + "/"; + + // 检查节点与当前的节点是否一致 + String node = path.substring(basePath.length()); + + if (node.equals(ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_MYID))) { + // 检查命令内容是否为 + if (ZKHandler.RELOAD_FROM_ZK.equals(new String(client.getData().forPath(path)))) { + // 从服务器上下载最新的配制文件信息 + ZktoXmlMain.ZKLISTENER.notifly(ZkNofiflyCfg.ZK_NOTIFLY_LOAD_ALL.getKey()); + // 重新加载配制信息 + reload(path); + // 完成之后,删除命令信息, 以供下次读取 + client.delete().forPath(event.getData().getPath()); + LOGGER.info("CommandPathListener path:" + path + " reload success"); + } + } + + break; + case CHILD_UPDATED: + break; + case CHILD_REMOVED: + break; + default: + break; + } + + } + + public void reload(final String path) { + // reload @@config_all 校验前一次的事务完成情况 + if (!NIOProcessor.backends_old.isEmpty()) { + return; + } + + final ReentrantLock lock = MycatServer.getInstance().getConfig().getLock(); + lock.lock(); + try { + ListenableFuture listenableFuture = MycatServer.getInstance().getListeningExecutorService() + .submit(new Callable() { + @Override + public Boolean call() throws Exception { + return ReloadConfig.reload_all(); + } + }); + Futures.addCallback(listenableFuture, new FutureCallback() { + @Override + public void onSuccess(Boolean result) { + LOGGER.info("CommandPathListener path:" + path + " reload success"); + } + + @Override + public void onFailure(Throwable t) { + 
LOGGER.error("CommandPathListener path:" + path + " reload error", t); + } + + }, MycatServer.getInstance().getListeningExecutorService()); + } finally { + lock.unlock(); + } + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/BinDataPathChildrenCacheListener.java b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/BinDataPathChildrenCacheListener.java new file mode 100644 index 000000000..320f965d7 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/BinDataPathChildrenCacheListener.java @@ -0,0 +1,56 @@ +package io.mycat.config.loader.zkprocess.zktoxml.listen; + +import com.google.common.io.Files; +import io.mycat.MycatServer; +import io.mycat.config.model.SystemConfig; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.recipes.cache.ChildData; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; + +import java.io.File; +import java.io.IOException; + +/** + * Created by magicdoom on 2016/10/27. 
+ */ +public class BinDataPathChildrenCacheListener implements PathChildrenCacheListener { + @Override public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception { + ChildData data = event.getData(); + switch (event.getType()) { + + case CHILD_ADDED: + + add(data.getPath().substring(data.getPath().lastIndexOf("/")+1),event.getData().getData()) ; + break; + case CHILD_REMOVED: + delete(data.getPath().substring(data.getPath().lastIndexOf("/")+1),event.getData().getData()); ; + break; + case CHILD_UPDATED: + add(data.getPath().substring(data.getPath().lastIndexOf("/")+1),event.getData().getData()) ; + break; + default: + break; + } + } + + private void add(String name,byte[] data) throws IOException { + File file = new File( + SystemConfig.getHomePath() + File.separator + "conf" , + name); + Files.write(data,file); + //try to reload dnindex + if("dnindex.properties".equals(name)) { + MycatServer.getInstance().reloadDnIndex(); + } + } + + private void delete(String name,byte[] data) throws IOException { + File file = new File( + SystemConfig.getHomePath() + File.separator + "conf" , + name); + if(file.exists()) + file.delete(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/EcacheszkToxmlLoader.java b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/EcacheszkToxmlLoader.java new file mode 100644 index 000000000..8ca7b6d34 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/EcacheszkToxmlLoader.java @@ -0,0 +1,190 @@ +package io.mycat.config.loader.zkprocess.zktoxml.listen; + +import static com.google.common.base.Preconditions.checkNotNull; + +import java.io.File; +import java.io.IOException; + +import org.apache.curator.framework.CuratorFramework; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.io.Files; + +import io.mycat.config.loader.console.ZookeeperPath; +import 
io.mycat.config.loader.zkprocess.comm.NotiflyService; +import io.mycat.config.loader.zkprocess.comm.ZookeeperProcessListen; +import io.mycat.config.loader.zkprocess.entity.cache.Ehcache; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; +import io.mycat.config.loader.zkprocess.parse.ParseXmlServiceInf; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.parse.entryparse.cache.json.EhcacheJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.cache.xml.EhcacheParseXmlImpl; +import io.mycat.config.loader.zkprocess.zookeeper.DataInf; +import io.mycat.config.loader.zkprocess.zookeeper.DiretoryInf; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkDataImpl; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkDirectoryImpl; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkMultLoader; + +/** + * 进行从ecache.xml加载到zk中加载 +* 源文件名:SchemasLoader.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class EcacheszkToxmlLoader extends ZkMultLoader implements NotiflyService { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(EcacheszkToxmlLoader.class); + + /** + * 当前文件中的zkpath信息 + * @字段说明 currZkPath + */ + private final String currZkPath; + + /** + * 缓存文件名称 + * @字段说明 CACHESERVER_NAME + */ + private static final String CACHESERVER_NAME = "cacheservice.properties"; + + /** + * 缓存的xml文件配制信息 + * @字段说明 EHCACHE_NAME + */ + private static final String EHCACHE_NAME = "ehcache.xml"; + + /** + * ehcache的xml的转换信息 + * @字段说明 parseEhcacheXMl + */ + private final ParseXmlServiceInf parseEcacheXMl; + + /** + * 表的路由信息 + * @字段说明 parseJsonService + */ + private ParseJsonServiceInf parseJsonEhcacheService = new EhcacheJsonParse(); + + /** + * 监控类信息 + * @字段说明 zookeeperListen + */ + private ZookeeperProcessListen zookeeperListen; + + public EcacheszkToxmlLoader(ZookeeperProcessListen zookeeperListen, CuratorFramework curator, + XmlProcessBase xmlParseBase) { + + this.setCurator(curator); + + this.zookeeperListen = zookeeperListen; + + // 获得当前集群的名称 + String schemaPath = zookeeperListen.getBasePath(); + schemaPath = schemaPath + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_CACHE.getKey(); + + currZkPath = schemaPath; + // 将当前自己注册为事件接收对象 + this.zookeeperListen.addListen(schemaPath, this); + + // 生成xml与类的转换信息 + parseEcacheXMl = new EhcacheParseXmlImpl(xmlParseBase); + } + + @Override + public boolean notiflyProcess() throws Exception { + + // 通过组合模式进行zk目录树的加载 + DiretoryInf RulesDirectory = new ZkDirectoryImpl(currZkPath, null); + // 进行递归的数据获取 + this.getTreeDirectory(currZkPath, ZookeeperPath.FLOW_ZK_PATH_CACHE.getKey(), RulesDirectory); + + // 从当前的下一级开始进行遍历,获得到 + ZkDirectoryImpl zkDirectory = (ZkDirectoryImpl) RulesDirectory.getSubordinateInfo().get(0); + + // 进行写入操作 + zktoEhcacheWrite(zkDirectory); + + LOGGER.info("EcacheszkToxmlLoader notiflyProcess zk ehcache write success "); + + return true; + } + + 
/** + * 将zk上面的信息转换为javabean对象 + * 方法描述 + * @param zkDirectory + * @return + * @创建日期 2016年9月17日 + */ + private void zktoEhcacheWrite(ZkDirectoryImpl zkDirectory) { + + // 得到schema对象的目录信息 + DataInf ehcacheZkDirectory = this.getZkData(zkDirectory, EHCACHE_NAME); + + Ehcache ehcache = parseJsonEhcacheService.parseJsonToBean(ehcacheZkDirectory.getDataValue()); + + String outputPath = EcacheszkToxmlLoader.class.getClassLoader() + .getResource(ZookeeperPath.ZK_LOCAL_WRITE_PATH.getKey()).getPath(); + outputPath = new File(outputPath).getPath() + File.separator; + outputPath += EHCACHE_NAME; + + parseEcacheXMl.parseToXmlWrite(ehcache, outputPath, null); + + // 设置zk监控的路径信息 + String watchPath = zkDirectory.getName(); + watchPath = watchPath + ZookeeperPath.ZK_SEPARATOR.getKey() + EHCACHE_NAME; + this.zookeeperListen.watchPath(currZkPath, watchPath); + + // 写入cacheservice.properties的信息 + DataInf cacheserZkDirectory = this.getZkData(zkDirectory, CACHESERVER_NAME); + + if (null != cacheserZkDirectory) { + ZkDataImpl cacheData = (ZkDataImpl) cacheserZkDirectory; + + // 写入文件cacheservice.properties + this.writeCacheservice(cacheData.getName(), cacheData.getValue()); + + String watchServerPath = zkDirectory.getName(); + watchServerPath = watchPath + ZookeeperPath.ZK_SEPARATOR.getKey() + CACHESERVER_NAME; + this.zookeeperListen.watchPath(currZkPath, watchServerPath); + } + + } + + /** + * 读取 mapFile文件的信息 + * 方法描述 + * @param name 名称信息 + * @return + * @创建日期 2016年9月18日 + */ + private void writeCacheservice(String name, String value) { + + // 加载数据 + String path = RuleszkToxmlLoader.class.getClassLoader().getResource(ZookeeperPath.ZK_LOCAL_WRITE_PATH.getKey()) + .getPath(); + + checkNotNull(path, "write ecache file curr Path :" + path + " is null! 
must is not null"); + path = new File(path).getPath() + File.separator; + path += name; + + // 进行数据写入 + try { + Files.write(value.getBytes(), new File(path)); + } catch (IOException e1) { + e1.printStackTrace(); + } + + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/RuleDataPathChildrenCacheListener.java b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/RuleDataPathChildrenCacheListener.java new file mode 100644 index 000000000..10bc09d13 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/RuleDataPathChildrenCacheListener.java @@ -0,0 +1,76 @@ +package io.mycat.config.loader.zkprocess.zktoxml.listen; + +import com.google.common.io.Files; +import io.mycat.MycatServer; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.config.model.rule.RuleConfig; +import io.mycat.route.function.AbstractPartitionAlgorithm; +import io.mycat.route.function.ReloadFunction; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.recipes.cache.ChildData; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; + +import java.io.File; +import java.io.IOException; +import java.util.Map; + +/** + * Created by magicdoom on 2016/10/27. 
+ */ +public class RuleDataPathChildrenCacheListener implements PathChildrenCacheListener { + @Override public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception { + ChildData data = event.getData(); + switch (event.getType()) { + + case CHILD_ADDED: + + add(data.getPath().substring(data.getPath().lastIndexOf("/")+1),event.getData().getData()) ; + break; + case CHILD_REMOVED: + delete(data.getPath().substring(data.getPath().lastIndexOf("/")+1),event.getData().getData()); ; + break; + case CHILD_UPDATED: + add(data.getPath().substring(data.getPath().lastIndexOf("/")+1),event.getData().getData()) ; + break; + default: + break; + } + } + + + + private void reloadRuleData(String name){ + String tableName=name.substring(name.lastIndexOf("_")+1,name.indexOf(".")); + String ruleName=name.substring(0,name.indexOf(".")); + Map schemaConfigMap= MycatServer.getInstance().getConfig().getSchemas() ; + for (SchemaConfig schemaConfig : schemaConfigMap.values()) { + TableConfig tableConfig= schemaConfig.getTables().get(tableName.toUpperCase()); + if(tableConfig==null)continue; + RuleConfig rule=tableConfig.getRule(); + AbstractPartitionAlgorithm function= rule.getRuleAlgorithm() ; + if(function instanceof ReloadFunction){ + ((ReloadFunction) function).reload(); + } + } + } + + private void add(String name,byte[] data) throws IOException { + File file = new File( + SystemConfig.getHomePath() + File.separator + "conf" + File.separator + "ruledata", + name); + Files.write(data,file); + reloadRuleData(name); + } + + private void delete(String name,byte[] data) throws IOException { + File file = new File( + SystemConfig.getHomePath() + File.separator + "conf" + File.separator + "ruledata", + name); + if(file.exists()) + file.delete(); + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/RuleFunctionCacheListener.java b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/RuleFunctionCacheListener.java new file 
mode 100644 index 000000000..6f900f289 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/RuleFunctionCacheListener.java @@ -0,0 +1,232 @@ +package io.mycat.config.loader.zkprocess.zktoxml.listen; + +import com.google.common.io.Files; +import io.mycat.MycatServer; +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.console.ParseParamEnum; +import io.mycat.config.loader.zkprocess.entity.Property; +import io.mycat.config.loader.zkprocess.entity.Rules; +import io.mycat.config.loader.zkprocess.entity.rule.function.Function; +import io.mycat.config.loader.zkprocess.entity.rule.tablerule.TableRule; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; +import io.mycat.config.loader.zkprocess.parse.ParseXmlServiceInf; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.parse.entryparse.rule.json.FunctionJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.rule.json.TableRuleJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.rule.xml.RuleParseXmlImpl; +import io.mycat.config.loader.zkprocess.zookeeper.DataInf; +import io.mycat.config.loader.zkprocess.zookeeper.DiretoryInf; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkDataImpl; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.config.model.rule.RuleConfig; +import io.mycat.manager.response.ReloadConfig; +import io.mycat.route.function.AbstractPartitionAlgorithm; +import io.mycat.route.function.ReloadFunction; +import io.mycat.util.ZKUtils; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.recipes.cache.ChildData; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; + +import javax.xml.bind.JAXBException; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static com.google.common.base.Preconditions.checkNotNull; + +/** + * Created by magicdoom on 2016/10/27. + */ +public class RuleFunctionCacheListener implements PathChildrenCacheListener { + private static final Logger LOGGER = LoggerFactory.getLogger(RuleFunctionCacheListener.class); + @Override public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception { + ChildData data = event.getData(); + switch (event.getType()) { + + case CHILD_ADDED: + addOrUpdate(); + break; + case CHILD_UPDATED: + addOrUpdate(); + break; + default: + break; + } + } + + public RuleFunctionCacheListener() { + XmlProcessBase xmlProcessBase = new XmlProcessBase(); + + parseRulesXMl = new RuleParseXmlImpl(xmlProcessBase) ; + try { + xmlProcessBase.initJaxbClass(); + } catch (JAXBException e) { + LOGGER.error("error",e); + } + } + + private void addOrUpdate() + { + Rules Rules = null; + try { + Rules = this.zktoRulesBean(); + } catch (Exception e) { + LOGGER.error("error",e); + } + + LOGGER.info("RuleszkToxmlLoader notiflyProcess zk to object zk Rules Object :" + Rules); + + // 将mapfile信息写入到文件 中 + writeMapFileAddFunction(Rules.getFunction()); + + LOGGER.info("RuleszkToxmlLoader notiflyProcess write mapFile is success "); + + // 数配制信息写入文件 + String path = RuleszkToxmlLoader.class.getClassLoader().getResource(ZookeeperPath.ZK_LOCAL_WRITE_PATH.getKey()) + .getPath(); + path = new File(path).getPath() + File.separator; + path = path + WRITEPATH; + + LOGGER.info("RuleszkToxmlLoader notiflyProcess zk to object writePath :" + path); + + this.parseRulesXMl.parseToXmlWrite(Rules, path, "rule"); + + LOGGER.info("RuleszkToxmlLoader notiflyProcess zk to object zk Rules write :" + path + " is success"); + + if (MycatServer.getInstance().getProcessors() != null) + 
ReloadConfig.reload(); + + } + + + private static final String WRITEPATH = "rule.xml"; + + /** + * Rules的xml的转换信息 + * @字段说明 parseRulesXMl + */ + private ParseXmlServiceInf parseRulesXMl;; + + /** + * 表的路由信息 + * @字段说明 parseJsonService + */ + private ParseJsonServiceInf> parseJsonTableRuleService = new TableRuleJsonParse(); + + /** + * 表对应的字段信息 + * @字段说明 parseJsonFunctionService + */ + private ParseJsonServiceInf> parseJsonFunctionService = new FunctionJsonParse(); + + + private Rules zktoRulesBean() throws Exception { + Rules Rules = new Rules(); + + // tablerule信息 + String value= new String( ZKUtils.getConnection().getData().forPath(ZKUtils.getZKBasePath()+"rules/tableRule"),"UTF-8") ; + DataInf RulesZkData = new ZkDataImpl("tableRule",value); + List tableRuleData = parseJsonTableRuleService.parseJsonToBean(RulesZkData.getDataValue()); + Rules.setTableRule(tableRuleData); + + + + // 得到function信息 + String fucValue= new String( ZKUtils.getConnection().getData().forPath(ZKUtils.getZKBasePath()+"rules/function"),"UTF-8") ; + DataInf functionZkData =new ZkDataImpl("function",fucValue) ; + List functionList = parseJsonFunctionService.parseJsonToBean(functionZkData.getDataValue()); + Rules.setFunction(functionList); + + + + return Rules; + } + + + /** + * 读取序列配制文件便利店 + * 方法描述 + * @param functionList + * @创建日期 2016年9月18日 + */ + private void writeMapFileAddFunction(List functionList) { + + List tempData = new ArrayList<>(); + + List writeData = new ArrayList<>(); + + for (Function function : functionList) { + List proList = function.getProperty(); + if (null != proList && !proList.isEmpty()) { + // 进行数据遍历 + for (Property property : proList) { + // 如果为mapfile,则需要去读取数据信息,并存到json中 + if (ParseParamEnum.ZK_PATH_RULE_MAPFILE_NAME.getKey().equals(property.getName())) { + tempData.add(property); + } + } + + // 通过mapfile的名称,找到对应的数据信息 + if (!tempData.isEmpty()) { + for (Property property : tempData) { + for (Property prozkdownload : proList) { + // 根据mapfile的文件名去提取数据 + if 
(property.getValue().equals(prozkdownload.getName())) { + writeData.add(prozkdownload); + } + } + } + } + + // 将对应的数据信息写入到磁盘中 + if (!writeData.isEmpty()) { + for (Property writeMsg : writeData) { + this.writeMapFile(writeMsg.getName(), writeMsg.getValue()); + } + } + + // 将数据添加的集合中 + proList.removeAll(writeData); + + // 清空,以进行下一次的添加 + tempData.clear(); + writeData.clear(); + } + } + + } + + /** + * 读取 mapFile文件的信息 + * 方法描述 + * @param name 名称信息 + * @return + * @创建日期 2016年9月18日 + */ + private void writeMapFile(String name, String value) { + + // 加载数据 + String path = RuleszkToxmlLoader.class.getClassLoader().getResource(ZookeeperPath.ZK_LOCAL_WRITE_PATH.getKey()) + .getPath(); + + checkNotNull(path, "write Map file curr Path :" + path + " is null! must is not null"); + path = new File(path).getPath() + File.separator; + path += name; + + // 进行数据写入 + try { + Files.write(value.getBytes(), new File(path)); + } catch (IOException e1) { + e1.printStackTrace(); + } + + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/RuleszkToxmlLoader.java b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/RuleszkToxmlLoader.java new file mode 100644 index 000000000..b1e485718 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/RuleszkToxmlLoader.java @@ -0,0 +1,262 @@ +package io.mycat.config.loader.zkprocess.zktoxml.listen; + +import static com.google.common.base.Preconditions.checkNotNull; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.curator.framework.CuratorFramework; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.io.Files; + +import io.mycat.MycatServer; +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.NotiflyService; +import io.mycat.config.loader.zkprocess.comm.ZookeeperProcessListen; +import 
io.mycat.config.loader.zkprocess.console.ParseParamEnum; +import io.mycat.config.loader.zkprocess.entity.Property; +import io.mycat.config.loader.zkprocess.entity.Rules; +import io.mycat.config.loader.zkprocess.entity.rule.function.Function; +import io.mycat.config.loader.zkprocess.entity.rule.tablerule.TableRule; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; +import io.mycat.config.loader.zkprocess.parse.ParseXmlServiceInf; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.parse.entryparse.rule.json.FunctionJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.rule.json.TableRuleJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.rule.xml.RuleParseXmlImpl; +import io.mycat.config.loader.zkprocess.zookeeper.DataInf; +import io.mycat.config.loader.zkprocess.zookeeper.DiretoryInf; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkDirectoryImpl; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkMultLoader; +import io.mycat.manager.response.ReloadConfig; + +/** + * 进行rule的文件从zk中加载 +* 源文件名:RuleszkToxmlLoader.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class RuleszkToxmlLoader extends ZkMultLoader implements NotiflyService { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(RuleszkToxmlLoader.class); + + /** + * 当前文件中的zkpath信息 + * @字段说明 currZkPath + */ + private final String currZkPath; + + /** + * 写入本地的文件路径 + * @字段说明 WRITEPATH + */ + private static final String WRITEPATH = "rule.xml"; + + /** + * Rules的xml的转换信息 + * @字段说明 parseRulesXMl + */ + private ParseXmlServiceInf parseRulesXMl; + + /** + * 表的路由信息 + * @字段说明 parseJsonService + */ + private ParseJsonServiceInf> parseJsonTableRuleService = new TableRuleJsonParse(); + + /** + * 表对应的字段信息 + * @字段说明 parseJsonFunctionService + */ + private ParseJsonServiceInf> parseJsonFunctionService = new FunctionJsonParse(); + + /** + * zk的监控路径信息 + * @字段说明 zookeeperListen + */ + private ZookeeperProcessListen zookeeperListen; + + public RuleszkToxmlLoader(ZookeeperProcessListen zookeeperListen, CuratorFramework curator, + XmlProcessBase xmlParseBase) { + + this.setCurator(curator); + + this.zookeeperListen = zookeeperListen; + + // 获得当前集群的名称 + String RulesPath = zookeeperListen.getBasePath(); + RulesPath = RulesPath + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_RULE.getKey(); + + currZkPath = RulesPath; + // 将当前自己注册为事件接收对象 + zookeeperListen.addListen(RulesPath, this); + + // 生成xml与类的转换信息 + parseRulesXMl = new RuleParseXmlImpl(xmlParseBase); + } + + @Override + public boolean notiflyProcess() throws Exception { + // 1,将集群Rules目录下的所有集群按层次结构加载出来 + // 通过组合模式进行zk目录树的加载 + DiretoryInf RulesDirectory = new ZkDirectoryImpl(currZkPath, null); + // 进行递归的数据获取 + this.getTreeDirectory(currZkPath, ZookeeperPath.FLOW_ZK_PATH_RULE.getKey(), RulesDirectory); + + // 从当前的下一级开始进行遍历,获得到 + ZkDirectoryImpl zkDirectory = (ZkDirectoryImpl) RulesDirectory.getSubordinateInfo().get(0); + Rules Rules = this.zktoRulesBean(zkDirectory); + + LOGGER.info("RuleszkToxmlLoader notiflyProcess zk to object zk Rules Object :" + Rules); + 
+ // 将mapfile信息写入到文件 中 + writeMapFileAddFunction(Rules.getFunction()); + + LOGGER.info("RuleszkToxmlLoader notiflyProcess write mapFile is success "); + + // 数配制信息写入文件 + String path = RuleszkToxmlLoader.class.getClassLoader().getResource(ZookeeperPath.ZK_LOCAL_WRITE_PATH.getKey()) + .getPath(); + path = new File(path).getPath() + File.separator; + path = path + WRITEPATH; + + LOGGER.info("RuleszkToxmlLoader notiflyProcess zk to object writePath :" + path); + + this.parseRulesXMl.parseToXmlWrite(Rules, path, "rule"); + + LOGGER.info("RuleszkToxmlLoader notiflyProcess zk to object zk Rules write :" + path + " is success"); + + if (MycatServer.getInstance().getProcessors() != null) + ReloadConfig.reload(); + + return true; + } + + /** + * 将zk上面的信息转换为javabean对象 + * 方法描述 + * @param zkDirectory + * @return + * @创建日期 2016年9月17日 + */ + private Rules zktoRulesBean(DiretoryInf zkDirectory) { + Rules Rules = new Rules(); + + // tablerule信息 + DataInf RulesZkData = this.getZkData(zkDirectory, ZookeeperPath.FLOW_ZK_PATH_RULE_TABLERULE.getKey()); + List tableRuleData = parseJsonTableRuleService.parseJsonToBean(RulesZkData.getDataValue()); + Rules.setTableRule(tableRuleData); + + // tablerule的监控路径信息 + String watchPath = ZookeeperPath.FLOW_ZK_PATH_RULE.getKey(); + watchPath = watchPath + ZookeeperPath.ZK_SEPARATOR.getKey() + + ZookeeperPath.FLOW_ZK_PATH_RULE_TABLERULE.getKey(); + this.zookeeperListen.watchPath(currZkPath, watchPath); + + // 得到function信息 + DataInf functionZkData = this.getZkData(zkDirectory, ZookeeperPath.FLOW_ZK_PATH_RULE_FUNCTION.getKey()); + List functionList = parseJsonFunctionService.parseJsonToBean(functionZkData.getDataValue()); + Rules.setFunction(functionList); + + // function的监控路径信息 + String functionWatchPath = ZookeeperPath.FLOW_ZK_PATH_RULE.getKey(); + functionWatchPath = functionWatchPath + ZookeeperPath.ZK_SEPARATOR.getKey() + + ZookeeperPath.FLOW_ZK_PATH_RULE_FUNCTION.getKey(); + this.zookeeperListen.watchPath(currZkPath, functionWatchPath); + + 
return Rules; + } + + /** + * 读取序列配制文件便利店 + * 方法描述 + * @param functionList + * @创建日期 2016年9月18日 + */ + private void writeMapFileAddFunction(List functionList) { + + List tempData = new ArrayList<>(); + + List writeData = new ArrayList<>(); + + for (Function function : functionList) { + List proList = function.getProperty(); + if (null != proList && !proList.isEmpty()) { + // 进行数据遍历 + for (Property property : proList) { + // 如果为mapfile,则需要去读取数据信息,并存到json中 + if (ParseParamEnum.ZK_PATH_RULE_MAPFILE_NAME.getKey().equals(property.getName())) { + tempData.add(property); + } + } + + // 通过mapfile的名称,找到对应的数据信息 + if (!tempData.isEmpty()) { + for (Property property : tempData) { + for (Property prozkdownload : proList) { + // 根据mapfile的文件名去提取数据 + if (property.getValue().equals(prozkdownload.getName())) { + writeData.add(prozkdownload); + } + } + } + } + + // 将对应的数据信息写入到磁盘中 + if (!writeData.isEmpty()) { + for (Property writeMsg : writeData) { + this.writeMapFile(writeMsg.getName(), writeMsg.getValue()); + } + } + + // 将数据添加的集合中 + proList.removeAll(writeData); + + // 清空,以进行下一次的添加 + tempData.clear(); + writeData.clear(); + } + } + + } + + /** + * 读取 mapFile文件的信息 + * 方法描述 + * @param name 名称信息 + * @return + * @创建日期 2016年9月18日 + */ + private void writeMapFile(String name, String value) { + + // 加载数据 + String path = RuleszkToxmlLoader.class.getClassLoader().getResource(ZookeeperPath.ZK_LOCAL_WRITE_PATH.getKey()) + .getPath(); + + checkNotNull(path, "write Map file curr Path :" + path + " is null! 
must is not null"); + path = new File(path).getPath() + File.separator; + path += name; + + // 进行数据写入 + try { + Files.write(value.getBytes(), new File(path)); + } catch (IOException e1) { + e1.printStackTrace(); + } + + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/SchemaszkToxmlLoader.java b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/SchemaszkToxmlLoader.java new file mode 100644 index 000000000..adfc85d55 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/SchemaszkToxmlLoader.java @@ -0,0 +1,178 @@ +package io.mycat.config.loader.zkprocess.zktoxml.listen; + +import java.io.File; +import java.util.List; + +import io.mycat.MycatServer; +import io.mycat.manager.response.ReloadConfig; +import org.apache.curator.framework.CuratorFramework; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.NotiflyService; +import io.mycat.config.loader.zkprocess.comm.ZookeeperProcessListen; +import io.mycat.config.loader.zkprocess.entity.Schemas; +import io.mycat.config.loader.zkprocess.entity.schema.datahost.DataHost; +import io.mycat.config.loader.zkprocess.entity.schema.datanode.DataNode; +import io.mycat.config.loader.zkprocess.entity.schema.schema.Schema; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; +import io.mycat.config.loader.zkprocess.parse.ParseXmlServiceInf; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.parse.entryparse.schema.json.DataHostJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.schema.json.DataNodeJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.schema.json.SchemaJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.schema.xml.SchemasParseXmlImpl; +import io.mycat.config.loader.zkprocess.zookeeper.DataInf; +import 
io.mycat.config.loader.zkprocess.zookeeper.DiretoryInf; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkDirectoryImpl; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkMultLoader; + +/** + * 进行schema的文件从zk中加载 +* 源文件名:SchemasLoader.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public class SchemaszkToxmlLoader extends ZkMultLoader implements NotiflyService { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(SchemaszkToxmlLoader.class); + + /** + * 当前文件中的zkpath信息 + * @字段说明 currZkPath + */ + private final String currZkPath; + + /** + * 写入本地的文件路径 + * @字段说明 WRITEPATH + */ + private static final String WRITEPATH = "schema.xml"; + + /** + * schema类与xml转换服务 + * @字段说明 parseSchemaService + */ + private ParseXmlServiceInf parseSchemaXmlService; + + /** + * 进行将schema + * @字段说明 parseJsonSchema + */ + private ParseJsonServiceInf> parseJsonSchema = new SchemaJsonParse(); + + /** + * 进行将dataNode + * @字段说明 parseJsonSchema + */ + private ParseJsonServiceInf> parseJsonDataNode = new DataNodeJsonParse(); + + /** + * 进行将dataNode + * @字段说明 parseJsonSchema + */ + private ParseJsonServiceInf> parseJsonDataHost = new DataHostJsonParse(); + + /** + * zk的监控路径信息 + * @字段说明 zookeeperListen + */ + private ZookeeperProcessListen zookeeperListen; + + public SchemaszkToxmlLoader(ZookeeperProcessListen zookeeperListen, CuratorFramework curator, + XmlProcessBase xmlParseBase) { + + this.setCurator(curator); + + this.zookeeperListen = zookeeperListen; + + // 获得当前集群的名称 + String schemaPath = zookeeperListen.getBasePath(); + schemaPath = schemaPath + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FOW_ZK_PATH_SCHEMA.getKey(); + + currZkPath = schemaPath; + // 将当前自己注册为事件接收对象 + this.zookeeperListen.addListen(schemaPath, this); + + // 生成xml与类的转换信息 + this.parseSchemaXmlService = new SchemasParseXmlImpl(xmlParseBase); + 
} + + @Override + public boolean notiflyProcess() throws Exception { + // 1,将集群schema目录下的所有集群按层次结构加载出来 + // 通过组合模式进行zk目录树的加载 + DiretoryInf schemaDirectory = new ZkDirectoryImpl(currZkPath, null); + // 进行递归的数据获取 + this.getTreeDirectory(currZkPath, ZookeeperPath.FLOW_ZK_PATH_SCHEMA_SCHEMA.getKey(), schemaDirectory); + + // 从当前的下一级开始进行遍历,获得到 + ZkDirectoryImpl zkDirectory = (ZkDirectoryImpl) schemaDirectory.getSubordinateInfo().get(0); + + Schemas schema = this.zktoSchemasBean(zkDirectory); + + LOGGER.info("SchemasLoader notiflyProcess zk to object zk schema Object :" + schema); + + String path = SchemaszkToxmlLoader.class.getClassLoader() + .getResource(ZookeeperPath.ZK_LOCAL_WRITE_PATH.getKey()).getPath(); + path=new File(path).getPath()+File.separator; + path += WRITEPATH; + + LOGGER.info("SchemasLoader notiflyProcess zk to object writePath :" + path); + + this.parseSchemaXmlService.parseToXmlWrite(schema, path, "schema"); + + LOGGER.info("SchemasLoader notiflyProcess zk to object zk schema write :" + path + " is success"); + + if(MycatServer.getInstance().getStartup().get()) { + ReloadConfig.reload_all(); + } + return true; + } + + /** + * 将zk上面的信息转换为javabean对象 + * 方法描述 + * @param zkDirectory + * @return + * @创建日期 2016年9月17日 + */ + private Schemas zktoSchemasBean(ZkDirectoryImpl zkDirectory) { + Schemas schema = new Schemas(); + + // 得到schema对象的目录信息 + DataInf schemaZkDirectory = this.getZkData(zkDirectory, ZookeeperPath.FLOW_ZK_PATH_SCHEMA_SCHEMA.getKey()); + List schemaList = parseJsonSchema.parseJsonToBean(schemaZkDirectory.getDataValue()); + schema.setSchema(schemaList); + + this.zookeeperListen.watchPath(currZkPath, ZookeeperPath.FLOW_ZK_PATH_SCHEMA_SCHEMA.getKey()); + + // 得到dataNode的信息 + DataInf dataNodeZkDirectory = this.getZkData(zkDirectory, ZookeeperPath.FLOW_ZK_PATH_SCHEMA_DATANODE.getKey()); + List dataNodeList = parseJsonDataNode.parseJsonToBean(dataNodeZkDirectory.getDataValue()); + schema.setDataNode(dataNodeList); + + 
this.zookeeperListen.watchPath(currZkPath, ZookeeperPath.FLOW_ZK_PATH_SCHEMA_DATANODE.getKey()); + + // 得到dataNode的信息 + DataInf dataHostZkDirectory = this.getZkData(zkDirectory, ZookeeperPath.FLOW_ZK_PATH_SCHEMA_DATAHOST.getKey()); + List dataHostList = parseJsonDataHost.parseJsonToBean(dataHostZkDirectory.getDataValue()); + schema.setDataHost(dataHostList); + + this.zookeeperListen.watchPath(currZkPath, ZookeeperPath.FLOW_ZK_PATH_SCHEMA_DATAHOST.getKey()); + + return schema; + + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/SequenceTopropertiesLoader.java b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/SequenceTopropertiesLoader.java new file mode 100644 index 000000000..d75f815fd --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/SequenceTopropertiesLoader.java @@ -0,0 +1,287 @@ +package io.mycat.config.loader.zkprocess.zktoxml.listen; + +import static com.google.common.base.Preconditions.checkNotNull; + +import java.io.File; +import java.io.IOException; + +import org.apache.curator.framework.CuratorFramework; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.io.Files; + +import io.mycat.MycatServer; +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.NotiflyService; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.config.loader.zkprocess.comm.ZookeeperProcessListen; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.zookeeper.DiretoryInf; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkDataImpl; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkDirectoryImpl; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkMultLoader; +import io.mycat.manager.response.ReloadConfig; + +/** + * 进行从sequence加载到zk中加载 +* 源文件名:SchemasLoader.java +* 
文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public class SequenceTopropertiesLoader extends ZkMultLoader implements NotiflyService { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(SequenceTopropertiesLoader.class); + + /** + * 当前文件中的zkpath信息 + * @字段说明 currZkPath + */ + private final String currZkPath; + + /** + * 后缀名 + * @字段说明 PROPERTIES_SUFFIX + */ + private static final String PROPERTIES_SUFFIX = ".properties"; + + /** + * 序列配制信息 + * @字段说明 PROPERTIES_SEQUENCE_CONF + */ + private static final String PROPERTIES_SEQUENCE_CONF = "sequence_conf"; + + /** + * db序列配制信息 + * @字段说明 PROPERTIES_SEQUENCE_CONF + */ + private static final String PROPERTIES_SEQUENCE_DB_CONF = "sequence_db_conf"; + + /** + * 分布式的序列配制 + * @字段说明 PROPERTIES_SEQUENCE_CONF + */ + private static final String PROPERTIES_SEQUENCE_DISTRIBUTED_CONF = "sequence_distributed_conf"; + + /** + * 时间的序列配制 + * @字段说明 PROPERTIES_SEQUENCE_CONF + */ + private static final String PROPERTIES_SEQUENCE_TIME_CONF = "sequence_time_conf"; + + /** + * 监控路径信息 + * @字段说明 zookeeperListen + */ + private ZookeeperProcessListen zookeeperListen; + + public SequenceTopropertiesLoader(ZookeeperProcessListen zookeeperListen, CuratorFramework curator, + XmlProcessBase xmlParseBase) { + + this.setCurator(curator); + + this.zookeeperListen = zookeeperListen; + + // 获得当前集群的名称 + String schemaPath = zookeeperListen.getBasePath(); + schemaPath = schemaPath + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE.getKey(); + + currZkPath = schemaPath; + // 将当前自己注册为事件接收对象 + this.zookeeperListen.addListen(schemaPath, this); + + } + + @Override + public boolean notiflyProcess() throws Exception { + + // 1,将集群server目录下的所有集群按层次结构加载出来 + // 通过组合模式进行zk目录树的加载 + DiretoryInf sequenceDirectory = new ZkDirectoryImpl(currZkPath, null); + // 进行递归的数据获取 + 
this.getTreeDirectory(currZkPath, ZookeeperPath.FLOW_ZK_PATH_SEQUENCE.getKey(), sequenceDirectory); + + // 取到当前根目录 信息 + sequenceDirectory = (DiretoryInf) sequenceDirectory.getSubordinateInfo().get(0); + + // 将zk序列配配制信息入本地文件 + this.sequenceZkToProperties(currZkPath, PROPERTIES_SEQUENCE_CONF, sequenceDirectory); + + LOGGER.info("SequenceTozkLoader notiflyProcess sequence_conf to local properties success"); + + // 将zk的db方式信息入本地文件 + this.sequenceZkToProperties(currZkPath, PROPERTIES_SEQUENCE_DB_CONF, sequenceDirectory); + + LOGGER.info("SequenceTozkLoader notiflyProcess sequence_db_conf to local properties success"); + + // 将zk的分布式信息入本地文件 + this.seqWriteOneZkToProperties(currZkPath, PROPERTIES_SEQUENCE_DISTRIBUTED_CONF, sequenceDirectory); + + LOGGER.info("SequenceTozkLoader notiflyProcess sequence_distributed_conf to local properties success"); + + // 将zk时间序列入本地文件 + this.seqWriteOneZkToProperties(currZkPath, PROPERTIES_SEQUENCE_TIME_CONF, sequenceDirectory); + + LOGGER.info("SequenceTozkLoader notiflyProcess sequence_time_conf to local properties success"); + + LOGGER.info("SequenceTozkLoader notiflyProcess xml to local properties is success"); + + if (MycatServer.getInstance().getProcessors() != null) + ReloadConfig.reload(); + return true; + } + + /** + * 将xml文件的信息写入到zk中 + * 方法描述 + * @param basePath 基本路径 + * @param schema schema文件的信息 + * @throws Exception 异常信息 + * @创建日期 2016年9月17日 + */ + private void sequenceZkToProperties(String basePath, String name, DiretoryInf seqDirectory) throws Exception { + // 读取当前节的信息 + ZkDirectoryImpl zkDirectory = (ZkDirectoryImpl) this.getZkDirectory(seqDirectory, + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_COMMON.getKey()); + + if (null != zkDirectory) { + String writeFile = name + PROPERTIES_SUFFIX; + + // 读取common目录下的数据 + ZkDataImpl commData = (ZkDataImpl) this.getZkData(zkDirectory, writeFile); + + // 读取公共节点的信息 + this.writeMapFile(commData.getName(), commData.getValue()); + + String seqComm = 
ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_COMMON.getKey(); + seqComm = seqComm + ZookeeperPath.ZK_SEPARATOR.getKey() + commData.getName(); + + this.zookeeperListen.watchPath(currZkPath, seqComm); + + } + + // 集群中特有的节点的配制信息 + ZkDirectoryImpl zkClusterDir = (ZkDirectoryImpl) this.getZkDirectory(seqDirectory, + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_CLUSTER.getKey()); + + if (null != zkClusterDir) { + + String clusterName = ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_MYID); + + String nodeName = name + "-" + clusterName + PROPERTIES_SUFFIX; + + // 读取cluster目录下的数据 + ZkDataImpl clusterData = (ZkDataImpl) this.getZkData(zkClusterDir, nodeName); + + if (null != clusterData) { + // 读取当前集群中特有的节点的信息 + this.writeMapFile(clusterData.getName(), clusterData.getValue()); + + String seqCluster = ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_COMMON.getKey(); + seqCluster = seqCluster + ZookeeperPath.ZK_SEPARATOR.getKey() + clusterData.getName(); + + this.zookeeperListen.watchPath(currZkPath, seqCluster); + } + } + } + + /** + * 将xml文件的信息写入到zk中 + * 方法描述 + * @param basePath 基本路径 + * @param schema schema文件的信息 + * @throws Exception 异常信息 + * @创建日期 2016年9月17日 + */ + private void seqWriteOneZkToProperties(String basePath, String name, DiretoryInf seqDirectory) throws Exception { + // 读取当前节的信息 + ZkDirectoryImpl zkDirectory = (ZkDirectoryImpl) this.getZkDirectory(seqDirectory, + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_COMMON.getKey()); + + ZkDataImpl commData = null; + + if (null != zkDirectory) { + String writeFile = name + PROPERTIES_SUFFIX; + + // 读取common目录下的数据 + commData = (ZkDataImpl) this.getZkData(zkDirectory, writeFile); + + // comm路径的监控路径 + String seqComm = ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_COMMON.getKey(); + seqComm = seqComm + ZookeeperPath.ZK_SEPARATOR.getKey() + commData.getName(); + + this.zookeeperListen.watchPath(currZkPath, seqComm); + } + + // 集群中特有的节点的配制信息 + ZkDirectoryImpl zkClusterDir = (ZkDirectoryImpl) this.getZkDirectory(seqDirectory, + 
ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_CLUSTER.getKey()); + + ZkDataImpl clusterData = null; + + if (null != zkClusterDir) { + + String clusterName = ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_MYID); + + String nodeName = name + "-" + clusterName + PROPERTIES_SUFFIX; + + // 读取cluster目录下的数据 + clusterData = (ZkDataImpl) this.getZkData(zkClusterDir, nodeName); + + if (null != clusterData) { + // comm路径的监控路径 + String seqCluster = ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_CLUSTER.getKey(); + seqCluster = seqCluster + ZookeeperPath.ZK_SEPARATOR.getKey() + clusterData.getName(); + + this.zookeeperListen.watchPath(currZkPath, seqCluster); + } + } + + // 如果配制了单独节点的信息,以公共的名称,写入当前的值 + if (clusterData != null && commData != null) { + // 读取公共节点的信息 + this.writeMapFile(commData.getName(), clusterData.getValue()); + } else if (commData != null) { + // 读取当前集群中特有的节点的信息 + this.writeMapFile(commData.getName(), commData.getValue()); + } + } + + /** + * 读取 mapFile文件的信息 + * 方法描述 + * @param name 名称信息 + * @return + * @创建日期 2016年9月18日 + */ + private void writeMapFile(String name, String value) { + + // 加载数据 + String path = RuleszkToxmlLoader.class.getClassLoader().getResource(ZookeeperPath.ZK_LOCAL_WRITE_PATH.getKey()) + .getPath(); + + checkNotNull(path, "write Map file curr Path :" + path + " is null! 
must is not null"); + + path = new File(path).getPath() + File.separator; + path += name; + + // 进行数据写入 + try { + Files.write(value.getBytes(), new File(path)); + } catch (IOException e1) { + e1.printStackTrace(); + } + + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/ServerzkToxmlLoader.java b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/ServerzkToxmlLoader.java new file mode 100644 index 000000000..e3c9ca575 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zktoxml/listen/ServerzkToxmlLoader.java @@ -0,0 +1,266 @@ +package io.mycat.config.loader.zkprocess.zktoxml.listen; + +import static com.google.common.base.Preconditions.checkNotNull; + +import java.io.File; +import java.io.IOException; +import java.util.List; + +import org.apache.curator.framework.CuratorFramework; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.io.Files; + +import io.mycat.MycatServer; +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.NotiflyService; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.config.loader.zkprocess.comm.ZookeeperProcessListen; +import io.mycat.config.loader.zkprocess.entity.Server; +import io.mycat.config.loader.zkprocess.entity.server.System; +import io.mycat.config.loader.zkprocess.entity.server.user.User; +import io.mycat.config.loader.zkprocess.parse.ParseJsonServiceInf; +import io.mycat.config.loader.zkprocess.parse.ParseXmlServiceInf; +import io.mycat.config.loader.zkprocess.parse.XmlProcessBase; +import io.mycat.config.loader.zkprocess.parse.entryparse.server.json.SystemJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.server.json.UserJsonParse; +import io.mycat.config.loader.zkprocess.parse.entryparse.server.xml.ServerParseXmlImpl; +import io.mycat.config.loader.zkprocess.zookeeper.DataInf; +import 
io.mycat.config.loader.zkprocess.zookeeper.DiretoryInf; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkDataImpl; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkDirectoryImpl; +import io.mycat.config.loader.zkprocess.zookeeper.process.ZkMultLoader; +import io.mycat.manager.response.ReloadConfig; + +/** + * 进行server的文件从zk中加载 +* 源文件名:ServerzkToxmlLoader.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public class ServerzkToxmlLoader extends ZkMultLoader implements NotiflyService { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(ServerzkToxmlLoader.class); + + /** + * 当前文件中的zkpath信息 + * @字段说明 currZkPath + */ + private final String currZkPath; + + /** + * 写入本地的文件路径 + * @字段说明 WRITEPATH + */ + private static final String WRITEPATH = "server.xml"; + + /** + * index_to_charset文件的路径信息 + * @字段说明 SCHEMA_PATH + */ + private static final String INDEX_TOCHARSET_PATH = "index_to_charset.properties"; + + /** + * server的xml的转换信息 + * @字段说明 parseServerXMl + */ + private ParseXmlServiceInf parseServerXMl; + + /** + * system信息 + * @字段说明 parseJsonserver + */ + private ParseJsonServiceInf parseJsonSystem = new SystemJsonParse(); + + /** + * system信息 + * @字段说明 parseJsonserver + */ + private ParseJsonServiceInf> parseJsonUser = new UserJsonParse(); + + /** + * zk监控路径 + * @字段说明 zookeeperListen + */ + private ZookeeperProcessListen zookeeperListen; + + public ServerzkToxmlLoader(ZookeeperProcessListen zookeeperListen, CuratorFramework curator, + XmlProcessBase xmlParseBase) { + + this.setCurator(curator); + + this.zookeeperListen = zookeeperListen; + + // 获得当前集群的名称 + String serverPath = zookeeperListen.getBasePath(); + serverPath = serverPath + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SERVER.getKey(); + + currZkPath = serverPath; + // 将当前自己注册为事件接收对象 + 
this.zookeeperListen.addListen(serverPath, this); + + // 生成xml与类的转换信息 + parseServerXMl = new ServerParseXmlImpl(xmlParseBase); + } + + @Override + public boolean notiflyProcess() throws Exception { + // 1,将集群server目录下的所有集群按层次结构加载出来 + // 通过组合模式进行zk目录树的加载 + DiretoryInf serverDirectory = new ZkDirectoryImpl(currZkPath, null); + // 进行递归的数据获取 + this.getTreeDirectory(currZkPath, ZookeeperPath.FLOW_ZK_PATH_SERVER.getKey(), serverDirectory); + + // 从当前的下一级开始进行遍历,获得到 + ZkDirectoryImpl zkDirectory = (ZkDirectoryImpl) serverDirectory.getSubordinateInfo().get(0); + Server server = this.zktoServerBean(zkDirectory); + + // 读取当前集群中当前节点的特殊的配制信息 + Server currSer = this.zktoServerBeanByCurrNode(zkDirectory); + + // 为当前的参数赋新值 + if (null != currSer) { + server.getSystem().setNewValue(currSer.getSystem()); + } + + LOGGER.info("ServerzkToxmlLoader notiflyProcess zk to object zk server Object :" + server); + + // 数配制信息写入文件 + String path = ServerzkToxmlLoader.class.getClassLoader().getResource(ZookeeperPath.ZK_LOCAL_WRITE_PATH.getKey()) + .getPath(); + path = new File(path).getPath() + File.separator; + path += WRITEPATH; + + LOGGER.info("ServerzkToxmlLoader notiflyProcess zk to object writePath :" + path); + + this.parseServerXMl.parseToXmlWrite(server, path, "server"); + + LOGGER.info("ServerzkToxmlLoader notiflyProcess zk to object zk server write :" + path + " is success"); + + // 得到server对象的目录信息 + DataInf indexToCharSet = this.getZkData(zkDirectory, INDEX_TOCHARSET_PATH); + + if (null != indexToCharSet) { + + if (indexToCharSet instanceof ZkDataImpl) { + ZkDataImpl dataImpl = (ZkDataImpl) indexToCharSet; + this.writeProperties(dataImpl.getName(), dataImpl.getValue()); + } + + LOGGER.info("ServerzkToxmlLoader notiflyProcess zk to write index_to_charset.properties is success"); + } + if (MycatServer.getInstance().getProcessors() != null) + ReloadConfig.reload(); + return true; + } + + /** + * 将zk上面的信息转换为javabean对象 + * 方法描述 + * @param zkDirectory + * @return + * @创建日期 2016年9月17日 + */ + 
private Server zktoServerBean(DiretoryInf zkDirectory) { + Server server = new Server(); + + // 得到server对象的目录信息 + DataInf serverZkDirectory = this.getZkData(zkDirectory, ZookeeperPath.FLOW_ZK_PATH_SERVER_DEFAULT.getKey()); + System systemValue = parseJsonSystem.parseJsonToBean(serverZkDirectory.getDataValue()); + server.setSystem(systemValue); + + this.zookeeperListen.watchPath(currZkPath, ZookeeperPath.FLOW_ZK_PATH_SERVER_DEFAULT.getKey()); + + // 得到user的信息 + DataInf userZkDirectory = this.getZkData(zkDirectory, ZookeeperPath.FLOW_ZK_PATH_SERVER_USER.getKey()); + List userList = parseJsonUser.parseJsonToBean(userZkDirectory.getDataValue()); + server.setUser(userList); + + // 用户路径的监控 + this.zookeeperListen.watchPath(currZkPath, ZookeeperPath.FLOW_ZK_PATH_SERVER_USER.getKey()); + + return server; + } + + /** + * 加载当前节点的特殊配制信息 + * 方法描述 + * @param zkDirectory + * @return + * @创建日期 2016年9月17日 + */ + private Server zktoServerBeanByCurrNode(DiretoryInf zkDirectory) { + + Server server = null; + + // 得到集群节点的配制信息 + DiretoryInf directory = this.getZkDirectory(zkDirectory, ZookeeperPath.FLOW_ZK_PATH_SERVER_CLUSTER.getKey()); + + if (null != directory) { + + // 获得当前myid的名称 + String myid = ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_MYID); + + // 获邓当前节点的信息 + DataInf currDataCfg = this.getZkData(directory, myid); + + // 如果当前节点存在配制信息,则加载 + if (null != currDataCfg) { + server = new Server(); + + System systemValue = parseJsonSystem.parseJsonToBean(currDataCfg.getDataValue()); + server.setSystem(systemValue); + + if (currDataCfg instanceof ZkDataImpl) { + ZkDataImpl zkData = (ZkDataImpl) currDataCfg; + + // 监控的路径信息 + String defaultWatchPath = ZookeeperPath.FLOW_ZK_PATH_SERVER_CLUSTER.getKey(); + defaultWatchPath = defaultWatchPath + ZookeeperPath.ZK_SEPARATOR.getKey() + zkData.getName(); + + this.zookeeperListen.watchPath(currZkPath, defaultWatchPath); + } + } + } + + return server; + } + + /** + * 写入本地文件配制信息 + * 方法描述 + * @param name 名称信息 + * @return + * @创建日期 2016年9月18日 + 
*/ + private void writeProperties(String name, String value) { + + // 加载数据 + String path = RuleszkToxmlLoader.class.getClassLoader().getResource(ZookeeperPath.ZK_LOCAL_WRITE_PATH.getKey()) + .getPath(); + + checkNotNull(path, "write properties curr Path :" + path + " is null! must is not null"); + + path = new File(path).getPath() + File.separator; + path += name; + + // 进行数据写入 + try { + Files.write(value.getBytes(), new File(path)); + } catch (IOException e1) { + e1.printStackTrace(); + } + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/ClusterInfo.java b/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/ClusterInfo.java new file mode 100644 index 000000000..23224baee --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/ClusterInfo.java @@ -0,0 +1,28 @@ +package io.mycat.config.loader.zkprocess.zookeeper; + +/** + * Created by magicdoom on 2016/12/21. + */ +public class ClusterInfo { + private int clusterSize; + private String clusterNodes; + + + public int getClusterSize() { + return clusterSize; + } + + public void setClusterSize(int clusterSize) { + this.clusterSize = clusterSize; + } + + public String getClusterNodes() { + return clusterNodes; + } + + public void setClusterNodes(String clusterNodes) { + this.clusterNodes = clusterNodes; + } + + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/DataInf.java b/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/DataInf.java new file mode 100644 index 000000000..88f450d84 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/DataInf.java @@ -0,0 +1,30 @@ +package io.mycat.config.loader.zkprocess.zookeeper; + +/** + * 数据节点信息 +* 源文件名:DataInf.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public interface DataInf { + + /** + * 获取信息,以:分隔两个值 + * @return + */ + String getDataInfo(); + + /** + * 返回数据节点值信息 + * 方法描述 + * @return + * @创建日期 2016年9月17日 + */ + String getDataValue(); + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/DiretoryInf.java b/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/DiretoryInf.java new file mode 100644 index 000000000..db82e4c81 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/DiretoryInf.java @@ -0,0 +1,50 @@ +package io.mycat.config.loader.zkprocess.zookeeper; + +import java.util.List; + +/** + * 目录接口信息 +* 源文件名:DiretoryInf.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public interface DiretoryInf { + + /** + * 获取当前的目录信息 + * @return + */ + String getDiretoryInfo(); + + /** + * 添加目录或者数据节点 + * @param branch + */ + void add(DiretoryInf directory); + + /** + * 添加数据节点信息 + * 方法描述 + * @param data + * @创建日期 2016年9月15日 + */ + void add(DataInf data); + + /** + * 获取子节点信息 + * @return + */ + List getSubordinateInfo(); + + /** + * 获取节点的名称 + * @字段说明 getDataName + */ + String getDataName(); + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/process/ZkDataImpl.java b/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/process/ZkDataImpl.java new file mode 100644 index 000000000..191e79799 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/process/ZkDataImpl.java @@ -0,0 +1,62 @@ +package io.mycat.config.loader.zkprocess.zookeeper.process; + +import io.mycat.config.loader.zkprocess.zookeeper.DataInf; + +/** + * 数据节点信息 +* 源文件名:DataImpl.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class ZkDataImpl implements DataInf { + + /** + * 名称信息 + * @字段说明 name + */ + private String name; + + /** + * 当前值信息 + * @字段说明 value + */ + private String value; + + public ZkDataImpl(String name, String value) { + super(); + this.name = name; + this.value = value; + } + + @Override + public String getDataInfo() { + return this.name + ":" + this.value; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } + + @Override + public String getDataValue() { + return this.value; + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/process/ZkDirectoryImpl.java b/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/process/ZkDirectoryImpl.java new file mode 100644 index 000000000..9541c90f4 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/process/ZkDirectoryImpl.java @@ -0,0 +1,86 @@ +package io.mycat.config.loader.zkprocess.zookeeper.process; + +import java.util.ArrayList; +import java.util.List; + +import io.mycat.config.loader.zkprocess.zookeeper.DataInf; +import io.mycat.config.loader.zkprocess.zookeeper.DiretoryInf; + +/** + * zk的目录节点信息 +* 源文件名:ZkDirectoryMsg.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. 
+*/ +public class ZkDirectoryImpl implements DiretoryInf { + + /** + * 整个节点信息 + * @字段说明 subordinateInfo + */ + private List subordinateInfoList = new ArrayList(); + + /** + * 节点的名称信息 + * @字段说明 name + */ + private String name; + + /** + * 当前节点的数据信息 + * @字段说明 value + */ + private String value; + + public ZkDirectoryImpl(String name, String value) { + this.name = name; + this.value = value; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } + + @Override + public String getDiretoryInfo() { + return name + ":" + value; + } + + @Override + public void add(DiretoryInf branch) { + this.subordinateInfoList.add(branch); + } + + @Override + public List getSubordinateInfo() { + return this.subordinateInfoList; + } + + @Override + public void add(DataInf data) { + this.subordinateInfoList.add(data); + } + + @Override + public String getDataName() { + return this.name; + } + +} diff --git a/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/process/ZkMultLoader.java b/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/process/ZkMultLoader.java new file mode 100644 index 000000000..cb4444d55 --- /dev/null +++ b/src/main/java/io/mycat/config/loader/zkprocess/zookeeper/process/ZkMultLoader.java @@ -0,0 +1,259 @@ +package io.mycat.config.loader.zkprocess.zookeeper.process; + +import static com.google.common.base.Preconditions.checkNotNull; + +import java.nio.charset.StandardCharsets; +import java.util.List; + +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.utils.ZKPaths; +import org.apache.zookeeper.data.Stat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.gson.Gson; + +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.zookeeper.DataInf; +import 
io.mycat.config.loader.zkprocess.zookeeper.DiretoryInf; + +/** + * 进行zk获取数据类信息 +* 源文件名:AbstractLoader.java +* 文件版本:1.0.0 +* 创建作者:liujun +* 创建日期:2016年9月15日 +* 修改作者:liujun +* 修改日期:2016年9月15日 +* 文件描述:TODO +* 版权所有:Copyright 2016 zjhz, Inc. All Rights Reserved. +*/ +public class ZkMultLoader { + + /** + * 日志 + * @字段说明 LOGGER + */ + private static final Logger LOGGER = LoggerFactory.getLogger(ZkMultLoader.class); + + /** + * zk连接信息 + * @字段说明 curator + */ + private CuratorFramework curator; + + /** + * 进行数据转换操作 + * @字段说明 gson + */ + private Gson gson = new Gson(); + + /** + * 得到树形节点信息 + * 方法描述 + * @param path + * @param zkDirectory + * @throws Exception + * @创建日期 2016年9月15日 + */ + public void getTreeDirectory(String path, String name, DiretoryInf zkDirectory) throws Exception { + + boolean check = this.checkPathExists(path); + + // 如果文件存在,则继续遍历 + if (check) { + // 首先获取当前节点的数据,然后再递归 + String currDate = this.getDataToString(path); + + List childPathList = this.getChildNames(path); + + // 如果存在子目录信息,则进行 + if (null != childPathList && !childPathList.isEmpty()) { + DiretoryInf directory = new ZkDirectoryImpl(name, currDate); + + // 添加目录节点信息 + zkDirectory.add(directory); + + for (String childPath : childPathList) { + this.getTreeDirectory(path + ZookeeperPath.ZK_SEPARATOR.getKey() + childPath, childPath, directory); + } + } + // 添加当前的数据节点信息 + else { + zkDirectory.add(new ZkDataImpl(name, currDate)); + } + } + } + + /** + * 检查文件是否存在 + * 方法描述 + * @param path + * @return + * @创建日期 2016年9月21日 + */ + protected boolean checkPathExists(String path) { + try { + Stat state = this.curator.checkExists().forPath(path); + + if (null != state) { + return true; + } + } catch (Exception e) { + e.printStackTrace(); + } + return false; + } + + /** + * get data from zookeeper and convert to string with check not null. 
+ */ + protected String getDataToString(String path) throws Exception { + byte[] raw = curator.getData().forPath(path); + + checkNotNull(raw, "data of " + path + " must be not null!"); + return byteToString(raw); + } + + /** + * get child node name list based on path from zookeeper. + * @throws Exception + */ + protected List getChildNames(String path) throws Exception { + return curator.getChildren().forPath(path); + } + + protected void checkAndwriteString(String parentPath, String currpath, String value) throws Exception { + checkNotNull(parentPath, "data of path" + parentPath + " must be not null!"); + checkNotNull(currpath, "data of path" + currpath + " must be not null!"); + checkNotNull(value, "data of value:" + value + " must be not null!"); + + String nodePath = ZKPaths.makePath(parentPath, currpath); + + Stat stat = curator.checkExists().forPath(nodePath); + + if (null == stat) { + this.createPath(nodePath); + } + + LOGGER.debug("ZkMultLoader write file :" + nodePath + ", value :" + value); + + curator.setData().inBackground().forPath(nodePath, value.getBytes()); + + } + + /** + * 创建配制信息 + * 方法描述 + * @param configKey 配制的当前路径名称信息 + * @param filterInnerMap 最终的信息是否为map + * @param configDirectory 配制的目录 + * @param restDirectory 子目录信息 + * @创建日期 2016年9月11日 + */ + public boolean createPath(String path) { + + // 得到当前的目录信息 + LOGGER.trace("createPath child path is {}", path); + + boolean result = true; + try { + // 进行目录的创建操作 + ZKPaths.mkdirs(curator.getZookeeperClient().getZooKeeper(), path); + } catch (Exception e) { + LOGGER.error(" createPath error", e); + result = false; + } + + return result; + } + + protected void writeZkString(String path, String value) throws Exception { + checkNotNull(path, "data of path" + path + " must be not null!"); + checkNotNull(value, "data of value:" + value + " must be not null!"); + + curator.setData().forPath(path, value.getBytes()); + } + + /** + * raw byte data to string + */ + protected String byteToString(byte[] raw) { + // 
return empty json {}. + if (raw.length == 0) { + return "{}"; + } + return new String(raw, StandardCharsets.UTF_8); + } + + /** + * 通过名称数据节点信息 + * 方法描述 + * @param zkDirectory + * @param name + * @return + * @创建日期 2016年9月16日 + */ + protected DataInf getZkData(DiretoryInf zkDirectory, String name) { + List list = zkDirectory.getSubordinateInfo(); + + if (null != list && !list.isEmpty()) { + for (Object directObj : list) { + + if (directObj instanceof ZkDataImpl) { + ZkDataImpl zkDirectoryValue = (ZkDataImpl) directObj; + + if (name.equals(zkDirectoryValue.getName())) { + + return zkDirectoryValue; + } + } + } + } + return null; + } + + /** + * 通过名称获得目录节点信息 + * 方法描述 + * @param zkDirectory + * @param name + * @return + * @创建日期 2016年9月16日 + */ + protected DiretoryInf getZkDirectory(DiretoryInf zkDirectory, String name) { + List list = zkDirectory.getSubordinateInfo(); + + if (null != list && !list.isEmpty()) { + for (Object directObj : list) { + + if (directObj instanceof DiretoryInf) { + DiretoryInf zkDirectoryValue = (DiretoryInf) directObj; + + if (name.equals(zkDirectoryValue.getDataName())) { + + return zkDirectoryValue; + } + } + } + } + return null; + } + + public CuratorFramework getCurator() { + return curator; + } + + public void setCurator(CuratorFramework curator) { + this.curator = curator; + } + + public Gson getGson() { + return gson; + } + + public void setGson(Gson gson) { + this.gson = gson; + } + +} diff --git a/src/main/java/io/mycat/config/model/ClusterConfig.java b/src/main/java/io/mycat/config/model/ClusterConfig.java new file mode 100644 index 000000000..08dc053bd --- /dev/null +++ b/src/main/java/io/mycat/config/model/ClusterConfig.java @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config.model; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.w3c.dom.Element; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; + +import io.mycat.config.util.ConfigException; +import io.mycat.config.util.ConfigUtil; +import io.mycat.util.SplitUtil; + +/** + * @author mycat + */ +public class ClusterConfig { + private final Map nodes; + private final Map> groups; + + public ClusterConfig(Element root, int port) { + nodes = Collections.unmodifiableMap(loadNode(root, port)); + groups = Collections.unmodifiableMap(loadGroup(root, nodes)); + } + + public Map getNodes() { + return nodes; + } + + public Map> getGroups() { + return groups; + } + + private static Map loadNode(Element root, int port) { + Map nodes = new HashMap(); + NodeList list = root.getElementsByTagName("node"); + Set hostSet = new HashSet(); + for (int i = 0, n = list.getLength(); i < n; i++) { + Node node = list.item(i); + if (node instanceof Element) { + Element 
element = (Element) node; + String name = element.getAttribute("name").trim(); + if (nodes.containsKey(name)) { + throw new ConfigException("node name duplicated :" + name); + } + + Map props = ConfigUtil.loadElements(element); + String host = (String) props.get("host"); + if (null == host || "".equals(host)) { + throw new ConfigException("host empty in node: " + name); + } + if (hostSet.contains(host)) { + throw new ConfigException("node host duplicated :" + host); + } + + String wei = (String) props.get("weight"); + if (null == wei || "".equals(wei)) { + throw new ConfigException("weight should not be null in host:" + host); + } + int weight = Integer.parseInt(wei); + if (weight <= 0) { + throw new ConfigException("weight should be > 0 in host:" + host + " weight:" + weight); + } + + MycatNodeConfig conf = new MycatNodeConfig(name, host, port, weight); + nodes.put(name, conf); + hostSet.add(host); + } + } + return nodes; + } + + private static Map> loadGroup(Element root, Map nodes) { + Map> groups = new HashMap>(); + NodeList list = root.getElementsByTagName("group"); + for (int i = 0, n = list.getLength(); i < n; i++) { + Node node = list.item(i); + if (node instanceof Element) { + Element e = (Element) node; + String groupName = e.getAttribute("name").trim(); + if (groups.containsKey(groupName)) { + throw new ConfigException("group duplicated : " + groupName); + } + + Map props = ConfigUtil.loadElements(e); + String value = (String) props.get("nodeList"); + if (null == value || "".equals(value)) { + throw new ConfigException("group should contain 'nodeList'"); + } + + String[] sList = SplitUtil.split(value, ',', true); + + if (null == sList || sList.length == 0) { + throw new ConfigException("group should contain 'nodeList'"); + } + + for (String s : sList) { + if (!nodes.containsKey(s)) { + throw new ConfigException("[ node :" + s + "] in [ group:" + groupName + "] doesn't exist!"); + } + } + List nodeList = Arrays.asList(sList); + groups.put(groupName, 
nodeList); + } + } + if (!groups.containsKey("default")) { + List nodeList = new ArrayList(nodes.keySet()); + groups.put("default", nodeList); + } + return groups; + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/config/model/DBHostConfig.java b/src/main/java/io/mycat/config/model/DBHostConfig.java new file mode 100644 index 000000000..a3b34df42 --- /dev/null +++ b/src/main/java/io/mycat/config/model/DBHostConfig.java @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.config.model; + +public class DBHostConfig { + + private long idleTimeout = SystemConfig.DEFAULT_IDLE_TIMEOUT; // 连接池中连接空闲超时时间 + private final String hostName; + private final String ip; + private final int port; + private final String url; + private final String user; + private final String password; + private final String encryptPassword; //密文 + private int maxCon ; + private int minCon ; + private String dbType; + private String filters="mergeStat"; + private long logTime = 300000; + private int weight; + + public String getDbType() { + return dbType; + } + + public void setDbType(String dbType) { + this.dbType = dbType; + } + + public DBHostConfig(String hostName, String ip, int port, String url, + String user, String password,String encryptPassword) { + super(); + this.hostName = hostName; + this.ip = ip; + this.port = port; + this.url = url; + this.user = user; + this.password = password; + this.encryptPassword = encryptPassword; + } + + public long getIdleTimeout() { + return idleTimeout; + } + + public void setIdleTimeout(long idleTimeout) { + this.idleTimeout = idleTimeout; + } + + public int getMaxCon() { + return maxCon; + } + + public void setMaxCon(int maxCon) { + this.maxCon = maxCon; + } + + public int getMinCon() { + return minCon; + } + + public void setMinCon(int minCon) { + this.minCon = minCon; + } + + public String getHostName() { + return hostName; + } + + public String getIp() { + return ip; + } + + public int getPort() { + return port; + } + + public String getUrl() { + return url; + } + + public String getUser() { + return user; + } + public String getFilters() { + return filters; + } + + public void setFilters(String filters) { + this.filters = filters; + } + public String getPassword() { + return password; + } + + public long getLogTime() { + return logTime; + } + + public void setLogTime(long logTime) { + this.logTime = logTime; + } + + public int getWeight() { + return weight; + } + + public void setWeight(int 
weight) { + this.weight = weight; + } + + public String getEncryptPassword() { + return this.encryptPassword; + } + + @Override + public String toString() { + return "DBHostConfig [hostName=" + hostName + ", url=" + url + "]"; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/config/model/DataHostConfig.java b/src/main/java/io/mycat/config/model/DataHostConfig.java new file mode 100644 index 000000000..6a014d010 --- /dev/null +++ b/src/main/java/io/mycat/config/model/DataHostConfig.java @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.config.model; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import com.google.common.collect.Iterables; +import io.mycat.backend.datasource.PhysicalDBPool; + +/** + * Datahost is a group of DB servers which is synchronized with each other + * + * @author wuzhih + * + */ +public class DataHostConfig { + public static final int NOT_SWITCH_DS = -1; + public static final int DEFAULT_SWITCH_DS = 1; + public static final int SYN_STATUS_SWITCH_DS = 2; + public static final int CLUSTER_STATUS_SWITCH_DS = 3; + private static final Pattern pattern = Pattern.compile("\\s*show\\s+slave\\s+status\\s*",Pattern.CASE_INSENSITIVE); + private static final Pattern patternCluster = Pattern.compile("\\s*show\\s+status\\s+like\\s+'wsrep%'",Pattern.CASE_INSENSITIVE); + private String name; + private int maxCon = SystemConfig.DEFAULT_POOL_SIZE; + private int minCon = 10; + private int balance = PhysicalDBPool.BALANCE_NONE; + private int writeType = PhysicalDBPool.WRITE_ONLYONE_NODE; + private final String dbType; + private final String dbDriver; + private final DBHostConfig[] writeHosts; + private final Map readHosts; + private String hearbeatSQL; + private boolean isShowSlaveSql=false; + private boolean isShowClusterSql=false; + private String connectionInitSql; + private int slaveThreshold = -1; + private final int switchType; + private String filters="mergeStat"; + private long logTime=300000; + private boolean tempReadHostAvailable = false; //如果写服务挂掉, 临时读服务是否继续可用 + private final Set dataNodes; //包含的所有dataNode名字 + private String slaveIDs; + + public DataHostConfig(String name, String dbType, String dbDriver, + DBHostConfig[] writeHosts, Map readHosts,int switchType,int slaveThreshold, boolean tempReadHostAvailable) { + super(); + this.name = name; + this.dbType = dbType; + this.dbDriver = dbDriver; + this.writeHosts = writeHosts; + this.readHosts = readHosts; + 
this.switchType=switchType; + this.slaveThreshold=slaveThreshold; + this.tempReadHostAvailable = tempReadHostAvailable; + this.dataNodes = new HashSet<>(); + } + + public boolean isTempReadHostAvailable() { + return this.tempReadHostAvailable; + } + + public int getSlaveThreshold() { + return slaveThreshold; + } + + public void setSlaveThreshold(int slaveThreshold) { + this.slaveThreshold = slaveThreshold; + } + + public int getSwitchType() { + return switchType; + } + + public String getConnectionInitSql() + { + return connectionInitSql; + } + + public void setConnectionInitSql(String connectionInitSql) + { + this.connectionInitSql = connectionInitSql; + } + + public int getWriteType() { + return writeType; + } + + public void setWriteType(int writeType) { + this.writeType = writeType; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public boolean isShowSlaveSql() + { + return isShowSlaveSql; + } + + public int getMaxCon() { + return maxCon; + } + + public void setMaxCon(int maxCon) { + this.maxCon = maxCon; + } + + public int getMinCon() { + return minCon; + } + + public void setMinCon(int minCon) { + this.minCon = minCon; + } + + public String getSlaveIDs() { + return slaveIDs; + } + + public void setSlaveIDs(String slaveIDs) { + this.slaveIDs = slaveIDs; + } + + public int getBalance() { + return balance; + } + + public void setBalance(int balance) { + this.balance = balance; + } + + public String getDbType() { + return dbType; + } + + public String getDbDriver() { + return dbDriver; + } + + public DBHostConfig[] getWriteHosts() { + return writeHosts; + } + + public Map getReadHosts() { + return readHosts; + } + + public String getHearbeatSQL() { + return hearbeatSQL; + } + + public void setHearbeatSQL(String heartbeatSQL) { + this.hearbeatSQL = heartbeatSQL; + Matcher matcher = pattern.matcher(heartbeatSQL); + if (matcher.find()) + { + isShowSlaveSql=true; + } + Matcher matcher2 = 
patternCluster.matcher(heartbeatSQL); + if (matcher2.find()) + { + isShowClusterSql=true; + } + } + + public String getFilters() { + return filters; + } + + public void setFilters(String filters) { + this.filters = filters; + } + + public long getLogTime() { + return logTime; + } + + public boolean isShowClusterSql() { + return this.isShowClusterSql; + } + + public void setLogTime(long logTime) { + this.logTime = logTime; + } + + public void addDataNode(String name){ + this.dataNodes.add(name); + } + + public String getRandomDataNode() { + int index = (int) (Math.random() * dataNodes.size()); + return Iterables.get(dataNodes,index); + } + + public boolean containDataNode(String randomDn) { + return dataNodes.contains(randomDn); + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/config/model/DataNodeConfig.java b/src/main/java/io/mycat/config/model/DataNodeConfig.java new file mode 100644 index 000000000..096172863 --- /dev/null +++ b/src/main/java/io/mycat/config/model/DataNodeConfig.java @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config.model; + +/** + * 用于描述一个数据节点的配置 + * + * @author mycat + */ +public final class DataNodeConfig { + + private final String name; + private final String database; + private final String dataHost; + + public DataNodeConfig(String name, String database, String dataHost) { + super(); + this.name = name; + this.database = database; + this.dataHost = dataHost; + } + + public String getName() { + return name; + } + + public String getDatabase() { + return database; + } + + public String getDataHost() { + return dataHost; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/config/model/FirewallConfig.java b/src/main/java/io/mycat/config/model/FirewallConfig.java new file mode 100644 index 000000000..4f0e66c3f --- /dev/null +++ b/src/main/java/io/mycat/config/model/FirewallConfig.java @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config.model; + +import java.io.File; +import java.io.InputStream; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.transform.Transformer; +import javax.xml.transform.TransformerFactory; +import javax.xml.transform.dom.DOMSource; +import javax.xml.transform.stream.StreamResult; + +import io.mycat.util.StringUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.xml.sax.EntityResolver; +import org.xml.sax.InputSource; +import org.xml.sax.SAXException; + +import com.alibaba.druid.wall.WallConfig; +import com.alibaba.druid.wall.WallProvider; +import com.alibaba.druid.wall.spi.MySqlWallProvider; + +import io.mycat.MycatServer; +import io.mycat.config.MycatConfig; +import io.mycat.config.loader.xml.XMLServerLoader; + +/** + * 防火墙配置定义 + * + * @author songwie + * @author zhuam + */ +public final class FirewallConfig { + + private static final Logger LOGGER = LoggerFactory.getLogger(FirewallConfig.class); + + private Map> whitehost;//具体host的白名单 + private Map> whitehostMask;//网段的白名单 + public static Pattern getMaskPattern(String host){ + return Pattern.compile(host.replaceAll(".","\\.").replaceAll("[*]","[0-9]*").replaceAll("%","[0-9]*")); + } + private List blacklist; + private boolean check = false; + + private WallConfig wallConfig = new WallConfig(); + + private static WallProvider provider ; + + public FirewallConfig() { } + + public void 
init(){ + if(check){ + provider = new MySqlWallProvider(wallConfig); + provider.setBlackListEnable(true); + } + } + + public Map> getWhitehostMask() { + return whitehostMask; + } + + public void setWhitehostMask(Map> whitehostMask) { + this.whitehostMask = whitehostMask; + } + + public WallProvider getWallProvider(){ + return provider; + } + + public Map> getWhitehost() { + return this.whitehost; + } + public void setWhitehost(Map> whitehost) { + this.whitehost = whitehost; + } + /** + * 通过manager端命令动态配置白名单,配置防火墙方法之一,一共有两处,另一处: + * @see XMLServerLoader + * + * @modification 修改增加网段白名单 + * @date 2016/12/8 + * @modifiedBy Hash Zhang + */ + public boolean addWhitehost(String host, List Users) { + if (existsHost(host)){ + return false; + } + else { + if(host.contains("*")||host.contains("%")){ + this.whitehostMask.put(getMaskPattern(host),Users); + }else { + this.whitehost.put(host, Users); + + } + return true; + } + } + + public List getBlacklist() { + return this.blacklist; + } + public void setBlacklist(List blacklist) { + this.blacklist = blacklist; + } + + public WallProvider getProvider() { + return provider; + } + + public boolean existsHost(String host) { + return this.whitehost!=null && whitehost.get(host)!=null ; + } + public boolean canConnect(String host,String user) { + if(whitehost==null || whitehost.size()==0){ + MycatConfig config = MycatServer.getInstance().getConfig(); + Map users = config.getUsers(); + return users.containsKey(user); + }else{ + List list = whitehost.get(host); + if(list==null){ + return false; + } + for(UserConfig userConfig : list){ + if(userConfig.getName().equals(user)){ + return true; + } + } + } + return false ; + } + + public static void setProvider(WallProvider provider) { + FirewallConfig.provider = provider; + } + + public void setWallConfig(WallConfig wallConfig) { + this.wallConfig = wallConfig; + + } + + public boolean isCheck() { + return this.check; + } + + public void setCheck(boolean check) { + this.check = check; + } 
+ + public WallConfig getWallConfig() { + return this.wallConfig; + } + + public synchronized static void updateToFile(String host, List userConfigs) throws Exception{ + LOGGER.debug("set white host:" + host + "user:" + userConfigs); + String filename = SystemConfig.getHomePath()+ File.separator +"conf"+ File.separator +"server.xml"; + //String filename = "E:\\MyProject\\Mycat-Server\\src\\main\\resources\\server.xml"; + + DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); + factory.setNamespaceAware(false); + factory.setValidating(false); + DocumentBuilder builder = factory.newDocumentBuilder(); + builder.setEntityResolver(new IgnoreDTDEntityResolver()); + Document xmldoc = builder.parse(filename); + Element whitehost = (Element) xmldoc.getElementsByTagName("whitehost").item(0); + Element firewall = (Element) xmldoc.getElementsByTagName("firewall").item(0); + + if (firewall == null) { + firewall = xmldoc.createElement("firewall"); + Element root = xmldoc.getDocumentElement(); + root.appendChild(firewall); + if(whitehost==null){ + whitehost = xmldoc.createElement("whitehost"); + firewall.appendChild(whitehost); + } + } + + for(UserConfig userConfig : userConfigs){ + String user = userConfig.getName(); + Element hostEle = xmldoc.createElement("host"); + hostEle.setAttribute("host", host); + hostEle.setAttribute("user", user); + + whitehost.appendChild(hostEle); + } + + + TransformerFactory factory2 = TransformerFactory.newInstance(); + Transformer former = factory2.newTransformer(); + String systemId = xmldoc.getDoctype().getSystemId(); + if(systemId!=null){ + former.setOutputProperty(javax.xml.transform.OutputKeys.DOCTYPE_SYSTEM, systemId); + } + former.transform(new DOMSource(xmldoc), new StreamResult(new File(filename))); + + } + static class IgnoreDTDEntityResolver implements EntityResolver{ + public InputSource resolveEntity(java.lang.String publicId, java.lang.String systemId) throws SAXException, java.io.IOException{ + if 
(systemId.contains("server.dtd")){ + //InputSource is = new InputSource(new ByteArrayInputStream("".getBytes())); + InputStream dtd = XMLServerLoader.class.getResourceAsStream("/server.dtd"); + InputSource is = new InputSource(dtd); + return is; + } else { + return null; + } + } + } +// public static void main(String[] args) throws Exception { +// List userConfigs = new ArrayList(); +// UserConfig user = new UserConfig(); +// user.setName("mycat"); +// userConfigs.add(user); +// updateToFile("127.0.0.1",userConfigs); +// } + + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/config/cluster/MycatNodeConfig.java b/src/main/java/io/mycat/config/model/MycatNodeConfig.java similarity index 98% rename from src/main/java/io/mycat/server/config/cluster/MycatNodeConfig.java rename to src/main/java/io/mycat/config/model/MycatNodeConfig.java index 2af8c3d06..5bc96508e 100644 --- a/src/main/java/io/mycat/server/config/cluster/MycatNodeConfig.java +++ b/src/main/java/io/mycat/config/model/MycatNodeConfig.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.config.cluster; +package io.mycat.config.model; /** * @author mycat diff --git a/src/main/java/io/mycat/server/config/node/SchemaConfig.java b/src/main/java/io/mycat/config/model/SchemaConfig.java similarity index 66% rename from src/main/java/io/mycat/server/config/node/SchemaConfig.java rename to src/main/java/io/mycat/config/model/SchemaConfig.java index 1703330e2..2002590b4 100644 --- a/src/main/java/io/mycat/server/config/node/SchemaConfig.java +++ b/src/main/java/io/mycat/config/model/SchemaConfig.java @@ -21,8 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.config.node; - +package io.mycat.config.model; import java.util.HashMap; import java.util.HashSet; @@ -34,32 +33,29 @@ * @author mycat */ public class SchemaConfig { - private Random random = new Random(); - private String name; - private Map tables; - private boolean noSharding; - private String dataNode; - private Set metaDataNodes; - private Set allDataNodes; - /** - * when a select sql has no limit condition ,and default max limit to prevent memory problem - * when return a large result set - */ - private int defaultMaxLimit; - private boolean checkSQLSchema; - /** - * key is join relation ,A.ID=B.PARENT_ID value is Root Table ,if a->b*->c* ,then A is root - * table - */ - private Map joinRel2TableMap = new HashMap(); - private String[] allDataNodeStrArr; - private boolean needSupportMultiDBType = false; - private String defaultDataNodeDbType; - private Map dataNodeDbTypeMap = new HashMap<>(); - - public SchemaConfig(){ - super(); - } + private final Random random = new Random(); + private final String name; + private final Map tables; + private final boolean noSharding; + private final String dataNode; + private final Set metaDataNodes; + private final Set allDataNodes; + /** + * when a select sql has no limit condition ,and default max limit to + * prevent memory problem when return a large result set + */ + private final int defaultMaxLimit; + private final boolean checkSQLSchema; + private boolean needSupportMultiDBType=false; + private String defaultDataNodeDbType; + /** + * key is join relation ,A.ID=B.PARENT_ID value is Root Table ,if a->b*->c* + * ,then A is root table + */ + private final Map joinRel2TableMap = new HashMap(); + private final String[] allDataNodeStrArr; + + private Map dataNodeDbTypeMap=new HashMap<>(); public SchemaConfig(String name, String dataNode, Map tables, int defaultMaxLimit, @@ -67,29 +63,25 @@ public SchemaConfig(String name, String dataNode, this.name = name; this.dataNode = dataNode; 
this.checkSQLSchema = checkSQLschema; + this.tables = tables; this.defaultMaxLimit = defaultMaxLimit; - this.setTables(tables); - } - - public void setTables(Map tables) { - this.tables = tables; - buildJoinMap(tables); - this.noSharding = (tables == null || tables.isEmpty()); - if (noSharding && dataNode == null) { - throw new RuntimeException(name - + " in noSharding mode schema must have default dataNode "); - } - this.metaDataNodes = buildMetaDataNodes(); - this.allDataNodes = buildAllDataNodes(); + buildJoinMap(tables); + this.noSharding = (tables == null || tables.isEmpty()); + if (noSharding && dataNode == null) { + throw new RuntimeException(name + + " in noSharding mode schema must have default dataNode "); + } + this.metaDataNodes = buildMetaDataNodes(); + this.allDataNodes = buildAllDataNodes(); // this.metaDataNodes = buildAllDataNodes(); - if (this.allDataNodes != null && !this.allDataNodes.isEmpty()) { - String[] dnArr = new String[this.allDataNodes.size()]; - dnArr = this.allDataNodes.toArray(dnArr); - this.allDataNodeStrArr = dnArr; - } else { - this.allDataNodeStrArr = null; - } - } + if (this.allDataNodes != null && !this.allDataNodes.isEmpty()) { + String[] dnArr = new String[this.allDataNodes.size()]; + dnArr = this.allDataNodes.toArray(dnArr); + this.allDataNodeStrArr = dnArr; + } else { + this.allDataNodeStrArr = null; + } + } public String getDefaultDataNodeDbType() { @@ -105,22 +97,10 @@ public boolean isCheckSQLSchema() { return checkSQLSchema; } - public void setName(String name) { - this.name = name; - } - public int getDefaultMaxLimit() { return defaultMaxLimit; } - public void setCheckSQLSchema(boolean checkSQLSchema) { - this.checkSQLSchema = checkSQLSchema; - } - - public void setDefaultMaxLimit(int defaultMaxLimit) { - this.defaultMaxLimit = defaultMaxLimit; - } - private void buildJoinMap(Map tables2) { if (tables == null || tables.isEmpty()) { @@ -164,10 +144,6 @@ public String getDataNode() { return dataNode; } - public void 
setDataNode(String dataNode) { - this.dataNode = dataNode; - } - public Map getTables() { return tables; } @@ -198,7 +174,7 @@ public String getRandomDataNode() { if (this.allDataNodeStrArr == null) { return null; } - int index = Math.abs(random.nextInt()) % allDataNodeStrArr.length; + int index = Math.abs(random.nextInt(Integer.MAX_VALUE)) % allDataNodeStrArr.length; return this.allDataNodeStrArr[index]; } diff --git a/src/main/java/io/mycat/server/config/node/SystemConfig.java b/src/main/java/io/mycat/config/model/SystemConfig.java similarity index 59% rename from src/main/java/io/mycat/server/config/node/SystemConfig.java rename to src/main/java/io/mycat/config/model/SystemConfig.java index 002cc452d..686429bcc 100644 --- a/src/main/java/io/mycat/server/config/node/SystemConfig.java +++ b/src/main/java/io/mycat/config/model/SystemConfig.java @@ -21,16 +21,16 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.config.node; - -import io.mycat.server.Isolations; +package io.mycat.config.model; import java.io.File; import java.io.IOException; +import io.mycat.config.Isolations; + /** * 系统基础配置项 - * + * * @author mycat */ public final class SystemConfig { @@ -40,16 +40,22 @@ public final class SystemConfig { private static final int DEFAULT_MANAGER_PORT = 9066; private static final String DEFAULT_CHARSET = "utf8"; - private static final String DEFAULT_SQL_PARSER = "fdbparser";// druidparser - private static final int DEFAULT_BUFFER_CHUNK_SIZE = 4096; + private static final String DEFAULT_SQL_PARSER = "druidparser";// fdbparser, druidparser + private static final short DEFAULT_BUFFER_CHUNK_SIZE = 4096; + private static final int DEFAULT_BUFFER_POOL_PAGE_SIZE = 512*1024*4; + private static final short DEFAULT_BUFFER_POOL_PAGE_NUMBER = 64; private int processorBufferLocalPercent; - private static final int DEFAULT_PROCESSORS = Runtime.getRuntime() - .availableProcessors(); + private static final int DEFAULT_PROCESSORS = 
Runtime.getRuntime().availableProcessors(); private int frontSocketSoRcvbuf = 1024 * 1024; private int frontSocketSoSndbuf = 4 * 1024 * 1024; private int backSocketSoRcvbuf = 4 * 1024 * 1024;// mysql 5.6 // net_buffer_length // defaut 4M + + private final static String RESERVED_SYSTEM_MEMORY_BYTES = "384m"; + private final static String MEMORY_PAGE_SIZE = "1m"; + private final static String SPILLS_FILE_BUFFER_SIZE = "2K"; + private final static String DATANODE_SORTED_TEMP_DIR = "datanode"; private int backSocketSoSndbuf = 1024 * 1024; private int frontSocketNoDelay = 1; // 0=false private int backSocketNoDelay = 1; // 1=true @@ -66,11 +72,11 @@ public final class SystemConfig { private static final String DEFAULT_CLUSTER_HEARTBEAT_PASS = "_HEARTBEAT_PASS_"; private static final int DEFAULT_PARSER_COMMENT_VERSION = 50148; private static final int DEFAULT_SQL_RECORD_COUNT = 10; - // 全局表一致性检测任务,默认24小时调度一次 - private static final long DEFAULT_GLOBAL_TABLE_CHECK_PERIOD = 24 * 60 * 60 * 1000L; + private static final boolean DEFAULT_USE_ZK_SWITCH = false; private int maxStringLiteralLength = 65535; private int frontWriteQueueSize = 2048; private String bindIp = "0.0.0.0"; + private String fakeMySQLVersion = null; private int serverPort; private int managerPort; private String charset; @@ -88,24 +94,50 @@ public final class SystemConfig { private String clusterHeartbeatUser; private String clusterHeartbeatPass; private long clusterHeartbeatPeriod; - private long glableTableCheckPeriod; private long clusterHeartbeatTimeout; private int clusterHeartbeatRetry; - private int txIsolation; private int parserCommentVersion; private int sqlRecordCount; - private long processorBufferPool; - private int processorBufferChunk; + + // a page size + private int bufferPoolPageSize; + + //minimum allocation unit + private short bufferPoolChunkSize; + + // buffer pool page number + private short bufferPoolPageNumber; + + //大结果集阈值,默认512kb + private int maxResultSet=512*1024; + 
//大结果集拒绝策略次数过滤限制,默认10次 + private int bigResultSizeSqlCount=10; + //大结果集拒绝策咯,bufferpool使用率阈值(0-100),默认80% + private int bufferUsagePercent=80; + //大结果集保护策咯,0:不开启,1:级别1为在当前mucat bufferpool + //使用率大于bufferUsagePercent阈值时,拒绝超过defaultBigResultSizeSqlCount + //sql次数阈值并且符合超过大结果集阈值maxResultSet的所有sql + //默认值0 + private int flowControlRejectStrategy=0; + //清理大结果集记录周期 + private long clearBigSqLResultSetMapMs=10*60*1000; + private int defaultMaxLimit = DEFAULT_MAX_LIMIT; public static final int SEQUENCEHANDLER_LOCALFILE = 0; public static final int SEQUENCEHANDLER_MYSQLDB = 1; public static final int SEQUENCEHANDLER_LOCAL_TIME = 2; + public static final int SEQUENCEHANDLER_ZK_DISTRIBUTED = 3; + public static final int SEQUENCEHANDLER_ZK_GLOBAL_INCREMENT = 4; + /* + * 注意!!! 目前mycat支持的MySQL版本,如果后续有新的MySQL版本,请添加到此数组, 对于MySQL的其他分支, + * 比如MariaDB目前版本号已经到10.1.x,但是其驱动程序仍然兼容官方的MySQL,因此这里版本号只需要MySQL官方的版本号即可。 + */ + public static final String[] MySQLVersions = { "5.5", "5.6", "5.7" }; private int sequnceHandlerType = SEQUENCEHANDLER_LOCALFILE; private String sqlInterceptor = "io.mycat.server.interceptor.impl.DefaultSqlInterceptor"; private String sqlInterceptorType = "select"; - private String sqlInterceptorFile = System.getProperty("user.dir") - + "/logs/sql.txt"; + private String sqlInterceptorFile = System.getProperty("user.dir")+"/logs/sql.txt"; public static final int MUTINODELIMIT_SMALL_DATA = 0; public static final int MUTINODELIMIT_LAR_DATA = 1; private int mutiNodeLimitType = MUTINODELIMIT_SMALL_DATA; @@ -117,9 +149,92 @@ public final class SystemConfig { private int usingAIO = 0; private int packetHeaderSize = 4; private int maxPacketSize = 16 * 1024 * 1024; - private int mycatNodeId = 1; - private int useCompression = 0; + private int mycatNodeId=1; + private int useCompression =0; + private int useSqlStat = 1; + //子查询中存在关联查询的情况下,检查关联字段中是否有分片字段 .默认 false + private boolean subqueryRelationshipCheck = false; + + // 是否使用HandshakeV10Packet来与client进行通讯, 1:是 , 
0:否(使用HandshakePacket) + // 使用HandshakeV10Packet为的是兼容高版本的jdbc驱动, 后期稳定下来考虑全部采用HandshakeV10Packet来通讯 + private int useHandshakeV10 = 0; + + //处理分布式事务开关,默认为不过滤分布式事务 + private int handleDistributedTransactions = 0; + + private int checkTableConsistency = 0; + private long checkTableConsistencyPeriod = CHECKTABLECONSISTENCYPERIOD; + private final static long CHECKTABLECONSISTENCYPERIOD = 1 * 60 * 1000; + + private int processorBufferPoolType = 0; + + // 全局表一致性检测任务,默认24小时调度一次 + private static final long DEFAULT_GLOBAL_TABLE_CHECK_PERIOD = 24 * 60 * 60 * 1000L; + private int useGlobleTableCheck = 1; // 全局表一致性检查开关 + + private long glableTableCheckPeriod; + + /** + * Mycat 使用 Off Heap For Merge/Order/Group/Limit计算相关参数 + */ + + + /** + * 是否启用Off Heap for Merge 1-启用,0-不启用 + */ + private int useOffHeapForMerge; + + /** + *页大小,对应MemoryBlock的大小,单位为M + */ + private String memoryPageSize; + + + /** + * DiskRowWriter写磁盘是临时写Buffer,单位为K + */ + private String spillsFileBufferSize; + /** + * 启用结果集流输出,不经过merge模块, + */ + private int useStreamOutput; + + /** + * 该变量仅在Merge使用On Heap + * 内存方式时起作用,如果使用Off Heap内存方式 + * 那么可以认为-Xmx就是系统预留内存。 + * 在On Heap上给系统预留的内存, + * 主要供新小对象创建,JAVA简单数据结构使用 + * 以保证在On Heap上大结果集计算时情况,能快速响应其他 + * 连接操作。 + */ + private String systemReserveMemorySize; + + private String XARecoveryLogBaseDir; + + private String XARecoveryLogBaseName; + + /** + * 排序时,内存不够时,将已经排序的结果集 + * 写入到临时目录 + */ + private String dataNodeSortedTempDir; + + /** + * 是否启用zk切换 + */ + private boolean useZKSwitch=DEFAULT_USE_ZK_SWITCH; + + + /** + * huangyiming add + * 无密码登陆标示, 0:否,1:是,默认为0 + */ + private int nonePasswordLogin = DEFAULT_NONEPASSWORDLOGIN ; + + private final static int DEFAULT_NONEPASSWORDLOGIN = 0; + public String getDefaultSqlParser() { return defaultSqlParser; } @@ -133,12 +248,17 @@ public SystemConfig() { this.managerPort = DEFAULT_MANAGER_PORT; this.charset = DEFAULT_CHARSET; this.processors = DEFAULT_PROCESSORS; - - processorBufferChunk = DEFAULT_BUFFER_CHUNK_SIZE; - 
this.processorExecutor = (DEFAULT_PROCESSORS != 1) ? DEFAULT_PROCESSORS * 2 - : 4; + this.bufferPoolPageSize = DEFAULT_BUFFER_POOL_PAGE_SIZE; + this.bufferPoolChunkSize = DEFAULT_BUFFER_CHUNK_SIZE; + + /** + * 大结果集时 需增大 network buffer pool pages. + */ + this.bufferPoolPageNumber = (short) (DEFAULT_PROCESSORS*20); + + this.processorExecutor = (DEFAULT_PROCESSORS != 1) ? DEFAULT_PROCESSORS * 2 : 4; this.managerExecutor = 2; - processorBufferPool = DEFAULT_BUFFER_CHUNK_SIZE * processors * 1000; + this.processorBufferLocalPercent = 100; this.timerExecutor = 2; this.idleTimeout = DEFAULT_IDLE_TIMEOUT; @@ -154,9 +274,92 @@ public SystemConfig() { this.parserCommentVersion = DEFAULT_PARSER_COMMENT_VERSION; this.sqlRecordCount = DEFAULT_SQL_RECORD_COUNT; this.glableTableCheckPeriod = DEFAULT_GLOBAL_TABLE_CHECK_PERIOD; + this.useOffHeapForMerge = 1; + this.memoryPageSize = MEMORY_PAGE_SIZE; + this.spillsFileBufferSize = SPILLS_FILE_BUFFER_SIZE; + this.useStreamOutput = 0; + this.systemReserveMemorySize = RESERVED_SYSTEM_MEMORY_BYTES; + this.dataNodeSortedTempDir = System.getProperty("user.dir"); + this.XARecoveryLogBaseDir = SystemConfig.getHomePath()+"/tmlogs/"; + this.XARecoveryLogBaseName ="tmlog"; + } + public String getDataNodeSortedTempDir() { + return dataNodeSortedTempDir; } - + + public int getUseOffHeapForMerge() { + return useOffHeapForMerge; + } + + public void setUseOffHeapForMerge(int useOffHeapForMerge) { + this.useOffHeapForMerge = useOffHeapForMerge; + } + + public String getMemoryPageSize() { + return memoryPageSize; + } + + public void setMemoryPageSize(String memoryPageSize) { + this.memoryPageSize = memoryPageSize; + } + + public String getSpillsFileBufferSize() { + return spillsFileBufferSize; + } + + public void setSpillsFileBufferSize(String spillsFileBufferSize) { + this.spillsFileBufferSize = spillsFileBufferSize; + } + + public int getUseStreamOutput() { + return useStreamOutput; + } + + public void setUseStreamOutput(int useStreamOutput) { + 
this.useStreamOutput = useStreamOutput; + } + + public String getSystemReserveMemorySize() { + return systemReserveMemorySize; + } + + public void setSystemReserveMemorySize(String systemReserveMemorySize) { + this.systemReserveMemorySize = systemReserveMemorySize; + } + + public boolean isUseZKSwitch() { + return useZKSwitch; + } + + public void setUseZKSwitch(boolean useZKSwitch) { + this.useZKSwitch = useZKSwitch; + } + + public String getXARecoveryLogBaseDir() { + return XARecoveryLogBaseDir; + } + + public void setXARecoveryLogBaseDir(String XARecoveryLogBaseDir) { + this.XARecoveryLogBaseDir = XARecoveryLogBaseDir; + } + + public String getXARecoveryLogBaseName() { + return XARecoveryLogBaseName; + } + + public void setXARecoveryLogBaseName(String XARecoveryLogBaseName) { + this.XARecoveryLogBaseName = XARecoveryLogBaseName; + } + + public int getUseGlobleTableCheck() { + return useGlobleTableCheck; + } + + public void setUseGlobleTableCheck(int useGlobleTableCheck) { + this.useGlobleTableCheck = useGlobleTableCheck; + } + public long getGlableTableCheckPeriod() { return glableTableCheckPeriod; } @@ -231,26 +434,23 @@ public void setDefaultMaxLimit(int defaultMaxLimit) { public static String getHomePath() { String home = System.getProperty(SystemConfig.SYS_HOME); - if (home != null) { - if (home.endsWith(File.pathSeparator)) { + if (home != null + && home.endsWith(File.pathSeparator)) { home = home.substring(0, home.length() - 1); System.setProperty(SystemConfig.SYS_HOME, home); - } } // MYCAT_HOME为空,默认尝试设置为当前目录或上级目录。BEN - if (home == null) { + if(home == null) { try { - String path = new File("..").getCanonicalPath().replaceAll( - "\\\\", "/"); - File conf = new File(path + "/conf"); - if (conf.exists() && conf.isDirectory()) { + String path = new File("..").getCanonicalPath().replaceAll("\\\\", "/"); + File conf = new File(path+"/conf"); + if(conf.exists() && conf.isDirectory()) { home = path; } else { - path = new 
File(".").getCanonicalPath().replaceAll("\\\\", - "/"); - conf = new File(path + "/conf"); - if (conf.exists() && conf.isDirectory()) { + path = new File(".").getCanonicalPath().replaceAll("\\\\", "/"); + conf = new File(path+"/conf"); + if(conf.exists() && conf.isDirectory()) { home = path; } } @@ -265,12 +465,25 @@ public static String getHomePath() { return home; } + + // 是否使用SQL统计 + public int getUseSqlStat() + { + return useSqlStat; + } + + public void setUseSqlStat(int useSqlStat) + { + this.useSqlStat = useSqlStat; + } - public int getUseCompression() { + public int getUseCompression() + { return useCompression; } - public void setUseCompression(int useCompression) { + public void setUseCompression(int useCompression) + { this.useCompression = useCompression; } @@ -282,6 +495,14 @@ public void setCharset(String charset) { this.charset = charset; } + public String getFakeMySQLVersion() { + return fakeMySQLVersion; + } + + public void setFakeMySQLVersion(String mysqlVersion) { + this.fakeMySQLVersion = mysqlVersion; + } + public int getServerPort() { return serverPort; } @@ -450,20 +671,69 @@ public void setSqlRecordCount(int sqlRecordCount) { this.sqlRecordCount = sqlRecordCount; } - public long getProcessorBufferPool() { - return processorBufferPool; + + public short getBufferPoolChunkSize() { + return bufferPoolChunkSize; + } + + public void setBufferPoolChunkSize(short bufferPoolChunkSize) { + this.bufferPoolChunkSize = bufferPoolChunkSize; + } + + public int getMaxResultSet() { + return maxResultSet; + } + + public void setMaxResultSet(int maxResultSet) { + this.maxResultSet = maxResultSet; + } + + public int getBigResultSizeSqlCount() { + return bigResultSizeSqlCount; + } + + public void setBigResultSizeSqlCount(int bigResultSizeSqlCount) { + this.bigResultSizeSqlCount = bigResultSizeSqlCount; + } + + public int getBufferUsagePercent() { + return bufferUsagePercent; + } + + public void setBufferUsagePercent(int bufferUsagePercent) { + 
this.bufferUsagePercent = bufferUsagePercent; + } + + public int getFlowControlRejectStrategy() { + return flowControlRejectStrategy; + } + + public void setFlowControlRejectStrategy(int flowControlRejectStrategy) { + this.flowControlRejectStrategy = flowControlRejectStrategy; + } + + public long getClearBigSqLResultSetMapMs() { + return clearBigSqLResultSetMapMs; + } + + public void setClearBigSqLResultSetMapMs(long clearBigSqLResultSetMapMs) { + this.clearBigSqLResultSetMapMs = clearBigSqLResultSetMapMs; + } + + public int getBufferPoolPageSize() { + return bufferPoolPageSize; } - public void setProcessorBufferPool(long processorBufferPool) { - this.processorBufferPool = processorBufferPool; + public void setBufferPoolPageSize(int bufferPoolPageSize) { + this.bufferPoolPageSize = bufferPoolPageSize; } - public int getProcessorBufferChunk() { - return processorBufferChunk; + public short getBufferPoolPageNumber() { + return bufferPoolPageNumber; } - public void setProcessorBufferChunk(int processorBufferChunk) { - this.processorBufferChunk = processorBufferChunk; + public void setBufferPoolPageNumber(short bufferPoolPageNumber) { + this.bufferPoolPageNumber = bufferPoolPageNumber; } public int getFrontSocketSoRcvbuf() { @@ -603,24 +873,88 @@ public String toString() { + ", clusterHeartbeatUser=" + clusterHeartbeatUser + ", clusterHeartbeatPass=" + clusterHeartbeatPass + ", clusterHeartbeatPeriod=" + clusterHeartbeatPeriod - + ", glableTableCheckPeriod=" + glableTableCheckPeriod + ", clusterHeartbeatTimeout=" + clusterHeartbeatTimeout + ", clusterHeartbeatRetry=" + clusterHeartbeatRetry + ", txIsolation=" + txIsolation + ", parserCommentVersion=" + parserCommentVersion + ", sqlRecordCount=" + sqlRecordCount - + ", processorBufferPool=" + processorBufferPool - + ", processorBufferChunk=" + processorBufferChunk + + ", bufferPoolPageSize=" + bufferPoolPageSize + + ", bufferPoolChunkSize=" + bufferPoolChunkSize + + ", bufferPoolPageNumber=" + bufferPoolPageNumber + + 
", maxResultSet=" +maxResultSet + + ", bigResultSizeSqlCount="+bigResultSizeSqlCount + + ", bufferUsagePercent="+bufferUsagePercent + + ", flowControlRejectStrategy="+flowControlRejectStrategy + + ", clearBigSqLResultSetMapMs="+clearBigSqLResultSetMapMs + ", defaultMaxLimit=" + defaultMaxLimit + ", sequnceHandlerType=" + sequnceHandlerType + ", sqlInterceptor=" + sqlInterceptor + ", sqlInterceptorType=" + sqlInterceptorType + ", sqlInterceptorFile=" + sqlInterceptorFile - + ", mutiNodeLimitType=" + mutiNodeLimitType - + ", mutiNodePatchSize=" + mutiNodePatchSize - + ", defaultSqlParser=" + defaultSqlParser + ", usingAIO=" - + usingAIO + ", packetHeaderSize=" + packetHeaderSize - + ", maxPacketSize=" + maxPacketSize + ", mycatNodeId=" - + mycatNodeId + "]"; + + ", mutiNodeLimitType=" + mutiNodeLimitType + + ", mutiNodePatchSize=" + mutiNodePatchSize + + ", defaultSqlParser=" + defaultSqlParser + + ", usingAIO=" + usingAIO + + ", packetHeaderSize=" + packetHeaderSize + + ", maxPacketSize=" + maxPacketSize + + ", mycatNodeId=" + mycatNodeId + "]"; + } + + + public int getCheckTableConsistency() { + return checkTableConsistency; + } + + public void setCheckTableConsistency(int checkTableConsistency) { + this.checkTableConsistency = checkTableConsistency; + } + + public long getCheckTableConsistencyPeriod() { + return checkTableConsistencyPeriod; + } + + public void setCheckTableConsistencyPeriod(long checkTableConsistencyPeriod) { + this.checkTableConsistencyPeriod = checkTableConsistencyPeriod; + } + + public int getProcessorBufferPoolType() { + return processorBufferPoolType; + } + + public void setProcessorBufferPoolType(int processorBufferPoolType) { + this.processorBufferPoolType = processorBufferPoolType; } -} \ No newline at end of file + public int getHandleDistributedTransactions() { + return handleDistributedTransactions; + } + + public void setHandleDistributedTransactions(int handleDistributedTransactions) { + this.handleDistributedTransactions = 
handleDistributedTransactions; + } + + public int getUseHandshakeV10() { + return useHandshakeV10; + } + + public void setUseHandshakeV10(int useHandshakeV10) { + this.useHandshakeV10 = useHandshakeV10; + } + + public int getNonePasswordLogin() { + return nonePasswordLogin; + } + + public void setNonePasswordLogin(int nonePasswordLogin) { + this.nonePasswordLogin = nonePasswordLogin; + } + + public boolean isSubqueryRelationshipCheck() { + return subqueryRelationshipCheck; + } + + public void setSubqueryRelationshipCheck(boolean subqueryRelationshipCheck) { + this.subqueryRelationshipCheck = subqueryRelationshipCheck; + } + + +} diff --git a/src/main/java/io/mycat/config/model/TableConfig.java b/src/main/java/io/mycat/config/model/TableConfig.java new file mode 100644 index 000000000..dd2ede1a3 --- /dev/null +++ b/src/main/java/io/mycat/config/model/TableConfig.java @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config.model; + +import java.util.*; +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import com.alibaba.druid.sql.ast.SQLDataType; +import com.alibaba.druid.sql.ast.statement.SQLTableElement; +import io.mycat.config.model.rule.RuleConfig; +import io.mycat.util.SplitUtil; + +/** + * @author mycat + */ +public class TableConfig { + public static final int TYPE_GLOBAL_TABLE = 1; + public static final int TYPE_GLOBAL_DEFAULT = 0; + private final String name; + private final String primaryKey; + private final boolean autoIncrement; + private final boolean needAddLimit; + private final Set dbTypes; + private final int tableType; + private final ArrayList dataNodes; + private final ArrayList distTables; + private final RuleConfig rule; + private final String partitionColumn; + private final boolean ruleRequired; + private final TableConfig parentTC; + private final boolean childTable; + private final String joinKey; + private final String parentKey; + private final String locateRTableKeySql; + // only has one level of parent + private final boolean secondLevel; + private final boolean partionKeyIsPrimaryKey; + private final Random rand = new Random(); + + private volatile List tableElementList; + private volatile String tableStructureSQL; + private volatile Map> dataNodeTableStructureSQLMap; + private ReentrantReadWriteLock reentrantReadWriteLock = new ReentrantReadWriteLock(false); + + + public TableConfig(String name, String primaryKey, boolean autoIncrement,boolean needAddLimit, int tableType, + String dataNode,Set dbType, RuleConfig rule, boolean ruleRequired, + TableConfig parentTC, boolean isChildTable, String joinKey, + String parentKey,String subTables) { + if (name == null) { + throw new IllegalArgumentException("table name is null"); + } else if 
(dataNode == null) { + throw new IllegalArgumentException("dataNode name is null"); + } + this.primaryKey = primaryKey; + this.autoIncrement = autoIncrement; + this.needAddLimit=needAddLimit; + this.tableType = tableType; + this.dbTypes=dbType; + if (ruleRequired && rule == null) { + throw new IllegalArgumentException("ruleRequired but rule is null"); + } + + this.name = name.toUpperCase(); + + String theDataNodes[] = SplitUtil.split(dataNode, ',', '$', '-'); + if (theDataNodes == null || theDataNodes.length <= 0) { + throw new IllegalArgumentException("invalid table dataNodes: " + dataNode); + } + dataNodes = new ArrayList(theDataNodes.length); + for (String dn : theDataNodes) { + dataNodes.add(dn); + } + + if(subTables!=null && !subTables.equals("")){ + String sTables[] = SplitUtil.split(subTables, ',', '$', '-'); + if (sTables == null || sTables.length <= 0) { + throw new IllegalArgumentException("invalid table subTables"); + } + this.distTables = new ArrayList(sTables.length); + for (String table : sTables) { + distTables.add(table); + } + }else{ + this.distTables = new ArrayList(); + } + + this.rule = rule; + this.partitionColumn = (rule == null) ? 
null : rule.getColumn(); + partionKeyIsPrimaryKey=(partitionColumn==null)?primaryKey==null:partitionColumn.equals(primaryKey); + this.ruleRequired = ruleRequired; + this.childTable = isChildTable; + this.parentTC = parentTC; + this.joinKey = joinKey; + this.parentKey = parentKey; + if (parentTC != null) { + locateRTableKeySql = genLocateRootParentSQL(); + secondLevel = (parentTC.parentTC == null); + } else { + locateRTableKeySql = null; + secondLevel = false; + } + } + + public String getPrimaryKey() { + return primaryKey; + } + + public Set getDbTypes() + { + return dbTypes; + } + + public boolean isAutoIncrement() { + return autoIncrement; + } + + public boolean isNeedAddLimit() { + return needAddLimit; + } + + public boolean isSecondLevel() { + return secondLevel; + } + + public String getLocateRTableKeySql() { + return locateRTableKeySql; + } + + public boolean isGlobalTable() { + return this.tableType == TableConfig.TYPE_GLOBAL_TABLE; + } + + public String genLocateRootParentSQL() { + TableConfig tb = this; + StringBuilder tableSb = new StringBuilder(); + StringBuilder condition = new StringBuilder(); + TableConfig prevTC = null; + int level = 0; + String latestCond = null; + while (tb.parentTC != null) { + tableSb.append(tb.parentTC.name).append(','); + String relation = null; + if (level == 0) { + latestCond = " " + tb.parentTC.getName() + '.' + tb.parentKey + + "="; + } else { + relation = tb.parentTC.getName() + '.' + tb.parentKey + '=' + + tb.name + '.' + tb.joinKey; + condition.append(relation).append(" AND "); + } + level++; + prevTC = tb; + tb = tb.parentTC; + } + String sql = "SELECT " + + prevTC.parentTC.name + + '.' + + prevTC.parentKey + + " FROM " + + tableSb.substring(0, tableSb.length() - 1) + + " WHERE " + + ((level < 2) ? 
latestCond : condition.toString() + latestCond); + // System.out.println(this.name+" sql " + sql); + return sql; + + } + + public String getPartitionColumn() { + return partitionColumn; + } + + public int getTableType() { + return tableType; + } + + /** + * get root parent + * + * @return + */ + public TableConfig getRootParent() { + if (parentTC == null) { + return null; + } + TableConfig preParent = parentTC; + TableConfig parent = preParent.getParentTC(); + + while (parent != null) { + preParent = parent; + parent = parent.getParentTC(); + } + return preParent; + } + + public TableConfig getParentTC() { + return parentTC; + } + + public boolean isChildTable() { + return childTable; + } + + public String getJoinKey() { + return joinKey; + } + + public String getParentKey() { + return parentKey; + } + + /** + * @return upper-case + */ + public String getName() { + return name; + } + + public ArrayList getDataNodes() { + return dataNodes; + } + + public String getRandomDataNode() { + int index = Math.abs(rand.nextInt(Integer.MAX_VALUE)) % dataNodes.size(); + return dataNodes.get(index); + } + + public boolean isRuleRequired() { + return ruleRequired; + } + + public RuleConfig getRule() { + return rule; + } + + public boolean primaryKeyIsPartionKey() { + return partionKeyIsPrimaryKey; + } + + public ArrayList getDistTables() { + return this.distTables; + } + + public boolean isDistTable(){ + if(this.distTables!=null && !this.distTables.isEmpty() ){ + return true; + } + return false; + } + + public List getTableElementList() { + return tableElementList; + } + + public void setTableElementList(List tableElementList) { + this.tableElementList = tableElementList; + } + + public ReentrantReadWriteLock getReentrantReadWriteLock() { + return reentrantReadWriteLock; + } + + public void setReentrantReadWriteLock(ReentrantReadWriteLock reentrantReadWriteLock) { + this.reentrantReadWriteLock = reentrantReadWriteLock; + } + + public String getTableStructureSQL() { + return 
tableStructureSQL; + } + + public void setTableStructureSQL(String tableStructureSQL) { + this.tableStructureSQL = tableStructureSQL; + } + + + public Map> getDataNodeTableStructureSQLMap() { + return dataNodeTableStructureSQLMap; + } + + public void setDataNodeTableStructureSQLMap(Map> dataNodeTableStructureSQLMap) { + this.dataNodeTableStructureSQLMap = dataNodeTableStructureSQLMap; + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/config/node/TableConfigMap.java b/src/main/java/io/mycat/config/model/TableConfigMap.java similarity index 70% rename from src/main/java/io/mycat/server/config/node/TableConfigMap.java rename to src/main/java/io/mycat/config/model/TableConfigMap.java index 6390ef1d7..4cae28e22 100644 --- a/src/main/java/io/mycat/server/config/node/TableConfigMap.java +++ b/src/main/java/io/mycat/config/model/TableConfigMap.java @@ -1,31 +1,35 @@ -package io.mycat.server.config.node; - -import java.util.HashMap; - -/** - * 支持表名中包含引号[`] - * - * @author BEN GONG - */ -public class TableConfigMap extends HashMap { - - private static final long serialVersionUID = -6605226933829917213L; - - @Override - public TableConfig get(Object key) { - String tableName = key.toString(); - // 忽略表名中的引号。 - if(tableName.contains("`")) tableName = tableName.replaceAll("`", ""); - - return super.get(tableName); - } - - @Override - public boolean containsKey(Object key) { - String tableName = key.toString(); - // 忽略表名中的引号。 - if(tableName.contains("`")) tableName = tableName.replaceAll("`", ""); - - return super.containsKey(tableName); - } -} +package io.mycat.config.model; + +import java.util.HashMap; + +/** + * 支持表名中包含引号[`] + * + * @author BEN GONG + */ +public class TableConfigMap extends HashMap { + + private static final long serialVersionUID = -6605226933829917213L; + + @Override + public TableConfig get(Object key) { + String tableName = key.toString(); + // 忽略表名中的引号。 + if(tableName.contains("`")) { + tableName = tableName.replaceAll("`", ""); 
+ } + + return super.get(tableName); + } + + @Override + public boolean containsKey(Object key) { + String tableName = key.toString(); + // 忽略表名中的引号。 + if(tableName.contains("`")) { + tableName = tableName.replaceAll("`", ""); + } + + return super.containsKey(tableName); + } +} diff --git a/src/main/java/io/mycat/config/model/TableRuleConfig.java b/src/main/java/io/mycat/config/model/TableRuleConfig.java new file mode 100644 index 000000000..64799afad --- /dev/null +++ b/src/main/java/io/mycat/config/model/TableRuleConfig.java @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.config.model; + +import java.beans.Expression; + +/** + * @author mycat + */ +public final class TableRuleConfig { + + private final String name; + private final RuleConfig[] rules; + + public TableRuleConfig(String name, RuleConfig[] rules) { + this.name = name; + this.rules = rules; + if (rules != null) { + for (RuleConfig r : rules) { + r.tableRuleName = name; + } + } + } + + public String getName() { + return name; + } + + public RuleConfig[] getRules() { + return rules; + } + + public static final class RuleConfig { + private String tableRuleName; + /** upper-case */ + private final String[] columns; + private final Expression algorithm; + + public RuleConfig(String[] columns, Expression algorithm) { + this.columns = columns == null ? new String[0] : columns; + this.algorithm = algorithm; + } + + public String[] getColumns() { + return columns; + } + + public Expression getAlgorithm() { + return algorithm; + } + + @Override + public String toString() { + StringBuilder s = new StringBuilder(); + s.append("{tableRule:").append(tableRuleName).append(", columns:["); + for (int i = 0; i < columns.length; ++i) { + if (i > 0) { + s.append(", "); + } + s.append(columns[i]); + } + s.append("]}"); + return s.toString(); + } + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/config/model/UserConfig.java b/src/main/java/io/mycat/config/model/UserConfig.java new file mode 100644 index 000000000..70c2cfa21 --- /dev/null +++ b/src/main/java/io/mycat/config/model/UserConfig.java @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config.model; + +import java.util.Set; + +/** + * @author mycat + */ +public class UserConfig { + + private String name; + private String password; //明文 + private String encryptPassword; //密文 + private int benchmark = 0; // 负载限制, 默认0表示不限制 + private UserPrivilegesConfig privilegesConfig; //SQL表级的增删改查权限控制 + + /** + * 是否无密码登陆的默认账户 + */ + private boolean defaultAccount = false; + private boolean readOnly = false; + + public boolean isReadOnly() { + return readOnly; + } + + public void setReadOnly(boolean readOnly) { + this.readOnly = readOnly; + } + + private Set schemas; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public int getBenchmark() { + return benchmark; + } + + public void setBenchmark(int benchmark) { + this.benchmark = benchmark; + } + + public Set getSchemas() { + return schemas; + } + + public String getEncryptPassword() { + return this.encryptPassword; + } + + public void setEncryptPassword(String encryptPassword) { + this.encryptPassword = encryptPassword; + } + + public void setSchemas(Set schemas) { + this.schemas = schemas; + } + + public 
UserPrivilegesConfig getPrivilegesConfig() { + return privilegesConfig; + } + + public void setPrivilegesConfig(UserPrivilegesConfig privilegesConfig) { + this.privilegesConfig = privilegesConfig; + } + + + public boolean isDefaultAccount() { + return defaultAccount; + } + + public void setDefaultAccount(boolean defaultAccount) { + this.defaultAccount = defaultAccount; + } + + @Override + public String toString() { + return "UserConfig [name=" + name + ", password=" + password + ", encryptPassword=" + encryptPassword + + ", benchmark=" + benchmark + ", privilegesConfig=" + privilegesConfig + ", defaultAccount=" + + defaultAccount + ", readOnly=" + readOnly + ", schemas=" + schemas + "]"; + } + + + + + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/config/model/UserPrivilegesConfig.java b/src/main/java/io/mycat/config/model/UserPrivilegesConfig.java new file mode 100644 index 000000000..05a0b1f76 --- /dev/null +++ b/src/main/java/io/mycat/config/model/UserPrivilegesConfig.java @@ -0,0 +1,101 @@ +package io.mycat.config.model; + +import java.util.HashMap; +import java.util.Map; + + +/** + * 用户 SQL 权限配置 + * + * @author zhuam + * + */ +public class UserPrivilegesConfig { + + private boolean check = false; + + private Map schemaPrivileges = new HashMap(); + + public boolean isCheck() { + return check; + } + + public void setCheck(boolean check) { + this.check = check; + } + + public void addSchemaPrivilege(String schemaName, SchemaPrivilege privilege) { + this.schemaPrivileges.put(schemaName, privilege); + } + + public SchemaPrivilege getSchemaPrivilege(String schemaName) { + SchemaPrivilege schemaPrivilege = schemaPrivileges.get( schemaName ); + return schemaPrivilege; + } + + /** + * 库级权限 + */ + public static class SchemaPrivilege { + + private String name; + private int[] dml = new int[]{0, 0, 0, 0}; + + private Map tablePrivileges = new HashMap(); + + public String getName() { + return name; + } + + public void setName(String name) { + this.name 
= name; + } + + public int[] getDml() { + return dml; + } + + public void setDml(int[] dml) { + this.dml = dml; + } + + public void addTablePrivilege(String tableName, TablePrivilege privilege) { + this.tablePrivileges.put(tableName, privilege); + } + + public TablePrivilege getTablePrivilege(String tableName) { + TablePrivilege tablePrivilege = tablePrivileges.get( tableName ); + if ( tablePrivilege == null ) { + tablePrivilege = new TablePrivilege(); + tablePrivilege.setName(tableName); + tablePrivilege.setDml(dml); + } + return tablePrivilege; + } + } + + /** + * 表级权限 + */ + public static class TablePrivilege { + + private String name; + private int[] dml = new int[] { 0, 0, 0, 0 }; + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public int[] getDml() { + return dml; + } + + public void setDml(int[] dml) { + this.dml = dml; + } + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/route/function/RuleAlgorithm.java b/src/main/java/io/mycat/config/model/rule/RuleAlgorithm.java similarity index 90% rename from src/main/java/io/mycat/route/function/RuleAlgorithm.java rename to src/main/java/io/mycat/config/model/rule/RuleAlgorithm.java index 1cf2c03d0..660b2520c 100644 --- a/src/main/java/io/mycat/route/function/RuleAlgorithm.java +++ b/src/main/java/io/mycat/config/model/rule/RuleAlgorithm.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.route.function; +package io.mycat.config.model.rule; /** * @author mycat @@ -41,7 +41,7 @@ public interface RuleAlgorithm { * columnValue is column's value * @return never null */ - Integer calculate(String columnValue); + Integer calculate(String columnValue) ; - Integer[] calculateRange(String beginValue,String endValue); + Integer[] calculateRange(String beginValue,String endValue) ; } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/config/node/RuleConfig.java b/src/main/java/io/mycat/config/model/rule/RuleConfig.java similarity index 51% rename from src/main/java/io/mycat/server/config/node/RuleConfig.java rename to src/main/java/io/mycat/config/model/rule/RuleConfig.java index 55e5565f5..91fee3b56 100644 --- a/src/main/java/io/mycat/server/config/node/RuleConfig.java +++ b/src/main/java/io/mycat/config/model/rule/RuleConfig.java @@ -1,81 +1,118 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.config.node; - -import java.util.HashMap; -import java.util.Map; - -import io.mycat.route.function.AbstractPartitionAlgorithm; -import io.mycat.server.config.ConfigException; - - -/** - * 分片规则,column是用于分片的数据库物理字段 - * @author mycat - */ -public class RuleConfig { - private final String name; - private final String column; - private final String functionName; - private AbstractPartitionAlgorithm ruleAlgorithm; - private Map props = new HashMap(); - - public RuleConfig(String name,String column, String functionName) { - if (name == null) { - throw new ConfigException("name is null"); - } - this.name = name; - if (functionName == null) { - throw new ConfigException("functionName is null"); - } - this.functionName = functionName; - if (column == null || column.length() <= 0) { - throw new ConfigException("no rule column is found"); - } - this.column = column.toUpperCase(); - } - - public AbstractPartitionAlgorithm getRuleAlgorithm() { - return ruleAlgorithm; - } - public void setRuleAlgorithm(AbstractPartitionAlgorithm ruleAlgorithm) { - this.ruleAlgorithm = ruleAlgorithm; - } - public String getColumn() { - return column; - } - public String getFunctionName() { - return functionName; - } - public Map getProps() { - return props; - } - public void setProps(Map props) { - this.props = props; - } - public String getName() { - return name; - } - -} \ No newline at end of file +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.config.model.rule; + +import io.mycat.route.function.AbstractPartitionAlgorithm; + +import java.io.Serializable; + +/** + * 分片规则,column是用于分片的数据库物理字段 + * @author mycat + */ +public class RuleConfig implements Serializable { + private final String column; + private final String functionName; + private AbstractPartitionAlgorithm ruleAlgorithm; + + public RuleConfig(String column, String functionName) { + if (functionName == null) { + throw new IllegalArgumentException("functionName is null"); + } + this.functionName = functionName; + if (column == null || column.length() <= 0) { + throw new IllegalArgumentException("no rule column is found"); + } + this.column = column; + } + + + + public AbstractPartitionAlgorithm getRuleAlgorithm() { + return ruleAlgorithm; + } + + + + public void setRuleAlgorithm(AbstractPartitionAlgorithm ruleAlgorithm) { + this.ruleAlgorithm = ruleAlgorithm; + } + + + + /** + * @return unmodifiable, upper-case + */ + public String getColumn() { + return column; + } + + public String getFunctionName() { + return functionName; + } + + + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((column == null) ? 0 : column.hashCode()); + result = prime * result + ((functionName == null) ? 
0 : functionName.hashCode()); + result = prime * result + ((ruleAlgorithm == null) ? 0 : ruleAlgorithm.hashCode()); + return result; + } + + + //huangyiming add 判断分片规则是否相同,暂时根据这个去判断 + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + RuleConfig other = (RuleConfig) obj; + if (column == null) { + if (other.column != null) + return false; + } else if (!column.equals(other.column)) + return false; + if (functionName == null) { + if (other.functionName != null) + return false; + } else if (!functionName.equals(other.functionName)) + return false; + if (ruleAlgorithm == null) { + if (other.ruleAlgorithm != null) + return false; + } else if (!ruleAlgorithm.equals(other.ruleAlgorithm)) + return false; + return true; + } + + + +} diff --git a/src/main/java/io/mycat/server/config/node/UserConfig.java b/src/main/java/io/mycat/config/model/rule/TableRuleConfig.java similarity index 66% rename from src/main/java/io/mycat/server/config/node/UserConfig.java rename to src/main/java/io/mycat/config/model/rule/TableRuleConfig.java index 43e849939..8f44be2b2 100644 --- a/src/main/java/io/mycat/server/config/node/UserConfig.java +++ b/src/main/java/io/mycat/config/model/rule/TableRuleConfig.java @@ -21,28 +21,27 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.config.node; +package io.mycat.config.model.rule; -import java.util.Set; +import java.io.Serializable; /** * @author mycat */ -public class UserConfig { - - private String name; - private String password; - private boolean readOnly=false; - - public boolean isReadOnly() { - return readOnly; - } - - public void setReadOnly(boolean readOnly) { - this.readOnly = readOnly; - } - - private Set schemas; +public class TableRuleConfig implements Serializable { + private String name; + private final RuleConfig rule; + + public TableRuleConfig(String name, RuleConfig rule) { + if (name == null) { + throw new IllegalArgumentException("name is null"); + } + this.name = name; + if (rule == null) { + throw new IllegalArgumentException("no rule is found"); + } + this.rule =rule; + } public String getName() { return name; @@ -52,20 +51,11 @@ public void setName(String name) { this.name = name; } - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public Set getSchemas() { - return schemas; - } - - public void setSchemas(Set schemas) { - this.schemas = schemas; + /** + * @return unmodifiable + */ + public RuleConfig getRule() { + return rule; } -} \ No newline at end of file +} diff --git a/src/main/java/io/mycat/config/table/structure/MySQLTableStructureDetector.java b/src/main/java/io/mycat/config/table/structure/MySQLTableStructureDetector.java new file mode 100644 index 000000000..ef996d859 --- /dev/null +++ b/src/main/java/io/mycat/config/table/structure/MySQLTableStructureDetector.java @@ -0,0 +1,108 @@ +package io.mycat.config.table.structure; + +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.sqlengine.OneRawSQLQueryResultHandler; +import io.mycat.sqlengine.SQLJob; +import io.mycat.sqlengine.SQLQueryResult; +import 
io.mycat.sqlengine.SQLQueryResultListener; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; + +/** + * 表结构结果处理 + * + * @author Hash Zhang + * @version 1.0 + * @time 00:09:03 2016/5/11 + */ +public class MySQLTableStructureDetector implements Runnable { + private static final Logger LOGGER = LoggerFactory.getLogger(MySQLTableStructureDetector.class); + private static final String[] MYSQL_SHOW_CREATE_TABLE_COLMS = new String[]{ + "Table", + "Create Table"}; + private static final String sqlPrefix = "show create table "; + + @Override + public void run() { + for (SchemaConfig schema : MycatServer.getInstance().getConfig().getSchemas().values()) { + for (TableConfig table : schema.getTables().values()) { + for (String dataNode : table.getDataNodes()) { + try { + table.getReentrantReadWriteLock().writeLock().lock(); + ConcurrentHashMap> map = new ConcurrentHashMap<>(); + table.setDataNodeTableStructureSQLMap(map); + } finally { + table.getReentrantReadWriteLock().writeLock().unlock(); + } + OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler(MYSQL_SHOW_CREATE_TABLE_COLMS, new MySQLTableStructureListener(dataNode, table)); + resultHandler.setMark("Table Structure"); + PhysicalDBNode dn = MycatServer.getInstance().getConfig().getDataNodes().get(dataNode); + SQLJob sqlJob = new SQLJob(sqlPrefix + table.getName(), dn.getDatabase(), resultHandler, dn.getDbPool().getSource()); + sqlJob.run(); + } + } + } + } + + private static class MySQLTableStructureListener implements SQLQueryResultListener>> { + private String dataNode; + private TableConfig table; + + public MySQLTableStructureListener(String dataNode, TableConfig table) { + this.dataNode = dataNode; + this.table = table; + } + + /** + * @param result + * @// TODO: 2016/5/11 检查表元素,来确定是哪个元素不一致,未来还有其他用 + */ + @Override + public void 
onResult(SQLQueryResult> result) { + try { + table.getReentrantReadWriteLock().writeLock().lock(); + if (!result.isSuccess()) { + LOGGER.warn("Can't get table " + table.getName() + "'s config from DataNode:" + dataNode + "! Maybe the table is not initialized!"); + return; + } + String currentSql = result.getResult().get(MYSQL_SHOW_CREATE_TABLE_COLMS[1]); + Map> dataNodeTableStructureSQLMap = table.getDataNodeTableStructureSQLMap(); + if (dataNodeTableStructureSQLMap.containsKey(currentSql)) { + List dataNodeList = dataNodeTableStructureSQLMap.get(currentSql); + dataNodeList.add(dataNode); + } else { + List dataNodeList = new LinkedList<>(); + dataNodeList.add(dataNode); + dataNodeTableStructureSQLMap.put(currentSql,dataNodeList); + } + if (dataNodeTableStructureSQLMap.size() > 1) { + LOGGER.warn("Table [" + table.getName() + "] structure are not consistent!"); + LOGGER.warn("Currently detected: "); + for(String sql : dataNodeTableStructureSQLMap.keySet()){ + StringBuilder stringBuilder = new StringBuilder(); + for(String dn : dataNodeTableStructureSQLMap.get(sql)){ + stringBuilder.append("DataNode:[").append(dn).append("]"); + } + stringBuilder.append(":").append(sql); + LOGGER.warn(stringBuilder.toString()); + } + } + } finally { + table.getReentrantReadWriteLock().writeLock().unlock(); + } + } + } + +// public static void main(String[] args) { +// System.out.println(UUID.randomUUID()); +// } +} diff --git a/src/main/java/io/mycat/config/table/structure/TableStructureProcessor.java b/src/main/java/io/mycat/config/table/structure/TableStructureProcessor.java new file mode 100644 index 000000000..11aa7941f --- /dev/null +++ b/src/main/java/io/mycat/config/table/structure/TableStructureProcessor.java @@ -0,0 +1,13 @@ +package io.mycat.config.table.structure; + +/** + * 将表结构持久化 + * + * @author Hash Zhang + * @version 1.0 + * @time 00:09:03 2016/5/11 + */ +public abstract class TableStructureProcessor { + public abstract void saveTableStructure(); + public abstract 
void loadTableStructure(); +} diff --git a/src/main/java/io/mycat/server/config/cluster/BeanConfig.java b/src/main/java/io/mycat/config/util/BeanConfig.java similarity index 90% rename from src/main/java/io/mycat/server/config/cluster/BeanConfig.java rename to src/main/java/io/mycat/config/util/BeanConfig.java index 330633cf8..709779c04 100644 --- a/src/main/java/io/mycat/server/config/cluster/BeanConfig.java +++ b/src/main/java/io/mycat/config/util/BeanConfig.java @@ -21,18 +21,14 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.config.cluster; - -import io.mycat.server.config.ConfigException; -import io.mycat.server.config.Initializable; -import io.mycat.server.config.ParameterMapping; -import io.mycat.server.config.ReflectionProvider; -import io.mycat.util.ObjectUtil; +package io.mycat.config.util; import java.lang.reflect.InvocationTargetException; import java.util.HashMap; import java.util.Map; +import io.mycat.util.ObjectUtil; + /** * @author mycat */ @@ -83,6 +79,11 @@ public Object create(boolean initEarly) throws IllegalAccessException, Invocatio @Override public Object clone() { + try { + super.clone(); + } catch (CloneNotSupportedException e) { + throw new ConfigException(e); + } BeanConfig bc = null; try { bc = getClass().newInstance(); @@ -91,13 +92,13 @@ public Object clone() { } catch (IllegalAccessException e) { throw new ConfigException(e); } - if (bc == null) { - return null; - } +// if (bc == null) { +// return null; +// } bc.className = className; bc.name = name; - Map params = new HashMap(); - params.putAll(params); +// Map params = new HashMap(); +// params.putAll(params); return bc; } diff --git a/src/main/java/io/mycat/server/config/ConfigException.java b/src/main/java/io/mycat/config/util/ConfigException.java similarity index 97% rename from src/main/java/io/mycat/server/config/ConfigException.java rename to src/main/java/io/mycat/config/util/ConfigException.java index bb17004dd..59fe99449 100644 --- 
a/src/main/java/io/mycat/server/config/ConfigException.java +++ b/src/main/java/io/mycat/config/util/ConfigException.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.config; +package io.mycat.config.util; /** * @author mycat diff --git a/src/main/java/io/mycat/server/config/ConfigUtil.java b/src/main/java/io/mycat/config/util/ConfigUtil.java similarity index 84% rename from src/main/java/io/mycat/server/config/ConfigUtil.java rename to src/main/java/io/mycat/config/util/ConfigUtil.java index dd67c22ae..8913f0cc7 100644 --- a/src/main/java/io/mycat/server/config/ConfigUtil.java +++ b/src/main/java/io/mycat/config/util/ConfigUtil.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,20 +16,16 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.config; - -import io.mycat.server.config.cluster.BeanConfig; -import io.mycat.util.StringUtil; +package io.mycat.config.util; import java.io.IOException; import java.io.InputStream; import java.util.HashMap; -import java.util.LinkedHashMap; import java.util.Map; import java.util.Properties; @@ -49,6 +45,8 @@ import org.xml.sax.SAXException; import org.xml.sax.SAXParseException; +import io.mycat.util.StringUtil; + /** * @author mycat */ @@ -90,7 +88,7 @@ public static String filter(String text, Properties properties) { public static Document getDocument(final InputStream dtd, InputStream xml) throws ParserConfigurationException, SAXException, IOException { DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); -//factory.setValidating(false); + factory.setValidating(true); factory.setNamespaceAware(false); DocumentBuilder builder = factory.newDocumentBuilder(); builder.setEntityResolver(new EntityResolver() { @@ -142,6 +140,11 @@ public static Element loadElement(Element parent, String tagName) { } } + /** + * 获取节点下所有property + * @param parent + * @return key-value property键值对 + */ public static Map loadElements(Element parent) { Map map = new HashMap(); NodeList children = parent.getChildNodes(); @@ -150,28 +153,7 @@ public static Map loadElements(Element parent) { if (node instanceof Element) { Element e = (Element) node; String name = e.getNodeName(); - if ("property".equals(name)) { - String key = e.getAttribute("name"); - NodeList nl = e.getElementsByTagName("bean"); - if (nl.getLength() == 0) { - String value = e.getTextContent(); - map.put(key, StringUtil.isEmpty(value) ? 
null : value.trim()); - } else { - map.put(key, loadBean((Element) nl.item(0))); - } - } - } - } - return map; - } - public static LinkedHashMap loadLinkElements(Element parent) { - LinkedHashMap map = new LinkedHashMap(); - NodeList children = parent.getChildNodes(); - for (int i = 0; i < children.getLength(); i++) { - Node node = children.item(i); - if (node instanceof Element) { - Element e = (Element) node; - String name = e.getNodeName(); + //获取property if ("property".equals(name)) { String key = e.getAttribute("name"); NodeList nl = e.getElementsByTagName("bean"); diff --git a/src/main/java/io/mycat/server/config/DnPropertyUtil.java b/src/main/java/io/mycat/config/util/DnPropertyUtil.java similarity index 86% rename from src/main/java/io/mycat/server/config/DnPropertyUtil.java rename to src/main/java/io/mycat/config/util/DnPropertyUtil.java index daaa6eafc..a047f1493 100644 --- a/src/main/java/io/mycat/server/config/DnPropertyUtil.java +++ b/src/main/java/io/mycat/config/util/DnPropertyUtil.java @@ -1,15 +1,14 @@ -package io.mycat.server.config; - - -import io.mycat.server.config.node.SystemConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +package io.mycat.config.util; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.util.Properties; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.config.model.SystemConfig; + /** * * @author yanglixue diff --git a/src/main/java/io/mycat/server/config/FieldDictionary.java b/src/main/java/io/mycat/config/util/FieldDictionary.java similarity index 97% rename from src/main/java/io/mycat/server/config/FieldDictionary.java rename to src/main/java/io/mycat/config/util/FieldDictionary.java index b6869de12..5eee561d0 100644 --- a/src/main/java/io/mycat/server/config/FieldDictionary.java +++ b/src/main/java/io/mycat/config/util/FieldDictionary.java @@ -21,9 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.config; - -import io.mycat.server.config.info.JVMInfo; +package io.mycat.config.util; import java.lang.reflect.Field; import java.util.Collections; @@ -133,7 +131,8 @@ public FieldKey(String fieldName, Class declaringClass, int order) { i++; c = c.getSuperclass(); } - depth = new Integer(i); + //不用构造器创建Integer,用静态方法节省时间和空间,因为depth是不可变变量 + depth = Integer.valueOf(i); } @Override diff --git a/src/main/java/io/mycat/server/config/Initializable.java b/src/main/java/io/mycat/config/util/Initializable.java similarity index 97% rename from src/main/java/io/mycat/server/config/Initializable.java rename to src/main/java/io/mycat/config/util/Initializable.java index a648a5888..a6f4e9021 100644 --- a/src/main/java/io/mycat/server/config/Initializable.java +++ b/src/main/java/io/mycat/config/util/Initializable.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.config; +package io.mycat.config.util; /** * @author mycat diff --git a/src/main/java/io/mycat/server/config/info/JVMInfo.java b/src/main/java/io/mycat/config/util/JVMInfo.java similarity index 98% rename from src/main/java/io/mycat/server/config/info/JVMInfo.java rename to src/main/java/io/mycat/config/util/JVMInfo.java index 149f76776..80bf3ca0a 100644 --- a/src/main/java/io/mycat/server/config/info/JVMInfo.java +++ b/src/main/java/io/mycat/config/util/JVMInfo.java @@ -21,9 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.config.info; - -import io.mycat.server.config.ReflectionProvider; +package io.mycat.config.util; import java.lang.reflect.Field; import java.text.AttributedString; diff --git a/src/main/java/io/mycat/server/config/ObjectAccessException.java b/src/main/java/io/mycat/config/util/ObjectAccessException.java similarity index 97% rename from src/main/java/io/mycat/server/config/ObjectAccessException.java rename to src/main/java/io/mycat/config/util/ObjectAccessException.java index aff8a5cd1..9f241eb8d 100644 --- a/src/main/java/io/mycat/server/config/ObjectAccessException.java +++ b/src/main/java/io/mycat/config/util/ObjectAccessException.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.config; +package io.mycat.config.util; /** * @author mycat diff --git a/src/main/java/io/mycat/server/config/OrderRetainingMap.java b/src/main/java/io/mycat/config/util/OrderRetainingMap.java similarity index 98% rename from src/main/java/io/mycat/server/config/OrderRetainingMap.java rename to src/main/java/io/mycat/config/util/OrderRetainingMap.java index bcc88321a..98740ce97 100644 --- a/src/main/java/io/mycat/server/config/OrderRetainingMap.java +++ b/src/main/java/io/mycat/config/util/OrderRetainingMap.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.config; +package io.mycat.config.util; import java.util.ArrayList; import java.util.Collection; diff --git a/src/main/java/io/mycat/server/config/ParameterMapping.java b/src/main/java/io/mycat/config/util/ParameterMapping.java similarity index 90% rename from src/main/java/io/mycat/server/config/ParameterMapping.java rename to src/main/java/io/mycat/config/util/ParameterMapping.java index b96c0be00..f8cc5518e 100644 --- a/src/main/java/io/mycat/server/config/ParameterMapping.java +++ b/src/main/java/io/mycat/config/util/ParameterMapping.java @@ -21,10 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.config; - -import io.mycat.server.config.cluster.BeanConfig; -import io.mycat.util.StringUtil; +package io.mycat.config.util; import java.beans.BeanInfo; import java.beans.IntrospectionException; @@ -40,6 +37,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import io.mycat.util.StringUtil; + /** * @author mycat */ @@ -48,14 +47,23 @@ public class ParameterMapping { .getLogger(ParameterMapping.class); private static final Map, PropertyDescriptor[]> descriptors = new HashMap, PropertyDescriptor[]>(); + /** + * 将property键值对赋值组装到object中 + * @param object 目标反射对象 + * @param parameter property的键值对 + * @throws IllegalAccessException + * @throws InvocationTargetException + */ public static void mapping(Object object, Map parameter) throws IllegalAccessException, InvocationTargetException { + //获取用于导出clazz这个JavaBean的所有属性的PropertyDescriptor PropertyDescriptor[] pds = getDescriptors(object.getClass()); for (int i = 0; i < pds.length; i++) { PropertyDescriptor pd = pds[i]; Object obj = parameter.get(pd.getName()); Object value = obj; Class cls = pd.getPropertyType(); + //类型转换 if (obj instanceof String) { String string = (String) obj; if (!StringUtil.isEmpty(string)) { @@ -73,13 +81,13 @@ public static void mapping(Object object, Map paramete } value = list.toArray(); } - if (cls != null) { - if (value != null) { + //赋值 + if (cls != null + && value != null) { Method method = pd.getWriteMethod(); if (method != null) { method.invoke(object, new Object[] { value }); } - } } } } @@ -106,15 +114,23 @@ public static Object createBean(BeanConfig config) throws IllegalAccessException return bean; } + /** + * 用于导出clazz这个JavaBean的所有属性的PropertyDescriptor + * @param clazz + * @return + */ private static PropertyDescriptor[] getDescriptors(Class clazz) { + //PropertyDescriptor类表示JavaBean类通过存储器导出一个属性 PropertyDescriptor[] pds; List list; PropertyDescriptor[] pds2 = descriptors.get(clazz); + //该clazz是否第一次加载 if (null == pds2) { try { BeanInfo beanInfo 
= Introspector.getBeanInfo(clazz); pds = beanInfo.getPropertyDescriptors(); list = new ArrayList(); + //加载每一个类型不为空的property for (int i = 0; i < pds.length; i++) { if (null != pds[i].getPropertyType()) { list.add(pds[i]); diff --git a/src/main/java/io/mycat/server/config/ReflectionProvider.java b/src/main/java/io/mycat/config/util/ReflectionProvider.java similarity index 99% rename from src/main/java/io/mycat/server/config/ReflectionProvider.java rename to src/main/java/io/mycat/config/util/ReflectionProvider.java index 0cda680b0..36bcb8af6 100644 --- a/src/main/java/io/mycat/server/config/ReflectionProvider.java +++ b/src/main/java/io/mycat/config/util/ReflectionProvider.java @@ -21,9 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.config; - -import io.mycat.server.config.info.JVMInfo; +package io.mycat.config.util; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; diff --git a/src/main/java/io/mycat/server/config/Visitor.java b/src/main/java/io/mycat/config/util/Visitor.java similarity index 97% rename from src/main/java/io/mycat/server/config/Visitor.java rename to src/main/java/io/mycat/config/util/Visitor.java index 7d3cae0fb..9b8e9bcd8 100644 --- a/src/main/java/io/mycat/server/config/Visitor.java +++ b/src/main/java/io/mycat/config/util/Visitor.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.config; +package io.mycat.config.util; /** * @author mycat diff --git a/src/main/java/io/mycat/locator/ZookeeperServiceLocator.java b/src/main/java/io/mycat/locator/ZookeeperServiceLocator.java deleted file mode 100644 index f0f33448d..000000000 --- a/src/main/java/io/mycat/locator/ZookeeperServiceLocator.java +++ /dev/null @@ -1,46 +0,0 @@ -package io.mycat.locator; - -import io.mycat.server.config.ConfigException; -import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.framework.CuratorFrameworkFactory; -import org.apache.curator.retry.ExponentialBackoffRetry; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.TimeUnit; - -/** - * ServiceLocator zookeeper implements. - *

responsible for connecting zookeeper server and provide

- * Created by v1.lion on 2015/10/5. - */ -public class ZookeeperServiceLocator { - private static final Logger LOGGER = LoggerFactory.getLogger(ZookeeperServiceLocator.class); - - private ZookeeperServiceLocator() { - super(); - } - - public static CuratorFramework createConnection(String connectString) { - CuratorFramework curatorFramework = CuratorFrameworkFactory - .newClient(connectString, new ExponentialBackoffRetry(100, 6)); - - //start connection - curatorFramework.start(); - LOGGER.debug("connect to zookeeper server : {}", connectString); - - //wait 3 second to establish connect - try { - curatorFramework.blockUntilConnected(3, TimeUnit.SECONDS); - if (curatorFramework.getZookeeperClient().isConnected()) { - return curatorFramework; - } - } catch (InterruptedException e) { - LOGGER.error(e.getMessage(), e); - } - - //fail situation - curatorFramework.close(); - throw new ConfigException("failed to connect to zookeeper service : " + connectString); - } -} diff --git a/src/main/java/io/mycat/manager/ManagerConnection.java b/src/main/java/io/mycat/manager/ManagerConnection.java new file mode 100644 index 000000000..da2998f6b --- /dev/null +++ b/src/main/java/io/mycat/manager/ManagerConnection.java @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager; + +import java.io.IOException; +import java.nio.channels.NetworkChannel; + +import io.mycat.net.FrontendConnection; +import io.mycat.util.TimeUtil; + +/** + * @author mycat + */ +public class ManagerConnection extends FrontendConnection { + private static final long AUTH_TIMEOUT = 15 * 1000L; + + public ManagerConnection(NetworkChannel channel) throws IOException { + super(channel); + } + + @Override + public boolean isIdleTimeout() { + if (isAuthenticated) { + return super.isIdleTimeout(); + } else { + return TimeUtil.currentTimeMillis() > Math.max(lastWriteTime, + lastReadTime) + AUTH_TIMEOUT; + } + } + + @Override + public void handle(final byte[] data) { + handler.handle(data); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/ManagerConnectionFactory.java b/src/main/java/io/mycat/manager/ManagerConnectionFactory.java new file mode 100644 index 000000000..694f4cda8 --- /dev/null +++ b/src/main/java/io/mycat/manager/ManagerConnectionFactory.java @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager; + +import java.io.IOException; +import java.nio.channels.NetworkChannel; + +import io.mycat.MycatServer; +import io.mycat.config.MycatPrivileges; +import io.mycat.net.FrontendConnection; +import io.mycat.net.factory.FrontendConnectionFactory; + +/** + * @author mycat + */ +public class ManagerConnectionFactory extends FrontendConnectionFactory { + + @Override + protected FrontendConnection getConnection(NetworkChannel channel) throws IOException { + ManagerConnection c = new ManagerConnection(channel); + MycatServer.getInstance().getConfig().setSocketParams(c, true); + c.setPrivileges(MycatPrivileges.instance()); + c.setQueryHandler(new ManagerQueryHandler(c)); + return c; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/ManagerQueryHandler.java b/src/main/java/io/mycat/manager/ManagerQueryHandler.java new file mode 100644 index 000000000..a51408d2a --- /dev/null +++ b/src/main/java/io/mycat/manager/ManagerQueryHandler.java @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager; + +import io.mycat.config.ErrorCode; +import io.mycat.manager.handler.*; +import io.mycat.manager.response.KillConnection; +import io.mycat.manager.response.Offline; +import io.mycat.manager.response.Online; +import io.mycat.net.handler.FrontendQueryHandler; +import io.mycat.net.mysql.OkPacket; +import io.mycat.route.parser.ManagerParse; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * @author mycat + */ +public class ManagerQueryHandler implements FrontendQueryHandler { + private static final Logger LOGGER = LoggerFactory.getLogger(ManagerQueryHandler.class); + private static final int SHIFT = 8; + private final ManagerConnection source; + protected Boolean readOnly; + + public ManagerQueryHandler(ManagerConnection source) { + this.source = source; + } + + public void setReadOnly(Boolean readOnly) { + this.readOnly = readOnly; + } + + @Override + public void query(String sql) { + ManagerConnection c = this.source; + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(new StringBuilder().append(c).append(sql).toString()); + } + int rs = ManagerParse.parse(sql); + switch (rs & 0xff) { + 
case ManagerParse.SELECT: + SelectHandler.handle(sql, c, rs >>> SHIFT); + break; + case ManagerParse.SET: + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + break; + case ManagerParse.SHOW: + ShowHandler.handle(sql, c, rs >>> SHIFT); + break; + case ManagerParse.SWITCH: + SwitchHandler.handler(sql, c, rs >>> SHIFT); + break; + case ManagerParse.KILL_CONN: + KillConnection.response(sql, rs >>> SHIFT, c); + break; + case ManagerParse.OFFLINE: + Offline.execute(sql, c); + break; + case ManagerParse.ONLINE: + Online.execute(sql, c); + break; + case ManagerParse.STOP: + StopHandler.handle(sql, c, rs >>> SHIFT); + break; + case ManagerParse.RELOAD: + ReloadHandler.handle(sql, c, rs >>> SHIFT); + break; + case ManagerParse.ROLLBACK: + RollbackHandler.handle(sql, c, rs >>> SHIFT); + break; + case ManagerParse.CLEAR: + ClearHandler.handle(sql, c, rs >>> SHIFT); + break; + case ManagerParse.CONFIGFILE: + ConfFileHandler.handle(sql, c); + break; + case ManagerParse.LOGFILE: + ShowServerLog.handle(sql, c); + break; + case ManagerParse.ZK: + ZKHandler.handle(sql, c, rs >>> SHIFT); + break; + default: + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/handler/ClearHandler.java b/src/main/java/io/mycat/manager/handler/ClearHandler.java new file mode 100644 index 000000000..d3c857abc --- /dev/null +++ b/src/main/java/io/mycat/manager/handler/ClearHandler.java @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.handler; + +import io.mycat.config.ErrorCode; +import io.mycat.manager.ManagerConnection; +import io.mycat.manager.response.ClearSlow; +import io.mycat.route.parser.ManagerParseClear; +import io.mycat.util.StringUtil; + +/** + * @author mycat + */ +public class ClearHandler { + + public static void handle(String stmt, ManagerConnection c, int offset) { + int rs = ManagerParseClear.parse(stmt, offset); + switch (rs & 0xff) { + case ManagerParseClear.SLOW_DATANODE: { + String name = stmt.substring(rs >>> 8).trim(); + if (StringUtil.isEmpty(name)) { + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } else { + ClearSlow.dataNode(c, name); + } + break; + } + case ManagerParseClear.SLOW_SCHEMA: { + String name = stmt.substring(rs >>> 8).trim(); + if (StringUtil.isEmpty(name)) { + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } else { + ClearSlow.schema(c, name); + } + break; + } + default: + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/handler/ConfFileHandler.java b/src/main/java/io/mycat/manager/handler/ConfFileHandler.java new file mode 100644 index 000000000..afc4a8c4e --- /dev/null +++ 
b/src/main/java/io/mycat/manager/handler/ConfFileHandler.java @@ -0,0 +1,355 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.handler; + +import java.io.BufferedOutputStream; +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.text.SimpleDateFormat; +import java.util.Date; + +import javax.xml.parsers.ParserConfigurationException; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.xml.sax.SAXException; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.util.ConfigUtil; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.StringUtil; + +/** + * Mycat conf file related Handler + * + * @author wuzh + */ +public final class ConfFileHandler { + private static final Logger LOGGER = LoggerFactory + .getLogger(ConfFileHandler.class); + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil + .getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + private static final String UPLOAD_CMD = "FILE @@UPLOAD"; + static { + int i = 0; + byte packetId = 0; + header.packetId = 1; + + fields[i] = PacketUtil.getField("DATA", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void handle( String stmt,ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : 
fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + // write rows + byte packetId = eof.packetId; + String theStmt = stmt.toUpperCase().trim(); + PackageBufINf bufInf = null; + if (theStmt.equals("FILE @@LIST")) { + bufInf = listConfigFiles(c, buffer, packetId); + } else if (theStmt.startsWith("FILE @@SHOW")) { + int index = stmt.lastIndexOf(' '); + String fileName = stmt.substring(index + 1); + bufInf = showConfigFile(c, buffer, packetId, fileName); + } else if (theStmt.startsWith(UPLOAD_CMD)) { + int index = stmt.indexOf(' ', UPLOAD_CMD.length()); + int index2 = stmt.indexOf(' ', index + 1); + if (index <= 0 || index2 <= 0 || index + 1 > stmt.length() + || index2 + 1 > stmt.length()) { + bufInf = showInfo(c, buffer, packetId, "Invald param ,usage "); + } + String fileName = stmt.substring(index + 1, index2); + String content = stmt.substring(index2 + 1).trim(); + bufInf = upLoadConfigFile(c, buffer, packetId, fileName, content); + } else { + + bufInf = showInfo(c, buffer, packetId, "Invald command "); + } + + packetId = bufInf.packetId; + buffer = bufInf.buffer; + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static void checkXMLFile(String xmlFileName, byte[] data) + throws ParserConfigurationException, SAXException, IOException { + InputStream dtdStream = new ByteArrayInputStream(new byte[0]); + File confDir = new File(SystemConfig.getHomePath(), "conf"); + if (xmlFileName.equals("schema.xml")) { + dtdStream = MycatServer.class.getResourceAsStream("/schema.dtd"); + if (dtdStream == null) { + dtdStream = new ByteArrayInputStream(readFileByBytes(new File( + confDir, "schema.dtd"))); + } + + } else if (xmlFileName.equals("server.xml")) { + dtdStream = MycatServer.class.getResourceAsStream("/server.dtd"); + if (dtdStream == null) { + dtdStream = new 
ByteArrayInputStream(readFileByBytes(new File( + confDir, "server.dtd"))); + } + } else if (xmlFileName.equals("rule.xml")) { + dtdStream = MycatServer.class.getResourceAsStream("/rule.dtd"); + if (dtdStream == null) { + dtdStream = new ByteArrayInputStream(readFileByBytes(new File( + confDir, "rule.dtd"))); + } + } + ConfigUtil.getDocument(dtdStream, new ByteArrayInputStream(data)); + } + + /** + * 以字节为单位读取文件,常用于读二进制文件,如图片、声音、影像等文件。 + */ + private static byte[] readFileByBytes(File fileName) { + InputStream in = null; + ByteArrayOutputStream outStream = new ByteArrayOutputStream(); + try { // 一次读多个字节 + byte[] tempbytes = new byte[100]; + int byteread = 0; + in = new FileInputStream(fileName); + // 读入多个字节到字节数组中,byteread为一次读入的字节数 + while ((byteread = in.read(tempbytes)) != -1) { + outStream.write(tempbytes, 0, byteread); + } + } catch (Exception e1) { + LOGGER.error("readFileByBytesError",e1); + } finally { + if (in != null) { + try { + in.close(); + } catch (IOException e1) { + LOGGER.error("readFileByBytesError",e1); + } + } + } + return outStream.toByteArray(); + } + + private static PackageBufINf upLoadConfigFile(ManagerConnection c, + ByteBuffer buffer, byte packetId, String fileName, String content) { + LOGGER.info("Upload Daas Config file " + fileName + " ,content:" + + content); + String tempFileName = System.currentTimeMillis() + "_" + fileName; + File tempFile = new File(SystemConfig.getHomePath(), "conf" + + File.separator + tempFileName); + BufferedOutputStream buff = null; + boolean suc = false; + try { + byte[] fileData = content.getBytes("UTF-8"); + if (fileName.endsWith(".xml")) { + checkXMLFile(fileName, fileData); + } + buff = new BufferedOutputStream(new FileOutputStream(tempFile)); + buff.write(fileData); + buff.flush(); + + } catch (Exception e) { + LOGGER.warn("write file err " + e); + return showInfo(c, buffer, packetId, "write file err " + e); + + } finally { + if (buff != null) { + try { + buff.close(); + suc = true; + } catch (IOException 
e) { + LOGGER.warn("save config file err " + e); + } + } + } + if (suc) { + // if succcess + File oldFile = new File(SystemConfig.getHomePath(), "conf" + + File.separator + fileName); + if (oldFile.exists()) { + File backUP = new File(SystemConfig.getHomePath(), "conf" + + File.separator + fileName + "_" + + System.currentTimeMillis() + "_auto"); + if (!oldFile.renameTo(backUP)) { + String msg = "rename old file failed"; + LOGGER.warn(msg + " for upload file " + + oldFile.getAbsolutePath()); + return showInfo(c, buffer, packetId, msg); + } + } + File dest = new File(SystemConfig.getHomePath(), "conf" + + File.separator + fileName); + if (!tempFile.renameTo(dest)) { + String msg = "rename file failed"; + LOGGER.warn(msg + " for upload file " + + tempFile.getAbsolutePath()); + return showInfo(c, buffer, packetId, msg); + } + return showInfo(c, buffer, packetId, "SUCCESS SAVED FILE:" + + fileName); + } else { + return showInfo(c, buffer, packetId, "UPLOAD ERROR OCCURD:" + + fileName); + } + } + + private static PackageBufINf showInfo(ManagerConnection c, + ByteBuffer buffer, byte packetId, String string) { + PackageBufINf bufINf = new PackageBufINf(); + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(string, c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + bufINf.packetId = packetId; + bufINf.buffer = buffer; + return bufINf; + } + + private static PackageBufINf showConfigFile(ManagerConnection c, + ByteBuffer buffer, byte packetId, String fileName) { + File file = new File(SystemConfig.getHomePath(), "conf" + + File.separator + fileName); + BufferedReader br = null; + PackageBufINf bufINf = new PackageBufINf(); + try { + br = new BufferedReader(new FileReader(file)); + String line = null; + while ((line = br.readLine()) != null) { + if (line.isEmpty()) { + continue; + } + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(line, c.getCharset())); + row.packetId = 
++packetId; + buffer = row.write(buffer, c,true); + } + bufINf.buffer = buffer; + bufINf.packetId = packetId; + return bufINf; + + } catch (Exception e) { + LOGGER.error("showConfigFileError",e); + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(e.toString(), c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + bufINf.buffer = buffer; + } finally { + if (br != null) { + try { + br.close(); + } catch (IOException e) { + LOGGER.error("showConfigFileError",e); + } + } + + } + bufINf.packetId = packetId; + return bufINf; + } + + private static PackageBufINf listConfigFiles(ManagerConnection c, + ByteBuffer buffer, byte packetId) { + PackageBufINf bufINf = new PackageBufINf(); + SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm"); + try { + int i = 1; + File[] file = new File(SystemConfig.getHomePath(), "conf") + .listFiles(); + for (File f : file) { + if (f.isFile()) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode( + (i++) + " : " + f.getName() + " time:" + + df.format(new Date(f.lastModified())), + c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + } + + bufINf.buffer = buffer; + bufINf.packetId = packetId; + return bufINf; + + } catch (Exception e) { + LOGGER.error("listConfigFilesError",e); + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(e.toString(), c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + bufINf.buffer = buffer; + } + bufINf.packetId = packetId; + return bufINf; + } + + public static void main(String[] args) { + String stmt = "FILE @@UPLOAD test.xml 1234567890"; + int index = stmt.indexOf(' ', UPLOAD_CMD.length()); + int index2 = stmt.indexOf(' ', index + 1); + if (index <= 0 || index2 <= 0 || index + 1 > stmt.length() + || index2 + 1 > stmt.length()) { + System.out.println("valid ...."); + } else { + String fileName = 
stmt.substring(index + 1, index2); + String content = stmt.substring(index2 + 1).trim(); + System.out.println(fileName + " content:" + content); + } + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/handler/ReloadHandler.java b/src/main/java/io/mycat/manager/handler/ReloadHandler.java new file mode 100644 index 000000000..dedcdedb6 --- /dev/null +++ b/src/main/java/io/mycat/manager/handler/ReloadHandler.java @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.handler; + +import io.mycat.config.ErrorCode; +import io.mycat.manager.ManagerConnection; +import io.mycat.manager.response.ReloadConfig; +import io.mycat.manager.response.ReloadQueryCf; +import io.mycat.manager.response.ReloadSqlSlowTime; +import io.mycat.manager.response.ReloadUser; +import io.mycat.manager.response.ReloadUserStat; +import io.mycat.route.parser.ManagerParseReload; +import io.mycat.route.parser.util.ParseUtil; + +/** + * @author mycat + */ +public final class ReloadHandler +{ + + public static void handle(String stmt, ManagerConnection c, int offset) + { + int rs = ManagerParseReload.parse(stmt, offset); + switch (rs) + { + case ManagerParseReload.CONFIG: + ReloadConfig.execute(c,false); + break; + case ManagerParseReload.CONFIG_ALL: + ReloadConfig.execute(c,true); + break; + case ManagerParseReload.ROUTE: + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + break; + case ManagerParseReload.USER: + ReloadUser.execute(c); + break; + case ManagerParseReload.USER_STAT: + ReloadUserStat.execute(c); + break; + case ManagerParseReload.SQL_SLOW: + ReloadSqlSlowTime.execute(c, ParseUtil.getSQLId(stmt)); + break; + case ManagerParseReload.QUERY_CF: + String filted = ParseUtil.parseString(stmt) ; + ReloadQueryCf.execute(c, filted); + break; + default: + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/handler/RollbackHandler.java b/src/main/java/io/mycat/manager/handler/RollbackHandler.java new file mode 100644 index 000000000..6bbd97145 --- /dev/null +++ b/src/main/java/io/mycat/manager/handler/RollbackHandler.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.handler; + +import io.mycat.config.ErrorCode; +import io.mycat.manager.ManagerConnection; +import io.mycat.manager.response.RollbackConfig; +import io.mycat.manager.response.RollbackUser; +import io.mycat.route.parser.ManagerParseRollback; + +/** + * @author mycat + */ +public final class RollbackHandler { + + public static void handle(String stmt, ManagerConnection c, int offset) { + switch (ManagerParseRollback.parse(stmt, offset)) { + case ManagerParseRollback.CONFIG: + RollbackConfig.execute(c); + break; + case ManagerParseRollback.ROUTE: + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + break; + case ManagerParseRollback.USER: + RollbackUser.execute(c); + break; + default: + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/handler/SelectHandler.java b/src/main/java/io/mycat/manager/handler/SelectHandler.java new file mode 100644 index 000000000..66a0b93be --- /dev/null +++ b/src/main/java/io/mycat/manager/handler/SelectHandler.java @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2013, 
OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.handler; + +import static io.mycat.route.parser.ManagerParseSelect.SESSION_AUTO_INCREMENT; +import static io.mycat.route.parser.ManagerParseSelect.VERSION_COMMENT; +import static io.mycat.route.parser.ManagerParseSelect.SESSION_TX_READ_ONLY; + +import io.mycat.config.ErrorCode; +import io.mycat.manager.ManagerConnection; +import io.mycat.manager.response.SelectSessionAutoIncrement; +import io.mycat.manager.response.SelectSessionTxReadOnly; +import io.mycat.manager.response.SelectVersionComment; +import io.mycat.route.parser.ManagerParseSelect; + +/** + * @author mycat + */ +public final class SelectHandler { + + public static void handle(String stmt, ManagerConnection c, int offset) { + switch (ManagerParseSelect.parse(stmt, offset)) { + case VERSION_COMMENT: + SelectVersionComment.execute(c); + break; + case SESSION_AUTO_INCREMENT: + SelectSessionAutoIncrement.execute(c); + break; + case SESSION_TX_READ_ONLY: + SelectSessionTxReadOnly.execute(c); + break; + default: + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/handler/ShowHandler.java b/src/main/java/io/mycat/manager/handler/ShowHandler.java new file mode 100644 index 000000000..0d308901d --- /dev/null +++ b/src/main/java/io/mycat/manager/handler/ShowHandler.java @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.handler; + +import io.mycat.config.ErrorCode; +import io.mycat.manager.ManagerConnection; +import io.mycat.manager.response.ShowBackend; +import io.mycat.manager.response.ShowBackendOld; +import io.mycat.manager.response.ShowCollation; +import io.mycat.manager.response.ShowCommand; +import io.mycat.manager.response.ShowConnection; +import io.mycat.manager.response.ShowConnectionSQL; +import io.mycat.manager.response.ShowDataNode; +import io.mycat.manager.response.ShowDataSource; +import io.mycat.manager.response.ShowDatabase; +import io.mycat.manager.response.ShowDatasourceCluster; +import io.mycat.manager.response.ShowDatasourceSyn; +import io.mycat.manager.response.ShowDatasourceSynDetail; +import io.mycat.manager.response.ShowHeartbeat; +import io.mycat.manager.response.ShowHeartbeatDetail; +import io.mycat.manager.response.ShowHelp; +import io.mycat.manager.response.ShowParser; +import io.mycat.manager.response.ShowProcessor; +import io.mycat.manager.response.ShowRouter; +import io.mycat.manager.response.ShowSQL; +import io.mycat.manager.response.ShowSQLCondition; +import io.mycat.manager.response.ShowSQLDetail; +import io.mycat.manager.response.ShowSQLExecute; +import io.mycat.manager.response.ShowSQLHigh; +import io.mycat.manager.response.ShowSQLLarge; 
+import io.mycat.manager.response.ShowSQLSlow; +import io.mycat.manager.response.ShowSQLSumTable; +import io.mycat.manager.response.ShowSQLSumUser; +import io.mycat.manager.response.ShowServer; +import io.mycat.manager.response.ShowSession; +import io.mycat.manager.response.ShowSqlResultSet; +import io.mycat.manager.response.ShowSysLog; +import io.mycat.manager.response.ShowSysParam; +import io.mycat.manager.response.ShowThreadPool; +import io.mycat.manager.response.ShowTime; +import io.mycat.manager.response.ShowVariables; +import io.mycat.manager.response.ShowVersion; +import io.mycat.manager.response.ShowWhiteHost; +import io.mycat.manager.response.ShowDirectMemory; +import io.mycat.route.parser.ManagerParseShow; +import io.mycat.route.parser.util.ParseUtil; +import io.mycat.server.handler.ShowCache; +import io.mycat.util.StringUtil; + +/** + * @author mycat + */ +public final class ShowHandler { + + public static void handle(String stmt, ManagerConnection c, int offset) { + int rs = ManagerParseShow.parse(stmt, offset); + switch (rs & 0xff) { + case ManagerParseShow.SYSPARAM://add rainbow + ShowSysParam.execute(c); + break; + case ManagerParseShow.SYSLOG: //add by zhuam + String lines = stmt.substring(rs >>> 8).trim(); + ShowSysLog.execute(c, Integer.parseInt( lines ) ); + break; + case ManagerParseShow.COMMAND: + ShowCommand.execute(c); + break; + case ManagerParseShow.COLLATION: + ShowCollation.execute(c); + break; + case ManagerParseShow.CONNECTION: + ShowConnection.execute(c); + break; + case ManagerParseShow.BACKEND: + ShowBackend.execute(c); + break; + case ManagerParseShow.BACKEND_OLD: + ShowBackendOld.execute(c); + break; + case ManagerParseShow.CONNECTION_SQL: + ShowConnectionSQL.execute(c); + break; + case ManagerParseShow.DATABASE: + ShowDatabase.execute(c); + break; + case ManagerParseShow.DATANODE: + ShowDataNode.execute(c, null); + break; + case ManagerParseShow.DATANODE_WHERE: { + String name = stmt.substring(rs >>> 8).trim(); + if 
(StringUtil.isEmpty(name)) { + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } else { + ShowDataNode.execute(c, name); + } + break; + } + case ManagerParseShow.DATASOURCE: + ShowDataSource.execute(c, null); + break; + case ManagerParseShow.DATASOURCE_WHERE: { + String name = stmt.substring(rs >>> 8).trim(); + if (StringUtil.isEmpty(name)) { + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } else { + ShowDataSource.execute(c, name); + } + break; + } + case ManagerParseShow.HELP: + ShowHelp.execute(c); + break; + case ManagerParseShow.HEARTBEAT: + ShowHeartbeat.response(c); + break; + case ManagerParseShow.PARSER: + ShowParser.execute(c); + break; + case ManagerParseShow.PROCESSOR: + ShowProcessor.execute(c); + break; + case ManagerParseShow.ROUTER: + ShowRouter.execute(c); + break; + case ManagerParseShow.SERVER: + ShowServer.execute(c); + break; + case ManagerParseShow.WHITE_HOST: + ShowWhiteHost.execute(c); + break; + case ManagerParseShow.WHITE_HOST_SET: + ShowWhiteHost.setHost(c,ParseUtil.parseString(stmt)); + break; + case ManagerParseShow.SQL: + boolean isClearSql = Boolean.valueOf( stmt.substring(rs >>> 8).trim() ); + ShowSQL.execute(c, isClearSql); + break; + case ManagerParseShow.SQL_DETAIL: + ShowSQLDetail.execute(c, ParseUtil.getSQLId(stmt)); + break; + case ManagerParseShow.SQL_EXECUTE: + ShowSQLExecute.execute(c); + break; + case ManagerParseShow.SQL_SLOW: + boolean isClearSlow = Boolean.valueOf( stmt.substring(rs >>> 8).trim() ); + ShowSQLSlow.execute(c, isClearSlow); + break; + case ManagerParseShow.SQL_HIGH: + boolean isClearHigh = Boolean.valueOf( stmt.substring(rs >>> 8).trim() ); + ShowSQLHigh.execute(c, isClearHigh); + break; + case ManagerParseShow.SQL_LARGE: + boolean isClearLarge = Boolean.valueOf( stmt.substring(rs >>> 8).trim() ); + ShowSQLLarge.execute(c, isClearLarge); + break; + case ManagerParseShow.SQL_CONDITION: + ShowSQLCondition.execute(c); + break; + case ManagerParseShow.SQL_RESULTSET: + 
ShowSqlResultSet.execute(c); + break; + case ManagerParseShow.SQL_SUM_USER: + boolean isClearSum = Boolean.valueOf( stmt.substring(rs >>> 8).trim() ); + ShowSQLSumUser.execute(c,isClearSum); + break; + case ManagerParseShow.SQL_SUM_TABLE: + boolean isClearTable = Boolean.valueOf( stmt.substring(rs >>> 8).trim() ); + ShowSQLSumTable.execute(c, isClearTable); + break; + case ManagerParseShow.SLOW_DATANODE: { + String name = stmt.substring(rs >>> 8).trim(); + if (StringUtil.isEmpty(name)) { + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } else { + // ShowSlow.dataNode(c, name); + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } + break; + } + case ManagerParseShow.SLOW_SCHEMA: { + String name = stmt.substring(rs >>> 8).trim(); + if (StringUtil.isEmpty(name)) { + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } else { + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + // ShowSlow.schema(c, name); + } + break; + } + case ManagerParseShow.THREADPOOL: + ShowThreadPool.execute(c); + break; + case ManagerParseShow.CACHE: + ShowCache.execute(c); + break; + case ManagerParseShow.SESSION: + ShowSession.execute(c); + break; + case ManagerParseShow.TIME_CURRENT: + ShowTime.execute(c, ManagerParseShow.TIME_CURRENT); + break; + case ManagerParseShow.TIME_STARTUP: + ShowTime.execute(c, ManagerParseShow.TIME_STARTUP); + break; + case ManagerParseShow.VARIABLES: + ShowVariables.execute(c); + break; + case ManagerParseShow.VERSION: + ShowVersion.execute(c); + break; + case ManagerParseShow.HEARTBEAT_DETAIL://by songwie + ShowHeartbeatDetail.response(c,stmt); + break; + case ManagerParseShow.DATASOURCE_SYNC://by songwie + ShowDatasourceSyn.response(c,stmt); + break; + case ManagerParseShow.DATASOURCE_SYNC_DETAIL://by songwie + ShowDatasourceSynDetail.response(c,stmt); + break; + case ManagerParseShow.DATASOURCE_CLUSTER://by songwie + ShowDatasourceCluster.response(c,stmt); + break; + case 
ManagerParseShow.DIRECTMEMORY_DETAILl: + ShowDirectMemory.execute(c,2); + break; + case ManagerParseShow.DIRECTMEMORY_TOTAL: + ShowDirectMemory.execute(c,1); + break; + default: + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } + } +} diff --git a/src/main/java/io/mycat/manager/handler/ShowServerLog.java b/src/main/java/io/mycat/manager/handler/ShowServerLog.java new file mode 100644 index 000000000..9d998f1e6 --- /dev/null +++ b/src/main/java/io/mycat/manager/handler/ShowServerLog.java @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.handler; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.config.model.SystemConfig; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.CircularArrayList; +import io.mycat.util.StringUtil; + +public final class ShowServerLog { + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil + .getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + private static final String DEFAULT_LOGFILE = "mycat.log"; + private static final Logger LOGGER = LoggerFactory + .getLogger(ShowServerLog.class); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("LOG", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + private static File getLogFile(String logFile) { + + String daasHome = SystemConfig.getHomePath(); + File file = new File(daasHome, "logs" + File.separator + logFile); + return file; + } + + public static void handle(String stmt,ManagerConnection c) { + + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = 
eof.write(buffer, c,true); + + // write rows + + byte packetId = eof.packetId; + PackageBufINf bufInf = null; + // show log key=warn limit=0,30 + Map condPairMap = getCondPair(stmt); + if (condPairMap.isEmpty()) { + bufInf = showLogSum(c, buffer, packetId); + } else { + String logFile = condPairMap.get("file"); + if (logFile == null) { + logFile = DEFAULT_LOGFILE; + } + String limitStr = condPairMap.get("limit"); + limitStr = (limitStr != null) ? limitStr : "0," + 100000; + String[] limtArry = limitStr.split("\\s|,"); + int start = Integer.parseInt(limtArry[0]); + int page = Integer.parseInt(limtArry[1]); + int end = Integer.valueOf(start + page); + String key = condPairMap.get("key"); + String regex = condPairMap.get("regex"); + bufInf = showLogRange(c, buffer, packetId, key, regex, start, end, + logFile); + + } + + packetId = bufInf.packetId; + buffer = bufInf.buffer; + + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + public static PackageBufINf showLogRange(ManagerConnection c, + ByteBuffer buffer, byte packetId, String key, String regex, + int start, int end, String logFile) { + PackageBufINf bufINf = new PackageBufINf(); + Pattern pattern = null; + if (regex != null) { + pattern = Pattern.compile(regex, Pattern.CASE_INSENSITIVE); + } + if (key != null) { + key = key.toLowerCase(); + } + File file = getLogFile(logFile); + BufferedReader br = null; + int curLine = 0; + try { + br = new BufferedReader(new FileReader(file)); + String line = null; + while ((line = br.readLine()) != null) { + curLine++; + if (curLine >= start && curLine <= end + && ( + (pattern != null && pattern.matcher(line).find()) + || (pattern == null && key == null) + || (key != null && line.toLowerCase().contains(key)) + )) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(curLine + "->" + line, + c.getCharset())); + row.packetId = ++packetId; + 
buffer = row.write(buffer, c,true); + } + } + bufINf.buffer = buffer; + bufINf.packetId = packetId; + return bufINf; + + } catch (Exception e) { + LOGGER.error("showLogRangeError", e); + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(e.toString(), c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + bufINf.buffer = buffer; + } finally { + if (br != null) { + try { + br.close(); + } catch (IOException e) { + LOGGER.error("showLogRangeError", e); + } + } + + } + bufINf.packetId = packetId; + return bufINf; + } + + private static PackageBufINf showLogSum(ManagerConnection c, + ByteBuffer buffer, byte packetId) { + PackageBufINf bufINf = new PackageBufINf(); + File[] logFiles = new File(SystemConfig.getHomePath(), "logs") + .listFiles(); + String fileNames = ""; + for (File f : logFiles) { + if (f.isFile()) { + fileNames += " " + f.getName(); + } + } + + File file = getLogFile(DEFAULT_LOGFILE); + BufferedReader br = null; + int totalLines = 0; + CircularArrayList queue = new CircularArrayList(50); + try { + br = new BufferedReader(new FileReader(file)); + String line = null; + while ((line = br.readLine()) != null) { + totalLines++; + if (queue.size() == queue.capacity()) { + queue.remove(0); + } + queue.add(line); + + } + + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode("files in log dir:" + totalLines + + fileNames, c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode("Total lines " + totalLines + " ,tail " + + queue.size() + " line is following:", c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + int size = queue.size() - 1; + for (int i = size; i >= 0; i--) { + String data = queue.get(i); + row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(data, c.getCharset())); + row.packetId = ++packetId; + buffer = 
row.write(buffer, c,true); + } + bufINf.buffer = buffer; + bufINf.packetId = packetId; + return bufINf; + + } catch (Exception e) { + LOGGER.error("showLogSumError", e); + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(e.toString(), c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + bufINf.buffer = buffer; + } finally { + if (br != null) { + try { + br.close(); + } catch (IOException e) { + LOGGER.error("showLogSumError", e); + } + } + + } + bufINf.packetId = packetId; + return bufINf; + } + + public static Map getCondPair(String sql) { + HashMap map = new HashMap(); + Pattern p = Pattern.compile("(\\S+\\s*=\\s*\\S+)"); + Matcher m = p.matcher(sql); + while (m.find()) { + String item = m.group(); + Pattern p2 = Pattern.compile("(\\S+)\\s*=\\s*(\\S+)"); + Matcher m2 = p2.matcher(item); + if (m2.find()) { + map.put(m2.group(1), m2.group(2)); + } + } + return map; + } + + public static void main(String[] args) { + String sql = "show log limit =1,2 key=warn file= \"2\" "; + Map condPairMap = getCondPair(sql); + for (Map.Entry entry : condPairMap.entrySet()) { + System.out.println("key:" + entry.getKey() + ",value:" + + entry.getValue()); + + } + String limt = "1,2"; + System.out.println(Arrays.toString(limt.split("\\s|,"))); + + } +} + +class PackageBufINf { + public byte packetId; + public ByteBuffer buffer; +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/handler/StopHandler.java b/src/main/java/io/mycat/manager/handler/StopHandler.java new file mode 100644 index 000000000..dded652fd --- /dev/null +++ b/src/main/java/io/mycat/manager/handler/StopHandler.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.handler; + +import io.mycat.config.ErrorCode; +import io.mycat.manager.ManagerConnection; +import io.mycat.manager.response.StopHeartbeat; +import io.mycat.route.parser.ManagerParseStop; + +/** + * @author mycat + */ +public final class StopHandler { + + public static void handle(String stmt, ManagerConnection c, int offset) { + switch (ManagerParseStop.parse(stmt, offset)) { + case ManagerParseStop.HEARTBEAT: + StopHeartbeat.execute(stmt, c); + break; + default: + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/handler/SwitchHandler.java b/src/main/java/io/mycat/manager/handler/SwitchHandler.java new file mode 100644 index 000000000..500d8a8ef --- /dev/null +++ b/src/main/java/io/mycat/manager/handler/SwitchHandler.java @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.handler; + +import static io.mycat.route.parser.ManagerParseSwitch.DATASOURCE; + +import io.mycat.config.ErrorCode; +import io.mycat.manager.ManagerConnection; +import io.mycat.manager.response.SwitchDataSource; +import io.mycat.route.parser.ManagerParseSwitch; + +/** + * @author mycat + */ +public final class SwitchHandler { + + public static void handler(String stmt, ManagerConnection c, int offset) { + switch (ManagerParseSwitch.parse(stmt, offset)) { + case DATASOURCE: + SwitchDataSource.response(stmt, c); + break; + default: + c.writeErrMessage(ErrorCode.ER_YES, "Unsupported statement"); + } + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/handler/ZKHandler.java b/src/main/java/io/mycat/manager/handler/ZKHandler.java new file mode 100644 index 000000000..37732ff36 --- /dev/null +++ b/src/main/java/io/mycat/manager/handler/ZKHandler.java @@ -0,0 +1,152 @@ +package io.mycat.manager.handler; + +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.utils.ZKPaths; +import org.apache.zookeeper.data.Stat; +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; + +import io.mycat.config.ErrorCode; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.config.loader.zkprocess.console.ZkNofiflyCfg; +import io.mycat.config.loader.zkprocess.zktoxml.ZktoXmlMain; +import io.mycat.manager.ManagerConnection; +import io.mycat.manager.response.ReloadZktoXml; +import io.mycat.util.ZKUtils; + +/** + * zookeeper 实现动态配置 + * + * @author Hash Zhang + * @version 1.0 + * @time 23:35 2016/5/7 + */ +public class ZKHandler { + + private static final Logger LOGGER = LoggerFactory.getLogger(ZKHandler.class); + + /** + * 直接从zk拉所有配置,然后本地执行reload_all + */ + public static final String RELOAD_FROM_ZK = "zk reload_from_zk"; + + /** + * 强制所有节点操作 + */ + private static final String RELOAD_ALL = "all"; + + /** + * 命令节点信息 + */ + public static final String ZK_NODE_PATH = "command"; + + public static void handle(String stmt, ManagerConnection c, int offset) { + String command = stmt.toLowerCase(); + // 检查当前的命令是否为zk reload_from_zk + if (RELOAD_FROM_ZK.equals(command)) { + // 调用zktoxml操作 + try { + // 通知所有节点进行数据更新 + ZktoXmlMain.ZKLISTENER.notifly(ZkNofiflyCfg.ZK_NOTIFLY_LOAD_ALL.getKey()); + + // 执行重新加载本地配制信息 + ReloadHandler.handle("RELOAD @@config_all", c, 7 >>> 8); + + offset += RELOAD_FROM_ZK.length(); + + ReloadZktoXml.execute(c, "zk reload success "); + } catch (Exception e) { + LOGGER.error("ZKHandler loadZktoFile exception", e); + c.writeErrMessage(ErrorCode.ER_YES, "zk command send error,command is :" + command); + } + } else { + String[] matchKeys = stmt.split("\\s+"); + + if (null != matchKeys && matchKeys.length > 2) { + // 取得所有配制的节点信息 + String key = ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_CLUSTER_NODES); + + String[] myidArray = key.split(","); + + String idkeys = matchKeys[1].toLowerCase(); + + // 发送的命令信息 + StringBuilder commandMsg = new StringBuilder(); + + for (int i = 2; i < matchKeys.length; i++) { + if (i == matchKeys.length - 1) { + 
commandMsg.append(matchKeys[i]); + } else { + commandMsg.append(matchKeys[i]).append(" "); + } + } + + // 命令的形式为zk all reload_from_zk + // 进行第二个匹配,检查是否为所有节点更新 + if (RELOAD_ALL.equals(idkeys)) { + // 按所有id,将把所有的节点都更新 + try { + // 将所有指令发送至服务器 + for (String myid : myidArray) { + sendZkCommand(myid, commandMsg.toString()); + } + + ReloadZktoXml.execute(c, "zk reload " + matchKeys[1] + " success "); + } catch (Exception e) { + c.writeErrMessage(ErrorCode.ER_YES, "zk command send error"); + } + } + // 如果不是所有节点,则检查是否能匹配上单独的节点 + else { + for (String myid : myidArray) { + if (myid.equals(idkeys)) { + try { + sendZkCommand(myid, commandMsg.toString()); + + ReloadZktoXml.execute(c, "zk reload " + matchKeys[1] + " success "); + } catch (Exception e) { + c.writeErrMessage(ErrorCode.ER_YES, "zk command send error,myid :" + myid); + } + + break; + } + } + } + + } else { + c.writeErrMessage(ErrorCode.ER_YES, "zk command is error"); + } + } + } + + /** + * 向节点发送命令 + * @param myId 节点的id信息 + * @param command 命令内容 + * @throws Exception 异常信息 + */ + private static void sendZkCommand(String myId, String command) throws Exception { + CuratorFramework zkConn = ZKUtils.getConnection(); + + String basePath = ZKUtils.getZKBasePath(); + + String nodePath = ZKPaths.makePath(basePath, ZK_NODE_PATH + "/" + myId); + + Stat stat; + try { + stat = zkConn.checkExists().forPath(nodePath); + + if (null == stat) { + // 进行目录的创建操作 + ZKPaths.mkdirs(zkConn.getZookeeperClient().getZooKeeper(), nodePath); + } + // 设置节点信息 + zkConn.setData().inBackground().forPath(nodePath, command.getBytes()); + } catch (Exception e) { + LOGGER.error("ZKHandler sendZkCommand exception", e); + throw e; + } + + } +} diff --git a/src/main/java/io/mycat/manager/response/ClearSlow.java b/src/main/java/io/mycat/manager/response/ClearSlow.java new file mode 100644 index 000000000..a60249c38 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ClearSlow.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or 
its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.response; + +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.config.ErrorCode; +import io.mycat.config.MycatConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.OkPacket; + +/** + * @author mycat + */ +public class ClearSlow { + + public static void dataNode(ManagerConnection c, String name) { + PhysicalDBNode dn = MycatServer.getInstance().getConfig().getDataNodes().get(name); + PhysicalDBPool ds = null; + if (dn != null && ((ds = dn.getDbPool())!= null)) { + // ds.getSqlRecorder().clear(); + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + } else { + c.writeErrMessage(ErrorCode.ER_YES, "Invalid DataNode:" + name); + } + } + + public static void schema(ManagerConnection c, String name) { + MycatConfig conf = MycatServer.getInstance().getConfig(); + SchemaConfig schema = conf.getSchemas().get(name); + if (schema != null) { +// Map dataNodes = conf.getDataNodes(); +// for (String n : schema.getAllDataNodes()) { +// MySQLDataNode dn = dataNodes.get(n); +// MySQLDataSource ds = null; +// if (dn != null && (ds = dn.getSource()) != null) { +// ds.getSqlRecorder().clear(); +// } +// } + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + } else { + c.writeErrMessage(ErrorCode.ER_YES, "Invalid Schema:" + name); + } + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/KillConnection.java b/src/main/java/io/mycat/manager/response/KillConnection.java similarity index 66% rename from src/main/java/io/mycat/server/response/KillConnection.java rename to src/main/java/io/mycat/manager/response/KillConnection.java index 2e16ca206..b42341bc5 100644 --- a/src/main/java/io/mycat/server/response/KillConnection.java +++ b/src/main/java/io/mycat/manager/response/KillConnection.java @@ -21,37 +21,39 @@ * 
https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.response; - -import io.mycat.net.Connection; -import io.mycat.net.NetSystem; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.OkPacket; -import io.mycat.util.SplitUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +package io.mycat.manager.response; import java.util.ArrayList; import java.util.List; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.FrontendConnection; +import io.mycat.net.NIOConnection; +import io.mycat.net.NIOProcessor; +import io.mycat.net.mysql.OkPacket; +import io.mycat.util.SplitUtil; + /** * @author mycat */ public final class KillConnection { - private static final Logger logger = LoggerFactory - .getLogger(KillConnection.class); + private static final Logger logger = LoggerFactory.getLogger(KillConnection.class); - public static void response(String stmt, int offset, MySQLFrontConnection mc) { + public static void response(String stmt, int offset, ManagerConnection mc) { int count = 0; - List list = getList(stmt, offset, mc); - if (list != null) - for (MySQLFrontConnection c : list) { + List list = getList(stmt, offset, mc); + if (list != null) { + for (NIOConnection c : list) { StringBuilder s = new StringBuilder(); logger.warn(s.append(c).append("killed by manager").toString()); c.close("kill by manager"); count++; } + } OkPacket packet = new OkPacket(); packet.packetId = 1; packet.affectedRows = count; @@ -59,12 +61,12 @@ public static void response(String stmt, int offset, MySQLFrontConnection mc) { packet.write(mc); } - private static List getList(String stmt, int offset, MySQLFrontConnection mc) { + private static List getList(String stmt, int offset, ManagerConnection mc) { String ids = stmt.substring(offset).trim(); if (ids.length() > 0) { String[] idList = SplitUtil.split(ids, ',', true); - List fcList = new 
ArrayList(idList.length); - + List fcList = new ArrayList(idList.length); + NIOProcessor[] processors = MycatServer.getInstance().getProcessors(); for (String id : idList) { long value = 0; try { @@ -72,9 +74,10 @@ private static List getList(String stmt, int offset, MySQL } catch (NumberFormatException e) { continue; } - for (Connection fc: NetSystem.getInstance().getAllConnectios().values()) { - if (fc instanceof MySQLFrontConnection && fc.getId()==value) { - fcList.add((MySQLFrontConnection) fc); + FrontendConnection fc = null; + for (NIOProcessor p : processors) { + if ((fc = p.getFrontends().get(value)) != null) { + fcList.add(fc); break; } } diff --git a/src/main/java/io/mycat/server/response/Offline.java b/src/main/java/io/mycat/manager/response/Offline.java similarity index 88% rename from src/main/java/io/mycat/server/response/Offline.java rename to src/main/java/io/mycat/manager/response/Offline.java index 07f485e49..e42beb658 100644 --- a/src/main/java/io/mycat/server/response/Offline.java +++ b/src/main/java/io/mycat/manager/response/Offline.java @@ -21,12 +21,11 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.response; - +package io.mycat.manager.response; import io.mycat.MycatServer; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.OkPacket; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.OkPacket; /** * @author mycat @@ -40,7 +39,7 @@ public class Offline { ok.serverStatus = 2; } - public static void execute(String stmt, MySQLFrontConnection c) { + public static void execute(String stmt, ManagerConnection c) { MycatServer.getInstance().offline(); ok.write(c); } diff --git a/src/main/java/io/mycat/server/response/Online.java b/src/main/java/io/mycat/manager/response/Online.java similarity index 88% rename from src/main/java/io/mycat/server/response/Online.java rename to src/main/java/io/mycat/manager/response/Online.java index 7476b920c..b860ed4ee 100644 --- a/src/main/java/io/mycat/server/response/Online.java +++ b/src/main/java/io/mycat/manager/response/Online.java @@ -21,12 +21,11 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.response; - +package io.mycat.manager.response; import io.mycat.MycatServer; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.OkPacket; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.OkPacket; /** * @author mycat @@ -40,7 +39,7 @@ public class Online { ok.serverStatus = 2; } - public static void execute(String stmt, MySQLFrontConnection mc) { + public static void execute(String stmt, ManagerConnection mc) { MycatServer.getInstance().online(); ok.write(mc); } diff --git a/src/main/java/io/mycat/manager/response/ReloadConfig.java b/src/main/java/io/mycat/manager/response/ReloadConfig.java new file mode 100644 index 000000000..89b53278c --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ReloadConfig.java @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.response; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.locks.ReentrantLock; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; + +import io.mycat.MycatServer; +import io.mycat.backend.BackendConnection; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.jdbc.JDBCConnection; +import io.mycat.backend.mysql.nio.MySQLConnection; +import io.mycat.config.ConfigInitializer; +import io.mycat.config.ErrorCode; +import io.mycat.config.MycatCluster; +import io.mycat.config.MycatConfig; +import io.mycat.config.model.FirewallConfig; +import io.mycat.config.model.SchemaConfig; +import 
io.mycat.config.model.UserConfig; +import io.mycat.config.util.DnPropertyUtil; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.NIOProcessor; +import io.mycat.net.mysql.OkPacket; + +/** + * @author mycat + * @author zhuam + */ +public final class ReloadConfig { + + private static final Logger LOGGER = LoggerFactory.getLogger(ReloadConfig.class); + + public static void execute(ManagerConnection c, final boolean loadAll) { + + // reload @@config_all 校验前一次的事务完成情况 + if ( loadAll && !NIOProcessor.backends_old.isEmpty() ) { + c.writeErrMessage(ErrorCode.ER_YES, "The are several unfinished db transactions before executing \"reload @@config_all\", therefore the execution is terminated for logical integrity and please try again later."); + return; + } + + final ReentrantLock lock = MycatServer.getInstance().getConfig().getLock(); + lock.lock(); + try { + ListenableFuture listenableFuture = MycatServer.getInstance().getListeningExecutorService().submit( + new Callable() { + @Override + public Boolean call() throws Exception { + return loadAll ? 
reload_all() : reload(); + } + } + ); + Futures.addCallback(listenableFuture, new ReloadCallBack(c), MycatServer.getInstance().getListeningExecutorService()); + } finally { + lock.unlock(); + } + } + + public static boolean reload_all() { + + /** + * 1、载入新的配置 + * 1.1、ConfigInitializer 初始化,基本自检 + * 1.2、DataNode/DataHost 实际链路检测 + */ + ConfigInitializer loader = new ConfigInitializer(true); + Map newUsers = loader.getUsers(); + Map newSchemas = loader.getSchemas(); + Map newDataNodes = loader.getDataNodes(); + Map newDataHosts = loader.getDataHosts(); + MycatCluster newCluster = loader.getCluster(); + FirewallConfig newFirewall = loader.getFirewall(); + + /** + * 1.2、实际链路检测 + */ + loader.testConnection(); + + /** + * 2、承接 + * 2.1、老的 dataSource 继续承接新建请求 + * 2.2、新的 dataSource 开始初始化, 完毕后交由 2.3 + * 2.3、新的 dataSource 开始承接新建请求 + * 2.4、老的 dataSource 内部的事务执行完毕, 相继关闭 + * 2.5、老的 dataSource 超过阀值的,强制关闭 + */ + + MycatConfig config = MycatServer.getInstance().getConfig(); + + /** + * 2.1 、老的 dataSource 继续承接新建请求, 此处什么也不需要做 + */ + + boolean isReloadStatusOK = true; + + /** + * 2.2、新的 dataHosts 初始化 + */ + for (PhysicalDBPool dbPool : newDataHosts.values()) { + String hostName = dbPool.getHostName(); + + // 设置 schemas + ArrayList dnSchemas = new ArrayList(30); + for (PhysicalDBNode dn : newDataNodes.values()) { + if (dn.getDbPool().getHostName().equals(hostName)) { + dnSchemas.add(dn.getDatabase()); + } + } + dbPool.setSchemas( dnSchemas.toArray(new String[dnSchemas.size()]) ); + + // 获取 data host + String dnIndex = DnPropertyUtil.loadDnIndexProps().getProperty(dbPool.getHostName(), "0"); + if ( !"0".equals(dnIndex) ) { + LOGGER.info("init datahost: " + dbPool.getHostName() + " to use datasource index:" + dnIndex); + } + + dbPool.init( Integer.valueOf(dnIndex) ); + if ( !dbPool.isInitSuccess() ) { + isReloadStatusOK = false; + break; + } + } + + /** + * TODO: 确认初始化情况 + * + * 新的 dataHosts 是否初始化成功 + */ + if ( isReloadStatusOK ) { + + /** + * 2.3、 在老的配置上,应用新的配置,开始准备承接任务 + */ + 
config.reload(newUsers, newSchemas, newDataNodes, newDataHosts, newCluster, newFirewall, true); + + /** + * 2.4、 处理旧的资源 + */ + LOGGER.warn("1、clear old backend connection(size): " + NIOProcessor.backends_old.size()); + + // 清除前一次 reload 转移出去的 old Cons + Iterator iter = NIOProcessor.backends_old.iterator(); + while( iter.hasNext() ) { + BackendConnection con = iter.next(); + con.close("clear old datasources"); + iter.remove(); + } + + Map oldDataHosts = config.getBackupDataHosts(); + for (PhysicalDBPool dbPool : oldDataHosts.values()) { + dbPool.stopHeartbeat(); + + // 提取数据源下的所有连接 + for (PhysicalDatasource ds : dbPool.getAllDataSources()) { + // + for (NIOProcessor processor : MycatServer.getInstance().getProcessors()) { + for (BackendConnection con : processor.getBackends().values()) { + if (con instanceof MySQLConnection) { + MySQLConnection mysqlCon = (MySQLConnection) con; + if ( mysqlCon.getPool() == ds) { + NIOProcessor.backends_old.add( con ); + } + + } else if (con instanceof JDBCConnection) { + JDBCConnection jdbcCon = (JDBCConnection) con; + if (jdbcCon.getPool() == ds) { + NIOProcessor.backends_old.add( con ); + } + } + } + } + } + } + LOGGER.warn("2、to be recycled old backend connection(size): " + NIOProcessor.backends_old.size()); + + //清理缓存 + MycatServer.getInstance().getCacheService().clearCache(); + MycatServer.getInstance().initRuleData(); + return true; + + } else { + // 如果重载不成功,则清理已初始化的资源。 + LOGGER.warn("reload failed, clear previously created datasources "); + for (PhysicalDBPool dbPool : newDataHosts.values()) { + dbPool.clearDataSources("reload config"); + dbPool.stopHeartbeat(); + } + return false; + } + } + + public static boolean reload() { + + /** + * 1、载入新的配置, ConfigInitializer 内部完成自检工作, 由于不更新数据源信息,此处不自检 dataHost dataNode + */ + ConfigInitializer loader = new ConfigInitializer(false); + Map users = loader.getUsers(); + Map schemas = loader.getSchemas(); + Map dataNodes = loader.getDataNodes(); + Map dataHosts = loader.getDataHosts(); + 
MycatCluster cluster = loader.getCluster(); + FirewallConfig firewall = loader.getFirewall(); + + /** + * 2、在老的配置上,应用新的配置 + */ + MycatServer.getInstance().getConfig().reload(users, schemas, dataNodes, dataHosts, cluster, firewall, false); + + /** + * 3、清理缓存 + */ + MycatServer.getInstance().getCacheService().clearCache(); + MycatServer.getInstance().initRuleData(); + return true; + } + + /** + * 异步执行回调类,用于回写数据给用户等。 + */ + private static class ReloadCallBack implements FutureCallback { + + private ManagerConnection mc; + + private ReloadCallBack(ManagerConnection c) { + this.mc = c; + } + + @Override + public void onSuccess(Boolean result) { + if (result) { + LOGGER.warn("send ok package to client " + String.valueOf(mc)); + OkPacket ok = new OkPacket(); + ok.packetId = 1; + ok.affectedRows = 1; + ok.serverStatus = 2; + ok.message = "Reload config success".getBytes(); + ok.write(mc); + } else { + mc.writeErrMessage(ErrorCode.ER_YES, "Reload config failure"); + } + } + + @Override + public void onFailure(Throwable t) { + mc.writeErrMessage(ErrorCode.ER_YES, "Reload config failure"); + } + } +} diff --git a/src/main/java/io/mycat/manager/response/ReloadQueryCf.java b/src/main/java/io/mycat/manager/response/ReloadQueryCf.java new file mode 100644 index 000000000..6067d6728 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ReloadQueryCf.java @@ -0,0 +1,37 @@ +package io.mycat.manager.response; + + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.OkPacket; +import io.mycat.statistic.stat.QueryConditionAnalyzer; + +public class ReloadQueryCf { + + private static final Logger logger = LoggerFactory.getLogger(ReloadSqlSlowTime.class); + + public static void execute(ManagerConnection c, String cf) { + + if ( cf == null ) { + cf = "NULL"; + } + + QueryConditionAnalyzer.getInstance().setCf(cf); + + StringBuilder s = new StringBuilder(); + s.append(c).append("Reset show @@sql.condition="+ 
cf +" success by manager"); + + logger.warn(s.toString()); + + OkPacket ok = new OkPacket(); + ok.packetId = 1; + ok.affectedRows = 1; + ok.serverStatus = 2; + ok.message = "Reset show @@sql.condition success".getBytes(); + ok.write(c); + + System.out.println(s.toString()); + } + +} diff --git a/src/main/java/io/mycat/manager/response/ReloadSqlSlowTime.java b/src/main/java/io/mycat/manager/response/ReloadSqlSlowTime.java new file mode 100644 index 000000000..a842d6651 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ReloadSqlSlowTime.java @@ -0,0 +1,36 @@ +package io.mycat.manager.response; + +import java.util.Map; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.OkPacket; +import io.mycat.statistic.stat.UserStat; +import io.mycat.statistic.stat.UserStatAnalyzer; + +public class ReloadSqlSlowTime { + private static final Logger logger = LoggerFactory.getLogger(ReloadSqlSlowTime.class); + + public static void execute(ManagerConnection c,long time) { + + Map statMap = UserStatAnalyzer.getInstance().getUserStatMap(); + for (UserStat userStat : statMap.values()) { + userStat.setSlowTime(time); + } + + StringBuilder s = new StringBuilder(); + s.append(c).append("Reset show @@sql.slow="+time+" time success by manager"); + + logger.warn(s.toString()); + + OkPacket ok = new OkPacket(); + ok.packetId = 1; + ok.affectedRows = 1; + ok.serverStatus = 2; + ok.message = "Reset show @@sql.slow time success".getBytes(); + ok.write(c); + System.out.println(s.toString()); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ReloadUser.java b/src/main/java/io/mycat/manager/response/ReloadUser.java similarity index 82% rename from src/main/java/io/mycat/server/response/ReloadUser.java rename to src/main/java/io/mycat/manager/response/ReloadUser.java index 8f1d96c1d..464fed1fd 100644 --- a/src/main/java/io/mycat/server/response/ReloadUser.java +++ 
b/src/main/java/io/mycat/manager/response/ReloadUser.java @@ -21,24 +21,22 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.response; +package io.mycat.manager.response; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import io.mycat.server.ErrorCode; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.OkPacket; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import io.mycat.config.ErrorCode; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.OkPacket; /** * @author mycat */ public final class ReloadUser { - private static final Logger logger = LoggerFactory - .getLogger(ReloadUser.class); + private static final Logger logger = LoggerFactory.getLogger(ReloadUser.class); - public static void execute(MySQLFrontConnection c) { + public static void execute(ManagerConnection c) { boolean status = false; if (status) { StringBuilder s = new StringBuilder(); diff --git a/src/main/java/io/mycat/manager/response/ReloadUserStat.java b/src/main/java/io/mycat/manager/response/ReloadUserStat.java new file mode 100644 index 000000000..4e9981655 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ReloadUserStat.java @@ -0,0 +1,36 @@ +package io.mycat.manager.response; + +import java.util.Map; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.OkPacket; +import io.mycat.statistic.stat.UserStat; +import io.mycat.statistic.stat.UserStatAnalyzer; + +public final class ReloadUserStat { + + private static final Logger logger = LoggerFactory.getLogger(ReloadUserStat.class); + + public static void execute(ManagerConnection c) { + + Map statMap = UserStatAnalyzer.getInstance().getUserStatMap(); + for (UserStat userStat : statMap.values()) { + userStat.reset(); + } + + StringBuilder s = new StringBuilder(); + s.append(c).append("Reset show @@sql @@sql.sum @@sql.slow success by manager"); + + 
logger.warn(s.toString()); + + OkPacket ok = new OkPacket(); + ok.packetId = 1; + ok.affectedRows = 1; + ok.serverStatus = 2; + ok.message = "Reset show @@sql @@sql.sum @@sql.slow success".getBytes(); + ok.write(c); + } + +} diff --git a/src/main/java/io/mycat/manager/response/ReloadZktoXml.java b/src/main/java/io/mycat/manager/response/ReloadZktoXml.java new file mode 100644 index 000000000..ffa5ce20a --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ReloadZktoXml.java @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.StringUtil; + +/** + * 进行reload_zk操作的响应 + * + * @author mycat + * @author mycat + */ +public final class ReloadZktoXml { + + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("STATEMENT", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c, String rsp) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c, true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c, true); + } + + // write eof + buffer = eof.write(buffer, c, true); + + // write rows + byte packetId = eof.packetId; + + RowDataPacket row = getRow(rsp, c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c, true); + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c, true); + + // post write + c.write(buffer); + } + + private static RowDataPacket getRow(String stmt, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(stmt, charset)); + return row; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/RollbackConfig.java 
b/src/main/java/io/mycat/manager/response/RollbackConfig.java new file mode 100644 index 000000000..6d2d77eda --- /dev/null +++ b/src/main/java/io/mycat/manager/response/RollbackConfig.java @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.response; + +import java.util.Map; +import java.util.concurrent.locks.ReentrantLock; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.config.ErrorCode; +import io.mycat.config.MycatCluster; +import io.mycat.config.MycatConfig; +import io.mycat.config.model.FirewallConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.UserConfig; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.OkPacket; + +/** + * @author mycat + */ +public final class RollbackConfig { + private static final Logger LOGGER = LoggerFactory.getLogger(RollbackConfig.class); + + public static void execute(ManagerConnection c) { + final ReentrantLock lock = MycatServer.getInstance().getConfig() + .getLock(); + lock.lock(); + try { + if (rollback()) { + StringBuilder s = new StringBuilder(); + s.append(c).append("Rollback config success by manager"); + LOGGER.warn(s.toString()); + OkPacket ok = new OkPacket(); + ok.packetId = 1; + ok.affectedRows = 1; + ok.serverStatus = 2; + ok.message = "Rollback config success".getBytes(); + ok.write(c); + } else { + c.writeErrMessage(ErrorCode.ER_YES, "Rollback config failure"); + } + } finally { + lock.unlock(); + } + } + + private static boolean rollback() { + MycatConfig conf = MycatServer.getInstance().getConfig(); + Map users = conf.getBackupUsers(); + Map schemas = conf.getBackupSchemas(); + Map dataNodes = conf.getBackupDataNodes(); + Map dataHosts = conf.getBackupDataHosts(); + MycatCluster cluster = conf.getBackupCluster(); + FirewallConfig firewall = conf.getBackupFirewall(); + + // 检查可回滚状态 + if (!conf.canRollback()) { + return false; + } + + // 如果回滚已经存在的pool + boolean rollbackStatus = true; + Map cNodes = conf.getDataHosts(); + for (PhysicalDBPool dn : dataHosts.values()) { + dn.init(dn.getActivedIndex()); + if 
(!dn.isInitSuccess()) { + rollbackStatus = false; + break; + } + } + // 如果回滚不成功,则清理已初始化的资源。 + if (!rollbackStatus) { + for (PhysicalDBPool dn : dataHosts.values()) { + dn.clearDataSources("rollbackup config"); + dn.stopHeartbeat(); + } + return false; + } + + // 应用回滚 + conf.rollback(users, schemas, dataNodes, dataHosts, cluster, firewall); + + // 处理旧的资源 + for (PhysicalDBPool dn : cNodes.values()) { + dn.clearDataSources("clear old config "); + dn.stopHeartbeat(); + } + + //清理缓存 + MycatServer.getInstance().getCacheService().clearCache(); + return true; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/RollbackUser.java b/src/main/java/io/mycat/manager/response/RollbackUser.java similarity index 82% rename from src/main/java/io/mycat/server/response/RollbackUser.java rename to src/main/java/io/mycat/manager/response/RollbackUser.java index 13422e8d2..b13a8403d 100644 --- a/src/main/java/io/mycat/server/response/RollbackUser.java +++ b/src/main/java/io/mycat/manager/response/RollbackUser.java @@ -21,24 +21,22 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.response; +package io.mycat.manager.response; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import io.mycat.server.ErrorCode; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.OkPacket; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import io.mycat.config.ErrorCode; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.OkPacket; /** * @author mycat */ public final class RollbackUser { - private static final Logger logger = LoggerFactory - .getLogger(RollbackUser.class); + private static final Logger logger = LoggerFactory.getLogger(RollbackUser.class); - public static void execute(MySQLFrontConnection c) { + public static void execute(ManagerConnection c) { boolean status = false; if (status) { StringBuilder s = new StringBuilder(); diff --git a/src/main/java/io/mycat/manager/response/SelectSessionAutoIncrement.java b/src/main/java/io/mycat/manager/response/SelectSessionAutoIncrement.java new file mode 100644 index 000000000..b72373a97 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/SelectSessionAutoIncrement.java @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.LongUtil; + +/** + * @author mycat + */ +public final class SelectSessionAutoIncrement { + + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("SESSION.AUTOINCREMENT", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.packetId = ++packetId; + row.add(LongUtil.toBytes(1)); + buffer = row.write(buffer, c,true); + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // post 
write + c.write(buffer); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/SelectSessionTxReadOnly.java b/src/main/java/io/mycat/manager/response/SelectSessionTxReadOnly.java new file mode 100644 index 000000000..e4b9a0b4e --- /dev/null +++ b/src/main/java/io/mycat/manager/response/SelectSessionTxReadOnly.java @@ -0,0 +1,62 @@ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.LongUtil; + +public final class SelectSessionTxReadOnly { + + private static final String SESSION_TX_READ_ONLY = "@@SESSION.TX_READ_ONLY"; + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField(SESSION_TX_READ_ONLY, Fields.FIELD_TYPE_INT24); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.packetId = ++packetId; + row.add(LongUtil.toBytes(0)); + buffer = row.write(buffer, c,true); + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer 
= lastEof.write(buffer, c,true); + + // post write + c.write(buffer); + } + +} diff --git a/src/main/java/io/mycat/manager/response/SelectVersionComment.java b/src/main/java/io/mycat/manager/response/SelectVersionComment.java new file mode 100644 index 000000000..5aa7572d9 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/SelectVersionComment.java @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; + +/** + * @author mycat + */ +public final class SelectVersionComment { + + private static final byte[] VERSION_COMMENT = "MyCat Server (monitor)".getBytes(); + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("@@VERSION_COMMENT", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(VERSION_COMMENT); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // post write + c.write(buffer); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowBackend.java b/src/main/java/io/mycat/manager/response/ShowBackend.java similarity index 76% rename from src/main/java/io/mycat/server/response/ShowBackend.java rename to 
src/main/java/io/mycat/manager/response/ShowBackend.java index 7e6bfdcbd..64d517e31 100644 --- a/src/main/java/io/mycat/server/response/ShowBackend.java +++ b/src/main/java/io/mycat/manager/response/ShowBackend.java @@ -21,20 +21,23 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.response; +package io.mycat.manager.response; +import java.nio.ByteBuffer; + +import io.mycat.MycatServer; import io.mycat.backend.BackendConnection; -import io.mycat.backend.nio.MySQLBackendConnection; -import io.mycat.net.BufferArray; -import io.mycat.net.Connection; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import io.mycat.backend.jdbc.JDBCConnection; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.backend.mysql.nio.MySQLConnection; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.BackendAIOConnection; +import io.mycat.net.NIOProcessor; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; import io.mycat.util.IntegerUtil; import io.mycat.util.LongUtil; import io.mycat.util.StringUtil; @@ -98,55 +101,51 @@ public class ShowBackend { eof.packetId = ++packetId; } - public static void execute(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - header.write(bufferArray); + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + buffer = header.write(buffer, c, true); for (FieldPacket field : fields) { - field.write(bufferArray); + buffer = field.write(buffer, c, true); } - eof.write(bufferArray); + 
buffer = eof.write(buffer, c, true); byte packetId = eof.packetId; String charset = c.getCharset(); - - - for ( Connection con : NetSystem.getInstance().getAllConnectios().values()) { - if(con instanceof BackendConnection) - { - RowDataPacket row = getRow((BackendConnection) con, charset); + for (NIOProcessor p : MycatServer.getInstance().getProcessors()) { + for (BackendConnection bc : p.getBackends().values()) { + if (bc != null) { + RowDataPacket row = getRow(bc, charset); row.packetId = ++packetId; - row.write(bufferArray); + buffer = row.write(buffer, c, true); } - + } } EOFPacket lastEof = new EOFPacket(); lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - c.write(bufferArray); + buffer = lastEof.write(buffer, c, true); + c.write(buffer); } private static RowDataPacket getRow(BackendConnection c, String charset) { RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add("N/A".getBytes()); - long netInBytes = 0; - long netOutbytes = 0; + if (c instanceof BackendAIOConnection) { + row.add(((BackendAIOConnection) c).getProcessor().getName() + .getBytes()); + } else if(c instanceof JDBCConnection){ + row.add(((JDBCConnection)c).getProcessor().getName().getBytes()); + }else{ + row.add("N/A".getBytes()); + } row.add(LongUtil.toBytes(c.getId())); long threadId = 0; - if (c instanceof Connection) { - Connection nioCon = (Connection) c; - if (nioCon instanceof MySQLBackendConnection) { - threadId = ((MySQLBackendConnection) nioCon).getThreadId(); - } - - netInBytes = nioCon.getNetInBytes(); - netOutbytes = nioCon.getNetOutBytes(); + if (c instanceof MySQLConnection) { + threadId = ((MySQLConnection) c).getThreadId(); } row.add(LongUtil.toBytes(threadId)); row.add(StringUtil.encode(c.getHost(), charset)); row.add(IntegerUtil.toBytes(c.getPort())); row.add(IntegerUtil.toBytes(c.getLocalPort())); - row.add(LongUtil.toBytes(netInBytes)); - row.add(LongUtil.toBytes(netOutbytes)); + row.add(LongUtil.toBytes(c.getNetInBytes())); + 
row.add(LongUtil.toBytes(c.getNetOutBytes())); row.add(LongUtil.toBytes((TimeUtil.currentTimeMillis() - c .getStartupTime()) / 1000L)); row.add(c.isClosed() ? "true".getBytes() : "false".getBytes()); @@ -160,8 +159,8 @@ private static RowDataPacket getRow(BackendConnection c, String charset) { String txLevel = ""; String txAutommit = ""; - if (c instanceof MySQLBackendConnection) { - MySQLBackendConnection mysqlC = (MySQLBackendConnection) c; + if (c instanceof MySQLConnection) { + MySQLConnection mysqlC = (MySQLConnection) c; writeQueueSize = mysqlC.getWriteQueue().size(); schema = mysqlC.getSchema(); charsetInf = mysqlC.getCharset() + ":" + mysqlC.getCharsetIndex(); diff --git a/src/main/java/io/mycat/manager/response/ShowBackendOld.java b/src/main/java/io/mycat/manager/response/ShowBackendOld.java new file mode 100644 index 000000000..f72e5daa1 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowBackendOld.java @@ -0,0 +1,103 @@ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; + +import io.mycat.backend.BackendConnection; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.backend.mysql.nio.MySQLConnection; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.NIOProcessor; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.IntegerUtil; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; +import io.mycat.util.TimeUtil; + +/** + * 查询 reload @@config_all 后产生的后端连接(待回收) + * + * @author zhuam + */ +public class ShowBackendOld { + + private static final int FIELD_COUNT = 10; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + static { + int i = 0; + byte packetId = 
0; + header.packetId = ++packetId; + fields[i] = PacketUtil.getField("id", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("mysqlId", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("host", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("port", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("l_port", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("net_in", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("net_out", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("life", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("lasttime", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("borrowed",Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + buffer = header.write(buffer, c, true); + for (FieldPacket field : fields) { + buffer = field.write(buffer, c, true); + } + buffer = eof.write(buffer, c, true); + byte packetId = eof.packetId; + String charset = c.getCharset(); + + for (BackendConnection bc : NIOProcessor.backends_old) { + if ( bc != null) { + RowDataPacket row = getRow(bc, charset); + row.packetId = ++packetId; + buffer = row.write(buffer, c, true); + } + } + + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c, true); + c.write(buffer); + } + + private static RowDataPacket getRow(BackendConnection c, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(LongUtil.toBytes(c.getId())); + long 
threadId = 0; + if (c instanceof MySQLConnection) { + threadId = ((MySQLConnection) c).getThreadId(); + } + row.add(LongUtil.toBytes(threadId)); + row.add(StringUtil.encode(c.getHost(), charset)); + row.add(IntegerUtil.toBytes(c.getPort())); + row.add(IntegerUtil.toBytes(c.getLocalPort())); + row.add(LongUtil.toBytes(c.getNetInBytes())); + row.add(LongUtil.toBytes(c.getNetOutBytes())); + row.add(LongUtil.toBytes((TimeUtil.currentTimeMillis() - c.getStartupTime()) / 1000L)); + row.add(LongUtil.toBytes( c.getLastTime() )); + boolean isBorrowed = c.isBorrowed(); + row.add(isBorrowed ? "true".getBytes() : "false".getBytes()); + return row; + } + +} diff --git a/src/main/java/io/mycat/manager/response/ShowCollation.java b/src/main/java/io/mycat/manager/response/ShowCollation.java new file mode 100644 index 000000000..c21dd46d1 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowCollation.java @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.IntegerUtil; +import io.mycat.util.LongUtil; + +/** + * @author mycat + * @author mycat + */ +public final class ShowCollation { + + private static final int FIELD_COUNT = 6; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("COLLATION", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("CHARSET", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("ID", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("DEFAULT", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("COMPILED", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("SORTLEN", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = 
eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + RowDataPacket row = getRow(c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + + // write lastEof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add("utf8_general_ci".getBytes()); + row.add("utf8".getBytes()); + row.add(IntegerUtil.toBytes(33)); + row.add("Yes".getBytes()); + row.add("Yes".getBytes()); + row.add(LongUtil.toBytes(1)); + return row; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowCommand.java b/src/main/java/io/mycat/manager/response/ShowCommand.java new file mode 100644 index 000000000..a67426628 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowCommand.java @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.NIOProcessor; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.statistic.CommandCount; +import io.mycat.util.LongUtil; + +/** + * 统计各类数据包的执行次数 + * + * @author mycat + * @author mycat + */ +public final class ShowCommand { + + private static final int FIELD_COUNT = 10; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("PROCESSOR", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("INIT_DB", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("QUERY", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("STMT_PREPARE", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("STMT_EXECUTE", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("STMT_CLOSE", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("PING", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("KILL", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + 
fields[i] = PacketUtil.getField("QUIT", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("OTHER", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + for (NIOProcessor p : MycatServer.getInstance().getProcessors()) { + RowDataPacket row = getRow(p, c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(NIOProcessor processor, String charset) { + CommandCount cc = processor.getCommands(); + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(processor.getName().getBytes()); + row.add(LongUtil.toBytes(cc.initDBCount())); + row.add(LongUtil.toBytes(cc.queryCount())); + row.add(LongUtil.toBytes(cc.stmtPrepareCount())); + row.add(LongUtil.toBytes(cc.stmtExecuteCount())); + row.add(LongUtil.toBytes(cc.stmtCloseCount())); + row.add(LongUtil.toBytes(cc.pingCount())); + row.add(LongUtil.toBytes(cc.killCount())); + row.add(LongUtil.toBytes(cc.quitCount())); + row.add(LongUtil.toBytes(cc.otherCount())); + return row; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowConnection.java b/src/main/java/io/mycat/manager/response/ShowConnection.java similarity index 71% rename from src/main/java/io/mycat/server/response/ShowConnection.java rename to src/main/java/io/mycat/manager/response/ShowConnection.java index 
81fb02a5a..a1c7e8ffc 100644 --- a/src/main/java/io/mycat/server/response/ShowConnection.java +++ b/src/main/java/io/mycat/manager/response/ShowConnection.java @@ -21,25 +21,26 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.response; - -import io.mycat.net.BufferArray; -import io.mycat.net.Connection; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +package io.mycat.manager.response; + +import java.nio.ByteBuffer; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.FrontendConnection; +import io.mycat.net.NIOProcessor; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; import io.mycat.util.IntegerUtil; import io.mycat.util.LongUtil; import io.mycat.util.StringUtil; import io.mycat.util.TimeUtil; -import java.nio.ByteBuffer; - /** * 查看当前有效连接信息 * @@ -48,7 +49,7 @@ */ public final class ShowConnection { - private static final int FIELD_COUNT = 14; + private static final int FIELD_COUNT = 15; private static final ResultSetHeaderPacket header = PacketUtil .getHeader(FIELD_COUNT); private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; @@ -73,6 +74,9 @@ public final class ShowConnection { fields[i] = PacketUtil.getField("LOCAL_PORT", Fields.FIELD_TYPE_LONG); fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("USER", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; fields[i] = PacketUtil.getField("SCHEMA", 
Fields.FIELD_TYPE_VAR_STRING); fields[i++].packetId = ++packetId; @@ -108,64 +112,67 @@ public final class ShowConnection { eof.packetId = ++packetId; } - public static void execute(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); // write header - header.write(bufferArray); + buffer = header.write(buffer, c, true); // write fields for (FieldPacket field : fields) { - field.write(bufferArray); + buffer = field.write(buffer, c, true); } // write eof - eof.write(bufferArray); + buffer = eof.write(buffer, c, true); // write rows byte packetId = eof.packetId; String charset = c.getCharset(); - for (Connection con : NetSystem.getInstance().getAllConnectios() - .values()) { - if (con instanceof MySQLFrontConnection) { - RowDataPacket row = getRow((MySQLFrontConnection) con, charset); - row.packetId = ++packetId; - row.write(bufferArray); + NIOProcessor[] processors = MycatServer.getInstance().getProcessors(); + for (NIOProcessor p : processors) { + for (FrontendConnection fc : p.getFrontends().values()) { + if (fc != null) { + RowDataPacket row = getRow(fc, charset); + row.packetId = ++packetId; + buffer = row.write(buffer, c, true); + } } } // write last eof EOFPacket lastEof = new EOFPacket(); lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + buffer = lastEof.write(buffer, c, true); // write buffer - c.write(bufferArray); + c.write(buffer); } - private static RowDataPacket getRow(MySQLFrontConnection c, String charset) { + private static RowDataPacket getRow(FrontendConnection c, String charset) { RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add("N/A".getBytes()); + row.add(c.getProcessor().getName().getBytes()); row.add(LongUtil.toBytes(c.getId())); row.add(StringUtil.encode(c.getHost(), charset)); row.add(IntegerUtil.toBytes(c.getPort())); row.add(IntegerUtil.toBytes(c.getLocalPort())); + 
row.add(StringUtil.encode(c.getUser(), charset)); row.add(StringUtil.encode(c.getSchema(), charset)); - row.add(StringUtil.encode(c.getCharset() + ":" + c.getCharsetIndex(), - charset)); + row.add(StringUtil.encode(c.getCharset()+":"+c.getCharsetIndex(), charset)); row.add(LongUtil.toBytes(c.getNetInBytes())); row.add(LongUtil.toBytes(c.getNetOutBytes())); - row.add(LongUtil.toBytes((TimeUtil.currentTimeMillis() - c - .getStartupTime()) / 1000L)); + row.add(LongUtil.toBytes((TimeUtil.currentTimeMillis() - c.getStartupTime()) / 1000L)); ByteBuffer bb = c.getReadBuffer(); row.add(IntegerUtil.toBytes(bb == null ? 0 : bb.capacity())); row.add(IntegerUtil.toBytes(c.getWriteQueue().size())); String txLevel = ""; String txAutommit = ""; - txLevel = c.getTxIsolation() + ""; - txAutommit = c.isAutocommit() + ""; + if (c instanceof ServerConnection) { + ServerConnection mysqlC = (ServerConnection) c; + txLevel = mysqlC.getTxIsolation() + ""; + txAutommit = mysqlC.isAutocommit() + ""; + } row.add(txLevel.getBytes()); row.add(txAutommit.getBytes()); diff --git a/src/main/java/io/mycat/manager/response/ShowConnectionSQL.java b/src/main/java/io/mycat/manager/response/ShowConnectionSQL.java new file mode 100644 index 000000000..5af54db06 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowConnectionSQL.java @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.FrontendConnection; +import io.mycat.net.NIOProcessor; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; +import io.mycat.util.TimeUtil; + +/** + * @author mycat + */ +public final class ShowConnectionSQL { + + private static final int FIELD_COUNT = 7; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("ID", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("HOST", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("USER", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("SCHEMA", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = 
PacketUtil.getField("START_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("EXECUTE_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("SQL", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + String charset = c.getCharset(); + for (NIOProcessor p : MycatServer.getInstance().getProcessors()) { + for (FrontendConnection fc : p.getFrontends().values()) { + if (!fc.isClosed()) { + if(fc.getExecuteSql()==null){ + continue; + } + if(fc instanceof ServerConnection){ + RowDataPacket row = getRow(fc, charset); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + } + } + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(FrontendConnection c, String charset) { + String executeSql = c.getExecuteSql(); + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(LongUtil.toBytes(c.getId())); + row.add(StringUtil.encode(c.getHost(), charset)); + row.add(StringUtil.encode(c.getUser(), charset)); + row.add(StringUtil.encode(c.getSchema(), charset)); + row.add(LongUtil.toBytes(c.getLastReadTime())); + long rt = c.getLastReadTime(); + long wt = c.getLastWriteTime(); + row.add(LongUtil.toBytes(executeSql==null?0:((wt > rt) ? 
(wt - rt) : (TimeUtil.currentTimeMillis() - rt)))); + row.add(StringUtil.encode(executeSql==null?"":executeSql, charset) ); + return row; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowDataNode.java b/src/main/java/io/mycat/manager/response/ShowDataNode.java similarity index 85% rename from src/main/java/io/mycat/server/response/ShowDataNode.java rename to src/main/java/io/mycat/manager/response/ShowDataNode.java index f32122681..f788f0f6e 100644 --- a/src/main/java/io/mycat/server/response/ShowDataNode.java +++ b/src/main/java/io/mycat/manager/response/ShowDataNode.java @@ -21,30 +21,9 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.response; - -import io.mycat.MycatServer; -import io.mycat.backend.PhysicalDBNode; -import io.mycat.backend.PhysicalDBPool; -import io.mycat.backend.PhysicalDatasource; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; -import io.mycat.util.IntegerUtil; -import io.mycat.util.LongUtil; -import io.mycat.util.Pair; -import io.mycat.util.PairUtil; -import io.mycat.util.StringUtil; -import io.mycat.util.TimeUtil; +package io.mycat.manager.response; +import java.nio.ByteBuffer; import java.text.DecimalFormat; import java.text.NumberFormat; import java.util.ArrayList; @@ -53,6 +32,26 @@ import java.util.List; import java.util.Map; +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import 
io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.config.MycatConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.parser.util.Pair; +import io.mycat.route.parser.util.PairUtil; +import io.mycat.util.IntegerUtil; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; +import io.mycat.util.TimeUtil; + /** * 查看数据节点信息 * @@ -115,20 +114,19 @@ public final class ShowDataNode { eof.packetId = ++packetId; } - public static void execute(MySQLFrontConnection c,String name) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + public static void execute(ManagerConnection c, String name) { + ByteBuffer buffer = c.allocate(); // write header - header.write(bufferArray); + buffer = header.write(buffer, c, true); // write fields for (FieldPacket field : fields) { - field.write(bufferArray); + buffer = field.write(buffer, c, true); } // write eof - eof.write(bufferArray); + buffer = eof.write(buffer, c, true); // write rows byte packetId = eof.packetId; @@ -147,16 +145,16 @@ public static void execute(MySQLFrontConnection c,String name) { for (String key : keys) { RowDataPacket row = getRow(dataNodes.get(key), c.getCharset()); row.packetId = ++packetId; - row.write(bufferArray); + buffer = row.write(buffer, c, true); } // write last eof EOFPacket lastEof = new EOFPacket(); lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + buffer = lastEof.write(buffer, c, true); // post write - c.write(bufferArray); + c.write(buffer); } private static RowDataPacket getRow(PhysicalDBNode node, String charset) { diff --git a/src/main/java/io/mycat/server/response/ShowDataSource.java b/src/main/java/io/mycat/manager/response/ShowDataSource.java similarity index 80% rename 
from src/main/java/io/mycat/server/response/ShowDataSource.java rename to src/main/java/io/mycat/manager/response/ShowDataSource.java index da5d54f68..cfd539655 100644 --- a/src/main/java/io/mycat/server/response/ShowDataSource.java +++ b/src/main/java/io/mycat/manager/response/ShowDataSource.java @@ -21,30 +21,29 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.response; - -import io.mycat.MycatServer; -import io.mycat.backend.PhysicalDBNode; -import io.mycat.backend.PhysicalDatasource; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; -import io.mycat.util.IntegerUtil; -import io.mycat.util.LongUtil; -import io.mycat.util.StringUtil; +package io.mycat.manager.response; +import java.nio.ByteBuffer; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.config.MycatConfig; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.IntegerUtil; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + /** * 查看数据源信息 * @@ -53,7 +52,7 @@ */ public final class ShowDataSource { - private static final int FIELD_COUNT = 10; + private static final int FIELD_COUNT = 12; private static final ResultSetHeaderPacket header = 
PacketUtil .getHeader(FIELD_COUNT); private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; @@ -93,24 +92,29 @@ public final class ShowDataSource { fields[i] = PacketUtil.getField("EXECUTE", Fields.FIELD_TYPE_LONGLONG); fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("READ_LOAD", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("WRITE_LOAD", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; eof.packetId = ++packetId; } - public static void execute(MySQLFrontConnection c, String name) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + public static void execute(ManagerConnection c, String name) { + ByteBuffer buffer = c.allocate(); // write header - header.write(bufferArray); + buffer = header.write(buffer, c,true); // write fields for (FieldPacket field : fields) { - field.write(bufferArray); + buffer = field.write(buffer, c,true); } // write eof - eof.write(bufferArray); + buffer = eof.write(buffer, c,true); // write rows byte packetId = eof.packetId; @@ -141,17 +145,17 @@ public static void execute(MySQLFrontConnection c, String name) { for (PhysicalDatasource ds : dsEntry.getValue()) { RowDataPacket row = getRow(dnName, ds, c.getCharset()); row.packetId = ++packetId; - row.write(bufferArray); + buffer = row.write(buffer, c,true); } } // write last eof EOFPacket lastEof = new EOFPacket(); lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + buffer = lastEof.write(buffer, c,true); // post write - c.write(bufferArray); + c.write(buffer); } private static RowDataPacket getRow(String dataNode, PhysicalDatasource ds, @@ -167,6 +171,8 @@ private static RowDataPacket getRow(String dataNode, PhysicalDatasource ds, row.add(IntegerUtil.toBytes(ds.getIdleCount())); row.add(IntegerUtil.toBytes(ds.getSize())); row.add(LongUtil.toBytes(ds.getExecuteCount())); + row.add(LongUtil.toBytes(ds.getReadCount())); + 
row.add(LongUtil.toBytes(ds.getWriteCount())); return row; } diff --git a/src/main/java/io/mycat/manager/response/ShowDatabase.java b/src/main/java/io/mycat/manager/response/ShowDatabase.java new file mode 100644 index 000000000..8b37ce729 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowDatabase.java @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.TreeSet; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.config.model.SchemaConfig; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.StringUtil; + +/** + * 查看schema信息 + * + * @author mycat + * @author mycat + */ +public final class ShowDatabase { + + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("DATABASE", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + Map schemas = MycatServer.getInstance().getConfig().getSchemas(); + for (String name : new TreeSet(schemas.keySet())) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(name, c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write lastEof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + +} \ No newline at end of file diff --git 
a/src/main/java/io/mycat/manager/response/ShowDatasourceCluster.java b/src/main/java/io/mycat/manager/response/ShowDatasourceCluster.java new file mode 100644 index 000000000..a3eddbcf1 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowDatasourceCluster.java @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.text.SimpleDateFormat; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.heartbeat.DBHeartbeat; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.config.MycatConfig; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.statistic.DataSourceSyncRecorder; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + + +/** + * @author songwie + */ +public class ShowDatasourceCluster { + + private static final int FIELD_COUNT = 17; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + private static final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + + /*private static final String[] MYSQL_CLUSTER_STAUTS_COLMS = new String[] { + "wsrep_incoming_addresses","wsrep_cluster_size","wsrep_cluster_status", "wsrep_connected", "wsrep_flow_control_paused", + "wsrep_local_state_comment","wsrep_ready","wsrep_flow_control_paused_ns","wsrep_flow_control_recv","wsrep_local_bf_aborts", + "wsrep_local_recv_queue_avg","wsrep_local_send_queue_avg","wsrep_apply_oool","wsrep_apply_oooe"};*/ + + + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("name", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("host", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] 
= PacketUtil.getField("port", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_incoming_addresses", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_cluster_size", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_cluster_status", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_connected", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_flow_control_paused", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_local_state_comment", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_ready", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_flow_control_paused_ns", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_flow_control_recv", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_local_bf_aborts", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_local_recv_queue_avg", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_local_send_queue_avg", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_apply_oool", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("wsrep_apply_oooe", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void response(ManagerConnection c,String 
stmt) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + + for (RowDataPacket row : getRows(c.getCharset())) { + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // post write + c.write(buffer); + } + + private static List getRows(String charset) { + List list = new LinkedList(); + MycatConfig conf = MycatServer.getInstance().getConfig(); + // host nodes + Map dataHosts = conf.getDataHosts(); + for (PhysicalDBPool pool : dataHosts.values()) { + for (PhysicalDatasource ds : pool.getAllDataSources()) { + DBHeartbeat hb = ds.getHeartbeat(); + DataSourceSyncRecorder record = hb.getAsynRecorder(); + Map states = record.getRecords(); + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + if(!states.isEmpty()){ + row.add(StringUtil.encode(ds.getName(),charset)); + row.add(StringUtil.encode(ds.getConfig().getIp(),charset)); + row.add(LongUtil.toBytes(ds.getConfig().getPort())); + + row.add(StringUtil.encode(states.get("wsrep_incoming_addresses")==null?"":states.get("wsrep_incoming_addresses"),charset)); + row.add(StringUtil.encode(states.get("wsrep_cluster_size")==null?"":states.get("wsrep_cluster_size"),charset)); + row.add(StringUtil.encode(states.get("wsrep_cluster_status")==null?"":states.get("wsrep_cluster_status"),charset)); + row.add(StringUtil.encode(states.get("wsrep_connected")==null?"":states.get("wsrep_connected"),charset)); + row.add(StringUtil.encode(states.get("wsrep_flow_control_paused")==null?"":states.get("wsrep_flow_control_paused"),charset)); + 
row.add(StringUtil.encode(states.get("wsrep_local_state_comment")==null?"":states.get("wsrep_local_state_comment"),charset)); + row.add(StringUtil.encode(states.get("wsrep_ready")==null?"":states.get("wsrep_ready"),charset)); + row.add(StringUtil.encode(states.get("wsrep_flow_control_paused_ns")==null?"":states.get("wsrep_flow_control_paused_ns"),charset)); + row.add(StringUtil.encode(states.get("wsrep_flow_control_recv")==null?"":states.get("wsrep_flow_control_recv"),charset)); + row.add(StringUtil.encode(states.get("wsrep_local_bf_aborts")==null?"":states.get("wsrep_local_bf_aborts"),charset)); + row.add(StringUtil.encode(states.get("wsrep_local_recv_queue_avg")==null?"":states.get("wsrep_local_recv_queue_avg"),charset)); + row.add(StringUtil.encode(states.get("wsrep_local_send_queue_avg")==null?"":states.get("wsrep_local_recv_queue_avg"),charset)); + row.add(StringUtil.encode(states.get("wsrep_apply_oool")==null?"":states.get("wsrep_apply_oool"),charset)); + row.add(StringUtil.encode(states.get("wsrep_apply_oooe")==null?"":states.get("wsrep_apply_oooe"),charset)); + + + list.add(row); + } + } + } + return list; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowDatasourceSyn.java b/src/main/java/io/mycat/manager/response/ShowDatasourceSyn.java new file mode 100644 index 000000000..7c2c82620 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowDatasourceSyn.java @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.text.SimpleDateFormat; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.heartbeat.DBHeartbeat; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.config.MycatConfig; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.statistic.DataSourceSyncRecorder; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + + +/** + * @author songwie + */ +public class ShowDatasourceSyn { + + private static final int FIELD_COUNT = 12; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + private static final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + + + static { + int i = 0; + byte packetId = 0; + header.packetId 
= ++packetId; + + fields[i] = PacketUtil.getField("name", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("host", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("port", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("Master_Host", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("Master_Port", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("Master_Use", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("Seconds_Behind_Master", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("Slave_IO_Running", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("Slave_SQL_Running", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("Slave_IO_State", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("Connect_Retry", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("Last_IO_Error", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void response(ManagerConnection c,String stmt) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + + for (RowDataPacket row : getRows(c.getCharset())) { + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket 
lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // post write + c.write(buffer); + } + + private static List getRows(String charset) { + List list = new LinkedList(); + MycatConfig conf = MycatServer.getInstance().getConfig(); + // host nodes + Map dataHosts = conf.getDataHosts(); + for (PhysicalDBPool pool : dataHosts.values()) { + for (PhysicalDatasource ds : pool.getAllDataSources()) { + DBHeartbeat hb = ds.getHeartbeat(); + DataSourceSyncRecorder record = hb.getAsynRecorder(); + Map states = record.getRecords(); + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + if(!states.isEmpty()){ + row.add(StringUtil.encode(ds.getName(),charset)); + row.add(StringUtil.encode(ds.getConfig().getIp(),charset)); + row.add(LongUtil.toBytes(ds.getConfig().getPort())); + row.add(StringUtil.encode(states.get("Master_Host"),charset)); + row.add(LongUtil.toBytes(Long.valueOf(states.get("Master_Port")))); + row.add(StringUtil.encode(states.get("Master_Use"),charset)); + String secords = states.get("Seconds_Behind_Master"); + row.add(secords==null?null:LongUtil.toBytes(Long.valueOf(secords))); + row.add(StringUtil.encode(states.get("Slave_IO_Running"),charset)); + row.add(StringUtil.encode(states.get("Slave_SQL_Running"),charset)); + row.add(StringUtil.encode(states.get("Slave_IO_State"),charset)); + row.add(LongUtil.toBytes(Long.valueOf(states.get("Connect_Retry")))); + row.add(StringUtil.encode(states.get("Last_IO_Error"),charset)); + + list.add(row); + } + } + } + return list; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowDatasourceSynDetail.java b/src/main/java/io/mycat/manager/response/ShowDatasourceSynDetail.java new file mode 100644 index 000000000..c4825ff71 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowDatasourceSynDetail.java @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.heartbeat.DBHeartbeat; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.config.MycatConfig; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.parser.ManagerParseShow; +import io.mycat.statistic.DataSourceSyncRecorder; +import io.mycat.statistic.DataSourceSyncRecorder.Record; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + + +/** + * @author songwie + */ +public class ShowDatasourceSynDetail { + + private static final int FIELD_COUNT = 8; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("name", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("host", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("port", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("Master_Host", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("Master_Port", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("Master_Use", 
Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("TIME", Fields.FIELD_TYPE_DATETIME); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("Seconds_Behind_Master", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void response(ManagerConnection c,String stmt) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + + String name = ManagerParseShow.getWhereParameter(stmt); + for (RowDataPacket row : getRows(name,c.getCharset())) { + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // post write + c.write(buffer); + } + + private static List getRows(String name,String charset) { + List list = new LinkedList(); + MycatConfig conf = MycatServer.getInstance().getConfig(); + // host nodes + Map dataHosts = conf.getDataHosts(); + for (PhysicalDBPool pool : dataHosts.values()) { + for (PhysicalDatasource ds : pool.getAllDataSources()) { + DBHeartbeat hb = ds.getHeartbeat(); + DataSourceSyncRecorder record = hb.getAsynRecorder(); + Map states = record.getRecords(); + if(name.equals(ds.getName())){ + List data = record.getAsynRecords(); + for(Record r : data){ + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + + row.add(StringUtil.encode(ds.getName(),charset)); + row.add(StringUtil.encode(ds.getConfig().getIp(),charset)); + row.add(LongUtil.toBytes(ds.getConfig().getPort())); + row.add(StringUtil.encode(states.get("Master_Host"),charset)); + row.add(LongUtil.toBytes(Long.valueOf(states.get("Master_Port")))); + 
row.add(StringUtil.encode(states.get("Master_Use"),charset)); + //DateFormat非线程安全 + SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + String time = sdf.format(new Date(r.getTime())); + row.add(StringUtil.encode(time,charset)); + row.add(LongUtil.toBytes((Long)r.getValue())); + + list.add(row); + } + break; + } + + } + } + return list; + } +} diff --git a/src/main/java/io/mycat/manager/response/ShowDirectMemory.java b/src/main/java/io/mycat/manager/response/ShowDirectMemory.java new file mode 100644 index 000000000..207f43a7b --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowDirectMemory.java @@ -0,0 +1,343 @@ +package io.mycat.manager.response; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.buffer.BufferPool; +import io.mycat.buffer.NettyBufferPool; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.memory.MyCatMemory; +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.utils.JavaUtils; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.netty.buffer.PoolArenaMetric; +import io.netty.buffer.PoolChunkListMetric; +import io.netty.buffer.PoolChunkMetric; +import io.netty.buffer.PoolSubpageMetric; +import sun.rmi.runtime.Log; + +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * 实现show@@directmemory功能 + * + * @author zagnix + * @version 1.0 + * @create 2016-09-21 17:35 + */ + +public class ShowDirectMemory { + private static final int DETAILl_FIELD_COUNT = 3; + private static final ResultSetHeaderPacket detailHeader = PacketUtil.getHeader(DETAILl_FIELD_COUNT); + private static final FieldPacket[] detailFields = new 
FieldPacket[DETAILl_FIELD_COUNT]; + private static final EOFPacket detailEof = new EOFPacket(); + + + private static final int TOTAL_FIELD_COUNT = 5; + private static final ResultSetHeaderPacket totalHeader = PacketUtil.getHeader(TOTAL_FIELD_COUNT); + private static final FieldPacket[] totalFields = new FieldPacket[TOTAL_FIELD_COUNT]; + private static final EOFPacket totalEof = new EOFPacket(); + + private static int useOffHeapForMerge ; + private static int processorBufferPoolType; + private static BufferPool bufferPool ; + + static { + int i = 0; + byte packetId = 0; + detailHeader.packetId = ++packetId; + + detailFields[i] = PacketUtil.getField("THREAD_ID", Fields.FIELD_TYPE_VAR_STRING); + detailFields[i++].packetId = ++packetId; + + detailFields[i] = PacketUtil.getField("MEM_USE_TYPE", Fields.FIELD_TYPE_VAR_STRING); + detailFields[i++].packetId = ++packetId; + + detailFields[i] = PacketUtil.getField(" SIZE ", Fields.FIELD_TYPE_VAR_STRING); + detailFields[i++].packetId = ++packetId; + detailEof.packetId = ++packetId; + + + i = 0; + packetId = 0; + + totalHeader.packetId = ++packetId; + + totalFields[i] = PacketUtil.getField("MDIRECT_MEMORY_MAXED", Fields.FIELD_TYPE_VAR_STRING); + totalFields[i++].packetId = ++packetId; + + totalFields[i] = PacketUtil.getField("DIRECT_MEMORY_USED", Fields.FIELD_TYPE_VAR_STRING); + totalFields[i++].packetId = ++packetId; + + totalFields[i] = PacketUtil.getField("DIRECT_MEMORY_AVAILABLE", Fields.FIELD_TYPE_VAR_STRING); + totalFields[i++].packetId = ++packetId; + + totalFields[i] = PacketUtil.getField("SAFETY_FRACTION", Fields.FIELD_TYPE_VAR_STRING); + totalFields[i++].packetId = ++packetId; + + totalFields[i] = PacketUtil.getField("DIRECT_MEMORY_RESERVED", Fields.FIELD_TYPE_VAR_STRING); + totalFields[i++].packetId = ++packetId; + totalEof.packetId = ++packetId; + + } + + + public static void execute(ManagerConnection c, int showtype) { + useOffHeapForMerge = MycatServer.getInstance().getConfig(). 
+ getSystem().getUseOffHeapForMerge(); + processorBufferPoolType = MycatServer.getInstance().getConfig(). + getSystem().getProcessorBufferPoolType(); + bufferPool = MycatServer.getInstance().getBufferPool(); + + if (showtype == 1) { + showDirectMemoryTotal(c); + } else if (showtype == 2) { + showDirectMemoryDetail(c); + } + } + + + public static void showDirectMemoryDetail(ManagerConnection c) { + + ByteBuffer buffer = c.allocate(); + + // write header + buffer = detailHeader.write(buffer, c, true); + + // write fields + for (FieldPacket field : detailFields) { + buffer = field.write(buffer, c, true); + } + + // write eof + buffer = detailEof.write(buffer, c, true); + + // write rows + byte packetId = detailEof.packetId; + + ConcurrentHashMap bufferpoolUsageMap = bufferPool.getNetDirectMemoryUsage(); + + try { + + if (useOffHeapForMerge == 1) { + ConcurrentHashMap concurrentHashMap = MycatServer.getInstance(). + getMyCatMemory(). + getResultMergeMemoryManager().getDirectMemorUsage(); + for (Long key : concurrentHashMap.keySet()) { + + + RowDataPacket row = new RowDataPacket(DETAILl_FIELD_COUNT); + Long value = concurrentHashMap.get(key); + row.add(String.valueOf(key).getBytes(c.getCharset())); + /** + * 该DIRECTMEMORY内存被结果集处理使用了 + */ + row.add("MergeMemoryPool".getBytes(c.getCharset())); + row.add(value > 0 ? + JavaUtils.bytesToString2(value).getBytes(c.getCharset()) : "0".getBytes(c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c, true); + } + } + + if(processorBufferPoolType == 2){ + + + } else { + for (Long key : bufferpoolUsageMap.keySet()) { + RowDataPacket row = new RowDataPacket(DETAILl_FIELD_COUNT); + Long value = bufferpoolUsageMap.get(key); + row.add(String.valueOf(key).getBytes(c.getCharset())); + /** + * 该DIRECTMEMORY内存属于Buffer Pool管理的! + */ + row.add("NetWorkBufferPool".getBytes(c.getCharset())); + row.add(value > 0 ? 
+ JavaUtils.bytesToString2(value).getBytes(c.getCharset()) : "0".getBytes(c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c, true); + } + } + + } catch (UnsupportedEncodingException e) { + e.printStackTrace(); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c, true); + + // write buffer + c.write(buffer); + + } + + + public static void showDirectMemoryTotal(ManagerConnection c) { + + ByteBuffer buffer = c.allocate(); + + // write header + buffer = totalHeader.write(buffer, c, true); + + // write fields + for (FieldPacket field : totalFields) { + buffer = field.write(buffer, c, true); + } + // write eof + buffer = totalEof.write(buffer, c, true); + // write rows + byte packetId = totalEof.packetId; + + long usedforMerge = 0; + long usedforNetwork = 0; + long chunkSizeBytes = 0; + int chunkCount = 0; + + if (processorBufferPoolType == 2 && bufferPool instanceof NettyBufferPool) { + + /**计算逻辑就是,1.先计算PoolChunk分配的页,表示已经消耗的内存, + 2.然后计算小于一页情况,记录小于一页内存使用情况, + 上面二者合起来就是整个netty 使用的内存, + 已经分配了,但是没有使用的内存的情况*/ + List list = ((NettyBufferPool) bufferPool).getAllocator().getAlloc().directArenas(); + chunkSizeBytes = ((NettyBufferPool) bufferPool).getAllocator().getChunkSize(); + long pageSize = ((NettyBufferPool) bufferPool).getAllocator().getPageSize(); + + long chunksUsedBytes = 0; + + /**PoolArenas*/ + for (PoolArenaMetric pool : list) { + List pcks = pool.chunkLists(); + + /**针对PoolChunkList*/ + for (PoolChunkListMetric pck : pcks) { + Iterator it = pck.iterator(); + while (it.hasNext()) { + chunkCount++; + PoolChunkMetric p = it.next(); + chunksUsedBytes += (chunkSizeBytes - p.freeBytes()); + } + } + + List tinySubpages = pool.tinySubpages(); + for (PoolSubpageMetric tiny : tinySubpages) { + chunksUsedBytes -= (pageSize - (tiny.maxNumElements() - tiny.numAvailable()) * tiny.elementSize()); + } + List smallSubpages = pool.smallSubpages(); + for (PoolSubpageMetric 
small : smallSubpages) { + chunksUsedBytes -= (pageSize - (small.maxNumElements() - small.numAvailable()) * small.elementSize()); + } + } + + usedforNetwork = chunkCount * chunkSizeBytes; + } + + ConcurrentHashMap bufferpoolUsageMap = bufferPool.getNetDirectMemoryUsage(); + + RowDataPacket row = new RowDataPacket(TOTAL_FIELD_COUNT); + + try { + + /** + * 通过-XX:MaxDirectMemorySize=2048m设置的值 + */ + row.add(JavaUtils.bytesToString2(Platform.getMaxDirectMemory()).getBytes(c.getCharset())); + + if (useOffHeapForMerge == 1) { + + /** + * 结果集合并时,总共消耗的DirectMemory内存 + */ + ConcurrentHashMap concurrentHashMap = MycatServer.getInstance(). + getMyCatMemory(). + getResultMergeMemoryManager().getDirectMemorUsage(); + for (Map.Entry entry : concurrentHashMap.entrySet()) { + usedforMerge += entry.getValue(); + } + } + + /** + * 网络packet处理,在buffer pool 已经使用DirectMemory内存 + */ + if (processorBufferPoolType == 2) { + usedforNetwork = chunkSizeBytes * chunkCount; + } else { + for (Map.Entry entry : bufferpoolUsageMap.entrySet()) { + usedforNetwork += entry.getValue(); + } + } + + row.add(JavaUtils.bytesToString2(usedforMerge + usedforNetwork).getBytes(c.getCharset())); + + + long totalAvailable = 0; + + if (useOffHeapForMerge == 1) { + /** + * 设置使用off-heap内存处理结果集时,防止客户把MaxDirectMemorySize设置到物理内存的极限。 + * Mycat能使用的DirectMemory是MaxDirectMemorySize*DIRECT_SAFETY_FRACTION大小, + * DIRECT_SAFETY_FRACTION为安全系数,为OS,Heap预留空间,避免因大结果集造成系统物理内存被耗尽! 
+ */ + totalAvailable = (long) (Platform.getMaxDirectMemory() * MyCatMemory.DIRECT_SAFETY_FRACTION); + } else { + totalAvailable = Platform.getMaxDirectMemory(); + } + + row.add(JavaUtils.bytesToString2(totalAvailable - usedforMerge - usedforNetwork) + .getBytes(c.getCharset())); + + if (useOffHeapForMerge == 1) { + /** + * 输出安全系统DIRECT_SAFETY_FRACTION + */ + row.add(("" + MyCatMemory.DIRECT_SAFETY_FRACTION) + .getBytes(c.getCharset())); + } else { + row.add(("1.0") + .getBytes(c.getCharset())); + } + + + long resevedForOs = 0; + + if (useOffHeapForMerge == 1) { + /** + * 预留OS系统部分内存!!! + */ + resevedForOs = (long) ((1 - MyCatMemory.DIRECT_SAFETY_FRACTION) * + (Platform.getMaxDirectMemory() - + 2 * MycatServer.getInstance().getTotalNetWorkBufferSize())); + } + + row.add(resevedForOs > 0 ? JavaUtils.bytesToString2(resevedForOs).getBytes(c.getCharset()) : "0".getBytes(c.getCharset())); + + row.packetId = ++packetId; + buffer = row.write(buffer, c, true); + + } catch (UnsupportedEncodingException e) { + e.printStackTrace(); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c, true); + + // write buffer + c.write(buffer); + + } + + +} diff --git a/src/main/java/io/mycat/server/response/ShowHeartbeat.java b/src/main/java/io/mycat/manager/response/ShowHeartbeat.java similarity index 77% rename from src/main/java/io/mycat/server/response/ShowHeartbeat.java rename to src/main/java/io/mycat/manager/response/ShowHeartbeat.java index 02c75a5d1..93b0fa2f4 100644 --- a/src/main/java/io/mycat/server/response/ShowHeartbeat.java +++ b/src/main/java/io/mycat/manager/response/ShowHeartbeat.java @@ -21,37 +21,35 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.response; +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; import io.mycat.MycatServer; -import io.mycat.backend.PhysicalDBPool; -import io.mycat.backend.PhysicalDatasource; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; import io.mycat.backend.heartbeat.DBHeartbeat; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.config.MycatConfig; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; import io.mycat.util.IntegerUtil; import io.mycat.util.LongUtil; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - /** * @author mycat */ public class ShowHeartbeat { private static final int FIELD_COUNT = 11; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; private static final EOFPacket eof = new EOFPacket(); static { @@ -83,12 +81,10 @@ public class ShowHeartbeat { fields[i] = PacketUtil.getField("TIMEOUT", Fields.FIELD_TYPE_LONGLONG); fields[i++].packetId = ++packetId; - fields[i] = PacketUtil.getField("EXECUTE_TIME", - 
Fields.FIELD_TYPE_VAR_STRING); + fields[i] = PacketUtil.getField("EXECUTE_TIME",Fields.FIELD_TYPE_VAR_STRING); fields[i++].packetId = ++packetId; - fields[i] = PacketUtil.getField("LAST_ACTIVE_TIME", - Fields.FIELD_TYPE_DATETIME); + fields[i] = PacketUtil.getField("LAST_ACTIVE_TIME",Fields.FIELD_TYPE_VAR_STRING); fields[i++].packetId = ++packetId; fields[i] = PacketUtil.getField("STOP", Fields.FIELD_TYPE_VAR_STRING); @@ -97,34 +93,34 @@ public class ShowHeartbeat { eof.packetId = ++packetId; } - public static void execute(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + public static void response(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + // write header - header.write(bufferArray); + buffer = header.write(buffer, c,true); // write fields for (FieldPacket field : fields) { - field.write(bufferArray); + buffer = field.write(buffer, c,true); } // write eof - eof.write(bufferArray); + buffer = eof.write(buffer, c,true); // write rows byte packetId = eof.packetId; for (RowDataPacket row : getRows()) { row.packetId = ++packetId; - row.write(bufferArray); + buffer = row.write(buffer, c,true); } // write last eof EOFPacket lastEof = new EOFPacket(); lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + buffer = lastEof.write(buffer, c,true); // post write - c.write(bufferArray); + c.write(buffer); } private static List getRows() { @@ -143,14 +139,12 @@ private static List getRows() { row.add(IntegerUtil.toBytes(ds.getConfig().getPort())); row.add(IntegerUtil.toBytes(hb.getStatus())); row.add(IntegerUtil.toBytes(hb.getErrorCount())); - row.add(hb.isChecking() ? "checking".getBytes() : "idle" - .getBytes()); + row.add(hb.isChecking() ? "checking".getBytes() : "idle".getBytes()); row.add(LongUtil.toBytes(hb.getTimeout())); row.add(hb.getRecorder().get().getBytes()); String lat = hb.getLastActiveTime(); row.add(lat == null ? null : lat.getBytes()); - row.add(hb.isStop() ? 
"true".getBytes() : "false" - .getBytes()); + row.add(hb.isStop() ? "true".getBytes() : "false".getBytes()); } else { row.add(null); row.add(null); diff --git a/src/main/java/io/mycat/manager/response/ShowHeartbeatDetail.java b/src/main/java/io/mycat/manager/response/ShowHeartbeatDetail.java new file mode 100644 index 000000000..e1538e5be --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowHeartbeatDetail.java @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Queue; + +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.heartbeat.DBHeartbeat; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.config.MycatConfig; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.parser.ManagerParseHeartbeat; +import io.mycat.route.parser.util.Pair; +import io.mycat.statistic.HeartbeatRecorder; +import io.mycat.util.IntegerUtil; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + + +/** + * @author songwie + */ +public class ShowHeartbeatDetail { + + private static final int FIELD_COUNT = 6; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("NAME", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("TYPE", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("HOST", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("PORT", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("TIME", Fields.FIELD_TYPE_DATETIME); + fields[i++].packetId = ++packetId; + + fields[i] = 
PacketUtil.getField("EXECUTE_TIME", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void response(ManagerConnection c,String stmt) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + Pair pair = ManagerParseHeartbeat.getPair(stmt); + String name = pair.getValue(); + for (RowDataPacket row : getRows(name,c.getCharset())) { + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // post write + c.write(buffer); + } + private static List getRows(String name,String charset) { + List list = new LinkedList(); + MycatConfig conf = MycatServer.getInstance().getConfig(); + // host nodes + String type = ""; + String ip = ""; + int port = 0; + DBHeartbeat hb = null; + + Map dataHosts = conf.getDataHosts(); + for (PhysicalDBPool pool : dataHosts.values()) { + for (PhysicalDatasource ds : pool.getAllDataSources()) { + if(name.equals(ds.getName())){ + hb = ds.getHeartbeat(); + type = ds.getConfig().getDbType(); + ip = ds.getConfig().getIp(); + port = ds.getConfig().getPort(); + break; + } + } + } + if(hb!=null){ + SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + Queue heatbeartRecorders = hb.getRecorder().getRecordsAll(); + for(HeartbeatRecorder.Record record : heatbeartRecorders){ + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(name,charset)); + row.add(StringUtil.encode(type,charset)); + row.add(StringUtil.encode(ip,charset)); + row.add(IntegerUtil.toBytes(port)); + long time = record.getTime(); + String timeStr = sdf.format(new Date(time)); + 
row.add(StringUtil.encode(timeStr,charset)); + row.add(LongUtil.toBytes(record.getValue())); + + list.add(row); + } + }else{ + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(null); + row.add(null); + row.add(null); + row.add(null); + row.add(null); + row.add(null); + list.add(row); + } + + return list; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowHelp.java b/src/main/java/io/mycat/manager/response/ShowHelp.java new file mode 100644 index 000000000..7c3bf869f --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowHelp.java @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.StringUtil; + +/** + * 打印MycatServer所支持的语句 + * + * @author mycat + * @author mycat + */ +public final class ShowHelp { + + private static final int FIELD_COUNT = 2; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("STATEMENT", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("DESCRIPTION", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + for (String key : keys) { + RowDataPacket row = getRow(key, helps.get(key), c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // post write + c.write(buffer); + } + + 
private static RowDataPacket getRow(String stmt, String desc, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(stmt, charset)); + row.add(StringUtil.encode(desc, charset)); + return row; + } + + private static final Map helps = new LinkedHashMap(); + private static final List keys = new LinkedList(); + static { + // show + helps.put("show @@time.current", "Report current timestamp"); + helps.put("show @@time.startup", "Report startup timestamp"); + helps.put("show @@version", "Report Mycat Server version"); + helps.put("show @@server", "Report server status"); + helps.put("show @@threadpool", "Report threadPool status"); + helps.put("show @@database", "Report databases"); + helps.put("show @@datanode", "Report dataNodes"); + helps.put("show @@datanode where schema = ?", "Report dataNodes"); + helps.put("show @@datasource", "Report dataSources"); + helps.put("show @@datasource where dataNode = ?", "Report dataSources"); + helps.put("show @@datasource.synstatus", "Report datasource data synchronous"); + helps.put("show @@datasource.syndetail where name=?", "Report datasource data synchronous detail"); + helps.put("show @@datasource.cluster", "Report datasource galera cluster variables"); + helps.put("show @@processor", "Report processor status"); + helps.put("show @@command", "Report commands status"); + helps.put("show @@connection", "Report connection status"); + helps.put("show @@cache", "Report system cache usage"); + helps.put("show @@backend", "Report backend connection status"); + helps.put("show @@session", "Report front session details"); + helps.put("show @@connection.sql", "Report connection sql"); + helps.put("show @@sql.execute", "Report execute status"); + helps.put("show @@sql.detail where id = ?", "Report execute detail status"); + helps.put("show @@sql", "Report SQL list"); + // helps.put("show @@sql where id = ?", "Report specify SQL"); + helps.put("show @@sql.high", "Report High Frequency SQL"); + 
helps.put("show @@sql.slow", "Report slow SQL"); + helps.put("show @@sql.resultset", "Report BIG RESULTSET SQL"); + helps.put("show @@sql.sum", "Report User RW Stat "); + helps.put("show @@sql.sum.user", "Report User RW Stat "); + helps.put("show @@sql.sum.table", "Report Table RW Stat "); + helps.put("show @@parser", "Report parser status"); + helps.put("show @@router", "Report router status"); + helps.put("show @@heartbeat", "Report heartbeat status"); + helps.put("show @@heartbeat.detail where name=?", "Report heartbeat current detail"); + helps.put("show @@slow where schema = ?", "Report schema slow sql"); + helps.put("show @@slow where datanode = ?", "Report datanode slow sql"); + helps.put("show @@sysparam", "Report system param"); + helps.put("show @@syslog limit=?", "Report system mycat.log"); + helps.put("show @@white", "show mycat white host "); + helps.put("show @@white.set=?,?", "set mycat white host,[ip,user]"); + helps.put("show @@directmemory=1 or 2", "show mycat direct memory usage"); + + // switch + helps.put("switch @@datasource name:index", "Switch dataSource"); + + // kill + helps.put("kill @@connection id1,id2,...", "Kill the specified connections"); + + // stop + helps.put("stop @@heartbeat name:time", "Pause dataNode heartbeat"); + + // reload + helps.put("reload @@config", "Reload basic config from file"); + helps.put("reload @@config_all", "Reload all config from file"); + helps.put("reload @@route", "Reload route config from file"); + helps.put("reload @@user", "Reload user config from file"); + helps.put("reload @@sqlslow=", "Set Slow SQL Time(ms)"); + helps.put("reload @@user_stat", "Reset show @@sql @@sql.sum @@sql.slow"); + // rollback + helps.put("rollback @@config", "Rollback all config from memory"); + helps.put("rollback @@route", "Rollback route config from memory"); + helps.put("rollback @@user", "Rollback user config from memory"); + + // open/close sql stat + helps.put("reload @@sqlstat=open", "Open real-time sql stat 
analyzer"); + helps.put("reload @@sqlstat=close", "Close real-time sql stat analyzer"); + + // offline/online + helps.put("offline", "Change MyCat status to OFF"); + helps.put("online", "Change MyCat status to ON"); + + // clear + helps.put("clear @@slow where schema = ?", "Clear slow sql by schema"); + helps.put("clear @@slow where datanode = ?", "Clear slow sql by datanode"); + + // list sort + keys.addAll(helps.keySet()); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowParser.java b/src/main/java/io/mycat/manager/response/ShowParser.java new file mode 100644 index 000000000..2b90e5e08 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowParser.java @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; + +/** + * @author mycat + */ +public final class ShowParser { + + private static final int FIELD_COUNT = 7; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("PROCESSOR_NAME", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("PARSE_COUNT", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("TIME_COUNT", Fields.FIELD_TYPE_DOUBLE); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("MAX_PARSE_TIME", Fields.FIELD_TYPE_DOUBLE); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("MAX_PARSE_SQL_ID", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("CACHED_COUNT", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("CACHE_SIZE", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + for (int 
i = 0; i < 1; i++) { + RowDataPacket row = getRow(c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(null); + row.add(null); + row.add(null); + row.add(null); + row.add(null); + row.add(null); + row.add(null); + return row; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowProcessor.java b/src/main/java/io/mycat/manager/response/ShowProcessor.java new file mode 100644 index 000000000..5f9a422eb --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowProcessor.java @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.buffer.BufferPool; +import io.mycat.buffer.DirectByteBufferPool; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.NIOProcessor; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.IntegerUtil; +import io.mycat.util.LongUtil; + +/** + * 查看处理器状态 + * + * @author mycat + * @author mycat + */ +public final class ShowProcessor { + + private static final int FIELD_COUNT = 12; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("NAME", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("NET_IN", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("NET_OUT", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("REACT_COUNT", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("R_QUEUE", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("W_QUEUE", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("FREE_BUFFER", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("TOTAL_BUFFER", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("BU_PERCENT", 
Fields.FIELD_TYPE_TINY); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("BU_WARNS", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("FC_COUNT", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("BC_COUNT", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + for (NIOProcessor p : MycatServer.getInstance().getProcessors()) { + RowDataPacket row = getRow(p, c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(NIOProcessor processor, String charset) { + BufferPool bufferPool=processor.getBufferPool(); + long bufferSize=bufferPool.size(); + long bufferCapacity=bufferPool.capacity(); + long bufferSharedOpts=bufferPool.getSharedOptsCount(); + long bufferUsagePercent=(bufferCapacity-bufferSize)*100/bufferCapacity; + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(processor.getName().getBytes()); + row.add(LongUtil.toBytes(processor.getNetInBytes())); + row.add(LongUtil.toBytes(processor.getNetOutBytes())); + row.add(LongUtil.toBytes(0)); + row.add(IntegerUtil.toBytes(0)); + row.add(IntegerUtil.toBytes(processor.getWriteQueueSize())); + row.add(LongUtil.toBytes(bufferSize)); + row.add(LongUtil.toBytes(bufferCapacity)); + row.add(LongUtil.toBytes(bufferUsagePercent)); + 
row.add(LongUtil.toBytes(bufferSharedOpts)); + row.add(IntegerUtil.toBytes(processor.getFrontends().size())); + row.add(IntegerUtil.toBytes(processor.getBackends().size())); + return row; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowRouter.java b/src/main/java/io/mycat/manager/response/ShowRouter.java new file mode 100644 index 000000000..05b8829d8 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowRouter.java @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.text.DecimalFormat; +import java.text.NumberFormat; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.NIOProcessor; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; + +/** + * @author mycat + */ +public final class ShowRouter { + + private static final int FIELD_COUNT = 5; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("PROCESSOR_NAME", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("ROUTE_COUNT", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("TIME_COUNT", Fields.FIELD_TYPE_FLOAT); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("MAX_ROUTE_TIME", Fields.FIELD_TYPE_FLOAT); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("MAX_ROUTE_SQL_ID", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + for (NIOProcessor p : MycatServer.getInstance().getProcessors()) { + RowDataPacket row = getRow(p, 
c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static final NumberFormat nf = DecimalFormat.getInstance(); + static { + nf.setMaximumFractionDigits(3); + } + + private static RowDataPacket getRow(NIOProcessor processor, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(processor.getName().getBytes()); + row.add(null); + row.add(null); + row.add(null); + row.add(null); + return row; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowSQL.java b/src/main/java/io/mycat/manager/response/ShowSQL.java new file mode 100644 index 000000000..536fe676d --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowSQL.java @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.statistic.stat.UserSqlLastStat; +import io.mycat.statistic.stat.UserStat; +import io.mycat.statistic.stat.UserStatAnalyzer; + +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + + +/** + * 查询用户最近执行的SQL记录 + * + * @author mycat + * @author zhuam + */ +public final class ShowSQL { + + private static final int FIELD_COUNT = 5; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("ID", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("USER", Fields.FIELD_TYPE_VARCHAR); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("START_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("EXECUTE_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("SQL", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c, boolean isClear) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for 
(FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + Map statMap = UserStatAnalyzer.getInstance().getUserStatMap(); + for (UserStat userStat : statMap.values()) { + String user = userStat.getUser(); + List sqls = userStat.getSqlLastStat().getSqls(); + int i = 1; + for (UserSqlLastStat.SqlLast sqlLast : sqls) { + if (sqlLast != null) { + RowDataPacket row = getRow(user, sqlLast, i, c.getCharset()); + row.packetId = ++packetId; + i++; + buffer = row.write(buffer, c,true); + } + } + + //读取SQL监控后清理 + if ( isClear ) { + userStat.getSqlLastStat().clear(); + } + } + + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(String user, UserSqlLastStat.SqlLast sql, int idx, String charset) { + + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(LongUtil.toBytes(idx)); + row.add( StringUtil.encode( user, charset) ); + row.add( LongUtil.toBytes( sql.getStartTime() ) ); + row.add( LongUtil.toBytes( sql.getExecuteTime() ) ); + row.add( StringUtil.encode( sql.getSql(), charset) ); + return row; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowSQLCondition.java b/src/main/java/io/mycat/manager/response/ShowSQLCondition.java new file mode 100644 index 000000000..b03284d01 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowSQLCondition.java @@ -0,0 +1,116 @@ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import 
io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.statistic.stat.QueryConditionAnalyzer; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + +/** + * SQL 查询条件 值统计 + * + * @author zhuam + * + */ +public class ShowSQLCondition { + + private static final int FIELD_COUNT = 4; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("ID", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("KEY", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("VALUE", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("COUNT", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + + String key = QueryConditionAnalyzer.getInstance().getKey(); + List> list = QueryConditionAnalyzer.getInstance().getValues(); + if ( list != null ) { + + int size = list.size(); + long total = 0L; + + for (int i = 0; i < size; i++) { + Map.Entry entry = list.get(i); + Object value = entry.getKey(); + Long count = entry.getValue().get(); + total += count; + + RowDataPacket row = getRow(i, key, value.toString(), count, c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + 
RowDataPacket vk_row = getRow(size + 1, key + ".valuekey", "size", size, c.getCharset()); + vk_row.packetId = ++packetId; + buffer = vk_row.write(buffer, c,true); + + RowDataPacket vc_row = getRow(size + 2, key + ".valuecount", "total", total, c.getCharset()); + vc_row.packetId = ++packetId; + buffer = vc_row.write(buffer, c,true); + + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(int i, String key, String value, long count, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add( LongUtil.toBytes( i ) ); + row.add( StringUtil.encode(key, charset) ); + row.add( StringUtil.encode(value, charset) ); + row.add( LongUtil.toBytes( count ) ); + return row; + } + + +} diff --git a/src/main/java/io/mycat/manager/response/ShowSQLDetail.java b/src/main/java/io/mycat/manager/response/ShowSQLDetail.java new file mode 100644 index 000000000..33a98cc0d --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowSQLDetail.java @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.text.DecimalFormat; +import java.text.NumberFormat; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + +/** + * 查询指定SQL在各个pool中的执行情况 + * + * @author mycat + * @author mycat + */ +public final class ShowSQLDetail { + + private static final NumberFormat nf = DecimalFormat.getInstance(); + private static final int FIELD_COUNT = 5; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + nf.setMaximumFractionDigits(3); + + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("DATA_SOURCE", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("EXECUTE", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("TIME", Fields.FIELD_TYPE_DOUBLE); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("LAST_EXECUTE_TIMESTAMP", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("LAST_TIME", Fields.FIELD_TYPE_DOUBLE); + fields[i++].packetId = ++packetId; + + 
eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c, long sql) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + for (int i = 0; i < 3; i++) { + RowDataPacket row = getRow(sql, c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(long sql, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add("mysql_1".getBytes()); + row.add(LongUtil.toBytes(123L)); + row.add(StringUtil.encode(nf.format(2.3), charset)); + row.add(LongUtil.toBytes(1279188420682L)); + row.add(StringUtil.encode(nf.format(3.42), charset)); + return row; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowSQLExecute.java b/src/main/java/io/mycat/manager/response/ShowSQLExecute.java new file mode 100644 index 000000000..695c718e2 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowSQLExecute.java @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.text.DecimalFormat; +import java.text.NumberFormat; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + +/** + * 查询各SQL在所有pool中的执行情况 + * + * @author mycat + */ +public final class ShowSQLExecute { + + private static final NumberFormat nf = DecimalFormat.getInstance(); + private static final int FIELD_COUNT = 5; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + nf.setMaximumFractionDigits(3); + + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("SQL_ID", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("EXECUTE", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("TIME", 
Fields.FIELD_TYPE_DOUBLE); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("MAX_TIME", Fields.FIELD_TYPE_DOUBLE); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("MIN_TIME", Fields.FIELD_TYPE_DOUBLE); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + for (int i = 0; i < 3; i++) { + RowDataPacket row = getRow(1000 * (i + 1), c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(long id, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(LongUtil.toBytes(id)); + row.add(LongUtil.toBytes(100L)); + row.add(StringUtil.encode(nf.format(898.9), charset)); + row.add(StringUtil.encode(nf.format(8.8), charset)); + row.add(StringUtil.encode(nf.format(1.0), charset)); + return row; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowSQLHigh.java b/src/main/java/io/mycat/manager/response/ShowSQLHigh.java new file mode 100644 index 000000000..32720303f --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowSQLHigh.java @@ -0,0 +1,126 @@ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import 
io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.statistic.stat.SqlFrequency; +import io.mycat.statistic.stat.UserStat; +import io.mycat.statistic.stat.UserStatAnalyzer; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + +/** + * 查询高频 SQL + * + * @author zhuam + * + */ +public final class ShowSQLHigh { + + private static final int FIELD_COUNT = 9; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("ID", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("USER", Fields.FIELD_TYPE_VARCHAR); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("FREQUENCY", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("AVG_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("MAX_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("MIN_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("EXECUTE_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("LAST_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("SQL", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c, boolean isClear) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for 
(FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + + Map statMap = UserStatAnalyzer.getInstance().getUserStatMap(); + for (UserStat userStat : statMap.values()) { + String user = userStat.getUser(); + List list=userStat.getSqlHigh().getSqlFrequency( isClear ); + if ( list != null ) { + int i = 1; + for (SqlFrequency sqlFrequency : list) { + if(sqlFrequency != null){ + RowDataPacket row = getRow(i, user, sqlFrequency.getSql(), sqlFrequency.getCount(), + sqlFrequency.getAvgTime(), sqlFrequency.getMaxTime(), sqlFrequency.getMinTime(), + sqlFrequency.getExecuteTime(), sqlFrequency.getLastTime(), c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + i++; + } + } + } + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(int i, String user, String sql, long count, long avgTime, long maxTime, + long minTime, long executTime, long lastTime, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(LongUtil.toBytes(i)); + row.add(StringUtil.encode(user, charset)); + row.add(LongUtil.toBytes(count)); + row.add(LongUtil.toBytes(avgTime)); + row.add(LongUtil.toBytes(maxTime)); + row.add(LongUtil.toBytes(minTime)); + row.add(LongUtil.toBytes(executTime)); + row.add(LongUtil.toBytes(lastTime)); + row.add(StringUtil.encode(sql, charset)); + return row; + } + + +} diff --git a/src/main/java/io/mycat/manager/response/ShowSQLLarge.java b/src/main/java/io/mycat/manager/response/ShowSQLLarge.java new file mode 100644 index 000000000..a6d75d6a0 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowSQLLarge.java @@ -0,0 +1,110 @@ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.util.List; +import 
java.util.Map; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.statistic.stat.UserSqlLargeStat; +import io.mycat.statistic.stat.UserStat; +import io.mycat.statistic.stat.UserStatAnalyzer; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + +/** + * + * 查询每个用户大集合返回的 SQL + * + * @author zhuam + * + */ +public class ShowSQLLarge { + + private static final int FIELD_COUNT = 5; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("USER", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("ROWS", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("START_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("EXECUTE_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("SQL", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c, boolean isClear) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + Map statMap = UserStatAnalyzer.getInstance().getUserStatMap(); + for 
(UserStat userStat : statMap.values()) { + String user = userStat.getUser(); + + List sqls = userStat.getSqlLargeRowStat().getSqls(); + for (UserSqlLargeStat.SqlLarge sql : sqls) { + if (sql != null) { + RowDataPacket row = getRow(user, sql, c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + } + + if ( isClear ) { + userStat.getSqlLargeRowStat().clear();//读取大结果集SQL后,清理 + } + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(String user, io.mycat.statistic.stat.UserSqlLargeStat.SqlLarge sql, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add( StringUtil.encode(user, charset) ); + row.add( LongUtil.toBytes( sql.getSqlRows() ) ); + row.add( LongUtil.toBytes(sql.getStartTime() ) ); + row.add( LongUtil.toBytes(sql.getExecuteTime() ) ); + row.add( StringUtil.encode(sql.getSql(), charset) ); + return row; + } +} + diff --git a/src/main/java/io/mycat/manager/response/ShowSQLSlow.java b/src/main/java/io/mycat/manager/response/ShowSQLSlow.java new file mode 100644 index 000000000..b49fc5aff --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowSQLSlow.java @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.statistic.SQLRecord; +import io.mycat.statistic.stat.UserStat; +import io.mycat.statistic.stat.UserStatAnalyzer; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + +/** + * 查询每个用户的执行时间超过设定阈值的SQL, 默认TOP10 + * + * @author mycat + * @author zhuam + */ +public final class ShowSQLSlow { + + private static final int FIELD_COUNT = 5; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("USER", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("DATASOURCE", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("START_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("EXECUTE_TIME", 
Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("SQL", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c, boolean isClear) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + Map statMap = UserStatAnalyzer.getInstance().getUserStatMap(); + for (UserStat userStat : statMap.values()) { + String user = userStat.getUser(); + List keyList = userStat.getSqlRecorder().getRecords(); + for (SQLRecord key : keyList) { + if (key != null) { + RowDataPacket row = getRow(user, key, c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + } + + if ( isClear ) { + userStat.getSqlRecorder().clear();//读取慢SQL后,清理 + } + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(String user, SQLRecord sql, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add( StringUtil.encode(user, charset) ); + row.add( StringUtil.encode(sql.dataNode, charset) ); + row.add( LongUtil.toBytes(sql.startTime) ); + row.add( LongUtil.toBytes(sql.executeTime) ); + row.add( StringUtil.encode(sql.statement, charset) ); + return row; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowSQLSumTable.java b/src/main/java/io/mycat/manager/response/ShowSQLSumTable.java new file mode 100644 index 000000000..8b341a947 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowSQLSumTable.java @@ -0,0 +1,148 @@ +package io.mycat.manager.response; + 
+import java.nio.ByteBuffer; +import java.text.DecimalFormat; +import java.util.List; +import java.util.Map; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.statistic.stat.TableStat; +import io.mycat.statistic.stat.TableStatAnalyzer; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + +public class ShowSQLSumTable { + + private static DecimalFormat decimalFormat = new DecimalFormat("0.00"); + + private static final int FIELD_COUNT = 8; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("ID", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("TABLE", Fields.FIELD_TYPE_VARCHAR); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("R", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("W", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("R%", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("RELATABLE", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("RELACOUNT", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("LAST_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c, boolean 
isClear) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + + /* + int i=0; + Map statMap = TableStatAnalyzer.getInstance().getTableStatMap(); + for (TableStat tableStat : statMap.values()) { + i++; + RowDataPacket row = getRow(tableStat,i, c.getCharset());//getRow(sqlStat,sql, c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + */ + List list = TableStatAnalyzer.getInstance().getTableStats(isClear); + if ( list != null ) { + int i = 1; + for (TableStat tableStat : list) { + if(tableStat!=null){ + RowDataPacket row = getRow(tableStat,i, c.getCharset()); + i++; + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + } + } + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(TableStat tableStat, long idx, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(LongUtil.toBytes(idx)); + if (tableStat == null){ + row.add(StringUtil.encode(("not fond"), charset)); + return row; + } + + String table = tableStat.getTable(); + long R = tableStat.getRCount(); + long W = tableStat.getWCount(); + String __R = decimalFormat.format( 1.0D * R / (R + W) ); + + + StringBuffer relaTableNameBuffer = new StringBuffer(); + StringBuffer relaTableCountBuffer = new StringBuffer(); + List relaTables = tableStat.getRelaTables(); + if ( !relaTables.isEmpty() ) { + + for(TableStat.RelaTable relaTable: relaTables) { + relaTableNameBuffer.append( relaTable.getTableName() ).append(", "); + relaTableCountBuffer.append( relaTable.getCount() ).append(", "); + } + + } else { + 
relaTableNameBuffer.append("NULL"); + relaTableCountBuffer.append("NULL"); + } + + row.add( StringUtil.encode( table, charset) ); + row.add( LongUtil.toBytes( R ) ); + row.add( LongUtil.toBytes( W ) ); + row.add( StringUtil.encode( String.valueOf( __R ), charset) ); + row.add( StringUtil.encode( relaTableNameBuffer.toString(), charset) ); + row.add( StringUtil.encode( relaTableCountBuffer.toString(), charset) ); + row.add( LongUtil.toBytes( tableStat.getLastExecuteTime() ) ); + + return row; + } + +} diff --git a/src/main/java/io/mycat/manager/response/ShowSQLSumUser.java b/src/main/java/io/mycat/manager/response/ShowSQLSumUser.java new file mode 100644 index 000000000..21cab621a --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowSQLSumUser.java @@ -0,0 +1,147 @@ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.text.DecimalFormat; +import java.util.Map; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.statistic.stat.UserSqlRWStat; +import io.mycat.statistic.stat.UserStat; +import io.mycat.statistic.stat.UserStatAnalyzer; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + +/** + * 查询用户的 SQL 执行情况 + * + * 1、用户 R/W数、读占比、并发数 + * 2、请求时间范围 + * 3、请求的耗时范围 + * 4、Net 进/出 字节数 + * + * @author zhuam + */ +public class ShowSQLSumUser { + + private static DecimalFormat decimalFormat = new DecimalFormat("0.00"); + + private static final int FIELD_COUNT = 11; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = 
PacketUtil.getField("ID", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("USER", Fields.FIELD_TYPE_VARCHAR); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("R", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("W", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("R%", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("MAX", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("NET_IN", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("NET_OUT", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + //22-06h, 06-13h, 13-18h, 18-22h + fields[i] = PacketUtil.getField("TIME_COUNT", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + //<10ms, 10ms-200ms, 200ms-1s, >1s + fields[i] = PacketUtil.getField("TTL_COUNT", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("LAST_TIME", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c, boolean isClear) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + int i=0; + + Map statMap = UserStatAnalyzer.getInstance().getUserStatMap(); + for (UserStat userStat : statMap.values()) { + i++; + RowDataPacket row = getRow(userStat,i, c.getCharset());//getRow(sqlStat,sql, c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + if ( isClear 
) { + userStat.clearRwStat(); + } + } + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(UserStat userStat, long idx, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(LongUtil.toBytes(idx)); + if (userStat == null){ + row.add(StringUtil.encode(("not fond"), charset)); + return row; + } + + String user = userStat.getUser(); + UserSqlRWStat rwStat = userStat.getRWStat(); + long R = rwStat.getRCount(); + long W = rwStat.getWCount(); + String __R = decimalFormat.format( 1.0D * R / (R + W) ); + int MAX = rwStat.getConcurrentMax(); + + row.add( StringUtil.encode( user, charset) ); + row.add( LongUtil.toBytes( R ) ); + row.add( LongUtil.toBytes( W ) ); + row.add( StringUtil.encode( String.valueOf( __R ), charset) ); + row.add( StringUtil.encode( String.valueOf( MAX ), charset) ); + row.add( LongUtil.toBytes( rwStat.getNetInBytes() ) ); + row.add( LongUtil.toBytes( rwStat.getNetOutBytes() ) ); + row.add( StringUtil.encode( rwStat.getExecuteHistogram().toString(), charset) ); + row.add( StringUtil.encode( rwStat.getTimeHistogram().toString(), charset) ); + row.add( LongUtil.toBytes( rwStat.getLastExecuteTime() ) ); + + return row; + } + +} diff --git a/src/main/java/io/mycat/server/response/ShowServer.java b/src/main/java/io/mycat/manager/response/ShowServer.java similarity index 80% rename from src/main/java/io/mycat/server/response/ShowServer.java rename to src/main/java/io/mycat/manager/response/ShowServer.java index bf66bd731..5e6b37b95 100644 --- a/src/main/java/io/mycat/server/response/ShowServer.java +++ b/src/main/java/io/mycat/manager/response/ShowServer.java @@ -21,18 +21,18 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.response; +package io.mycat.manager.response; + +import java.nio.ByteBuffer; import io.mycat.MycatServer; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; import io.mycat.util.FormatUtil; import io.mycat.util.LongUtil; import io.mycat.util.StringUtil; @@ -46,7 +46,7 @@ */ public final class ShowServer { - private static final int FIELD_COUNT = 9; + private static final int FIELD_COUNT = 8; private static final ResultSetHeaderPacket header = PacketUtil .getHeader(FIELD_COUNT); private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; @@ -86,40 +86,36 @@ public final class ShowServer { fields[i] = PacketUtil.getField("STATUS", Fields.FIELD_TYPE_VAR_STRING); fields[i++].packetId = ++packetId; - fields[i] = PacketUtil.getField("AVG_BUFPOOL_ITEM_SIZE", - Fields.FIELD_TYPE_LONG); - fields[i++].packetId = ++packetId; - eof.packetId = ++packetId; } - public static void execute(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + // write header - header.write(bufferArray); + buffer = header.write(buffer, c, true); // write fields for (FieldPacket field : fields) { - field.write(bufferArray); + buffer = field.write(buffer, c, true); } // write eof - eof.write(bufferArray); + 
buffer = eof.write(buffer, c, true); // write rows byte packetId = eof.packetId; RowDataPacket row = getRow(c.getCharset()); row.packetId = ++packetId; - row.write(bufferArray); + buffer = row.write(buffer, c, true); // write last eof EOFPacket lastEof = new EOFPacket(); lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + buffer = lastEof.write(buffer, c, true); // write buffer - c.write(bufferArray); + c.write(buffer); } private static RowDataPacket getRow(String charset) { @@ -141,7 +137,6 @@ private static RowDataPacket getRow(String charset) { row.add(StringUtil.encode(charset, charset)); row.add(StringUtil.encode(MycatServer.getInstance().isOnline() ? "ON" : "OFF", charset)); - row.add(LongUtil.toBytes(0)); return row; } diff --git a/src/main/java/io/mycat/server/response/ShowSession.java b/src/main/java/io/mycat/manager/response/ShowSession.java similarity index 61% rename from src/main/java/io/mycat/server/response/ShowSession.java rename to src/main/java/io/mycat/manager/response/ShowSession.java index 69f4ce30c..83969cbda 100644 --- a/src/main/java/io/mycat/server/response/ShowSession.java +++ b/src/main/java/io/mycat/manager/response/ShowSession.java @@ -1,21 +1,23 @@ -package io.mycat.server.response; +package io.mycat.manager.response; +import java.nio.ByteBuffer; +import java.util.Collection; + +import io.mycat.MycatServer; import io.mycat.backend.BackendConnection; -import io.mycat.net.BufferArray; -import io.mycat.net.Connection; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.FrontendConnection; +import io.mycat.net.NIOProcessor; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; import io.mycat.server.NonBlockingSession; 
-import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import io.mycat.server.ServerConnection; import io.mycat.util.StringUtil; -import java.util.Collection; - /** * show front session detail info * @@ -44,29 +46,33 @@ public class ShowSession { eof.packetId = ++packetId; } - public static void execute(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + // write header - header.write(bufferArray); + buffer = header.write(buffer, c, true); // write fields for (FieldPacket field : fields) { - field.write(bufferArray); + buffer = field.write(buffer, c, true); } // write eof - eof.write(bufferArray); + buffer = eof.write(buffer, c, true); // write rows byte packetId = eof.packetId; - for (Connection con : NetSystem.getInstance().getAllConnectios() - .values()) { - if (con instanceof MySQLFrontConnection) { - RowDataPacket row = getRow((MySQLFrontConnection) con, c.getCharset()); + for (NIOProcessor process : MycatServer.getInstance().getProcessors()) { + for (FrontendConnection front : process.getFrontends().values()) { + + if (!(front instanceof ServerConnection)) { + continue; + } + ServerConnection sc = (ServerConnection) front; + RowDataPacket row = getRow(sc, c.getCharset()); if (row != null) { row.packetId = ++packetId; - row.write(bufferArray); + buffer = row.write(buffer, c, true); } } } @@ -74,13 +80,13 @@ public static void execute(MySQLFrontConnection c) { // write last eof EOFPacket lastEof = new EOFPacket(); lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + buffer = lastEof.write(buffer, c, true); // write buffer - c.write(bufferArray); + c.write(buffer); } - private static RowDataPacket getRow(MySQLFrontConnection 
sc, String charset) { + private static RowDataPacket getRow(ServerConnection sc, String charset) { StringBuilder sb = new StringBuilder(); NonBlockingSession ssesion = sc.getSession2(); Collection backConnections = ssesion.getTargetMap() diff --git a/src/main/java/io/mycat/manager/response/ShowSqlResultSet.java b/src/main/java/io/mycat/manager/response/ShowSqlResultSet.java new file mode 100644 index 000000000..66b9fc459 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowSqlResultSet.java @@ -0,0 +1,106 @@ +package io.mycat.manager.response; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.statistic.stat.SqlResultSet; +import io.mycat.statistic.stat.UserStat; +import io.mycat.statistic.stat.UserStatAnalyzer; +import io.mycat.util.IntegerUtil; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * show 大结果集 SQL + * + * @author songgw + * + */ +public final class ShowSqlResultSet { + + private static final int FIELD_COUNT = 5; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("ID", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("USER", Fields.FIELD_TYPE_VARCHAR); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("FREQUENCY", Fields.FIELD_TYPE_LONGLONG); + fields[i++].packetId = ++packetId; + + fields[i] = 
PacketUtil.getField("SQL", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("RESULTSET_SIZE", Fields.FIELD_TYPE_INT24); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + int i=0; + Map statMap = UserStatAnalyzer.getInstance().getUserStatMap(); + for (UserStat userStat : statMap.values()) { + String user = userStat.getUser(); + ConcurrentHashMap map=userStat.getSqlResultSizeRecorder().getSqlResultSet(); + if ( map != null ) { + for (SqlResultSet sqlResultSet:map.values()) { + RowDataPacket row = getRow(++i, user,sqlResultSet.getSql(), sqlResultSet.getCount(), sqlResultSet.getResultSetSize(),c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + } + } + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(int i, String user,String sql, int count, int resultSetSize,String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add( LongUtil.toBytes( i ) ); + row.add( StringUtil.encode( user, charset) ); + row.add( LongUtil.toBytes( count ) ); + row.add( StringUtil.encode(sql, charset) ); + row.add( IntegerUtil.toBytes(resultSetSize) ); + return row; + } + + +} diff --git a/src/main/java/io/mycat/manager/response/ShowSysLog.java b/src/main/java/io/mycat/manager/response/ShowSysLog.java new file mode 100644 index 000000000..b9b7bb23d --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowSysLog.java @@ -0,0 +1,157 @@ +package 
io.mycat.manager.response; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.config.model.SystemConfig; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.StringUtil; + +/** + * Show @@SYSLOG LIMIT=50 + * + * @author zhuam + * + */ +public class ShowSysLog { + + private static final int FIELD_COUNT = 2; + + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("DATE", Fields.FIELD_TYPE_VARCHAR); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("LOG", Fields.FIELD_TYPE_VARCHAR); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c, int numLines) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c, true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c, true); + } + + // write eof + buffer = eof.write(buffer, c, true); + + // write rows + byte packetId = eof.packetId; + + String filename = SystemConfig.getHomePath() + File.separator + "logs" + File.separator + "mycat.log"; + + String[] lines = getLinesByLogFile(filename, numLines); + + boolean linesIsEmpty = true; + for(int i = 0; i < lines.length ; i++){ + String line = lines[i]; + 
if ( line != null ) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode( line.substring(0,19), c.getCharset())); + row.add(StringUtil.encode( line.substring(19,line.length()), c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + + linesIsEmpty = false; + } + } + + if ( linesIsEmpty ) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode( "NULL", c.getCharset())); + row.add(StringUtil.encode( "NULL", c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c, true); + + // write buffer + c.write(buffer); + } + + private static String[] getLinesByLogFile(String filename, int numLines) { + + + String lines[] = new String[numLines]; + + BufferedReader in = null; + try { + //获取长度 + int start = 0; + int totalNumLines = 0; + + File logFile = new File(filename); + in = new BufferedReader(new InputStreamReader(new FileInputStream(logFile), "UTF-8")); + + String line; + while ((line=in.readLine()) != null) { + totalNumLines++; + } + in.close(); + + // + in = new BufferedReader(new InputStreamReader(new FileInputStream(logFile), "UTF-8")); + + // 跳过行 + start = totalNumLines - numLines; + if (start < 0) { start = 0; } + for (int i=0; i paramValues = new ArrayList(); + paramValues.add(sysConfig.getProcessors() + ""); + paramValues.add(sysConfig.getBufferPoolChunkSize() + "B"); + paramValues.add(sysConfig.getBufferPoolPageSize() + "B"); + paramValues.add(sysConfig.getProcessorBufferLocalPercent() + ""); + paramValues.add(sysConfig.getProcessorExecutor() + ""); + paramValues.add(sysConfig.getSequnceHandlerType() == 1 ? 
"数据库方式" : "本地文件方式"); + paramValues.add(sysConfig.getPacketHeaderSize() + "B"); + paramValues.add(sysConfig.getMaxPacketSize()/1024/1024 + "M"); + paramValues.add(sysConfig.getIdleTimeout()/1000/60 + "分钟"); + paramValues.add(sysConfig.getCharset() + ""); + paramValues.add(ISOLATIONS[sysConfig.getTxIsolation()]); + paramValues.add(sysConfig.getSqlExecuteTimeout() + "秒"); + paramValues.add(sysConfig.getProcessorCheckPeriod()/1000 + "秒"); + paramValues.add(sysConfig.getDataNodeIdleCheckPeriod()/1000 + "秒"); + paramValues.add(sysConfig.getDataNodeHeartbeatPeriod()/1000 + "秒"); + paramValues.add(sysConfig.getBindIp() + ""); + paramValues.add(sysConfig.getServerPort()+ ""); + paramValues.add(sysConfig.getManagerPort() + ""); + + for(int i = 0; i < PARAMNAMES.length ; i++){ + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(PARAMNAMES[i], c.getCharset())); + row.add(StringUtil.encode(paramValues.get(i), c.getCharset())); + row.add(StringUtil.encode(PARAM_DESCRIPTION[i], c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c, true); + + // write buffer + c.write(buffer); + } + + private static final String[] PARAMNAMES = { + "processors", + "processorBufferChunk", + "processorBufferPool", + "processorBufferLocalPercent", + "processorExecutor", + "sequnceHandlerType", + "Mysql_packetHeaderSize", + "Mysql_maxPacketSize", + "Mysql_idleTimeout", + "Mysql_charset", + "Mysql_txIsolation", + "Mysql_sqlExecuteTimeout", + "Mycat_processorCheckPeriod", + "Mycat_dataNodeIdleCheckPeriod", + "Mycat_dataNodeHeartbeatPeriod", + "Mycat_bindIp", + "Mycat_serverPort", + "Mycat_managerPort"}; + + private static final String[] PARAM_DESCRIPTION = { + 
"主要用于指定系统可用的线程数,默认值为Runtime.getRuntime().availableProcessors()方法返回的值。主要影响processorBufferPool、processorBufferLocalPercent、processorExecutor属性。NIOProcessor的个数也是由这个属性定义的,所以调优的时候可以适当的调高这个属性。", + "指定每次分配Socket Direct Buffer的大小,默认是4096个字节。这个属性也影响buffer pool的长度。", + "指定bufferPool计算 比例值。由于每次执行NIO读、写操作都需要使用到buffer,系统初始化的时候会建立一定长度的buffer池来加快读、写的效率,减少建立buffer的时间", + "就是用来控制分配这个pool的大小用的,但其也并不是一个准确的值,也是一个比例值。这个属性默认值为100。线程缓存百分比 = bufferLocalPercent / processors属性。", + "主要用于指定NIOProcessor上共享的businessExecutor固定线程池大小。mycat在需要处理一些异步逻辑的时候会把任务提交到这个线程池中。新版本中这个连接池的使用频率不是很大了,可以设置一个较小的值。", + "指定使用Mycat全局序列的类型。", + "指定Mysql协议中的报文头长度。默认4", + "指定Mysql协议可以携带的数据最大长度。默认16M", + "指定连接的空闲超时时间。某连接在发起空闲检查下,发现距离上次使用超过了空闲时间,那么这个连接会被回收,就是被直接的关闭掉。默认30分钟", + "连接的初始化字符集。默认为utf8", + "前端连接的初始化事务隔离级别,只在初始化的时候使用,后续会根据客户端传递过来的属性对后端数据库连接进行同步。默认为REPEATED_READ", + "SQL执行超时的时间,Mycat会检查连接上最后一次执行SQL的时间,若超过这个时间则会直接关闭这连接。默认时间为300秒", + "清理NIOProcessor上前后端空闲、超时和关闭连接的间隔时间。默认是1秒", + "对后端连接进行空闲、超时检查的时间间隔,默认是300秒", + "对后端所有读、写库发起心跳的间隔时间,默认是10秒", + "mycat服务监听的IP地址,默认值为0.0.0.0", + "mycat的使用端口,默认值为8066", + "mycat的管理端口,默认值为9066"}; + + public static final String[] ISOLATIONS = {"", "READ_UNCOMMITTED", "READ_COMMITTED", "REPEATED_READ", "SERIALIZABLE"}; +} diff --git a/src/main/java/io/mycat/server/response/ShowThreadPool.java b/src/main/java/io/mycat/manager/response/ShowThreadPool.java similarity index 65% rename from src/main/java/io/mycat/server/response/ShowThreadPool.java rename to src/main/java/io/mycat/manager/response/ShowThreadPool.java index b2e1cea9d..e98e13137 100644 --- a/src/main/java/io/mycat/server/response/ShowThreadPool.java +++ b/src/main/java/io/mycat/manager/response/ShowThreadPool.java @@ -21,26 +21,25 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.response; - -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.util.LinkedList; +import java.util.List; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; import io.mycat.util.IntegerUtil; import io.mycat.util.LongUtil; +import io.mycat.util.NameableExecutor; import io.mycat.util.StringUtil; -import java.util.HashMap; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.ThreadPoolExecutor; - /** * 查看线程池状态 * @@ -83,42 +82,43 @@ public final class ShowThreadPool { eof.packetId = ++packetId; } - public static void execute(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); // write header - header.write(bufferArray); + buffer = header.write(buffer, c, true); // write fields for (FieldPacket field : fields) { - field.write(bufferArray); + buffer = field.write(buffer, c, true); } // write eof - eof.write(bufferArray); + buffer = eof.write(buffer, c, true); // write rows byte packetId = eof.packetId; - Map executors = getExecutors(); - for (Entry entry : executors.entrySet()) { - RowDataPacket row = getRow(entry.getKey(),entry.getValue(), c.getCharset()); - row.packetId = ++packetId; - 
row.write(bufferArray); + List executors = getExecutors(); + for (NameableExecutor exec : executors) { + if (exec != null) { + RowDataPacket row = getRow(exec, c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c, true); + } } // write last eof EOFPacket lastEof = new EOFPacket(); lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + buffer = lastEof.write(buffer, c, true); // write buffer - c.write(bufferArray); + c.write(buffer); } - private static RowDataPacket getRow(String name,ThreadPoolExecutor exec, String charset) { + private static RowDataPacket getRow(NameableExecutor exec, String charset) { RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(StringUtil.encode(name, charset)); + row.add(StringUtil.encode(exec.getName(), charset)); row.add(IntegerUtil.toBytes(exec.getPoolSize())); row.add(IntegerUtil.toBytes(exec.getActiveCount())); row.add(IntegerUtil.toBytes(exec.getQueue().size())); @@ -127,10 +127,15 @@ private static RowDataPacket getRow(String name,ThreadPoolExecutor exec, String return row; } - private static Map getExecutors() { - Map map = new HashMap(); - map.put(NetSystem.getInstance().getTimer().getName(),NetSystem.getInstance().getTimer()); - map.put(NetSystem.getInstance().getExecutor().getName(),NetSystem.getInstance().getExecutor()); - return map; + private static List getExecutors() { + List list = new LinkedList(); + MycatServer server = MycatServer.getInstance(); + list.add(server.getTimerExecutor()); + // list.add(server.getAioExecutor()); + list.add(server.getBusinessExecutor()); + // for (NIOProcessor pros : server.getProcessors()) { + // list.add(pros.getExecutor()); + // } + return list; } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowTime.java b/src/main/java/io/mycat/manager/response/ShowTime.java similarity index 76% rename from src/main/java/io/mycat/server/response/ShowTime.java rename to src/main/java/io/mycat/manager/response/ShowTime.java 
index be32d9752..7324223d7 100644 --- a/src/main/java/io/mycat/server/response/ShowTime.java +++ b/src/main/java/io/mycat/manager/response/ShowTime.java @@ -21,19 +21,19 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.response; +package io.mycat.manager.response; + +import java.nio.ByteBuffer; import io.mycat.MycatServer; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; -import io.mycat.server.parser.ManagerParseShow; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.parser.ManagerParseShow; import io.mycat.util.LongUtil; /** @@ -56,33 +56,33 @@ public final class ShowTime { eof.packetId = ++packetId; } - public static void execute(MySQLFrontConnection c,int type) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + public static void execute(ManagerConnection c, int type) { + ByteBuffer buffer = c.allocate(); + // write header - header.write(bufferArray); + buffer = header.write(buffer, c,true); // write fields for (FieldPacket field : fields) { - field.write(bufferArray); + buffer = field.write(buffer, c,true); } // write eof - eof.write(bufferArray); + buffer = eof.write(buffer, c,true); // write rows byte packetId = eof.packetId; RowDataPacket row = getRow(type); row.packetId = ++packetId; - row.write(bufferArray); + buffer = row.write(buffer, c,true); // write last eof EOFPacket lastEof = new EOFPacket(); 
lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + buffer = lastEof.write(buffer, c,true); // post write - c.write(bufferArray); + c.write(buffer); } private static RowDataPacket getRow(int type) { diff --git a/src/main/java/io/mycat/manager/response/ShowVariables.java b/src/main/java/io/mycat/manager/response/ShowVariables.java new file mode 100644 index 000000000..f15592aa9 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowVariables.java @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.StringUtil; + +/** + * @author mycat + */ +public final class ShowVariables { + + private static final int FIELD_COUNT = 2; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("VARIABLE_NAME", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("VALUE", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + for (Map.Entry e : variables.entrySet()) { + RowDataPacket row = getRow(e.getKey(), e.getValue(), c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write lastEof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(String name, String value, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + 
row.add(StringUtil.encode(name, charset)); + row.add(StringUtil.encode(value, charset)); + return row; + } + + private static final Map variables = new HashMap(); + static { + variables.put("character_set_client", "utf8"); + variables.put("character_set_connection", "utf8"); + variables.put("character_set_results", "utf8"); + variables.put("character_set_server", "utf8"); + variables.put("init_connect", ""); + variables.put("interactive_timeout", "172800"); + variables.put("lower_case_table_names", "1"); + variables.put("max_allowed_packet", "16777216"); + variables.put("net_buffer_length", "8192"); + variables.put("net_write_timeout", "60"); + variables.put("query_cache_size", "0"); + variables.put("query_cache_type", "OFF"); + variables.put("sql_mode", "STRICT_TRANS_TABLES"); + variables.put("system_time_zone", "CST"); + variables.put("time_zone", "SYSTEM"); + variables.put("lower_case_table_names", "1"); + variables.put("tx_isolation", "REPEATABLE-READ"); + variables.put("wait_timeout", "172800"); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowVersion.java b/src/main/java/io/mycat/manager/response/ShowVersion.java new file mode 100644 index 000000000..1f3464289 --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowVersion.java @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.config.Versions; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; + +/** + * 查看CobarServer版本 + * + * @author mycat + */ +public final class ShowVersion { + + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("VERSION", Fields.FIELD_TYPE_STRING); + fields[i++].packetId = ++packetId; + + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(Versions.SERVER_VERSION); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + + // write 
last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/manager/response/ShowWhiteHost.java b/src/main/java/io/mycat/manager/response/ShowWhiteHost.java new file mode 100644 index 000000000..baeb691eb --- /dev/null +++ b/src/main/java/io/mycat/manager/response/ShowWhiteHost.java @@ -0,0 +1,155 @@ +package io.mycat.manager.response; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.ErrorCode; +import io.mycat.config.Fields; +import io.mycat.config.model.FirewallConfig; +import io.mycat.config.model.UserConfig; +import io.mycat.config.util.ConfigException; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.OkPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.StringUtil; + +public final class ShowWhiteHost { + private static final Logger LOGGER = LoggerFactory.getLogger(ShowWhiteHost.class); + + private static final int FIELD_COUNT = 2; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("IP", Fields.FIELD_TYPE_VARCHAR); + fields[i++].packetId = ++packetId; + + fields[i] = PacketUtil.getField("USER", Fields.FIELD_TYPE_VARCHAR); + fields[i++].packetId = ++packetId; + + + eof.packetId = ++packetId; + } + + public static void 
execute(ManagerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + + Map> map=MycatServer.getInstance().getConfig().getFirewall().getWhitehost(); + for (String key : map.keySet()) { + List userConfigs=map.get(key); + String users=""; + for (int i = 0; i < userConfigs.size(); i++) { + if(i>0) { + users += "," + userConfigs.get(i).getName(); + } + else { + users += userConfigs.get(i).getName(); + } + } + RowDataPacket row = getRow(key, users, c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + private static RowDataPacket getRow(String ip, String user, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add( StringUtil.encode( ip, charset) ); + row.add( StringUtil.encode( user, charset) ); + return row; + } + public static String parseString(String stmt) { + int offset = stmt.indexOf(','); + if (offset != -1 && stmt.length() > ++offset) { + String txt = stmt.substring(offset).trim(); + return txt; + } + return null; + } + public static synchronized void setHost(ManagerConnection c,String ips) { + OkPacket ok = new OkPacket(); + String []users = ips.split(","); + if (users.length<2){ + c.writeErrMessage(ErrorCode.ER_YES, "white host info error."); + return; + } + String host=""; + List userConfigs = new ArrayList(); + int i=0; + for(String user : users){ + if (i==0){ + host=user; + i++; + } + else { + i++; + UserConfig uc = MycatServer.getInstance().getConfig().getUsers().get(user); + if (null == uc) { + c.writeErrMessage(ErrorCode.ER_YES, "user doesn't exist in 
host."); + return; + } + if (uc.getSchemas() == null || uc.getSchemas().size() == 0) { + c.writeErrMessage(ErrorCode.ER_YES, "host contains one root privileges user."); + return; + } + userConfigs.add(uc); + } + } + if (MycatServer.getInstance().getConfig().getFirewall().addWhitehost(host, userConfigs)) { + try{ + FirewallConfig.updateToFile(host, userConfigs); + }catch(Exception e){ + LOGGER.warn("set while host error : " + e.getMessage()); + c.writeErrMessage(ErrorCode.ER_YES, "white host set success ,but write to file failed :" + e.getMessage()); + } + + ok.packetId = 1; + ok.affectedRows = 1; + ok.serverStatus = 2; + ok.message = "white host set to succeed.".getBytes(); + ok.write(c); + + } + else { + c.writeErrMessage(ErrorCode.ER_YES, "host duplicated."); + } + } + + + +} diff --git a/src/main/java/io/mycat/server/response/StopHeartbeat.java b/src/main/java/io/mycat/manager/response/StopHeartbeat.java similarity index 74% rename from src/main/java/io/mycat/server/response/StopHeartbeat.java rename to src/main/java/io/mycat/manager/response/StopHeartbeat.java index a7b970767..5b03c7f84 100644 --- a/src/main/java/io/mycat/server/response/StopHeartbeat.java +++ b/src/main/java/io/mycat/manager/response/StopHeartbeat.java @@ -21,21 +21,20 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.response; +package io.mycat.manager.response; +import java.util.Map; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.mycat.MycatServer; -import io.mycat.backend.PhysicalDBPool; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.OkPacket; -import io.mycat.server.parser.ManagerParseStop; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.OkPacket; +import io.mycat.route.parser.ManagerParseStop; +import io.mycat.route.parser.util.Pair; import io.mycat.util.FormatUtil; -import io.mycat.util.Pair; import io.mycat.util.TimeUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Map; /** * 暂停数据节点心跳检测 @@ -44,10 +43,9 @@ */ public final class StopHeartbeat { - private static final Logger logger = LoggerFactory - .getLogger(StopHeartbeat.class); + private static final Logger logger = LoggerFactory.getLogger(StopHeartbeat.class); - public static void execute(String stmt, MySQLFrontConnection c) { + public static void execute(String stmt, ManagerConnection c) { int count = 0; Pair keys = ManagerParseStop.getPair(stmt); if (keys.getKey() != null && keys.getValue() != null) { @@ -58,7 +56,9 @@ public static void execute(String stmt, MySQLFrontConnection c) { if (dn != null) { dn.getSource().setHeartbeatRecoveryTime(TimeUtil.currentTimeMillis() + time); ++count; - logger.warn("{} stop heartbeat '{}' by manager.",dn.getHostName(),FormatUtil.formatTime(time, 3)); + StringBuilder s = new StringBuilder(); + s.append(dn.getHostName()).append(" stop heartbeat '"); + logger.warn(s.append(FormatUtil.formatTime(time, 3)).append("' by manager.").toString()); } } } diff --git a/src/main/java/io/mycat/server/response/SwitchDataSource.java b/src/main/java/io/mycat/manager/response/SwitchDataSource.java similarity index 86% rename from src/main/java/io/mycat/server/response/SwitchDataSource.java rename to 
src/main/java/io/mycat/manager/response/SwitchDataSource.java index f8611885f..7b8a76e9a 100644 --- a/src/main/java/io/mycat/server/response/SwitchDataSource.java +++ b/src/main/java/io/mycat/manager/response/SwitchDataSource.java @@ -21,17 +21,16 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.response; +package io.mycat.manager.response; +import java.util.Map; import io.mycat.MycatServer; -import io.mycat.backend.PhysicalDBPool; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.OkPacket; -import io.mycat.server.parser.ManagerParseSwitch; -import io.mycat.util.Pair; - -import java.util.Map; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.OkPacket; +import io.mycat.route.parser.ManagerParseSwitch; +import io.mycat.route.parser.util.Pair; /** * 切换数据节点的数据源 @@ -40,7 +39,7 @@ */ public final class SwitchDataSource { - public static void response(String stmt, MySQLFrontConnection c) { + public static void response(String stmt, ManagerConnection c) { int count = 0; Pair pair = ManagerParseSwitch.getPair(stmt); Map dns = MycatServer.getInstance().getConfig().getDataHosts(); diff --git a/src/main/java/io/mycat/memory/MyCatMemory.java b/src/main/java/io/mycat/memory/MyCatMemory.java new file mode 100644 index 000000000..f6b9f0d0c --- /dev/null +++ b/src/main/java/io/mycat/memory/MyCatMemory.java @@ -0,0 +1,197 @@ +package io.mycat.memory; + + +import com.google.common.annotations.VisibleForTesting; +import io.mycat.config.model.SystemConfig; +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.memory.mm.MemoryManager; +import io.mycat.memory.unsafe.memory.mm.ResultMergeMemoryManager; +import io.mycat.memory.unsafe.storage.DataNodeDiskManager; +import io.mycat.memory.unsafe.storage.SerializerManager; +import io.mycat.memory.unsafe.utils.JavaUtils; +import 
io.mycat.memory.unsafe.utils.MycatPropertyConf; +import org.apache.log4j.Logger; + +/** + * Created by zagnix on 2016/6/2. + * Mycat内存管理工具类 + * 规划为三部分内存:结果集处理内存,系统预留内存,网络处理内存 + * 其中网络处理内存部分全部为Direct Memory + * 结果集内存分为Direct Memory 和 Heap Memory,但目前仅使用Direct Memory + * 系统预留内存为 Heap Memory。 + * 系统运行时,必须设置-XX:MaxDirectMemorySize 和 -Xmx JVM参数 + * -Xmx1024m -Xmn512m -XX:MaxDirectMemorySize=2048m -Xss256K -XX:+UseParallelGC + */ + +public class MyCatMemory { + private static Logger LOGGER = Logger.getLogger(MyCatMemory.class); + + public final static double DIRECT_SAFETY_FRACTION = 0.7; + private final long systemReserveBufferSize; + + private final long memoryPageSize; + private final long spillsFileBufferSize; + private final long resultSetBufferSize; + private final int numCores; + + + /** + * 内存管理相关关键类 + */ + private final MycatPropertyConf conf; + private final MemoryManager resultMergeMemoryManager; + private final DataNodeDiskManager blockManager; + private final SerializerManager serializerManager; + private final SystemConfig system; + + + public MyCatMemory(SystemConfig system,long totalNetWorkBufferSize) throws NoSuchFieldException, IllegalAccessException { + + this.system = system; + + LOGGER.info("useOffHeapForMerge = " + system.getUseOffHeapForMerge()); + LOGGER.info("memoryPageSize = " + system.getMemoryPageSize()); + LOGGER.info("spillsFileBufferSize = " + system.getSpillsFileBufferSize()); + LOGGER.info("useStreamOutput = " + system.getUseStreamOutput()); + LOGGER.info("systemReserveMemorySize = " + system.getSystemReserveMemorySize()); + LOGGER.info("totalNetWorkBufferSize = " + JavaUtils.bytesToString2(totalNetWorkBufferSize)); + LOGGER.info("dataNodeSortedTempDir = " + system.getDataNodeSortedTempDir()); + + this.conf = new MycatPropertyConf(); + numCores = Runtime.getRuntime().availableProcessors(); + + this.systemReserveBufferSize = JavaUtils. + byteStringAsBytes(system.getSystemReserveMemorySize()); + this.memoryPageSize = JavaUtils. 
+ byteStringAsBytes(system.getMemoryPageSize()); + + this.spillsFileBufferSize = JavaUtils. + byteStringAsBytes(system.getSpillsFileBufferSize()); + + /** + * 目前merge,order by ,limit 没有使用On Heap内存 + */ + long maxOnHeapMemory = (Platform.getMaxHeapMemory()-systemReserveBufferSize); + + assert maxOnHeapMemory > 0; + + resultSetBufferSize = + (long)((Platform.getMaxDirectMemory()-2*totalNetWorkBufferSize)*DIRECT_SAFETY_FRACTION); + + assert resultSetBufferSize > 0; + + /** + * mycat.merge.memory.offHeap.enabled + * mycat.buffer.pageSize + * mycat.memory.offHeap.size + * mycat.merge.file.buffer + * mycat.direct.output.result + * mycat.local.dir + */ + + if(system.getUseOffHeapForMerge()== 1){ + conf.set("mycat.memory.offHeap.enabled","true"); + }else{ + conf.set("mycat.memory.offHeap.enabled","false"); + } + + if(system.getUseStreamOutput() == 1){ + conf.set("mycat.stream.output.result","true"); + }else{ + conf.set("mycat.stream.output.result","false"); + } + + + if(system.getMemoryPageSize() != null){ + conf.set("mycat.buffer.pageSize",system.getMemoryPageSize()); + }else{ + conf.set("mycat.buffer.pageSize","32k"); + } + + + if(system.getSpillsFileBufferSize() != null){ + conf.set("mycat.merge.file.buffer",system.getSpillsFileBufferSize()); + }else{ + conf.set("mycat.merge.file.buffer","32k"); + } + + conf.set("mycat.pointer.array.len","1k") + .set("mycat.memory.offHeap.size", JavaUtils.bytesToString2(resultSetBufferSize)); + + LOGGER.info("mycat.memory.offHeap.size: " + + JavaUtils.bytesToString2(resultSetBufferSize)); + + resultMergeMemoryManager = + new ResultMergeMemoryManager(conf,numCores,maxOnHeapMemory); + + + serializerManager = new SerializerManager(); + + blockManager = new DataNodeDiskManager(conf,true,serializerManager); + + } + + + @VisibleForTesting + public MyCatMemory() throws NoSuchFieldException, IllegalAccessException { + this.system = null; + this.systemReserveBufferSize = 0; + this.memoryPageSize = 0; + this.spillsFileBufferSize = 0; + conf = new 
MycatPropertyConf(); + numCores = Runtime.getRuntime().availableProcessors(); + + long maxOnHeapMemory = (Platform.getMaxHeapMemory()); + assert maxOnHeapMemory > 0; + + resultSetBufferSize = (long)((Platform.getMaxDirectMemory())*DIRECT_SAFETY_FRACTION); + + assert resultSetBufferSize > 0; + /** + * mycat.memory.offHeap.enabled + * mycat.buffer.pageSize + * mycat.memory.offHeap.size + * mycat.testing.memory + * mycat.merge.file.buffer + * mycat.direct.output.result + * mycat.local.dir + */ + conf.set("mycat.memory.offHeap.enabled","true") + .set("mycat.pointer.array.len","8K") + .set("mycat.buffer.pageSize","1m") + .set("mycat.memory.offHeap.size", JavaUtils.bytesToString2(resultSetBufferSize)) + .set("mycat.stream.output.result","false"); + + LOGGER.info("mycat.memory.offHeap.size: " + JavaUtils.bytesToString2(resultSetBufferSize)); + + resultMergeMemoryManager = + new ResultMergeMemoryManager(conf,numCores,maxOnHeapMemory); + + serializerManager = new SerializerManager(); + + blockManager = new DataNodeDiskManager(conf,true,serializerManager); + + } + + public MycatPropertyConf getConf() { + return conf; + } + + public long getResultSetBufferSize() { + return resultSetBufferSize; + } + + public MemoryManager getResultMergeMemoryManager() { + return resultMergeMemoryManager; + } + + public SerializerManager getSerializerManager() { + return serializerManager; + } + + public DataNodeDiskManager getBlockManager() { + return blockManager; + } + +} diff --git a/src/main/java/io/mycat/memory/environment/EnvironmentInformation.java b/src/main/java/io/mycat/memory/environment/EnvironmentInformation.java new file mode 100644 index 000000000..5a750eb75 --- /dev/null +++ b/src/main/java/io/mycat/memory/environment/EnvironmentInformation.java @@ -0,0 +1,338 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.environment; + + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.InputStream; +import java.lang.management.ManagementFactory; +import java.lang.management.OperatingSystemMXBean; +import java.lang.management.RuntimeMXBean; +import java.lang.reflect.Method; +import java.util.List; +import java.util.Properties; + +/** + * Utility class that gives access to the execution environment of the JVM, like + * the executing user, startup options, or the JVM version. + */ +public class EnvironmentInformation { + + private static final Logger LOG = LoggerFactory.getLogger(EnvironmentInformation.class); + + public static final String UNKNOWN = ""; + + /** + * Returns the version of the code as String. If version == null, then the JobManager does not run from a + * Maven build. An example is a source code checkout, compile, and run from inside an IDE. + * + * @return The version string. + */ + public static String getVersion() { + String version = EnvironmentInformation.class.getPackage().getImplementationVersion(); + return version != null ? version : UNKNOWN; + } + + /** + * Returns the code revision (commit and commit date) of Flink, as generated by the Maven builds. + * + * @return The code revision. 
+ */ + public static RevisionInformation getRevisionInformation() { + String revision = UNKNOWN; + String commitDate = UNKNOWN; + try (InputStream propFile = EnvironmentInformation.class.getClassLoader().getResourceAsStream(".version.properties")) { + if (propFile != null) { + Properties properties = new Properties(); + properties.load(propFile); + String propRevision = properties.getProperty("git.commit.id.abbrev"); + String propCommitDate = properties.getProperty("git.commit.time"); + revision = propRevision != null ? propRevision : UNKNOWN; + commitDate = propCommitDate != null ? propCommitDate : UNKNOWN; + } + } catch (Throwable t) { + if (LOG.isDebugEnabled()) { + LOG.debug("Cannot determine code revision: Unable to read version property file.", t); + } else { + LOG.info("Cannot determine code revision: Unable to read version property file."); + } + } + + return new RevisionInformation(revision, commitDate); + } + + /** + * Gets the name of the user that is running the JVM. + * + * @return The name of the user that is running the JVM. + */ + public static String getUserRunning() { + String user = System.getProperty("user.name"); + if (user == null) { + user = UNKNOWN; + if (LOG.isDebugEnabled()) { + LOG.debug("Cannot determine user/group information for the current user."); + } + } + return user; + } + + /** + * The maximum JVM heap size, in bytes. + * + * @return The maximum JVM heap size, in bytes. 
+ */ + public static long getMaxJvmHeapMemory() { + long maxMemory = Runtime.getRuntime().maxMemory(); + + if (maxMemory == Long.MAX_VALUE) { + // amount of free memory unknown + try { + // workaround for Oracle JDK + OperatingSystemMXBean operatingSystemMXBean = ManagementFactory.getOperatingSystemMXBean(); + Class clazz = Class.forName("com.sun.management.OperatingSystemMXBean"); + Method method = clazz.getMethod("getTotalPhysicalMemorySize"); + maxMemory = (Long) method.invoke(operatingSystemMXBean) / 4; + } + catch (Throwable e) { + throw new RuntimeException("Could not determine the amount of free memory.\n" + + "Please set the maximum memory for the JVM, e.g. -Xmx512M for 512 megabytes."); + } + } + + return maxMemory; + } + + /** + * Gets an estimate of the size of the free heap memory. + * + * NOTE: This method is heavy-weight. It triggers a garbage collection to reduce fragmentation and get + * a better estimate at the size of free memory. It is typically more accurate than the plain version + * {@link #getSizeOfFreeHeapMemory()}. + * + * @return An estimate of the size of the free heap memory, in bytes. + */ + public static long getSizeOfFreeHeapMemoryWithDefrag() { + // trigger a garbage collection, to reduce fragmentation + System.gc(); + + return getSizeOfFreeHeapMemory(); + } + + /** + * Gets an estimate of the size of the free heap memory. The estimate may vary, depending on the current + * level of memory fragmentation and the number of dead objects. For a better (but more heavy-weight) + * estimate, use {@link #getSizeOfFreeHeapMemoryWithDefrag()}. + * + * @return An estimate of the size of the free heap memory, in bytes. 
+ */ + public static long getSizeOfFreeHeapMemory() { + Runtime r = Runtime.getRuntime(); + long maxMemory = r.maxMemory(); + + if (maxMemory == Long.MAX_VALUE) { + // amount of free memory unknown + try { + // workaround for Oracle JDK + OperatingSystemMXBean operatingSystemMXBean = ManagementFactory.getOperatingSystemMXBean(); + Class clazz = Class.forName("com.sun.management.OperatingSystemMXBean"); + Method method = clazz.getMethod("getTotalPhysicalMemorySize"); + maxMemory = (Long) method.invoke(operatingSystemMXBean) / 4; + } catch (Throwable e) { + throw new RuntimeException("Could not determine the amount of free memory.\n" + + "Please set the maximum memory for the JVM, e.g. -Xmx512M for 512 megabytes."); + } + } + + return maxMemory - r.totalMemory() + r.freeMemory(); + } + + /** + * Gets the version of the JVM in the form "VM_Name - Vendor - Spec/Version". + * + * @return The JVM version. + */ + public static String getJvmVersion() { + try { + final RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean(); + return bean.getVmName() + " - " + bean.getVmVendor() + " - " + bean.getSpecVersion() + '/' + bean.getVmVersion(); + } + catch (Throwable t) { + return UNKNOWN; + } + } + + /** + * Gets the system parameters and environment parameters that were passed to the JVM on startup. + * + * @return The options passed to the JVM on startup. + */ + public static String getJvmStartupOptions() { + try { + final RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean(); + final StringBuilder bld = new StringBuilder(); + + for (String s : bean.getInputArguments()) { + bld.append(s).append(' '); + } + + return bld.toString(); + } + catch (Throwable t) { + return UNKNOWN; + } + } + + /** + * Gets the system parameters and environment parameters that were passed to the JVM on startup. + * + * @return The options passed to the JVM on startup. 
+ */ + public static String[] getJvmStartupOptionsArray() { + try { + RuntimeMXBean bean = ManagementFactory.getRuntimeMXBean(); + List options = bean.getInputArguments(); + return options.toArray(new String[options.size()]); + } + catch (Throwable t) { + return new String[0]; + } + } + + /** + * Gets the directory for temporary files, as returned by the JVM system property "java.io.tmpdir". + * + * @return The directory for temporary files. + */ + public static String getTemporaryFileDirectory() { + return System.getProperty("java.io.tmpdir"); + } + + /** + * Tries to retrieve the maximum number of open file handles. This method will only work on + * UNIX-based operating systems with Sun/Oracle Java versions. + * + *

If the number of max open file handles cannot be determined, this method returns {@code -1}.

+ * + * @return The limit of open file handles, or {@code -1}, if the limit could not be determined. + */ + public static long getOpenFileHandlesLimit() { + Class sunBeanClass; + try { + sunBeanClass = Class.forName("com.sun.management.UnixOperatingSystemMXBean"); + } + catch (ClassNotFoundException e) { + return -1L; + } + + try { + Method fhLimitMethod = sunBeanClass.getMethod("getMaxFileDescriptorCount"); + Object result = fhLimitMethod.invoke(ManagementFactory.getOperatingSystemMXBean()); + return (Long) result; + } + catch (Throwable t) { + LOG.warn("Unexpected error when accessing file handle limit", t); + return -1L; + } + } + + /** + * Logs a information about the environment, like code revision, current user, java version, + * and JVM parameters. + * + * @param log The logger to log the information to. + * @param componentName The component name to mention in the log. + * @param commandLineArgs The arguments accompanying the starting the component. + */ + public static void logEnvironmentInfo(Logger log, String componentName, String[] commandLineArgs) { + if (log.isInfoEnabled()) { + RevisionInformation rev = getRevisionInformation(); + String version = getVersion(); + + String user = getUserRunning(); + + String jvmVersion = getJvmVersion(); + String[] options = getJvmStartupOptionsArray(); + + String javaHome = System.getenv("JAVA_HOME"); + + long maxHeapMegabytes = getMaxJvmHeapMemory() >>> 20; + + log.info("--------------------------------------------------------------------------------"); + log.info(" Starting " + componentName + " (Version: " + version + ", " + + "Rev:" + rev.commitId + ", " + "Date:" + rev.commitDate + ")"); + log.info(" Current user: " + user); + log.info(" JVM: " + jvmVersion); + log.info(" Maximum heap size: " + maxHeapMegabytes + " MiBytes"); + log.info(" JAVA_HOME: " + (javaHome == null ? 
"(not set)" : javaHome)); + + + if (options.length == 0) { + log.info(" JVM Options: (none)"); + } + else { + log.info(" JVM Options:"); + for (String s: options) { + log.info(" " + s); + } + } + + if (commandLineArgs == null || commandLineArgs.length == 0) { + log.info(" Program Arguments: (none)"); + } + else { + log.info(" Program Arguments:"); + for (String s: commandLineArgs) { + log.info(" " + s); + } + } + + log.info(" Classpath: " + System.getProperty("java.class.path")); + + log.info("--------------------------------------------------------------------------------"); + } + } + + // -------------------------------------------------------------------------------------------- + + /** Don't instantiate this class */ + private EnvironmentInformation() {} + + // -------------------------------------------------------------------------------------------- + + /** + * Revision information encapsulates information about the source code revision of the Flink + * code. + */ + public static class RevisionInformation { + + /** The git commit id (hash) */ + public final String commitId; + + /** The git commit date */ + public final String commitDate; + + public RevisionInformation(String commitId, String commitDate) { + this.commitId = commitId; + this.commitDate = commitDate; + } + } +} diff --git a/src/main/java/io/mycat/memory/environment/Hardware.java b/src/main/java/io/mycat/memory/environment/Hardware.java new file mode 100644 index 000000000..f4b559647 --- /dev/null +++ b/src/main/java/io/mycat/memory/environment/Hardware.java @@ -0,0 +1,244 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.environment; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Convenience class to extract hardware specifics of the computer executing this class + */ +public class Hardware { + + private static final Logger LOG = LoggerFactory.getLogger(Hardware.class); + + private static final String LINUX_MEMORY_INFO_PATH = "/proc/meminfo"; + + private static final Pattern LINUX_MEMORY_REGEX = Pattern.compile("^MemTotal:\\s*(\\d+)\\s+kB$"); + + + + /** + * Gets the number of CPU cores (hardware contexts) that the JVM has access to. + * + * @return The number of CPU cores. + */ + public static int getNumberCPUCores() { + return Runtime.getRuntime().availableProcessors(); + } + + /** + * Returns the size of the physical memory in bytes. 
+ * + * @return the size of the physical memory in bytes or -1 if + * the size could not be determined + */ + public static long getSizeOfPhysicalMemory() { + switch (OperatingSystem.getCurrentOperatingSystem()) { + case LINUX: + return getSizeOfPhysicalMemoryForLinux(); + + case WINDOWS: + return getSizeOfPhysicalMemoryForWindows(); + + case MAC_OS: + return getSizeOfPhysicalMemoryForMac(); + + case FREE_BSD: + return getSizeOfPhysicalMemoryForFreeBSD(); + + case UNKNOWN: + LOG.error("Cannot determine size of physical memory for unknown operating system"); + return -1; + + default: + LOG.error("Unrecognized OS: " + OperatingSystem.getCurrentOperatingSystem()); + return -1; + } + } + + /** + * Returns the size of the physical memory in bytes on a Linux-based + * operating system. + * + * @return the size of the physical memory in bytes or -1 if + * the size could not be determined + */ + private static long getSizeOfPhysicalMemoryForLinux() { + try (BufferedReader lineReader = new BufferedReader(new FileReader(LINUX_MEMORY_INFO_PATH))) { + String line; + while ((line = lineReader.readLine()) != null) { + Matcher matcher = LINUX_MEMORY_REGEX.matcher(line); + if (matcher.matches()) { + String totalMemory = matcher.group(1); + return Long.parseLong(totalMemory) * 1024L; // Convert from kilobyte to byte + } + } + // expected line did not come + LOG.error("Cannot determine the size of the physical memory for Linux host (using '/proc/meminfo'). Unexpected format."); + return -1; + } + catch (NumberFormatException e) { + LOG.error("Cannot determine the size of the physical memory for Linux host (using '/proc/meminfo'). 
Unexpected format."); + return -1; + } + catch (Throwable t) { + LOG.error("Cannot determine the size of the physical memory for Linux host (using '/proc/meminfo'): " + t.getMessage(), t); + return -1; + } + } + + /** + * Returns the size of the physical memory in bytes on a Mac OS-based + * operating system + * + * @return the size of the physical memory in bytes or -1 if + * the size could not be determined + */ + private static long getSizeOfPhysicalMemoryForMac() { + + BufferedReader bi = null; + + try { + Process proc = Runtime.getRuntime().exec("sysctl hw.memsize"); + + bi = new BufferedReader( + new InputStreamReader(proc.getInputStream())); + + String line; + + while ((line = bi.readLine()) != null) { + if (line.startsWith("hw.memsize")) { + long memsize = Long.parseLong(line.split(":")[1].trim()); + bi.close(); + proc.destroy(); + return memsize; + } + } + + } catch (Throwable t) { + LOG.error("Cannot determine physical memory of machine for MacOS host: " + t.getMessage(), t); + return -1; + } finally { + if (bi != null) { + try { + bi.close(); + } catch (IOException ignored) {} + } + } + return -1; + } + + /** + * Returns the size of the physical memory in bytes on FreeBSD. 
+ * + * @return the size of the physical memory in bytes or -1 if + * the size could not be determined + */ + private static long getSizeOfPhysicalMemoryForFreeBSD() { + BufferedReader bi = null; + try { + Process proc = Runtime.getRuntime().exec("sysctl hw.physmem"); + + bi = new BufferedReader(new InputStreamReader(proc.getInputStream())); + + String line; + + while ((line = bi.readLine()) != null) { + if (line.startsWith("hw.physmem")) { + long memsize = Long.parseLong(line.split(":")[1].trim()); + bi.close(); + proc.destroy(); + return memsize; + } + } + + LOG.error("Cannot determine the size of the physical memory for FreeBSD host (using 'sysctl hw.physmem')."); + return -1; + } + catch (Throwable t) { + LOG.error("Cannot determine the size of the physical memory for FreeBSD host (using 'sysctl hw.physmem'): " + t.getMessage(), t); + return -1; + } + finally { + if (bi != null) { + try { + bi.close(); + } catch (IOException ignored) {} + } + } + } + + /** + * Returns the size of the physical memory in bytes on Windows. 
+ * + * @return the size of the physical memory in bytes or -1 if + * the size could not be determined + */ + private static long getSizeOfPhysicalMemoryForWindows() { + BufferedReader bi = null; + try { + Process proc = Runtime.getRuntime().exec("wmic memorychip get capacity"); + + bi = new BufferedReader(new InputStreamReader(proc.getInputStream())); + + String line = bi.readLine(); + if (line == null) { + return -1L; + } + + if (!line.startsWith("Capacity")) { + return -1L; + } + + long sizeOfPhyiscalMemory = 0L; + while ((line = bi.readLine()) != null) { + if (line.isEmpty()) { + continue; + } + + line = line.replaceAll(" ", ""); + sizeOfPhyiscalMemory += Long.parseLong(line); + } + return sizeOfPhyiscalMemory; + } + catch (Throwable t) { + LOG.error("Cannot determine the size of the physical memory for Windows host (using 'wmic memorychip'): " + t.getMessage(), t); + return -1L; + } + finally { + if (bi != null) { + try { + bi.close(); + } catch (Throwable ignored) {} + } + } + } + + // -------------------------------------------------------------------------------------------- + + private Hardware() {} +} diff --git a/src/main/java/io/mycat/memory/environment/HardwareDescription.java b/src/main/java/io/mycat/memory/environment/HardwareDescription.java new file mode 100644 index 000000000..81bbd9c32 --- /dev/null +++ b/src/main/java/io/mycat/memory/environment/HardwareDescription.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.environment; + +import java.io.Serializable; + +/** + * A hardware description describes the resources available to a task manager. + */ +public final class HardwareDescription implements Serializable { + + private static final long serialVersionUID = 3380016608300325361L; + + /** The number of CPU cores available to the JVM on the compute node. */ + private int numberOfCPUCores; + + /** The size of physical memory in bytes available on the compute node. */ + private long sizeOfPhysicalMemory; + + /** The size of the JVM heap memory */ + private long sizeOfJvmHeap; + + /** The size of the memory managed by the system for caching, hashing, sorting, ... */ + private long sizeOfManagedMemory; + + + /** + * Public default constructor used for serialization process. + */ + public HardwareDescription() {} + + /** + * Constructs a new hardware description object. + * + * @param numberOfCPUCores The number of CPU cores available to the JVM on the compute node. + * @param sizeOfPhysicalMemory The size of physical memory in bytes available on the compute node. + * @param sizeOfJvmHeap The size of the JVM heap memory. + * @param sizeOfManagedMemory The size of the memory managed by the system for caching, hashing, sorting, ... 
+ */ + public HardwareDescription(int numberOfCPUCores, long sizeOfPhysicalMemory, long sizeOfJvmHeap, long sizeOfManagedMemory) { + this.numberOfCPUCores = numberOfCPUCores; + this.sizeOfPhysicalMemory = sizeOfPhysicalMemory; + this.sizeOfJvmHeap = sizeOfJvmHeap; + this.sizeOfManagedMemory = sizeOfManagedMemory; + } + + /** + * Returns the number of CPU cores available to the JVM on the compute node. + * + * @return the number of CPU cores available to the JVM on the compute node + */ + public int getNumberOfCPUCores() { + return this.numberOfCPUCores; + } + + /** + * Returns the size of physical memory in bytes available on the compute node. + * + * @return the size of physical memory in bytes available on the compute node + */ + public long getSizeOfPhysicalMemory() { + return this.sizeOfPhysicalMemory; + } + + /** + * Returns the size of the JVM heap memory + * + * @return The size of the JVM heap memory + */ + public long getSizeOfJvmHeap() { + return this.sizeOfJvmHeap; + } + + /** + * Returns the size of the memory managed by the system for caching, hashing, sorting, ... + * + * @return The size of the memory managed by the system. 
+ */ + public long getSizeOfManagedMemory() { + return this.sizeOfManagedMemory; + } + + // -------------------------------------------------------------------------------------------- + // Utils + // -------------------------------------------------------------------------------------------- + + @Override + public String toString() { + return String.format("cores=%d, physMem=%d, heap=%d, managed=%d", + numberOfCPUCores, sizeOfPhysicalMemory, sizeOfJvmHeap, sizeOfManagedMemory); + } + + // -------------------------------------------------------------------------------------------- + // Factory + // -------------------------------------------------------------------------------------------- + + public static HardwareDescription extractFromSystem(long managedMemory) { + final int numberOfCPUCores = Hardware.getNumberCPUCores(); + final long sizeOfJvmHeap = Runtime.getRuntime().maxMemory(); + final long sizeOfPhysicalMemory = Hardware.getSizeOfPhysicalMemory(); + + return new HardwareDescription(numberOfCPUCores, sizeOfPhysicalMemory, sizeOfJvmHeap, managedMemory); + } +} diff --git a/src/main/java/io/mycat/memory/environment/OperatingSystem.java b/src/main/java/io/mycat/memory/environment/OperatingSystem.java new file mode 100644 index 000000000..1d133d3da --- /dev/null +++ b/src/main/java/io/mycat/memory/environment/OperatingSystem.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.environment; + + + +/** + * An enumeration indicating the operating system that the JVM runs on. + */ + +public enum OperatingSystem { + + LINUX, + WINDOWS, + MAC_OS, + FREE_BSD, + UNKNOWN; + + // ------------------------------------------------------------------------ + + /** + * Gets the operating system that the JVM runs on from the java system properties. + * this method returns UNKNOWN, if the operating system was not successfully determined. + * + * @return The enum constant for the operating system, or UNKNOWN, if it was not possible to determine. + */ + public static OperatingSystem getCurrentOperatingSystem() { + return os; + } + + /** + * Checks whether the operating system this JVM runs on is Windows. + * + * @return true if the operating system this JVM runs on is + * Windows, false otherwise + */ + public static boolean isWindows() { + return getCurrentOperatingSystem() == WINDOWS; + } + + /** + * Checks whether the operating system this JVM runs on is Linux. + * + * @return true if the operating system this JVM runs on is + * Linux, false otherwise + */ + public static boolean isLinux() { + return getCurrentOperatingSystem() == LINUX; + } + + /** + * Checks whether the operating system this JVM runs on is Windows. + * + * @return true if the operating system this JVM runs on is + * Windows, false otherwise + */ + public static boolean isMac() { + return getCurrentOperatingSystem() == MAC_OS; + } + + /** + * Checks whether the operating system this JVM runs on is FreeBSD. 
+ * + * @return true if the operating system this JVM runs on is + * FreeBSD, false otherwise + */ + public static boolean isFreeBSD() { + return getCurrentOperatingSystem() == FREE_BSD; + } + + /** + * The enum constant for the operating system. + */ + private static final OperatingSystem os = readOSFromSystemProperties(); + + /** + * Parses the operating system that the JVM runs on from the java system properties. + * If the operating system was not successfully determined, this method returns {@code UNKNOWN}. + * + * @return The enum constant for the operating system, or {@code UNKNOWN}, if it was not possible to determine. + */ + private static OperatingSystem readOSFromSystemProperties() { + String osName = System.getProperty(OS_KEY); + + if (osName.startsWith(LINUX_OS_PREFIX)) { + return LINUX; + } + if (osName.startsWith(WINDOWS_OS_PREFIX)) { + return WINDOWS; + } + if (osName.startsWith(MAC_OS_PREFIX)) { + return MAC_OS; + } + if (osName.startsWith(FREEBSD_OS_PREFIX)) { + return FREE_BSD; + } + + return UNKNOWN; + } + + // -------------------------------------------------------------------------------------------- + // Constants to extract the OS type from the java environment + // -------------------------------------------------------------------------------------------- + + /** + * The key to extract the operating system name from the system properties. + */ + private static final String OS_KEY = "os.name"; + + /** + * The expected prefix for Linux operating systems. + */ + private static final String LINUX_OS_PREFIX = "Linux"; + + /** + * The expected prefix for Windows operating systems. + */ + private static final String WINDOWS_OS_PREFIX = "Windows"; + + /** + * The expected prefix for Mac OS operating systems. + */ + private static final String MAC_OS_PREFIX = "Mac"; + + /** + * The expected prefix for FreeBSD. 
+ */ + private static final String FREEBSD_OS_PREFIX = "FreeBSD"; +} diff --git a/src/main/java/io/mycat/memory/unsafe/KVIterator.java b/src/main/java/io/mycat/memory/unsafe/KVIterator.java new file mode 100644 index 000000000..65198e629 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/KVIterator.java @@ -0,0 +1,15 @@ + +package io.mycat.memory.unsafe; + +import java.io.IOException; + +public abstract class KVIterator { + + public abstract boolean next() throws IOException; + + public abstract K getKey(); + + public abstract V getValue(); + + public abstract void close(); +} diff --git a/src/main/java/io/mycat/memory/unsafe/Platform.java b/src/main/java/io/mycat/memory/unsafe/Platform.java new file mode 100644 index 000000000..6d4b994cc --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/Platform.java @@ -0,0 +1,391 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.mycat.memory.unsafe; + +import io.mycat.memory.unsafe.utils.BytesTools; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import sun.misc.Cleaner; +import sun.misc.Unsafe; +import sun.nio.ch.DirectBuffer; + +import java.lang.reflect.Constructor; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public final class Platform { + + private final static Logger logger = LoggerFactory.getLogger(Platform.class); + private static final Pattern MAX_DIRECT_MEMORY_SIZE_ARG_PATTERN = + Pattern.compile("\\s*-XX:MaxDirectMemorySize\\s*=\\s*([0-9]+)\\s*([kKmMgG]?)\\s*$"); + private static final Unsafe _UNSAFE; + + public static final int BYTE_ARRAY_OFFSET; + + public static final int SHORT_ARRAY_OFFSET; + + public static final int INT_ARRAY_OFFSET; + + public static final int LONG_ARRAY_OFFSET; + + public static final int FLOAT_ARRAY_OFFSET; + + public static final int DOUBLE_ARRAY_OFFSET; + + private static final long MAX_DIRECT_MEMORY; + + private static final boolean unaligned; + + public static final boolean littleEndian = ByteOrder.nativeOrder() + .equals(ByteOrder.LITTLE_ENDIAN); + + static { + boolean _unaligned; + // use reflection to access unaligned field + try { + Class bitsClass = + Class.forName("java.nio.Bits", false, ClassLoader.getSystemClassLoader()); + Method unalignedMethod = bitsClass.getDeclaredMethod("unaligned"); + unalignedMethod.setAccessible(true); + _unaligned = Boolean.TRUE.equals(unalignedMethod.invoke(null)); + } catch (Throwable t) { + // We at least know x86 and x64 support unaligned access. 
+ String arch = System.getProperty("os.arch", ""); + //noinspection DynamicRegexReplaceableByCompiledPattern + _unaligned = arch.matches("^(i[3-6]86|x86(_64)?|x64|amd64)$"); + } + unaligned = _unaligned; + MAX_DIRECT_MEMORY = maxDirectMemory(); + + } + + + private static ClassLoader getSystemClassLoader() { + return System.getSecurityManager() == null ? ClassLoader.getSystemClassLoader() : (ClassLoader) AccessController.doPrivileged(new PrivilegedAction() { + public ClassLoader run() { + return ClassLoader.getSystemClassLoader(); + } + }); + } + + /** + * GET MaxDirectMemory Size,from Netty Project! + */ + private static long maxDirectMemory() { + long maxDirectMemory = 0L; + Class t; + try { + t = Class.forName("sun.misc.VM", true, getSystemClassLoader()); + Method runtimeClass = t.getDeclaredMethod("maxDirectMemory", new Class[0]); + maxDirectMemory = ((Number) runtimeClass.invoke((Object) null, new Object[0])).longValue(); + } catch (Throwable var8) { + ; + } + + if (maxDirectMemory > 0L) { + return maxDirectMemory; + } else { + try { + t = Class.forName("java.lang.management.ManagementFactory", true, getSystemClassLoader()); + Class var10 = Class.forName("java.lang.management.RuntimeMXBean", true, getSystemClassLoader()); + Object runtime = t.getDeclaredMethod("getRuntimeMXBean", new Class[0]).invoke((Object) null, new Object[0]); + List vmArgs = (List) var10.getDeclaredMethod("getInputArguments", new Class[0]).invoke(runtime, new Object[0]); + + label41: + for (int i = vmArgs.size() - 1; i >= 0; --i) { + Matcher m = MAX_DIRECT_MEMORY_SIZE_ARG_PATTERN.matcher((CharSequence) vmArgs.get(i)); + if (m.matches()) { + maxDirectMemory = Long.parseLong(m.group(1)); + switch (m.group(2).charAt(0)) { + case 'G': + case 'g': + maxDirectMemory *= 1073741824L; + break label41; + case 'K': + case 'k': + maxDirectMemory *= 1024L; + break label41; + case 'M': + case 'm': + maxDirectMemory *= 1048576L; + default: + break label41; + } + } + } + } catch (Throwable var9) { + 
logger.error(var9.getMessage()); + } + + if (maxDirectMemory <= 0L) { + maxDirectMemory = Runtime.getRuntime().maxMemory(); + //System.out.println("maxDirectMemory: {} bytes (maybe)" + Long.valueOf(maxDirectMemory)); + } else { + //System.out.println("maxDirectMemory: {} bytes" + Long.valueOf(maxDirectMemory)); + } + return maxDirectMemory; + } + } + + public static long getMaxDirectMemory() { + return MAX_DIRECT_MEMORY; + } + + public static long getMaxHeapMemory() { + return Runtime.getRuntime().maxMemory(); + } + + /** + * @return true when running JVM is having sun's Unsafe package available in it and underlying + * system having unaligned-access capability. + */ + public static boolean unaligned() { + return unaligned; + } + + public static int getInt(Object object, long offset) { + return _UNSAFE.getInt(object, offset); + } + + public static void putInt(Object object, long offset, int value) { + _UNSAFE.putInt(object, offset, value); + } + + public static boolean getBoolean(Object object, long offset) { + return _UNSAFE.getBoolean(object, offset); + } + + public static void putBoolean(Object object, long offset, boolean value) { + _UNSAFE.putBoolean(object, offset, value); + } + + public static byte getByte(Object object, long offset) { + return _UNSAFE.getByte(object, offset); + } + + public static void putByte(Object object, long offset, byte value) { + _UNSAFE.putByte(object, offset, value); + } + + public static short getShort(Object object, long offset) { + return _UNSAFE.getShort(object, offset); + } + + public static void putShort(Object object, long offset, short value) { + _UNSAFE.putShort(object, offset, value); + } + + public static long getLong(Object object, long offset) { + return _UNSAFE.getLong(object, offset); + } + + public static void putLong(Object object, long offset, long value) { + _UNSAFE.putLong(object, offset, value); + } + + public static float getFloat(Object object, long offset) { + return _UNSAFE.getFloat(object, offset); + } + + 
public static void putFloat(Object object, long offset, float value) { + _UNSAFE.putFloat(object, offset, value); + } + + public static double getDouble(Object object, long offset) { + return _UNSAFE.getDouble(object, offset); + } + + public static void putDouble(Object object, long offset, double value) { + _UNSAFE.putDouble(object, offset, value); + } + + + public static Object getObjectVolatile(Object object, long offset) { + return _UNSAFE.getObjectVolatile(object, offset); + } + + public static void putObjectVolatile(Object object, long offset, Object value) { + _UNSAFE.putObjectVolatile(object, offset, value); + } + + public static long allocateMemory(long size) { + return _UNSAFE.allocateMemory(size); + } + + public static void freeMemory(long address) { + _UNSAFE.freeMemory(address); + } + + public static long reallocateMemory(long address, long oldSize, long newSize) { + long newMemory = _UNSAFE.allocateMemory(newSize); + copyMemory(null, address, null, newMemory, oldSize); + freeMemory(address); + return newMemory; + } + + /** + * Uses internal JDK APIs to allocate a DirectByteBuffer while ignoring the JVM's + * MaxDirectMemorySize limit (the default limit is too low and we do not want to require users + * to increase it). 
+ */ + @SuppressWarnings("unchecked") + public static ByteBuffer allocateDirectBuffer(int size) { + try { + Class cls = Class.forName("java.nio.DirectByteBuffer"); + Constructor constructor = cls.getDeclaredConstructor(Long.TYPE, Integer.TYPE); + constructor.setAccessible(true); + Field cleanerField = cls.getDeclaredField("cleaner"); + cleanerField.setAccessible(true); + final long memory = allocateMemory(size); + ByteBuffer buffer = (ByteBuffer) constructor.newInstance(memory, size); + Cleaner cleaner = Cleaner.create(buffer, new Runnable() { + @Override + public void run() { + freeMemory(memory); + } + }); + cleanerField.set(buffer, cleaner); + return buffer; + } catch (Exception e) { + throwException(e); + } + throw new IllegalStateException("unreachable"); + } + + public static void setMemory(long address, byte value, long size) { + _UNSAFE.setMemory(address, size, value); + } + + public static void copyMemory( + Object src, long srcOffset, Object dst, long dstOffset, long length) { + // Check if dstOffset is before or after srcOffset to determine if we should copy + // forward or backwards. This is necessary in case src and dst overlap. + if (dstOffset < srcOffset) { + while (length > 0) { + long size = Math.min(length, UNSAFE_COPY_THRESHOLD); + _UNSAFE.copyMemory(src, srcOffset, dst, dstOffset, size); + length -= size; + srcOffset += size; + dstOffset += size; + } + } else { + srcOffset += length; + dstOffset += length; + while (length > 0) { + long size = Math.min(length, UNSAFE_COPY_THRESHOLD); + srcOffset -= size; + dstOffset -= size; + _UNSAFE.copyMemory(src, srcOffset, dst, dstOffset, size); + length -= size; + } + + } + } + + /** + * Raises an exception bypassing compiler checks for checked exceptions. + */ + public static void throwException(Throwable t) { + _UNSAFE.throwException(t); + } + + /** + * Limits the number of bytes to copy per {@link Unsafe#copyMemory(long, long, long)} to + * allow safepoint polling during a large copy. 
+ */ + private static final long UNSAFE_COPY_THRESHOLD = 1024L * 1024L; + + static { + Unsafe unsafe; + try { + Field unsafeField = Unsafe.class.getDeclaredField("theUnsafe"); + unsafeField.setAccessible(true); + unsafe = (Unsafe) unsafeField.get(null); + } catch (Throwable cause) { + unsafe = null; + } + _UNSAFE = unsafe; + + if (_UNSAFE != null) { + BYTE_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(byte[].class); + SHORT_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(short[].class); + INT_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(int[].class); + LONG_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(long[].class); + FLOAT_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(float[].class); + DOUBLE_ARRAY_OFFSET = _UNSAFE.arrayBaseOffset(double[].class); + } else { + BYTE_ARRAY_OFFSET = 0; + SHORT_ARRAY_OFFSET = 0; + INT_ARRAY_OFFSET = 0; + LONG_ARRAY_OFFSET = 0; + FLOAT_ARRAY_OFFSET = 0; + DOUBLE_ARRAY_OFFSET = 0; + } + } + + public static long objectFieldOffset(Field field) { + return _UNSAFE.objectFieldOffset(field); + } + + public static void putOrderedLong(Object object, long valueOffset, long initialValue) { + _UNSAFE.putOrderedLong(object, valueOffset, initialValue); + } + + public static void putLongVolatile(Object object, long valueOffset, long value) { + _UNSAFE.putLongVolatile(object, valueOffset, value); + } + + public static boolean compareAndSwapLong(Object object, long valueOffset, long expectedValue, long newValue) { + return _UNSAFE.compareAndSwapLong(object, valueOffset, expectedValue, newValue); + } + + public static int arrayBaseOffset(Class aClass) { + return _UNSAFE.arrayBaseOffset(aClass); + } + + public static int arrayIndexScale(Class aClass) { + return _UNSAFE.arrayIndexScale(aClass); + } + + public static void putOrderedInt(Object availableBuffer, long bufferAddress, int flag) { + _UNSAFE.putOrderedInt(availableBuffer, bufferAddress, flag); + } + + public static int getIntVolatile(Object availableBuffer, long bufferAddress) { + return _UNSAFE.getIntVolatile(availableBuffer, 
bufferAddress); + } + + public static Object getObject(Object entries, long l) { + return _UNSAFE.getObject(entries, l); + } + + public static char getChar(Object baseObj, long l) { + return _UNSAFE.getChar(baseObj, l); + } + + public static void putChar(Object baseObj, long l, char value) { + _UNSAFE.putChar(baseObj, l, value); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/array/ByteArrayMethods.java b/src/main/java/io/mycat/memory/unsafe/array/ByteArrayMethods.java new file mode 100644 index 000000000..bdef3d2c8 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/array/ByteArrayMethods.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.array; + + +import io.mycat.memory.unsafe.Platform; + +public class ByteArrayMethods { + + private ByteArrayMethods() { + // Private constructor, since this class only contains static methods. + } + + /** Returns the next number greater or equal num that is power of 2. */ + public static long nextPowerOf2(long num) { + final long highBit = Long.highestOneBit(num); + return (highBit == num) ? 
num : highBit << 1; + } + + public static int roundNumberOfBytesToNearestWord(int numBytes) { + int remainder = numBytes & 0x07; // This is equivalent to `numBytes % 8` + if (remainder == 0) { + return numBytes; + } else { + return numBytes + (8 - remainder); + } + } + + /** + * Optimized byte array equality check for byte arrays. + * @return true if the arrays are equal, false otherwise + */ + public static boolean arrayEquals( + Object leftBase, long leftOffset, Object rightBase, long rightOffset, final long length) { + int i = 0; + while (i <= length - 8) { + if (Platform.getLong(leftBase, leftOffset + i) != + Platform.getLong(rightBase, rightOffset + i)) { + return false; + } + i += 8; + } + while (i < length) { + if (Platform.getByte(leftBase, leftOffset + i) != + Platform.getByte(rightBase, rightOffset + i)) { + return false; + } + i += 1; + } + return true; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/array/CharArray.java b/src/main/java/io/mycat/memory/unsafe/array/CharArray.java new file mode 100644 index 000000000..1e48f4834 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/array/CharArray.java @@ -0,0 +1,87 @@ +package io.mycat.memory.unsafe.array; + +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.memory.MemoryBlock; +import io.mycat.memory.unsafe.memory.mm.MemoryConsumer; + +/** + * @author Hash Zhang + * @version 1.0.0 + * @date 2016/8/8 + */ +public class CharArray { + private static final long WIDTH = 2; + private final MemoryConsumer memoryConsumer; + + private final MemoryBlock memory; + private final Object baseObj; + private final long baseOffset; + + private final long length; + + public CharArray(MemoryBlock memory,MemoryConsumer memoryConsumer) { + assert memory.size() < (long) Integer.MAX_VALUE * 2 : "Array size > 4 billion elements"; + this.memory = memory; + this.baseObj = memory.getBaseObject(); + this.baseOffset = memory.getBaseOffset(); + this.length = memory.size() / WIDTH; + 
this.memoryConsumer = memoryConsumer; + } + + + public MemoryBlock memoryBlock() { + return memory; + } + + public Object getBaseObject() { + return baseObj; + } + + public long getBaseOffset() { + return baseOffset; + } + + /** + * Returns the number of elements this array can hold. + */ + public long size() { + return length; + } + + /** + * Fill this all with 0L. + */ + public void zeroOut() { + for (long off = baseOffset; off < baseOffset + length * WIDTH; off += WIDTH) { + Platform.putLong(baseObj, off, 0); + } + } + + /** + * Sets the value at position {@code index}. + */ + public void set(int index, char value) { + assert index >= 0 : "index (" + index + ") should >= 0"; + assert index < length : "index (" + index + ") should < length (" + length + ")"; + Platform.putChar(baseObj, baseOffset + index * WIDTH, value); + } + + /** + * Returns the value at position {@code index}. + */ + public char get(int index) { + assert index >= 0 : "index (" + index + ") should >= 0"; + assert index < length : "index (" + index + ") should < length (" + length + ")"; + return Platform.getChar(baseObj, baseOffset + index * WIDTH); + } + + public String toString() { + StringBuilder stringBuilder = new StringBuilder((int) this.length); + for (int i = 0; i < this.length; i++) { + stringBuilder.append(get(i)); + } + return stringBuilder.toString(); + } + + //todo:实现from string,使字符串数组可变 +} diff --git a/src/main/java/io/mycat/memory/unsafe/array/LongArray.java b/src/main/java/io/mycat/memory/unsafe/array/LongArray.java new file mode 100644 index 000000000..5e7cc8fcd --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/array/LongArray.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.array; + + +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.memory.MemoryBlock; + +/** + * An array of long values. Compared with native JVM arrays, this: + *
    + *
  • supports using both in-heap and off-heap memory
  • + *
  • has no bound checking, and thus can crash the JVM process when assert is turned off
  • + *
+ */ +public final class LongArray { + + // This is a long so that we perform long multiplications when computing offsets. + private static final long WIDTH = 8; + + private final MemoryBlock memory; + private final Object baseObj; + private final long baseOffset; + + private final long length; + + public LongArray(MemoryBlock memory) { + assert memory.size() < (long) Integer.MAX_VALUE * 8: "Array size > 4 billion elements"; + this.memory = memory; + this.baseObj = memory.getBaseObject(); + this.baseOffset = memory.getBaseOffset(); + this.length = memory.size() / WIDTH; + } + + + + public MemoryBlock memoryBlock() { + return memory; + } + + public Object getBaseObject() { + return baseObj; + } + + public long getBaseOffset() { + return baseOffset; + } + + /** + * Returns the number of elements this array can hold. + */ + public long size() { + return length; + } + + /** + * Fill this all with 0L. + */ + public void zeroOut() { + for (long off = baseOffset; off < baseOffset + length * WIDTH; off += WIDTH) { + Platform.putLong(baseObj, off, 0); + } + } + + /** + * Sets the value at position {@code index}. + */ + public void set(int index, long value) { + assert index >= 0 : "index (" + index + ") should >= 0"; + assert index < length : "index (" + index + ") should < length (" + length + ")"; + Platform.putLong(baseObj, baseOffset + index * WIDTH, value); + } + + /** + * Returns the value at position {@code index}. 
+ */ + public long get(int index) { + assert index >= 0 : "index (" + index + ") should >= 0"; + assert index < length : "index (" + index + ") should < length (" + length + ")"; + return Platform.getLong(baseObj, baseOffset + index * WIDTH); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/bitset/BitSetMethods.java b/src/main/java/io/mycat/memory/unsafe/bitset/BitSetMethods.java new file mode 100644 index 000000000..6bc56384a --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/bitset/BitSetMethods.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.bitset; + + +import io.mycat.memory.unsafe.Platform; + +/** + * Methods for working with fixed-size uncompressed bitsets. + * + * We assume that the bitset data is word-aligned (that is, a multiple of 8 bytes in length). + * + * Each bit occupies exactly one bit of storage. + */ +public final class BitSetMethods { + + private static final long WORD_SIZE = 8; + + private BitSetMethods() { + // Make the default constructor private, since this only holds static methods. + } + + /** + * Sets the bit at the specified index to {@code true}. 
+ */ + public static void set(Object baseObject, long baseOffset, int index) { + assert index >= 0 : "index (" + index + ") should >= 0"; + final long mask = 1L << (index & 0x3f); // mod 64 and shift + final long wordOffset = baseOffset + (index >> 6) * WORD_SIZE; + final long word = Platform.getLong(baseObject, wordOffset); + Platform.putLong(baseObject, wordOffset, word | mask); + } + + /** + * Sets the bit at the specified index to {@code false}. + */ + public static void unset(Object baseObject, long baseOffset, int index) { + assert index >= 0 : "index (" + index + ") should >= 0"; + final long mask = 1L << (index & 0x3f); // mod 64 and shift + final long wordOffset = baseOffset + (index >> 6) * WORD_SIZE; + final long word = Platform.getLong(baseObject, wordOffset); + Platform.putLong(baseObject, wordOffset, word & ~mask); + } + + /** + * Returns {@code true} if the bit is set at the specified index. + */ + public static boolean isSet(Object baseObject, long baseOffset, int index) { + assert index >= 0 : "index (" + index + ") should >= 0"; + final long mask = 1L << (index & 0x3f); // mod 64 and shift + final long wordOffset = baseOffset + (index >> 6) * WORD_SIZE; + final long word = Platform.getLong(baseObject, wordOffset); + return (word & mask) != 0; + } + + /** + * Returns {@code true} if any bit is set. + */ + public static boolean anySet(Object baseObject, long baseOffset, long bitSetWidthInWords) { + long addr = baseOffset; + for (int i = 0; i < bitSetWidthInWords; i++, addr += WORD_SIZE) { + if (Platform.getLong(baseObject, addr) != 0) { + return true; + } + } + return false; + } + + /** + * Returns the index of the first bit that is set to true that occurs on or after the + * specified starting index. If no such bit exists then {@code -1} is returned. + *

+ * To iterate over the true bits in a BitSet, use the following loop: + *

+   * 
+   *  for (long i = bs.nextSetBit(0, sizeInWords); i >= 0;
+   *    i = bs.nextSetBit(i + 1, sizeInWords)) {
+   *    // operate on index i here
+   *  }
+   * 
+   * 
+ * + * @param fromIndex the index to start checking from (inclusive) + * @param bitsetSizeInWords the size of the bitset, measured in 8-byte words + * @return the index of the next set bit, or -1 if there is no such bit + */ + public static int nextSetBit( + Object baseObject, + long baseOffset, + int fromIndex, + int bitsetSizeInWords) { + int wi = fromIndex >> 6; + if (wi >= bitsetSizeInWords) { + return -1; + } + + // Try to find the next set bit in the current word + final int subIndex = fromIndex & 0x3f; + long word = Platform.getLong(baseObject, baseOffset + wi * WORD_SIZE) >> subIndex; + if (word != 0) { + return (wi << 6) + subIndex + Long.numberOfTrailingZeros(word); + } + + // Find the next set bit in the rest of the words + wi += 1; + while (wi < bitsetSizeInWords) { + word = Platform.getLong(baseObject, baseOffset + wi * WORD_SIZE); + if (word != 0) { + return (wi << 6) + Long.numberOfTrailingZeros(word); + } + wi += 1; + } + + return -1; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/hash/Murmur3_x86_32.java b/src/main/java/io/mycat/memory/unsafe/hash/Murmur3_x86_32.java new file mode 100644 index 000000000..7d40b6153 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/hash/Murmur3_x86_32.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.hash; + + +import io.mycat.memory.unsafe.Platform; + +/** + * 32-bit Murmur3 hasher. This is based on Guava's Murmur3_32HashFunction. + */ +public final class Murmur3_x86_32 { + private static final int C1 = 0xcc9e2d51; + private static final int C2 = 0x1b873593; + + private final int seed; + + public Murmur3_x86_32(int seed) { + this.seed = seed; + } + + @Override + public String toString() { + return "Murmur3_32(seed=" + seed + ")"; + } + + public int hashInt(int input) { + return hashInt(input, seed); + } + + public static int hashInt(int input, int seed) { + int k1 = mixK1(input); + int h1 = mixH1(seed, k1); + + return fmix(h1, 4); + } + + public int hashUnsafeWords(Object base, long offset, int lengthInBytes) { + return hashUnsafeWords(base, offset, lengthInBytes, seed); + } + + public static int hashUnsafeWords(Object base, long offset, int lengthInBytes, int seed) { + // This is based on Guava's `Murmur32_Hasher.processRemaining(ByteBuffer)` method. 
+ assert (lengthInBytes % 8 == 0): "lengthInBytes must be a multiple of 8 (word-aligned)"; + int h1 = hashBytesByInt(base, offset, lengthInBytes, seed); + return fmix(h1, lengthInBytes); + } + + public static int hashUnsafeBytes(Object base, long offset, int lengthInBytes, int seed) { + assert (lengthInBytes >= 0): "lengthInBytes cannot be negative"; + int lengthAligned = lengthInBytes - lengthInBytes % 4; + int h1 = hashBytesByInt(base, offset, lengthAligned, seed); + for (int i = lengthAligned; i < lengthInBytes; i++) { + int halfWord = Platform.getByte(base, offset + i); + int k1 = mixK1(halfWord); + h1 = mixH1(h1, k1); + } + return fmix(h1, lengthInBytes); + } + + private static int hashBytesByInt(Object base, long offset, int lengthInBytes, int seed) { + assert (lengthInBytes % 4 == 0); + int h1 = seed; + for (int i = 0; i < lengthInBytes; i += 4) { + int halfWord = Platform.getInt(base, offset + i); + int k1 = mixK1(halfWord); + h1 = mixH1(h1, k1); + } + return h1; + } + + public int hashLong(long input) { + return hashLong(input, seed); + } + + public static int hashLong(long input, int seed) { + int low = (int) input; + int high = (int) (input >>> 32); + + int k1 = mixK1(low); + int h1 = mixH1(seed, k1); + + k1 = mixK1(high); + h1 = mixH1(h1, k1); + + return fmix(h1, 8); + } + + private static int mixK1(int k1) { + k1 *= C1; + k1 = Integer.rotateLeft(k1, 15); + k1 *= C2; + return k1; + } + + private static int mixH1(int h1, int k1) { + h1 ^= k1; + h1 = Integer.rotateLeft(h1, 13); + h1 = h1 * 5 + 0xe6546b64; + return h1; + } + + // Finalization mix - force all bits of a hash block to avalanche + private static int fmix(int h1, int length) { + h1 ^= length; + h1 ^= h1 >>> 16; + h1 *= 0x85ebca6b; + h1 ^= h1 >>> 13; + h1 *= 0xc2b2ae35; + h1 ^= h1 >>> 16; + return h1; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/map/BytesToBytesMap.java b/src/main/java/io/mycat/memory/unsafe/map/BytesToBytesMap.java new file mode 100644 index 000000000..37634f601 --- 
/dev/null +++ b/src/main/java/io/mycat/memory/unsafe/map/BytesToBytesMap.java @@ -0,0 +1,971 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.map; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.io.Closeables; + +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.array.ByteArrayMethods; +import io.mycat.memory.unsafe.array.LongArray; +import io.mycat.memory.unsafe.hash.Murmur3_x86_32; +import io.mycat.memory.unsafe.memory.MemoryBlock; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.memory.mm.MemoryConsumer; +import io.mycat.memory.unsafe.storage.DataNodeDiskManager; +import io.mycat.memory.unsafe.storage.SerializerManager; +import io.mycat.memory.unsafe.utils.sort.UnsafeSorterSpillReader; +import io.mycat.memory.unsafe.utils.sort.UnsafeSorterSpillWriter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.Nullable; +import java.io.File; +import java.io.IOException; +import java.util.Iterator; +import java.util.LinkedList; + +/** + * An append-only hash map where keys and values are contiguous regions of bytes. 
+ * + * This is backed by a power-of-2-sized hash table, using quadratic probing with triangular numbers, + * which is guaranteed to exhaust the space. + * + * The map can support up to 2^29 keys. If the key cardinality is higher than this, you should + * probably be using sorting instead of hashing for better cache locality. + * + * The key and values under the hood are stored together, in the following format: + * Bytes 0 to 4: len(k) (key length in bytes) + len(v) (value length in bytes) + 4 + * Bytes 4 to 8: len(k) + * Bytes 8 to 8 + len(k): key data + * Bytes 8 + len(k) to 8 + len(k) + len(v): value data + * Bytes 8 + len(k) + len(v) to 8 + len(k) + len(v) + 8: pointer to next pair + * + * This means that the first four bytes store the entire record (key + value) length. This format + * is compatible with {@link io.mycat.memory.unsafe.utils.sort.UnsafeExternalSorter}, + * so we can pass records from this map directly into the sorter to sort records in place. + */ +public final class BytesToBytesMap extends MemoryConsumer { + + private final Logger logger = LoggerFactory.getLogger(BytesToBytesMap.class); + + private static final HashMapGrowthStrategy growthStrategy = HashMapGrowthStrategy.DOUBLING; + + private final DataNodeMemoryManager dataNodeMemoryManager; + + /** + * A linked list for tracking all allocated data pages so that we can free all of our memory. + */ + private final LinkedList dataPages = new LinkedList(); + + /** + * The data page that will be used to store keys and values for new hashtable entries. When this + * page becomes full, a new page will be allocated and this pointer will change to point to that + * new page. + */ + private MemoryBlock currentPage = null; + + /** + * Offset into `currentPage` that points to the location where new data can be inserted into + * the page. This does not incorporate the page's base offset. + */ + private long pageCursor = 0; + + /** + * The maximum number of keys that BytesToBytesMap supports. 
The hash table has to be + * power-of-2-sized and its backing Java array can contain at most (1 << 30) elements, + * since that's the largest power-of-2 that's less than Integer.MAX_VALUE. We need two long array + * entries per key, giving us a maximum capacity of (1 << 29). + */ + @VisibleForTesting + public static final int MAX_CAPACITY = (1 << 29); + + // This choice of page table size and page size means that we can address up to 500 gigabytes + // of memory. + + /** + * A single array to store the key and value. + * + * Position {@code 2 * i} in the array is used to track a pointer to the key at index {@code i}, + * while position {@code 2 * i + 1} in the array holds key's full 32-bit hashcode. + */ + @Nullable + private LongArray longArray; + // TODO: we're wasting 32 bits of space here; we can probably store fewer bits of the hashcode + // and exploit word-alignment to use fewer bits to hold the address. This might let us store + // only one long per map entry, increasing the chance that this array will fit in cache at the + // expense of maybe performing more lookups if we have hash collisions. Say that we stored only + // 27 bits of the hashcode and 37 bits of the address. 37 bits is enough to address 1 terabyte + // of RAM given word-alignment. If we use 13 bits of this for our page table, that gives us a + // maximum page size of 2^24 * 8 = ~134 megabytes per page. This change will require us to store + // full base addresses in the page table for off-heap mode so that we can reconstruct the full + // absolute memory addresses. + + /** + * Whether or not the longArray can grow. We will not insert more elements if it's false. + */ + private boolean canGrowArray = true; + + private final double loadFactor; + + /** + * The size of the data pages that hold key and value data. Map entries cannot span multiple + * pages, so this limits the maximum entry size. + */ + private final long pageSizeBytes; + + /** + * Number of keys defined in the map. 
+ */ + private int numKeys; + + /** + * Number of values defined in the map. A key could have multiple values. + */ + private int numValues; + + /** + * The map will be expanded once the number of keys exceeds this threshold. + */ + private int growthThreshold; + + /** + * Mask for truncating hashcodes so that they do not exceed the long array's size. + * This is a strength reduction optimization; we're essentially performing a modulus operation, + * but doing so with a bitmask because this is a power-of-2-sized hash map. + */ + private int mask; + + /** + * Return value of {@link BytesToBytesMap#lookup(Object, long, int)}. + */ + private final Location loc; + + private final boolean enablePerfMetrics; + + private long timeSpentResizingNs = 0; + + private long numProbes = 0; + + private long numKeyLookups = 0; + + private long numHashCollisions = 0; + + private long peakMemoryUsedBytes = 0L; + + private final DataNodeDiskManager blockManager; + private final SerializerManager serializerManager; + private volatile MapIterator destructiveIterator = null; + private LinkedList spillWriters = new LinkedList(); + + public BytesToBytesMap( + DataNodeMemoryManager dataNodeMemoryManager, + DataNodeDiskManager blockManager, + SerializerManager serializerManager, + int initialCapacity, + double loadFactor, + long pageSizeBytes, + boolean enablePerfMetrics) { + super(dataNodeMemoryManager, pageSizeBytes); + this.dataNodeMemoryManager = dataNodeMemoryManager; + this.blockManager = blockManager; + this.serializerManager = serializerManager; + this.loadFactor = loadFactor; + this.loc = new Location(); + this.pageSizeBytes = pageSizeBytes; + this.enablePerfMetrics = enablePerfMetrics; + if (initialCapacity <= 0) { + throw new IllegalArgumentException("Initial capacity must be greater than 0"); + } + if (initialCapacity > MAX_CAPACITY) { + throw new IllegalArgumentException( + "Initial capacity " + initialCapacity + " exceeds maximum capacity of " + MAX_CAPACITY); + } + if 
(pageSizeBytes > DataNodeMemoryManager.MAXIMUM_PAGE_SIZE_BYTES) { + throw new IllegalArgumentException("Page size " + pageSizeBytes + " cannot exceed " + + DataNodeMemoryManager.MAXIMUM_PAGE_SIZE_BYTES); + } + allocate(initialCapacity); + } + + public BytesToBytesMap( + DataNodeMemoryManager dataNodeMemoryManager, + int initialCapacity, + long pageSizeBytes) { + this(dataNodeMemoryManager, initialCapacity, pageSizeBytes, false); + } + + public BytesToBytesMap( + DataNodeMemoryManager dataNodeMemoryManager, + int initialCapacity, + long pageSizeBytes, + boolean enablePerfMetrics) { + this( + dataNodeMemoryManager, + null, + null, + initialCapacity, + 0.70, + pageSizeBytes, + enablePerfMetrics); + } + + /** + * Returns the number of keys defined in the map. + */ + public int numKeys() { return numKeys; } + + /** + * Returns the number of values defined in the map. A key could have multiple values. + */ + public int numValues() { return numValues; } + + public final class MapIterator implements Iterator { + + private int numRecords; + private final Location loc; + + private MemoryBlock currentPage = null; + private int recordsInPage = 0; + private Object pageBaseObject; + private long offsetInPage; + + // If this iterator destructive or not. When it is true, it frees each page as it moves onto + // next one. 
+ private boolean destructive = false; + private UnsafeSorterSpillReader reader = null; + + private MapIterator(int numRecords, Location loc, boolean destructive) { + this.numRecords = numRecords; + this.loc = loc; + this.destructive = destructive; + if (destructive) { + destructiveIterator = this; + } + } + + private void advanceToNextPage() { + synchronized (this) { + int nextIdx = dataPages.indexOf(currentPage) + 1; + if (destructive && currentPage != null) { + dataPages.remove(currentPage); + freePage(currentPage); + nextIdx --; + } + if (dataPages.size() > nextIdx) { + currentPage = dataPages.get(nextIdx); + pageBaseObject = currentPage.getBaseObject(); + offsetInPage = currentPage.getBaseOffset(); + recordsInPage = Platform.getInt(pageBaseObject, offsetInPage); + offsetInPage += 4; + } else { + currentPage = null; + if (reader != null) { + // remove the spill file from disk + File file = spillWriters.removeFirst().getFile(); + if (file != null && file.exists()) { + if (!file.delete()) { + logger.error("Was unable to delete spill file {}", file.getAbsolutePath()); + } + } + } + try { + Closeables.close(reader, /* swallowIOException = */ false); + if(spillWriters.size()>0) { + reader = spillWriters.getFirst().getReader(serializerManager); + } + recordsInPage = -1; + + } catch (IOException e) { + // Scala iterator does not handle exception + Platform.throwException(e); + } + } + } + } + + @Override + public boolean hasNext() { + if (numRecords == 0) { + if (reader != null) { + // remove the spill file from disk + File file = spillWriters.removeFirst().getFile(); + if (file != null && file.exists()) { + if (!file.delete()) { + logger.error("Was unable to delete spill file {}", file.getAbsolutePath()); + } + } + } + } + return numRecords > 0; + } + + @Override + public Location next() { + if (recordsInPage == 0) { + advanceToNextPage(); + } + numRecords--; + if (currentPage != null) { + int totalLength = Platform.getInt(pageBaseObject, offsetInPage); + 
loc.with(currentPage, offsetInPage); + // [total size] [key size] [key] [value] [pointer to next] + offsetInPage += 4 + totalLength + 8; + recordsInPage --; + return loc; + } else { + + + assert(reader != null); +// if(reader == null) +// return null; + + if (!reader.hasNext()) { + advanceToNextPage(); + } + try { + reader.loadNext(); + } catch (IOException e) { + try { + reader.close(); + } catch(IOException e2) { + logger.error("Error while closing spill reader", e2); + } + // Scala iterator does not handle exception + Platform.throwException(e); + } + loc.with(reader.getBaseObject(), reader.getBaseOffset(), reader.getRecordLength()); + return loc; + } + } + + public long spill(long numBytes) throws IOException { + synchronized (this) { + if (!destructive || dataPages.size() == 1) { + return 0L; + } + + + + long released = 0L; + while (dataPages.size() > 0) { + MemoryBlock block = dataPages.getLast(); + // The currentPage is used, cannot be released + if (block == currentPage) { + break; + } + + Object base = block.getBaseObject(); + long offset = block.getBaseOffset(); + int numRecords = Platform.getInt(base, offset); + offset += 4; + final UnsafeSorterSpillWriter writer = + new UnsafeSorterSpillWriter(blockManager, 32 * 1024, numRecords); + while (numRecords > 0) { + int length = Platform.getInt(base, offset); + writer.write(base, offset + 4, length, 0); + offset += 4 + length + 8; + numRecords--; + } + writer.close(); + spillWriters.add(writer); + + dataPages.removeLast(); + released += block.size(); + freePage(block); + + if (released >= numBytes) { + break; + } + } + + return released; + } + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + } + + /** + * Returns an iterator for iterating over the entries of this map. + * + * For efficiency, all calls to `next()` will return the same {@link Location} object. 
+ * + * If any other lookups or operations are performed on this map while iterating over it, including + * `lookup()`, the behavior of the returned iterator is undefined. + */ + public MapIterator iterator() { + return new MapIterator(numValues, loc, false); + } + + /** + * Returns a destructive iterator for iterating over the entries of this map. It frees each page + * as it moves onto next one. Notice: it is illegal to call any method on the map after + * `destructiveIterator()` has been called. + * + * For efficiency, all calls to `next()` will return the same {@link Location} object. + * + * If any other lookups or operations are performed on this map while iterating over it, including + * `lookup()`, the behavior of the returned iterator is undefined. + */ + public MapIterator destructiveIterator() { + return new MapIterator(numValues, loc, true); + } + + /** + * Looks up a key, and return a {@link Location} handle that can be used to map existence + * and read/write values. + * + * This function always return the same {@link Location} instance to avoid object allocation. + */ + public Location lookup(Object keyBase, long keyOffset, int keyLength) { + safeLookup(keyBase, keyOffset, keyLength, loc, + Murmur3_x86_32.hashUnsafeWords(keyBase, keyOffset, keyLength, 42)); + return loc; + } + + /** + * Looks up a key, and return a {@link Location} handle that can be used to map existence + * and read/write values. + * + * This function always return the same {@link Location} instance to avoid object allocation. + */ + public Location lookup(Object keyBase, long keyOffset, int keyLength, int hash) { + safeLookup(keyBase, keyOffset, keyLength, loc, hash); + return loc; + } + + /** + * Looks up a key, and saves the result in provided `loc`. + * + * This is a thread-safe version of `lookup`, could be used by multiple threads. 
+ */ + public void safeLookup(Object keyBase, long keyOffset, int keyLength, Location loc, int hash) { + assert(longArray != null); + + if (enablePerfMetrics) { + numKeyLookups++; + } + + int pos = hash & mask; + int step = 1; + + while (true) { + if (enablePerfMetrics) { + numProbes++; + } + if (longArray.get(pos * 2) == 0) { + // This is a new key. + loc.with(pos, hash, false); + + return; + + } else { + + long stored = longArray.get(pos * 2 + 1); + + /** + * hash相等 + */ + if ((int) (stored) == hash) { + // Full hash code matches.Let's compare the keys for equality. + loc.with(pos,hash,true); + /** + * 比较key的值 + */ + if (loc.getKeyLength() == keyLength) { + final boolean areEqual = ByteArrayMethods.arrayEquals( + keyBase, + keyOffset, + loc.getKeyBase(), + loc.getKeyOffset(), + keyLength + ); + + if (areEqual) { + + return; + } else { + if (enablePerfMetrics) { + numHashCollisions++; + } + } + } + } + } + pos = (pos + step) & mask; + step++; + } + } + + /** + * Handle returned by {@link BytesToBytesMap#lookup(Object, long, int)} function. + */ + public final class Location { + /** An index into the hash map's Long array */ + private int pos; + /** True if this location points to a position where a key is defined, false otherwise */ + private boolean isDefined; + /** + * The hashcode of the most recent key passed to + * {@link BytesToBytesMap#lookup(Object, long, int, int)}. Caching this hashcode here allows us + * to avoid re-hashing the key when storing a value for that key. + */ + private int keyHashcode; + private Object baseObject; // the base object for key and value + private long keyOffset; + private int keyLength; + private long valueOffset; + private int valueLength; + + /** + * Memory page containing the record. Only set if created by {@link BytesToBytesMap#iterator()}. 
+ */ + @Nullable + private MemoryBlock memoryPage; + + private void updateAddressesAndSizes(long fullKeyAddress) { + updateAddressesAndSizes( + dataNodeMemoryManager.getPage(fullKeyAddress), + dataNodeMemoryManager.getOffsetInPage(fullKeyAddress)); + } + + private void updateAddressesAndSizes(final Object base, long offset) { + baseObject = base; + final int totalLength = Platform.getInt(base, offset); + offset += 4; + keyLength = Platform.getInt(base, offset); + offset += 4; + keyOffset = offset; + valueOffset = offset + keyLength; + valueLength = totalLength - keyLength - 4; + } + + private Location with(int pos, int keyHashcode, boolean isDefined) { + assert(longArray != null); + this.pos = pos; + this.isDefined = isDefined; + this.keyHashcode = keyHashcode; + if (isDefined) { + final long fullKeyAddress = longArray.get(pos * 2); + updateAddressesAndSizes(fullKeyAddress); + } + return this; + } + + private Location with(MemoryBlock page, long offsetInPage) { + this.isDefined = true; + this.memoryPage = page; + updateAddressesAndSizes(page.getBaseObject(), offsetInPage); + return this; + } + + /** + * This is only used for spilling + */ + private Location with(Object base, long offset, int length) { + this.isDefined = true; + this.memoryPage = null; + baseObject = base; + keyOffset = offset + 4; + keyLength = Platform.getInt(base, offset); + valueOffset = offset + 4 + keyLength; + valueLength = length - 4 - keyLength; + return this; + } + + /** + * Find the next pair that has the same key as current one. + */ + public boolean nextValue() { + assert isDefined; + long nextAddr = Platform.getLong(baseObject, valueOffset + valueLength); + if (nextAddr == 0) { + return false; + } else { + updateAddressesAndSizes(nextAddr); + return true; + } + } + + /** + * Returns the memory page that contains the current record. + * This is only valid if this is returned by {@link BytesToBytesMap#iterator()}. 
+ */ + public MemoryBlock getMemoryPage() { + return this.memoryPage; + } + + /** + * Returns true if the key is defined at this position, and false otherwise. + */ + public boolean isDefined() { + return isDefined; + } + + /** + * Returns the base object for key. + */ + public Object getKeyBase() { + assert (isDefined); + return baseObject; + } + + /** + * Returns the offset for key. + */ + public long getKeyOffset() { + assert (isDefined); + return keyOffset; + } + + /** + * Returns the base object for value. + */ + public Object getValueBase() { + assert (isDefined); + return baseObject; + } + + /** + * Returns the offset for value. + */ + public long getValueOffset() { + assert (isDefined); + return valueOffset; + } + + /** + * Returns the length of the key defined at this position. + * Unspecified behavior if the key is not defined. + */ + public int getKeyLength() { + assert (isDefined); + return keyLength; + } + + /** + * Returns the length of the value defined at this position. + * Unspecified behavior if the key is not defined. + */ + public int getValueLength() { + assert (isDefined); + return valueLength; + } + + /** + * Append a new value for the key. This method could be called multiple times for a given key. + * The return value indicates whether the put succeeded or whether it failed because additional + * memory could not be acquired. + *

+ * It is only valid to call this method immediately after calling `lookup()` using the same key. + *

+ *

+ * The key and value must be word-aligned (that is, their sizes must multiples of 8). + *

+ *

+ * After calling this method, calls to `get[Key|Value]Address()` and `get[Key|Value]Length` + * will return information on the data stored by this `append` call. + *

+ *

+ * As an example usage, here's the proper way to store a new key: + *

+ *
+     *   Location loc = map.lookup(keyBase, keyOffset, keyLength);
+     *   if (!loc.isDefined()) {
+     *     if (!loc.append(keyBase, keyOffset, keyLength, ...)) {
+     *       // handle failure to grow map (by spilling, for example)
+     *     }
+     *   }
+     * 
+ *

+ * Unspecified behavior if the key is not defined. + *

+ * + * @return true if the put() was successful and false if the put() failed because memory could + * not be acquired. + */ + public boolean append(Object kbase, long koff, int klen, Object vbase, long voff, int vlen) { + assert (klen % 8 == 0); + assert (vlen % 8 == 0); + assert (longArray != null); + + if (numKeys == MAX_CAPACITY + // The map could be reused from last spill (because of no enough memory to grow), + // then we don't try to grow again if hit the `growthThreshold`. + || !canGrowArray && numKeys > growthThreshold) { + return false; + } + + // Here, we'll copy the data into our data pages. Because we only store a relative offset from + // the key address instead of storing the absolute address of the value, the key and value + // must be stored in the same memory page. + // (8 byte key length) (key) (value) (8 byte pointer to next value) + final long recordLength = 8 + klen + vlen + 8; + if (currentPage == null || currentPage.size() - pageCursor < recordLength) { + if (!acquireNewPage(recordLength + 4L)) { + return false; + } + } + + // --- Append the key and value data to the current data page -------------------------------- + final Object base = currentPage.getBaseObject(); + long offset = currentPage.getBaseOffset() + pageCursor; + final long recordOffset = offset; + Platform.putInt(base, offset, klen + vlen + 4); + Platform.putInt(base, offset + 4, klen); + offset += 8; + Platform.copyMemory(kbase, koff, base, offset, klen); + offset += klen; + Platform.copyMemory(vbase, voff, base, offset, vlen); + offset += vlen; + // put this value at the beginning of the list + Platform.putLong(base, offset, isDefined ? 
longArray.get(pos * 2) : 0); + + // --- Update bookkeeping data structures ---------------------------------------------------- + offset = currentPage.getBaseOffset(); + Platform.putInt(base, offset, Platform.getInt(base, offset) + 1); + pageCursor += recordLength; + final long storedKeyAddress = dataNodeMemoryManager.encodePageNumberAndOffset( + currentPage, recordOffset); + longArray.set(pos * 2, storedKeyAddress); + updateAddressesAndSizes(storedKeyAddress); + numValues++; + if (!isDefined) { + numKeys++; + longArray.set(pos * 2 + 1, keyHashcode); + isDefined = true; + + if (numKeys > growthThreshold && longArray.size() < MAX_CAPACITY) { + try { + growAndRehash(); + } catch (OutOfMemoryError oom) { + canGrowArray = false; + } + } + } + return true; + } + } + + /** + * Acquire a new page from the memory manager. + * @return whether there is enough space to allocate the new page. + */ + private boolean acquireNewPage(long required) { + try { + currentPage = allocatePage(required); + } catch (OutOfMemoryError e) { + return false; + } + dataPages.add(currentPage); + Platform.putInt(currentPage.getBaseObject(), currentPage.getBaseOffset(), 0); + pageCursor = 4; + return true; + } + + @Override + public long spill(long size, MemoryConsumer trigger) throws IOException { + if (trigger != this && destructiveIterator != null) { + return destructiveIterator.spill(size); + } + return 0L; + } + + /** + * Allocate new data structures for this map. When calling this outside of the constructor, + * make sure to keep references to the old data structures so that you can free them. 
+ * + * @param capacity the new map capacity + */ + private void allocate(int capacity) { + assert (capacity >= 0); + capacity = Math.max((int) Math.min(MAX_CAPACITY, ByteArrayMethods.nextPowerOf2(capacity)), 64); + assert (capacity <= MAX_CAPACITY); + longArray = allocateLongArray(capacity * 2); + longArray.zeroOut(); + + this.growthThreshold = (int) (capacity * loadFactor); + this.mask = capacity - 1; + } + + /** + * Free all allocated memory associated with this map, including the storage for keys and values + * as well as the hash map array itself. + * + * This method is idempotent and can be called multiple times. + */ + public void free() { + updatePeakMemoryUsed(); + if (longArray != null) { + freeLongArray(longArray); + longArray = null; + } + Iterator dataPagesIterator = dataPages.iterator(); + while (dataPagesIterator.hasNext()) { + MemoryBlock dataPage = dataPagesIterator.next(); + dataPagesIterator.remove(); + freePage(dataPage); + } + assert(dataPages.isEmpty()); + + while (!spillWriters.isEmpty()) { + File file = spillWriters.removeFirst().getFile(); + if (file != null && file.exists()) { + if (!file.delete()) { + logger.error("Was unable to delete spill file {}", file.getAbsolutePath()); + } + } + } + } + + public DataNodeMemoryManager getDataNodeMemoryManager() { + return dataNodeMemoryManager; + } + + public long getPageSizeBytes() { + return pageSizeBytes; + } + + /** + * Returns the total amount of memory, in bytes, consumed by this map's managed structures. + */ + public long getTotalMemoryConsumption() { + long totalDataPagesSize = 0L; + for (MemoryBlock dataPage : dataPages) { + totalDataPagesSize += dataPage.size(); + } + return totalDataPagesSize + ((longArray != null) ? longArray.memoryBlock().size() : 0L); + } + + private void updatePeakMemoryUsed() { + long mem = getTotalMemoryConsumption(); + if (mem > peakMemoryUsedBytes) { + peakMemoryUsedBytes = mem; + } + } + + /** + * Return the peak memory used so far, in bytes. 
+ */ + public long getPeakMemoryUsedBytes() { + updatePeakMemoryUsed(); + return peakMemoryUsedBytes; + } + + /** + * Returns the total amount of time spent resizing this map (in nanoseconds). + */ + public long getTimeSpentResizingNs() { + if (!enablePerfMetrics) { + throw new IllegalStateException(); + } + return timeSpentResizingNs; + } + + /** + * Returns the average number of probes per key lookup. + */ + public double getAverageProbesPerLookup() { + if (!enablePerfMetrics) { + throw new IllegalStateException(); + } + return (1.0 * numProbes) / numKeyLookups; + } + + public long getNumHashCollisions() { + if (!enablePerfMetrics) { + throw new IllegalStateException(); + } + return numHashCollisions; + } + + @VisibleForTesting + public int getNumDataPages() { + return dataPages.size(); + } + + /** + * Returns the underline long[] of longArray. + */ + public LongArray getArray() { + assert(longArray != null); + return longArray; + } + + /** + * Reset this map to initialized state. + */ + public void reset() { + numKeys = 0; + numValues = 0; + longArray.zeroOut(); + + while (dataPages.size() > 0) { + MemoryBlock dataPage = dataPages.removeLast(); + freePage(dataPage); + } + currentPage = null; + pageCursor = 0; + } + + /** + * Grows the size of the hash table and re-hash everything. 
+ */ + @VisibleForTesting + void growAndRehash() { + assert(longArray != null); + + long resizeStartTime = -1; + if (enablePerfMetrics) { + resizeStartTime = System.nanoTime(); + } + // Store references to the old data structures to be used when we re-hash + final LongArray oldLongArray = longArray; + final int oldCapacity = (int) oldLongArray.size() / 2; + + // Allocate the new data structures + allocate(Math.min(growthStrategy.nextCapacity(oldCapacity), MAX_CAPACITY)); + + // Re-mask (we don't recompute the hashcode because we stored all 32 bits of it) + for (int i = 0; i < oldLongArray.size(); i += 2) { + final long keyPointer = oldLongArray.get(i); + if (keyPointer == 0) { + continue; + } + final int hashcode = (int) oldLongArray.get(i + 1); + int newPos = hashcode & mask; + int step = 1; + while (longArray.get(newPos * 2) != 0) { + newPos = (newPos + step) & mask; + step++; + } + longArray.set(newPos * 2, keyPointer); + longArray.set(newPos * 2 + 1, hashcode); + } + + freeLongArray(oldLongArray); + + if (enablePerfMetrics) { + timeSpentResizingNs += System.nanoTime() - resizeStartTime; + } + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/map/HashMapGrowthStrategy.java b/src/main/java/io/mycat/memory/unsafe/map/HashMapGrowthStrategy.java new file mode 100644 index 000000000..38cb4eb6a --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/map/HashMapGrowthStrategy.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.map; + +/** + * Interface that defines how we can grow the size of a hash map when it is over a threshold. + */ +public interface HashMapGrowthStrategy { + + int nextCapacity(int currentCapacity); + + /** + * Double the size of the hash map every time. + */ + HashMapGrowthStrategy DOUBLING = new Doubling(); + + class Doubling implements HashMapGrowthStrategy { + @Override + public int nextCapacity(int currentCapacity) { + assert (currentCapacity > 0); + // Guard against overflow + return (currentCapacity * 2 > 0) ? (currentCapacity * 2) : Integer.MAX_VALUE; + } + } + +} diff --git a/src/main/java/io/mycat/memory/unsafe/map/UnsafeFixedWidthAggregationMap.java b/src/main/java/io/mycat/memory/unsafe/map/UnsafeFixedWidthAggregationMap.java new file mode 100644 index 000000000..b85524c11 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/map/UnsafeFixedWidthAggregationMap.java @@ -0,0 +1,288 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.map; + +import io.mycat.MycatServer; +import io.mycat.memory.unsafe.KVIterator; +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.hash.Murmur3_x86_32; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.row.StructType; +import io.mycat.memory.unsafe.row.UnsafeRow; +import io.mycat.memory.unsafe.utils.sort.UnsafeKVExternalSorter; +import org.apache.log4j.Logger; + + +import java.io.IOException; + +/** + * Modify by zagnix ,add put find func + * Unsafe-based HashMap for performing aggregations where the aggregated values are fixed-width. + * This map supports a maximum of 2 billion keys. + */ +public final class UnsafeFixedWidthAggregationMap { + private static Logger LOGGER = Logger.getLogger(UnsafeFixedWidthAggregationMap.class); + + /** + * An empty aggregation buffer, encoded in UnsafeRow format. When inserting a new key into the + * map, we copy this buffer and use it as the value. + */ + private final byte[] emptyAggregationBuffer; + private final StructType aggregationBufferSchema; + private final StructType groupingKeySchema; + + /** + * A hashmap which maps from opaque bytearray keys to bytearray values. 
+ */ + private final BytesToBytesMap map; + + /** + * Re-used pointer to the current aggregation buffer + */ + private final UnsafeRow currentAggregationBuffer; + + private final boolean enablePerfMetrics; + + private final static int SEED = 42; + + /** + * @return true if UnsafeFixedWidthAggregationMap supports aggregation buffers with the given + * schema, false otherwise. + */ + public static boolean supportsAggregationBufferSchema(StructType schema) { + return true; + } + + /** + * Create a new UnsafeFixedWidthAggregationMap. + * + * @param emptyAggregationBuffer the default value for new keys (a "zero" of the agg. function) + * @param aggregationBufferSchema the schema of the aggregation buffer, used for row conversion. + * @param groupingKeySchema the schema of the grouping key, used for row conversion. + * @param dataNodeMemoryManager the memory manager used to allocate our Unsafe memory structures. + * @param initialCapacity the initial capacity of the map (a sizing hint to avoid re-hashing). + * @param pageSizeBytes the data page size, in bytes; limits the maximum record size. + * @param enablePerfMetrics if true, performance metrics will be recorded (has minor perf impact) + */ + public UnsafeFixedWidthAggregationMap( + UnsafeRow emptyAggregationBuffer, + StructType aggregationBufferSchema, + StructType groupingKeySchema, + DataNodeMemoryManager dataNodeMemoryManager, + int initialCapacity, + long pageSizeBytes, + boolean enablePerfMetrics) { + this.aggregationBufferSchema = aggregationBufferSchema; + + this.currentAggregationBuffer = new UnsafeRow(aggregationBufferSchema.length()); + this.groupingKeySchema = groupingKeySchema; + this.map = new BytesToBytesMap(dataNodeMemoryManager,initialCapacity, pageSizeBytes, enablePerfMetrics); + this.enablePerfMetrics = enablePerfMetrics; + this.emptyAggregationBuffer = emptyAggregationBuffer.getBytes() ; + } + + /** + * Return the aggregation buffer for the current group. 
For efficiency, all calls to this method + * return the same object. If additional memory could not be allocated, then this method will + * signal an error by returning null. + */ + public UnsafeRow getAggregationBuffer(UnsafeRow groupingKey) { + return getAggregationBufferFromUnsafeRow(groupingKey); + } + + public UnsafeRow getAggregationBufferFromUnsafeRow(UnsafeRow key) { + + return getAggregationBufferFromUnsafeRow(key, + Murmur3_x86_32.hashUnsafeWords(key.getBaseObject(),key.getBaseOffset(), + key.getSizeInBytes(),SEED)); + } + + public boolean put(UnsafeRow key, UnsafeRow value){ + + int hash = Murmur3_x86_32.hashUnsafeWords(key.getBaseObject(), + key.getBaseOffset(), key.getSizeInBytes(),SEED); + + // Probe our map using the serialized key + final BytesToBytesMap.Location loc = map.lookup( + key.getBaseObject(), + key.getBaseOffset(), + key.getSizeInBytes(), + hash); + + if (!loc.isDefined()) { + // This is the first time that we've seen this grouping key, so we'll insert a copy of the + // empty aggregation buffer into the map: + boolean putSucceeded = loc.append( + key.getBaseObject(), + key.getBaseOffset(), + key.getSizeInBytes(), + value.getBaseObject(), + value.getBaseOffset(), + value.getSizeInBytes()); + + if (!putSucceeded) { + return false; + } + } + + return true; + } + + + public boolean find(UnsafeRow key){ + + int hash = Murmur3_x86_32.hashUnsafeWords(key.getBaseObject(),key.getBaseOffset(), key.getSizeInBytes(),42); + // Probe our map using the serialized key + final BytesToBytesMap.Location loc = map.lookup(key.getBaseObject(), + key.getBaseOffset(), key.getSizeInBytes(), hash); + + if (!loc.isDefined()) { + return false; + } + return true; + } + + + public UnsafeRow getAggregationBufferFromUnsafeRow(UnsafeRow key, int hash) { + // Probe our map using the serialized key + final BytesToBytesMap.Location loc = map.lookup( + key.getBaseObject(), + key.getBaseOffset(), + key.getSizeInBytes(), + hash); + + if (!loc.isDefined()) { + // This is the 
first time that we've seen this grouping key, so we'll insert a copy of the + // empty aggregation buffer into the map: + boolean putSucceeded = loc.append( + key.getBaseObject(), + key.getBaseOffset(), + key.getSizeInBytes(), + emptyAggregationBuffer, + Platform.BYTE_ARRAY_OFFSET, + emptyAggregationBuffer.length + ); + + if (!putSucceeded) { + return null; + } + } + + // Reset the pointer to point to the value that we just stored or looked up: + currentAggregationBuffer.pointTo( + loc.getValueBase(), + loc.getValueOffset(), + loc.getValueLength() + ); + return currentAggregationBuffer; + } + + /** + * Returns an iterator over the keys and values in this map. This uses destructive iterator of + * BytesToBytesMap. So it is illegal to call any other method on this map after `iterator()` has + * been called. + * + * For efficiency, each call returns the same object. + */ + public KVIterator iterator() { + return new KVIterator() { + + private final BytesToBytesMap.MapIterator mapLocationIterator = map.iterator(); + + private final UnsafeRow key = new UnsafeRow(groupingKeySchema.length()); + private final UnsafeRow value = new UnsafeRow(aggregationBufferSchema.length()); + + @Override + public boolean next() { + if (mapLocationIterator.hasNext()) { + final BytesToBytesMap.Location loc = mapLocationIterator.next(); + if (loc == null) + return false; + key.pointTo( + loc.getKeyBase(), + loc.getKeyOffset(), + loc.getKeyLength() + ); + value.pointTo( + loc.getValueBase(), + loc.getValueOffset(), + loc.getValueLength() + ); + return true; + } else { + return false; + } + } + + @Override + public UnsafeRow getKey() { + return key; + } + + @Override + public UnsafeRow getValue() { + return value; + } + + @Override + public void close() { + } + }; + } + + /** + * Return the peak memory used so far, in bytes. + */ + public long getPeakMemoryUsedBytes() { + return map.getPeakMemoryUsedBytes(); + } + + /** + * Free the memory associated with this map. 
This is idempotent and can be called multiple times. + */ + public void free() { + map.free(); + } + + @SuppressWarnings("UseOfSystemOutOrSystemErr") + public void printPerfMetrics() { + if (!enablePerfMetrics) { + throw new IllegalStateException("Perf metrics not enabled"); + } + System.out.println("Average probes per lookup: " + map.getAverageProbesPerLookup()); + System.out.println("Number of hash collisions: " + map.getNumHashCollisions()); + System.out.println("Time spent resizing (ns): " + map.getTimeSpentResizingNs()); + System.out.println("Total memory consumption (bytes): " + map.getTotalMemoryConsumption()); + } + + /** + * Sorts the map's records in place, spill them to disk, and returns an [[UnsafeKVExternalSorter]] + * + * Note that the map will be reset for inserting new records, and the returned sorter can NOT be + * used to insert records. + */ + public UnsafeKVExternalSorter destructAndCreateExternalSorter() throws IOException { + return new UnsafeKVExternalSorter( + groupingKeySchema, + aggregationBufferSchema, + MycatServer.getInstance().getMyCatMemory().getBlockManager(), + MycatServer.getInstance().getMyCatMemory().getSerializerManager(), + map.getPageSizeBytes(), + map); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/memory/HeapMemoryAllocator.java b/src/main/java/io/mycat/memory/unsafe/memory/HeapMemoryAllocator.java new file mode 100644 index 000000000..51fcc4e7b --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/memory/HeapMemoryAllocator.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.memory; + + +import io.mycat.memory.unsafe.Platform; + +import javax.annotation.concurrent.GuardedBy; +import java.lang.ref.WeakReference; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.Map; + + +/** + * A simple {@link MemoryAllocator} that can allocate up to 16GB using a JVM long primitive array. + */ +public class HeapMemoryAllocator implements MemoryAllocator { + + @GuardedBy("this") + private final Map>> bufferPoolsBySize = + new HashMap>>(); + + private static final int POOLING_THRESHOLD_BYTES = 1024 * 1024; + + /** + * Returns true if allocations of the given size should go through the pooling mechanism and + * false otherwise. + */ + private boolean shouldPool(long size) { + // Very small allocations are less likely to benefit from pooling. 
+ return size >= POOLING_THRESHOLD_BYTES; + } + + @Override + public MemoryBlock allocate(long size) throws OutOfMemoryError { + if (shouldPool(size)) { + synchronized (this) { + final LinkedList> pool = bufferPoolsBySize.get(size); + if (pool != null) { + while (!pool.isEmpty()) { + final WeakReference blockReference = pool.pop(); + final MemoryBlock memory = blockReference.get(); + if (memory != null) { + assert (memory.size() == size); + return memory; + } + } + bufferPoolsBySize.remove(size); + } + } + } + long[] array = new long[(int) ((size + 7) / 8)]; + return new MemoryBlock(array, Platform.LONG_ARRAY_OFFSET, size); + } + + @Override + public void free(MemoryBlock memory) { + final long size = memory.size(); + if (shouldPool(size)) { + synchronized (this) { + LinkedList> pool = bufferPoolsBySize.get(size); + if (pool == null) { + pool = new LinkedList>(); + bufferPoolsBySize.put(size, pool); + } + pool.add(new WeakReference(memory)); + } + } else { + // Do nothing + } + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/memory/MemoryAllocator.java b/src/main/java/io/mycat/memory/unsafe/memory/MemoryAllocator.java new file mode 100644 index 000000000..d0de6b920 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/memory/MemoryAllocator.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.memory; + +public interface MemoryAllocator { + + /** + * Allocates a contiguous block of memory. Note that the allocated memory is not guaranteed + * to be zeroed out (call `zero()` on the result if this is necessary). + */ + MemoryBlock allocate(long size) throws OutOfMemoryError; + + void free(MemoryBlock memory); + + MemoryAllocator UNSAFE = new UnsafeMemoryAllocator(); + + MemoryAllocator HEAP = new HeapMemoryAllocator(); +} diff --git a/src/main/java/io/mycat/memory/unsafe/memory/MemoryBlock.java b/src/main/java/io/mycat/memory/unsafe/memory/MemoryBlock.java new file mode 100644 index 000000000..51a7e2948 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/memory/MemoryBlock.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.memory; + + + +import io.mycat.memory.unsafe.Platform; + +import javax.annotation.Nullable; + +/** + * A consecutive block of memory, starting at a {@link MemoryLocation} with a fixed size. 
+ */ +public class MemoryBlock extends MemoryLocation { + + private final long length; + + /** + * Optional page number; used when this MemoryBlock represents a page allocated by a + * DataNodeMemoryManager. This field is public so that it can be modified by the DataNodeMemoryManager, + * which lives in a different package. + */ + public int pageNumber = -1; + + public MemoryBlock(@Nullable Object obj, long offset, long length) { + super(obj, offset); + this.length = length; + } + + /** + * Returns the size of the memory block. + */ + public long size() { + return length; + } + + /** + * Creates a memory block pointing to the memory used by the long array. + */ + public static MemoryBlock fromLongArray(final long[] array) { + return new MemoryBlock(array, Platform.LONG_ARRAY_OFFSET, array.length * 8); + } + + +} diff --git a/src/main/java/io/mycat/memory/unsafe/memory/MemoryLocation.java b/src/main/java/io/mycat/memory/unsafe/memory/MemoryLocation.java new file mode 100644 index 000000000..3ba9edcf3 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/memory/MemoryLocation.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.mycat.memory.unsafe.memory; + +import javax.annotation.Nullable; + +/** + * A memory location. Tracked either by a memory address (with off-heap allocation), + * or by an offset from a JVM object (in-heap allocation). + */ +public class MemoryLocation { + + @Nullable + Object obj; + + long offset; + + public MemoryLocation(@Nullable Object obj, long offset) { + this.obj = obj; + this.offset = offset; + } + + public MemoryLocation() { + this(null, 0); + } + + public void setObjAndOffset(Object newObj, long newOffset) { + this.obj = newObj; + this.offset = newOffset; + } + + public final Object getBaseObject() { + return obj; + } + + public final long getBaseOffset() { + return offset; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/memory/UnsafeMemoryAllocator.java b/src/main/java/io/mycat/memory/unsafe/memory/UnsafeMemoryAllocator.java new file mode 100644 index 000000000..4f3427076 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/memory/UnsafeMemoryAllocator.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.mycat.memory.unsafe.memory; + + +import io.mycat.memory.unsafe.Platform; + +/** + * A simple {@link MemoryAllocator} that uses {@code Unsafe} to allocate off-heap memory. + */ +public class UnsafeMemoryAllocator implements MemoryAllocator { + + @Override + public MemoryBlock allocate(long size) throws OutOfMemoryError { + long address = Platform.allocateMemory(size); + return new MemoryBlock(null, address, size); + } + + @Override + public void free(MemoryBlock memory) { + assert (memory.obj == null) : + "baseObject not null; are you trying to use the off-heap allocator to free on-heap memory?"; + Platform.freeMemory(memory.offset); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/memory/mm/DataNodeMemoryManager.java b/src/main/java/io/mycat/memory/unsafe/memory/mm/DataNodeMemoryManager.java new file mode 100644 index 000000000..d815dbc55 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/memory/mm/DataNodeMemoryManager.java @@ -0,0 +1,422 @@ +package io.mycat.memory.unsafe.memory.mm; + +import com.google.common.annotations.VisibleForTesting; + +import io.mycat.memory.unsafe.memory.MemoryBlock; +import io.mycat.memory.unsafe.utils.JavaUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.concurrent.GuardedBy; +import java.io.IOException; +import java.util.Arrays; +import java.util.BitSet; +import java.util.HashSet; + +/** + * Modify by zagnix + * Manages the memory allocated by an individual thread. + *

+ * Most of the complexity in this class deals with encoding of off-heap addresses into 64-bit longs. + * In off-heap mode, memory can be directly addressed with 64-bit longs. In on-heap mode, memory is + * addressed by the combination of a base Object reference and a 64-bit offset within that object. + * This is a problem when we want to store pointers to data structures inside of other structures, + * such as record pointers inside hashmaps or sorting buffers. Even if we decided to use 128 bits + * to address memory, we can't just store the address of the base object since it's not guaranteed + * to remain stable as the heap gets reorganized due to GC. + *

+ * Instead, we use the following approach to encode record pointers in 64-bit longs: for off-heap + * mode, just store the raw address, and for on-heap mode use the upper 13 bits of the address to + * store a "page number" and the lower 51 bits to store an offset within this page. These page + * numbers are used to index into a "page table" array inside of the MemoryManager in order to + * retrieve the base object. + *

+ * This allows us to address 8192 pages. In on-heap mode, the maximum page size is limited by the + * maximum size of a long[] array, allowing us to address 8192 * 2^32 * 8 bytes, which is + * approximately 35 terabytes of memory. + */ +public class DataNodeMemoryManager { + + private final Logger logger = LoggerFactory.getLogger(DataNodeMemoryManager.class); + + /** The number of bits used to address the page table. */ + private static final int PAGE_NUMBER_BITS = 13; + + /** The number of bits used to encode offsets in data pages. */ + public static final int OFFSET_BITS = 64 - PAGE_NUMBER_BITS; // 51 + + /** The number of entries in the page table. */ + private static final int PAGE_TABLE_SIZE = 1 << PAGE_NUMBER_BITS; + + /** + * Maximum supported data page size (in bytes). In principle, the maximum addressable page size is + * (1L << OFFSET_BITS) bytes, which is 2+ petabytes. However, the on-heap allocator's + * maximum page size is limited by the maximum amount of data that can be stored in a long[] + * array, which is (2^32 - 1) * 8 bytes (or 16 gigabytes). Therefore, we cap this at 16 gigabytes. + */ + public static final long MAXIMUM_PAGE_SIZE_BYTES = ((1L << 31) - 1) * 8L; + + /** Bit mask for the lower 51 bits of a long. */ + private static final long MASK_LONG_LOWER_51_BITS = 0x7FFFFFFFFFFFFL; + + /** Bit mask for the upper 13 bits of a long */ + private static final long MASK_LONG_UPPER_13_BITS = ~MASK_LONG_LOWER_51_BITS; + + /** + * Similar to an operating system's page table, this array maps page numbers into base object + * pointers, allowing us to translate between the hashtable's internal 64-bit address + * representation and the baseObject+offset representation which we use to support both in- and + * off-heap addresses. When using an off-heap allocator, every entry in this map will be `null`. + * When using an in-heap allocator, the entries in this map will point to pages' base objects. 
+ * Entries are added to this map as new data pages are allocated. + */ + private final MemoryBlock[] pageTable = new MemoryBlock[PAGE_TABLE_SIZE]; + + /** + * Bitmap for tracking free pages. + */ + private final BitSet allocatedPages = new BitSet(PAGE_TABLE_SIZE); + + private final MemoryManager memoryManager; + + private final long connectionAttemptId; + + /** + * Tracks whether we're in-heap or off-heap. For off-heap, we short-circuit most of these methods + * without doing any masking or lookups. Since this branching should be well-predicted by the JIT, + * this extra layer of indirection / abstraction hopefully shouldn't be too expensive. + */ + public final MemoryMode tungstenMemoryMode; + + /** + * Tracks spillable memory consumers. + */ + @GuardedBy("this") + private final HashSet consumers; + + /** + * The amount of memory that is acquired but not used. + */ + private volatile long acquiredButNotUsed = 0L; + + /** + * Construct a new DataNodeMemoryManager. + */ + public DataNodeMemoryManager(MemoryManager memoryManager, long connectionAttemptId) { + this.tungstenMemoryMode = memoryManager.tungstenMemoryMode(); + this.memoryManager = memoryManager; + this.connectionAttemptId = connectionAttemptId; + this.consumers = new HashSet(); + } + + /** + * Acquire N bytes of memory for a consumer. If there is no enough memory, it will call + * spill() of consumers to release more memory. + * + * @return number of bytes successfully granted (<= N). + */ + public long acquireExecutionMemory(long required,MemoryMode mode,MemoryConsumer consumer) throws InterruptedException { + + assert(required >= 0); + // If we are allocating Tungsten pages off-heap and receive a request to allocate on-heap + // memory here, then it may not make sense to spill since that would only end up freeing + // off-heap memory. This is subject to change, though, so it may be risky to make this + // optimization now in case we forget to undo it late when making changes. 
+ synchronized (this) { + long got = memoryManager.acquireExecutionMemory(required,connectionAttemptId, mode); + // Try to release memory from other consumers first, then we can reduce the frequency of + // spilling, avoid to have too many spilled files. + if (got < required) { + // Call spill() on other consumers to release memory + for (MemoryConsumer c: consumers) { + if (c != consumer && c.getUsed() > 0) { + try { + /** + * 调用spill函数,写数据到磁盘中 + */ + long released = c.spill(required - got, consumer); + if (released > 0 && mode == tungstenMemoryMode) { + logger.info("Thread "+connectionAttemptId+" released "+ JavaUtils.bytesToString(released) + + " from "+ c +" for" + consumer); + got += memoryManager.acquireExecutionMemory(required - got, connectionAttemptId, mode); + if (got >= required) { + break; + } + } + } catch (IOException e) { + logger.error("error while calling spill() on " + c, e); + throw new OutOfMemoryError("error while calling spill() on " + c + " : " + + e.getMessage()); + } + } + } + } + + // call spill() on itself + if (got < required && consumer != null) { + try { + long released = consumer.spill(required - got, consumer); + if (released > 0 && mode == tungstenMemoryMode) { + logger.info("Thread " + connectionAttemptId + + " released "+ JavaUtils.bytesToString(released) +"from itself ("+consumer+ ")"); + got += memoryManager.acquireExecutionMemory(required - got, connectionAttemptId, mode); + } + } catch (IOException e) { + logger.error("error while calling spill() on " + consumer, e); + throw new OutOfMemoryError("error while calling spill() on " + consumer + " : " + + e.getMessage()); + + } + } + + if (consumer != null) { + consumers.add(consumer); + } + // logger.info("Thread" + connectionAttemptId + " acquire "+ JavaUtils.bytesToString(got) +" for "+ consumer+""); + return got; + } + } + + /** + * Release N bytes of execution memory for a MemoryConsumer. 
+ */ + public void releaseExecutionMemory(long size, MemoryMode mode, MemoryConsumer consumer) { + logger.debug ("Thread" + connectionAttemptId + " release "+ JavaUtils.bytesToString(size) +" from "+ consumer+""); + + memoryManager.releaseExecutionMemory(size, connectionAttemptId, mode); + } + + /** + * Dump the memory usage of all consumers. + */ + public void showMemoryUsage() { + logger.info("Memory used in Thread " + connectionAttemptId); + synchronized (this) { + long memoryAccountedForByConsumers = 0; + for (MemoryConsumer c: consumers) { + long totalMemUsage = c.getUsed(); + memoryAccountedForByConsumers += totalMemUsage; + if (totalMemUsage > 0) { + logger.info("Acquired by " + c + ": " + JavaUtils.bytesToString(totalMemUsage)); + } + } + long memoryNotAccountedFor = + memoryManager.getExecutionMemoryUsageForConnection(connectionAttemptId) - memoryAccountedForByConsumers; + logger.info( + "{} bytes of memory were used by task {} but are not associated with specific consumers", + memoryNotAccountedFor, connectionAttemptId); + logger.info( + "{} bytes of memory are used for execution and {} bytes of memory are used for storage", + memoryManager.executionMemoryUsed()); + } + } + + /** + * Return the page size in bytes. + */ + public long pageSizeBytes() { + return memoryManager.pageSizeBytes(); + } + + /** + * Allocate a block of memory that will be tracked in the MemoryManager's page table; this is + * intended for allocating large blocks of Tungsten memory that will be shared between operators. + * + * Returns `null` if there was not enough memory to allocate the page. May return a page that + * contains fewer bytes than requested, so callers should verify the size of returned pages. 
+ */ + public MemoryBlock allocatePage(long size, MemoryConsumer consumer) { + if (size > MAXIMUM_PAGE_SIZE_BYTES) { + throw new IllegalArgumentException( + "Cannot allocate a page with more than " + MAXIMUM_PAGE_SIZE_BYTES + " bytes"); + } + + /** + * 这里spill到磁盘中,释放内存空间 + */ + long acquired = 0; + try { + acquired = acquireExecutionMemory(size,tungstenMemoryMode, consumer); + } catch (InterruptedException e) { + logger.error(e.getMessage()); + } + + if (acquired <= 0) { + return null; + } + + final int pageNumber; + + synchronized (this) { + pageNumber = allocatedPages.nextClearBit(0); + if (pageNumber >= PAGE_TABLE_SIZE) { + releaseExecutionMemory(acquired, tungstenMemoryMode, consumer); + throw new IllegalStateException( + "Have already allocated a maximum of " + PAGE_TABLE_SIZE + " pages"); + } + allocatedPages.set(pageNumber); + } + + + + MemoryBlock page = null; + + try { + page = memoryManager.tungstenMemoryAllocator().allocate(acquired); + } catch (OutOfMemoryError e) { + logger.warn("Failed to allocate a page ({} bytes), try again.", acquired); + // there is no enough memory actually, it means the actual free memory is smaller than + // MemoryManager thought, we should keep the acquired memory. + synchronized (this) { + acquiredButNotUsed += acquired; + allocatedPages.clear(pageNumber); + } + // this could trigger spilling to free some pages. + return allocatePage(size, consumer); + } + + page.pageNumber = pageNumber; + pageTable[pageNumber] = page; + +// logger.info("Allocate page number " + pageNumber + " ("+ acquired +" bytes)"); + + return page; + } + + /** + * Free a block of memory allocated via {@link DataNodeMemoryManager#allocatePage}. 
+ */ + public void freePage(MemoryBlock page, MemoryConsumer consumer) { + + assert (page.pageNumber != -1) : + "Called freePage() on memory that wasn't allocated with allocatePage()"; + assert(allocatedPages.get(page.pageNumber)); + pageTable[page.pageNumber] = null; + + synchronized (this) { + allocatedPages.clear(page.pageNumber); + } + + logger.trace("Freed page number "+ page.pageNumber +" ("+page.size() +" bytes)"); + + long pageSize = page.size(); + memoryManager.tungstenMemoryAllocator().free(page); + releaseExecutionMemory(pageSize,tungstenMemoryMode,consumer); + } + + /** + * Given a memory page and offset within that page, encode this address into a 64-bit long. + * This address will remain valid as long as the corresponding page has not been freed. + * + * @param page a data page allocated by {@link DataNodeMemoryManager#allocatePage}/ + * @param offsetInPage an offset in this page which incorporates the base offset. In other words, + * this should be the value that you would pass as the base offset into an + * UNSAFE call (e.g. page.baseOffset() + something). + * @return an encoded page address. + */ + public long encodePageNumberAndOffset(MemoryBlock page, long offsetInPage) { + + if (tungstenMemoryMode == MemoryMode.OFF_HEAP) { + // In off-heap mode, an offset is an absolute address that may require a full 64 bits to + // encode. Due to our page size limitation, though, we can convert this into an offset that's + // relative to the page's base offset; this relative offset will fit in 51 bits. 
+ offsetInPage -= page.getBaseOffset(); + } + + return encodePageNumberAndOffset(page.pageNumber, offsetInPage); + } + + @VisibleForTesting + public static long encodePageNumberAndOffset(int pageNumber, long offsetInPage) { + assert (pageNumber != -1) : "encodePageNumberAndOffset called with invalid page"; + return (((long) pageNumber) << OFFSET_BITS) | (offsetInPage & MASK_LONG_LOWER_51_BITS); + } + + @VisibleForTesting + public static int decodePageNumber(long pagePlusOffsetAddress) { + return (int) (pagePlusOffsetAddress >>> OFFSET_BITS); + } + + private static long decodeOffset(long pagePlusOffsetAddress) { + return (pagePlusOffsetAddress & MASK_LONG_LOWER_51_BITS); + } + + /** + * Get the page associated with an address encoded by + * {@link DataNodeMemoryManager#encodePageNumberAndOffset(MemoryBlock, long)} + */ + public Object getPage(long pagePlusOffsetAddress) { + if (tungstenMemoryMode == MemoryMode.ON_HEAP) { + final int pageNumber = decodePageNumber(pagePlusOffsetAddress); + assert (pageNumber >= 0 && pageNumber < PAGE_TABLE_SIZE); + final MemoryBlock page = pageTable[pageNumber]; + assert (page != null); + assert (page.getBaseObject() != null); + return page.getBaseObject(); + } else { + return null; + } + } + + /** + * Get the offset associated with an address encoded by + * {@link DataNodeMemoryManager#encodePageNumberAndOffset(MemoryBlock, long)} + */ + public long getOffsetInPage(long pagePlusOffsetAddress) { + final long offsetInPage = decodeOffset(pagePlusOffsetAddress); + if (tungstenMemoryMode == MemoryMode.ON_HEAP) { + return offsetInPage; + } else { + // In off-heap mode, an offset is an absolute address. In encodePageNumberAndOffset, we + // converted the absolute address into a relative address. 
Here, we invert that operation: + final int pageNumber = decodePageNumber(pagePlusOffsetAddress); + assert (pageNumber >= 0 && pageNumber < PAGE_TABLE_SIZE); + final MemoryBlock page = pageTable[pageNumber]; + assert (page != null); + return page.getBaseOffset() + offsetInPage; + } + } + + /** + * Clean up all allocated memory and pages. Returns the number of bytes freed. A non-zero return + * value can be used to detect memory leaks. + */ + public long cleanUpAllAllocatedMemory() { + synchronized (this) { + for (MemoryConsumer c: consumers) { + if (c != null && c.getUsed() > 0) { + // In case of failed task, it's normal to see leaked memory + logger.warn("leak " + JavaUtils.bytesToString(c.getUsed()) + " memory from " + c); + } + } + consumers.clear(); + + for (MemoryBlock page : pageTable) { + if (page != null) { + logger.warn("leak a page: " + page + " in task " + connectionAttemptId); + memoryManager.tungstenMemoryAllocator().free(page); + } + } + Arrays.fill(pageTable, null); + } + + // release the memory that is not used by any consumer. + memoryManager.releaseExecutionMemory(acquiredButNotUsed, connectionAttemptId, tungstenMemoryMode); + + return memoryManager.releaseAllExecutionMemoryForConnection(connectionAttemptId); + } + + /** + * Returns the memory consumption, in bytes, for the current task. 
+ */ + public long getMemoryConsumptionForThisConnection() { + return memoryManager.getExecutionMemoryUsageForConnection(connectionAttemptId); + } + + /** + * Returns Tungsten memory mode + */ + public MemoryMode getTungstenMemoryMode() { + return tungstenMemoryMode; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/memory/mm/MemoryConsumer.java b/src/main/java/io/mycat/memory/unsafe/memory/mm/MemoryConsumer.java new file mode 100644 index 000000000..30471a84b --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/memory/mm/MemoryConsumer.java @@ -0,0 +1,179 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.mycat.memory.unsafe.memory.mm; + +import io.mycat.memory.unsafe.array.CharArray; +import io.mycat.memory.unsafe.array.LongArray; +import io.mycat.memory.unsafe.memory.MemoryBlock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +/** + * An memory consumer of DataNodeMemoryManager, which support spilling. + * Note: this only supports allocation / spilling of Tungsten memory. 
+ */ +public abstract class MemoryConsumer { + private final Logger logger = LoggerFactory.getLogger(MemoryConsumer.class); + + protected final DataNodeMemoryManager dataNodeMemoryManager; + private final long pageSize; + protected long used; + + protected MemoryConsumer(DataNodeMemoryManager dataNodeMemoryManager, long pageSize) { + this.dataNodeMemoryManager = dataNodeMemoryManager; + this.pageSize = pageSize; + } + + protected MemoryConsumer(DataNodeMemoryManager dataNodeMemoryManager) { + this(dataNodeMemoryManager, dataNodeMemoryManager.pageSizeBytes()); + } + + /** + * Returns the size of used memory in bytes. + */ + public long getUsed() { + return used; + } + + /** + * Force spill during building. + * + * For testing. + */ + public void spill() throws IOException { + spill(Long.MAX_VALUE, this); + } + + /** + * Spill some data to disk to release memory, which will be called by DataNodeMemoryManager + * when there is not enough memory for the task. + * + * This should be implemented by subclass. + * + * Note: In order to avoid possible deadlock, should not call acquireMemory() from spill(). + * + * Note: today, this only frees Tungsten-managed pages. + * + * @param size the amount of memory should be released + * @param trigger the MemoryConsumer that trigger this spilling + * @return the amount of released memory in bytes + * @throws IOException + */ + public abstract long spill(long size, MemoryConsumer trigger) throws IOException; + + /** + * Allocates a LongArray of `size`. 
+ */ + public LongArray allocateLongArray(long size) { + long required = size * 8L; + MemoryBlock page = dataNodeMemoryManager.allocatePage(required,this); + if (page == null || page.size() < required) { + long got = 0; + if (page != null) { + got = page.size(); + dataNodeMemoryManager.freePage(page, this); + } + dataNodeMemoryManager.showMemoryUsage(); + throw new OutOfMemoryError("Unable to acquire " + required + " bytes of memory, got " + got); + } + used += required; + return new LongArray(page); + } + + /** + * Frees a LongArray. + */ + public void freeLongArray(LongArray array) { + freePage(array.memoryBlock()); + } + + public CharArray allocateCharArray(long size) { + long required = size * 2L; + MemoryBlock page = dataNodeMemoryManager.allocatePage(required,this); + if (page == null || page.size() < required) { + long got = 0; + if (page != null) { + got = page.size(); + dataNodeMemoryManager.freePage(page, this); + } + dataNodeMemoryManager.showMemoryUsage(); + throw new OutOfMemoryError("Unable to acquire " + required + " bytes of memory, got " + got); + } + used += required; + return new CharArray(page,this); + } + + /** + * Frees a CharArray. + */ + public void freeCharArray(CharArray array) { + freePage(array.memoryBlock()); + } + + /** + * Allocate a memory block with at least `required` bytes. + * + * Throws IOException if there is not enough memory. + * + * @throws OutOfMemoryError + */ + protected MemoryBlock allocatePage(long required) { + MemoryBlock page = dataNodeMemoryManager.allocatePage(Math.max(pageSize, required), this); + if (page == null || page.size() < required) { + long got = 0; + if (page != null) { + got = page.size(); + dataNodeMemoryManager.freePage(page,this); + } + dataNodeMemoryManager.showMemoryUsage(); + throw new OutOfMemoryError("Unable to acquire " + required + " bytes of memory, got " + got); + } + used += page.size(); + return page; + } + + /** + * Free a memory block. 
+ */ + protected void freePage(MemoryBlock page) { + used -= page.size(); + dataNodeMemoryManager.freePage(page, this); + } + + /** + * Allocates a heap memory of `size`. + */ + public long acquireOnHeapMemory(long size) { + long granted = 0; + try { + granted = dataNodeMemoryManager.acquireExecutionMemory(size, MemoryMode.ON_HEAP, this); + } catch (InterruptedException e) { + logger.error(e.getMessage()); + } + used += granted; + return granted; + } + + /** + * Release N bytes of heap memory. + */ + public void freeOnHeapMemory(long size) { + dataNodeMemoryManager.releaseExecutionMemory(size, MemoryMode.ON_HEAP, this); + used -= size; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/memory/mm/MemoryManager.java b/src/main/java/io/mycat/memory/unsafe/memory/mm/MemoryManager.java new file mode 100644 index 000000000..416ef14f4 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/memory/mm/MemoryManager.java @@ -0,0 +1,161 @@ +package io.mycat.memory.unsafe.memory.mm; + + +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.array.ByteArrayMethods; +import io.mycat.memory.unsafe.memory.MemoryAllocator; +import io.mycat.memory.unsafe.utils.MycatPropertyConf; +import javax.annotation.concurrent.GuardedBy; +import java.util.concurrent.ConcurrentHashMap; + +public abstract class MemoryManager { + + private MycatPropertyConf conf; + + @GuardedBy("this") + protected ResultSetMemoryPool onHeapExecutionMemoryPool = + new ResultSetMemoryPool(this, MemoryMode.ON_HEAP); + + @GuardedBy("this") + protected ResultSetMemoryPool offHeapExecutionMemoryPool = + new ResultSetMemoryPool(this, MemoryMode.OFF_HEAP); + + protected long maxOffHeapMemory = 0L; + protected long offHeapExecutionMemory = 0L; + private int numCores = 0; + + public MemoryManager(MycatPropertyConf conf, int numCores, long onHeapExecutionMemory){ + this.conf = conf; + this.numCores =numCores; + maxOffHeapMemory = conf.getSizeAsBytes("mycat.memory.offHeap.size","128m"); + 
offHeapExecutionMemory = maxOffHeapMemory; + onHeapExecutionMemoryPool.incrementPoolSize(onHeapExecutionMemory); + + offHeapExecutionMemoryPool.incrementPoolSize(offHeapExecutionMemory); + } + + protected abstract long acquireExecutionMemory(long numBytes,long taskAttemptId,MemoryMode memoryMode) throws InterruptedException; + + /** + * Release numBytes of execution memory belonging to the given task. + */ +public void releaseExecutionMemory(long numBytes, long taskAttemptId, MemoryMode memoryMode) { + synchronized (this) { + switch (memoryMode) { + case ON_HEAP: + onHeapExecutionMemoryPool.releaseMemory(numBytes, taskAttemptId); + break; + case OFF_HEAP: + offHeapExecutionMemoryPool.releaseMemory(numBytes, taskAttemptId); + break; + } + } + + } + + /** + * Release all memory for the given task and mark it as inactive (e.g. when a task ends). + * @return the number of bytes freed. + */ + public long releaseAllExecutionMemoryForConnection(long connAttemptId){ + synchronized(this) { + return (onHeapExecutionMemoryPool.releaseAllMemoryForeConnection(connAttemptId) + + offHeapExecutionMemoryPool.releaseAllMemoryForeConnection(connAttemptId)); + } + } + + /** + * Execution memory currently in use, in bytes. + */ + public final long executionMemoryUsed() { + synchronized(this) { + return (onHeapExecutionMemoryPool.memoryUsed() + offHeapExecutionMemoryPool.memoryUsed()); + } + } + + /** + * Returns the execution memory consumption, in bytes, for the given task. + */ + public long getExecutionMemoryUsageForConnection(long connAttemptId) { + synchronized (this) { + assert (connAttemptId >= 0); + return (onHeapExecutionMemoryPool.getMemoryUsageConnection(connAttemptId) + + offHeapExecutionMemoryPool.getMemoryUsageConnection(connAttemptId)); + } + } + + /** + * Tracks whether Tungsten memory will be allocated on the JVM heap or off-heap using + * sun.misc.Unsafe. 
+ */ + public final MemoryMode tungstenMemoryMode(){ + if (conf.getBoolean("mycat.memory.offHeap.enabled", false)) { + assert (conf.getSizeAsBytes("mycat.memory.offHeap.size",0) > 0); + assert (Platform.unaligned()); + return MemoryMode.OFF_HEAP; + } else { + return MemoryMode.ON_HEAP; + } + } + + /** + * The default page size, in bytes. + * + * If user didn't explicitly set "mycat.buffer.pageSize", we figure out the default value + * by looking at the number of cores available to the process, and the total amount of memory, + * and then divide it by a factor of safety. + */ + public long pageSizeBytes() { + + long minPageSize = 1L * 1024 * 1024 ; // 1MB + long maxPageSize = 64L * minPageSize ; // 64MB + + int cores = 0; + + if (numCores > 0){ + cores = numCores ; + } else { + cores = Runtime.getRuntime().availableProcessors(); + } + + // Because of rounding to next power of 2, we may have safetyFactor as 8 in worst case + int safetyFactor = 16; + long maxTungstenMemory = 0L; + + switch (tungstenMemoryMode()){ + case ON_HEAP: + maxTungstenMemory = onHeapExecutionMemoryPool.poolSize(); + break; + case OFF_HEAP: + maxTungstenMemory = offHeapExecutionMemoryPool.poolSize(); + break; + } + + long size = ByteArrayMethods.nextPowerOf2(maxTungstenMemory / cores / safetyFactor); + long defaultSize = Math.min(maxPageSize, Math.max(minPageSize, size)); + defaultSize = conf.getSizeAsBytes("mycat.buffer.pageSize", defaultSize); + + return defaultSize; + } + + /** + * Allocates memory for use by Unsafe/Tungsten code. + */ + public final MemoryAllocator tungstenMemoryAllocator() { + switch (tungstenMemoryMode()){ + case ON_HEAP: + return MemoryAllocator.HEAP; + case OFF_HEAP: + return MemoryAllocator.UNSAFE; + } + return null; + } + + /** + * Get Direct Memory Usage. 
+ */ + public final ConcurrentHashMap getDirectMemorUsage() { + + return offHeapExecutionMemoryPool.getMemoryForConnection(); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/memory/mm/MemoryMode.java b/src/main/java/io/mycat/memory/unsafe/memory/mm/MemoryMode.java new file mode 100644 index 000000000..fd8fcbbfa --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/memory/mm/MemoryMode.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.memory.mm; + +public enum MemoryMode { + ON_HEAP, + OFF_HEAP +} diff --git a/src/main/java/io/mycat/memory/unsafe/memory/mm/MemoryPool.java b/src/main/java/io/mycat/memory/unsafe/memory/mm/MemoryPool.java new file mode 100644 index 000000000..5981c2385 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/memory/mm/MemoryPool.java @@ -0,0 +1,68 @@ +package io.mycat.memory.unsafe.memory.mm; + +import javax.annotation.concurrent.GuardedBy; + +/** + * Manages bookkeeping for an adjustable-sized region of memory. This class is internal to + * the [[MemoryManager]]. See subclasses for more details. + * + */ +public abstract class MemoryPool { + /** + * lock [[MemoryManager]] instance, used for synchronization. 
We purposely erase the type + * to `Object` to avoid programming errors, since this object should only be used for + * synchronization purposes. + */ + protected final Object lock; + public MemoryPool(Object lock){ + this.lock = lock; + } + + @GuardedBy("lock") + private long _poolSize = 0; + + /** + * Returns the current size of the pool, in bytes. + */ + public final long poolSize() { + synchronized(lock) { + return _poolSize; + } + } + + /** + * Returns the amount of free memory in the pool, in bytes. + */ + public long memoryFree() { + synchronized(lock) { + return (_poolSize - memoryUsed()); + } + } + + /** + * Expands the pool by `delta` bytes. + */ + public final void incrementPoolSize(long delta) { + assert (delta >= 0); + synchronized(lock) { + _poolSize += delta; + } + } + + /** + * Shrinks the pool by `delta` bytes. + */ + public final void decrementPoolSize(long delta){ + synchronized(lock) { + assert (delta >= 0); + assert (delta <= _poolSize); + assert (_poolSize - delta >= memoryUsed()); + _poolSize -= delta; + } +} + + /** + * Returns the amount of used memory in this pool (in bytes). + */ + protected abstract long memoryUsed(); +} diff --git a/src/main/java/io/mycat/memory/unsafe/memory/mm/ResultMergeMemoryManager.java b/src/main/java/io/mycat/memory/unsafe/memory/mm/ResultMergeMemoryManager.java new file mode 100644 index 000000000..185dedbbb --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/memory/mm/ResultMergeMemoryManager.java @@ -0,0 +1,32 @@ +package io.mycat.memory.unsafe.memory.mm; + + +import io.mycat.memory.unsafe.utils.MycatPropertyConf; + +/** + * Created by zagnix on 2016/6/7. 
+ */ +public class ResultMergeMemoryManager extends MemoryManager { + + private long maxOnHeapExecutionMemory; + private int numCores; + private MycatPropertyConf conf; + public ResultMergeMemoryManager(MycatPropertyConf conf, int numCores, long onHeapExecutionMemory){ + super(conf,numCores,onHeapExecutionMemory); + this.conf = conf; + this.numCores = numCores; + this.maxOnHeapExecutionMemory = onHeapExecutionMemory; + } + + @Override + protected synchronized long acquireExecutionMemory(long numBytes,long taskAttemptId,MemoryMode memoryMode) throws InterruptedException { + switch (memoryMode) { + case ON_HEAP: + return onHeapExecutionMemoryPool.acquireMemory(numBytes,taskAttemptId); + case OFF_HEAP: + return offHeapExecutionMemoryPool.acquireMemory(numBytes,taskAttemptId); + } + return 0L; + } + +} diff --git a/src/main/java/io/mycat/memory/unsafe/memory/mm/ResultSetMemoryPool.java b/src/main/java/io/mycat/memory/unsafe/memory/mm/ResultSetMemoryPool.java new file mode 100644 index 000000000..94d3e5587 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/memory/mm/ResultSetMemoryPool.java @@ -0,0 +1,171 @@ +package io.mycat.memory.unsafe.memory.mm; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.concurrent.GuardedBy; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * Created by zagnix on 2016/6/6. 
+ */ +public class ResultSetMemoryPool extends MemoryPool { + private static final Logger LOG = LoggerFactory.getLogger(ResultSetMemoryPool.class); + + private MemoryMode memoryMode ; + + /** + * @param lock a [[MemoryManager]] instance to synchronize on + * @param memoryMode the type of memory tracked by this pool (on- or off-heap) + */ + public ResultSetMemoryPool(Object lock, MemoryMode memoryMode) { + super(lock); + this.memoryMode = memoryMode; + } + + + private String poolName(){ + + switch (memoryMode){ + case ON_HEAP: + return "on-heap memory"; + case OFF_HEAP: + return "off-heap memory"; + } + + return "off-heap memory"; + } + + public ConcurrentHashMap getMemoryForConnection() { + return memoryForConnection; + } + /** + * Map from taskAttemptId -> memory consumption in bytes + */ + private ConcurrentHashMap memoryForConnection = new ConcurrentHashMap(); + + @Override + protected long memoryUsed() { + synchronized (lock) { + long used =0; + for (Map.Entry entry : memoryForConnection.entrySet()) { + used += entry.getValue(); + } + return used; + } + } + + + /** + * Returns the memory consumption, in bytes, for the given task. + */ + public long getMemoryUsageConnection(long taskAttemptId) { + synchronized (lock) { + if (!memoryForConnection.containsKey(taskAttemptId)) { + memoryForConnection.put(taskAttemptId, 0L); + } + return memoryForConnection.get(taskAttemptId); + } + } + + + /** + * Try to acquire up to `numBytes` of memory for the given task and return the number of bytes + * obtained, or 0 if none can be allocated. + * + * This call may block until there is enough free memory in some situations, to make sure each + * task has a chance to ramp up to at least 1 / 8N of the total memory pool (where N is the # of + * active tasks) before it is forced to spill. This can happen if the number of tasks increase + * but an older task had a lot of memory already. 
+ * + * @param numBytes number of bytes to acquire + * @param connAttemptId the task attempt acquiring memory + * @return the number of bytes granted to the task. + */ + public long acquireMemory(long numBytes, long connAttemptId) throws InterruptedException { + + synchronized (lock) { + assert (numBytes > 0); + // Add this connection to the taskMemory map just so we can keep an accurate count of the number + // of active tasks, to let other tasks ramp down their memory in calls to `acquireMemory` + if (!memoryForConnection.containsKey(connAttemptId)) { + memoryForConnection.put(connAttemptId, 0L); + // This will later cause waiting tasks to wake up and check numTasks again + lock.notifyAll(); + } + + + while (true) { + long numActiveConns = memoryForConnection.size(); + long curMem = memoryForConnection.get(connAttemptId); + + long maxPoolSize = poolSize(); + long maxMemoryPerTask = maxPoolSize / numActiveConns; + long minMemoryPerTask = poolSize() / (8 * numActiveConns); + + // How much we can grant this connection; keep its share within 0 <= X <= 1 / numActiveConns + long maxToGrant = Math.min(numBytes, Math.max(0, maxMemoryPerTask - curMem)); + // Only give it as much memory as is free, which might be none if it reached 1 / numActiveConns + long toGrant = Math.min(maxToGrant, memoryFree()); + + // We want to let each connection get at least 1 / (8 * numActiveConns) before blocking; + // if we can't give it this much now, wait for other tasks to free up memory + // (this happens if older tasks allocated lots of memory before N grew) + if (toGrant < numBytes && curMem + toGrant < minMemoryPerTask) { + LOG.info("Thread " + connAttemptId + " waiting for at least 1/8N of " + poolName() + " pool to be free"); + lock.wait(); + } else { + long temp = memoryForConnection.get(connAttemptId); + memoryForConnection.put(connAttemptId, (temp + toGrant)); + return toGrant; + } + } + } + } + + /** + * Release `numBytes` of memory acquired by the given task. 
+ */ + public void releaseMemory(long numBytes, long connAttemptId) { + + synchronized (lock) { + long curMem = memoryForConnection.get(connAttemptId); + + long memoryToFree = 0L; + + if (curMem < numBytes) { + LOG.error( + "Internal error: release called on " + numBytes + " bytes but task only has " + curMem + " bytes " + + "of memory from the " + poolName() + " pool"); + memoryToFree = curMem; + } else { + memoryToFree = numBytes; + } + + if (memoryForConnection.containsKey(connAttemptId)) { + long temp = memoryForConnection.get(connAttemptId); + memoryForConnection.put(connAttemptId, (temp - memoryToFree)); + if (memoryForConnection.get(connAttemptId) <= 0) { + memoryForConnection.remove(connAttemptId); + } + } + // Notify waiters in acquireMemory() that memory has been freed + lock.notifyAll(); + } + } + + /** + * Release all memory for the given task and mark it as inactive (e.g. when a task ends). + * @return the number of bytes freed. + */ + public long releaseAllMemoryForeConnection(long connAttemptId) { + synchronized (lock){ + long numBytesToFree = getMemoryUsageConnection(connAttemptId); + releaseMemory(numBytesToFree,connAttemptId); + return numBytesToFree; + } + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/RingBuffer.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/RingBuffer.java new file mode 100644 index 000000000..00d2c48db --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/RingBuffer.java @@ -0,0 +1,629 @@ +package io.mycat.memory.unsafe.ringbuffer; + + +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.memory.mm.MemoryConsumer; +import io.mycat.memory.unsafe.ringbuffer.common.Cursored; +import io.mycat.memory.unsafe.ringbuffer.common.event.*; +import io.mycat.memory.unsafe.ringbuffer.exception.InsufficientCapacityException; +import io.mycat.memory.unsafe.ringbuffer.producer.Sequencer; + +/** + * 环形buffer 待实现, + */ +public class RingBuffer implements Cursored, EventSequencer, EventSink { + 
//Buffer数组填充 + private static final int BUFFER_PAD; + //Buffer数组起始基址 + private static final long REF_ARRAY_BASE; + //2^n=每个数组对象引用所占空间,这个n就是REF_ELEMENT_SHIFT + private static final int REF_ELEMENT_SHIFT; + + static { + final int scale = Platform.arrayIndexScale(Object[].class); + //Object数组引用长度,32位为4字节,64位为8字节 + if (4 == scale) { + REF_ELEMENT_SHIFT = 2; + } else if (8 == scale) { + REF_ELEMENT_SHIFT = 3; + } else { + throw new IllegalStateException("Unknown pointer size"); + } + //需要填充128字节,缓存行长度一般是128字节 + BUFFER_PAD = 128 / scale; + REF_ARRAY_BASE = Platform.arrayBaseOffset(Object[].class) + (BUFFER_PAD << REF_ELEMENT_SHIFT); + } + + private final long indexMask; + private final Object[] entries; + protected final int bufferSize; + protected final Sequencer sequencer; + + public RingBuffer(EventFactory eventFactory, Sequencer sequencer) { + this.sequencer = sequencer; + this.bufferSize = sequencer.getBufferSize(); + //保证buffer大小不小于1 + if (bufferSize < 1) { + throw new IllegalArgumentException("bufferSize must not be less than 1"); + } + //保证buffer大小为2的n次方 + if (Integer.bitCount(bufferSize) != 1) { + throw new IllegalArgumentException("bufferSize must be a power of 2"); + } + //m % 2^n <=> m & (2^n - 1) + this.indexMask = bufferSize - 1; + /** + * 结构:缓存行填充,避免频繁访问的任一entry与另一被修改的无关变量写入同一缓存行 + * -------------- + * * 数组头 * BASE + * * Padding * 128字节 + * * reference1 * SCALE + * * reference2 * SCALE + * * reference3 * SCALE + * .......... 
+ * * Padding * 128字节 + * -------------- + */ + this.entries = new Object[sequencer.getBufferSize() + 2 * BUFFER_PAD]; + //利用eventFactory初始化RingBuffer的每个槽 + fill(eventFactory); + } + + private void fill(EventFactory eventFactory) { + for (int i = 0; i < bufferSize; i++) { + entries[BUFFER_PAD + i] = eventFactory.newInstance(); + } + } + + /** + * 根据地址取出一个元素的引用 + * + * @param sequence + * @return + */ + private E elementAt(long sequence) { + return (E) Platform.getObject(entries, REF_ARRAY_BASE + ((sequence & indexMask) << REF_ELEMENT_SHIFT)); + } + + + @Override + public long getCursor() { + return sequencer.getCursor(); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.DataProvider#get(long) + */ + @Override + public E get(long sequence) { + return elementAt(sequence); + } + + private void translateAndPublish(EventTranslator translator, long sequence) { + try { + translator.translateTo(get(sequence), sequence); + } finally { + sequencer.publish(sequence); + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvent(EventTranslator translator) { + final long sequence = sequencer.next(); + translateAndPublish(translator, sequence); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#tryPublishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvent(EventTranslator translator) { + try { + final long sequence = sequencer.tryNext(); + translateAndPublish(translator, sequence); + return true; + } catch (InsufficientCapacityException e) { + return false; + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + private void translateAndPublish(EventTranslatorOneArg translator, long sequence, A arg0) { + try { + translator.translateTo(get(sequence), sequence, arg0); + } finally { + sequencer.publish(sequence); + } + } + + /** + * @see 
io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvent(EventTranslatorOneArg translator, A arg0) { + final long sequence = sequencer.next(); + translateAndPublish(translator, sequence, arg0); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvent(EventTranslatorOneArg translator, A arg0) { + try { + final long sequence = sequencer.tryNext(); + translateAndPublish(translator, sequence, arg0); + return true; + } catch (InsufficientCapacityException e) { + return false; + } + } + + private void translateAndPublish(EventTranslatorTwoArg translator, long sequence, A arg0, B arg1) { + try { + translator.translateTo(get(sequence), sequence, arg0, arg1); + } finally { + sequencer.publish(sequence); + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvent(EventTranslatorTwoArg translator, A arg0, B arg1) { + final long sequence = sequencer.next(); + translateAndPublish(translator, sequence, arg0, arg1); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvent(EventTranslatorTwoArg translator, A arg0, B arg1) { + try { + final long sequence = sequencer.tryNext(); + translateAndPublish(translator, sequence, arg0, arg1); + return true; + } catch (InsufficientCapacityException e) { + return false; + } + } + + private void translateAndPublish( + EventTranslatorThreeArg translator, long sequence, + A arg0, B arg1, C arg2) { + try { + translator.translateTo(get(sequence), sequence, arg0, arg1, arg2); + } finally { + sequencer.publish(sequence); + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void 
publishEvent(EventTranslatorThreeArg translator, A arg0, B arg1, C arg2) { + final long sequence = sequencer.next(); + translateAndPublish(translator, sequence, arg0, arg1, arg2); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvent(EventTranslatorThreeArg translator, A arg0, B arg1, C arg2) { + try { + final long sequence = sequencer.tryNext(); + translateAndPublish(translator, sequence, arg0, arg1, arg2); + return true; + } catch (InsufficientCapacityException e) { + return false; + } + } + + private void translateAndPublish(EventTranslatorVararg translator, long sequence, Object... args) { + try { + translator.translateTo(get(sequence), sequence, args); + } finally { + sequencer.publish(sequence); + } + } + + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvent(EventTranslatorVararg translator, Object... args) { + final long sequence = sequencer.next(); + translateAndPublish(translator, sequence, args); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvent(EventTranslatorVararg translator, Object... 
args) { + try { + final long sequence = sequencer.tryNext(); + translateAndPublish(translator, sequence, args); + return true; + } catch (InsufficientCapacityException e) { + return false; + } + } + + private void checkBounds(final EventTranslator[] translators, final int batchStartsAt, final int batchSize) { + checkBatchSizing(batchStartsAt, batchSize); + batchOverRuns(translators, batchStartsAt, batchSize); + } + + private void batchOverRuns(final A[] arg0, final int batchStartsAt, final int batchSize) { + if (batchStartsAt + batchSize > arg0.length) { + throw new IllegalArgumentException( + "A batchSize of: " + batchSize + + " with batchStatsAt of: " + batchStartsAt + + " will overrun the available number of arguments: " + (arg0.length - batchStartsAt)); + } + } + + private void checkBatchSizing(int batchStartsAt, int batchSize) { + if (batchStartsAt < 0 || batchSize < 0) { + throw new IllegalArgumentException("Both batchStartsAt and batchSize must be positive but got: batchStartsAt " + batchStartsAt + " and batchSize " + batchSize); + } else if (batchSize > bufferSize) { + throw new IllegalArgumentException("The ring buffer cannot accommodate " + batchSize + " it only has space for " + bufferSize + " entities."); + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvents(EventTranslator[] translators) { + publishEvents(translators, 0, translators.length); + } + + private void translateAndPublishBatch( + final EventTranslator[] translators, int batchStartsAt, + final int batchSize, final long finalSequence) { + final long initialSequence = finalSequence - (batchSize - 1); + try { + long sequence = initialSequence; + final int batchEndsAt = batchStartsAt + batchSize; + for (int i = batchStartsAt; i < batchEndsAt; i++) { + final EventTranslator translator = translators[i]; + translator.translateTo(get(sequence), sequence++); + } + } finally { + 
sequencer.publish(initialSequence, finalSequence); + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvents(EventTranslator[] translators, int batchStartsAt, int batchSize) { + checkBounds(translators, batchStartsAt, batchSize); + final long finalSequence = sequencer.next(batchSize); + translateAndPublishBatch(translators, batchStartsAt, batchSize, finalSequence); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvents(EventTranslator[] translators) { + return false; + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvents(EventTranslator[] translators, int batchStartsAt, int batchSize) { + checkBounds(translators, batchStartsAt, batchSize); + try { + final long finalSequence = sequencer.tryNext(batchSize); + translateAndPublishBatch(translators, batchStartsAt, batchSize, finalSequence); + return true; + } catch (InsufficientCapacityException e) { + return false; + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvents(EventTranslatorOneArg translator, A[] arg0) { + publishEvents(translator, 0, arg0.length, arg0); + } + + private void checkBounds(final A[] arg0, final int batchStartsAt, final int batchSize) { + checkBatchSizing(batchStartsAt, batchSize); + batchOverRuns(arg0, batchStartsAt, batchSize); + } + + private void translateAndPublishBatch( + final EventTranslatorOneArg translator, final A[] arg0, + int batchStartsAt, final int batchSize, final long finalSequence) { + final long initialSequence = finalSequence - (batchSize - 1); + try { + long sequence = initialSequence; + final int batchEndsAt = batchStartsAt + batchSize; + for (int i = batchStartsAt; i < 
batchEndsAt; i++) { + translator.translateTo(get(sequence), sequence++, arg0[i]); + } + } finally { + sequencer.publish(initialSequence, finalSequence); + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvents(EventTranslatorOneArg translator, int batchStartsAt, int batchSize, A[] arg0) { + checkBounds(arg0, batchStartsAt, batchSize); + final long finalSequence = sequencer.next(batchSize); + translateAndPublishBatch(translator, arg0, batchStartsAt, batchSize, finalSequence); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvents(EventTranslatorOneArg translator, A[] arg0) { + return tryPublishEvents(translator, 0, arg0.length, arg0); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvents(EventTranslatorOneArg translator, int batchStartsAt, int batchSize, A[] arg0) { + checkBounds(arg0, batchStartsAt, batchSize); + try { + final long finalSequence = sequencer.tryNext(batchSize); + translateAndPublishBatch(translator, arg0, batchStartsAt, batchSize, finalSequence); + return true; + } catch (InsufficientCapacityException e) { + return false; + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvents(EventTranslatorTwoArg translator, A[] arg0, B[] arg1) { + publishEvents(translator, 0, arg0.length, arg0, arg1); + } + + private void checkBounds(final A[] arg0, final B[] arg1, final int batchStartsAt, final int batchSize) { + checkBatchSizing(batchStartsAt, batchSize); + batchOverRuns(arg0, batchStartsAt, batchSize); + batchOverRuns(arg1, batchStartsAt, batchSize); + } + + private void translateAndPublishBatch( + final EventTranslatorTwoArg translator, final A[] arg0, + 
final B[] arg1, int batchStartsAt, int batchSize, + final long finalSequence) { + final long initialSequence = finalSequence - (batchSize - 1); + try { + long sequence = initialSequence; + final int batchEndsAt = batchStartsAt + batchSize; + for (int i = batchStartsAt; i < batchEndsAt; i++) { + translator.translateTo(get(sequence), sequence++, arg0[i], arg1[i]); + } + } finally { + sequencer.publish(initialSequence, finalSequence); + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvents(EventTranslatorTwoArg translator, int batchStartsAt, int batchSize, A[] arg0, B[] arg1) { + checkBounds(arg0, arg1, batchStartsAt, batchSize); + final long finalSequence = sequencer.next(batchSize); + translateAndPublishBatch(translator, arg0, arg1, batchStartsAt, batchSize, finalSequence); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvents(EventTranslatorTwoArg translator, A[] arg0, B[] arg1) { + return tryPublishEvents(translator, 0, arg0.length, arg0, arg1); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvents(EventTranslatorTwoArg translator, int batchStartsAt, int batchSize, A[] arg0, B[] arg1) { + checkBounds(arg0, arg1, batchStartsAt, batchSize); + try { + final long finalSequence = sequencer.tryNext(batchSize); + translateAndPublishBatch(translator, arg0, arg1, batchStartsAt, batchSize, finalSequence); + return true; + } catch (InsufficientCapacityException e) { + return false; + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvents(EventTranslatorThreeArg translator, A[] arg0, B[] arg1, C[] arg2) { + publishEvents(translator, 0, arg0.length, arg0, arg1, arg2); + } + 
+ private void checkBounds( + final A[] arg0, final B[] arg1, final C[] arg2, final int batchStartsAt, final int batchSize) { + checkBatchSizing(batchStartsAt, batchSize); + batchOverRuns(arg0, batchStartsAt, batchSize); + batchOverRuns(arg1, batchStartsAt, batchSize); + batchOverRuns(arg2, batchStartsAt, batchSize); + } + + private void translateAndPublishBatch( + final EventTranslatorThreeArg translator, + final A[] arg0, final B[] arg1, final C[] arg2, int batchStartsAt, + final int batchSize, final long finalSequence) { + final long initialSequence = finalSequence - (batchSize - 1); + try { + long sequence = initialSequence; + final int batchEndsAt = batchStartsAt + batchSize; + for (int i = batchStartsAt; i < batchEndsAt; i++) { + translator.translateTo(get(sequence), sequence++, arg0[i], arg1[i], arg2[i]); + } + } finally { + sequencer.publish(initialSequence, finalSequence); + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvents(EventTranslatorThreeArg translator, int batchStartsAt, int batchSize, A[] arg0, B[] arg1, C[] arg2) { + checkBounds(arg0, arg1, arg2, batchStartsAt, batchSize); + final long finalSequence = sequencer.next(batchSize); + translateAndPublishBatch(translator, arg0, arg1, arg2, batchStartsAt, batchSize, finalSequence); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvents(EventTranslatorThreeArg translator, A[] arg0, B[] arg1, C[] arg2) { + return tryPublishEvents(translator, 0, arg0.length, arg0, arg1, arg2); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvents(EventTranslatorThreeArg translator, int batchStartsAt, int batchSize, A[] arg0, B[] arg1, C[] arg2) { + checkBounds(arg0, arg1, arg2, batchStartsAt, batchSize); + try { + 
final long finalSequence = sequencer.tryNext(batchSize); + translateAndPublishBatch(translator, arg0, arg1, arg2, batchStartsAt, batchSize, finalSequence); + return true; + } catch (InsufficientCapacityException e) { + return false; + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvents(EventTranslatorVararg translator, Object[]... args) { + publishEvents(translator, 0, args.length, args); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public void publishEvents(EventTranslatorVararg translator, int batchStartsAt, int batchSize, Object[]... args) { + checkBounds(batchStartsAt, batchSize, args); + final long finalSequence = sequencer.next(batchSize); + translateAndPublishBatch(translator, batchStartsAt, batchSize, finalSequence, args); + } + + private void checkBounds(final int batchStartsAt, final int batchSize, final Object[][] args) { + checkBatchSizing(batchStartsAt, batchSize); + batchOverRuns(args, batchStartsAt, batchSize); + } + + private void translateAndPublishBatch( + final EventTranslatorVararg translator, int batchStartsAt, + final int batchSize, final long finalSequence, final Object[][] args) { + final long initialSequence = finalSequence - (batchSize - 1); + try { + long sequence = initialSequence; + final int batchEndsAt = batchStartsAt + batchSize; + for (int i = batchStartsAt; i < batchEndsAt; i++) { + translator.translateTo(get(sequence), sequence++, args[i]); + } + } finally { + sequencer.publish(initialSequence, finalSequence); + } + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvents(EventTranslatorVararg translator, Object[]... 
args) { + return tryPublishEvents(translator, 0, args.length, args); + } + + /** + * @see io.mycat.memory.unsafe.ringbuffer.common.event.EventSink#publishEvent(EventTranslator) + */ + @Override + public boolean tryPublishEvents(EventTranslatorVararg translator, int batchStartsAt, int batchSize, Object[]... args) { + checkBounds(batchStartsAt, batchSize, args); + try { + final long finalSequence = sequencer.tryNext(batchSize); + translateAndPublishBatch(translator, batchStartsAt, batchSize, finalSequence, args); + return true; + } catch (InsufficientCapacityException e) { + return false; + } + } + + @Override + public int getBufferSize() { + return bufferSize; + } + + @Override + public boolean hasAvailableCapacity(int requiredCapacity) { + return sequencer.hasAvailableCapacity(requiredCapacity); + } + + @Override + public long remainingCapacity() { + return sequencer.remainingCapacity(); + } + + @Override + public long next() { + return sequencer.next(); + } + + @Override + public long next(int n) { + return sequencer.next(n); + } + + @Override + public long tryNext() throws InsufficientCapacityException { + return sequencer.tryNext(); + } + + @Override + public long tryNext(int n) throws InsufficientCapacityException { + return sequencer.tryNext(n); + } + + @Override + public void publish(long sequence) { + sequencer.publish(sequence); + } + + @Override + public void publish(long lo, long hi) { + sequencer.publish(lo, hi); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/Cursored.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/Cursored.java new file mode 100644 index 000000000..c03efc479 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/Cursored.java @@ -0,0 +1,10 @@ +package io.mycat.memory.unsafe.ringbuffer.common; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/23 + */ +public interface Cursored { + long getCursor(); +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/DataProvider.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/DataProvider.java new file mode 100644 index 000000000..b9da2dd93 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/DataProvider.java @@ -0,0 +1,15 @@
+package io.mycat.memory.unsafe.ringbuffer.common; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/29 + */ +public interface DataProvider { + /** + * 获取sequence对应的对象 + * @param sequence + * @return + */ + public T get(long sequence); +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/Sequenced.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/Sequenced.java new file mode 100644 index 000000000..c2a54c977 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/Sequenced.java @@ -0,0 +1,66 @@ +package io.mycat.memory.unsafe.ringbuffer.common; + +import io.mycat.memory.unsafe.ringbuffer.exception.InsufficientCapacityException; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/23 + */ +public interface Sequenced { + /** + * @return ringBuffer的大小 + */ + int getBufferSize(); + + /** + * @param requiredCapacity 需要的大小 + * @return true ringBuffer的剩余空间足够 | false ringBuffer的剩余空间不足 + */ + boolean hasAvailableCapacity(final int requiredCapacity); + + /** + * @return ringBuffer的剩余空间 + */ + long remainingCapacity(); + + /** + * 申请下一个sequence(value)作为生产event的位置 + * @return sequence的value + */ + long next(); + + /** + * 申请下n个sequence(value)作为生产多个event的位置 + * @param n + * @return 最高的sequence的value + */ + long next(int n); + /** + * 尝试申请下一个sequence(value)作为生产event的位置 + * @return sequence的value + * @throws InsufficientCapacityException + */ + long tryNext() throws InsufficientCapacityException; + + /** + * 尝试申请下n个sequence(value)作为生产多个event的位置 + * @param n + * @return 最高的sequence的value + * @throws InsufficientCapacityException + */ + long tryNext(int n) throws InsufficientCapacityException; + + /** + * 发布一个Sequence,一般在这个Sequence对应位置的Event被填充后 + * @param sequence + */ + void publish(long sequence); + + /** + * 发布多个Sequence,一般在这些Sequence对应位置的Event被填充后 + * @param lo 第一个sequence的value + * @param hi 最后一个sequence的value + */ + void publish(long lo, long hi); +} diff --git 
a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/barrier/SequenceBarrier.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/barrier/SequenceBarrier.java new file mode 100644 index 000000000..f58d07818 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/barrier/SequenceBarrier.java @@ -0,0 +1,53 @@ +package io.mycat.memory.unsafe.ringbuffer.common.barrier; + +import io.mycat.memory.unsafe.ringbuffer.exception.AlertException; +import io.mycat.memory.unsafe.ringbuffer.exception.TimeoutException; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/24 + */ +public interface SequenceBarrier { + /** + * 等待给定的sequence值可以被消费 + * + * @param sequence 等待的sequence值 + * @return 可以消费的最大sequence值 + * @throws AlertException 当Disruptor的状态改变时会抛出 + * @throws InterruptedException 唤醒线程 + * @throws TimeoutException 超过最大等待时间 + */ + long waitFor(long sequence) throws AlertException, InterruptedException, TimeoutException; + + /** + * 获取当前可以消费的cursor值 + * + * @return 当前可以消费的cursor值(已经被publish的) + */ + long getCursor(); + + /** + * alert状态 + * + * @return true 如果被alerted + */ + boolean isAlerted(); + + /** + * 进入alert状态 + */ + void alert(); + + /** + * 清除当前alert状态 + */ + void clearAlert(); + + /** + * 检查是否被alerted,如果是,则抛出{@link AlertException} + * + * @throws AlertException if alert has been raised. 
+ */ + void checkAlert() throws AlertException; +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventFactory.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventFactory.java new file mode 100644 index 000000000..e15ba81ab --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventFactory.java @@ -0,0 +1,12 @@ +package io.mycat.memory.unsafe.ringbuffer.common.event; + +/** + * 用户实现,生成Event的接口 + * + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/29 + */ +public interface EventFactory { + T newInstance(); +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventSequencer.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventSequencer.java new file mode 100644 index 000000000..ca7ef8441 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventSequencer.java @@ -0,0 +1,14 @@ +package io.mycat.memory.unsafe.ringbuffer.common.event; + +import io.mycat.memory.unsafe.ringbuffer.common.DataProvider; +import io.mycat.memory.unsafe.ringbuffer.common.Sequenced; + +/** + * EventSequencer接口没有自己的方法,只是为了将Sequencer和DataProvider合起来。 + * + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/29 + */ +public interface EventSequencer extends DataProvider, Sequenced { +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventSink.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventSink.java new file mode 100644 index 000000000..4c99ad67f --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventSink.java @@ -0,0 +1,92 @@ +package io.mycat.memory.unsafe.ringbuffer.common.event; + +/** + * Event槽接口 + * + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/29 + */ +public interface EventSink { + /** + * 申请下一个Sequence->申请成功则获取对应槽的Event->利用translator初始化并填充对应槽的Event->发布Event + * @param translator translator用户实现,用于初始化Event,这里是不带参数Translator + */ 
+ void publishEvent(EventTranslator translator); + + /** + * 尝试申请下一个Sequence->申请成功则获取对应槽的Event->利用translator初始化并填充对应槽的Event->发布Event + * 若空间不足,则立即失败返回 + * @param translator translator用户实现,用于初始化Event,这里是不带参数Translator + * @return 成功true,失败false + */ + boolean tryPublishEvent(EventTranslator translator); + + void publishEvent(EventTranslatorOneArg translator, A arg0); + + boolean tryPublishEvent(EventTranslatorOneArg translator, A arg0); + + void publishEvent(EventTranslatorTwoArg translator, A arg0, B arg1); + + boolean tryPublishEvent(EventTranslatorTwoArg translator, A arg0, B arg1); + + void publishEvent(EventTranslatorThreeArg translator, A arg0, B arg1, C arg2); + + boolean tryPublishEvent(EventTranslatorThreeArg translator, A arg0, B arg1, C arg2); + + void publishEvent(EventTranslatorVararg translator, Object... args); + + boolean tryPublishEvent(EventTranslatorVararg translator, Object... args); + + /** + * 包括申请多个Sequence->申请成功则获取对应槽的Event->利用每个translator初始化并填充每个对应槽的Event->发布Event + * @param translators + */ + void publishEvents(EventTranslator[] translators); + + void publishEvents(EventTranslator[] translators, int batchStartsAt, int batchSize); + + boolean tryPublishEvents(EventTranslator[] translators); + + boolean tryPublishEvents(EventTranslator[] translators, int batchStartsAt, int batchSize); + + void publishEvents(EventTranslatorOneArg translator, A[] arg0); + + void publishEvents(EventTranslatorOneArg translator, int batchStartsAt, int batchSize, A[] arg0); + + boolean tryPublishEvents(EventTranslatorOneArg translator, A[] arg0); + + boolean tryPublishEvents(EventTranslatorOneArg translator, int batchStartsAt, int batchSize, A[] arg0); + + void publishEvents(EventTranslatorTwoArg translator, A[] arg0, B[] arg1); + + void publishEvents( + EventTranslatorTwoArg translator, int batchStartsAt, int batchSize, A[] arg0, + B[] arg1); + + boolean tryPublishEvents(EventTranslatorTwoArg translator, A[] arg0, B[] arg1); + + boolean tryPublishEvents( + 
EventTranslatorTwoArg translator, int batchStartsAt, int batchSize, + A[] arg0, B[] arg1); + + void publishEvents(EventTranslatorThreeArg translator, A[] arg0, B[] arg1, C[] arg2); + + void publishEvents( + EventTranslatorThreeArg translator, int batchStartsAt, int batchSize, + A[] arg0, B[] arg1, C[] arg2); + + boolean tryPublishEvents(EventTranslatorThreeArg translator, A[] arg0, B[] arg1, C[] arg2); + + boolean tryPublishEvents( + EventTranslatorThreeArg translator, int batchStartsAt, + int batchSize, A[] arg0, B[] arg1, C[] arg2); + + void publishEvents(EventTranslatorVararg translator, Object[]... args); + + void publishEvents(EventTranslatorVararg translator, int batchStartsAt, int batchSize, Object[]... args); + + boolean tryPublishEvents(EventTranslatorVararg translator, Object[]... args); + + boolean tryPublishEvents(EventTranslatorVararg translator, int batchStartsAt, int batchSize, Object[]... args); +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslator.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslator.java new file mode 100644 index 000000000..285131269 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslator.java @@ -0,0 +1,12 @@ +package io.mycat.memory.unsafe.ringbuffer.common.event; + +/** + * Event初始化接口,生产者通过实现这个接口,在发布Event时,对应实现的translateTo方法会被调用 + * + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/29 + */ +public interface EventTranslator { + void translateTo(final T event, long sequence); +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslatorOneArg.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslatorOneArg.java new file mode 100644 index 000000000..ff83bf70c --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslatorOneArg.java @@ -0,0 +1,13 @@ +package io.mycat.memory.unsafe.ringbuffer.common.event; + +/** + * 
Event初始化接口,生产者通过实现这个接口,在发布Event时,对应实现的translateTo方法会被调用 + * 这里用户可以传一个参数 + * + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/29 + */ +public interface EventTranslatorOneArg { + void translateTo(final T event, long sequence, final A arg0); +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslatorThreeArg.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslatorThreeArg.java new file mode 100644 index 000000000..aca267823 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslatorThreeArg.java @@ -0,0 +1,13 @@ +package io.mycat.memory.unsafe.ringbuffer.common.event; + +/** + * Event初始化接口,生产者通过实现这个接口,在发布Event时,对应实现的translateTo方法会被调用 + * 这里用户可以传三个参数 + * + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/29 + */ +public interface EventTranslatorThreeArg { + void translateTo(final T event, long sequence, final A arg0, final B arg1, final C arg2); +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslatorTwoArg.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslatorTwoArg.java new file mode 100644 index 000000000..aee4b6c8b --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslatorTwoArg.java @@ -0,0 +1,13 @@ +package io.mycat.memory.unsafe.ringbuffer.common.event; + +/** + * Event初始化接口,生产者通过实现这个接口,在发布Event时,对应实现的translateTo方法会被调用 + * 这里用户可以传两个参数 + * + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/29 + */ +public interface EventTranslatorTwoArg { + void translateTo(final T event, long sequence, final A arg0, final B arg1); +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslatorVararg.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslatorVararg.java new file mode 100644 index 000000000..2953f4acc --- /dev/null +++ 
b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/event/EventTranslatorVararg.java @@ -0,0 +1,13 @@ +package io.mycat.memory.unsafe.ringbuffer.common.event; + +/** + * Event初始化接口,生产者通过实现这个接口,在发布Event时,对应实现的translateTo方法会被调用 + * 这里用户可以传多个参数 + * + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/29 + */ +public interface EventTranslatorVararg { + void translateTo(final T event, long sequence, final Object... args); +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/sequence/Sequence.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/sequence/Sequence.java new file mode 100644 index 000000000..4da2b029a --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/sequence/Sequence.java @@ -0,0 +1,152 @@ +package io.mycat.memory.unsafe.ringbuffer.common.sequence; + + +import io.mycat.memory.unsafe.Platform; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/23 + */ +class LhsPadding +{ + protected long p1, p2, p3, p4, p5, p6, p7; +} + +class Value extends LhsPadding +{ + protected volatile long value; +} + +class RhsPadding extends Value +{ + protected long p9, p10, p11, p12, p13, p14, p15; +} + +/** + *
<p>
Concurrent sequence class used for tracking the progress of + * the ring buffer and event processors. Support a number + * of concurrent operations including CAS and order writes. + * + *
<p>
Also attempts to be more efficient with regards to false + * sharing by adding padding around the volatile field. + */ +public class Sequence extends RhsPadding +{ + public static final long INITIAL_VALUE = -1L; + private static final long VALUE_OFFSET; + + static + { + try + { + VALUE_OFFSET = Platform.objectFieldOffset(Value.class.getDeclaredField("value")); + } + catch (final Exception e) + { + throw new RuntimeException(e); + } + } + + /** + * Create a sequence initialised to -1. + */ + public Sequence() + { + this(INITIAL_VALUE); + } + + /** + * Create a sequence with a specified initial value. + * + * @param initialValue The initial value for this sequence. + */ + public Sequence(final long initialValue) + { + Platform.putOrderedLong(this, VALUE_OFFSET, initialValue); + } + + /** + * Perform a volatile read of this sequence's value. + * + * @return The current value of the sequence. + */ + public long get() + { + return value; + } + + /** + * Perform an ordered write of this sequence. The intent is + * a Store/Store barrier between this write and any previous + * store. + * + * @param value The new value for the sequence. + */ + public void set(final long value) + { + Platform.putOrderedLong(this, VALUE_OFFSET, value); + } + + /** + * Performs a volatile write of this sequence. The intent is + * a Store/Store barrier between this write and any previous + * write and a Store/Load barrier between this write and any + * subsequent volatile read. + * + * @param value The new value for the sequence. + */ + public void setVolatile(final long value) + { + Platform.putLongVolatile(this, VALUE_OFFSET, value); + } + + /** + * Perform a compare and set operation on the sequence. + * + * @param expectedValue The expected current value. + * @param newValue The value to update to. + * @return true if the operation succeeds, false otherwise. 
+ */ + public boolean compareAndSet(final long expectedValue, final long newValue) + { + return Platform.compareAndSwapLong(this, VALUE_OFFSET, expectedValue, newValue); + } + + /** + * Atomically increment the sequence by one. + * + * @return The value after the increment + */ + public long incrementAndGet() + { + return addAndGet(1L); + } + + /** + * Atomically add the supplied value. + * + * @param increment The value to add to the sequence. + * @return The value after the increment. + */ + public long addAndGet(final long increment) + { + long currentValue; + long newValue; + + do + { + currentValue = get(); + newValue = currentValue + increment; + } + while (!compareAndSet(currentValue, newValue)); + + return newValue; + } + + @Override + public String toString() + { + return Long.toString(get()); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/sequence/SequenceGroups.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/sequence/SequenceGroups.java new file mode 100644 index 000000000..54fe7d368 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/sequence/SequenceGroups.java @@ -0,0 +1,115 @@ +package io.mycat.memory.unsafe.ringbuffer.common.sequence; + +import io.mycat.memory.unsafe.ringbuffer.common.Cursored; + +import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; + +import static java.util.Arrays.copyOf; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/25 + */ +public class SequenceGroups { + /** + * 原子添加sequences + * + * @param holder 原子更新的域所属的类对象 + * @param updater 原子更新的域对象 + * @param cursor 定位 + * @param sequencesToAdd 要添加的sequences + * @param + */ + public static void addSequences( + final T holder, + final AtomicReferenceFieldUpdater updater, + final Cursored cursor, + final Sequence... 
sequencesToAdd) + { + long cursorSequence; + Sequence[] updatedSequences; + Sequence[] currentSequences; + //在更新成功之前,一直重新读取currentSequences,扩充为添加所有sequence之后的updatedSequences + do + { + currentSequences = updater.get(holder); + updatedSequences = copyOf(currentSequences, currentSequences.length + sequencesToAdd.length); + cursorSequence = cursor.getCursor(); + + int index = currentSequences.length; + //将新的sequences的值设置为cursorSequence + for (Sequence sequence : sequencesToAdd) + { + sequence.set(cursorSequence); + updatedSequences[index++] = sequence; + } + } + while (!updater.compareAndSet(holder, currentSequences, updatedSequences)); + + cursorSequence = cursor.getCursor(); + for (Sequence sequence : sequencesToAdd) + { + sequence.set(cursorSequence); + } + } + + /** + * 原子移除某个指定的sequence + * + * @param holder 原子更新的域所属的类对象 + * @param sequenceUpdater 原子更新的域对象 + * @param sequence 要移除的sequence + * @param + * @return + */ + public static boolean removeSequence( + final T holder, + final AtomicReferenceFieldUpdater sequenceUpdater, + final Sequence sequence) + { + int numToRemove; + Sequence[] oldSequences; + Sequence[] newSequences; + + do + { + oldSequences = sequenceUpdater.get(holder); + + numToRemove = countMatching(oldSequences, sequence); + + if (0 == numToRemove) + { + break; + } + + final int oldSize = oldSequences.length; + newSequences = new Sequence[oldSize - numToRemove]; + + for (int i = 0, pos = 0; i < oldSize; i++) + { + final Sequence testSequence = oldSequences[i]; + if (sequence != testSequence) + { + newSequences[pos++] = testSequence; + } + } + } + while (!sequenceUpdater.compareAndSet(holder, oldSequences, newSequences)); + + return numToRemove != 0; + } + + private static int countMatching(T[] values, final T toMatch) + { + int numToRemove = 0; + for (T value : values) + { + if (value == toMatch) // Specifically uses identity + { + numToRemove++; + } + } + return numToRemove; + } +} diff --git 
a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/waitStrategy/WaitStrategy.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/waitStrategy/WaitStrategy.java new file mode 100644 index 000000000..7b62568e0 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/waitStrategy/WaitStrategy.java @@ -0,0 +1,31 @@ +package io.mycat.memory.unsafe.ringbuffer.common.waitStrategy; + +import io.mycat.memory.unsafe.ringbuffer.common.barrier.SequenceBarrier; +import io.mycat.memory.unsafe.ringbuffer.common.sequence.Sequence; +import io.mycat.memory.unsafe.ringbuffer.exception.AlertException; +import io.mycat.memory.unsafe.ringbuffer.exception.TimeoutException; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/24 + */ +public interface WaitStrategy { + /** + * @param sequence 需要等待available的sequence + * @param cursor 对应RingBuffer的Cursor + * @param dependentSequence 需要等待(依赖)的Sequence + * @param barrier 多消费者注册的SequenceBarrier + * @return 已经available的sequence + * @throws AlertException + * @throws InterruptedException + * @throws TimeoutException + */ + long waitFor(long sequence, Sequence cursor, Sequence dependentSequence, SequenceBarrier barrier) + throws AlertException, InterruptedException, TimeoutException; + + /** + * 唤醒所有等待的消费者 + */ + void signalAllWhenBlocking(); +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/waitStrategy/impl/BlockingWaitStrategy.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/waitStrategy/impl/BlockingWaitStrategy.java new file mode 100644 index 000000000..e5bb5fc86 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/waitStrategy/impl/BlockingWaitStrategy.java @@ -0,0 +1,58 @@ +package io.mycat.memory.unsafe.ringbuffer.common.waitStrategy.impl; + +import io.mycat.memory.unsafe.ringbuffer.common.barrier.SequenceBarrier; +import io.mycat.memory.unsafe.ringbuffer.common.sequence.Sequence; +import 
io.mycat.memory.unsafe.ringbuffer.common.waitStrategy.WaitStrategy; +import io.mycat.memory.unsafe.ringbuffer.exception.AlertException; +import io.mycat.memory.unsafe.ringbuffer.exception.TimeoutException; + +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/8/1 + */ +public class BlockingWaitStrategy implements WaitStrategy { + private final Lock lock = new ReentrantLock(); + private final Condition processorNotifyCondition = lock.newCondition(); + + @Override + public long waitFor(long sequence, Sequence cursorSequence, Sequence dependentSequence, SequenceBarrier barrier) + throws AlertException, InterruptedException { + long availableSequence; + if (cursorSequence.get() < sequence) { + lock.lock(); + try { + while (cursorSequence.get() < sequence) { + //检查是否Alert,如果Alert,则抛出AlertException + //Alert在这里代表对应的消费者被halt停止了 + barrier.checkAlert(); + //在processorNotifyCondition上等待唤醒 + processorNotifyCondition.await(); + } + } finally { + lock.unlock(); + } + } + + while ((availableSequence = dependentSequence.get()) < sequence) { + barrier.checkAlert(); + } + + return availableSequence; + } + + @Override + public void signalAllWhenBlocking() { + lock.lock(); + try { + //生产者生产消息后,会唤醒所有等待的消费者线程 + processorNotifyCondition.signalAll(); + } finally { + lock.unlock(); + } + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/waitStrategy/impl/BusySpinWaitStrategy.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/waitStrategy/impl/BusySpinWaitStrategy.java new file mode 100644 index 000000000..c89bcb60a --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/waitStrategy/impl/BusySpinWaitStrategy.java @@ -0,0 +1,30 @@ +package io.mycat.memory.unsafe.ringbuffer.common.waitStrategy.impl; + +import io.mycat.memory.unsafe.ringbuffer.common.barrier.SequenceBarrier; +import 
io.mycat.memory.unsafe.ringbuffer.common.sequence.Sequence; +import io.mycat.memory.unsafe.ringbuffer.common.waitStrategy.WaitStrategy; +import io.mycat.memory.unsafe.ringbuffer.exception.AlertException; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/8/1 + */ +public class BusySpinWaitStrategy implements WaitStrategy { + @Override + public long waitFor( + final long sequence, Sequence cursor, final Sequence dependentSequence, final SequenceBarrier barrier) + throws AlertException, InterruptedException { + + long availableSequence; + //一直while自旋检查 + while ((availableSequence = dependentSequence.get()) < sequence) { + barrier.checkAlert(); + } + return availableSequence; + } + + @Override + public void signalAllWhenBlocking() { + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/waitStrategy/impl/SleepingWaitStrategy.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/waitStrategy/impl/SleepingWaitStrategy.java new file mode 100644 index 000000000..f1f9fcad1 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/common/waitStrategy/impl/SleepingWaitStrategy.java @@ -0,0 +1,65 @@ +package io.mycat.memory.unsafe.ringbuffer.common.waitStrategy.impl; + +import io.mycat.memory.unsafe.ringbuffer.common.barrier.SequenceBarrier; +import io.mycat.memory.unsafe.ringbuffer.common.sequence.Sequence; +import io.mycat.memory.unsafe.ringbuffer.common.waitStrategy.WaitStrategy; +import io.mycat.memory.unsafe.ringbuffer.exception.AlertException; + +import java.util.concurrent.locks.LockSupport; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/8/1 + */ +public class SleepingWaitStrategy implements WaitStrategy { + //重试200次 + private static final int DEFAULT_RETRIES = 200; + private final int retries; + + public SleepingWaitStrategy() { + this(DEFAULT_RETRIES); + } + + public SleepingWaitStrategy(int retries) { + this.retries = retries; + } + + @Override + public long waitFor( + final long sequence, 
Sequence cursor, final Sequence dependentSequence, final SequenceBarrier barrier) + throws AlertException, InterruptedException { + long availableSequence; + int counter = retries; + //直接检查dependentSequence.get() < sequence + while ((availableSequence = dependentSequence.get()) < sequence) { + counter = applyWaitMethod(barrier, counter); + } + + return availableSequence; + } + + @Override + public void signalAllWhenBlocking() { + } + + private int applyWaitMethod(final SequenceBarrier barrier, int counter) + throws AlertException { + //检查是否需要终止 + barrier.checkAlert(); + //如果在200~100,重试 + if (counter > 100) { + --counter; + } + //如果在100~0,调用Thread.yield()让出CPU + else if (counter > 0) { + --counter; + Thread.yield(); + } + //<0的话,利用LockSupport.parkNanos(1L)来sleep最小时间 + else { + LockSupport.parkNanos(1L); + } + return counter; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/exception/AlertException.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/exception/AlertException.java new file mode 100644 index 000000000..f21c7a8e2 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/exception/AlertException.java @@ -0,0 +1,21 @@ +package io.mycat.memory.unsafe.ringbuffer.exception; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/24 + */ +public class AlertException extends Exception { + + public static final AlertException INSTANCE = new AlertException(); + + private AlertException() + { + } + + @Override + public Throwable fillInStackTrace() + { + return this; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/exception/InsufficientCapacityException.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/exception/InsufficientCapacityException.java new file mode 100644 index 000000000..a370590e0 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/exception/InsufficientCapacityException.java @@ -0,0 +1,19 @@ +package io.mycat.memory.unsafe.ringbuffer.exception; + +/** + * @author 
lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/23 + */ +@SuppressWarnings("serial") +public final class InsufficientCapacityException extends Exception { + public static final InsufficientCapacityException INSTANCE = new InsufficientCapacityException(); + + private InsufficientCapacityException() { + } + + @Override + public synchronized Throwable fillInStackTrace() { + return this; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/exception/TimeoutException.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/exception/TimeoutException.java new file mode 100644 index 000000000..5c61ba81c --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/exception/TimeoutException.java @@ -0,0 +1,20 @@ +package io.mycat.memory.unsafe.ringbuffer.exception; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/24 + */ +public class TimeoutException extends Exception { + public static final TimeoutException INSTANCE = new TimeoutException(); + + private TimeoutException() + { + } + + @Override + public synchronized Throwable fillInStackTrace() + { + return this; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/producer/AbstractSequencer.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/producer/AbstractSequencer.java new file mode 100644 index 000000000..5c9b1d296 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/producer/AbstractSequencer.java @@ -0,0 +1,76 @@ +package io.mycat.memory.unsafe.ringbuffer.producer; + +import io.mycat.memory.unsafe.ringbuffer.common.sequence.Sequence; +import io.mycat.memory.unsafe.ringbuffer.common.barrier.SequenceBarrier; +import io.mycat.memory.unsafe.ringbuffer.common.sequence.SequenceGroups; +import io.mycat.memory.unsafe.ringbuffer.common.waitStrategy.WaitStrategy; +import io.mycat.memory.unsafe.ringbuffer.exception.InsufficientCapacityException; +import io.mycat.memory.unsafe.ringbuffer.utils.Util; + +import 
java.util.concurrent.atomic.AtomicReferenceFieldUpdater; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/24 + */ +public abstract class AbstractSequencer implements Sequencer { + + private static final AtomicReferenceFieldUpdater SEQUENCE_UPDATER = + AtomicReferenceFieldUpdater.newUpdater(AbstractSequencer.class, Sequence[].class, "gatingSequences"); + + protected final int bufferSize; + protected final WaitStrategy waitStrategy; + protected final Sequence cursor = new Sequence(Sequencer.INITIAL_CURSOR_VALUE); + protected volatile Sequence[] gatingSequences = new Sequence[0]; + + public AbstractSequencer(int bufferSize, WaitStrategy waitStrategy) + { + if (bufferSize < 1) + { + throw new IllegalArgumentException("bufferSize must not be less than 1"); + } + if (Integer.bitCount(bufferSize) != 1) + { + throw new IllegalArgumentException("bufferSize must be a power of 2"); + } + + this.bufferSize = bufferSize; + this.waitStrategy = waitStrategy; + } + + @Override + public final long getCursor() + { + return cursor.get(); + } + + + @Override + public final int getBufferSize() + { + return bufferSize; + } + + @Override + public void addGatingSequences(Sequence... gatingSequences) { + SequenceGroups.addSequences(this, SEQUENCE_UPDATER, this, gatingSequences); + } + + @Override + public boolean removeGatingSequence(Sequence sequence) { + return SequenceGroups.removeSequence(this, SEQUENCE_UPDATER, sequence); + } + + @Override + public long getMinimumSequence() { + return Util.getMinimumSequence(gatingSequences, cursor.get()); + } + + public SequenceBarrier newBarrier(Sequence... 
sequencesToTrack) + { + return null; + //TODO 完成SequenceBarrier +// return new ProcessingSequenceBarrier(this, waitStrategy, cursor, sequencesToTrack); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/producer/MultiProducerSequencer.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/producer/MultiProducerSequencer.java new file mode 100644 index 000000000..9c17b2bd8 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/producer/MultiProducerSequencer.java @@ -0,0 +1,258 @@ +package io.mycat.memory.unsafe.ringbuffer.producer; + +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.ringbuffer.common.sequence.Sequence; +import io.mycat.memory.unsafe.ringbuffer.common.waitStrategy.WaitStrategy; +import io.mycat.memory.unsafe.ringbuffer.exception.InsufficientCapacityException; +import io.mycat.memory.unsafe.ringbuffer.utils.Util; + +import java.util.concurrent.locks.LockSupport; + +/** + * 多生产者类,线程安全,与单一生产者不同的是,这里的cursor不再是可以消费的标记,而是多线程生产者抢占的标记 + * 可以消费的sequence由availableBuffer来判断标识 + * + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/24 + */ +public class MultiProducerSequencer extends AbstractSequencer{ + private static final long BASE = Platform.arrayBaseOffset(int[].class); + private static final long SCALE = Platform.arrayIndexScale(int[].class); + + private final Sequence gatingSequenceCache = new Sequence(Sequencer.INITIAL_CURSOR_VALUE); + + private final int[] availableBuffer; + //利用对2^n取模 = 对2^n -1 取与运算原理,indexMask=bufferSize - 1 + private final int indexMask; + //就是上面的n,用来定位某个sequence到底转了多少圈,用来标识已被发布的sequence。 + //为什么不直接将sequence存入availableBuffer,因为这样sequence值会过大,很容易溢出 + private final int indexShift; + + public MultiProducerSequencer(int bufferSize, final WaitStrategy waitStrategy) + { + super(bufferSize, waitStrategy); + availableBuffer = new int[bufferSize]; + indexMask = bufferSize - 1; + indexShift = Util.log2(bufferSize); + initialiseAvailableBuffer(); + } + + /** + * 
将availableBuffer都初始化为-1 + */ + private void initialiseAvailableBuffer() { + for (int i = availableBuffer.length - 1; i != 0; i--) { + setAvailableBufferValue(i, -1); + } + setAvailableBufferValue(0, -1); + } + + /** + * 发布某个sequence之前的都可以被消费了需要将availableBuffer上对应sequence下标的值设置为第几次用到这个槽 + * @param sequence + */ + private void setAvailable(final long sequence) { + setAvailableBufferValue(calculateIndex(sequence), calculateAvailabilityFlag(sequence)); + } + + /** + * 某个sequence右移indexShift,代表这个Sequence是第几次用到这个ringBuffer的某个槽,也就是这个sequence转了多少圈 + * @param sequence + * @return + */ + private int calculateAvailabilityFlag(final long sequence) { + return (int) (sequence >>> indexShift); + } + + /** + * 定位ringBuffer上某个槽用于生产event,对2^n取模 = 对2^n -1 + * @param sequence + * @return + */ + private int calculateIndex(final long sequence) { + return ((int) sequence) & indexMask; + } + + /** + * 通过Unsafe更新数组非volatile类型的值 + * 数组结构 + * -------------- + * * 数组头 * BASE + * * reference1 * SCALE + * * reference2 * SCALE + * * reference3 * SCALE + * -------------- + * @param index + * @param flag + */ + private void setAvailableBufferValue(int index, int flag) { + long bufferAddress = (index * SCALE) + BASE; + Platform.putOrderedInt(availableBuffer, bufferAddress, flag); + } + + @Override + public void claim(long sequence) { + cursor.set(sequence); + } + + /** + * 用同样的方法计算给定的sequence,判断与availableBuffer对应下标的值是否相等,如果相等证明已被发布可以消费 + * @param sequence of the buffer to check + * @return + */ + @Override + public boolean isAvailable(long sequence) { + int index = calculateIndex(sequence); + int flag = calculateAvailabilityFlag(sequence); + long bufferAddress = (index * SCALE) + BASE; + return Platform.getIntVolatile(availableBuffer, bufferAddress) == flag; + } + + /** + * 获取最高的可消费sequence,减少获取次数 + * @param nextSequence The sequence to start scanning from. + * @param availableSequence The sequence to scan to. 
+ * @return + */ + @Override + public long getHighestPublishedSequence(long nextSequence, long availableSequence) { + for (long sequence = nextSequence; sequence <= availableSequence; sequence++) { + if (!isAvailable(sequence)) { + return sequence - 1; + } + } + return availableSequence; + } + + @Override + public boolean hasAvailableCapacity(final int requiredCapacity) { + return hasAvailableCapacity(gatingSequences, requiredCapacity, cursor.get()); + } + + /** + * 与单一生产者验证原理类似 + * @param gatingSequences + * @param requiredCapacity + * @param cursorValue + * @return + */ + private boolean hasAvailableCapacity(Sequence[] gatingSequences, final int requiredCapacity, long cursorValue) + { + //下一位置加上所需容量减去整个bufferSize,如果为正数,那证明至少转了一圈,则需要检查gatingSequences(由消费者更新里面的Sequence值)以保证不覆盖还未被消费的 + //由于最多只能生产不大于整个bufferSize的Events。所以减去一个bufferSize与最小sequence相比较即可 + long wrapPoint = (cursorValue + requiredCapacity) - bufferSize; + //缓存 + long cachedGatingSequence = gatingSequenceCache.get(); + //缓存失效条件 + if (wrapPoint > cachedGatingSequence || cachedGatingSequence > cursorValue) + { + long minSequence = Util.getMinimumSequence(gatingSequences, cursorValue); + gatingSequenceCache.set(minSequence); + //空间不足 + if (wrapPoint > minSequence) + { + return false; + } + } + return true; + } + + @Override + public long remainingCapacity() { + //与单一生产者的计算方法同理,不考虑并发 + long consumed = Util.getMinimumSequence(gatingSequences, cursor.get()); + long produced = cursor.get(); + return getBufferSize() - (produced - consumed); + } + + @Override + public long next() { + return next(1); + } + + /** + * 用于多个生产者抢占n个RingBuffer槽用于生产Event + * + * @param n + * @return + */ + @Override + public long next(int n) { + if (n < 1) { + throw new IllegalArgumentException("n must be > 0"); + } + + long current; + long next; + + do { + //首先通过缓存判断空间是否足够 + current = cursor.get(); + next = current + n; + + long wrapPoint = next - bufferSize; + long cachedGatingSequence = gatingSequenceCache.get(); + //如果缓存不满足 + if 
(wrapPoint > cachedGatingSequence || cachedGatingSequence > current) { + //重新获取最小的 + long gatingSequence = Util.getMinimumSequence(gatingSequences, current); + //如果空间不足,则唤醒消费者消费,并让出CPU + if (wrapPoint > gatingSequence) { + waitStrategy.signalAllWhenBlocking(); + LockSupport.parkNanos(1); // TODO, should we spin based on the wait strategy? + continue; + } + //重新设置缓存 + gatingSequenceCache.set(gatingSequence); + } //如果空间足够,尝试CAS更新cursor,更新cursor成功代表成功获取n个槽,退出死循环 + else if (cursor.compareAndSet(current, next)) { + break; + } + } + while (true); + //返回最新的cursor值 + return next; + } + + @Override + public long tryNext() throws InsufficientCapacityException { + return tryNext(1); + } + + @Override + public long tryNext(int n) throws InsufficientCapacityException { + if (n < 1) { + throw new IllegalArgumentException("n must be > 0"); + } + + long current; + long next; + //尝试获取一次,若不成功,则抛InsufficientCapacityException + do { + current = cursor.get(); + next = current + n; + + if (!hasAvailableCapacity(gatingSequences, n, current)) { + throw InsufficientCapacityException.INSTANCE; + } + } + while (!cursor.compareAndSet(current, next)); + + return next; + } + + @Override + public void publish(long sequence) { + setAvailable(sequence); + waitStrategy.signalAllWhenBlocking(); + } + + @Override + public void publish(long lo, long hi) { + for (long l = lo; l <= hi; l++) { + setAvailable(l); + } + waitStrategy.signalAllWhenBlocking(); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/producer/Sequencer.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/producer/Sequencer.java new file mode 100644 index 000000000..48f445d21 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/producer/Sequencer.java @@ -0,0 +1,78 @@ +package io.mycat.memory.unsafe.ringbuffer.producer; + +import io.mycat.memory.unsafe.ringbuffer.common.Cursored; +import io.mycat.memory.unsafe.ringbuffer.common.sequence.Sequence; +import 
io.mycat.memory.unsafe.ringbuffer.common.Sequenced; +import io.mycat.memory.unsafe.ringbuffer.common.barrier.SequenceBarrier; + +/** + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/23 + */ +public interface Sequencer extends Cursored,Sequenced{ + /** + * -1 为 sequence的起始值 + */ + long INITIAL_CURSOR_VALUE = -1L; + + /** + * 申请一个特殊的Sequence,只有设定特殊起始值的ringBuffer时才会使用 + * + * @param sequence The sequence to initialise too. + */ + void claim(long sequence); + + /** + * 非阻塞,验证一个sequence是否已经被published并且可以消费 + * + * @param sequence of the buffer to check + * @return true if the sequence is available for use, false if not + */ + boolean isAvailable(long sequence); + + /** + * 将这些sequence加入到需要跟踪处理的gatingSequences中 + * + * @param gatingSequences The sequences to add. + */ + void addGatingSequences(Sequence... gatingSequences); + + /** + * 移除某个sequence + * + * @param sequence to be removed. + * @return true if this sequence was found, false otherwise. + */ + boolean removeGatingSequence(Sequence sequence); + + /** + * 给定一串需要跟踪的sequence,创建SequenceBarrier + * SequenceBarrier是用来给多消费者确定消费位置是否可以消费用的 + * + * @param sequencesToTrack + * @return A sequence barrier that will track the specified sequences. + * @see SequenceBarrier + */ + SequenceBarrier newBarrier(Sequence... sequencesToTrack); + + /** + * 获取这个ringBuffer的gatingSequences中最小的一个sequence + * + * @return The minimum gating sequence or the cursor sequence if + */ + long getMinimumSequence(); + + /** + * 获取最高可以读取的Sequence + * + * @param nextSequence The sequence to start scanning from. + * @param availableSequence The sequence to scan to. + * @return The highest value that can be safely read, will be at least nextSequence - 1. + */ + long getHighestPublishedSequence(long nextSequence, long availableSequence); + /** + * 并没有什么用,不实现,注释掉 + */ +// EventPoller newPoller(DataProvider provider, Sequence... 
gatingSequences); +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/producer/SingleProducerSequencer.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/producer/SingleProducerSequencer.java new file mode 100644 index 000000000..ca1577746 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/producer/SingleProducerSequencer.java @@ -0,0 +1,157 @@ +package io.mycat.memory.unsafe.ringbuffer.producer; + +import io.mycat.memory.unsafe.ringbuffer.common.sequence.Sequence; +import io.mycat.memory.unsafe.ringbuffer.common.waitStrategy.WaitStrategy; +import io.mycat.memory.unsafe.ringbuffer.exception.InsufficientCapacityException; +import io.mycat.memory.unsafe.ringbuffer.utils.Util; + +import java.util.concurrent.locks.LockSupport; + +/** + * 单一生产者相关类,非线程安全 + * + * @author lmax.Disruptor + * @version 3.3.5 + * @date 2016/7/24 + */ + +abstract class SingleProducerSequencerPad extends AbstractSequencer +{ + protected long p1, p2, p3, p4, p5, p6, p7; + + public SingleProducerSequencerPad(int bufferSize, WaitStrategy waitStrategy) + { + super(bufferSize, waitStrategy); + } +} + +abstract class SingleProducerSequencerFields extends SingleProducerSequencerPad +{ + public SingleProducerSequencerFields(int bufferSize, WaitStrategy waitStrategy) + { + super(bufferSize, waitStrategy); + } + + protected long nextValue = Sequence.INITIAL_VALUE; + protected long cachedValue = Sequence.INITIAL_VALUE; +} + +public class SingleProducerSequencer extends SingleProducerSequencerFields{ + + public SingleProducerSequencer(int bufferSize, final WaitStrategy waitStrategy) { + super(bufferSize, waitStrategy); + } + + @Override + public void claim(long sequence) { + nextValue = sequence; + } + + @Override + public boolean isAvailable(long sequence) { + return sequence <= cursor.get(); + } + + @Override + public long getHighestPublishedSequence(long nextSequence, long availableSequence) { + return availableSequence; + } + + @Override + public boolean 
hasAvailableCapacity(int requiredCapacity) { + //下一个生产Sequence位置 + long nextValue = this.nextValue; + //下一位置加上所需容量减去整个bufferSize,如果为正数,那证明至少转了一圈,则需要检查gatingSequences(由消费者更新里面的Sequence值)以保证不覆盖还未被消费的 + long wrapPoint = (nextValue + requiredCapacity) - bufferSize; + //Disruptor经常用缓存,这里缓存之间所有gatingSequences最小的那个,这样不用每次都遍历一遍gatingSequences,影响效率 + long cachedGatingSequence = this.cachedValue; + //只要wrapPoint大于缓存的所有gatingSequences最小的那个,就重新检查更新缓存 + if (wrapPoint > cachedGatingSequence || cachedGatingSequence > nextValue) + { + long minSequence = Util.getMinimumSequence(gatingSequences, nextValue); + this.cachedValue = minSequence; + //空间不足返回false + if (wrapPoint > minSequence) + { + return false; + } + } + //若wrapPoint小于缓存的所有gatingSequences最小的那个,证明可以放心生产 + return true; + } + + @Override + public long remainingCapacity() { + //使用的 = 生产的 - 已经消费的 + //剩余容量 = 容量 - 使用的 + long nextValue = this.nextValue; + long consumed = Util.getMinimumSequence(gatingSequences, nextValue); + long produced = nextValue; + return getBufferSize() - (produced - consumed); + } + + @Override + public long next() { + return next(1); + } + + @Override + public long next(int n) { + if (n < 1) { + throw new IllegalArgumentException("n must be > 0"); + } + + long nextValue = this.nextValue; + //next方法和之前的hasAvailableCapacity同理,只不过这里是相当于阻塞的 + long nextSequence = nextValue + n; + long wrapPoint = nextSequence - bufferSize; + long cachedGatingSequence = this.cachedValue; + + if (wrapPoint > cachedGatingSequence || cachedGatingSequence > nextValue) { + long minSequence; + //只要wrapPoint大于最小的gatingSequences,那么不断唤醒消费者去消费,并利用LockSupport让出CPU,直到wrapPoint不大于最小的gatingSequences + while (wrapPoint > (minSequence = Util.getMinimumSequence(gatingSequences, nextValue))) { + waitStrategy.signalAllWhenBlocking(); + LockSupport.parkNanos(1L); // TODO: Use waitStrategy to spin? 
+ } + //同理,缓存最小的gatingSequences + this.cachedValue = minSequence; + } + + this.nextValue = nextSequence; + + return nextSequence; + } + + @Override + public long tryNext() throws InsufficientCapacityException { + return tryNext(1); + } + + @Override + public long tryNext(int n) throws InsufficientCapacityException { + if (n < 1) { + throw new IllegalArgumentException("n must be > 0"); + } + + if (!hasAvailableCapacity(n)) { + throw InsufficientCapacityException.INSTANCE; + } + + long nextSequence = this.nextValue += n; + + return nextSequence; + } + + @Override + public void publish(long sequence) { + //cursor代表可以消费的sequence + cursor.set(sequence); + waitStrategy.signalAllWhenBlocking(); + } + + @Override + public void publish(long lo, long hi) { + publish(hi); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/ringbuffer/utils/Util.java b/src/main/java/io/mycat/memory/unsafe/ringbuffer/utils/Util.java new file mode 100644 index 000000000..49bd1c820 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/ringbuffer/utils/Util.java @@ -0,0 +1,60 @@ +package io.mycat.memory.unsafe.ringbuffer.utils; + +import io.mycat.memory.unsafe.ringbuffer.common.sequence.Sequence; + +/** + * Set of common functions used by the Disruptor + */ +public class Util { + /** + * 计算下一个不小于x的2的n次方 + * 原理:int最长为32位,计算x-1的前面有多少个0,之后用32减去这个值得到n,那么2的n次方就是下一个不小于x的2的n次方 + * + * @param x Value to round up + * @return The next power of 2 from x inclusive + */ + public static int ceilingNextPowerOfTwo(final int x) { + return 1 << (32 - Integer.numberOfLeadingZeros(x - 1)); + } + + /** + * 获取Sequence数组中value最小的值 + * + * @param sequences to compare. + * @return the minimum sequence found or Long.MAX_VALUE if the array is empty. + */ + public static long getMinimumSequence(final Sequence[] sequences) + { + return getMinimumSequence(sequences, Long.MAX_VALUE); + } + + /** + * 获取Sequence数组中value最小的值 + * + * @param sequences to compare. 
+ * @param minimum 如果数组为空,将返回这个值 + * @return the smaller of minimum sequence value found in {@code sequences} and {@code minimum}; + * {@code minimum} if {@code sequences} is empty + */ + public static long getMinimumSequence(final Sequence[] sequences, long minimum) + { + for (int i = 0, n = sequences.length; i < n; i++) + { + long value = sequences[i].get(); + minimum = Math.min(minimum, value); + } + + return minimum; + } + + public static int log2(int i) + { + int r = 0; + while ((i >>= 1) != 0) + { + ++r; + } + return r; + } + +} diff --git a/src/main/java/io/mycat/memory/unsafe/row/BufferHolder.java b/src/main/java/io/mycat/memory/unsafe/row/BufferHolder.java new file mode 100644 index 000000000..3b8c59505 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/row/BufferHolder.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.row; + + +import io.mycat.memory.unsafe.Platform; + +/** + * A helper class to manage the data buffer for an unsafe row. The data buffer can grow and + * automatically re-point the unsafe row to it. + * + * This class can be used to build a one-pass unsafe row writing program, i.e. 
data will be written + * to the data buffer directly and no extra copy is needed. There should be only one instance of + * this class per writing program, so that the memory segment/data buffer can be reused. Note that + * for each incoming record, we should call `reset` of BufferHolder instance before write the record + * and reuse the data buffer. + * + * Generally we should call `UnsafeRow.setTotalSize` and pass in `BufferHolder.totalSize` to update + * the size of the result row, after writing a record to the buffer. However, we can skip this step + * if the fields of row are all fixed-length, as the size of result row is also fixed. + */ +public class BufferHolder { + public byte[] buffer; + public int cursor = Platform.BYTE_ARRAY_OFFSET; + + + private final UnsafeRow row; + private final int fixedSize; + + public BufferHolder(UnsafeRow row) { + this(row, 64); + } + + public BufferHolder(UnsafeRow row, int initialSize) { + this.fixedSize = UnsafeRow.calculateBitSetWidthInBytes(row.numFields()) + 8 * row.numFields(); + this.buffer = new byte[fixedSize + initialSize]; + this.row = row; + this.row.pointTo(buffer, buffer.length); + } + + /** + * Grows the buffer by at least neededSize and points the row to the buffer. + */ + public void grow(int neededSize) { + final int length = totalSize() + neededSize; + if (buffer.length < length) { + // This will not happen frequently, because the buffer is re-used. 
+ final byte[] tmp = new byte[length * 2]; + Platform.copyMemory( + buffer, + Platform.BYTE_ARRAY_OFFSET, + tmp, + Platform.BYTE_ARRAY_OFFSET, + totalSize()); + buffer = tmp; + row.pointTo(buffer, buffer.length); + } + } + + public UnsafeRow getRow() { + return row; + } + + + public void reset() { + cursor = Platform.BYTE_ARRAY_OFFSET + fixedSize; + } + + public int totalSize() { + return cursor - Platform.BYTE_ARRAY_OFFSET; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/row/StructType.java b/src/main/java/io/mycat/memory/unsafe/row/StructType.java new file mode 100644 index 000000000..4b85eb39d --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/row/StructType.java @@ -0,0 +1,44 @@ +package io.mycat.memory.unsafe.row; + +import io.mycat.sqlengine.mpp.ColMeta; +import io.mycat.sqlengine.mpp.OrderCol; + +import javax.annotation.Nonnull; +import java.util.Map; + +/** + * Created by zagnix on 2016/6/6. + */ +public class StructType { + + private final Map columToIndx; + private final int fieldCount; + + private OrderCol[] orderCols = null; + + public StructType(@Nonnull Map columToIndx,int fieldCount){ + assert fieldCount >=0; + this.columToIndx = columToIndx; + this.fieldCount = fieldCount; + } + + public int length() { + return fieldCount; + } + + public Map getColumToIndx() { + return columToIndx; + } + + public OrderCol[] getOrderCols() { + return orderCols; + } + + public void setOrderCols(OrderCol[] orderCols) { + this.orderCols = orderCols; + } + + public long apply(int i) { + return 0; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/row/UnsafeRow.java b/src/main/java/io/mycat/memory/unsafe/row/UnsafeRow.java new file mode 100644 index 000000000..574bcd9da --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/row/UnsafeRow.java @@ -0,0 +1,549 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.row; + + +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.array.ByteArrayMethods; +import io.mycat.memory.unsafe.bitset.BitSetMethods; +import io.mycat.memory.unsafe.hash.Murmur3_x86_32; +import io.mycat.memory.unsafe.types.UTF8String; +import io.mycat.net.FrontendConnection; +import io.mycat.net.mysql.MySQLPacket; + +import java.io.IOException; +import java.io.OutputStream; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.ByteBuffer; + + +/** + * Modify by zagnix + * An Unsafe implementation of Row which is backed by raw memory instead of Java objects. + * + * Each tuple has three parts: [null bit set] [values] [variable length portion] + * + * The bit set is used for null tracking and is aligned to 8-byte word boundaries. It stores + * one bit per field. + * + * In the `values` region, we store one 8-byte word per field. For fields that hold fixed-length + * primitive types, such as long, double, or int, we store the value directly in the word. For + * fields with non-primitive or variable-length values, we store a relative offset (w.r.t. 
the + * base address of the row) that points to the beginning of the variable-length field, and length + * (they are combined into a long). + * + * Instances of `UnsafeRow` act as pointers to row data stored in this format. + */ +public final class UnsafeRow extends MySQLPacket { + + ////////////////////////////////////////////////////////////////////////////// + // Static methods + ////////////////////////////////////////////////////////////////////////////// + + public static int calculateBitSetWidthInBytes(int numFields) { + return ((numFields + 63)/ 64) * 8; + } + + public static int calculateFixedPortionByteSize(int numFields) { + return 8 * numFields + calculateBitSetWidthInBytes(numFields); + } + + ////////////////////////////////////////////////////////////////////////////// + // Private fields and methods + ////////////////////////////////////////////////////////////////////////////// + + private Object baseObject; + private long baseOffset; + + /** The number of fields in this row, used for calculating the bitset width (and in assertions) */ + private int numFields; + + /** The size of this row's backing data, in bytes) */ + private int sizeInBytes; + + /** The width of the null tracking bit set, in bytes */ + private int bitSetWidthInBytes; + + private long getFieldOffset(int ordinal) { + return baseOffset + bitSetWidthInBytes + ordinal * 8L; + } + + private void assertIndexIsValid(int index) { + assert index >= 0 : "index (" + index + ") should >= 0"; + assert index < numFields : "index (" + index + ") should < " + numFields; + } + + ////////////////////////////////////////////////////////////////////////////// + // Public methods + ////////////////////////////////////////////////////////////////////////////// + + /** + * Construct a new UnsafeRow. The resulting row won't be usable until `pointTo()` has been called, + * since the value returned by this constructor is equivalent to a null pointer. 
+ * + * @param numFields the number of fields in this row + */ + public UnsafeRow(int numFields) { + this.numFields = numFields; + this.bitSetWidthInBytes = calculateBitSetWidthInBytes(numFields); + } + + // for serializer + public UnsafeRow() {} + + public Object getBaseObject() { return baseObject; } + public long getBaseOffset() { return baseOffset; } + public int getSizeInBytes() { return sizeInBytes; } + + public int numFields() { return numFields; } + + /** + * Update this UnsafeRow to point to different backing data. + * + * @param baseObject the base object + * @param baseOffset the offset within the base object + * @param sizeInBytes the size of this row's backing data, in bytes + */ + public void pointTo(Object baseObject, long baseOffset, int sizeInBytes) { + assert numFields >= 0 : "numFields (" + numFields + ") should >= 0"; + this.baseObject = baseObject; + this.baseOffset = baseOffset; + this.sizeInBytes = sizeInBytes; + } + + /** + * Update this UnsafeRow to point to the underlying byte array. + * + * @param buf byte array to point to + * @param sizeInBytes the number of bytes valid in the byte array + */ + public void pointTo(byte[] buf, int sizeInBytes) { + pointTo(buf, Platform.BYTE_ARRAY_OFFSET, sizeInBytes); + } + + public void setTotalSize(int sizeInBytes) { + this.sizeInBytes = sizeInBytes; + } + + public void setNotNullAt(int i) { + assertIndexIsValid(i); + BitSetMethods.unset(baseObject, baseOffset, i); + } + + + public void setNullAt(int i) { + assertIndexIsValid(i); + BitSetMethods.set(baseObject, baseOffset, i); + // To preserve row equality, zero out the value when setting the column to null. + // Since this row does does not currently support updates to variable-length values, we don't + // have to worry about zeroing out that data. 
+ Platform.putLong(baseObject, getFieldOffset(i), 0); + } + + public void update(int ordinal, Object value) { + throw new UnsupportedOperationException(); + } + + public void setInt(int ordinal, int value) { + assertIndexIsValid(ordinal); + setNotNullAt(ordinal); + Platform.putInt(baseObject, getFieldOffset(ordinal), value); + } + + public void setLong(int ordinal, long value) { + assertIndexIsValid(ordinal); + setNotNullAt(ordinal); + Platform.putLong(baseObject, getFieldOffset(ordinal), value); + } + + public void setDouble(int ordinal, double value) { + assertIndexIsValid(ordinal); + setNotNullAt(ordinal); + if (Double.isNaN(value)) { + value = Double.NaN; + } + Platform.putDouble(baseObject, getFieldOffset(ordinal), value); + } + + public void setBoolean(int ordinal, boolean value) { + assertIndexIsValid(ordinal); + setNotNullAt(ordinal); + Platform.putBoolean(baseObject, getFieldOffset(ordinal), value); + } + + public void setShort(int ordinal, short value) { + assertIndexIsValid(ordinal); + setNotNullAt(ordinal); + Platform.putShort(baseObject, getFieldOffset(ordinal), value); + } + + public void setByte(int ordinal, byte value) { + assertIndexIsValid(ordinal); + setNotNullAt(ordinal); + Platform.putByte(baseObject, getFieldOffset(ordinal), value); + } + + public void setFloat(int ordinal, float value) { + assertIndexIsValid(ordinal); + setNotNullAt(ordinal); + if (Float.isNaN(value)) { + value = Float.NaN; + } + Platform.putFloat(baseObject, getFieldOffset(ordinal), value); + } + + + public boolean isNullAt(int ordinal) { + assertIndexIsValid(ordinal); + return BitSetMethods.isSet(baseObject, baseOffset, ordinal); + } + + + public boolean getBoolean(int ordinal) { + assertIndexIsValid(ordinal); + return Platform.getBoolean(baseObject, getFieldOffset(ordinal)); + } + + + public byte getByte(int ordinal) { + assertIndexIsValid(ordinal); + return Platform.getByte(baseObject, getFieldOffset(ordinal)); + } + + + public short getShort(int ordinal) { + 
assertIndexIsValid(ordinal); + return Platform.getShort(baseObject, getFieldOffset(ordinal)); + } + + + public int getInt(int ordinal) { + assertIndexIsValid(ordinal); + return Platform.getInt(baseObject, getFieldOffset(ordinal)); + } + + + public long getLong(int ordinal) { + assertIndexIsValid(ordinal); + return Platform.getLong(baseObject, getFieldOffset(ordinal)); + } + + + public float getFloat(int ordinal) { + assertIndexIsValid(ordinal); + return Platform.getFloat(baseObject, getFieldOffset(ordinal)); + } + + + public double getDouble(int ordinal) { + assertIndexIsValid(ordinal); + return Platform.getDouble(baseObject, getFieldOffset(ordinal)); + } + + + public UTF8String getUTF8String(int ordinal) { + if (isNullAt(ordinal)) return null; + final long offsetAndSize = getLong(ordinal); + final int offset = (int) (offsetAndSize >> 32); + final int size = (int) offsetAndSize; + return UTF8String.fromAddress(baseObject, baseOffset + offset, size); + } + public byte[] getBinary(int ordinal) { + if (isNullAt(ordinal)) { + return null; + } else { + final long offsetAndSize = getLong(ordinal); + final int offset = (int) (offsetAndSize >> 32); + final int size = (int) offsetAndSize; + final byte[] bytes = new byte[size]; + Platform.copyMemory( + baseObject, + baseOffset + offset, + bytes, + Platform.BYTE_ARRAY_OFFSET, + size + ); + return bytes; + } + } + + + + /** + * Copies this row, returning a self-contained UnsafeRow that stores its data in an internal + * byte array rather than referencing data stored in a data page. + */ + public UnsafeRow copy() { + UnsafeRow rowCopy = new UnsafeRow(numFields); + final byte[] rowDataCopy = new byte[sizeInBytes]; + Platform.copyMemory( + baseObject, + baseOffset, + rowDataCopy, + Platform.BYTE_ARRAY_OFFSET, + sizeInBytes + ); + rowCopy.pointTo(rowDataCopy, Platform.BYTE_ARRAY_OFFSET, sizeInBytes); + return rowCopy; + } + + /** + * Creates an empty UnsafeRow from a byte array with specified numBytes and numFields. 
+ * The returned row is invalid until we call copyFrom on it. + */ + public static UnsafeRow createFromByteArray(int numBytes, int numFields) { + final UnsafeRow row = new UnsafeRow(numFields); + row.pointTo(new byte[numBytes], numBytes); + return row; + } + + /** + * Copies the input UnsafeRow to this UnsafeRow, and resize the underlying byte[] when the + * input row is larger than this row. + */ + public void copyFrom(UnsafeRow row) { + // copyFrom is only available for UnsafeRow created from byte array. + assert (baseObject instanceof byte[]) && baseOffset == Platform.BYTE_ARRAY_OFFSET; + if (row.sizeInBytes > this.sizeInBytes) { + // resize the underlying byte[] if it's not large enough. + this.baseObject = new byte[row.sizeInBytes]; + } + Platform.copyMemory( + row.baseObject, row.baseOffset, this.baseObject, this.baseOffset, row.sizeInBytes); + // update the sizeInBytes. + this.sizeInBytes = row.sizeInBytes; + } + + /** + * Write this UnsafeRow's underlying bytes to the given OutputStream. + * + * @param out the stream to write to. + * @param writeBuffer a byte array for buffering chunks of off-heap data while writing to the + * output stream. If this row is backed by an on-heap byte array, then this + * buffer will not be used and may be null. 
+ */ + public void writeToStream(OutputStream out, byte[] writeBuffer) throws IOException { + if (baseObject instanceof byte[]) { + int offsetInByteArray = (int) (Platform.BYTE_ARRAY_OFFSET - baseOffset); + out.write((byte[]) baseObject, offsetInByteArray, sizeInBytes); + } else { + int dataRemaining = sizeInBytes; + long rowReadPosition = baseOffset; + while (dataRemaining > 0) { + int toTransfer = Math.min(writeBuffer.length, dataRemaining); + Platform.copyMemory( + baseObject, rowReadPosition, writeBuffer, Platform.BYTE_ARRAY_OFFSET, toTransfer); + out.write(writeBuffer, 0, toTransfer); + rowReadPosition += toTransfer; + dataRemaining -= toTransfer; + } + } + } + + @Override + public int hashCode() { + return Murmur3_x86_32.hashUnsafeWords(baseObject, baseOffset, sizeInBytes, 42); + } + + @Override + public boolean equals(Object other) { + if (other instanceof UnsafeRow) { + UnsafeRow o = (UnsafeRow) other; + return (sizeInBytes == o.sizeInBytes) && + ByteArrayMethods.arrayEquals(baseObject, baseOffset, o.baseObject, o.baseOffset, + sizeInBytes); + } + return false; + } + + /** + * Returns the underlying bytes for this UnsafeRow. 
+ */ + public byte[] getBytes() { + if (baseObject instanceof byte[] && baseOffset == Platform.BYTE_ARRAY_OFFSET + && (((byte[]) baseObject).length == sizeInBytes)) { + return (byte[]) baseObject; + } else { + byte[] bytes = new byte[sizeInBytes]; + Platform.copyMemory(baseObject, baseOffset, bytes, Platform.BYTE_ARRAY_OFFSET, sizeInBytes); + return bytes; + } + } + + public static final byte NULL_MARK = (byte) 251; + public static final byte EMPTY_MARK = (byte) 0; + + @Override + public ByteBuffer write(ByteBuffer bb, FrontendConnection c, + boolean writeSocketIfFull) { + bb = c.checkWriteBuffer(bb,c.getPacketHeaderSize(),writeSocketIfFull); + BufferUtil.writeUB3(bb, calcPacketSize()); + bb.put(packetId); + for (int i = 0; i < numFields; i++) { + if (!isNullAt(i)) { + byte[] fv = this.getBinary(i); + if (fv.length == 0) { + bb = c.checkWriteBuffer(bb, 1, writeSocketIfFull); + bb.put(UnsafeRow.EMPTY_MARK); + } else { + bb = c.checkWriteBuffer(bb, BufferUtil.getLength(fv), + writeSocketIfFull); + BufferUtil.writeLength(bb, fv.length); + /** + * 把数据写到Writer Buffer中 + */ + bb = c.writeToBuffer(fv, bb); + } + } else { + //Col null value + bb = c.checkWriteBuffer(bb,1,writeSocketIfFull); + bb.put(UnsafeRow.NULL_MARK); + } + } + return bb; + } + + @Override + public int calcPacketSize() { + int size = 0; + for (int i = 0; i < numFields; i++) { + byte[] v = this.getBinary(i); + size += (v == null || v.length == 0) ? 
1 : BufferUtil.getLength(v); + } + return size; + } + + public BigDecimal getDecimal(int ordinal, int scale) { + if (isNullAt(ordinal)) { + return null; + } + byte[] bytes = getBinary(ordinal); + BigInteger bigInteger = new BigInteger(bytes); + BigDecimal javaDecimal = new BigDecimal(bigInteger, scale); + return javaDecimal; + } + + /** + * update exist decimal column value to new decimal value + * + * NOTE: decimal max precision is limit to 38 + * @param ordinal + * @param value + * @param precision + */ + public void updateDecimal(int ordinal, BigDecimal value) { + assertIndexIsValid(ordinal); + // fixed length + long cursor = getLong(ordinal) >>> 32; + assert cursor > 0 : "invalid cursor " + cursor; + // zero-out the bytes + Platform.putLong(baseObject, baseOffset + cursor, 0L); + Platform.putLong(baseObject, baseOffset + cursor + 8, 0L); + + if (value == null) { + setNullAt(ordinal); + // keep the offset for future update + Platform.putLong(baseObject, getFieldOffset(ordinal), cursor << 32); + } else { + + final BigInteger integer = value.unscaledValue(); + byte[] bytes = integer.toByteArray(); + assert (bytes.length <= 16); + + // Write the bytes to the variable length portion. 
+ Platform.copyMemory(bytes, Platform.BYTE_ARRAY_OFFSET, baseObject, baseOffset + cursor, bytes.length); + setLong(ordinal, (cursor << 32) | ((long) bytes.length)); + } + + } + + /** + public Decimal getDecimal(int ordinal, int precision, int scale) { + if (isNullAt(ordinal)) { + return null; + } + if (precision <= Decimal.MAX_LONG_DIGITS()) { + return Decimal.createUnsafe(getLong(ordinal), precision, scale); + } else { + byte[] bytes = getBinary(ordinal); + BigInteger bigInteger = new BigInteger(bytes); + BigDecimal javaDecimal = new BigDecimal(bigInteger, scale); + return Decimal.apply(javaDecimal, precision, scale); + } + } + + public void setDecimal(int ordinal, Decimal value, int precision) { + assertIndexIsValid(ordinal); + if (precision <= Decimal.MAX_LONG_DIGITS()) { + // compact format + if (value == null) { + setNullAt(ordinal); + } else { + setLong(ordinal, value.toUnscaledLong()); + } + } else { + // fixed length + long cursor = getLong(ordinal) >>> 32; + assert cursor > 0 : "invalid cursor " + cursor; + // zero-out the bytes + Platform.putLong(baseObject, baseOffset + cursor, 0L); + Platform.putLong(baseObject, baseOffset + cursor + 8, 0L); + + if (value == null) { + setNullAt(ordinal); + // keep the offset for future update + Platform.putLong(baseObject, getFieldOffset(ordinal), cursor << 32); + } else { + + final BigInteger integer = value.toJavaBigDecimal().unscaledValue(); + byte[] bytes = integer.toByteArray(); + assert(bytes.length <= 16); + + // Write the bytes to the variable length portion. 
+ Platform.copyMemory( + bytes, Platform.BYTE_ARRAY_OFFSET, baseObject, baseOffset + cursor, bytes.length); + setLong(ordinal, (cursor << 32) | ((long) bytes.length)); + } + } + } + +*/ + @Override + protected String getPacketInfo() { + return "MySQL RowData Packet"; + } + + // This is for debugging + @Override + public String toString() { + StringBuilder build = new StringBuilder("["); + for (int i = 0; i < sizeInBytes; i += 8) { + if (i != 0) build.append(','); + build.append(Long.toHexString(Platform.getLong(baseObject, baseOffset + i))); + } + build.append(']'); + return build.toString(); + } + + public boolean anyNull() { + return BitSetMethods.anySet(baseObject, baseOffset, bitSetWidthInBytes / 8); + } + +} diff --git a/src/main/java/io/mycat/memory/unsafe/row/UnsafeRowWriter.java b/src/main/java/io/mycat/memory/unsafe/row/UnsafeRowWriter.java new file mode 100644 index 000000000..7be6d4d85 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/row/UnsafeRowWriter.java @@ -0,0 +1,231 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.mycat.memory.unsafe.row; + + +import java.math.BigDecimal; + +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.array.ByteArrayMethods; +import io.mycat.memory.unsafe.bitset.BitSetMethods; + +/** + * A helper class to write data into global row buffer using `UnsafeRow` format. + * + * It will remember the offset of row buffer which it starts to write, and move the cursor of row + * buffer while writing. If new data(can be the input record if this is the outermost writer, or + * nested struct if this is an inner writer) comes, the starting cursor of row buffer may be + * changed, so we need to call `UnsafeRowWriter.reset` before writing, to update the + * `startingOffset` and clear out null bits. + * + * Note that if this is the outermost writer, which means we will always write from the very + * beginning of the global row buffer, we don't need to update `startingOffset` and can just call + * `zeroOutNullBytes` before writing new data. + */ +public class UnsafeRowWriter { + + private final BufferHolder holder; + // The offset of the global buffer where we start to write this row. + private int startingOffset; + private final int nullBitsSize; + private final int fixedSize; + + public UnsafeRowWriter(BufferHolder holder,int numFields) { + this.holder = holder; + this.nullBitsSize = UnsafeRow.calculateBitSetWidthInBytes(numFields); + this.fixedSize = nullBitsSize + 8 * numFields; + this.startingOffset = holder.cursor; + } + + /** + * Resets the `startingOffset` according to the current cursor of row buffer, and clear out null + * bits. This should be called before we write a new nested struct to the row buffer. + */ + public void reset() { + this.startingOffset = holder.cursor; + + // grow the global buffer to make sure it has enough space to write fixed-length data. + holder.grow(fixedSize); + holder.cursor += fixedSize; + + zeroOutNullBytes(); + } + + /** + * Clears out null bits. 
This should be called before we write a new row to row buffer. + */ + public void zeroOutNullBytes() { + for (int i = 0; i < nullBitsSize; i += 8) { + Platform.putLong(holder.buffer, startingOffset + i, 0L); + } + } + + private void zeroOutPaddingBytes(int numBytes) { + if ((numBytes & 0x07) > 0) { + Platform.putLong(holder.buffer, holder.cursor + ((numBytes >> 3) << 3), 0L); + } + } + + public BufferHolder holder() { return holder; } + + public boolean isNullAt(int ordinal) { + return BitSetMethods.isSet(holder.buffer, startingOffset, ordinal); + } + + public void setNullAt(int ordinal) { + BitSetMethods.set(holder.buffer, startingOffset, ordinal); + Platform.putLong(holder.buffer, getFieldOffset(ordinal), 0L); + } + + public long getFieldOffset(int ordinal) { + return startingOffset + nullBitsSize + 8 * ordinal; + } + + public void setOffsetAndSize(int ordinal, long size) { + setOffsetAndSize(ordinal, holder.cursor, size); + } + + public void setOffsetAndSize(int ordinal, long currentCursor, long size) { + final long relativeOffset = currentCursor - startingOffset; + final long fieldOffset = getFieldOffset(ordinal); + final long offsetAndSize = (relativeOffset << 32) | size; + + Platform.putLong(holder.buffer, fieldOffset, offsetAndSize); + } + + // Do word alignment for this row and grow the row buffer if needed. + // todo: remove this after we make unsafe array data word align. 
+ public void alignToWords(int numBytes) { + final int remainder = numBytes & 0x07; + + if (remainder > 0) { + final int paddingBytes = 8 - remainder; + holder.grow(paddingBytes); + + for (int i = 0; i < paddingBytes; i++) { + Platform.putByte(holder.buffer, holder.cursor, (byte) 0); + holder.cursor++; + } + } + } + + public void write(int ordinal, boolean value) { + final long offset = getFieldOffset(ordinal); + Platform.putLong(holder.buffer, offset, 0L); + Platform.putBoolean(holder.buffer, offset, value); + } + + public void write(int ordinal, byte value) { + final long offset = getFieldOffset(ordinal); + Platform.putLong(holder.buffer, offset, 0L); + Platform.putByte(holder.buffer, offset, value); + } + + public void write(int ordinal, short value) { + final long offset = getFieldOffset(ordinal); + Platform.putLong(holder.buffer, offset, 0L); + Platform.putShort(holder.buffer, offset, value); + } + + public void write(int ordinal, int value) { + final long offset = getFieldOffset(ordinal); + Platform.putLong(holder.buffer, offset, 0L); + Platform.putInt(holder.buffer, offset, value); + } + + public void write(int ordinal, long value) { + Platform.putLong(holder.buffer, getFieldOffset(ordinal), value); + } + + public void write(int ordinal, float value) { + if (Float.isNaN(value)) { + value = Float.NaN; + } + final long offset = getFieldOffset(ordinal); + Platform.putLong(holder.buffer, offset, 0L); + Platform.putFloat(holder.buffer, offset, value); + } + + public void write(int ordinal, double value) { + if (Double.isNaN(value)) { + value = Double.NaN; + } + Platform.putDouble(holder.buffer, getFieldOffset(ordinal), value); + } + + public void write(int ordinal, byte[] input) { + if(input == null){ + return; + } + write(ordinal, input, 0, input.length); + } + + public void write(int ordinal, byte[] input, int offset, int numBytes) { + final int roundedSize = ByteArrayMethods.roundNumberOfBytesToNearestWord(numBytes); + + // grow the global buffer before 
writing data. + holder.grow(roundedSize); + + zeroOutPaddingBytes(numBytes); + + // Write the bytes to the variable length portion. + Platform.copyMemory(input, Platform.BYTE_ARRAY_OFFSET + offset, + holder.buffer, holder.cursor, numBytes); + + setOffsetAndSize(ordinal, numBytes); + + // move the cursor forward. + holder.cursor += roundedSize; + } + + /** + * different from Spark, we use java BigDecimal here, + * and we limit the max precision to be 38 because the bytes length limit to be 16 + * + * @param ordinal + * @param input + */ + public void write(int ordinal, BigDecimal input) { + + // grow the global buffer before writing data. + holder.grow(16); + + // zero-out the bytes + Platform.putLong(holder.buffer, holder.cursor, 0L); + Platform.putLong(holder.buffer, holder.cursor + 8, 0L); + + // Make sure Decimal object has the same scale as DecimalType. + // Note that we may pass in null Decimal object to set null for it. + if (input == null) { + BitSetMethods.set(holder.buffer, startingOffset, ordinal); + // keep the offset for future update + setOffsetAndSize(ordinal, 0L); + } else { + final byte[] bytes = input.unscaledValue().toByteArray(); + assert bytes.length <= 16; + + // Write the bytes to the variable length portion. + Platform.copyMemory(bytes, Platform.BYTE_ARRAY_OFFSET, holder.buffer, holder.cursor, bytes.length); + setOffsetAndSize(ordinal, bytes.length); + } + + // move the cursor forward. + holder.cursor += 16; + } + +} diff --git a/src/main/java/io/mycat/memory/unsafe/storage/ConnectionId.java b/src/main/java/io/mycat/memory/unsafe/storage/ConnectionId.java new file mode 100644 index 000000000..154309158 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/storage/ConnectionId.java @@ -0,0 +1,27 @@ +package io.mycat.memory.unsafe.storage; + +/** + * + * Created by zagnix on 2016/6/6. 
+ * + */ +public abstract class ConnectionId { + protected String name; + public abstract String getBlockName(); + + @Override + public boolean equals(Object arg0) { + return super.equals(arg0); + } + + @Override + public int hashCode() { + return super.hashCode(); + } + + @Override + public String toString() { + return super.toString(); + } + +} diff --git a/src/main/java/io/mycat/memory/unsafe/storage/DataNodeDiskManager.java b/src/main/java/io/mycat/memory/unsafe/storage/DataNodeDiskManager.java new file mode 100644 index 000000000..dc3d86925 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/storage/DataNodeDiskManager.java @@ -0,0 +1,45 @@ +package io.mycat.memory.unsafe.storage; + + + +import io.mycat.memory.unsafe.utils.MycatPropertyConf; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; + +/** + * Created by zagnix on 2016/6/3. + */ +public class DataNodeDiskManager { + + private MycatPropertyConf conf; + private boolean deleteFilesOnStop; + private SerializerManager serializerManager; + + public DataNodeDiskManager(MycatPropertyConf conf, boolean deleteFilesOnStop, SerializerManager serializerManager){ + this.conf = conf; + this.deleteFilesOnStop = deleteFilesOnStop; + this.serializerManager = serializerManager; + } + + public DataNodeFileManager diskBlockManager() throws IOException { + return new DataNodeFileManager(conf, deleteFilesOnStop); + } + + + /** + * A short circuited method to get a block writer that can write data directly to disk. + * The Block will be appended to the File specified by filename. Callers should handle error + * cases. 
+ */ + public DiskRowWriter getDiskWriter( + ConnectionId blockId, + File file, + SerializerInstance serializerInstance, + int bufferSize) throws IOException { + boolean syncWrites = conf.getBoolean("mycat.merge.sync", false); + return new DiskRowWriter(file, serializerInstance, bufferSize,new FileOutputStream(file), + syncWrites,blockId); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/storage/DataNodeFileManager.java b/src/main/java/io/mycat/memory/unsafe/storage/DataNodeFileManager.java new file mode 100644 index 000000000..f199a3cb3 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/storage/DataNodeFileManager.java @@ -0,0 +1,179 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.storage; + + +import io.mycat.memory.unsafe.utils.JavaUtils; +import io.mycat.memory.unsafe.utils.MycatPropertyConf; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; + + +/** + * Creates and maintains the logical mapping between logical blocks and physical on-disk + * locations. 
One block is mapped to one file with a name given by its BlockId. + * + * Block files are hashed among the directories listed in mycat.local.dir + */ +public class DataNodeFileManager { + private static final Logger LOG = LoggerFactory.getLogger(DataNodeFileManager.class); + private MycatPropertyConf conf; + private boolean deleteFilesOnStop; + /** + * TODO 操作完成之后,需要删除临时文件 + */ + // The content of subDirs is immutable but the content of subDirs(i) is mutable. And the content + // of subDirs(i) is protected by the lock of subDirs(i) + // private val shutdownHook ; + /* Create one local directory for each path mentioned in spark.local.dir; then, inside this + * directory, create multiple subdirectories that we will hash files into, in order to avoid + * having really large inodes at the top level. */ + + private List localDirs ; + private int subDirsPerLocalDir; + + private ConcurrentHashMap> subDirs; + + + public DataNodeFileManager(MycatPropertyConf conf , boolean deleteFilesOnStop) throws IOException { + + this.conf = conf; + this.deleteFilesOnStop = deleteFilesOnStop; + + + subDirsPerLocalDir = conf.getInt("mycat.diskStore.subDirectories", 64); + localDirs = createLocalDirs(conf); + if (localDirs.isEmpty()) { + System.exit(-1); + } + subDirs = new ConcurrentHashMap>(localDirs.size()); + + + + for (int i = 0; i < localDirs.size() ; i++) { + ArrayList list = new ArrayList(subDirsPerLocalDir); + + for (int j = 0; j < subDirsPerLocalDir; j++) { + list.add(i,null); + } + + subDirs.put(i,list); + } + + } + + /** Produces a unique block id and File suitable for storing local intermediate results. */ + public TempDataNodeId createTempLocalBlock() throws IOException { + + TempDataNodeId blockId = new TempDataNodeId(UUID.randomUUID().toString()); + + while (getFile(blockId).exists()) { + blockId = new TempDataNodeId(UUID.randomUUID().toString()); + }; + + return blockId; + } + + + /** Looks up a file by hashing it into one of our local subdirectories. 
*/ + // This method should be kept in sync with + // org.apache.spark.network.shuffle.ExternalShuffleBlockResolver#getFile(). + public File getFile(String filename) throws IOException { + // Figure out which local directory it hashes to, and which subdirectory in that + int hash = JavaUtils.nonNegativeHash(filename); + int dirId = hash % localDirs.size(); + int subDirId = (hash / localDirs.size()) % subDirsPerLocalDir; + + synchronized (this) { + File file = subDirs.get(dirId).get(subDirId); + if (file != null) { + + } else { + file = new File(localDirs.get(dirId), "%02x".format(String.valueOf(subDirId))); + if (!file.exists() && !file.mkdir()) { + throw new IOException("Failed to create local dir in $newDir."); + } + subDirs.get(dirId).add(subDirId,file); + } + } + + /** + *类似二维数组 + */ + return new File(subDirs.get(dirId).get(subDirId),filename); + } + + public File getFile(ConnectionId connid) throws IOException { + return getFile(connid.name); + } + + /**TODO config root + * Create local directories for storing block data. These directories are + * located inside configured local directories and won't + * be deleted on JVM exit when using the external shuffle service. 
+ */ + private List createLocalDirs(MycatPropertyConf conf) { + + String rootDirs = conf.getString("mycat.local.dirs","datanode"); + + String rdir[] = rootDirs.split(","); + List dirs = new ArrayList(); + for (int i = 0; i 0){ + localDir = localDirs.get(i); + //System.out.println(localDir); + if (localDir.isDirectory() && localDir.exists()) { + try { + JavaUtils.deleteRecursively(localDir); + } catch(Exception e) { + LOG.error(e.getMessage()); + } + } + i++; + } + } + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/storage/DeserializationStream.java b/src/main/java/io/mycat/memory/unsafe/storage/DeserializationStream.java new file mode 100644 index 000000000..f888573ed --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/storage/DeserializationStream.java @@ -0,0 +1,16 @@ +package io.mycat.memory.unsafe.storage; + + + +/** + * Created by zagnix on 2016/6/3. + */ +public abstract class DeserializationStream { + /** The most general-purpose method to read an object. */ + public abstract T readObject(); + /** Reads the object representing the key of a key-value pair. */ + public T readKey(){return readObject();} + /** Reads the object representing the value of a key-value pair. */ + public T readValue(){ return readObject();} + public abstract void close(); +} diff --git a/src/main/java/io/mycat/memory/unsafe/storage/DiskRowWriter.java b/src/main/java/io/mycat/memory/unsafe/storage/DiskRowWriter.java new file mode 100644 index 000000000..99f4a6f28 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/storage/DiskRowWriter.java @@ -0,0 +1,255 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.storage; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.*; +import java.nio.channels.FileChannel; + +/** + * A class for writing JVM objects directly to a file on disk. This class allows data to be appended + * to an existing block and can guarantee atomicity in the case of faults as it allows the caller to + * revert partial writes. + * + * This class does not support concurrent writes. Also, once the writer has been opened it cannot be + * reopened again. + */ +public class DiskRowWriter extends OutputStream { + /** The file channel, used for repositioning / truncating the file. */ + private static final Logger LOG = LoggerFactory.getLogger(DiskRowWriter.class); + + private FileChannel channel = null; + private OutputStream bs = null; + private FileOutputStream fos = null; + private TimeTrackingOutputStream ts = null; + private SerializationStream objOut = null; + private boolean initialized = false; + private boolean hasBeenClosed = false; + private boolean commitAndCloseHasBeenCalled = false; + + /** + * Cursors used to represent positions in the file. + * + * xxxxxxxx|--------|--- | + * ^ ^ ^ + * | | finalPosition + * | reportedPosition + * initialPosition + * + * initialPosition: Offset in the file where we start writing. Immutable. + * reportedPosition: Position at the time of the last update to the write metrics. + * finalPosition: Offset where we stopped writing. Set on closeAndCommit() then never changed. + * -----: Current writes to the underlying file. 
+ * xxxxx: Existing contents of the file. + */ + private long initialPosition = 0; + private long finalPosition = -1; + private long reportedPosition = 0; + + /** + * Keep track of number of records written and also use this to periodically + * output bytes written since the latter is expensive to do for each record. + */ + private long numRecordsWritten = 0; + + private File file; + private SerializerInstance serializerInstance; + private int bufferSize; + private OutputStream compressStream; + private boolean syncWrites; + // These write metrics concurrently shared with other active DiskBlockObjectWriters who + // are themselves performing writes. All updates must be relative. + /**ShuffleWriteMetrics writeMetrics,*/ + private ConnectionId blockId; + + + public DiskRowWriter( + File file, + SerializerInstance serializerInstance, + int bufferSize, + OutputStream compressStream , + boolean syncWrites, + ConnectionId blockId) throws IOException { + + this.file = file; + this.serializerInstance = serializerInstance; + this.bufferSize = bufferSize; + this.compressStream = compressStream; + this.syncWrites = syncWrites; + this.blockId = blockId; + initialPosition = file.length(); + reportedPosition = initialPosition; + } + + + public DiskRowWriter open() throws FileNotFoundException { + + if (hasBeenClosed) { + throw new IllegalStateException("Writer already closed. 
Cannot be reopened."); + } + + fos = new FileOutputStream(file,true); + ts = new TimeTrackingOutputStream(/**writeMetrics,*/ fos); + channel = fos.getChannel(); + bs = new BufferedOutputStream(ts,bufferSize); + objOut = serializerInstance.serializeStream(bs); + initialized = true; + + return this; + + } + + + @Override + public void close() { + if (initialized) { + try { + if (syncWrites) { + //Force outstanding writes to disk and track how long it takes + objOut.flush(); + long start = System.nanoTime(); + fos.getFD().sync(); + // writeMetrics.incWriteTime(System.nanoTime() - start); + } + } catch (IOException e) { + LOG.error(e.getMessage()); + }finally { + objOut.close(); + } + channel = null; + bs = null; + fos = null; + ts = null; + objOut = null; + initialized = false; + hasBeenClosed = true; + } + } + + public boolean isOpen(){ + return objOut != null; + } + + /** + * Flush the partial writes and commit them as a single atomic block. + */ + public void commitAndClose() throws IOException { + if (initialized) { + // NOTE: Because Kryo doesn’t flush the underlying stream we explicitly flush both the + // serializer stream and the lower level stream. + objOut.flush(); + bs.flush(); + close(); + finalPosition = file.length(); + // In certain compression codecs, more bytes are written after close() is called + //writeMetrics.incBytesWritten(finalPosition - reportedPosition) + } else { + finalPosition = file.length(); + } + commitAndCloseHasBeenCalled = true; + } + + + /** + * Reverts writes that haven’t been flushed yet. Callers should invoke this function + * when there are runtime exceptions. This method will not throw, though it may be + * unsuccessful in truncating written data. + * + * @return the file that this DiskRowWriter wrote to. + */ + public File revertPartialWritesAndClose() throws IOException { + // Discard current writes. We do this by flushing the outstanding writes and then + // truncating the file to its initial position. 
+ try { + if (initialized) { + // writeMetrics.decBytesWritten(reportedPosition - initialPosition) + // writeMetrics.decRecordsWritten(numRecordsWritten) + objOut.flush(); + bs.flush(); + close(); + } + + FileOutputStream truncateStream = new FileOutputStream(file, true); + try { + truncateStream.getChannel().truncate(initialPosition); + return file; + } finally { + truncateStream.close(); + } + } catch(Exception e) { + LOG.error(e.getMessage()); + return file; + } + } + + /** + * Writes a key-value pair. + */ + private void write(Object key, Object value) throws IOException { + if (!initialized) { + open(); + } + + objOut.writeKey(key); + objOut.writeValue(value); + recordWritten(); + } + @Override + public void write(int b){ + throw new UnsupportedOperationException(); + } + @Override + public void write(byte [] kvBytes ,int offs, int len) throws IOException { + if (!initialized) { + open(); + } + + bs.write(kvBytes,offs, len); + } + + /** + * Notify the writer that a record worth of bytes has been written with OutputStream#write. + */ + public void recordWritten() throws IOException { + numRecordsWritten += 1; +//writeMetrics.incRecordsWritten(1) + +// TODO: call updateBytesWritten() less frequently. + if (numRecordsWritten % 32 == 0) { + updateBytesWritten(); + } + } + + /** + * Report the number of bytes written in this writer’s shuffle write metrics. + * Note that this is only valid before the underlying streams are closed. 
+ */ + private void updateBytesWritten() throws IOException { + long pos = channel.position(); + //writeMetrics.incBytesWritten(pos - reportedPosition) + reportedPosition = pos; + } + + @Override + public void flush() throws IOException { + objOut.flush(); + bs.flush(); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/storage/DummySerializerInstance.java b/src/main/java/io/mycat/memory/unsafe/storage/DummySerializerInstance.java new file mode 100644 index 000000000..c2b725d84 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/storage/DummySerializerInstance.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.storage; + + + +import io.mycat.memory.unsafe.Platform; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; + +/** + * Unfortunately, we need a serializer instance in order to construct a DiskRowWriter. + * Our shuffle write path doesn't actually use this serializer (since we end up calling the + * `write() OutputStream methods), but DiskRowWriter still calls some methods on it. To work + * around this, we pass a dummy no-op serializer. 
+ */ + +public final class DummySerializerInstance extends SerializerInstance { + + public static final DummySerializerInstance INSTANCE = new DummySerializerInstance(); + + private DummySerializerInstance() { } + + @Override + public SerializationStream serializeStream(final OutputStream s) { + return new SerializationStream() { + @Override + public SerializationStream writeObject(Object o) { + return null; + } + + @Override + public void flush() { + // Need to implement this because DiskObjectWriter uses it to flush the compression stream + try { + s.flush(); + } catch (IOException e) { + Platform.throwException(e); + } + } +// public SerializationStream writeObject(T t, T ev1) { +// throw new UnsupportedOperationException(); +// } + + @Override + public void close() { + // Need to implement this because DiskObjectWriter uses it to close the compression stream + try { + s.close(); + } catch (IOException e) { + Platform.throwException(e); + } + } + }; + } + + + public ByteBuffer serialize(T t, T ev1) { + throw new UnsupportedOperationException(); + } + + + public DeserializationStream deserializeStream(InputStream s) { + throw new UnsupportedOperationException(); + } + + + public T deserialize(ByteBuffer bytes, ClassLoader loader, T ev1) { + throw new UnsupportedOperationException(); + } + + public T deserialize(ByteBuffer bytes, T ev1) { + throw new UnsupportedOperationException(); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/storage/SerializationStream.java b/src/main/java/io/mycat/memory/unsafe/storage/SerializationStream.java new file mode 100644 index 000000000..5b905cdb7 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/storage/SerializationStream.java @@ -0,0 +1,30 @@ +package io.mycat.memory.unsafe.storage; + +import java.util.Iterator; + +/** + * Created by zagnix on 2016/6/3. + */ +public abstract class SerializationStream{ + + /** The most general-purpose method to write an object. 
*/ + public abstract SerializationStream writeObject(T t); + /** Writes the object representing the key of a key-value pair. */ + public SerializationStream writeKey(T key){ + return writeObject(key); + } + /** Writes the object representing the value of a key-value pair. */ + public SerializationStream writeValue(T value){ + return writeObject(value); + } + + public abstract void flush(); + public abstract void close(); + + public SerializationStream writeAll(Iterator iter){ + while (iter.hasNext()) { + writeObject(iter.next()); + } + return this; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/storage/SerializerInstance.java b/src/main/java/io/mycat/memory/unsafe/storage/SerializerInstance.java new file mode 100644 index 000000000..73afbb302 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/storage/SerializerInstance.java @@ -0,0 +1,12 @@ +package io.mycat.memory.unsafe.storage; + +import java.io.InputStream; +import java.io.OutputStream; + +/** + * Created by zagnix on 2016/6/3. + */ +public abstract class SerializerInstance { + protected abstract SerializationStream serializeStream(OutputStream s ); + protected abstract DeserializationStream deserializeStream(InputStream s); +} diff --git a/src/main/java/io/mycat/memory/unsafe/storage/SerializerManager.java b/src/main/java/io/mycat/memory/unsafe/storage/SerializerManager.java new file mode 100644 index 000000000..47c962409 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/storage/SerializerManager.java @@ -0,0 +1,25 @@ +package io.mycat.memory.unsafe.storage; + +import java.io.InputStream; +import java.io.OutputStream; + +/** + * Created by zagnix on 2016/6/3. 
+ */ +public class SerializerManager { + + /** + * Wrap an output stream for compression if block compression is enabled for its block type + */ + public OutputStream wrapForCompression(ConnectionId blockId , OutputStream s){ + return s; + } + + /** + * Wrap an input stream for compression if block compression is enabled for its block type + */ + public InputStream wrapForCompression(ConnectionId blockId, InputStream s){ + return s; + } + +} diff --git a/src/main/java/io/mycat/memory/unsafe/storage/TempDataNodeId.java b/src/main/java/io/mycat/memory/unsafe/storage/TempDataNodeId.java new file mode 100644 index 000000000..f3fd0ed12 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/storage/TempDataNodeId.java @@ -0,0 +1,20 @@ +package io.mycat.memory.unsafe.storage; + +/** + * Created by zagnix on 2016/6/3. + */ +public class TempDataNodeId extends ConnectionId { + + private String uuid; + + public TempDataNodeId(String uuid) { + super(); + this.name = uuid; + this.uuid = uuid; + } + + @Override + public String getBlockName() { + return "temp_local_" + uuid; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/storage/TimeTrackingOutputStream.java b/src/main/java/io/mycat/memory/unsafe/storage/TimeTrackingOutputStream.java new file mode 100644 index 000000000..af68f804b --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/storage/TimeTrackingOutputStream.java @@ -0,0 +1,50 @@ + + +package io.mycat.memory.unsafe.storage; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * Intercepts write calls and tracks total time spent writing in order to update shuffle write + * metrics. Not thread safe. 
+ */ +public final class TimeTrackingOutputStream extends OutputStream { + + /**private final ShuffleWriteMetrics writeMetrics;*/ + private final OutputStream outputStream; + + public TimeTrackingOutputStream(OutputStream outputStream) { + this.outputStream = outputStream; + } + + @Override + public void write(int b) throws IOException { + final long startTime = System.nanoTime(); + outputStream.write(b); + } + + @Override + public void write(byte[] b) throws IOException { + final long startTime = System.nanoTime(); + outputStream.write(b); + } + + @Override + public void write(byte[] b, int off, int len) throws IOException { + final long startTime = System.nanoTime(); + outputStream.write(b, off, len); + } + + @Override + public void flush() throws IOException { + final long startTime = System.nanoTime(); + outputStream.flush(); + } + + @Override + public void close() throws IOException { + final long startTime = System.nanoTime(); + outputStream.close(); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/types/ByteArray.java b/src/main/java/io/mycat/memory/unsafe/types/ByteArray.java new file mode 100644 index 000000000..b85c915c2 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/types/ByteArray.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.types; + +import io.mycat.memory.unsafe.Platform; + +import java.util.Arrays; + +public final class ByteArray { + + public static final byte[] EMPTY_BYTE = new byte[0]; + + /** + * Writes the content of a byte array into a memory address, identified by an object and an + * offset. The target memory address must already been allocated, and have enough space to + * hold all the bytes in this string. + */ + public static void writeToMemory(byte[] src, Object target, long targetOffset) { + Platform.copyMemory(src, Platform.BYTE_ARRAY_OFFSET, target, targetOffset, src.length); + } + + /** + * Returns a 64-bit integer that can be used as the prefix used in sorting. + */ + public static long getPrefix(byte[] bytes) { + if (bytes == null) { + return 0L; + } else { + final int minLen = Math.min(bytes.length, 8); + long p = 0; + for (int i = 0; i < minLen; ++i) { + p |= (128L + Platform.getByte(bytes, Platform.BYTE_ARRAY_OFFSET + i)) + << (56 - 8 * i); + } + return p; + } + } + + public static byte[] subStringSQL(byte[] bytes, int pos, int len) { + // This pos calculation is according to UTF8String#subStringSQL + if (pos > bytes.length) { + return EMPTY_BYTE; + } + int start = 0; + int end; + if (pos > 0) { + start = pos - 1; + } else if (pos < 0) { + start = bytes.length + pos; + } + if ((bytes.length - start) < len) { + end = bytes.length; + } else { + end = start + len; + } + start = Math.max(start, 0); // underflow + if (start >= end) { + return EMPTY_BYTE; + } + return Arrays.copyOfRange(bytes, start, end); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/types/CalendarInterval.java b/src/main/java/io/mycat/memory/unsafe/types/CalendarInterval.java new file mode 100644 index 000000000..2e92c1230 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/types/CalendarInterval.java @@ -0,0 +1,324 @@ +/* + * Licensed to 
the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.types; + +import java.io.Serializable; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * The internal representation of interval type. + */ +public final class CalendarInterval implements Serializable { + public static final long MICROS_PER_MILLI = 1000L; + public static final long MICROS_PER_SECOND = MICROS_PER_MILLI * 1000; + public static final long MICROS_PER_MINUTE = MICROS_PER_SECOND * 60; + public static final long MICROS_PER_HOUR = MICROS_PER_MINUTE * 60; + public static final long MICROS_PER_DAY = MICROS_PER_HOUR * 24; + public static final long MICROS_PER_WEEK = MICROS_PER_DAY * 7; + + /** + * A function to generate regex which matches interval string's unit part like "3 years". + * + * First, we can leave out some units in interval string, and we only care about the value of + * unit, so here we use non-capturing group to wrap the actual regex. + * At the beginning of the actual regex, we should match spaces before the unit part. + * Next is the number part, starts with an optional "-" to represent negative value. We use + * capturing group to wrap this part as we need the value later. 
+ * Finally is the unit name, ends with an optional "s". + */ + private static String unitRegex(String unit) { + return "(?:\\s+(-?\\d+)\\s+" + unit + "s?)?"; + } + + private static Pattern p = Pattern.compile("interval" + unitRegex("year") + unitRegex("month") + + unitRegex("week") + unitRegex("day") + unitRegex("hour") + unitRegex("minute") + + unitRegex("second") + unitRegex("millisecond") + unitRegex("microsecond")); + + private static Pattern yearMonthPattern = + Pattern.compile("^(?:['|\"])?([+|-])?(\\d+)-(\\d+)(?:['|\"])?$"); + + private static Pattern dayTimePattern = + Pattern.compile("^(?:['|\"])?([+|-])?(\\d+) (\\d+):(\\d+):(\\d+)(\\.(\\d+))?(?:['|\"])?$"); + + private static Pattern quoteTrimPattern = Pattern.compile("^(?:['|\"])?(.*?)(?:['|\"])?$"); + + private static long toLong(String s) { + if (s == null) { + return 0; + } else { + return Long.parseLong(s); + } + } + + public static CalendarInterval fromString(String s) { + if (s == null) { + return null; + } + s = s.trim(); + Matcher m = p.matcher(s); + if (!m.matches() || s.equals("interval")) { + return null; + } else { + long months = toLong(m.group(1)) * 12 + toLong(m.group(2)); + long microseconds = toLong(m.group(3)) * MICROS_PER_WEEK; + microseconds += toLong(m.group(4)) * MICROS_PER_DAY; + microseconds += toLong(m.group(5)) * MICROS_PER_HOUR; + microseconds += toLong(m.group(6)) * MICROS_PER_MINUTE; + microseconds += toLong(m.group(7)) * MICROS_PER_SECOND; + microseconds += toLong(m.group(8)) * MICROS_PER_MILLI; + microseconds += toLong(m.group(9)); + return new CalendarInterval((int) months, microseconds); + } + } + + public static long toLongWithRange(String fieldName, + String s, long minValue, long maxValue) throws IllegalArgumentException { + long result = 0; + if (s != null) { + result = Long.parseLong(s); + if (result < minValue || result > maxValue) { + throw new IllegalArgumentException(String.format("%s %d outside range [%d, %d]", + fieldName, result, minValue, maxValue)); + } + } 
+ return result; + } + + /** + * Parse YearMonth string in form: [-]YYYY-MM + * + * adapted from HiveIntervalYearMonth.valueOf + */ + public static CalendarInterval fromYearMonthString(String s) throws IllegalArgumentException { + CalendarInterval result = null; + if (s == null) { + throw new IllegalArgumentException("Interval year-month string was null"); + } + s = s.trim(); + Matcher m = yearMonthPattern.matcher(s); + if (!m.matches()) { + throw new IllegalArgumentException( + "Interval string does not match year-month format of 'y-m': " + s); + } else { + try { + int sign = m.group(1) != null && m.group(1).equals("-") ? -1 : 1; + int years = (int) toLongWithRange("year", m.group(2), 0, Integer.MAX_VALUE); + int months = (int) toLongWithRange("month", m.group(3), 0, 11); + result = new CalendarInterval(sign * (years * 12 + months), 0); + } catch (Exception e) { + throw new IllegalArgumentException( + "Error parsing interval year-month string: " + e.getMessage(), e); + } + } + return result; + } + + /** + * Parse dayTime string in form: [-]d HH:mm:ss.nnnnnnnnn + * + * adapted from HiveIntervalDayTime.valueOf + */ + public static CalendarInterval fromDayTimeString(String s) throws IllegalArgumentException { + CalendarInterval result = null; + if (s == null) { + throw new IllegalArgumentException("Interval day-time string was null"); + } + s = s.trim(); + Matcher m = dayTimePattern.matcher(s); + if (!m.matches()) { + throw new IllegalArgumentException( + "Interval string does not match day-time format of 'd h:m:s.n': " + s); + } else { + try { + int sign = m.group(1) != null && m.group(1).equals("-") ? 
-1 : 1; + long days = toLongWithRange("day", m.group(2), 0, Integer.MAX_VALUE); + long hours = toLongWithRange("hour", m.group(3), 0, 23); + long minutes = toLongWithRange("minute", m.group(4), 0, 59); + long seconds = toLongWithRange("second", m.group(5), 0, 59); + // Hive allow nanosecond precision interval + long nanos = toLongWithRange("nanosecond", m.group(7), 0L, 999999999L); + result = new CalendarInterval(0, sign * ( + days * MICROS_PER_DAY + hours * MICROS_PER_HOUR + minutes * MICROS_PER_MINUTE + + seconds * MICROS_PER_SECOND + nanos / 1000L)); + } catch (Exception e) { + throw new IllegalArgumentException( + "Error parsing interval day-time string: " + e.getMessage(), e); + } + } + return result; + } + + public static CalendarInterval fromSingleUnitString(String unit, String s) + throws IllegalArgumentException { + + CalendarInterval result = null; + if (s == null) { + throw new IllegalArgumentException(String.format("Interval %s string was null", unit)); + } + s = s.trim(); + Matcher m = quoteTrimPattern.matcher(s); + if (!m.matches()) { + throw new IllegalArgumentException( + "Interval string does not match day-time format of 'd h:m:s.n': " + s); + } else { + try { + if (unit.equals("year")) { + int year = (int) toLongWithRange("year", m.group(1), + Integer.MIN_VALUE / 12, Integer.MAX_VALUE / 12); + result = new CalendarInterval(year * 12, 0L); + + } else if (unit.equals("month")) { + int month = (int) toLongWithRange("month", m.group(1), + Integer.MIN_VALUE, Integer.MAX_VALUE); + result = new CalendarInterval(month, 0L); + + } else if (unit.equals("week")) { + long week = toLongWithRange("week", m.group(1), + Long.MIN_VALUE / MICROS_PER_WEEK, Long.MAX_VALUE / MICROS_PER_WEEK); + result = new CalendarInterval(0, week * MICROS_PER_WEEK); + + } else if (unit.equals("day")) { + long day = toLongWithRange("day", m.group(1), + Long.MIN_VALUE / MICROS_PER_DAY, Long.MAX_VALUE / MICROS_PER_DAY); + result = new CalendarInterval(0, day * MICROS_PER_DAY); + + } 
else if (unit.equals("hour")) { + long hour = toLongWithRange("hour", m.group(1), + Long.MIN_VALUE / MICROS_PER_HOUR, Long.MAX_VALUE / MICROS_PER_HOUR); + result = new CalendarInterval(0, hour * MICROS_PER_HOUR); + + } else if (unit.equals("minute")) { + long minute = toLongWithRange("minute", m.group(1), + Long.MIN_VALUE / MICROS_PER_MINUTE, Long.MAX_VALUE / MICROS_PER_MINUTE); + result = new CalendarInterval(0, minute * MICROS_PER_MINUTE); + + } else if (unit.equals("second")) { + long micros = parseSecondNano(m.group(1)); + result = new CalendarInterval(0, micros); + + } else if (unit.equals("millisecond")) { + long millisecond = toLongWithRange("millisecond", m.group(1), + Long.MIN_VALUE / MICROS_PER_MILLI, Long.MAX_VALUE / MICROS_PER_MILLI); + result = new CalendarInterval(0, millisecond * MICROS_PER_MILLI); + + } else if (unit.equals("microsecond")) { + long micros = Long.parseLong(m.group(1)); + result = new CalendarInterval(0, micros); + } + } catch (Exception e) { + throw new IllegalArgumentException("Error parsing interval string: " + e.getMessage(), e); + } + } + return result; + } + + /** + * Parse second_nano string in ss.nnnnnnnnn format to microseconds + */ + public static long parseSecondNano(String secondNano) throws IllegalArgumentException { + String[] parts = secondNano.split("\\."); + if (parts.length == 1) { + return toLongWithRange("second", parts[0], Long.MIN_VALUE / MICROS_PER_SECOND, + Long.MAX_VALUE / MICROS_PER_SECOND) * MICROS_PER_SECOND; + + } else if (parts.length == 2) { + long seconds = parts[0].equals("") ? 
0L : toLongWithRange("second", parts[0], + Long.MIN_VALUE / MICROS_PER_SECOND, Long.MAX_VALUE / MICROS_PER_SECOND); + long nanos = toLongWithRange("nanosecond", parts[1], 0L, 999999999L); + return seconds * MICROS_PER_SECOND + nanos / 1000L; + + } else { + throw new IllegalArgumentException( + "Interval string does not match second-nano format of ss.nnnnnnnnn"); + } + } + + public final int months; + public final long microseconds; + + public CalendarInterval(int months, long microseconds) { + this.months = months; + this.microseconds = microseconds; + } + + public CalendarInterval add(CalendarInterval that) { + int months = this.months + that.months; + long microseconds = this.microseconds + that.microseconds; + return new CalendarInterval(months, microseconds); + } + + public CalendarInterval subtract(CalendarInterval that) { + int months = this.months - that.months; + long microseconds = this.microseconds - that.microseconds; + return new CalendarInterval(months, microseconds); + } + + public CalendarInterval negate() { + return new CalendarInterval(-this.months, -this.microseconds); + } + + @Override + public boolean equals(Object other) { + if (this == other) return true; + if (other == null || !(other instanceof CalendarInterval)) return false; + + CalendarInterval o = (CalendarInterval) other; + return this.months == o.months && this.microseconds == o.microseconds; + } + + @Override + public int hashCode() { + return 31 * months + (int) microseconds; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("interval"); + + if (months != 0) { + appendUnit(sb, months / 12, "year"); + appendUnit(sb, months % 12, "month"); + } + + if (microseconds != 0) { + long rest = microseconds; + appendUnit(sb, rest / MICROS_PER_WEEK, "week"); + rest %= MICROS_PER_WEEK; + appendUnit(sb, rest / MICROS_PER_DAY, "day"); + rest %= MICROS_PER_DAY; + appendUnit(sb, rest / MICROS_PER_HOUR, "hour"); + rest %= MICROS_PER_HOUR; + appendUnit(sb, rest / 
MICROS_PER_MINUTE, "minute"); + rest %= MICROS_PER_MINUTE; + appendUnit(sb, rest / MICROS_PER_SECOND, "second"); + rest %= MICROS_PER_SECOND; + appendUnit(sb, rest / MICROS_PER_MILLI, "millisecond"); + rest %= MICROS_PER_MILLI; + appendUnit(sb, rest, "microsecond"); + } + + return sb.toString(); + } + + private void appendUnit(StringBuilder sb, long value, String unit) { + if (value != 0) { + sb.append(' ').append(value).append(' ').append(unit).append('s'); + } + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/types/UTF8String.java b/src/main/java/io/mycat/memory/unsafe/types/UTF8String.java new file mode 100644 index 000000000..49ec71b29 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/types/UTF8String.java @@ -0,0 +1,1011 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.mycat.memory.unsafe.types; + +import com.esotericsoftware.kryo.Kryo; +import com.esotericsoftware.kryo.KryoSerializable; +import com.esotericsoftware.kryo.io.Input; +import com.esotericsoftware.kryo.io.Output; +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.array.ByteArrayMethods; +import io.mycat.memory.unsafe.hash.Murmur3_x86_32; + + +import javax.annotation.Nonnull; +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Map; + + +/** + * A UTF-8 String for internal Spark use. + *

+ * A String encoded in UTF-8 as an Array[Byte], which can be used for comparison, + * search, see http://en.wikipedia.org/wiki/UTF-8 for details. + *

+ * Note: This is not designed for general use cases, should not be used outside SQL. + */ +public final class UTF8String implements Comparable, Externalizable, KryoSerializable, + Cloneable { + + // These are only updated by readExternal() or read() + @Nonnull + private Object base; + private long offset; + private int numBytes; + + public Object getBaseObject() { return base; } + public long getBaseOffset() { return offset; } + + private static int[] bytesOfCodePointInUTF8 = {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, + 5, 5, 5, 5, + 6, 6}; + + private static boolean isLittleEndian = ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN; + + private static final UTF8String COMMA_UTF8 = UTF8String.fromString(","); + public static final UTF8String EMPTY_UTF8 = UTF8String.fromString(""); + + /** + * Creates an UTF8String from byte array, which should be encoded in UTF-8. + * + * Note: `bytes` will be hold by returned UTF8String. + */ + public static UTF8String fromBytes(byte[] bytes) { + if (bytes != null) { + return new UTF8String(bytes, Platform.BYTE_ARRAY_OFFSET, bytes.length); + } else { + return null; + } + } + + /** + * Creates an UTF8String from byte array, which should be encoded in UTF-8. + * + * Note: `bytes` will be hold by returned UTF8String. + */ + public static UTF8String fromBytes(byte[] bytes, int offset, int numBytes) { + if (bytes != null) { + return new UTF8String(bytes, Platform.BYTE_ARRAY_OFFSET + offset, numBytes); + } else { + return null; + } + } + + /** + * Creates an UTF8String from given address (base and offset) and length. + */ + public static UTF8String fromAddress(Object base, long offset, int numBytes) { + return new UTF8String(base, offset, numBytes); + } + + /** + * Creates an UTF8String from String. + */ + public static UTF8String fromString(String str) { + return str == null ? 
null : fromBytes(str.getBytes(StandardCharsets.UTF_8)); + } + + /** + * Creates an UTF8String that contains `length` spaces. + */ + public static UTF8String blankString(int length) { + byte[] spaces = new byte[length]; + Arrays.fill(spaces, (byte) ' '); + return fromBytes(spaces); + } + + protected UTF8String(Object base, long offset, int numBytes) { + this.base = base; + this.offset = offset; + this.numBytes = numBytes; + } + + // for serialization + public UTF8String() { + this(null, 0, 0); + } + + /** + * Writes the content of this string into a memory address, identified by an object and an offset. + * The target memory address must already been allocated, and have enough space to hold all the + * bytes in this string. + */ + public void writeToMemory(Object target, long targetOffset) { + Platform.copyMemory(base, offset, target, targetOffset, numBytes); + } + + public void writeTo(ByteBuffer buffer) { + assert(buffer.hasArray()); + byte[] target = buffer.array(); + int offset = buffer.arrayOffset(); + int pos = buffer.position(); + writeToMemory(target, Platform.BYTE_ARRAY_OFFSET + offset + pos); + buffer.position(pos + numBytes); + } + + /** + * Returns the number of bytes for a code point with the first byte as `b` + * @param b The first byte of a code point + */ + private static int numBytesForFirstByte(final byte b) { + final int offset = (b & 0xFF) - 192; + return (offset >= 0) ? bytesOfCodePointInUTF8[offset] : 1; + } + + /** + * Returns the number of bytes + */ + public int numBytes() { + return numBytes; + } + + /** + * Returns the number of code points in it. + */ + public int numChars() { + int len = 0; + for (int i = 0; i < numBytes; i += numBytesForFirstByte(getByte(i))) { + len += 1; + } + return len; + } + + /** + * Returns a 64-bit integer that can be used as the prefix used in sorting. + */ + public long getPrefix() { + // Since JVMs are either 4-byte aligned or 8-byte aligned, we check the size of the string. + // If size is 0, just return 0. 
+ // If size is between 0 and 4 (inclusive), assume data is 4-byte aligned under the hood and + // use a getInt to fetch the prefix. + // If size is greater than 4, assume we have at least 8 bytes of data to fetch. + // After getting the data, we use a mask to mask out data that is not part of the string. + long p; + long mask = 0; + if (isLittleEndian) { + if (numBytes >= 8) { + p = Platform.getLong(base, offset); + } else if (numBytes > 4) { + p = Platform.getLong(base, offset); + mask = (1L << (8 - numBytes) * 8) - 1; + } else if (numBytes > 0) { + p = (long) Platform.getInt(base, offset); + mask = (1L << (8 - numBytes) * 8) - 1; + } else { + p = 0; + } + p = Long.reverseBytes(p); + } else { + // byteOrder == ByteOrder.BIG_ENDIAN + if (numBytes >= 8) { + p = Platform.getLong(base, offset); + } else if (numBytes > 4) { + p = Platform.getLong(base, offset); + mask = (1L << (8 - numBytes) * 8) - 1; + } else if (numBytes > 0) { + p = ((long) Platform.getInt(base, offset)) << 32; + mask = (1L << (8 - numBytes) * 8) - 1; + } else { + p = 0; + } + } + p &= ~mask; + return p; + } + + /** + * Returns the underline bytes, will be a copy of it if it's part of another array. + */ + public byte[] getBytes() { + // avoid copy if `base` is `byte[]` + if (offset == Platform.BYTE_ARRAY_OFFSET && base instanceof byte[] + && ((byte[]) base).length == numBytes) { + return (byte[]) base; + } else { + byte[] bytes = new byte[numBytes]; + Platform.copyMemory(base, offset, bytes, Platform.BYTE_ARRAY_OFFSET, numBytes); + return bytes; + } + } + + /** + * Returns a substring of this. + * @param start the position of first code point + * @param until the position after last code point, exclusive. 
+ */ + public UTF8String substring(final int start, final int until) { + if (until <= start || start >= numBytes) { + return EMPTY_UTF8; + } + + int i = 0; + int c = 0; + while (i < numBytes && c < start) { + i += numBytesForFirstByte(getByte(i)); + c += 1; + } + + int j = i; + while (i < numBytes && c < until) { + i += numBytesForFirstByte(getByte(i)); + c += 1; + } + + if (i > j) { + byte[] bytes = new byte[i - j]; + Platform.copyMemory(base, offset + j, bytes, Platform.BYTE_ARRAY_OFFSET, i - j); + return fromBytes(bytes); + } else { + return EMPTY_UTF8; + } + } + + public UTF8String substringSQL(int pos, int length) { + // Information regarding the pos calculation: + // Hive and SQL use one-based indexing for SUBSTR arguments but also accept zero and + // negative indices for start positions. If a start index i is greater than 0, it + // refers to element i-1 in the sequence. If a start index i is less than 0, it refers + // to the -ith element before the end of the sequence. If a start index i is 0, it + // refers to the first element. + int len = numChars(); + int start = (pos > 0) ? pos -1 : ((pos < 0) ? len + pos : 0); + int end = (length == Integer.MAX_VALUE) ? len : start + length; + return substring(start, end); + } + + /** + * Returns whether this contains `substring` or not. + */ + public boolean contains(final UTF8String substring) { + if (substring.numBytes == 0) { + return true; + } + + byte first = substring.getByte(0); + for (int i = 0; i <= numBytes - substring.numBytes; i++) { + if (getByte(i) == first && matchAt(substring, i)) { + return true; + } + } + return false; + } + + /** + * Returns the byte at position `i`. 
+ */ + private byte getByte(int i) { + return Platform.getByte(base, offset + i); + } + + private boolean matchAt(final UTF8String s, int pos) { + if (s.numBytes + pos > numBytes || pos < 0) { + return false; + } + return ByteArrayMethods.arrayEquals(base, offset + pos, s.base, s.offset, s.numBytes); + } + + public boolean startsWith(final UTF8String prefix) { + return matchAt(prefix, 0); + } + + public boolean endsWith(final UTF8String suffix) { + return matchAt(suffix, numBytes - suffix.numBytes); + } + + /** + * Returns the upper case of this string + */ + public UTF8String toUpperCase() { + if (numBytes == 0) { + return EMPTY_UTF8; + } + + byte[] bytes = new byte[numBytes]; + bytes[0] = (byte) Character.toTitleCase(getByte(0)); + for (int i = 0; i < numBytes; i++) { + byte b = getByte(i); + if (numBytesForFirstByte(b) != 1) { + // fallback + return toUpperCaseSlow(); + } + int upper = Character.toUpperCase((int) b); + if (upper > 127) { + // fallback + return toUpperCaseSlow(); + } + bytes[i] = (byte) upper; + } + return fromBytes(bytes); + } + + private UTF8String toUpperCaseSlow() { + return fromString(toString().toUpperCase()); + } + + /** + * Returns the lower case of this string + */ + public UTF8String toLowerCase() { + if (numBytes == 0) { + return EMPTY_UTF8; + } + + byte[] bytes = new byte[numBytes]; + bytes[0] = (byte) Character.toTitleCase(getByte(0)); + for (int i = 0; i < numBytes; i++) { + byte b = getByte(i); + if (numBytesForFirstByte(b) != 1) { + // fallback + return toLowerCaseSlow(); + } + int lower = Character.toLowerCase((int) b); + if (lower > 127) { + // fallback + return toLowerCaseSlow(); + } + bytes[i] = (byte) lower; + } + return fromBytes(bytes); + } + + private UTF8String toLowerCaseSlow() { + return fromString(toString().toLowerCase()); + } + + /** + * Returns the title case of this string, that could be used as title. 
+ */ + public UTF8String toTitleCase() { + if (numBytes == 0) { + return EMPTY_UTF8; + } + + byte[] bytes = new byte[numBytes]; + for (int i = 0; i < numBytes; i++) { + byte b = getByte(i); + if (i == 0 || getByte(i - 1) == ' ') { + if (numBytesForFirstByte(b) != 1) { + // fallback + return toTitleCaseSlow(); + } + int upper = Character.toTitleCase(b); + if (upper > 127) { + // fallback + return toTitleCaseSlow(); + } + bytes[i] = (byte) upper; + } else { + bytes[i] = b; + } + } + return fromBytes(bytes); + } + + private UTF8String toTitleCaseSlow() { + StringBuffer sb = new StringBuffer(); + String s = toString(); + sb.append(s); + sb.setCharAt(0, Character.toTitleCase(sb.charAt(0))); + for (int i = 1; i < s.length(); i++) { + if (sb.charAt(i - 1) == ' ') { + sb.setCharAt(i, Character.toTitleCase(sb.charAt(i))); + } + } + return fromString(sb.toString()); + } + + /* + * Returns the index of the string `match` in this String. This string has to be a comma separated + * list. If `match` contains a comma 0 will be returned. If the `match` isn't part of this String, + * 0 will be returned, else the index of match (1-based index) + */ + public int findInSet(UTF8String match) { + if (match.contains(COMMA_UTF8)) { + return 0; + } + + int n = 1, lastComma = -1; + for (int i = 0; i < numBytes; i++) { + if (getByte(i) == (byte) ',') { + if (i - (lastComma + 1) == match.numBytes && + ByteArrayMethods.arrayEquals(base, offset + (lastComma + 1), match.base, match.offset, + match.numBytes)) { + return n; + } + lastComma = i; + n++; + } + } + if (numBytes - (lastComma + 1) == match.numBytes && + ByteArrayMethods.arrayEquals(base, offset + (lastComma + 1), match.base, match.offset, + match.numBytes)) { + return n; + } + return 0; + } + + /** + * Copy the bytes from the current UTF8String, and make a new UTF8String. + * @param start the start position of the current UTF8String in bytes. + * @param end the end position of the current UTF8String in bytes. 
+ * @return a new UTF8String in the position of [start, end] of current UTF8String bytes. + */ + private UTF8String copyUTF8String(int start, int end) { + int len = end - start + 1; + byte[] newBytes = new byte[len]; + Platform.copyMemory(base, offset + start, newBytes, Platform.BYTE_ARRAY_OFFSET, len); + return UTF8String.fromBytes(newBytes); + } + + public UTF8String trim() { + int s = 0; + int e = this.numBytes - 1; + // skip all of the space (0x20) in the left side + while (s < this.numBytes && getByte(s) <= 0x20 && getByte(s) >= 0x00) s++; + // skip all of the space (0x20) in the right side + while (e >= 0 && getByte(e) <= 0x20 && getByte(e) >= 0x00) e--; + if (s > e) { + // empty string + return UTF8String.fromBytes(new byte[0]); + } else { + return copyUTF8String(s, e); + } + } + + public UTF8String trimLeft() { + int s = 0; + // skip all of the space (0x20) in the left side + while (s < this.numBytes && getByte(s) <= 0x20 && getByte(s) >= 0x00) s++; + if (s == this.numBytes) { + // empty string + return UTF8String.fromBytes(new byte[0]); + } else { + return copyUTF8String(s, this.numBytes - 1); + } + } + + public UTF8String trimRight() { + int e = numBytes - 1; + // skip all of the space (0x20) in the right side + while (e >= 0 && getByte(e) <= 0x20 && getByte(e) >= 0x00) e--; + + if (e < 0) { + // empty string + return UTF8String.fromBytes(new byte[0]); + } else { + return copyUTF8String(0, e); + } + } + + public UTF8String reverse() { + byte[] result = new byte[this.numBytes]; + + int i = 0; // position in byte + while (i < numBytes) { + int len = numBytesForFirstByte(getByte(i)); + Platform.copyMemory(this.base, this.offset + i, result, + Platform.BYTE_ARRAY_OFFSET + result.length - i - len, len); + + i += len; + } + + return UTF8String.fromBytes(result); + } + + public UTF8String repeat(int times) { + if (times <= 0) { + return EMPTY_UTF8; + } + + byte[] newBytes = new byte[numBytes * times]; + Platform.copyMemory(this.base, this.offset, newBytes, 
Platform.BYTE_ARRAY_OFFSET, numBytes); + + int copied = 1; + while (copied < times) { + int toCopy = Math.min(copied, times - copied); + System.arraycopy(newBytes, 0, newBytes, copied * numBytes, numBytes * toCopy); + copied += toCopy; + } + + return UTF8String.fromBytes(newBytes); + } + + /** + * Returns the position of the first occurrence of substr in + * current string from the specified position (0-based index). + * + * @param v the string to be searched + * @param start the start position of the current string for searching + * @return the position of the first occurrence of substr, if not found, -1 returned. + */ + public int indexOf(UTF8String v, int start) { + if (v.numBytes() == 0) { + return 0; + } + + // locate to the start position. + int i = 0; // position in byte + int c = 0; // position in character + while (i < numBytes && c < start) { + i += numBytesForFirstByte(getByte(i)); + c += 1; + } + + do { + if (i + v.numBytes > numBytes) { + return -1; + } + if (ByteArrayMethods.arrayEquals(base, offset + i, v.base, v.offset, v.numBytes)) { + return c; + } + i += numBytesForFirstByte(getByte(i)); + c += 1; + } while (i < numBytes); + + return -1; + } + + /** + * Find the `str` from left to right. + */ + private int find(UTF8String str, int start) { + assert (str.numBytes > 0); + while (start <= numBytes - str.numBytes) { + if (ByteArrayMethods.arrayEquals(base, offset + start, str.base, str.offset, str.numBytes)) { + return start; + } + start += 1; + } + return -1; + } + + /** + * Find the `str` from right to left. + */ + private int rfind(UTF8String str, int start) { + assert (str.numBytes > 0); + while (start >= 0) { + if (ByteArrayMethods.arrayEquals(base, offset + start, str.base, str.offset, str.numBytes)) { + return start; + } + start -= 1; + } + return -1; + } + + /** + * Returns the substring from string str before count occurrences of the delimiter delim. 
If count is positive, everything to the left of the final delimiter (counting from left) is + returned. If count is negative, everything to the right of the final delimiter
+ * ('hi', 1, '??') => 'h' + */ + public UTF8String rpad(int len, UTF8String pad) { + int spaces = len - this.numChars(); // number of char need to pad + if (spaces <= 0 || pad.numBytes() == 0) { + // no padding at all, return the substring of the current string + return substring(0, len); + } else { + int padChars = pad.numChars(); + int count = spaces / padChars; // how many padding string needed + // the partial string of the padding + UTF8String remain = pad.substring(0, spaces - padChars * count); + + byte[] data = new byte[this.numBytes + pad.numBytes * count + remain.numBytes]; + Platform.copyMemory(this.base, this.offset, data, Platform.BYTE_ARRAY_OFFSET, this.numBytes); + int offset = this.numBytes; + int idx = 0; + while (idx < count) { + Platform.copyMemory(pad.base, pad.offset, data, Platform.BYTE_ARRAY_OFFSET + offset, pad.numBytes); + ++ idx; + offset += pad.numBytes; + } + Platform.copyMemory(remain.base, remain.offset, data, Platform.BYTE_ARRAY_OFFSET + offset, remain.numBytes); + + return UTF8String.fromBytes(data); + } + } + + /** + * Returns str, left-padded with pad to a length of len. 
+ * For example: + * ('hi', 5, '??') => '???hi' + * ('hi', 1, '??') => 'h' + */ + public UTF8String lpad(int len, UTF8String pad) { + int spaces = len - this.numChars(); // number of char need to pad + if (spaces <= 0 || pad.numBytes() == 0) { + // no padding at all, return the substring of the current string + return substring(0, len); + } else { + int padChars = pad.numChars(); + int count = spaces / padChars; // how many padding string needed + // the partial string of the padding + UTF8String remain = pad.substring(0, spaces - padChars * count); + + byte[] data = new byte[this.numBytes + pad.numBytes * count + remain.numBytes]; + + int offset = 0; + int idx = 0; + while (idx < count) { + Platform.copyMemory(pad.base, pad.offset, data, Platform.BYTE_ARRAY_OFFSET + offset, pad.numBytes); + ++ idx; + offset += pad.numBytes; + } + Platform.copyMemory(remain.base, remain.offset, data, Platform.BYTE_ARRAY_OFFSET + offset, remain.numBytes); + offset += remain.numBytes; + Platform.copyMemory(this.base, this.offset, data, Platform.BYTE_ARRAY_OFFSET + offset, numBytes()); + + return UTF8String.fromBytes(data); + } + } + + /** + * Concatenates input strings together into a single string. Returns null if any input is null. + */ + public static UTF8String concat(UTF8String... inputs) { + // Compute the total length of the result. + int totalLength = 0; + for (int i = 0; i < inputs.length; i++) { + if (inputs[i] != null) { + totalLength += inputs[i].numBytes; + } else { + return null; + } + } + + // Allocate a new byte array, and copy the inputs one by one into it. + final byte[] result = new byte[totalLength]; + int offset = 0; + for (int i = 0; i < inputs.length; i++) { + int len = inputs[i].numBytes; + Platform.copyMemory( + inputs[i].base, inputs[i].offset, + result, Platform.BYTE_ARRAY_OFFSET + offset, + len); + offset += len; + } + return fromBytes(result); + } + + /** + * Concatenates input strings together into a single string using the separator. 
A null input is skipped. For example, concatWs(",", "a", null, "c") would yield "a,c".
+ if (j < numInputs) { + Platform.copyMemory( + separator.base, separator.offset, + result, Platform.BYTE_ARRAY_OFFSET + offset, + separator.numBytes); + offset += separator.numBytes; + } + } + } + return fromBytes(result); + } + + public UTF8String[] split(UTF8String pattern, int limit) { + String[] splits = toString().split(pattern.toString(), limit); + UTF8String[] res = new UTF8String[splits.length]; + for (int i = 0; i < res.length; i++) { + res[i] = fromString(splits[i]); + } + return res; + } + + // TODO: Need to use `Code Point` here instead of Char in case the character longer than 2 bytes + public UTF8String translate(Map dict) { + String srcStr = this.toString(); + + StringBuilder sb = new StringBuilder(); + for(int k = 0; k< srcStr.length(); k++) { + if (null == dict.get(srcStr.charAt(k))) { + sb.append(srcStr.charAt(k)); + } else if ('\0' != dict.get(srcStr.charAt(k))){ + sb.append(dict.get(srcStr.charAt(k))); + } + } + return fromString(sb.toString()); + } + + @Override + public String toString() { + return new String(getBytes(), StandardCharsets.UTF_8); + } + + @Override + public UTF8String clone() { + return fromBytes(getBytes()); + } + + @Override + public int compareTo(@Nonnull final UTF8String other) { + int len = Math.min(numBytes, other.numBytes); + // TODO: compare 8 bytes as unsigned long + for (int i = 0; i < len; i ++) { + // In UTF-8, the byte should be unsigned, so we should compare them as unsigned int. 
+ int res = (getByte(i) & 0xFF) - (other.getByte(i) & 0xFF); + if (res != 0) { + return res; + } + } + return numBytes - other.numBytes; + } + + public int compare(final UTF8String other) { + return compareTo(other); + } + + @Override + public boolean equals(final Object other) { + if (other instanceof UTF8String) { + UTF8String o = (UTF8String) other; + if (numBytes != o.numBytes) { + return false; + } + return ByteArrayMethods.arrayEquals(base, offset, o.base, o.offset, numBytes); + } else { + return false; + } + } + + /** + * Levenshtein distance is a metric for measuring the distance of two strings. The distance is + * defined by the minimum number of single-character edits (i.e. insertions, deletions or + * substitutions) that are required to change one of the strings into the other. + */ + public int levenshteinDistance(UTF8String other) { + // Implementation adopted from org.apache.common.lang3.StringUtils.getLevenshteinDistance + + int n = numChars(); + int m = other.numChars(); + + if (n == 0) { + return m; + } else if (m == 0) { + return n; + } + + UTF8String s, t; + + if (n <= m) { + s = this; + t = other; + } else { + s = other; + t = this; + int swap; + swap = n; + n = m; + m = swap; + } + + int[] p = new int[n + 1]; + int[] d = new int[n + 1]; + int[] swap; + + int i, i_bytes, j, j_bytes, num_bytes_j, cost; + + for (i = 0; i <= n; i++) { + p[i] = i; + } + + for (j = 0, j_bytes = 0; j < m; j_bytes += num_bytes_j, j++) { + num_bytes_j = numBytesForFirstByte(t.getByte(j_bytes)); + d[0] = j + 1; + + for (i = 0, i_bytes = 0; i < n; i_bytes += numBytesForFirstByte(s.getByte(i_bytes)), i++) { + if (s.getByte(i_bytes) != t.getByte(j_bytes) || + num_bytes_j != numBytesForFirstByte(s.getByte(i_bytes))) { + cost = 1; + } else { + cost = (ByteArrayMethods.arrayEquals(t.base, t.offset + j_bytes, s.base, + s.offset + i_bytes, num_bytes_j)) ? 
0 : 1; + } + d[i + 1] = Math.min(Math.min(d[i] + 1, p[i + 1] + 1), p[i] + cost); + } + + swap = p; + p = d; + d = swap; + } + + return p[n]; + } + + @Override + public int hashCode() { + return Murmur3_x86_32.hashUnsafeBytes(base, offset, numBytes, 42); + } + + /** + * Soundex mapping table + */ + private static final byte[] US_ENGLISH_MAPPING = {'0', '1', '2', '3', '0', '1', '2', '7', + '0', '2', '2', '4', '5', '5', '0', '1', '2', '6', '2', '3', '0', '1', '7', '2', '0', '2'}; + + /** + * Encodes a string into a Soundex value. Soundex is an encoding used to relate similar names, + * but can also be used as a general purpose scheme to find word with similar phonemes. + * https://en.wikipedia.org/wiki/Soundex + */ + public UTF8String soundex() { + if (numBytes == 0) { + return EMPTY_UTF8; + } + + byte b = getByte(0); + if ('a' <= b && b <= 'z') { + b -= 32; + } else if (b < 'A' || 'Z' < b) { + // first character must be a letter + return this; + } + byte[] sx = {'0', '0', '0', '0'}; + sx[0] = b; + int sxi = 1; + int idx = b - 'A'; + byte lastCode = US_ENGLISH_MAPPING[idx]; + + for (int i = 1; i < numBytes; i++) { + b = getByte(i); + if ('a' <= b && b <= 'z') { + b -= 32; + } else if (b < 'A' || 'Z' < b) { + // not a letter, skip it + lastCode = '0'; + continue; + } + idx = b - 'A'; + byte code = US_ENGLISH_MAPPING[idx]; + if (code == '7') { + // ignore it + } else { + if (code != '0' && code != lastCode) { + sx[sxi++] = code; + if (sxi > 3) break; + } + lastCode = code; + } + } + return UTF8String.fromBytes(sx); + } + + public void writeExternal(ObjectOutput out) throws IOException { + byte[] bytes = getBytes(); + out.writeInt(bytes.length); + out.write(bytes); + } + + public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + offset = Platform.BYTE_ARRAY_OFFSET; + numBytes = in.readInt(); + base = new byte[numBytes]; + in.readFully((byte[]) base); + } + + @Override + public void write(Kryo kryo, Output out) { + byte[] bytes = getBytes(); 
+ out.writeInt(bytes.length); + out.write(bytes); + } + + @Override + public void read(Kryo kryo, Input in) { + this.offset = Platform.BYTE_ARRAY_OFFSET; + this.numBytes = in.readInt(); + this.base = new byte[numBytes]; + in.read((byte[]) base); + } + +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/ByteUnit.java b/src/main/java/io/mycat/memory/unsafe/utils/ByteUnit.java new file mode 100644 index 000000000..7228fbffb --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/ByteUnit.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.mycat.memory.unsafe.utils; + +public enum ByteUnit { + BYTE (1), + KiB (1024L), + MiB ((long) Math.pow(1024L, 2L)), + GiB ((long) Math.pow(1024L, 3L)), + TiB ((long) Math.pow(1024L, 4L)), + PiB ((long) Math.pow(1024L, 5L)), ; + + ByteUnit(long multiplier) { + this.multiplier = multiplier; + } + + // Interpret the provided number (d) with suffix (u) as this unit type. + // E.g. KiB.interpret(1, MiB) interprets 1MiB as its KiB representation = 1024k + public long convertFrom(long d, ByteUnit u) { + return u.convertTo(d, this); + } + + // Convert the provided number (d) interpreted as this unit type to unit type (u). 
+ public long convertTo(long d, ByteUnit u) { + if (multiplier > u.multiplier) { + long ratio = multiplier / u.multiplier; + if (Long.MAX_VALUE / ratio < d) { + throw new IllegalArgumentException("Conversion of " + d + " exceeds Long.MAX_VALUE in " + + name() + ". Try a larger unit (e.g. MiB instead of KiB)"); + } + return d * ratio; + } else { + // Perform operations in this order to avoid potential overflow + // when computing d * multiplier + return d / (u.multiplier / multiplier); + } + } + + public double toBytes(long d) { + if (d < 0) { + throw new IllegalArgumentException("Negative size value. Size must be positive: " + d); + } + return d * multiplier; + } + + public long toKiB(long d) { return convertTo(d, KiB); } + public long toMiB(long d) { return convertTo(d, MiB); } + public long toGiB(long d) { return convertTo(d, GiB); } + public long toTiB(long d) { return convertTo(d, TiB); } + public long toPiB(long d) { return convertTo(d, PiB); } + + private final long multiplier; +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/BytesTools.java b/src/main/java/io/mycat/memory/unsafe/utils/BytesTools.java new file mode 100644 index 000000000..bbfe2d269 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/BytesTools.java @@ -0,0 +1,970 @@ +package io.mycat.memory.unsafe.utils; + +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +import com.google.common.annotations.VisibleForTesting; + +import io.mycat.memory.unsafe.Platform; +import sun.misc.Unsafe; + +import java.io.UnsupportedEncodingException; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.nio.charset.IllegalCharsetNameException; +import java.nio.charset.UnsupportedCharsetException; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Utility class that handles byte arrays, conversions to/from other types, + */ +@SuppressWarnings("restriction") +public class BytesTools { + + //HConstants.UTF8_ENCODING should be updated if this changed + /** When we encode strings, we always specify UTF8 encoding */ + private static final String UTF8_ENCODING = "UTF-8"; + + //HConstants.UTF8_CHARSET should be updated if this changed + /** When we encode strings, we always specify UTF8 encoding */ + private static final Charset UTF8_CHARSET = Charset.forName(UTF8_ENCODING); + + /** + * Size of boolean in bytes + */ + public static final int SIZEOF_BOOLEAN = Byte.SIZE / Byte.SIZE; + + /** + * Size of byte in bytes + */ + public static final int SIZEOF_BYTE = SIZEOF_BOOLEAN; + + /** + * Size of char in bytes + */ + public static final int SIZEOF_CHAR = Character.SIZE / Byte.SIZE; + + /** + * Size of double in bytes + */ + public static final int SIZEOF_DOUBLE = Double.SIZE / Byte.SIZE; + + /** + * Size of float in bytes + */ + public static final int SIZEOF_FLOAT = Float.SIZE / Byte.SIZE; + + /** + * Size of int in bytes + */ + public 
static final int SIZEOF_INT = Integer.SIZE / Byte.SIZE; + + /** + * Size of long in bytes + */ + public static final int SIZEOF_LONG = Long.SIZE / Byte.SIZE; + + /** + * Size of short in bytes + */ + public static final int SIZEOF_SHORT = Short.SIZE / Byte.SIZE; + + /** + * Convert a byte array to a int value + * @param buf + * @return int + * @throws NumberFormatException + */ + + public static int getInt(byte[] buf) throws NumberFormatException { + return getInt(buf, 0, buf.length); + } + + public static int getInt(byte[] buf, int offset, int endPos) throws NumberFormatException { + byte base = 10; + + int s; + for(s = offset; s < endPos && Character.isWhitespace((char)buf[s]); ++s) { + ; + } + if(s == endPos) { + throw new NumberFormatException(toString(buf)); + } else { + boolean negative = false; + if((char)buf[s] == 45) { + negative = true; + ++s; + } else if((char)buf[s] == 43) { + ++s; + } + + int save = s; + int cutoff = 2147483647 / base; + int cutlim = 2147483647 % base; + if(negative) { + ++cutlim; + } + + boolean overflow = false; + + int i; + for(i = 0; s < endPos; ++s) { + char c = (char)buf[s]; + if(Character.isDigit(c)) { + c = (char)(c - 48); + } else { + if(!Character.isLetter(c)) { + break; + } + + c = (char)(Character.toUpperCase(c) - 65 + 10); + } + + if(c >= base) { + break; + } + + if(i <= cutoff && (i != cutoff || c <= cutlim)) { + i *= base; + i += c; + } else { + overflow = true; + } + } + + if(s == save) { + throw new NumberFormatException(toString(buf)); + } else if(overflow) { + throw new NumberFormatException(toString(buf)); + } else { + return negative?-i:i; + } + } + } + + /** + * Convert a byte array to a long value + * @param buf + * @return + * @throws NumberFormatException + */ + public static long getLong(byte[] buf) throws NumberFormatException { + return getLong(buf, 0, buf.length); + } + + public static long getLong(byte[] buf, int offset, int endpos) throws NumberFormatException { + byte base = 10; + + int s; + for(s = 
offset; s < endpos && Character.isWhitespace((char)buf[s]); ++s) { + ; + } + + if(s == endpos) { + throw new NumberFormatException(toString(buf)); + } else { + boolean negative = false; + if((char)buf[s] == 45) { + negative = true; + ++s; + } else if((char)buf[s] == 43) { + ++s; + } + + int save = s; + long cutoff = 9223372036854775807L / (long)base; + long cutlim = (long)((int)(9223372036854775807L % (long)base)); + if(negative) { + ++cutlim; + } + + boolean overflow = false; + + long i; + for(i = 0L; s < endpos; ++s) { + char c = (char)buf[s]; + if(Character.isDigit(c)) { + c = (char)(c - 48); + } else { + if(!Character.isLetter(c)) { + break; + } + c = (char)(Character.toUpperCase(c) - 65 + 10); + } + + if(c >= base) { + break; + } + + if(i <= cutoff && (i != cutoff || (long)c <= cutlim)) { + i *= (long)base; + i += (long)c; + } else { + overflow = true; + } + } + + if(s == save) { + throw new NumberFormatException(toString(buf)); + } else if(overflow) { + throw new NumberFormatException(toString(buf)); + } else { + return negative?-i:i; + } + } + } + + /** + * Convert a byte array to a short value + * @param buf + * @return + * @throws NumberFormatException + */ + public static short getShort(byte[] buf) throws NumberFormatException { + return getShort(buf, 0, buf.length); + } + + public static short getShort(byte[] buf, int offset, int endpos) throws NumberFormatException { + byte base = 10; + + int s; + for(s = offset; s < endpos && Character.isWhitespace((char)buf[s]); ++s) { + ; + } + + if(s == endpos) { + throw new NumberFormatException(toString(buf)); + } else { + boolean negative = false; + if((char)buf[s] == 45) { + negative = true; + ++s; + } else if((char)buf[s] == 43) { + ++s; + } + + int save = s; + short cutoff = (short)(32767 / base); + short cutlim = (short)(32767 % base); + if(negative) { + ++cutlim; + } + + boolean overflow = false; + + short i; + for(i = 0; s < endpos; ++s) { + char c = (char)buf[s]; + if(Character.isDigit(c)) { + c = (char)(c 
- 48); + } else { + if(!Character.isLetter(c)) { + break; + } + + c = (char)(Character.toUpperCase(c) - 65 + 10); + } + + if(c >= base) { + break; + } + + if(i <= cutoff && (i != cutoff || c <= cutlim)) { + i = (short)(i * base); + i = (short)(i + c); + } else { + overflow = true; + } + } + + if(s == save) { + throw new NumberFormatException(toString(buf)); + } else if(overflow) { + throw new NumberFormatException(toString(buf)); + } else { + return negative?(short)(-i):i; + } + } + } + + /** + * Convert a byte array to a float value + * @param src + * @return + * @throws UnsupportedEncodingException + */ + public static float getFloat(byte [] src) throws UnsupportedEncodingException { + return Float.parseFloat(new String(src,"US-ASCII")); + } + + /** + * Convert a byte array to a double value + * @param src + * @return + * @throws UnsupportedEncodingException + */ + + public static double getDouble(byte [] src) throws UnsupportedEncodingException { + return Double.parseDouble(new String(src,"US-ASCII")); + } + + /** + * Convert a long value to a byte array + * @param l + * @return + * @throws UnsupportedEncodingException + */ + + + public static byte[] long2Bytes(long l) throws UnsupportedEncodingException { + String lstr = Long.toString(l); + return lstr.getBytes("US-ASCII"); + } + + /** + * Convert a int value to a byte array + * @param i + * @return + * @throws UnsupportedEncodingException + */ + + public static byte[] int2Bytes(int i) throws UnsupportedEncodingException { + String istr = Integer.toString(i); + return istr.getBytes("US-ASCII"); + } + + /** + * Convert a short value to a byte array + * @param i + * @return + * @throws UnsupportedEncodingException + */ + + public static byte[] short2Bytes(short i) throws UnsupportedEncodingException { + String sstr = Short.toString(i); + return sstr.getBytes("US-ASCII"); + } + + /** + * Convert a float value to a byte array + * @param f + * @return + * @throws UnsupportedEncodingException + */ + public static 
byte[] float2Bytes(float f) throws UnsupportedEncodingException { + String fstr = Float.toString(f); + return fstr.getBytes("US-ASCII"); + } + + /** + * Convert a double value to a byte array + * @param d + * @return + * @throws UnsupportedEncodingException + */ + public static byte[] double2Bytes(double d) throws UnsupportedEncodingException { + String dstr = Double.toString(d); + return dstr.getBytes("US-ASCII"); + } + + /** + * Returns a new byte array, copied from the given {@code buf}, + * from the index 0 (inclusive) to the limit (exclusive), + * regardless of the current position. + * The position and the other index parameters are not changed. + * + * @param buf a byte buffer + * @return the byte array + */ + public static byte[] toBytes(ByteBuffer buf) { + ByteBuffer dup = buf.duplicate(); + dup.position(0); + return readBytes(dup); + } + + private static byte[] readBytes(ByteBuffer buf) { + byte [] result = new byte[buf.remaining()]; + buf.get(result); + return result; + } + + /** + * @param b Presumed UTF-8 encoded byte array. + * @return String made from b + */ + public static String toString(final byte [] b) { + if (b == null) { + return null; + } + return toString(b, 0, b.length); + } + + /** + * Joins two byte arrays together using a separator. + * @param b1 The first byte array. + * @param sep The separator to use. + * @param b2 The second byte array. + */ + public static String toString(final byte [] b1, + String sep, + final byte [] b2) { + return toString(b1, 0, b1.length) + sep + toString(b2, 0, b2.length); + } + + /** + * This method will convert utf8 encoded bytes into a string. If + * the given byte array is null, this method will return null. + * + * @param b Presumed UTF-8 encoded byte array. 
+ * @param off offset into array + * @return String made from b or null + */ + public static String toString(final byte [] b, int off) { + if (b == null) { + return null; + } + int len = b.length - off; + if (len <= 0) { + return ""; + } + return new String(b, off, len, UTF8_CHARSET); + } + + /** + * This method will convert utf8 encoded bytes into a string. If + * the given byte array is null, this method will return null. + * + * @param b Presumed UTF-8 encoded byte array. + * @param off offset into array + * @param len length of utf-8 sequence + * @return String made from b or null + */ + public static String toString(final byte [] b, int off, int len) { + if (b == null) { + return null; + } + if (len == 0) { + return ""; + } + return new String(b, off, len, UTF8_CHARSET); + } + + /** + * Write a printable representation of a byte array. + * + * @param b byte array + * @return string + * @see #toStringBinary(byte[], int, int) + */ + public static String toStringBinary(final byte [] b) { + if (b == null) + return "null"; + return toStringBinary(b, 0, b.length); + } + + /** + * Converts the given byte buffer to a printable representation, + * from the index 0 (inclusive) to the limit (exclusive), + * regardless of the current position. + * The position and the other index parameters are not changed. + * + * @param buf a byte buffer + * @return a string representation of the buffer's binary contents + * @see #toBytes(ByteBuffer) + */ + public static String toStringBinary(ByteBuffer buf) { + if (buf == null) + return "null"; + if (buf.hasArray()) { + return toStringBinary(buf.array(), buf.arrayOffset(), buf.limit()); + } + return toStringBinary(toBytes(buf)); + } + + private static final char[] HEX_CHARS_UPPER = { + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' + }; + + /** + * Write a printable representation of a byte array. 
Non-printable + * characters are hex escaped in the format \\x%02X, eg: + * \x00 \x05 etc + * + * @param b array to write out + * @param off offset to start at + * @param len length to write + * @return string output + */ + public static String toStringBinary(final byte [] b, int off, int len) { + StringBuilder result = new StringBuilder(); + // Just in case we are passed a 'len' that is > buffer length... + if (off >= b.length) return result.toString(); + if (off + len > b.length) len = b.length - off; + for (int i = off; i < off + len ; ++i) { + int ch = b[i] & 0xFF; + if (ch >= ' ' && ch <= '~' && ch != '\\') { + result.append((char)ch); + } else { + result.append("\\x"); + result.append(HEX_CHARS_UPPER[ch / 0x10]); + result.append(HEX_CHARS_UPPER[ch % 0x10]); + } + } + return result.toString(); + } + + private static boolean isHexDigit(char c) { + return + (c >= 'A' && c <= 'F') || + (c >= '0' && c <= '9'); + } + + /** + * Takes a ASCII digit in the range A-F0-9 and returns + * the corresponding integer/ordinal value. + * @param ch The hex digit. + * @return The converted hex value as a byte. + */ + public static byte toBinaryFromHex(byte ch) { + if (ch >= 'A' && ch <= 'F') + return (byte) ((byte)10 + (byte) (ch - 'A')); + // else + return (byte) (ch - '0'); + } + + public static byte [] toBytesBinary(String in) { + // this may be bigger than we need, but let's be safe. + byte [] b = new byte[in.length()]; + int size = 0; + for (int i = 0; i < in.length(); ++i) { + char ch = in.charAt(i); + if (ch == '\\' && in.length() > i+1 && in.charAt(i+1) == 'x') { + // ok, take next 2 hex digits. 
+ char hd1 = in.charAt(i+2); + char hd2 = in.charAt(i+3); + + // they need to be A-F0-9: + if (!isHexDigit(hd1) || + !isHexDigit(hd2)) { + // bogus escape code, ignore: + continue; + } + // turn hex ASCII digit -> number + byte d = (byte) ((toBinaryFromHex((byte)hd1) << 4) + toBinaryFromHex((byte)hd2)); + + b[size++] = d; + i += 3; // skip 3 + } else { + b[size++] = (byte) ch; + } + } + // resize: + byte [] b2 = new byte[size]; + System.arraycopy(b, 0, b2, 0, size); + return b2; + } + + /** + * Converts a string to a UTF-8 byte array. + * @param s string + * @return the byte array + */ + public static byte[] toBytes(String s) { + return s.getBytes(UTF8_CHARSET); + } + + /** + * Convert a boolean to a byte array. True becomes -1 + * and false becomes 0. + * + * @param b value + * @return b encoded in a byte array. + */ + public static byte [] toBytes(final boolean b) { + return new byte[] { b ? (byte) -1 : (byte) 0 }; + } + + /** + * Reverses {@link #toBytes(boolean)} + * @param b array + * @return True or false. + */ + public static boolean toBoolean(final byte [] b) { + if (b.length != 1) { + throw new IllegalArgumentException("Array has wrong size: " + b.length); + } + return b[0] != (byte) 0; + } + + /** + * Convert a long value to a byte array using big-endian. + * + * @param val value to convert + * @return the byte array + */ + public static byte[] toBytes(long val) { + byte [] b = new byte[8]; + for (int i = 7; i > 0; i--) { + b[i] = (byte) val; + val >>>= 8; + } + b[0] = (byte) val; + return b; + } + + /** + * @param left left operand + * @param right right operand + * @return 0 if equal, < 0 if left is less than right, etc. + */ + public static int compareTo(final byte [] left, final byte [] right) { + return LexicographicalComparerHolder.BEST_COMPARER. + compareTo(left, 0, left.length, right, 0, right.length); + } + + /** + * Lexicographically compare two arrays. 
+ * + * @param buffer1 left operand + * @param buffer2 right operand + * @param offset1 Where to start comparing in the left buffer + * @param offset2 Where to start comparing in the right buffer + * @param length1 How much to compare from the left buffer + * @param length2 How much to compare from the right buffer + * @return 0 if equal, < 0 if left is less than right, etc. + */ + public static int compareTo(byte[] buffer1, int offset1, int length1, + byte[] buffer2, int offset2, int length2) { + return LexicographicalComparerHolder.BEST_COMPARER. + compareTo(buffer1, offset1, length1, buffer2, offset2, length2); + } + + interface Comparer { + int compareTo( + T buffer1, int offset1, int length1, T buffer2, int offset2, int length2 + ); + } + + @VisibleForTesting + static Comparer lexicographicalComparerJavaImpl() { + return LexicographicalComparerHolder.PureJavaComparer.INSTANCE; + } + + /** + * Provides a lexicographical comparer implementation; either a Java + * implementation or a faster implementation based on {@link Unsafe}. + * + *

Uses reflection to gracefully fall back to the Java implementation if + * {@code Unsafe} isn't available. + */ + @VisibleForTesting + static class LexicographicalComparerHolder { + static final String UNSAFE_COMPARER_NAME = + LexicographicalComparerHolder.class.getName() + "$UnsafeComparer"; + + static final Comparer BEST_COMPARER = getBestComparer(); + /** + * Returns the Unsafe-using Comparer, or falls back to the pure-Java + * implementation if unable to do so. + */ + static Comparer getBestComparer() { + try { + Class theClass = Class.forName(UNSAFE_COMPARER_NAME); + + // yes, UnsafeComparer does implement Comparer + @SuppressWarnings("unchecked") + Comparer comparer = + (Comparer) theClass.getEnumConstants()[0]; + return comparer; + } catch (Throwable t) { // ensure we really catch *everything* + return lexicographicalComparerJavaImpl(); + } + } + + enum PureJavaComparer implements Comparer { + INSTANCE; + + @Override + public int compareTo(byte[] buffer1, int offset1, int length1, + byte[] buffer2, int offset2, int length2) { + // Short circuit equal case + if (buffer1 == buffer2 && + offset1 == offset2 && + length1 == length2) { + return 0; + } + // Bring WritableComparator code local + int end1 = offset1 + length1; + int end2 = offset2 + length2; + for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) { + int a = (buffer1[i] & 0xff); + int b = (buffer2[j] & 0xff); + if (a != b) { + return a - b; + } + } + return length1 - length2; + } + } + } + + /** + * @param left left operand + * @param right right operand + * @return True if equal + */ + public static boolean equals(final byte [] left, final byte [] right) { + // Could use Arrays.equals? 
+ //noinspection SimplifiableConditionalExpression + if (left == right) return true; + if (left == null || right == null) return false; + if (left.length != right.length) return false; + if (left.length == 0) return true; + + // Since we're often comparing adjacent sorted data, + // it's usual to have equal arrays except for the very last byte + // so check that first + if (left[left.length - 1] != right[right.length - 1]) return false; + + return compareTo(left, right) == 0; + } + + public static boolean equals(final byte[] left, int leftOffset, int leftLen, + final byte[] right, int rightOffset, int rightLen) { + // short circuit case + if (left == right && + leftOffset == rightOffset && + leftLen == rightLen) { + return true; + } + // different lengths fast check + if (leftLen != rightLen) { + return false; + } + if (leftLen == 0) { + return true; + } + + // Since we're often comparing adjacent sorted data, + // it's usual to have equal arrays except for the very last byte + // so check that first + if (left[leftOffset + leftLen - 1] != right[rightOffset + rightLen - 1]) return false; + + return LexicographicalComparerHolder.BEST_COMPARER. + compareTo(left, leftOffset, leftLen, right, rightOffset, rightLen) == 0; + } + + + /** + * @param a left operand + * @param buf right operand + * @return True if equal + */ + public static boolean equals(byte[] a, ByteBuffer buf) { + if (a == null) return buf == null; + if (buf == null) return false; + if (a.length != buf.remaining()) return false; + + // Thou shalt not modify the original byte buffer in what should be read only operations. + ByteBuffer b = buf.duplicate(); + for (byte anA : a) { + if (anA != b.get()) { + return false; + } + } + return true; + } + + + /** + * Return true if the byte array on the right is a prefix of the byte + * array on the left. 
+ */ + public static boolean startsWith(byte[] bytes, byte[] prefix) { + return bytes != null && prefix != null && + bytes.length >= prefix.length && + LexicographicalComparerHolder.BEST_COMPARER. + compareTo(bytes, 0, prefix.length, prefix, 0, prefix.length) == 0; + } + + + /** + * @param a first third + * @param b second third + * @param c third third + * @return New array made from a, b and c + */ + public static byte [] add(final byte [] a, final byte [] b, final byte [] c) { + byte [] result = new byte[a.length + b.length + c.length]; + System.arraycopy(a, 0, result, 0, a.length); + System.arraycopy(b, 0, result, a.length, b.length); + System.arraycopy(c, 0, result, a.length + b.length, c.length); + return result; + } + + /** + * @param arrays all the arrays to concatenate together. + * @return New array made from the concatenation of the given arrays. + */ + public static byte [] add(final byte [][] arrays) { + int length = 0; + for (int i = 0; i < arrays.length; i++) { + length += arrays[i].length; + } + byte [] result = new byte[length]; + int index = 0; + for (int i = 0; i < arrays.length; i++) { + System.arraycopy(arrays[i], 0, result, index, arrays[i].length); + index += arrays[i].length; + } + return result; + } + + /** + * Split passed range. Expensive operation relatively. Uses BigInteger math. + * Useful splitting ranges for MapReduce jobs. + * @param a Beginning of range + * @param b End of range + * @param num Number of times to split range. Pass 1 if you want to split + * the range in two; i.e. one split. 
+ * @return Array of dividing values + */ + + + + /** + * @param t operands + * @return Array of byte arrays made from passed array of Text + */ + public static byte [][] toByteArrays(final String [] t) { + byte [][] result = new byte[t.length][]; + for (int i = 0; i < t.length; i++) { + result[i] = BytesTools.toBytes(t[i]); + } + return result; + } + + /** + * @param t operands + * @return Array of binary byte arrays made from passed array of binary strings + */ + public static byte[][] toBinaryByteArrays(final String[] t) { + byte[][] result = new byte[t.length][]; + for (int i = 0; i < t.length; i++) { + result[i] = BytesTools.toBytesBinary(t[i]); + } + return result; + } + + /** + * @param column operand + * @return A byte array of a byte array where first and only entry is + * column + */ + public static byte [][] toByteArrays(final String column) { + return toByteArrays(toBytes(column)); + } + + /** + * @param column operand + * @return A byte array of a byte array where first and only entry is + * column + */ + public static byte [][] toByteArrays(final byte [] column) { + byte [][] result = new byte[1][]; + result[0] = column; + return result; + } + + + public static byte [] paddingInt(byte [] a){ + + if(a == null){ + return null; + } + + if (a.length==SIZEOF_INT){ + return a; + } + + byte [] b = new byte[SIZEOF_INT]; + if (Platform.littleEndian){ + for (int i = 0; i < SIZEOF_INT-a.length; i++) { + b[i] = 0x00; + } + System.arraycopy(a, 0, b,SIZEOF_INT-a.length, a.length); + }else { + System.arraycopy(a, 0, b, 0, a.length); + for (int i = a.length; i < SIZEOF_INT; i++) { + b[i] = 0x00; + } + } + return b; + } + + public static byte [] paddingLong(byte [] a){ + if(a == null){ + return null; + } + + if (a.length==SIZEOF_LONG){ + return a; + } + + byte [] b = new byte[SIZEOF_LONG]; + if (Platform.littleEndian){ + for (int i = 0; i < SIZEOF_LONG-a.length; i++) { + b[i] = 0x00; + } + System.arraycopy(a, 0, b,SIZEOF_LONG-a.length, a.length); + }else { + 
System.arraycopy(a, 0, b, 0, a.length); + for (int i = a.length; i < SIZEOF_LONG; i++) { + b[i] = 0x00; + } + } + return b; + } + + public static byte [] paddingShort(byte [] a){ + + if(a == null){ + return null; + } + + if (a.length==SIZEOF_SHORT){ + return a; + } + byte [] b = new byte[SIZEOF_SHORT]; + if (Platform.littleEndian){ + for (int i = 0; i < SIZEOF_SHORT-a.length; i++) { + b[i] = 0x00; + } + System.arraycopy(a, 0, b, SIZEOF_SHORT-a.length, a.length); + }else { + System.arraycopy(a, 0, b, 0, a.length); + for (int i = a.length; i < SIZEOF_SHORT; i++) { + b[i] = 0x00; + } + } + return b; + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/memory/unsafe/utils/JavaUtils.java b/src/main/java/io/mycat/memory/unsafe/utils/JavaUtils.java new file mode 100644 index 000000000..cb75a5f17 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/JavaUtils.java @@ -0,0 +1,383 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.mycat.memory.unsafe.utils; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Utility class, + */ + +public class JavaUtils { + private static final Logger logger = LoggerFactory.getLogger(JavaUtils.class); + + /** + * Define a default value for driver memory here since this value is referenced across the code + * base and nearly all files already use Utils.scala + */ + public static final long DEFAULT_DRIVER_MEM_MB = 1024; + + private static int MAX_DIR_CREATION_ATTEMPTS = 10; + + /** Closes the given object, ignoring IOExceptions. */ + public static void closeQuietly(Closeable closeable) { + try { + if (closeable != null) { + closeable.close(); + } + } catch (IOException e) { + logger.error("IOException should not have been thrown.", e); + } + } + + + /* + * Delete a file or directory and its contents recursively. + * Don't follow directories if they are symlinks. + * Throws an exception if deletion is unsuccessful. + */ + public static void deleteRecursively(File file) throws IOException { + if (file == null) { return; } + + if (file.isDirectory() && !isSymlink(file)) { + IOException savedIOException = null; + for (File child : listFilesSafely(file)) { + try { + deleteRecursively(child); + } catch (IOException e) { + // In case of multiple exceptions, only last one will be thrown + savedIOException = e; + } + } + if (savedIOException != null) { + throw savedIOException; + } + } + + boolean deleted = file.delete(); + // Delete can also fail if the file simply did not exist. 
+ if (!deleted && file.exists()) { + throw new IOException("Failed to delete: " + file.getAbsolutePath()); + } + } + + private static File[] listFilesSafely(File file) throws IOException { + if (file.exists()) { + File[] files = file.listFiles(); + if (files == null) { + throw new IOException("Failed to list files for dir: " + file); + } + return files; + } else { + return new File[0]; + } + } + + private static boolean isSymlink(File file) throws IOException { + Preconditions.checkNotNull(file); + File fileInCanonicalDir = null; + if (file.getParent() == null) { + fileInCanonicalDir = file; + } else { + fileInCanonicalDir = new File(file.getParentFile().getCanonicalFile(), file.getName()); + } + return !fileInCanonicalDir.getCanonicalFile().equals(fileInCanonicalDir.getAbsoluteFile()); + } + + private static final ImmutableMap timeSuffixes = + ImmutableMap.builder() + .put("us", TimeUnit.MICROSECONDS) + .put("ms", TimeUnit.MILLISECONDS) + .put("s", TimeUnit.SECONDS) + .put("m", TimeUnit.MINUTES) + .put("min", TimeUnit.MINUTES) + .put("h", TimeUnit.HOURS) + .put("d", TimeUnit.DAYS) + .build(); + + private static final ImmutableMap byteSuffixes = + ImmutableMap.builder() + .put("b", ByteUnit.BYTE) + .put("k", ByteUnit.KiB) + .put("kb", ByteUnit.KiB) + .put("m", ByteUnit.MiB) + .put("mb", ByteUnit.MiB) + .put("g", ByteUnit.GiB) + .put("gb", ByteUnit.GiB) + .put("t", ByteUnit.TiB) + .put("tb", ByteUnit.TiB) + .put("p", ByteUnit.PiB) + .put("pb", ByteUnit.PiB) + .build(); + + /** + * Convert a passed time string (e.g. 50s, 100ms, or 250us) to a time count in the given unit. + * The unit is also considered the default if the given string does not specify a unit. 
+ */ + public static long timeStringAs(String str, TimeUnit unit) { + String lower = str.toLowerCase().trim(); + + try { + Matcher m = Pattern.compile("(-?[0-9]+)([a-z]+)?").matcher(lower); + if (!m.matches()) { + throw new NumberFormatException("Failed to parse time string: " + str); + } + + long val = Long.parseLong(m.group(1)); + String suffix = m.group(2); + + // Check for invalid suffixes + if (suffix != null && !timeSuffixes.containsKey(suffix)) { + throw new NumberFormatException("Invalid suffix: \"" + suffix + "\""); + } + + // If suffix is valid use that, otherwise none was provided and use the default passed + return unit.convert(val, suffix != null ? timeSuffixes.get(suffix) : unit); + } catch (NumberFormatException e) { + String timeError = "Time must be specified as seconds (s), " + + "milliseconds (ms), microseconds (us), minutes (m or min), hour (h), or day (d). " + + "E.g. 50s, 100ms, or 250us."; + + throw new NumberFormatException(timeError + "\n" + e.getMessage()); + } + } + + /** + * Convert a time parameter such as (50s, 100ms, or 250us) to milliseconds for internal use. If + * no suffix is provided, the passed number is assumed to be in ms. + */ + public static long timeStringAsMs(String str) { + return timeStringAs(str, TimeUnit.MILLISECONDS); + } + + /** + * Convert a time parameter such as (50s, 100ms, or 250us) to seconds for internal use. If + * no suffix is provided, the passed number is assumed to be in seconds. + */ + public static long timeStringAsSec(String str) { + return timeStringAs(str, TimeUnit.SECONDS); + } + + /** + * Convert a passed byte string (e.g. 50b, 100kb, or 250mb) to the given. If no suffix is + * provided, a direct conversion to the provided unit is attempted. 
+ */ + public static long byteStringAs(String str, ByteUnit unit) { + String lower = str.toLowerCase().trim(); + + try { + Matcher m = Pattern.compile("([0-9]+)([a-z]+)?").matcher(lower); + Matcher fractionMatcher = Pattern.compile("([0-9]+\\.[0-9]+)([a-z]+)?").matcher(lower); + + if (m.matches()) { + long val = Long.parseLong(m.group(1)); + String suffix = m.group(2); + + // Check for invalid suffixes + if (suffix != null && !byteSuffixes.containsKey(suffix)) { + throw new NumberFormatException("Invalid suffix: \"" + suffix + "\""); + } + + // If suffix is valid use that, otherwise none was provided and use the default passed + return unit.convertFrom(val, suffix != null ? byteSuffixes.get(suffix) : unit); + } else if (fractionMatcher.matches()) { + throw new NumberFormatException("Fractional values are not supported. Input was: " + + fractionMatcher.group(1)); + } else { + throw new NumberFormatException("Failed to parse byte string: " + str); + } + + } catch (NumberFormatException e) { + String byteError = "Size must be specified as bytes (b), " + + "kibibytes (k), mebibytes (m), gibibytes (g), tebibytes (t), or pebibytes(p). " + + "E.g. 50b, 100k, or 250m."; + + throw new NumberFormatException(byteError + "\n" + e.getMessage()); + } + } + + /** + * Convert a passed byte string (e.g. 50b, 100k, or 250m) to bytes for + * internal use. + * + * If no suffix is provided, the passed number is assumed to be in bytes. + */ + public static long byteStringAsBytes(String str) { + return byteStringAs(str, ByteUnit.BYTE); + } + + /** + * Convert a passed byte string (e.g. 50b, 100k, or 250m) to kibibytes for + * internal use. + * + * If no suffix is provided, the passed number is assumed to be in kibibytes. + */ + public static long byteStringAsKb(String str) { + return byteStringAs(str, ByteUnit.KiB); + } + + /** + * Convert a passed byte string (e.g. 50b, 100k, or 250m) to mebibytes for + * internal use. 
+ * + * If no suffix is provided, the passed number is assumed to be in mebibytes. + */ + public static long byteStringAsMb(String str) { + return byteStringAs(str, ByteUnit.MiB); + } + + /** + * Convert a passed byte string (e.g. 50b, 100k, or 250m) to gibibytes for + * internal use. + * + * If no suffix is provided, the passed number is assumed to be in gibibytes. + */ + public static long byteStringAsGb(String str) { + return byteStringAs(str, ByteUnit.GiB); + } + + + public static String bytesToString(long size) { + long TB = 1L << 40; + long GB = 1L << 30; + long MB = 1L << 20; + long KB = 1L << 10; + double value = 0; + String unit = null; + + if (size >= 2*TB) { + value = size/TB; + unit = "TB"; + } else if (size >= 2*GB) { + value = size/GB; + unit = "GB"; + } else if (size >= 2*MB) { + value = size/MB; + unit = "MB"; + } else if (size >= 2*KB) { + value = size/KB; + unit = "KB"; + } else { + value = size; + unit = "B"; + } + + return value + " " + unit; + } + + + + public static String bytesToString2(long size) { + long TB = 1L << 40; + long GB = 1L << 30; + long MB = 1L << 20; + long KB = 1L << 10; + int value = 0; + String unit = null; + + if (size >= 2*TB) { + value =(int) (size/TB); + unit = "TB"; + } else if (size >= 2*GB) { + value = (int) (size/GB); + unit = "GB"; + } else if (size >= 2*MB) { + value = (int) (size/MB); + unit = "MB"; + } else if (size >= 2*KB) { + value = (int) (size/KB); + unit = "KB"; + } else { + value = (int) size; + unit = "B"; + } + + return value + unit; + } + + + public static File createDirectory(String rootDir, String blockmgr) throws IOException { + + int attempts = 0; + int maxAttempts = MAX_DIR_CREATION_ATTEMPTS; + File dir = null; + while (dir == null) { + attempts += 1; + if (attempts > maxAttempts) { + throw new IOException("Failed to create a temp directory (under " + rootDir + ") after " + + maxAttempts + " attempts!"); + } + try { + dir = new File(rootDir, blockmgr + "-" + UUID.randomUUID().toString()); + if 
(dir.exists() || !dir.mkdirs()) { + dir = null; + } + } catch (Exception e) { + logger.error(e.getMessage()); + } + } + + return dir.getCanonicalFile(); + } + + /* Calculates 'x' modulo 'mod', takes to consideration sign of x, +* i.e. if 'x' is negative, than 'x' % 'mod' is negative too +* so function return (x % mod) + mod in that case. +*/ + public static int nonNegativeMod(int x,int mod) { + int rawMod = x % mod; + int temp; + if (rawMod < 0) + temp= mod ; + else + temp =0; + return (rawMod + temp); + } + + + public static int nonNegativeHash(Object obj) { + // Required ? + if (obj == null) return 0; + + int hash = obj.hashCode(); + // math.abs fails for Int.MinValue + int hashAbs = 0; + + if (Integer.MAX_VALUE!= hash && Integer.MIN_VALUE != hash) + hashAbs = Math.abs(hash); + else + hashAbs = 0; + + // Nothing else to guard against ? + return hashAbs; + } + +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/MycatPropertyConf.java b/src/main/java/io/mycat/memory/unsafe/utils/MycatPropertyConf.java new file mode 100644 index 000000000..d74f9ed38 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/MycatPropertyConf.java @@ -0,0 +1,139 @@ +package io.mycat.memory.unsafe.utils; + +import java.util.concurrent.ConcurrentHashMap; + +/** + * Created by zagnix on 2016/6/2. + */ +public class MycatPropertyConf { + + private ConcurrentHashMap settings = new ConcurrentHashMap(); + + public MycatPropertyConf(){ + + } + + /** Set a configuration variable. 
*/ + public MycatPropertyConf set(String key, String value) { + set(key, value, false); + return this; + } + + + public MycatPropertyConf set(String key, String value, boolean silent){ + + if (key == null) { + throw new NullPointerException("null key"); + } + if (value == null) { + throw new NullPointerException("null value for " + key); + } + + if (!silent) { + } + + settings.put(key, value); + return this; + } + + public long getSizeAsBytes(String s, long i) { + String value = (String) settings.get(s); + if(value !=null){ + return byteStringAsBytes(value); + } + return i; + } + + public long getSizeAsBytes(String s, String defaultValue) { + String value = (String) settings.get(s); + if(value !=null){ + return byteStringAsBytes(value); + } + return byteStringAsBytes(defaultValue); + } + + + public double getDouble(String s, double v) { + return v; + } + + public boolean getBoolean(String s, boolean b) { + String value = (String) settings.get(s); + if(value !=null){ + + if(value.equals("true")){ + return true; + }else{ + return false; + } + } + return b; + } + + + public long getLong(String s, long l) { + return l; + } + + public boolean contains(String s) { + return true; + } + + public int getInt(String s, int i) { + return i; + } + + /** + * Convert a passed byte string (e.g. 50b, 100k, or 250m) to bytes for internal use. + * + * If no suffix is provided, the passed number is assumed to be in bytes. + */ + public Long byteStringAsBytes(String str) { + return JavaUtils.byteStringAsBytes(str); + } + + /** + * Convert a passed byte string (e.g. 50b, 100k, or 250m) to kibibytes for internal use. + * + * If no suffix is provided, the passed number is assumed to be in kibibytes. + */ + public Long byteStringAsKb(String str){ + return JavaUtils.byteStringAsKb(str); + } + + /** + * Convert a passed byte string (e.g. 50b, 100k, or 250m) to mebibytes for internal use. + * + * If no suffix is provided, the passed number is assumed to be in mebibytes. 
+ */ + public Long byteStringAsMb(String str) { + return JavaUtils.byteStringAsMb(str); + } + + /** + * Convert a passed byte string (e.g. 50b, 100k, or 250m, 500g) to gibibytes for internal use. + * + * If no suffix is provided, the passed number is assumed to be in gibibytes. + */ + public Long byteStringAsGb(String str) { + return JavaUtils.byteStringAsGb(str); + } + + /** + * Convert a Java memory parameter passed to -Xmx (such as 300m or 1g) to a number of mebibytes. + */ + public int memoryStringToMb(String str){ + // Convert to bytes, rather than directly to MB, because when no units are specified the unit + // is assumed to be bytes + return (int) (JavaUtils.byteStringAsBytes(str) / 1024 / 1024); + } + + public String getString(String s, String defaultValue) { + + String value = (String) settings.get(s); + if(value !=null){ + return value; + } + return defaultValue; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/AbstractScalaRowIterator.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/AbstractScalaRowIterator.java new file mode 100644 index 000000000..c19d6c8f6 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/AbstractScalaRowIterator.java @@ -0,0 +1,23 @@ +package io.mycat.memory.unsafe.utils.sort; + +import java.util.Iterator; + +/** + * Created by zagnix 2016/6/6. + */ +public class AbstractScalaRowIterator implements Iterator { + @Override + public boolean hasNext() { + return false; + } + + @Override + public T next() { + return null; + } + + @Override + public void remove() { + + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/PrefixComparator.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/PrefixComparator.java new file mode 100644 index 000000000..c55dae873 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/PrefixComparator.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + +/** + * Compares 8-byte key prefixes in prefix sort. Subclasses may implement type-specific + * comparisons, such as lexicographic comparison for strings. + */ + +public abstract class PrefixComparator { + public abstract int compare(long prefix1, long prefix2); +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/PrefixComparators.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/PrefixComparators.java new file mode 100644 index 000000000..92c72b1f6 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/PrefixComparators.java @@ -0,0 +1,115 @@ + + +package io.mycat.memory.unsafe.utils.sort; + +import com.google.common.primitives.UnsignedLongs; +import io.mycat.memory.unsafe.types.ByteArray; + + +public class PrefixComparators { + private PrefixComparators() {} + + public static final PrefixComparator STRING = new UnsignedPrefixComparator(); + public static final PrefixComparator STRING_DESC = new UnsignedPrefixComparatorDesc(); + public static final PrefixComparator BINARY = new UnsignedPrefixComparator(); + public static final PrefixComparator BINARY_DESC = new UnsignedPrefixComparatorDesc(); + public static final PrefixComparator LONG = new SignedPrefixComparator(); + public static final 
PrefixComparator LONG_DESC = new SignedPrefixComparatorDesc(); + public static final PrefixComparator DOUBLE = new UnsignedPrefixComparator(); + public static final PrefixComparator DOUBLE_DESC = new UnsignedPrefixComparatorDesc(); + + public static final PrefixComparator RadixSortDemo = new RadixSortDemo(); + + + + public static final class BinaryPrefixComparator { + public static long computePrefix(byte[] bytes) { + return ByteArray.getPrefix(bytes); + } + } + + public static final class DoublePrefixComparator { + /** + * Converts the double into a value that compares correctly as an unsigned long. For more + * details see http://stereopsis.com/radix.html. + */ + public static long computePrefix(double value) { + // Java's doubleToLongBits already canonicalizes all NaN values to the smallest possible + // positive NaN, so there's nothing special we need to do for NaNs. + long bits = Double.doubleToLongBits(value); + // Negative floats compare backwards due to their sign-magnitude representation, so flip + // all the bits in this case. + long mask = -(bits >>> 63) | 0x8000000000000000L; + return bits ^ mask; + } + } + + /** + * Provides radix sort parameters. Comparators implementing this also are indicating that the + * ordering they define is compatible with radix sort. + */ + public abstract static class RadixSortSupport extends PrefixComparator { + /** @return Whether the sort should be descending in binary sort order. */ + public abstract boolean sortDescending(); + + /** @return Whether the sort should take into account the sign bit. 
*/ + public abstract boolean sortSigned(); + } + + public static final class RadixSortDemo extends PrefixComparators.RadixSortSupport{ + + @Override + public boolean sortDescending() { + return false; + } + + @Override + public boolean sortSigned() { + return false; + } + + @Override + public int compare(long prefix1, long prefix2) { + return PrefixComparators.BINARY.compare(prefix1 & 0xffffff0000L, prefix1 & 0xffffff0000L); + } + } + // + // Standard prefix comparator implementations + // + + public static final class UnsignedPrefixComparator extends RadixSortSupport { + @Override public boolean sortDescending() { return false; } + @Override public boolean sortSigned() { return false; } + @Override + public int compare(long aPrefix, long bPrefix) { + return UnsignedLongs.compare(aPrefix, bPrefix); + } + } + + public static final class UnsignedPrefixComparatorDesc extends RadixSortSupport { + @Override public boolean sortDescending() { return true; } + @Override public boolean sortSigned() { return false; } + @Override + public int compare(long bPrefix, long aPrefix) { + return UnsignedLongs.compare(aPrefix, bPrefix); + } + } + + public static final class SignedPrefixComparator extends RadixSortSupport { + @Override public boolean sortDescending() { return false; } + @Override public boolean sortSigned() { return true; } + @Override + public int compare(long a, long b) { + return (a < b) ? -1 : (a > b) ? 1 : 0; + } + } + + public static final class SignedPrefixComparatorDesc extends RadixSortSupport { + @Override public boolean sortDescending() { return true; } + @Override public boolean sortSigned() { return true; } + @Override + public int compare(long b, long a) { + return (a < b) ? -1 : (a > b) ? 
1 : 0; + } + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/RadixSort.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/RadixSort.java new file mode 100644 index 000000000..8bb7155e9 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/RadixSort.java @@ -0,0 +1,254 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + + +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.array.LongArray; + +public class RadixSort { + + /** + * Sorts a given array of longs using least-significant-digit radix sort. This routine assumes + * you have extra space at the end of the array at least equal to the number of records. The + * sort is destructive and may relocate the data positioned within the array. + * + * @param array array of long elements followed by at least that many empty slots. + * @param numRecords number of data records in the array. + * @param startByteIndex the first byte (in range [0, 7]) to sort each long by, counting from the + * least significant byte. + * @param endByteIndex the last byte (in range [0, 7]) to sort each long by, counting from the + * least significant byte. Must be greater than startByteIndex. 
+ * @param desc whether this is a descending (binary-order) sort. + * @param signed whether this is a signed (two's complement) sort. + * + * @return The starting index of the sorted data within the given array. We return this instead + * of always copying the data back to position zero for efficiency. + */ + public static int sort( + LongArray array, int numRecords, int startByteIndex, int endByteIndex, + boolean desc, boolean signed) { + assert startByteIndex >= 0 : "startByteIndex (" + startByteIndex + ") should >= 0"; + assert endByteIndex <= 7 : "endByteIndex (" + endByteIndex + ") should <= 7"; + assert endByteIndex > startByteIndex; + assert numRecords * 2 <= array.size(); + int inIndex = 0; + int outIndex = numRecords; + if (numRecords > 0) { + long[][] counts = getCounts(array, numRecords, startByteIndex, endByteIndex); + for (int i = startByteIndex; i <= endByteIndex; i++) { + if (counts[i] != null) { + sortAtByte( + array, numRecords, counts[i], i, inIndex, outIndex, + desc, signed && i == endByteIndex); + int tmp = inIndex; + inIndex = outIndex; + outIndex = tmp; + } + } + } + return inIndex; + } + + /** + * Performs a partial sort by copying data into destination offsets for each byte value at the + * specified byte offset. + * + * @param array array to partially sort. + * @param numRecords number of data records in the array. + * @param counts counts for each byte value. This routine destructively modifies this array. + * @param byteIdx the byte in a long to sort at, counting from the least significant byte. + * @param inIndex the starting index in the array where input data is located. + * @param outIndex the starting index where sorted output data should be written. + * @param desc whether this is a descending (binary-order) sort. + * @param signed whether this is a signed (two's complement) sort (only applies to last byte). 
+ */ + private static void sortAtByte( + LongArray array, int numRecords, long[] counts, int byteIdx, int inIndex, int outIndex, + boolean desc, boolean signed) { + assert counts.length == 256; + long[] offsets = transformCountsToOffsets( + counts, numRecords, array.getBaseOffset() + outIndex * 8, 8, desc, signed); + Object baseObject = array.getBaseObject(); + long baseOffset = array.getBaseOffset() + inIndex * 8; + long maxOffset = baseOffset + numRecords * 8; + for (long offset = baseOffset; offset < maxOffset; offset += 8) { + long value = Platform.getLong(baseObject, offset); + int bucket = (int)((value >>> (byteIdx * 8)) & 0xff); + Platform.putLong(baseObject, offsets[bucket], value); + offsets[bucket] += 8; + } + } + + /** + * Computes a value histogram for each byte in the given array. + * + * @param array array to count records in. + * @param numRecords number of data records in the array. + * @param startByteIndex the first byte to compute counts for (the prior are skipped). + * @param endByteIndex the last byte to compute counts for. + * + * @return an array of eight 256-byte count arrays, one for each byte starting from the least + * significant byte. If the byte does not need sorting the array will be null. + */ + private static long[][] getCounts( + LongArray array, int numRecords, int startByteIndex, int endByteIndex) { + long[][] counts = new long[8][]; + // Optimization: do a fast pre-pass to determine which byte indices we can skip for sorting. + // If all the byte values at a particular index are the same we don't need to count it. + long bitwiseMax = 0; + long bitwiseMin = -1L; + long maxOffset = array.getBaseOffset() + numRecords * 8; + Object baseObject = array.getBaseObject(); + for (long offset = array.getBaseOffset(); offset < maxOffset; offset += 8) { + long value = Platform.getLong(baseObject, offset); + bitwiseMax |= value; + bitwiseMin &= value; + } + long bitsChanged = bitwiseMin ^ bitwiseMax; + // Compute counts for each byte index. 
+ for (int i = startByteIndex; i <= endByteIndex; i++) { + if (((bitsChanged >>> (i * 8)) & 0xff) != 0) { + counts[i] = new long[256]; + // TODO(ekl) consider computing all the counts in one pass. + for (long offset = array.getBaseOffset(); offset < maxOffset; offset += 8) { + counts[i][(int)((Platform.getLong(baseObject, offset) >>> (i * 8)) & 0xff)]++; + } + } + } + return counts; + } + + /** + * Transforms counts into the proper unsafe output offsets for the sort type. + * + * @param counts counts for each byte value. This routine destructively modifies this array. + * @param numRecords number of data records in the original data array. + * @param outputOffset output offset in bytes from the base array object. + * @param bytesPerRecord size of each record (8 for plain sort, 16 for key-prefix sort). + * @param desc whether this is a descending (binary-order) sort. + * @param signed whether this is a signed (two's complement) sort. + * + * @return the input counts array. + */ + private static long[] transformCountsToOffsets( + long[] counts, int numRecords, long outputOffset, int bytesPerRecord, + boolean desc, boolean signed) { + assert counts.length == 256; + int start = signed ? 128 : 0; // output the negative records first (values 129-255). + if (desc) { + int pos = numRecords; + for (int i = start; i < start + 256; i++) { + pos -= counts[i & 0xff]; + counts[i & 0xff] = outputOffset + pos * bytesPerRecord; + } + } else { + int pos = 0; + for (int i = start; i < start + 256; i++) { + long tmp = counts[i & 0xff]; + counts[i & 0xff] = outputOffset + pos * bytesPerRecord; + pos += tmp; + } + } + return counts; + } + + /** + * Specialization of sort() for key-prefix arrays. In this type of array, each record consists + * of two longs, only the second of which is sorted on. 
+ */ + public static int sortKeyPrefixArray( + LongArray array, + int numRecords, + int startByteIndex, + int endByteIndex, + boolean desc, + boolean signed) { + assert startByteIndex >= 0 : "startByteIndex (" + startByteIndex + ") should >= 0"; + assert endByteIndex <= 7 : "endByteIndex (" + endByteIndex + ") should <= 7"; + assert endByteIndex > startByteIndex; + assert numRecords * 4 <= array.size(); + int inIndex = 0; + int outIndex = numRecords * 2; + if (numRecords > 0) { + long[][] counts = getKeyPrefixArrayCounts(array, numRecords, startByteIndex, endByteIndex); + for (int i = startByteIndex; i <= endByteIndex; i++) { + if (counts[i] != null) { + sortKeyPrefixArrayAtByte( + array, numRecords, counts[i], i, inIndex, outIndex, + desc, signed && i == endByteIndex); + int tmp = inIndex; + inIndex = outIndex; + outIndex = tmp; + } + } + } + return inIndex; + } + + /** + * Specialization of getCounts() for key-prefix arrays. We could probably combine this with + * getCounts with some added parameters but that seems to hurt in benchmarks. + */ + private static long[][] getKeyPrefixArrayCounts( + LongArray array, int numRecords, int startByteIndex, int endByteIndex) { + long[][] counts = new long[8][]; + long bitwiseMax = 0; + long bitwiseMin = -1L; + long limit = array.getBaseOffset() + numRecords * 16; + Object baseObject = array.getBaseObject(); + for (long offset = array.getBaseOffset(); offset < limit; offset += 16) { + long value = Platform.getLong(baseObject, offset + 8); + bitwiseMax |= value; + bitwiseMin &= value; + } + long bitsChanged = bitwiseMin ^ bitwiseMax; + for (int i = startByteIndex; i <= endByteIndex; i++) { + if (((bitsChanged >>> (i * 8)) & 0xff) != 0) { + counts[i] = new long[256]; + for (long offset = array.getBaseOffset(); offset < limit; offset += 16) { + counts[i][(int)((Platform.getLong(baseObject, offset + 8) >>> (i * 8)) & 0xff)]++; + } + } + } + return counts; + } + + /** + * Specialization of sortAtByte() for key-prefix arrays. 
+ */ + private static void sortKeyPrefixArrayAtByte( + LongArray array, int numRecords, long[] counts, int byteIdx, int inIndex, int outIndex, + boolean desc, boolean signed) { + assert counts.length == 256; + long[] offsets = transformCountsToOffsets( + counts, numRecords, array.getBaseOffset() + outIndex * 8, 16, desc, signed); + Object baseObject = array.getBaseObject(); + long baseOffset = array.getBaseOffset() + inIndex * 8; + long maxOffset = baseOffset + numRecords * 16; + for (long offset = baseOffset; offset < maxOffset; offset += 16) { + long key = Platform.getLong(baseObject, offset); + long prefix = Platform.getLong(baseObject, offset + 8); + int bucket = (int)((prefix >>> (byteIdx * 8)) & 0xff); + long dest = offsets[bucket]; + Platform.putLong(baseObject, dest, key); + Platform.putLong(baseObject, dest + 8, prefix); + offsets[bucket] += 16; + } + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/RecordComparator.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/RecordComparator.java new file mode 100644 index 000000000..ef4f314e9 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/RecordComparator.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.mycat.memory.unsafe.utils.sort; + +/** + * Compares records for ordering. In cases where the entire sorting key can fit in the 8-byte + * prefix, this may simply return 0. + */ +public abstract class RecordComparator { + + /** + * Compare two records for order. + * + * @return a negative integer, zero, or a positive integer as the first record is less than, + * equal to, or greater than the second. + */ + public abstract int compare( + Object leftBaseObject, + long leftBaseOffset, + Object rightBaseObject, + long rightBaseOffset); +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/RecordPointerAndKeyPrefix.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/RecordPointerAndKeyPrefix.java new file mode 100644 index 000000000..c115a38b1 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/RecordPointerAndKeyPrefix.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + +public final class RecordPointerAndKeyPrefix { + /** + * A pointer to a record; see {@link io.mycat.memory.unsafe.memory} for a + * description of how these addresses are encoded. 
+ */ + public long recordPointer; + + /** + * A key prefix, for use in comparisons. + */ + public long keyPrefix; +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/RowPrefixComputer.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/RowPrefixComputer.java new file mode 100644 index 000000000..779ba5e6e --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/RowPrefixComputer.java @@ -0,0 +1,90 @@ +package io.mycat.memory.unsafe.utils.sort; + + +import io.mycat.memory.unsafe.row.StructType; +import io.mycat.memory.unsafe.row.UnsafeRow; +import io.mycat.memory.unsafe.utils.BytesTools; +import io.mycat.sqlengine.mpp.ColMeta; +import io.mycat.sqlengine.mpp.OrderCol; + +import javax.annotation.Nonnull; +import java.io.UnsupportedEncodingException; + +/** + * Created by zagnix on 2016/6/20. + */ +public class RowPrefixComputer extends UnsafeExternalRowSorter.PrefixComputer { + @Nonnull + private final StructType schema; + private final ColMeta colMeta; + + public RowPrefixComputer(StructType schema){ + this.schema = schema; + /** + * 通过计算得到排序关键词的第一个在行的索引下标 + */ + OrderCol[] orderCols = schema.getOrderCols(); + + if (orderCols != null && orderCols.length > 0){ + this.colMeta = orderCols[0].colMeta; + }else { + this.colMeta = null; + } + } + + protected long computePrefix(UnsafeRow row) throws UnsupportedEncodingException { + + if(this.colMeta == null){ + return 0; + } + + int orderIndexType = colMeta.colType; + + byte[] rowIndexElem = null; + + if(!row.isNullAt(colMeta.colIndex)) { + rowIndexElem = row.getBinary(colMeta.colIndex); + /** + * 这里注意一下,order by 排序的第一个字段 + */ + switch (orderIndexType) { + case ColMeta.COL_TYPE_INT: + case ColMeta.COL_TYPE_LONG: + case ColMeta.COL_TYPE_INT24: + return BytesTools.getInt(rowIndexElem); + case ColMeta.COL_TYPE_SHORT: + return BytesTools.getShort(rowIndexElem); + case ColMeta.COL_TYPE_LONGLONG: + return BytesTools.getLong(rowIndexElem); + case ColMeta.COL_TYPE_FLOAT: + return 
PrefixComparators.DoublePrefixComparator. + computePrefix(BytesTools.getFloat(rowIndexElem)); + case ColMeta.COL_TYPE_DOUBLE: + case ColMeta.COL_TYPE_DECIMAL: + case ColMeta.COL_TYPE_NEWDECIMAL: + return PrefixComparators.DoublePrefixComparator. + computePrefix(BytesTools.getDouble(rowIndexElem)); + case ColMeta.COL_TYPE_DATE: + case ColMeta.COL_TYPE_TIMSTAMP: + case ColMeta.COL_TYPE_TIME: + case ColMeta.COL_TYPE_YEAR: + case ColMeta.COL_TYPE_DATETIME: + case ColMeta.COL_TYPE_NEWDATE: + case ColMeta.COL_TYPE_BIT: + case ColMeta.COL_TYPE_VAR_STRING: + case ColMeta.COL_TYPE_STRING: + // ENUM和SET类型都是字符串,按字符串处理 + case ColMeta.COL_TYPE_ENUM: + case ColMeta.COL_TYPE_SET: + return PrefixComparators.BinaryPrefixComparator.computePrefix(rowIndexElem); + //BLOB相关类型和GEOMETRY类型不支持排序,略掉 + } + } else { + rowIndexElem = new byte[1]; + rowIndexElem[0] = UnsafeRow.NULL_MARK; + return PrefixComparators.BinaryPrefixComparator.computePrefix(rowIndexElem); + } + + return 0; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/SortDataFormat.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/SortDataFormat.java new file mode 100644 index 000000000..bb6d1aff3 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/SortDataFormat.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + +/** + * Abstraction for sorting an arbitrary input buffer of data. This interface requires determining + * the sort key for a given element index, as well as swapping elements and moving data from one + * buffer to another. + * + * Example format: an array of numbers, where each element is also the key. + * See [[KVArraySortDataFormat]] for a more exciting format. + * + * Note: Declaring and instantiating multiple subclasses of this class would prevent JIT inlining + * overridden methods and hence decrease the shuffle performance. + * + * @tparam K Type of the sort key of each element + * @tparam Buffer Internal data structure used by a particular format (e.g., Array[Int]). + */ +// TODO: Making Buffer a real trait would be a better abstraction, but adds some complexity. + +public abstract class SortDataFormat { + + /** + * Creates a new mutable key for reuse. This should be implemented if you want to override + * [[getKey(Buffer, Int, K)]]. + */ + public abstract K newKey(); + + /** Return the sort key for the element at the given index. */ + protected abstract K getKey(Buffer data, int pos); + + /** + * Returns the sort key for the element at the given index and reuse the input key if possible. + * The default implementation ignores the reuse parameter and invokes [[getKey(Buffer, Int]]. + * If you want to override this method, you must implement [[newKey()]]. + */ + protected K getKey(Buffer data, int pos, K reuse) { + return getKey(data, pos); + } + + /** Swap two elements. */ + protected abstract void swap(Buffer data, int pos0,int pos1); + + /** Copy a single element from src(srcPos) to dst(dstPos). */ + protected abstract void copyElement(Buffer src, int srcPos,Buffer dst ,int dstPos); + + /** + * Copy a range of elements starting at src(srcPos) to dst, starting at dstPos. 
+ * Overlapping ranges are allowed. + */ + protected abstract void copyRange(Buffer src, int srcPos,Buffer dst, int dstPos, int length); + + /** + * Allocates a Buffer that can hold up to 'length' elements. + * All elements of the buffer should be considered invalid until data is explicitly copied in. + */ + protected abstract Buffer allocate(int length); +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/SortPrefixUtils.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/SortPrefixUtils.java new file mode 100644 index 000000000..51798cf74 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/SortPrefixUtils.java @@ -0,0 +1,33 @@ +package io.mycat.memory.unsafe.utils.sort; + + +import io.mycat.memory.unsafe.row.StructType; + +/** + * Created by zagnix on 2016/6/6. + */ +public final class SortPrefixUtils { + public static boolean canSortFullyWithPrefix(long apply) { + return true; + } + + public static PrefixComparator getPrefixComparator(StructType keySchema) { + return null; + } + + public static UnsafeExternalRowSorter.PrefixComputer createPrefixGenerator(StructType keySchema) { + return null; + } + + /** + * A dummy prefix comparator which always claims that prefixes are equal. This is used in cases + * where we don't know how to generate or compare prefixes for a SortOrder. + */ + private class NoOpPrefixComparator extends PrefixComparator { + + @Override + public int compare(long prefix1, long prefix2) { + return 0; + } + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/Sorter.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/Sorter.java new file mode 100644 index 000000000..21157b67b --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/Sorter.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + +import java.util.Comparator; + +/** + * A simple wrapper over the Java implementation [[TimSort]]. + * + * The Java implementation is package private, and hence it cannot be called outside package + * org.opencloudb.memory.unsafe.utils.sort. This is a simple wrapper of it that is available to mycat. + */ +public class Sorter { + + private TimSort timSort = null; + + public Sorter(SortDataFormat s){ + timSort = new TimSort(s); + } + + /** + * Sorts the input buffer within range [lo, hi). 
+ */ + public void sort(Buffer a, int lo, int hi, Comparator c) { + timSort.sort(a, lo, hi, c); + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/TestSorter.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/TestSorter.java new file mode 100644 index 000000000..bcf9b20aa --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/TestSorter.java @@ -0,0 +1,170 @@ +package io.mycat.memory.unsafe.utils.sort; + +import io.mycat.memory.MyCatMemory; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.memory.mm.MemoryManager; +import io.mycat.memory.unsafe.row.BufferHolder; +import io.mycat.memory.unsafe.row.StructType; +import io.mycat.memory.unsafe.row.UnsafeRow; +import io.mycat.memory.unsafe.row.UnsafeRowWriter; +import io.mycat.memory.unsafe.utils.MycatPropertyConf; +import io.mycat.sqlengine.mpp.ColMeta; +import io.mycat.sqlengine.mpp.OrderCol; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.*; +import java.util.concurrent.CountDownLatch; + +/** + * Created by zagnix on 16-7-9. 
+ */ +public class TestSorter implements Runnable { + private static final Logger logger = LoggerFactory.getLogger(TestSorter.class); + + private static final int TEST_SIZE = 1000000; + private static int TASK_SIZE = 100; + private static CountDownLatch countDownLatch = new CountDownLatch(100); + public void runSorter( MyCatMemory myCatMemory, + MemoryManager memoryManager, + MycatPropertyConf conf) throws NoSuchFieldException, IllegalAccessException, IOException { + DataNodeMemoryManager dataNodeMemoryManager = new DataNodeMemoryManager(memoryManager, + Thread.currentThread().getId()); + /** + * 1.schema ,模拟一个field字段值 + * + */ + int fieldCount = 3; + ColMeta colMeta = null; + Map colMetaMap = new HashMap(fieldCount); + colMeta = new ColMeta(0, ColMeta.COL_TYPE_STRING); + colMetaMap.put("id", colMeta); + colMeta = new ColMeta(1, ColMeta.COL_TYPE_STRING); + colMetaMap.put("name", colMeta); + colMeta = new ColMeta(2, ColMeta.COL_TYPE_STRING); + colMetaMap.put("age", colMeta); + + + OrderCol[] orderCols = new OrderCol[1]; + OrderCol orderCol = new OrderCol(colMetaMap.get("id"), + OrderCol.COL_ORDER_TYPE_ASC); + orderCols[0] = orderCol; + /** + * 2 .PrefixComputer + */ + StructType schema = new StructType(colMetaMap, fieldCount); + schema.setOrderCols(orderCols); + + UnsafeExternalRowSorter.PrefixComputer prefixComputer = + new RowPrefixComputer(schema); + + /** + * 3 .PrefixComparator 默认是ASC,可以选择DESC + */ + final PrefixComparator prefixComparator = PrefixComparators.LONG; + + UnsafeExternalRowSorter sorter = + new UnsafeExternalRowSorter(dataNodeMemoryManager, + myCatMemory, + schema, + prefixComparator, + prefixComputer, + conf.getSizeAsBytes("mycat.buffer.pageSize","1m"), + true, /**使用基数排序?true or false*/ + true); + UnsafeRow unsafeRow; + BufferHolder bufferHolder; + UnsafeRowWriter unsafeRowWriter; + String line = "testUnsafeRow"; + final Random rand = new Random(42); + for (int i = 0; i < TEST_SIZE; i++) { + unsafeRow = new UnsafeRow(3); + bufferHolder = new 
BufferHolder(unsafeRow); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder,3); + bufferHolder.reset(); + + String key = getRandomString(rand.nextInt(300)+100); + + unsafeRowWriter.write(0,key.getBytes()); + unsafeRowWriter.write(1, line.getBytes()); + unsafeRowWriter.write(2, ("35" + 1).getBytes()); + + unsafeRow.setTotalSize(bufferHolder.totalSize()); + sorter.insertRow(unsafeRow); + } + Iterator iter = sorter.sort(); + UnsafeRow row = null; + int indexprint = 0; + while (iter.hasNext()) { + row = iter.next(); + indexprint++; + } + + sorter.cleanupResources(); + countDownLatch.countDown(); + + System.out.println("Thread ID :" + Thread.currentThread().getId() + "Index : " + indexprint); + } + + + public static String getRandomString(int length) { //length表示生成字符串的长度 + String base = "abcdefghijklmnopqrstuvwxyz0123456789"; + Random random = new Random(); + StringBuffer sb = new StringBuffer(); + for (int i = 0; i < length; i++) { + int number = random.nextInt(base.length()); + sb.append(base.charAt(number)); + } + return sb.toString(); + } + final MyCatMemory myCatMemory ; + final MemoryManager memoryManager; + final MycatPropertyConf conf; + + + public TestSorter( MyCatMemory myCatMemory, MemoryManager memoryManager,MycatPropertyConf conf) throws NoSuchFieldException, IllegalAccessException { + this.myCatMemory = myCatMemory; + this.memoryManager = memoryManager; + this.conf = conf; + } + + @Override + public void run() { + try { + runSorter(myCatMemory,memoryManager,conf); + } catch (NoSuchFieldException e) { + logger.error(e.getMessage()); + } catch (IllegalAccessException e) { + logger.error(e.getMessage()); + } catch (IOException e) { + logger.error(e.getMessage()); + } + } + + public static void main(String[] args) throws Exception { + + MyCatMemory myCatMemory ; + MemoryManager memoryManager; + MycatPropertyConf conf; + + myCatMemory = new MyCatMemory(); + memoryManager = myCatMemory.getResultMergeMemoryManager(); + conf = myCatMemory.getConf(); + + for (int 
i = 0; i < TASK_SIZE; i++) { + Thread thread = new Thread(new TestSorter(myCatMemory,memoryManager,conf)); + thread.start(); + } + + while (countDownLatch.getCount() != 0){ + System.err.println("count ========================>" + countDownLatch.getCount()); + Thread.sleep(1000); + } + + System.err.println(TASK_SIZE + " tasks sorter finished ok !!!!!!!!!"); + + System.exit(1); + } + +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/TimSort.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/TimSort.java new file mode 100644 index 000000000..cfc4a0919 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/TimSort.java @@ -0,0 +1,955 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Based on TimSort.java from the Android Open Source Project + * + * Copyright (C) 2008 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + +import java.util.Comparator; + +/** + * A port of the Android TimSort class, which utilizes a "stable, adaptive, iterative mergesort." + * See the method comment on sort() for more details. + * + * This has been kept in Java with the original style in order to match very closely with the + * Android source code, and thus be easy to verify correctness. The class is package private. We put + * a simple Scala wrapper {@link io.mycat.memory.unsafe.utils.sort.Sorter}, which is available to + * The purpose of the port is to generalize the interface to the sort to accept input data formats + * besides simple arrays where every element is sorted individually. For instance, the AppendOnlyMap + * uses this to sort an Array with alternating elements of the form [key, value, key, value]. + * This generalization comes with minimal overhead -- see SortDataFormat for more information. + * + * We allow key reuse to prevent creating many key objects -- see SortDataFormat. + * + * @see io.mycat.memory.unsafe.utils.sort.SortDataFormat + * @see io.mycat.memory.unsafe.utils.sort.Sorter + */ +class TimSort { + + /** + * This is the minimum sized sequence that will be merged. Shorter + * sequences will be lengthened by calling binarySort. If the entire + * array is less than this length, no merges will be performed. + * + * This constant should be a power of two. It was 64 in Tim Peter's C + * implementation, but 32 was empirically determined to work better in + * this implementation. 
In the unlikely event that you set this constant + * to be a number that's not a power of two, you'll need to change the + * minRunLength computation. + * + * If you decrease this constant, you must change the stackLen + * computation in the TimSort constructor, or you risk an + * ArrayOutOfBounds exception. See listsort.txt for a discussion + * of the minimum stack length required as a function of the length + * of the array being sorted and the minimum merge sequence length. + */ + private static final int MIN_MERGE = 32; + + private final SortDataFormat s; + + public TimSort(SortDataFormat sortDataFormat) { + this.s = sortDataFormat; + } + + /** + * A stable, adaptive, iterative mergesort that requires far fewer than + * n lg(n) comparisons when running on partially sorted arrays, while + * offering performance comparable to a traditional mergesort when run + * on random arrays. Like all proper mergesorts, this sort is stable and + * runs O(n log n) time (worst case). In the worst case, this sort requires + * temporary storage space for n/2 object references; in the best case, + * it requires only a small constant amount of space. + * + * This implementation was adapted from Tim Peters's list sort for + * Python, which is described in detail here: + * + * http://svn.python.org/projects/python/trunk/Objects/listsort.txt + * + * Tim's C code may be found here: + * + * http://svn.python.org/projects/python/trunk/Objects/listobject.c + * + * The underlying techniques are described in this paper (and may have + * even earlier origins): + * + * "Optimistic Sorting and Information Theoretic Complexity" + * Peter McIlroy + * SODA (Fourth Annual ACM-SIAM Symposium on Discrete Algorithms), + * pp 467-474, Austin, Texas, 25-27 January 1993. 
+ * + * While the API to this class consists solely of static methods, it is + * (privately) instantiable; a TimSort instance holds the state of an ongoing + * sort, assuming the input array is large enough to warrant the full-blown + * TimSort. Small arrays are sorted in place, using a binary insertion sort. + * + * @author Josh Bloch + */ + public void sort(Buffer a, int lo, int hi, Comparator c) { + assert c != null; + + int nRemaining = hi - lo; + if (nRemaining < 2) + return; // Arrays of size 0 and 1 are always sorted + + // If array is small, do a "mini-TimSort" with no merges + if (nRemaining < MIN_MERGE) { + int initRunLen = countRunAndMakeAscending(a, lo, hi, c); + binarySort(a, lo, hi, lo + initRunLen, c); + return; + } + + /** + * March over the array once, left to right, finding natural runs, + * extending short natural runs to minRun elements, and merging runs + * to maintain stack invariant. + */ + SortState sortState = new SortState(a, c, hi - lo); + int minRun = minRunLength(nRemaining); + do { + // Identify next run + int runLen = countRunAndMakeAscending(a, lo, hi, c); + + // If run is short, extend to min(minRun, nRemaining) + if (runLen < minRun) { + int force = nRemaining <= minRun ? nRemaining : minRun; + binarySort(a, lo, lo + force, lo + runLen, c); + runLen = force; + } + + // Push run onto pending-run stack, and maybe merge + sortState.pushRun(lo, runLen); + sortState.mergeCollapse(); + + // Advance to find next run + lo += runLen; + nRemaining -= runLen; + } while (nRemaining != 0); + + // Merge all remaining runs to complete sort + assert lo == hi; + sortState.mergeForceCollapse(); + assert sortState.stackSize == 1; + } + + /** + * Sorts the specified portion of the specified array using a binary + * insertion sort. This is the best method for sorting small numbers + * of elements. It requires O(n log n) compares, but O(n^2) data + * movement (worst case). 
+ * + * If the initial part of the specified range is already sorted, + * this method can take advantage of it: the method assumes that the + * elements from index {@code lo}, inclusive, to {@code start}, + * exclusive are already sorted. + * + * @param a the array in which a range is to be sorted + * @param lo the index of the first element in the range to be sorted + * @param hi the index after the last element in the range to be sorted + * @param start the index of the first element in the range that is + * not already known to be sorted ({@code lo <= start <= hi}) + * @param c comparator to used for the sort + */ + @SuppressWarnings("fallthrough") + private void binarySort(Buffer a, int lo, int hi, int start, Comparator c) { + assert lo <= start && start <= hi; + if (start == lo) + start++; + + K key0 = s.newKey(); + K key1 = s.newKey(); + + Buffer pivotStore = s.allocate(1); + for ( ; start < hi; start++) { + s.copyElement(a, start, pivotStore, 0); + K pivot = s.getKey(pivotStore, 0, key0); + + // Set left (and right) to the index where a[start] (pivot) belongs + int left = lo; + int right = start; + assert left <= right; + /* + * Invariants: + * pivot >= all in [lo, left). + * pivot < all in [right, start). + */ + while (left < right) { + int mid = (left + right) >>> 1; + if (c.compare(pivot, s.getKey(a, mid, key1)) < 0) + right = mid; + else + left = mid + 1; + } + assert left == right; + + /* + * The invariants still hold: pivot >= all in [lo, left) and + * pivot < all in [left, start), so pivot belongs at left. Note + * that if there are elements equal to pivot, left points to the + * first slot after them -- that's why this sort is stable. + * Slide elements over to make room for pivot. 
+ */ + int n = start - left; // The number of elements to move + // Switch is just an optimization for arraycopy in default case + switch (n) { + case 2: s.copyElement(a, left + 1, a, left + 2); + case 1: s.copyElement(a, left, a, left + 1); + break; + default: s.copyRange(a, left, a, left + 1, n); + } + s.copyElement(pivotStore, 0, a, left); + } + } + + /** + * Returns the length of the run beginning at the specified position in + * the specified array and reverses the run if it is descending (ensuring + * that the run will always be ascending when the method returns). + * + * A run is the longest ascending sequence with: + * + * a[lo] <= a[lo + 1] <= a[lo + 2] <= ... + * + * or the longest descending sequence with: + * + * a[lo] > a[lo + 1] > a[lo + 2] > ... + * + * For its intended use in a stable mergesort, the strictness of the + * definition of "descending" is needed so that the call can safely + * reverse a descending sequence without violating stability. + * + * @param a the array in which a run is to be counted and possibly reversed + * @param lo index of the first element in the run + * @param hi index after the last element that may be contained in the run. + It is required that {@code lo < hi}. 
+ * @param c the comparator to used for the sort + * @return the length of the run beginning at the specified position in + * the specified array + */ + private int countRunAndMakeAscending(Buffer a, int lo, int hi, Comparator c) { + assert lo < hi; + int runHi = lo + 1; + if (runHi == hi) + return 1; + + K key0 = s.newKey(); + K key1 = s.newKey(); + + // Find end of run, and reverse range if descending + if (c.compare(s.getKey(a, runHi++, key0), s.getKey(a, lo, key1)) < 0) { // Descending + while (runHi < hi && c.compare(s.getKey(a, runHi, key0), s.getKey(a, runHi - 1, key1)) < 0) + runHi++; + reverseRange(a, lo, runHi); + } else { // Ascending + while (runHi < hi && c.compare(s.getKey(a, runHi, key0), s.getKey(a, runHi - 1, key1)) >= 0) + runHi++; + } + + return runHi - lo; + } + + /** + * Reverse the specified range of the specified array. + * + * @param a the array in which a range is to be reversed + * @param lo the index of the first element in the range to be reversed + * @param hi the index after the last element in the range to be reversed + */ + private void reverseRange(Buffer a, int lo, int hi) { + hi--; + while (lo < hi) { + s.swap(a, lo, hi); + lo++; + hi--; + } + } + + /** + * Returns the minimum acceptable run length for an array of the specified + * length. Natural runs shorter than this will be extended with + * {@link #binarySort}. + * + * Roughly speaking, the computation is: + * + * If n < MIN_MERGE, return n (it's too small to bother with fancy stuff). + * Else if n is an exact power of 2, return MIN_MERGE/2. + * Else return an int k, MIN_MERGE/2 <= k <= MIN_MERGE, such that n/k + * is close to, but strictly less than, an exact power of 2. + * + * For the rationale, see listsort.txt. 
+ * + * @param n the length of the array to be sorted + * @return the length of the minimum run to be merged + */ + private int minRunLength(int n) { + assert n >= 0; + int r = 0; // Becomes 1 if any 1 bits are shifted off + while (n >= MIN_MERGE) { + r |= (n & 1); + n >>= 1; + } + return n + r; + } + + private class SortState { + + /** + * The Buffer being sorted. + */ + private final Buffer a; + + /** + * Length of the sort Buffer. + */ + private final int aLength; + + /** + * The comparator for this sort. + */ + private final Comparator c; + + /** + * When we get into galloping mode, we stay there until both runs win less + * often than MIN_GALLOP consecutive times. + */ + private static final int MIN_GALLOP = 7; + + /** + * This controls when we get *into* galloping mode. It is initialized + * to MIN_GALLOP. The mergeLo and mergeHi methods nudge it higher for + * random data, and lower for highly structured data. + */ + private int minGallop = MIN_GALLOP; + + /** + * Maximum initial size of tmp array, which is used for merging. The array + * can grow to accommodate demand. + * + * Unlike Tim's original C version, we do not allocate this much storage + * when sorting smaller arrays. This change was required for performance. + */ + private static final int INITIAL_TMP_STORAGE_LENGTH = 256; + + /** + * Temp storage for merges. + */ + private Buffer tmp; // Actual runtime type will be Object[], regardless of T + + /** + * Length of the temp storage. + */ + private int tmpLength = 0; + + /** + * A stack of pending runs yet to be merged. Run i starts at + * address base[i] and extends for len[i] elements. It's always + * true (so long as the indices are in bounds) that: + * + * runBase[i] + runLen[i] == runBase[i + 1] + * + * so we could cut the storage for this, but it's a minor amount, + * and keeping all the info explicit simplifies the code. 
+ */ + private int stackSize = 0; // Number of pending runs on stack + private final int[] runBase; + private final int[] runLen; + + /** + * Creates a TimSort instance to maintain the state of an ongoing sort. + * + * @param a the array to be sorted + * @param c the comparator to determine the order of the sort + */ + private SortState(Buffer a, Comparator c, int len) { + this.aLength = len; + this.a = a; + this.c = c; + + // Allocate temp storage (which may be increased later if necessary) + tmpLength = len < 2 * INITIAL_TMP_STORAGE_LENGTH ? len >>> 1 : INITIAL_TMP_STORAGE_LENGTH; + tmp = s.allocate(tmpLength); + + /* + * Allocate runs-to-be-merged stack (which cannot be expanded). The + * stack length requirements are described in listsort.txt. The C + * version always uses the same stack length (85), but this was + * measured to be too expensive when sorting "mid-sized" arrays (e.g., + * 100 elements) in Java. Therefore, we use smaller (but sufficiently + * large) stack lengths for smaller arrays. The "magic numbers" in the + * computation below must be changed if MIN_MERGE is decreased. See + * the MIN_MERGE declaration above for more information. + */ + int stackLen = (len < 120 ? 5 : + len < 1542 ? 10 : + len < 119151 ? 19 : 40); + runBase = new int[stackLen]; + runLen = new int[stackLen]; + } + + /** + * Pushes the specified run onto the pending-run stack. + * + * @param runBase index of the first element in the run + * @param runLen the number of elements in the run + */ + private void pushRun(int runBase, int runLen) { + this.runBase[stackSize] = runBase; + this.runLen[stackSize] = runLen; + stackSize++; + } + + /** + * Examines the stack of runs waiting to be merged and merges adjacent runs + * until the stack invariants are reestablished: + * + * 1. runLen[i - 3] > runLen[i - 2] + runLen[i - 1] + * 2. 
runLen[i - 2] > runLen[i - 1] + * + * This method is called each time a new run is pushed onto the stack, + * so the invariants are guaranteed to hold for i < stackSize upon + * entry to the method. + */ + private void mergeCollapse() { + while (stackSize > 1) { + int n = stackSize - 2; + if ( (n >= 1 && runLen[n-1] <= runLen[n] + runLen[n+1]) + || (n >= 2 && runLen[n-2] <= runLen[n] + runLen[n-1])) { + if (runLen[n - 1] < runLen[n + 1]) + n--; + } else if (runLen[n] > runLen[n + 1]) { + break; // Invariant is established + } + mergeAt(n); + } + } + + /** + * Merges all runs on the stack until only one remains. This method is + * called once, to complete the sort. + */ + private void mergeForceCollapse() { + while (stackSize > 1) { + int n = stackSize - 2; + if (n > 0 && runLen[n - 1] < runLen[n + 1]) + n--; + mergeAt(n); + } + } + + /** + * Merges the two runs at stack indices i and i+1. Run i must be + * the penultimate or antepenultimate run on the stack. In other words, + * i must be equal to stackSize-2 or stackSize-3. + * + * @param i stack index of the first of the two runs to merge + */ + private void mergeAt(int i) { + assert stackSize >= 2; + assert i >= 0; + assert i == stackSize - 2 || i == stackSize - 3; + + int base1 = runBase[i]; + int len1 = runLen[i]; + int base2 = runBase[i + 1]; + int len2 = runLen[i + 1]; + assert len1 > 0 && len2 > 0; + assert base1 + len1 == base2; + + /* + * Record the length of the combined runs; if i is the 3rd-last + * run now, also slide over the last run (which isn't involved + * in this merge). The current run (i+1) goes away in any case. + */ + runLen[i] = len1 + len2; + if (i == stackSize - 3) { + runBase[i + 1] = runBase[i + 2]; + runLen[i + 1] = runLen[i + 2]; + } + stackSize--; + + K key0 = s.newKey(); + + /* + * Find where the first element of run2 goes in run1. Prior elements + * in run1 can be ignored (because they're already in place). 
+ */ + int k = gallopRight(s.getKey(a, base2, key0), a, base1, len1, 0, c); + assert k >= 0; + base1 += k; + len1 -= k; + if (len1 == 0) + return; + + /* + * Find where the last element of run1 goes in run2. Subsequent elements + * in run2 can be ignored (because they're already in place). + */ + len2 = gallopLeft(s.getKey(a, base1 + len1 - 1, key0), a, base2, len2, len2 - 1, c); + assert len2 >= 0; + if (len2 == 0) + return; + + // Merge remaining runs, using tmp array with min(len1, len2) elements + if (len1 <= len2) + mergeLo(base1, len1, base2, len2); + else + mergeHi(base1, len1, base2, len2); + } + + /** + * Locates the position at which to insert the specified key into the + * specified sorted range; if the range contains an element equal to key, + * returns the index of the leftmost equal element. + * + * @param key the key whose insertion point to search for + * @param a the array in which to search + * @param base the index of the first element in the range + * @param len the length of the range; must be > 0 + * @param hint the index at which to begin the search, 0 <= hint < n. + * The closer hint is to the result, the faster this method will run. + * @param c the comparator used to order the range, and to search + * @return the int k, 0 <= k <= n such that a[b + k - 1] < key <= a[b + k], + * pretending that a[b - 1] is minus infinity and a[b + n] is infinity. + * In other words, key belongs at index b + k; or in other words, + * the first k elements of a should precede key, and the last n - k + * should follow it. 
+ */ + private int gallopLeft(K key, Buffer a, int base, int len, int hint, Comparator c) { + assert len > 0 && hint >= 0 && hint < len; + int lastOfs = 0; + int ofs = 1; + K key0 = s.newKey(); + + if (c.compare(key, s.getKey(a, base + hint, key0)) > 0) { + // Gallop right until a[base+hint+lastOfs] < key <= a[base+hint+ofs] + int maxOfs = len - hint; + while (ofs < maxOfs && c.compare(key, s.getKey(a, base + hint + ofs, key0)) > 0) { + lastOfs = ofs; + ofs = (ofs << 1) + 1; + if (ofs <= 0) // int overflow + ofs = maxOfs; + } + if (ofs > maxOfs) + ofs = maxOfs; + + // Make offsets relative to base + lastOfs += hint; + ofs += hint; + } else { // key <= a[base + hint] + // Gallop left until a[base+hint-ofs] < key <= a[base+hint-lastOfs] + final int maxOfs = hint + 1; + while (ofs < maxOfs && c.compare(key, s.getKey(a, base + hint - ofs, key0)) <= 0) { + lastOfs = ofs; + ofs = (ofs << 1) + 1; + if (ofs <= 0) // int overflow + ofs = maxOfs; + } + if (ofs > maxOfs) + ofs = maxOfs; + + // Make offsets relative to base + int tmp = lastOfs; + lastOfs = hint - ofs; + ofs = hint - tmp; + } + assert -1 <= lastOfs && lastOfs < ofs && ofs <= len; + + /* + * Now a[base+lastOfs] < key <= a[base+ofs], so key belongs somewhere + * to the right of lastOfs but no farther right than ofs. Do a binary + * search, with invariant a[base + lastOfs - 1] < key <= a[base + ofs]. + */ + lastOfs++; + while (lastOfs < ofs) { + int m = lastOfs + ((ofs - lastOfs) >>> 1); + + if (c.compare(key, s.getKey(a, base + m, key0)) > 0) + lastOfs = m + 1; // a[base + m] < key + else + ofs = m; // key <= a[base + m] + } + assert lastOfs == ofs; // so a[base + ofs - 1] < key <= a[base + ofs] + return ofs; + } + + /** + * Like gallopLeft, except that if the range contains an element equal to + * key, gallopRight returns the index after the rightmost equal element. 
+ * + * @param key the key whose insertion point to search for + * @param a the array in which to search + * @param base the index of the first element in the range + * @param len the length of the range; must be > 0 + * @param hint the index at which to begin the search, 0 <= hint < n. + * The closer hint is to the result, the faster this method will run. + * @param c the comparator used to order the range, and to search + * @return the int k, 0 <= k <= n such that a[b + k - 1] <= key < a[b + k] + */ + private int gallopRight(K key, Buffer a, int base, int len, int hint, Comparator c) { + assert len > 0 && hint >= 0 && hint < len; + + int ofs = 1; + int lastOfs = 0; + K key1 = s.newKey(); + + if (c.compare(key, s.getKey(a, base + hint, key1)) < 0) { + // Gallop left until a[b+hint - ofs] <= key < a[b+hint - lastOfs] + int maxOfs = hint + 1; + while (ofs < maxOfs && c.compare(key, s.getKey(a, base + hint - ofs, key1)) < 0) { + lastOfs = ofs; + ofs = (ofs << 1) + 1; + if (ofs <= 0) // int overflow + ofs = maxOfs; + } + if (ofs > maxOfs) + ofs = maxOfs; + + // Make offsets relative to b + int tmp = lastOfs; + lastOfs = hint - ofs; + ofs = hint - tmp; + } else { // a[b + hint] <= key + // Gallop right until a[b+hint + lastOfs] <= key < a[b+hint + ofs] + int maxOfs = len - hint; + while (ofs < maxOfs && c.compare(key, s.getKey(a, base + hint + ofs, key1)) >= 0) { + lastOfs = ofs; + ofs = (ofs << 1) + 1; + if (ofs <= 0) // int overflow + ofs = maxOfs; + } + if (ofs > maxOfs) + ofs = maxOfs; + + // Make offsets relative to b + lastOfs += hint; + ofs += hint; + } + assert -1 <= lastOfs && lastOfs < ofs && ofs <= len; + + /* + * Now a[b + lastOfs] <= key < a[b + ofs], so key belongs somewhere to + * the right of lastOfs but no farther right than ofs. Do a binary + * search, with invariant a[b + lastOfs - 1] <= key < a[b + ofs]. 
+ */ + lastOfs++; + while (lastOfs < ofs) { + int m = lastOfs + ((ofs - lastOfs) >>> 1); + + if (c.compare(key, s.getKey(a, base + m, key1)) < 0) + ofs = m; // key < a[b + m] + else + lastOfs = m + 1; // a[b + m] <= key + } + assert lastOfs == ofs; // so a[b + ofs - 1] <= key < a[b + ofs] + return ofs; + } + + /** + * Merges two adjacent runs in place, in a stable fashion. The first + * element of the first run must be greater than the first element of the + * second run (a[base1] > a[base2]), and the last element of the first run + * (a[base1 + len1-1]) must be greater than all elements of the second run. + * + * For performance, this method should be called only when len1 <= len2; + * its twin, mergeHi should be called if len1 >= len2. (Either method + * may be called if len1 == len2.) + * + * @param base1 index of first element in first run to be merged + * @param len1 length of first run to be merged (must be > 0) + * @param base2 index of first element in second run to be merged + * (must be aBase + aLen) + * @param len2 length of second run to be merged (must be > 0) + */ + private void mergeLo(int base1, int len1, int base2, int len2) { + assert len1 > 0 && len2 > 0 && base1 + len1 == base2; + + // Copy first run into temp array + Buffer a = this.a; // For performance + Buffer tmp = ensureCapacity(len1); + s.copyRange(a, base1, tmp, 0, len1); + + int cursor1 = 0; // Indexes into tmp array + int cursor2 = base2; // Indexes int a + int dest = base1; // Indexes int a + + // Move first element of second run and deal with degenerate cases + s.copyElement(a, cursor2++, a, dest++); + if (--len2 == 0) { + s.copyRange(tmp, cursor1, a, dest, len1); + return; + } + if (len1 == 1) { + s.copyRange(a, cursor2, a, dest, len2); + s.copyElement(tmp, cursor1, a, dest + len2); // Last elt of run 1 to end of merge + return; + } + + K key0 = s.newKey(); + K key1 = s.newKey(); + + Comparator c = this.c; // Use local variable for performance + int minGallop = this.minGallop; // " 
" " " " + outer: + while (true) { + int count1 = 0; // Number of times in a row that first run won + int count2 = 0; // Number of times in a row that second run won + + /* + * Do the straightforward thing until (if ever) one run starts + * winning consistently. + */ + do { + assert len1 > 1 && len2 > 0; + if (c.compare(s.getKey(a, cursor2, key0), s.getKey(tmp, cursor1, key1)) < 0) { + s.copyElement(a, cursor2++, a, dest++); + count2++; + count1 = 0; + if (--len2 == 0) + break outer; + } else { + s.copyElement(tmp, cursor1++, a, dest++); + count1++; + count2 = 0; + if (--len1 == 1) + break outer; + } + } while ((count1 | count2) < minGallop); + + /* + * One run is winning so consistently that galloping may be a + * huge win. So try that, and continue galloping until (if ever) + * neither run appears to be winning consistently anymore. + */ + do { + assert len1 > 1 && len2 > 0; + count1 = gallopRight(s.getKey(a, cursor2, key0), tmp, cursor1, len1, 0, c); + if (count1 != 0) { + s.copyRange(tmp, cursor1, a, dest, count1); + dest += count1; + cursor1 += count1; + len1 -= count1; + if (len1 <= 1) // len1 == 1 || len1 == 0 + break outer; + } + s.copyElement(a, cursor2++, a, dest++); + if (--len2 == 0) + break outer; + + count2 = gallopLeft(s.getKey(tmp, cursor1, key0), a, cursor2, len2, 0, c); + if (count2 != 0) { + s.copyRange(a, cursor2, a, dest, count2); + dest += count2; + cursor2 += count2; + len2 -= count2; + if (len2 == 0) + break outer; + } + s.copyElement(tmp, cursor1++, a, dest++); + if (--len1 == 1) + break outer; + minGallop--; + } while (count1 >= MIN_GALLOP | count2 >= MIN_GALLOP); + if (minGallop < 0) + minGallop = 0; + minGallop += 2; // Penalize for leaving gallop mode + } // End of "outer" loop + this.minGallop = minGallop < 1 ? 
1 : minGallop; // Write back to field + + if (len1 == 1) { + assert len2 > 0; + s.copyRange(a, cursor2, a, dest, len2); + s.copyElement(tmp, cursor1, a, dest + len2); // Last elt of run 1 to end of merge + } else if (len1 == 0) { + throw new IllegalArgumentException( + "Comparison method violates its general contract!"); + } else { + assert len2 == 0; + assert len1 > 1; + s.copyRange(tmp, cursor1, a, dest, len1); + } + } + + /** + * Like mergeLo, except that this method should be called only if + * len1 >= len2; mergeLo should be called if len1 <= len2. (Either method + * may be called if len1 == len2.) + * + * @param base1 index of first element in first run to be merged + * @param len1 length of first run to be merged (must be > 0) + * @param base2 index of first element in second run to be merged + * (must be aBase + aLen) + * @param len2 length of second run to be merged (must be > 0) + */ + private void mergeHi(int base1, int len1, int base2, int len2) { + assert len1 > 0 && len2 > 0 && base1 + len1 == base2; + + // Copy second run into temp array + Buffer a = this.a; // For performance + Buffer tmp = ensureCapacity(len2); + s.copyRange(a, base2, tmp, 0, len2); + + int cursor1 = base1 + len1 - 1; // Indexes into a + int cursor2 = len2 - 1; // Indexes into tmp array + int dest = base2 + len2 - 1; // Indexes into a + + K key0 = s.newKey(); + K key1 = s.newKey(); + + // Move last element of first run and deal with degenerate cases + s.copyElement(a, cursor1--, a, dest--); + if (--len1 == 0) { + s.copyRange(tmp, 0, a, dest - (len2 - 1), len2); + return; + } + if (len2 == 1) { + dest -= len1; + cursor1 -= len1; + s.copyRange(a, cursor1 + 1, a, dest + 1, len1); + s.copyElement(tmp, cursor2, a, dest); + return; + } + + Comparator c = this.c; // Use local variable for performance + int minGallop = this.minGallop; // " " " " " + outer: + while (true) { + int count1 = 0; // Number of times in a row that first run won + int count2 = 0; // Number of times in a row that 
second run won + + /* + * Do the straightforward thing until (if ever) one run + * appears to win consistently. + */ + do { + assert len1 > 0 && len2 > 1; + if (c.compare(s.getKey(tmp, cursor2, key0), s.getKey(a, cursor1, key1)) < 0) { + s.copyElement(a, cursor1--, a, dest--); + count1++; + count2 = 0; + if (--len1 == 0) + break outer; + } else { + s.copyElement(tmp, cursor2--, a, dest--); + count2++; + count1 = 0; + if (--len2 == 1) + break outer; + } + } while ((count1 | count2) < minGallop); + + /* + * One run is winning so consistently that galloping may be a + * huge win. So try that, and continue galloping until (if ever) + * neither run appears to be winning consistently anymore. + */ + do { + assert len1 > 0 && len2 > 1; + count1 = len1 - gallopRight(s.getKey(tmp, cursor2, key0), a, base1, len1, len1 - 1, c); + if (count1 != 0) { + dest -= count1; + cursor1 -= count1; + len1 -= count1; + s.copyRange(a, cursor1 + 1, a, dest + 1, count1); + if (len1 == 0) + break outer; + } + s.copyElement(tmp, cursor2--, a, dest--); + if (--len2 == 1) + break outer; + + count2 = len2 - gallopLeft(s.getKey(a, cursor1, key0), tmp, 0, len2, len2 - 1, c); + if (count2 != 0) { + dest -= count2; + cursor2 -= count2; + len2 -= count2; + s.copyRange(tmp, cursor2 + 1, a, dest + 1, count2); + if (len2 <= 1) // len2 == 1 || len2 == 0 + break outer; + } + s.copyElement(a, cursor1--, a, dest--); + if (--len1 == 0) + break outer; + minGallop--; + } while (count1 >= MIN_GALLOP | count2 >= MIN_GALLOP); + if (minGallop < 0) + minGallop = 0; + minGallop += 2; // Penalize for leaving gallop mode + } // End of "outer" loop + this.minGallop = minGallop < 1 ? 
1 : minGallop; // Write back to field + + if (len2 == 1) { + assert len1 > 0; + dest -= len1; + cursor1 -= len1; + s.copyRange(a, cursor1 + 1, a, dest + 1, len1); + s.copyElement(tmp, cursor2, a, dest); // Move first elt of run2 to front of merge + } else if (len2 == 0) { + throw new IllegalArgumentException( + "Comparison method violates its general contract!"); + } else { + assert len1 == 0; + assert len2 > 0; + s.copyRange(tmp, 0, a, dest - (len2 - 1), len2); + } + } + + /** + * Ensures that the external array tmp has at least the specified + * number of elements, increasing its size if necessary. The size + * increases exponentially to ensure amortized linear time complexity. + * + * @param minCapacity the minimum required capacity of the tmp array + * @return tmp, whether or not it grew + */ + private Buffer ensureCapacity(int minCapacity) { + if (tmpLength < minCapacity) { + // Compute smallest power of 2 > minCapacity + int newSize = minCapacity; + newSize |= newSize >> 1; + newSize |= newSize >> 2; + newSize |= newSize >> 4; + newSize |= newSize >> 8; + newSize |= newSize >> 16; + newSize++; + + if (newSize < 0) // Not bloody likely! + newSize = minCapacity; + else + newSize = Math.min(newSize, aLength >>> 1); + + tmp = s.allocate(newSize); + tmpLength = newSize; + } + return tmp; + } + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeExternalRowSorter.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeExternalRowSorter.java new file mode 100644 index 000000000..4d88fe81a --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeExternalRowSorter.java @@ -0,0 +1,307 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + +import io.mycat.memory.MyCatMemory; +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.row.StructType; +import io.mycat.memory.unsafe.row.UnsafeRow; +import io.mycat.sqlengine.mpp.OrderCol; +import io.mycat.sqlengine.mpp.RowDataPacketSorter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.util.Iterator; +import java.util.List; + +public final class UnsafeExternalRowSorter { + + private final Logger logger = LoggerFactory.getLogger(UnsafeExternalSorter.class); + + private long numRowsInserted = 0; + private final StructType schema; + private final PrefixComputer prefixComputer; + private final UnsafeExternalSorter sorter; + private final PrefixComparator prefixComparator; + private final RecordComparator recordComparator; + + + public abstract static class PrefixComputer { + protected abstract long computePrefix(UnsafeRow row) throws UnsupportedEncodingException; + } + + public UnsafeExternalRowSorter(DataNodeMemoryManager dataNodeMemoryManager, + @Nonnull MyCatMemory myCatMemory, + StructType schema, + PrefixComparator prefixComparator, + PrefixComputer prefixComputer, + long pageSizeBytes, + boolean canUseRadixSort, + 
boolean enableSort) throws IOException { + this.schema = schema; + this.prefixComputer = prefixComputer; + this.prefixComparator = prefixComparator; + this.recordComparator = new RowComparator(schema); + sorter = UnsafeExternalSorter.create( + dataNodeMemoryManager, + myCatMemory.getBlockManager(), + myCatMemory.getSerializerManager(), + recordComparator, + prefixComparator, + myCatMemory.getConf().getSizeAsBytes("mycat.pointer.array.len","1K"), + pageSizeBytes, + canUseRadixSort, + enableSort); + } + + + public void insertRow(UnsafeRow row) throws IOException { + final long prefix = prefixComputer.computePrefix(row); + + sorter.insertRecord( + row.getBaseObject(), + row.getBaseOffset(), + row.getSizeInBytes(), + prefix); + + numRowsInserted++; + } + /** + * Return total rows + */ + public long getNumRowsInserted() { + return numRowsInserted; + } + /** + * Return the peak memory used so far, in bytes. + */ + public long getPeakMemoryUsage() { + return sorter.getPeakMemoryUsedBytes(); + } + + /** + * @return the total amount of time spent sorting data (in-memory only). 
+ */ + public long getSortTimeNanos() { + return sorter.getSortTimeNanos(); + } + + public void cleanupResources() { + sorter.cleanupResources(); + } + + public Iterator sort() throws IOException { + try { + final UnsafeSorterIterator sortedIterator = sorter.getSortedIterator(); + if (!sortedIterator.hasNext()) { + cleanupResources(); + } + + return new AbstractScalaRowIterator() { + + private final int numFields = schema.length(); + private UnsafeRow row = new UnsafeRow(numFields); + + @Override + public boolean hasNext() { + return sortedIterator.hasNext(); + } + + @Override + public UnsafeRow next() { + try { + sortedIterator.loadNext(); + row.pointTo(sortedIterator.getBaseObject(), sortedIterator.getBaseOffset(), sortedIterator.getRecordLength()); + if (!hasNext()) { + UnsafeRow copy = row.copy(); // so that we don't have dangling pointers to freed page + row = null; // so that we don't keep references to the base object + cleanupResources(); + return copy; + } else { + return row; + } + } catch (IOException e) { + cleanupResources(); + // Scala iterators don't declare any checked exceptions, so we need to use this hack + // to re-throw the exception: + Platform.throwException(e); + } + throw new RuntimeException("Exception should have been re-thrown in next()"); + } + + @Override + public void remove() { + + } + }; + } catch (IOException e) { + cleanupResources(); + throw e; + } + } + + + public UnsafeSorterIterator getRowUnsafeSorterIterator() throws IOException{ + return sorter.getSortedIterator(); + } + + + public Iterator mergerSort(List list) throws IOException { + + UnsafeRowsMerger unsafeRowsMerger = new UnsafeRowsMerger(recordComparator,prefixComparator,list.size()); + + for (int i = 0; i () { + + private final int numFields = schema.length(); + private UnsafeRow row = new UnsafeRow(numFields); + + @Override + public boolean hasNext() { + return sortedIterator.hasNext(); + } + + @Override + public UnsafeRow next() { + try { + sortedIterator.loadNext(); 
+ row.pointTo( + sortedIterator.getBaseObject(), + sortedIterator.getBaseOffset(), + sortedIterator.getRecordLength()); + if (!hasNext()) { + UnsafeRow copy = row.copy(); // so that we don't have dangling pointers to freed page + row = null; // so that we don't keep references to the base object + cleanupResources(); + return copy; + } else { + return row; + } + } catch (IOException e) { + cleanupResources(); + // Scala iterators don't declare any checked exceptions, so we need to use this hack + // to re-throw the exception: + Platform.throwException(e); + } + throw new RuntimeException("Exception should have been re-thrown in next()"); + } + + @Override + public void remove() { + + } + }; + } catch (IOException e) { + cleanupResources(); + throw e; + } + } + + + public Iterator sort(Iterator inputIterator) throws IOException { + + while (inputIterator.hasNext()) { + insertRow(inputIterator.next()); + } + + return sort(); + } + + + + private static final class RowComparator extends RecordComparator { + private final int numFields; + private final UnsafeRow row1; + private final UnsafeRow row2; + private final StructType schema; + + RowComparator(StructType schema) { + + assert schema.length()>=0; + + this.schema = schema; + this.numFields = schema.length(); + this.row1 = new UnsafeRow(numFields); + this.row2 = new UnsafeRow(numFields); + } + + @Override + public int compare(Object baseObj1, long baseOff1, Object baseObj2, long baseOff2) { + OrderCol[] orderCols = schema.getOrderCols(); + + if(orderCols == null){ + return 0; + } + + /**取出一行数据*/ + row1.pointTo(baseObj1, baseOff1, -1); + row2.pointTo(baseObj2, baseOff2, -1); + int cmp = 0; + int len = orderCols.length; + + int type = OrderCol.COL_ORDER_TYPE_ASC; /**升序*/ + + for (int i = 0; i < len; i++) { + int colIndex = orderCols[i].colMeta.colIndex; + /**取出一行数据中的列值,进行大小比对*/ + byte[] left = null; + byte[] right = null; + + + + if(!row1.isNullAt(colIndex)) { + left = row1.getBinary(colIndex); + }else { + left = new 
byte[1]; + left[0] = UnsafeRow.NULL_MARK; + } + + + if(!row2.isNullAt(colIndex)) { + right = row2.getBinary(colIndex); + }else { + right = new byte[1]; + right[0] = UnsafeRow.NULL_MARK; + } + + if (orderCols[i].orderType == type) { + cmp = RowDataPacketSorter.compareObject(left, right, orderCols[i]); + } else { + cmp = RowDataPacketSorter.compareObject(right, left, orderCols[i]); + } + if (cmp != 0) + return cmp; + } + return cmp; + } + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeExternalSorter.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeExternalSorter.java new file mode 100644 index 000000000..84ac3b65a --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeExternalSorter.java @@ -0,0 +1,737 @@ + + +package io.mycat.memory.unsafe.utils.sort; + +import com.google.common.annotations.VisibleForTesting; + +import io.mycat.MycatServer; +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.array.LongArray; +import io.mycat.memory.unsafe.memory.MemoryBlock; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.memory.mm.MemoryConsumer; +import io.mycat.memory.unsafe.storage.DataNodeDiskManager; +import io.mycat.memory.unsafe.storage.SerializerManager; +import io.mycat.memory.unsafe.utils.JavaUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.Nullable; +import java.io.File; +import java.io.IOException; +import java.util.LinkedList; +import java.util.Queue; + +/** + * External sorter based on {@link UnsafeInMemorySorter}. 
+ */ +public final class UnsafeExternalSorter extends MemoryConsumer { + + private final Logger logger = LoggerFactory.getLogger(UnsafeExternalSorter.class); + + @Nullable + private final PrefixComparator prefixComparator; + @Nullable + private final RecordComparator recordComparator; + + + private final DataNodeMemoryManager dataNodeMemoryManager; + private final DataNodeDiskManager blockManager; + private final SerializerManager serializerManager; + + + /** The buffer size to use when writing spills using DiskRowWriter */ + private final int fileBufferSizeBytes; + + /** + * Memory pages that hold the records being sorted. The pages in this list are freed when + * spilling, although in principle we could recycle these pages across spills (on the other hand, + * this might not be necessary if we maintained a pool of re-usable pages in the DataNodeMemoryManager + * itself). + */ + private final LinkedList allocatedPages = new LinkedList(); + + private final LinkedList spillWriters = new LinkedList(); + + // These variables are reset after spilling: + @Nullable + private volatile UnsafeInMemorySorter inMemSorter; + + private MemoryBlock currentPage = null; + private long pageCursor = -1; + private long peakMemoryUsedBytes = 0; + private long totalSpillBytes = 0L; + private long totalSortTimeNanos = 0L; + private volatile SpillableIterator readingIterator = null; + + public static UnsafeExternalSorter createWithExistingInMemorySorter( + DataNodeMemoryManager dataNodeMemoryManager, + DataNodeDiskManager blockManager, + SerializerManager serializerManager, + RecordComparator recordComparator, + PrefixComparator prefixComparator, + int initialSize, + long pageSizeBytes, + UnsafeInMemorySorter inMemorySorter,boolean enableSort) throws IOException { + + UnsafeExternalSorter sorter = new UnsafeExternalSorter(dataNodeMemoryManager, blockManager, + serializerManager,recordComparator, prefixComparator, initialSize, + pageSizeBytes, inMemorySorter, false /* ignored 
*/,enableSort); + + sorter.spill(Long.MAX_VALUE, sorter); + // The external sorter will be used to insert records, in-memory sorter is not needed. + sorter.inMemSorter = null; + return sorter; + } + + public static UnsafeExternalSorter create( + DataNodeMemoryManager dataNodeMemoryManager, + DataNodeDiskManager blockManager, + SerializerManager serializerManager, + RecordComparator recordComparator, + PrefixComparator prefixComparator, + long initialSize, + long pageSizeBytes, + boolean canUseRadixSort, + boolean enableSort) { + + return new UnsafeExternalSorter(dataNodeMemoryManager, blockManager, serializerManager, recordComparator, prefixComparator, initialSize, pageSizeBytes, null, + canUseRadixSort,enableSort); + + } + + private UnsafeExternalSorter( + DataNodeMemoryManager dataNodeMemoryManager, + DataNodeDiskManager blockManager, + SerializerManager serializerManager, + RecordComparator recordComparator, + PrefixComparator prefixComparator, + long initialSize, + long pageSizeBytes, + @Nullable UnsafeInMemorySorter existingInMemorySorter, + boolean canUseRadixSort,boolean enableSort) { + + super(dataNodeMemoryManager, pageSizeBytes); + + this.dataNodeMemoryManager = dataNodeMemoryManager; + this.blockManager = blockManager; + this.serializerManager = serializerManager; + this.recordComparator = recordComparator; + this.prefixComparator = prefixComparator; + + + if(MycatServer.getInstance().getMyCatMemory() != null){ + this.fileBufferSizeBytes = (int) MycatServer.getInstance(). 
+ getMyCatMemory().getConf().getSizeAsBytes("mycat.merge.file.buffer", "32k"); + }else{ + this.fileBufferSizeBytes = 32*1024; + } + + if (existingInMemorySorter == null) { + this.inMemSorter = new UnsafeInMemorySorter( + this, dataNodeMemoryManager, recordComparator, prefixComparator, initialSize, canUseRadixSort,enableSort); + } else { + this.inMemSorter = existingInMemorySorter; + } + + this.peakMemoryUsedBytes = getMemoryUsage(); + } + + /** + * Marks the current page as no-more-space-available, and as a result, either allocate a + * new page or spill when we see the next record. + */ + @VisibleForTesting + public void closeCurrentPage() { + if (currentPage != null) { + pageCursor = currentPage.getBaseOffset() + currentPage.size(); + } + } + + /** + * Sort and spill the current records in response to memory pressure. + */ + @Override + public long spill(long size, MemoryConsumer trigger) throws IOException { + if (trigger != this) { + if (readingIterator != null) { + return readingIterator.spill(); + } + return 0L; // this should throw exception + } + + if (inMemSorter == null || inMemSorter.numRecords() <= 0) { + return 0L; + } + + logger.info("Thread" + Thread.currentThread().getId() +" spilling sort data of "+ JavaUtils.bytesToString(getMemoryUsage()) + +" to disk ("+ spillWriters.size()+" times so far)"); + + // We only write out contents of the inMemSorter if it is not empty. + if (inMemSorter.numRecords() > 0) { + + /** + * 创建一个写外存的SpillWriter,当前内存数据已经排序了,直接写到磁盘中. 
+ */ + final UnsafeSorterSpillWriter spillWriter = new UnsafeSorterSpillWriter(blockManager, fileBufferSizeBytes,/**writeMetrics,*/ inMemSorter.numRecords()); + + /** + * 添加到SpillWriter列表中,标志了有多少 spillWriters.size()写到磁盘中了。 + */ + spillWriters.add(spillWriter); + + /** + * 获取本次内存中排序的迭代器,这个函数执行In Memory Sort use time sorter 或者 radix sorter + */ + final UnsafeSorterIterator sortedRecords = inMemSorter.getSortedIterator(); + + /** + * 一条一条记录写入磁盘 + */ + while (sortedRecords.hasNext()) { + /** + * + */ + sortedRecords.loadNext(); + /** + * 获取当前记录的起始对象实例,on-heap为obj,off-heap为null + */ + final Object baseObject = sortedRecords.getBaseObject(); + + /** + * 获取当前记录的相对起始对象实例地址偏移量 + */ + final long baseOffset = sortedRecords.getBaseOffset(); + + /** + * 当前记录的长度 + */ + final int recordLength = sortedRecords.getRecordLength(); + /** + * 把数据写入磁盘写入器中 Write a record to a spill file. + */ + spillWriter.write(baseObject, baseOffset, recordLength, sortedRecords.getKeyPrefix()); + } + + /** + * 关闭spillWriter + */ + spillWriter.close(); + } + + /** + * 释放当前sorter所占的内存数据 + */ + final long spillSize = freeMemory(); + // Note that this is more-or-less going to be a multiple of the page size, so wasted space in + // pages will currently be counted as memory spilled even though that space isn't actually + // written to disk. This also counts the space needed to store the sorter's pointer array. + inMemSorter.reset(); + // Reset the in-memory sorter's pointer array only after freeing up the memory pages holding the + // records. Otherwise, if the task is over allocated memory, then without freeing the memory + // pages, we might not be able to get memory for the pointer array. + + totalSpillBytes += spillSize; + return spillSize; + } + + /** + * Return the total memory usage of this sorter, including the data pages and the sorter's pointer + * array. 
+ */ + private long getMemoryUsage() { + long totalPageSize = 0; + for (MemoryBlock page : allocatedPages) { + totalPageSize += page.size(); + } + return ((inMemSorter == null) ? 0 : inMemSorter.getMemoryUsage()) + totalPageSize; + } + + private void updatePeakMemoryUsed() { + long mem = getMemoryUsage(); + if (mem > peakMemoryUsedBytes) { + peakMemoryUsedBytes = mem; + } + } + + /** + * Return the peak memory used so far, in bytes. + */ + public long getPeakMemoryUsedBytes() { + updatePeakMemoryUsed(); + return peakMemoryUsedBytes; + } + + /** + * @return the total amount of time spent sorting data (in-memory only). + */ + public long getSortTimeNanos() { + UnsafeInMemorySorter sorter = inMemSorter; + if (sorter != null) { + return sorter.getSortTimeNanos(); + } + return totalSortTimeNanos; + } + + /** + * Return the total number of bytes that has been spilled into disk so far. + */ + public long getSpillSize() { + return totalSpillBytes; + } + + @VisibleForTesting + public int getNumberOfAllocatedPages() { + return allocatedPages.size(); + } + + /** + * Free this sorter's data pages. + * + * @return the number of bytes freed. + */ + private long freeMemory() { + updatePeakMemoryUsed(); + long memoryFreed = 0; + for (MemoryBlock block : allocatedPages) { + memoryFreed += block.size(); + freePage(block); + } + allocatedPages.clear(); + currentPage = null; + pageCursor = 0; + return memoryFreed; + } + + /** + * Deletes any spill files created by this sorter. 
+ */ + private void deleteSpillFiles() { + for (UnsafeSorterSpillWriter spill : spillWriters) { + File file = spill.getFile(); + if(file == null) + continue; + try { + JavaUtils.deleteRecursively(file.getParentFile().getParentFile()); + } catch (IOException e) { + logger.error(e.getMessage()); + } + + if (file.exists()) { + if (!file.delete()) { + logger.error("Was unable to delete spill file {}", file.getAbsolutePath()); + } + } + } + } + + /** + * Frees this sorter's in-memory data structures and cleans up its spill files. + */ + public void cleanupResources() { + synchronized (this) { + deleteSpillFiles(); + freeMemory(); + if (inMemSorter != null) { + inMemSorter.free(); + inMemSorter = null; + } + } + } + + /** + * Checks whether there is enough space to insert an additional record in to the sort pointer + * array and grows the array if additional space is required. If the required space cannot be + * obtained, then the in-memory data will be spilled to disk. + */ + private void growPointerArrayIfNecessary() throws IOException { + assert(inMemSorter != null); + if (!inMemSorter.hasSpaceForAnotherRecord()) { + long used = inMemSorter.getMemoryUsage(); + LongArray array; + try { + // could trigger spilling + array = allocateLongArray(used / 8 * 2); + } catch (OutOfMemoryError e) { + // should have trigger spilling + if (!inMemSorter.hasSpaceForAnotherRecord()) { + logger.error("Unable to grow the pointer array"); + throw e; + } + return; + } + // check if spilling is triggered or not + if (inMemSorter.hasSpaceForAnotherRecord()) { + freeLongArray(array); + } else { + inMemSorter.expandPointerArray(array); + } + } + } + + /** + * Allocates more memory in order to insert an additional record. This will request additional + * memory from the memory manager and spill if the requested memory can not be obtained. + * + * @param required the required space in the data page, in bytes, including space for storing + * the record size. 
This must be less than or equal to the page size (records + * that exceed the page size are handled via a different code path which uses + * special overflow pages). + */ + private void acquireNewPageIfNecessary(int required) { + if (currentPage == null || + pageCursor + required > currentPage.getBaseOffset() + currentPage.size()) { + // TODO: try to find space on previous pages + currentPage = allocatePage(required); + pageCursor = currentPage.getBaseOffset(); + allocatedPages.add(currentPage); + } + } + + /** + * Write a record to the sorter. + */ + public void insertRecord(Object recordBase, long recordOffset, int length, long prefix) + throws IOException { + + growPointerArrayIfNecessary(); + // Need 4 bytes to store the record length. + final int required = length + 4; + acquireNewPageIfNecessary(required); + + final Object base = currentPage.getBaseObject(); + + final long recordAddress = dataNodeMemoryManager.encodePageNumberAndOffset(currentPage,pageCursor); + Platform.putInt(base, pageCursor, length); + pageCursor += 4; + Platform.copyMemory(recordBase,recordOffset,base,pageCursor,length); + pageCursor += length; + assert(inMemSorter != null); + inMemSorter.insertRecord(recordAddress,prefix); + } + + /** + * Write a key-value record to the sorter. 
The key and value will be put together in-memory, + * using the following format: + * + * record length (4 bytes), key length (4 bytes), key data, value data + * + * record length = key length + value length + 4 + */ + public void insertKVRecord(Object keyBase, long keyOffset, int keyLen, + Object valueBase, long valueOffset, int valueLen, long prefix) + throws IOException { + + growPointerArrayIfNecessary(); + final int required = keyLen + valueLen + 4 + 4; + acquireNewPageIfNecessary(required); + + /** + * 数据k-v插入currentPage(MemoryBlock)页内,当前插入位置pageCursor + */ + final Object base = currentPage.getBaseObject(); + /** + * 通过currentPage和pageCursor页内偏移量,codec一个地址处理,该条记录存数据的 + * 存数据的起始位置 + */ + final long recordAddress = dataNodeMemoryManager.encodePageNumberAndOffset(currentPage,pageCursor); + + /** + * 一条记录的总长度=keyLen + valueLen + record length (一般是int类型4个字节) + */ + Platform.putInt(base,pageCursor, keyLen + valueLen + 4/**record length所占的长度*/); + + /** + * 移动4个bytes + */ + pageCursor += 4; + /** + * 存key len的size + */ + Platform.putInt(base,pageCursor, keyLen); + + /** + * 移动4个bytes + */ + pageCursor += 4; + + /** + * 存key的值 + */ + Platform.copyMemory(keyBase, keyOffset, base, pageCursor, keyLen); + /** + * 移动keyLen个bytes + */ + pageCursor += keyLen; + + /** + * 存value的值 + */ + Platform.copyMemory(valueBase, valueOffset, base, pageCursor, valueLen); + + /** + * 移动valueLen个bytes + */ + pageCursor += valueLen; + + assert(inMemSorter != null); + /** + * 把对应的指针插入到longArray数组中, + * longArray存指向Page内一个指针的所存储的值 + */ + inMemSorter.insertRecord(recordAddress, prefix); + } + + /** + * Merges another UnsafeExternalSorters into this one, the other one will be emptied. + * + * @throws IOException + */ + public void merge(UnsafeExternalSorter other) throws IOException { + other.spill(); + spillWriters.addAll(other.spillWriters); + // remove them from `spillWriters`, or the files will be deleted in `cleanupResources`. 
+ other.spillWriters.clear(); + other.cleanupResources(); + } + + /** + * SpillableIterator是一个支持内存+外存排序的迭代器 + * Returns a sorted iterator. It is the caller's responsibility to call `cleanupResources()` + * after consuming this iterator. + */ + + public UnsafeSorterIterator getSortedIterator() throws IOException { + assert(recordComparator != null); + if (spillWriters.isEmpty()) { + assert(inMemSorter != null); + readingIterator = new SpillableIterator(inMemSorter.getSortedIterator()); + return readingIterator; + } else { + /** + * 合并多个UnsafeSorterSpillWriter对应的文件,进行排序???? + */ + final UnsafeSorterSpillMerger spillMerger = + new UnsafeSorterSpillMerger(recordComparator, prefixComparator, spillWriters.size()); + + for (UnsafeSorterSpillWriter spillWriter : spillWriters) { + /** + * 通过UnsafeSorterSpillReader迭代器放入要合并的UnsafeSorterSpillMerger中 + */ + spillMerger.addSpillIfNotEmpty(spillWriter.getReader(serializerManager)); + } + if (inMemSorter != null) { + readingIterator = new SpillableIterator(inMemSorter.getSortedIterator()); + spillMerger.addSpillIfNotEmpty(readingIterator); + } + /** + * 最终调用排序器排序,重点分析函数 + */ + return spillMerger.getSortedIterator(); + } + } + + /** + * An UnsafeSorterIterator that support spilling. 
+ */ + public class SpillableIterator extends UnsafeSorterIterator { + private UnsafeSorterIterator upstream; + private UnsafeSorterIterator nextUpstream = null; + private MemoryBlock lastPage = null; + private boolean loaded = false; + private int numRecords = 0; + + SpillableIterator(UnsafeInMemorySorter.SortedIterator inMemIterator) { + this.upstream = inMemIterator; + this.numRecords = inMemIterator.getNumRecords(); + } + + public int getNumRecords() { + return numRecords; + } + + public long spill() throws IOException { + synchronized (this) { + if (!(upstream instanceof UnsafeInMemorySorter.SortedIterator && nextUpstream == null + && numRecords > 0)) { + return 0L; + } + + UnsafeInMemorySorter.SortedIterator inMemIterator = + ((UnsafeInMemorySorter.SortedIterator) upstream).clone(); + + // Iterate over the records that have not been returned and spill them. + final UnsafeSorterSpillWriter spillWriter = + new UnsafeSorterSpillWriter(blockManager, fileBufferSizeBytes,/**writeMetrics,*/ numRecords); + while (inMemIterator.hasNext()) { + inMemIterator.loadNext(); + final Object baseObject = inMemIterator.getBaseObject(); + final long baseOffset = inMemIterator.getBaseOffset(); + final int recordLength = inMemIterator.getRecordLength(); + spillWriter.write(baseObject, baseOffset, recordLength, inMemIterator.getKeyPrefix()); + } + spillWriter.close(); + spillWriters.add(spillWriter); + nextUpstream = spillWriter.getReader(serializerManager); + + long released = 0L; + synchronized (UnsafeExternalSorter.this) { + // release the pages except the one that is used. There can still be a caller that + // is accessing the current record. We free this page in that caller's next loadNext() + // call. 
+ for (MemoryBlock page : allocatedPages) { + if (!loaded || page.getBaseObject() != upstream.getBaseObject()) { + released += page.size(); + freePage(page); + } else { + lastPage = page; + } + } + allocatedPages.clear(); + } + + // in-memory sorter will not be used after spilling + assert(inMemSorter != null); + released += inMemSorter.getMemoryUsage(); + totalSortTimeNanos += inMemSorter.getSortTimeNanos(); + inMemSorter.free(); + inMemSorter = null; + totalSpillBytes += released; + return released; + } + } + + @Override + public boolean hasNext() { + return numRecords > 0; + } + + @Override + public void loadNext() throws IOException { + synchronized (this) { + loaded = true; + if (nextUpstream != null) { + // Just consumed the last record from in memory iterator + if (lastPage != null) { + freePage(lastPage); + lastPage = null; + } + upstream = nextUpstream; + nextUpstream = null; + } + numRecords--; + upstream.loadNext(); + } + } + + @Override + public Object getBaseObject() { + return upstream.getBaseObject(); + } + + @Override + public long getBaseOffset() { + return upstream.getBaseOffset(); + } + + @Override + public int getRecordLength() { + return upstream.getRecordLength(); + } + + @Override + public long getKeyPrefix() { + return upstream.getKeyPrefix(); + } + } + + /** + * Returns a iterator, which will return the rows in the order as inserted. + * + * It is the caller's responsibility to call `cleanupResources()` + * after consuming this iterator. 
+ * + * TODO: support forced spilling + */ + public UnsafeSorterIterator getIterator() throws IOException { + /** + * 如果spillWriters为空说明,直接读取内存中即可 + */ + if (spillWriters.isEmpty()) { + assert(inMemSorter != null); + return inMemSorter.getSortedIterator(); + } else { + /** + * 否则将spillWriters对应的file中的数据,通过getReader对应UnsafeSorterSpillReader的 + * 读取器反序列化到UnsafeSorterIterator中,然后到添加到queue队列中 + * UnsafeSorterSpillReader也是UnsafeSorterIterator的子类 + */ + LinkedList queue = new LinkedList(); + for (UnsafeSorterSpillWriter spillWriter : spillWriters) { + queue.add(spillWriter.getReader(serializerManager)); + } + if (inMemSorter != null) { + queue.add(inMemSorter.getSortedIterator()); + } + /** + * ChainedIterator是一个UnsafeSorterIterator的子类 + * 实现将将多个UnsafeSorterIterator合成一个UnsafeSorterIterator + * 提供给应用使用 + */ + return new ChainedIterator(queue); + } + } + + /** + * Chain multiple UnsafeSorterIterator together as single one. + */ + static class ChainedIterator extends UnsafeSorterIterator { + + private final Queue iterators; + private UnsafeSorterIterator current; + private int numRecords; + + ChainedIterator(Queue iterators) { + assert iterators.size() > 0; + this.numRecords = 0; + for (UnsafeSorterIterator iter: iterators) { + this.numRecords += iter.getNumRecords(); + } + this.iterators = iterators; + this.current = iterators.remove(); + } + + @Override + public int getNumRecords() { + return numRecords; + } + + @Override + public boolean hasNext() { + while (!current.hasNext() && !iterators.isEmpty()) { + current = iterators.remove(); /**从队列中移除一个已经遍历完的UnsafeSorterIterator*/ + } + return current.hasNext(); + } + + @Override + public void loadNext() throws IOException { + while (!current.hasNext() && !iterators.isEmpty()) { + current = iterators.remove(); /**从队列中移除一个已经遍历完的UnsafeSorterIterator*/ + } + current.loadNext(); + } + + @Override + public Object getBaseObject() { return current.getBaseObject(); } + + @Override + public long getBaseOffset() { return 
current.getBaseOffset(); } + + @Override + public int getRecordLength() { return current.getRecordLength(); } + + @Override + public long getKeyPrefix() { return current.getKeyPrefix(); } + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeInMemorySorter.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeInMemorySorter.java new file mode 100644 index 000000000..c3594e000 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeInMemorySorter.java @@ -0,0 +1,318 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + + + + + +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.array.LongArray; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.memory.mm.MemoryConsumer; + +import javax.annotation.Nullable; +import java.util.Comparator; + +/** + * Sorts records using an AlphaSort-style key-prefix sort. This sort stores pointers to records + * alongside a user-defined prefix of the record's sorting key. 
When the underlying sort algorithm + * compares records, it will first compare the stored key prefixes; if the prefixes are not equal, + * then we do not need to traverse the record pointers to compare the actual records. Avoiding these + * random memory accesses improves cache hit rates. + */ +public final class UnsafeInMemorySorter { + + private static final class SortComparator implements Comparator { + + private final RecordComparator recordComparator; + private final PrefixComparator prefixComparator; + private final DataNodeMemoryManager memoryManager; + + SortComparator( + RecordComparator recordComparator, + PrefixComparator prefixComparator, + DataNodeMemoryManager memoryManager) { + this.recordComparator = recordComparator; + this.prefixComparator = prefixComparator; + this.memoryManager = memoryManager; + } + + @Override + public int compare(RecordPointerAndKeyPrefix r1, RecordPointerAndKeyPrefix r2) { + + final int prefixComparisonResult = prefixComparator.compare(r1.keyPrefix, r2.keyPrefix); + + if (prefixComparisonResult == 0) { + final Object baseObject1 = memoryManager.getPage(r1.recordPointer); + final long baseOffset1 = memoryManager.getOffsetInPage(r1.recordPointer) + 4; // skip length + final Object baseObject2 = memoryManager.getPage(r2.recordPointer); + final long baseOffset2 = memoryManager.getOffsetInPage(r2.recordPointer) + 4; // skip length + return recordComparator.compare(baseObject1, baseOffset1, baseObject2, baseOffset2); + } else { + return prefixComparisonResult; + } + } + } + + private final MemoryConsumer consumer; + private final DataNodeMemoryManager memoryManager; + @Nullable + private final Sorter sorter; + @Nullable + private final Comparator sortComparator; + + /** + * If non-null, specifies the radix sort parameters and that radix sort will be used. + */ + @Nullable + private final PrefixComparators.RadixSortSupport radixSortSupport; + + /** + * Set to 2x for radix sort to reserve extra memory for sorting, otherwise 1x. 
+ */ + private final int memoryAllocationFactor; + + /** + * Within this buffer, position {@code 2 * i} holds a pointer pointer to the record at + * index {@code i}, while position {@code 2 * i + 1} in the array holds an 8-byte key prefix. + */ + private LongArray array; + + /** + * The position in the sort buffer where new records can be inserted. + */ + private int pos = 0; + + private long initialSize; + + private long totalSortTimeNanos = 0L; + private boolean enableSort = true; + + public UnsafeInMemorySorter( + final MemoryConsumer consumer, + final DataNodeMemoryManager memoryManager, + final RecordComparator recordComparator, + final PrefixComparator prefixComparator, + long initialSize, + boolean canUseRadixSort,boolean enableSort) { + this(consumer, memoryManager, recordComparator, prefixComparator, + consumer.allocateLongArray(initialSize * 2), canUseRadixSort,enableSort); + } + + public UnsafeInMemorySorter( + final MemoryConsumer consumer, + final DataNodeMemoryManager memoryManager, + final RecordComparator recordComparator, + final PrefixComparator prefixComparator, + LongArray array, + boolean canUseRadixSort, + boolean enableSort) { + + this.consumer = consumer; + + this.memoryManager = memoryManager; + + this.initialSize = array.size(); + + if (recordComparator != null) { + this.sorter = new Sorter(UnsafeSortDataFormat.INSTANCE); + + this.sortComparator = new SortComparator(recordComparator, prefixComparator, memoryManager); + + if (canUseRadixSort && prefixComparator instanceof PrefixComparators.RadixSortSupport) { + this.radixSortSupport = (PrefixComparators.RadixSortSupport)prefixComparator; + } else { + this.radixSortSupport = null; + } + } else { + this.sorter = null; + this.sortComparator = null; + this.radixSortSupport = null; + } + this.enableSort = enableSort; + this.memoryAllocationFactor = this.radixSortSupport != null ? 2 : 1; + this.array = array; + } + + /** + * Free the memory used by pointer array. 
+ */ + public void free() { + if (consumer != null) { + consumer.freeLongArray(array); + array = null; + } + } + + public void reset() { + if (consumer != null) { + consumer.freeLongArray(array); + this.array = consumer.allocateLongArray(initialSize); + } + pos = 0; + } + + /** + * @return the number of records that have been inserted into this sorter. + */ + public int numRecords() { + return pos / 2; + } + + /** + * @return the total amount of time spent sorting data (in-memory only). + */ + public long getSortTimeNanos() { + return totalSortTimeNanos; + } + + public long getMemoryUsage() { + return array.size() * 8; + } + + public boolean hasSpaceForAnotherRecord() { + return pos + 1 < (array.size() / memoryAllocationFactor); + } + + public void expandPointerArray(LongArray newArray) { + if (newArray.size() < array.size()) { + throw new OutOfMemoryError("Not enough memory to grow pointer array"); + } + Platform.copyMemory( + array.getBaseObject(), + array.getBaseOffset(), + newArray.getBaseObject(), + newArray.getBaseOffset(), + array.size() * (8 / memoryAllocationFactor)); + consumer.freeLongArray(array); + array = newArray; + } + + /** + * Inserts a record to be sorted. Assumes that the record pointer points to a record length + * stored as a 4-byte integer, followed by the record's bytes. + * + * @param recordPointer pointer to a record in a data page, encoded by {@link DataNodeMemoryManager}. 
+ * @param keyPrefix a user-defined key prefix + */ + public void insertRecord(long recordPointer, long keyPrefix) { + if (!hasSpaceForAnotherRecord()) { + throw new IllegalStateException("There is no space for new record"); + } + /** + * 先插入recordPointer,然后插入keyPrefix值 + * */ + array.set(pos, recordPointer); + pos++; + array.set(pos, keyPrefix); + pos++; + } + + public final class SortedIterator extends UnsafeSorterIterator implements Cloneable { + + private final int numRecords; + private int position; + private int offset; + private Object baseObject; + private long baseOffset; + private long keyPrefix; + private int recordLength; + + private SortedIterator(int numRecords, int offset) { + this.numRecords = numRecords; + this.position = 0; + this.offset = offset; + } + + public SortedIterator clone() { + SortedIterator iter = new SortedIterator(numRecords, offset); + iter.position = position; + iter.baseObject = baseObject; + iter.baseOffset = baseOffset; + iter.keyPrefix = keyPrefix; + iter.recordLength = recordLength; + return iter; + } + + @Override + public int getNumRecords() { + return numRecords; + } + + @Override + public boolean hasNext() { + return position / 2 < numRecords; + } + + /** + * 更新迭代器相关指针信息,和当前记录内容的大小 + */ + @Override + public void loadNext() { + // This pointer points to a 4-byte record length, followed by the record's bytes + final long recordPointer = array.get(offset + position); + baseObject = memoryManager.getPage(recordPointer); + baseOffset = memoryManager.getOffsetInPage(recordPointer) + 4; // Skip over record length + recordLength = Platform.getInt(baseObject, baseOffset - 4); + keyPrefix = array.get(offset + position + 1); + position += 2; + } + + @Override + public Object getBaseObject() { return baseObject; } + + @Override + public long getBaseOffset() { return baseOffset; } + + @Override + public int getRecordLength() { return recordLength; } + + @Override + public long getKeyPrefix() { return keyPrefix; } + } + + /** + * 
Return an iterator over record pointers in sorted order. For efficiency, all calls to + * {@code next()} will return the same mutable object. + */ + public SortedIterator getSortedIterator() { + int offset = 0; + long start = System.nanoTime(); + if (sorter != null && enableSort) { + if (this.radixSortSupport != null) { + // TODO(ekl) we should handle NULL values before radix sort for efficiency, since they + // force a full-width sort (and we cannot radix-sort nullable long fields at all). + offset = RadixSort.sortKeyPrefixArray(array, pos / 2, 0, 7, radixSortSupport.sortDescending(),radixSortSupport.sortSigned()); + } else { + sorter.sort(array,0,pos / 2,sortComparator); + } + } + totalSortTimeNanos += System.nanoTime() - start; + return new SortedIterator(pos / 2, offset); + } + + /** + * Return an iterator over record pointers int not sorted order. For efficiency, all calls to + * {@code next()} will return the same mutable object. + */ + public SortedIterator getIterator() { + return new SortedIterator(pos / 2,0); + } + +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeKVExternalSorter.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeKVExternalSorter.java new file mode 100644 index 000000000..62e95cc62 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeKVExternalSorter.java @@ -0,0 +1,310 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Ordering; +import io.mycat.memory.unsafe.KVIterator; +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.map.BytesToBytesMap; +import io.mycat.memory.unsafe.memory.MemoryBlock; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.row.StructType; +import io.mycat.memory.unsafe.row.UnsafeRow; +import io.mycat.memory.unsafe.storage.DataNodeDiskManager; +import io.mycat.memory.unsafe.storage.SerializerManager; + + +import javax.annotation.Nullable; +import java.io.IOException; + +/** + * A class for performing external sorting on key-value records. Both key and value are UnsafeRows. + * + * Note that this class allows optionally passing in a {@link io.mycat.memory.unsafe.map.BytesToBytesMap} directly in order + * to perform in-place sorting of records in the map. 
+ */ + +public final class UnsafeKVExternalSorter { + + private final StructType keySchema; + private final StructType valueSchema; + private final UnsafeExternalRowSorter.PrefixComputer prefixComputer; + private final UnsafeExternalSorter sorter; + + public UnsafeKVExternalSorter( + StructType keySchema, + StructType valueSchema, + DataNodeDiskManager blockManager, + SerializerManager serializerManager, + long pageSizeBytes) throws IOException { + this(keySchema, valueSchema, blockManager, serializerManager, pageSizeBytes, null); + } + + public UnsafeKVExternalSorter( + StructType keySchema, + StructType valueSchema, + DataNodeDiskManager blockManager, + SerializerManager serializerManager, + long pageSizeBytes, + @Nullable BytesToBytesMap map) throws IOException { + + this.keySchema = keySchema; + this.valueSchema = valueSchema; + + + /** + * 排序的key,根据前缀排序规则,进行比较 + * + */ + prefixComputer = SortPrefixUtils.createPrefixGenerator(keySchema); + /** + * 排序的key,根据前缀排序规则,进行比较 + * + */ + PrefixComparator prefixComparator = SortPrefixUtils.getPrefixComparator(keySchema); + + /** + * + */ + BaseOrdering ordering = new BaseOrdering(){ + @Override + public int compare(@Nullable UnsafeRow unsafeRow, @Nullable UnsafeRow t1) { + return 1; + } + }; + + KVComparator recordComparator = new KVComparator(ordering, keySchema.length()); + + DataNodeMemoryManager dataNodeMemoryManager = null; + if (map == null) { + sorter = UnsafeExternalSorter.create( + dataNodeMemoryManager, + blockManager, + serializerManager, + recordComparator, + prefixComparator, + 4096, + pageSizeBytes, + keySchema.length() == 1 && SortPrefixUtils.canSortFullyWithPrefix(keySchema.apply(0)),true); + } else { + // During spilling, the array in map will not be used, so we can borrow that and use it + // as the underline array for in-memory sorter (it's always large enough). + // Since we will not grow the array, it's fine to pass `null` as consumer. 
+ + /** + * map.getArray()获取longArray指针数组 + */ + final UnsafeInMemorySorter inMemSorter = new UnsafeInMemorySorter( + null, dataNodeMemoryManager, recordComparator, prefixComparator, map.getArray(), + false /* TODO(ekl) we can only radix sort if the BytesToBytes load factor is <= 0.5 */,true); + + // We cannot use the destructive iterator here because we are reusing the existing memory + // pages in BytesToBytesMap to hold records during sorting. + // The only new memory we are allocating is the pointer/prefix array. + BytesToBytesMap.MapIterator iter = map.iterator(); + final int numKeyFields = keySchema.length(); + UnsafeRow row = new UnsafeRow(numKeyFields); + while (iter.hasNext()) { + final BytesToBytesMap.Location loc = iter.next(); + final Object baseObject = loc.getKeyBase(); + final long baseOffset = loc.getKeyOffset(); + + // Get encoded memory address + // baseObject + baseOffset point to the beginning of the key data in the map, but that + // the KV-pair's length data is stored in the word immediately before that address + MemoryBlock page = loc.getMemoryPage(); + long address = dataNodeMemoryManager.encodePageNumberAndOffset(page, baseOffset - 8); + + // Compute prefix + row.pointTo(baseObject, baseOffset, loc.getKeyLength()); + final long prefix = prefixComputer.computePrefix(row); + + inMemSorter.insertRecord(address, prefix); + } + + sorter = UnsafeExternalSorter.createWithExistingInMemorySorter( + dataNodeMemoryManager, + blockManager, + serializerManager, + new KVComparator(ordering, keySchema.length()), + prefixComparator, + 4096, + pageSizeBytes, + inMemSorter,true); + + // reset the map, so we can re-use it to insert new records. the inMemSorter will not used + // anymore, so the underline array could be used by map again. + map.reset(); + } + } + + /** + * Inserts a key-value record into the sorter. 
If the sorter no longer has enough memory to hold + * the record, the sorter sorts the existing records in-memory, writes them out as partially + * sorted runs, and then reallocates memory to hold the new record. + */ + public void insertKV(UnsafeRow key, UnsafeRow value) throws IOException { + final long prefix = prefixComputer.computePrefix(key); + sorter.insertKVRecord( + key.getBaseObject(), key.getBaseOffset(), key.getSizeInBytes(), + value.getBaseObject(), value.getBaseOffset(), value.getSizeInBytes(), prefix); + } + + /** + * Merges another UnsafeKVExternalSorter into `this`, the other one will be emptied. + * + * @throws IOException + */ + public void merge(UnsafeKVExternalSorter other) throws IOException { + sorter.merge(other.sorter); + } + + /** + * Returns a sorted iterator. It is the caller's responsibility to call `cleanupResources()` + * after consuming this iterator. + */ + public KVSorterIterator sortedIterator() throws IOException { + try { + final UnsafeSorterIterator underlying = sorter.getSortedIterator(); + if (!underlying.hasNext()) { + // Since we won't ever call next() on an empty iterator, we need to clean up resources + // here in order to prevent memory leaks. + cleanupResources(); + } + return new KVSorterIterator(underlying); + } catch (IOException e) { + cleanupResources(); + throw e; + } + } + + /** + * Return the total number of bytes that has been spilled into disk so far. + */ + public long getSpillSize() { + return sorter.getSpillSize(); + } + + /** + * Return the peak memory used so far, in bytes. + */ + public long getPeakMemoryUsedBytes() { + return sorter.getPeakMemoryUsedBytes(); + } + + /** + * Marks the current page as no-more-space-available, and as a result, either allocate a + * new page or spill when we see the next record. + */ + @VisibleForTesting + void closeCurrentPage() { + sorter.closeCurrentPage(); + } + + /** + * Frees this sorter's in-memory data structures and cleans up its spill files. 
+ */ + public void cleanupResources() { + sorter.cleanupResources(); + } + + private static final class KVComparator extends RecordComparator { + private final BaseOrdering ordering; + private final UnsafeRow row1; + private final UnsafeRow row2; + private final int numKeyFields; + + KVComparator(BaseOrdering ordering, int numKeyFields) { + this.numKeyFields = numKeyFields; + this.row1 = new UnsafeRow(numKeyFields); + this.row2 = new UnsafeRow(numKeyFields); + this.ordering = ordering; + } + + @Override + public int compare(Object baseObj1, long baseOff1, Object baseObj2, long baseOff2) { + // Note that since ordering doesn't need the total length of the record, we just pass -1 + // into the row. + row1.pointTo(baseObj1, baseOff1 + 4, -1); + row2.pointTo(baseObj2, baseOff2 + 4, -1); + return ordering.compare(row1, row2); + } + } + + public class KVSorterIterator extends KVIterator { + private UnsafeRow key = new UnsafeRow(keySchema.length()); + private UnsafeRow value = new UnsafeRow(valueSchema.length()); + private final UnsafeSorterIterator underlying; + + private KVSorterIterator(UnsafeSorterIterator underlying) { + this.underlying = underlying; + } + + @Override + public boolean next() throws IOException { + try { + if (underlying.hasNext()) { + underlying.loadNext(); + + Object baseObj = underlying.getBaseObject(); + long recordOffset = underlying.getBaseOffset(); + int recordLen = underlying.getRecordLength(); + + // Note that recordLen = keyLen + valueLen + 4 bytes (for the keyLen itself) + int keyLen = Platform.getInt(baseObj, recordOffset); + int valueLen = recordLen - keyLen - 4; + key.pointTo(baseObj, recordOffset + 4, keyLen); + value.pointTo(baseObj, recordOffset + 4 + keyLen, valueLen); + + return true; + } else { + key = null; + value = null; + cleanupResources(); + return false; + } + } catch (IOException e) { + cleanupResources(); + throw e; + } + } + + @Override + public UnsafeRow getKey() { + return key; + } + + @Override + public UnsafeRow 
getValue() { + return value; + } + + @Override + public void close() { + cleanupResources(); + } + } + + class BaseOrdering extends Ordering { + @Override + public int compare(@Nullable UnsafeRow unsafeRow, @Nullable UnsafeRow t1) { + throw new UnsupportedOperationException(); + } + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeKeyValueSorter.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeKeyValueSorter.java new file mode 100644 index 000000000..22acd1721 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeKeyValueSorter.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.mycat.memory.unsafe.utils.sort; + + + +import io.mycat.memory.unsafe.KVIterator; +import io.mycat.memory.unsafe.row.UnsafeRow; + +import java.io.IOException; + +public abstract class UnsafeKeyValueSorter { + + public abstract void insert(UnsafeRow key, UnsafeRow value); + + public abstract KVIterator sort() throws IOException; +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeRowsMerger.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeRowsMerger.java new file mode 100644 index 000000000..aca76df39 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeRowsMerger.java @@ -0,0 +1,97 @@ +package io.mycat.memory.unsafe.utils.sort; + +import java.io.IOException; +import java.util.Comparator; +import java.util.PriorityQueue; + +/** + * Created by zagnix on 2016/6/25. + */ +public final class UnsafeRowsMerger { + private int numRecords = 0; + private final PriorityQueue priorityQueue; + + UnsafeRowsMerger( + final RecordComparator recordComparator, + final PrefixComparator prefixComparator, + final int numSpills) { + + final Comparator comparator = new Comparator() { + @Override + public int compare(UnsafeSorterIterator left, UnsafeSorterIterator right) { + final int prefixComparisonResult = prefixComparator.compare(left.getKeyPrefix(), right.getKeyPrefix()); + if (prefixComparisonResult == 0) { + return recordComparator.compare( + left.getBaseObject(), left.getBaseOffset(), + right.getBaseObject(), right.getBaseOffset()); + } else { + return prefixComparisonResult; + } + } + }; + + /** + * 使用优先级队列实现多个Spill File 合并排序,并且支持已经排序内存记录 + * 重新写入一个排序文件中。 + */ + priorityQueue = new PriorityQueue(numSpills,comparator); + } + + /** + * Add an UnsafeSorterIterator to this merger + * + */ + public void addSpillIfNotEmpty(UnsafeSorterIterator iterator) throws IOException { + /** + * 添加迭代器到priorityQueue中 + */ + if (iterator.hasNext()) { + iterator.loadNext(); + priorityQueue.add(iterator); + numRecords += 
iterator.getNumRecords(); + } + } + + public int getNumRecords() { + return numRecords; + } + + public UnsafeSorterIterator getSortedIterator() throws IOException { + return new UnsafeSorterIterator() { + private UnsafeSorterIterator spillReader; + + @Override + public int getNumRecords() { + return numRecords; + } + + @Override + public boolean hasNext() { + return !priorityQueue.isEmpty() || (spillReader != null && spillReader.hasNext()); + } + + @Override + public void loadNext() throws IOException { + if (spillReader != null) { + if (spillReader.hasNext()) { + spillReader.loadNext(); + priorityQueue.add(spillReader); + } + } + spillReader = priorityQueue.remove(); + } + + @Override + public Object getBaseObject() { return spillReader.getBaseObject(); } + + @Override + public long getBaseOffset() { return spillReader.getBaseOffset(); } + + @Override + public int getRecordLength() { return spillReader.getRecordLength(); } + + @Override + public long getKeyPrefix() { return spillReader.getKeyPrefix(); } + }; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSortDataFormat.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSortDataFormat.java new file mode 100644 index 000000000..23b34c658 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSortDataFormat.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + + +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.array.LongArray; +import io.mycat.memory.unsafe.memory.MemoryBlock; + +/** + * Supports sorting an array of (record pointer, key prefix) pairs. + * Used in {@link UnsafeInMemorySorter}. + *

+ * Within each long[] buffer, position {@code 2 * i} holds a pointer pointer to the record at + * index {@code i}, while position {@code 2 * i + 1} in the array holds an 8-byte key prefix. + */ +public final class UnsafeSortDataFormat + extends SortDataFormat { + + public static final UnsafeSortDataFormat INSTANCE = new UnsafeSortDataFormat(); + + private UnsafeSortDataFormat() { } + + @Override + public RecordPointerAndKeyPrefix getKey(LongArray data, int pos) { + // Since we re-use keys, this method shouldn't be called. + throw new UnsupportedOperationException(); + } + + @Override + public RecordPointerAndKeyPrefix newKey() { + return new RecordPointerAndKeyPrefix(); + } + + @Override + public RecordPointerAndKeyPrefix getKey(LongArray data, int pos, + RecordPointerAndKeyPrefix reuse) { + reuse.recordPointer = data.get(pos * 2); + reuse.keyPrefix = data.get(pos * 2 + 1); + return reuse; + } + + @Override + public void swap(LongArray data, int pos0, int pos1) { + long tempPointer = data.get(pos0 * 2); + long tempKeyPrefix = data.get(pos0 * 2 + 1); + data.set(pos0 * 2, data.get(pos1 * 2)); + data.set(pos0 * 2 + 1, data.get(pos1 * 2 + 1)); + data.set(pos1 * 2, tempPointer); + data.set(pos1 * 2 + 1, tempKeyPrefix); + } + + @Override + public void copyElement(LongArray src, int srcPos, LongArray dst, int dstPos) { + dst.set(dstPos * 2, src.get(srcPos * 2)); + dst.set(dstPos * 2 + 1, src.get(srcPos * 2 + 1)); + } + + @Override + public void copyRange(LongArray src, int srcPos, LongArray dst, int dstPos, int length) { + Platform.copyMemory( + src.getBaseObject(), + src.getBaseOffset() + srcPos * 16, + dst.getBaseObject(), + dst.getBaseOffset() + dstPos * 16, + length * 16); + } + + @Override + public LongArray allocate(int length) { + assert (length < Integer.MAX_VALUE / 2) : "Length " + length + " is too large"; + return new LongArray(MemoryBlock.fromLongArray(new long[length * 2])); + } + +} diff --git 
a/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSorterIterator.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSorterIterator.java new file mode 100644 index 000000000..8a16caa0a --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSorterIterator.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + +import java.io.IOException; + +public abstract class UnsafeSorterIterator { + + public abstract boolean hasNext(); + + public abstract void loadNext() throws IOException; + + public abstract Object getBaseObject(); + + public abstract long getBaseOffset(); + + public abstract int getRecordLength(); + + public abstract long getKeyPrefix(); + + public abstract int getNumRecords(); +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSorterSpillMerger.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSorterSpillMerger.java new file mode 100644 index 000000000..73b305860 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSorterSpillMerger.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + +import java.io.IOException; +import java.util.Comparator; +import java.util.PriorityQueue; + +final class UnsafeSorterSpillMerger { + + private int numRecords = 0; + private final PriorityQueue priorityQueue; + + UnsafeSorterSpillMerger( + final RecordComparator recordComparator, + final PrefixComparator prefixComparator, + final int numSpills) { + + final Comparator comparator = new Comparator() { + @Override + public int compare(UnsafeSorterIterator left, UnsafeSorterIterator right) { + final int prefixComparisonResult = prefixComparator.compare(left.getKeyPrefix(), right.getKeyPrefix()); + if (prefixComparisonResult == 0) { + return recordComparator.compare( + left.getBaseObject(), left.getBaseOffset(), + right.getBaseObject(), right.getBaseOffset()); + } else { + return prefixComparisonResult; + } + } + }; + + /** + * 使用优先级队列实现多个Spill File 合并排序,并且支持已经排序内存记录 + * 重新写入一个排序文件中。 + */ + priorityQueue = new PriorityQueue(numSpills,comparator); + } + + /** + * Add an UnsafeSorterIterator to this merger + * + */ + public void addSpillIfNotEmpty(UnsafeSorterIterator spillReader) throws IOException { + /** + * 添加迭代器到priorityQueue中 + */ + if (spillReader.hasNext()) { + // We only add the spillReader to the priorityQueue if it is not 
empty. We do this to + // make sure the hasNext method of UnsafeSorterIterator returned by getSortedIterator + // does not return wrong result because hasNext will returns true + // at least priorityQueue.size() times. If we allow n spillReaders in the + // priorityQueue, we will have n extra empty records in the result of UnsafeSorterIterator. + + spillReader.loadNext(); + priorityQueue.add(spillReader); + numRecords += spillReader.getNumRecords(); + } + } + + /** + * 非常重要的一个排序迭代器 + * @return + * @throws IOException + */ + public UnsafeSorterIterator getSortedIterator() throws IOException { + return new UnsafeSorterIterator() { + /** + * 当前迭代器 + */ + private UnsafeSorterIterator spillReader; + + @Override + public int getNumRecords() { + return numRecords; + } + + @Override + public boolean hasNext() { + return !priorityQueue.isEmpty() || (spillReader != null && spillReader.hasNext()); + } + + @Override + public void loadNext() throws IOException { + if (spillReader != null) { + if (spillReader.hasNext()) { + spillReader.loadNext(); + /** + *添加一个完整迭代器集合给优先级队列, + *优先级队列为根据比较器自动调整想要的数据大小 + * 每次都将spillReader添加到队列中进行新的调整 + * 最后得到最小的元素,为出优先级队列做准备 + */ + priorityQueue.add(spillReader); + } + } + + /** + * 出队列,当前spillreader最小的元素出优先级队列 + */ + spillReader = priorityQueue.remove(); + } + + @Override + public Object getBaseObject() { return spillReader.getBaseObject(); } + + @Override + public long getBaseOffset() { return spillReader.getBaseOffset(); } + + @Override + public int getRecordLength() { return spillReader.getRecordLength(); } + + @Override + public long getKeyPrefix() { return spillReader.getKeyPrefix(); } + }; + } +} diff --git a/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSorterSpillReader.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSorterSpillReader.java new file mode 100644 index 000000000..07241e921 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSorterSpillReader.java @@ -0,0 +1,120 @@ +/* + * Licensed 
to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + +import com.google.common.io.ByteStreams; +import com.google.common.io.Closeables; +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.storage.ConnectionId; +import io.mycat.memory.unsafe.storage.SerializerManager; + + +import java.io.*; + +/** + * Reads spill files written by {@link UnsafeSorterSpillWriter} (see that class for a description + * of the file format). 
+ */ +public final class UnsafeSorterSpillReader extends UnsafeSorterIterator implements Closeable { + + private InputStream in; + private DataInputStream din; + + // Variables that change with every record read: + private int recordLength; + private long keyPrefix; + private int numRecords; + private int numRecordsRemaining; + + private byte[] arr = new byte[1024 * 1024]; + private Object baseObject = arr; + private final long baseOffset = Platform.BYTE_ARRAY_OFFSET; + + public UnsafeSorterSpillReader( + SerializerManager serializerManager, + File file, + ConnectionId blockId) throws IOException { + assert (file.length() > 0); + final BufferedInputStream bs = new BufferedInputStream(new FileInputStream(file)); + try { + this.in = serializerManager.wrapForCompression(blockId,bs); + this.din = new DataInputStream(this.in); + numRecords = numRecordsRemaining = din.readInt(); + } catch (IOException e) { + Closeables.close(bs, /* swallowIOException = */ true); + throw e; + } + } + + @Override + public int getNumRecords() { + return numRecords; + } + + @Override + public boolean hasNext() { + return (numRecordsRemaining > 0); + } + + @Override + public void loadNext() throws IOException { + recordLength = din.readInt(); + keyPrefix = din.readLong(); + if (recordLength > arr.length) { + arr = new byte[recordLength]; + baseObject = arr; + } + ByteStreams.readFully(in, arr, 0, recordLength); + numRecordsRemaining--; + if (numRecordsRemaining == 0) { + close(); + } + } + + @Override + public Object getBaseObject() { + return baseObject; + } + + @Override + public long getBaseOffset() { + return baseOffset; + } + + @Override + public int getRecordLength() { + return recordLength; + } + + @Override + public long getKeyPrefix() { + return keyPrefix; + } + + @Override + public void close() throws IOException { + if (in != null) { + try { + in.close(); + } finally { + in = null; + din = null; + } + } + } +} diff --git 
a/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSorterSpillWriter.java b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSorterSpillWriter.java new file mode 100644 index 000000000..49c5d6053 --- /dev/null +++ b/src/main/java/io/mycat/memory/unsafe/utils/sort/UnsafeSorterSpillWriter.java @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.utils.sort; + + + +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.storage.*; + +import java.io.File; +import java.io.IOException; + +/** + * Spills a list of sorted records to disk. Spill files have the following format: + * + * [# of records (int)] [[len (int)][prefix (long)][data (bytes)]...] + */ +public final class UnsafeSorterSpillWriter { + + static final int DISK_WRITE_BUFFER_SIZE = 1024 * 1024; + + // Small writes to DiskRowWriter will be fairly inefficient. Since there doesn't seem to + // be an API to directly transfer bytes from managed memory to the disk writer, we buffer + // data through a byte array. 
+ private byte[] writeBuffer = new byte[DISK_WRITE_BUFFER_SIZE]; + + private final File file; + private final ConnectionId conId; + private final int numRecordsToWrite; + private DiskRowWriter writer; + private DataNodeFileManager diskBlockManager; + private int numRecordsSpilled = 0; + + public UnsafeSorterSpillWriter( + DataNodeDiskManager blockManager, + int fileBufferSize, + int numRecordsToWrite) throws IOException { + + this.diskBlockManager = blockManager.diskBlockManager(); + this.conId = diskBlockManager.createTempLocalBlock(); + this.file = diskBlockManager.getFile(this.conId); + + this.numRecordsToWrite = numRecordsToWrite; + // Unfortunately, we need a serializer instance in order to construct a DiskRowWriter. + // Our write path doesn't actually use this serializer (since we end up calling the `write()` + // OutputStream methods), but DiskRowWriter still calls some methods on it. To work + // around this, we pass a dummy no-op serializer. + writer = blockManager.getDiskWriter(conId, file, DummySerializerInstance.INSTANCE, fileBufferSize/**,writeMetrics*/); + // Write the number of records + writeIntToBuffer(numRecordsToWrite, 0); + writer.write(writeBuffer, 0, 4); + } + + // Based on DataOutputStream.writeLong. + private void writeLongToBuffer(long v, int offset) throws IOException { + writeBuffer[offset + 0] = (byte)(v >>> 56); + writeBuffer[offset + 1] = (byte)(v >>> 48); + writeBuffer[offset + 2] = (byte)(v >>> 40); + writeBuffer[offset + 3] = (byte)(v >>> 32); + writeBuffer[offset + 4] = (byte)(v >>> 24); + writeBuffer[offset + 5] = (byte)(v >>> 16); + writeBuffer[offset + 6] = (byte)(v >>> 8); + writeBuffer[offset + 7] = (byte)(v >>> 0); + } + + // Based on DataOutputStream.writeInt. 
+ private void writeIntToBuffer(int v, int offset) throws IOException { + writeBuffer[offset + 0] = (byte)(v >>> 24); + writeBuffer[offset + 1] = (byte)(v >>> 16); + writeBuffer[offset + 2] = (byte)(v >>> 8); + writeBuffer[offset + 3] = (byte)(v >>> 0); + } + + /** + * Write a record to a spill file. + * + * @param baseObject the base object / memory page containing the record + * @param baseOffset the base offset which points directly to the record data. + * @param recordLength the length of the record. + * @param keyPrefix a sort key prefix + */ + public void write( + Object baseObject, + long baseOffset, + int recordLength, + long keyPrefix) throws IOException { + if (numRecordsSpilled == numRecordsToWrite) { + throw new IllegalStateException( + "Number of records written exceeded numRecordsToWrite = " + numRecordsToWrite); + } else { + numRecordsSpilled++; + } + + /** + * [# of records (int)] [[len (int)][prefix (long)][data (bytes)]...] + * 一条记录在文件中格式 + * */ + + /** + * recordLength记录长度 4个bytes + */ + writeIntToBuffer(recordLength, 0); + /** + * 排序key,8个bytes + */ + writeLongToBuffer(keyPrefix, 4); + /** + * dataRemaining要写的真实数据长度bytes + */ + int dataRemaining = recordLength; + /** + * 写buffer剩余的空间 + */ + int freeSpaceInWriteBuffer = DISK_WRITE_BUFFER_SIZE - 4 - 8; // space used by prefix + len + + /** + *记录在内存中的地址偏移量 + */ + long recordReadPosition = baseOffset; + + while (dataRemaining > 0) { + /** + * 计算本次需要从内存中读取的实际数据,取freeSpaceInWriteBuffer和dataRemaining + * 中的最小值 + */ + final int toTransfer = Math.min(freeSpaceInWriteBuffer, dataRemaining); + + /** + * 执行数据拷贝动作,将baseObject的数据拷贝到writeBuffer中 + */ + Platform.copyMemory( + baseObject,/**srd*/ + recordReadPosition,/**offset*/ + writeBuffer, /**write dst*/ + Platform.BYTE_ARRAY_OFFSET + (DISK_WRITE_BUFFER_SIZE - freeSpaceInWriteBuffer),/**write offset*/ + toTransfer); + + /** + * 将writeBuffer中数据写到磁盘中 + */ + writer.write(writeBuffer, 0, (DISK_WRITE_BUFFER_SIZE - freeSpaceInWriteBuffer) + toTransfer); + /** + * 
读指针移动toTransfer实际写的数据大小 + */ + recordReadPosition += toTransfer; + /** + * record还剩下多少数据要写入磁盘中 + */ + dataRemaining -= toTransfer; + /** + * 本次WriteBuffer初始化大小初始化为DISK_WRITE_BUFFER_SIZE + */ + freeSpaceInWriteBuffer = DISK_WRITE_BUFFER_SIZE; + } + + /** + * 写剩余数据到磁盘中 + */ + if (freeSpaceInWriteBuffer < DISK_WRITE_BUFFER_SIZE) { + + writer.write(writeBuffer, 0, (DISK_WRITE_BUFFER_SIZE - freeSpaceInWriteBuffer)); + + } + + /** + * writer类中数据统计 + */ + writer.recordWritten(); + } + + public void close() throws IOException { + writer.commitAndClose(); + writer = null; + writeBuffer = null; + } + + public File getFile() { + return file; + } + + public UnsafeSorterSpillReader getReader(SerializerManager serializerManager) throws IOException { + return new UnsafeSorterSpillReader(serializerManager, file, conId); + } +} diff --git a/src/main/java/io/mycat/migrate/BinlogIdleCheck.java b/src/main/java/io/mycat/migrate/BinlogIdleCheck.java new file mode 100644 index 000000000..5ee9dfb80 --- /dev/null +++ b/src/main/java/io/mycat/migrate/BinlogIdleCheck.java @@ -0,0 +1,121 @@ +package io.mycat.migrate; + +import com.alibaba.fastjson.JSON; +import io.mycat.util.ZKUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.sql.SQLException; +import java.util.Date; +import java.util.List; + +/** + * Created by magicdoom on 2016/12/14. 
+ */ +public class BinlogIdleCheck implements Runnable { + private BinlogStream binlogStream; + private static final Logger LOGGER = LoggerFactory.getLogger(BinlogIdleCheck.class); + public BinlogIdleCheck(BinlogStream binlogStream) { + this.binlogStream = binlogStream; + } + + @Override public void run() { + ListmigrateTaskList= binlogStream.getMigrateTaskList(); + int sucessSwitchTask=0; + int fullSucessSwitchTask=0; + String taskPath=null; + String dataHost=null; + for (MigrateTask migrateTask : migrateTaskList) { + String zkPath=migrateTask.getZkpath(); + if(taskPath==null){ + taskPath=zkPath.substring(0,zkPath.lastIndexOf("/")) ; + dataHost=zkPath.substring(zkPath.lastIndexOf("/")+1); + } + if(migrateTask.isHaserror()||migrateTask.isHasExecute()) + { + continue; + } + Date lastDate= migrateTask.getLastBinlogDate(); + long diff = (new Date().getTime() - lastDate.getTime())/1000; + if((!migrateTask.isHaserror())&&diff>30){ + //暂定30秒空闲 则代表增量任务结束,开始切换 + sucessSwitchTask=sucessSwitchTask+1; + fullSucessSwitchTask=fullSucessSwitchTask+1; + + }else if(!migrateTask.isHaserror()){ + String sql=MigrateUtils.makeCountSql(migrateTask); + try { + long oldCount=MigrateUtils.execulteCount(sql,migrateTask.getFrom()); + long newCount=MigrateUtils.execulteCount(sql,migrateTask.getTo()); + if(oldCount!=0) { + double percent = newCount / oldCount; + if(percent>=0.9) { + sucessSwitchTask=sucessSwitchTask+1; + } + } + } catch (SQLException e) { + LOGGER.error("error:",e); + } catch (IOException e) { + LOGGER.error("error:",e); + } + + } + + } + + + try { + TaskNode taskNode = JSON.parseObject(ZKUtils.getConnection().getData().forPath(taskPath),TaskNode.class); + if(taskNode.getStatus()>=3){ + binlogStream.disconnect(); + } + } catch (Exception e) { + LOGGER.error("error:",e); + } + + + if(sucessSwitchTask==migrateTaskList.size()){ + String childTaskPath=taskPath+"/_prepare/"+dataHost; + try { + if( ZKUtils.getConnection().checkExists().forPath(childTaskPath)==null) { + 
ZKUtils.getConnection().create().creatingParentsIfNeeded().forPath(childTaskPath); + } + + } catch (Exception e) { + LOGGER.error("error:",e); + } + + } + + + //全部空闲后,如果已经开始切换了,则修改每个子任务状态 + if(fullSucessSwitchTask==migrateTaskList.size()){ + try { + TaskNode taskNode=JSON.parseObject(new String( ZKUtils.getConnection().getData().forPath(taskPath),"UTF-8"),TaskNode.class); + if(taskNode.getStatus()==2) { + + for (MigrateTask migrateTask : migrateTaskList) { + String zkPath = migrateTask.getZkpath() + "/" + migrateTask.getFrom() + "-" + migrateTask.getTo(); + if (ZKUtils.getConnection().checkExists().forPath(zkPath) != null) { + TaskStatus taskStatus = JSON.parseObject( + new String(ZKUtils.getConnection().getData().forPath(zkPath), "UTF-8"), TaskStatus.class); + if (taskStatus.getStatus() == 1) { + taskStatus.setStatus(3); + ZKUtils.getConnection().setData().forPath(zkPath, JSON.toJSONBytes(taskStatus)); + } + } + } + } + } catch (Exception e) { + LOGGER.error("error:",e); + } + + } + } + + + + + +} diff --git a/src/main/java/io/mycat/migrate/BinlogStream.java b/src/main/java/io/mycat/migrate/BinlogStream.java new file mode 100644 index 000000000..7054d8bf3 --- /dev/null +++ b/src/main/java/io/mycat/migrate/BinlogStream.java @@ -0,0 +1,580 @@ +package io.mycat.migrate; +import com.alibaba.druid.util.JdbcUtils; +import com.github.shyiko.mysql.binlog.BinaryLogClient; +import com.github.shyiko.mysql.binlog.event.*; +import com.google.common.base.Strings; +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.route.function.PartitionByCRC32PreSlot; +import io.mycat.server.util.SchemaUtil; +import io.mycat.sqlengine.OneRawSQLQueryResultHandler; +import io.mycat.sqlengine.SQLJob; +import io.mycat.util.DateUtil; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.io.Serializable; +import java.math.BigInteger; 
+import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.*; +import java.util.concurrent.*; + +import static io.mycat.util.dataMigrator.DataMigratorUtil.executeQuery; + +public class BinlogStream { + + private static Logger logger = LoggerFactory.getLogger(BinlogStream.class); + + private final String hostname; + private final int port; + private final String username; + private final String password; + private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(); + private BinaryLogClient binaryLogClient; + + private long slaveID; + private String binglogFile; + private long binlogPos; + + private Set databaseSet=new HashSet<>(); + private Map semaphoreMap=new ConcurrentHashMap<>(); + + + + private List migrateTaskList; + + public List getMigrateTaskList() { + return migrateTaskList; + } + + public void setMigrateTaskList(List migrateTaskList) { + this.migrateTaskList = migrateTaskList; + for (MigrateTask migrateTask : migrateTaskList) { + databaseSet.add(MigrateUtils.getDatabaseFromDataNode(migrateTask.getFrom())) ; + String dataHostTo= MigrateUtils.getDataHostFromDataNode(migrateTask.getTo()); + if(!semaphoreMap.containsKey(dataHostTo)){ + int count=Double.valueOf( MycatServer.getInstance().getConfig().getDataHosts().get(dataHostTo).getSource().getSize()*0.8).intValue(); + semaphoreMap.put(dataHostTo,new Semaphore(1)) ; + } + } + } + + public long getSlaveID() { + return slaveID; + } + + public void setSlaveID(long slaveID) { + this.slaveID = slaveID; + } + + public String getBinglogFile() { + return binglogFile; + } + + public void setBinglogFile(String binglogFile) { + this.binglogFile = binglogFile; + } + + public long getBinlogPos() { + return binlogPos; + } + + public void setBinlogPos(long binlogPos) { + this.binlogPos = binlogPos; + } + + private volatile boolean groupEventsByTX = true; + + + + + + public BinlogStream(String hostname, int port, String username, String 
password) { + this.hostname = hostname; + this.port = port; + this.username = username; + this.password = password; + } + + public void setGroupEventsByTX(boolean groupEventsByTX) { + this.groupEventsByTX = groupEventsByTX; + } + + + + + public void connect() throws IOException { + initTaskDate(); + scheduler.scheduleAtFixedRate(new BinlogIdleCheck(this),5,15, TimeUnit.SECONDS); + allocateBinaryLogClient().connect(); + + } + + private void initTaskDate() { + Date curDate=new Date(); + for (MigrateTask migrateTask : migrateTaskList) { + migrateTask.setLastBinlogDate(curDate); + } + } + + public void connect(long timeoutInMilliseconds) throws IOException, TimeoutException { + initTaskDate(); + scheduler.scheduleAtFixedRate(new BinlogIdleCheck(this),5,15, TimeUnit.SECONDS); + allocateBinaryLogClient().connect(timeoutInMilliseconds); + + } + + private synchronized BinaryLogClient allocateBinaryLogClient() { + if (isConnected()) { + throw new IllegalStateException("MySQL replication stream is already open"); + } + binaryLogClient = new BinaryLogClient(hostname, port, username, password); + binaryLogClient.setBinlogFilename(getBinglogFile()); + binaryLogClient.setBinlogPosition(getBinlogPos()); + binaryLogClient.setServerId(getSlaveID()); + binaryLogClient.registerEventListener(new DelegatingEventListener()); + return binaryLogClient; + } + + + + + public synchronized boolean isConnected() { + return binaryLogClient != null && binaryLogClient.isConnected(); + } + + + + + + public synchronized void disconnect() throws IOException { + if (binaryLogClient != null) { + binaryLogClient.disconnect(); + binaryLogClient = null; + } + shutdownAndAwaitTermination( scheduler); + } + + + void shutdownAndAwaitTermination(ExecutorService pool) { + pool.shutdown(); // Disable new tasks from being submitted + try { + // Wait a while for existing tasks to terminate + if (!pool.awaitTermination(60, TimeUnit.SECONDS)) { + pool.shutdownNow(); // Cancel currently executing tasks + // Wait a 
while for tasks to respond to being cancelled + if (!pool.awaitTermination(60, TimeUnit.SECONDS)) + System.err.println("Pool did not terminate"); + } + } catch (InterruptedException ie) { + // (Re-)Cancel if current thread also interrupted + pool.shutdownNow(); + // Preserve interrupt status + Thread.currentThread().interrupt(); + } + } + + + private final class DelegatingEventListener implements BinaryLogClient.EventListener { + + private final Map tablesById = new HashMap(); + private final Map>> tablesColumnMap = new HashMap<>(); + + private boolean transactionInProgress; + private String binlogFilename; + + + //当发现ddl语句时 需要更新重新取列名 + private Map> loadColumn(String database,String table) + { + Map> rtn=new HashMap<>(); + List> list=null; + Connection con = null; + try { + con = DriverManager.getConnection("jdbc:mysql://"+hostname+":"+port,username,password); + list = executeQuery(con, "select COLUMN_NAME, ORDINAL_POSITION, DATA_TYPE, CHARACTER_SET_NAME from INFORMATION_SCHEMA.COLUMNS where table_name='"+table+"' and TABLE_SCHEMA='"+database+"'"); + + } catch (SQLException e) { + throw new RuntimeException(e); + }finally{ + JdbcUtils.close(con); + } + for (Map stringObjectMap : list) { + BigInteger pos= (BigInteger) stringObjectMap.get("ORDINAL_POSITION"); + rtn.put(pos.intValue(),stringObjectMap); + } + return rtn; + } + + @Override + public void onEvent(Event event) { + logger.debug(event.toString()); + EventType eventType = event.getHeader().getEventType(); + switch (eventType) { + case TABLE_MAP: + TableMapEventData tableMapEventData = event.getData(); + tablesById.put(tableMapEventData.getTableId(), tableMapEventData); + if(!tablesColumnMap.containsKey(tableMapEventData.getDatabase()+"."+tableMapEventData.getTable())) { + tablesColumnMap.put(tableMapEventData.getDatabase()+"."+tableMapEventData.getTable(),loadColumn(tableMapEventData.getDatabase(),tableMapEventData.getTable())) ; + } + break; + case ROTATE: + RotateEventData data= event.getData() ; + 
binlogFilename=data.getBinlogFilename(); + break; + case PRE_GA_WRITE_ROWS: + case WRITE_ROWS: + case EXT_WRITE_ROWS: + handleWriteRowsEvent(event); + break; + case PRE_GA_UPDATE_ROWS: + case UPDATE_ROWS: + case EXT_UPDATE_ROWS: + handleUpdateRowsEvent(event); + break; + case PRE_GA_DELETE_ROWS: + case DELETE_ROWS: + case EXT_DELETE_ROWS: + handleDeleteRowsEvent(event); + break; + case QUERY: + if (groupEventsByTX) { + QueryEventData queryEventData = event.getData(); + String query = queryEventData.getSql(); + if ("BEGIN".equals(query)) { + transactionInProgress = true; + } else if(!query.startsWith("#")) { + handleOtherSqlEvent(event); + } + } + break; + case XID: + if (groupEventsByTX) { + transactionInProgress = false; + } + break; + default: + // ignore + } + } + + private void exeSql(MigrateTask task,String sql){ + if(task.isHaserror()) + return; + task.setHasExecute(true); + String dataHostTo= MigrateUtils.getDataHostFromDataNode(task.getTo()); + Semaphore semaphore = semaphoreMap.get(dataHostTo); + try { + semaphore.acquire(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + SqlExecuteListener listener = new SqlExecuteListener(task, sql, BinlogStream.this, + semaphore); + OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler(new String[0], + listener); + resultHandler.setMark("binlog execute"); + PhysicalDBNode dn = MycatServer.getInstance().getConfig().getDataNodes().get(task.getTo()); + SQLJob sqlJob = new SQLJob(sql, dn.getDatabase(), resultHandler, dn.getDbPool().getSource()); + listener.setSqlJob(sqlJob); + sqlJob.run(); + } + + private void handleOtherSqlEvent(Event event) { + QueryEventData queryEventData = event.getData(); + logger.debug("receve sql:",queryEventData.getSql()); + SchemaUtil.SchemaInfo schemaInfo=SchemaUtil.parseSchema(queryEventData.getSql()); + if(isShouldBeFilter(queryEventData.getDatabase(),schemaInfo.table)) + return; + String query = queryEventData.getSql(); + for (MigrateTask 
migrateTask : migrateTaskList) { + if(schemaInfo.table.equalsIgnoreCase(migrateTask.getTable()) + &&queryEventData.getDatabase().equalsIgnoreCase(MigrateUtils.getDatabaseFromDataNode(migrateTask.getFrom()))){ + exeSql(migrateTask,query); + } + } + + + } + + private boolean isShouldBeFilter(String database,String table) + { + if(Strings.isNullOrEmpty(database)) + return true; + if(Strings.isNullOrEmpty(table)) + return true; + if(!databaseSet.contains(database.toLowerCase())){ + return true; + } + for (MigrateTask migrateTask : migrateTaskList) { + if(database.equals(MigrateUtils.getDatabaseFromDataNode(migrateTask.getFrom()))&&table.equalsIgnoreCase(migrateTask.getTable())){ + return false; + } + } + + + return true; + } + + + + private void handleWriteRowsEvent(Event event) { + WriteRowsEventData eventData = event.getData(); + TableMapEventData tableMapEvent = tablesById.get(eventData.getTableId()); + if(isShouldBeFilter(tableMapEvent.getDatabase(),tableMapEvent.getTable())) + return; + Map> xxx= tablesColumnMap.get(tableMapEvent.getDatabase()+"."+tableMapEvent.getTable()); + BitSet inculudeColumn= eventData.getIncludedColumns(); + StringBuilder sb=new StringBuilder("insert into "); + sb.append(tableMapEvent.getTable()) ; + sb.append("("); + int size= inculudeColumn.length() ; + List rows = eventData.getRows(); + + int slot=-1; + for (int i = 0; i coumnMap= xxx.get(column+1); + sb.append(coumnMap.get("COLUMN_NAME")); + if(i!=size-1){ + sb.append(","); + } + } + sb.append(") values "); + for (int i = 0; i < rows.size(); i++) { + Serializable[] value= rows.get(i); + sb.append(" ("); + for (int y = 0; y coumnMap= xxx.get(column+1); + String dataType= (String) coumnMap.get("DATA_TYPE"); + String columnName= (String) coumnMap.get("COLUMN_NAME"); + if("_slot".equalsIgnoreCase(columnName)){ + slot= value[y] instanceof BigInteger?((BigInteger) value[y]).intValue():((Integer) value[y]); + } + sb.append(convertBinlogValue(value[y],dataType)); + + if(y!=size-1){ + 
sb.append(","); + } + } + sb.append(")"); + if(i!=rows.size()-1){ + sb.append(","); + } + } + + checkIfExeSql(tableMapEvent, sb, slot); + + } + + private void checkIfExeSql(TableMapEventData tableMapEvent, StringBuilder sb, int slot) { + for (MigrateTask migrateTask : migrateTaskList) { + if(tableMapEvent.getTable().equalsIgnoreCase(migrateTask.getTable()) + &&tableMapEvent.getDatabase().equalsIgnoreCase(MigrateUtils.getDatabaseFromDataNode(migrateTask.getFrom()))){ + for (PartitionByCRC32PreSlot.Range range :migrateTask.getSlots()) { + if(range.end>=slot&&range.start<=slot) { + exeSql(migrateTask,sb.toString()); + return; + } + } + + } + } + } + + private Object convertBinlogValue(Serializable value,String dataType){ + if(value instanceof String ) { + return "'"+((String)value).replace("'","\\'")+"'"; + } else if(value instanceof byte[] ) { + //todo 需要确认编码 + return "'"+new String((byte[]) value).replace("'","\\'")+"'"; + }else if(value instanceof Date ) { + return "'"+dateToString((Date)value,dataType)+"'"; + }else if(("date".equalsIgnoreCase(dataType))&&value instanceof Long) + { + return "'"+dateToStringFromUTC((Long) value)+"'"; + //mariadb date + + } + else if("datetime".equalsIgnoreCase(dataType)&&value instanceof Long) + { + return "'"+datetimeToStringFromUTC((Long) value)+"'"; + //mariadb date + + } else if(("timestamp".equalsIgnoreCase(dataType))&&value instanceof Long) + { + return "'"+dateToString((Long) value)+"'"; + //mariadb date + + } + else{ + return value; + } + } + + private String dateToStringFromUTC(Long date){ + DateTime dt = new DateTime(date, DateTimeZone.UTC); + return dt.toString(DateUtil.DATE_PATTERN_ONLY_DATE); + } + + private String datetimeToStringFromUTC(Long date){ + DateTime dt = new DateTime(date, DateTimeZone.UTC); + return dt.toString(DateUtil.DATE_PATTERN_FULL); + } + private String dateToString(Long date){ + DateTime dt = new DateTime(date); + return dt.toString(DateUtil.DATE_PATTERN_FULL); + } + private String dateToString(Date 
date,String dateType){ + if("timestamp".equalsIgnoreCase(dateType)) + { + DateTime dt = new DateTime(date); + return dt.toString(DateUtil.DATE_PATTERN_FULL); + } else if("datetime".equalsIgnoreCase(dateType)) { + DateTime dt = new DateTime(date,DateTimeZone.UTC); + return dt.toString(DateUtil.DATE_PATTERN_FULL); + }else if("date".equalsIgnoreCase(dateType)) { + DateTime dt = new DateTime(date,DateTimeZone.UTC); + return dt.toString(DateUtil.DATE_PATTERN_ONLY_DATE); + } else + { + DateTime dt = new DateTime(date); + return dt.toString(DateUtil.DATE_PATTERN_FULL); + } + + } + private void handleUpdateRowsEvent(Event event) { + UpdateRowsEventData eventData = event.getData(); + TableMapEventData tableMapEvent = tablesById.get(eventData.getTableId()); + if(isShouldBeFilter(tableMapEvent.getDatabase(),tableMapEvent.getTable())) + return; + Map> xxx= tablesColumnMap.get(tableMapEvent.getDatabase()+"."+tableMapEvent.getTable()); + BitSet inculudeColumn= eventData.getIncludedColumns(); + StringBuilder sba=new StringBuilder("update "); + sba.append(tableMapEvent.getTable()) ; + sba.append(" set "); + int size= inculudeColumn.length() ; + + List> rows = eventData.getRows(); + for (Map.Entry row : rows) { + StringBuilder sb=new StringBuilder(sba); + int slot=-1; + Map.Entry rowMap= row ; + Serializable[] value= rowMap.getValue(); + Serializable[] key= rowMap.getKey(); + for (int i = 0; i coumnMap= xxx.get(column+1); + sb.append(coumnMap.get("COLUMN_NAME")); + sb.append("="); + String dataType= (String) coumnMap.get("DATA_TYPE"); + sb.append(convertBinlogValue(value[i],dataType)); + + if(i!=size-1){ + sb.append(","); + } + } + sb.append(" where "); + + BitSet includedColumnsBeforeUpdate= eventData.getIncludedColumnsBeforeUpdate(); + for (int i = 0; i coumnMap= xxx.get(column+1); + sb.append(coumnMap.get("COLUMN_NAME")); + Serializable value1 = key[i]; + if(value1==null){ + sb.append(" is null"); + } else { + sb.append("="); + String dataType = (String) 
coumnMap.get("DATA_TYPE"); + + sb.append(convertBinlogValue(value1, dataType)); + } + String columnName= (String) coumnMap.get("COLUMN_NAME"); + if("_slot".equalsIgnoreCase(columnName)){ + slot= value1 instanceof BigInteger?((BigInteger) value1).intValue():((Integer) value1); + } + if(i!=size-1){ + sb.append(" and "); + } + } + + checkIfExeSql(tableMapEvent,sb,slot); + } + + + + } + + private void handleDeleteRowsEvent(Event event) { + DeleteRowsEventData eventData = event.getData(); + TableMapEventData tableMapEvent = tablesById.get(eventData.getTableId()); + if(isShouldBeFilter(tableMapEvent.getDatabase(),tableMapEvent.getTable())) + return; + Map> xxx= tablesColumnMap.get(tableMapEvent.getDatabase()+"."+tableMapEvent.getTable()); + BitSet inculudeColumn= eventData.getIncludedColumns(); + StringBuilder sba=new StringBuilder("delete from "); + sba.append(tableMapEvent.getTable()) ; + sba.append(" where "); + int size= inculudeColumn.length() ; + List rows = eventData.getRows(); + for (Serializable[] row : rows) { + StringBuilder sb=new StringBuilder(sba); + Serializable[] value= row ; + + + int slot=-1; + for (int i = 0; i coumnMap= xxx.get(column+1); + sb.append(coumnMap.get("COLUMN_NAME")); + Serializable value1 = value[i]; + if(value1==null){ + sb.append(" is null"); + } else { + sb.append("="); + String dataType = (String) coumnMap.get("DATA_TYPE"); + + sb.append(convertBinlogValue(value1, dataType)); + } + String columnName= (String) coumnMap.get("COLUMN_NAME"); + if("_slot".equalsIgnoreCase(columnName)){ + slot= value1 instanceof BigInteger?((BigInteger) value1).intValue():((Integer) value1); + } + if(i!=size-1){ + sb.append(" and "); + } + } + checkIfExeSql(tableMapEvent,sb,slot); + + } + + + } + + + + } + + public static void main(String[] args) { +// BinlogStream stream=new BinlogStream("localhost",3301,"czn","MUXmux"); +// try { +// stream.setSlaveID(23511); +// stream.setBinglogFile("mysql-bin.000005"); +// stream.setBinlogPos(4); +// stream.connect(); 
+// +// } catch (IOException e) { +// e.printStackTrace(); +// } + + String sql="2'aa\"啊啊402"; + System.out.println(sql.replace("'","\\'")); + } +} diff --git a/src/main/java/io/mycat/migrate/BinlogStreamHoder.java b/src/main/java/io/mycat/migrate/BinlogStreamHoder.java new file mode 100644 index 000000000..94ad71ae5 --- /dev/null +++ b/src/main/java/io/mycat/migrate/BinlogStreamHoder.java @@ -0,0 +1,11 @@ +package io.mycat.migrate; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * Created by magicdoom on 2016/12/25. + */ +public class BinlogStreamHoder { + public static ConcurrentMap binlogStreamMap=new ConcurrentHashMap<>(); +} diff --git a/src/main/java/io/mycat/migrate/MigrateDumpRunner.java b/src/main/java/io/mycat/migrate/MigrateDumpRunner.java new file mode 100644 index 000000000..34c1252fc --- /dev/null +++ b/src/main/java/io/mycat/migrate/MigrateDumpRunner.java @@ -0,0 +1,221 @@ +package io.mycat.migrate; + +import com.alibaba.druid.util.JdbcUtils; +import com.alibaba.fastjson.JSON; +import com.google.common.base.Joiner; +import com.google.common.base.Strings; +import com.google.common.io.Files; +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.config.model.DBHostConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.memory.environment.OperatingSystem; +import io.mycat.route.function.PartitionByCRC32PreSlot.Range; +import io.mycat.util.ProcessUtil; +import io.mycat.util.ZKUtils; +import io.mycat.util.dataMigrator.DataMigratorUtil; +import io.mycat.util.dataMigrator.DataNode; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.Charset; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import 
java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; + +import static io.mycat.util.dataMigrator.DataMigratorUtil.executeQuery; + + +/** + * Created by nange on 2016/12/1. + */ +public class MigrateDumpRunner implements Runnable { + private static final Logger LOGGER = LoggerFactory.getLogger(MigrateDumpRunner.class); + private MigrateTask task; + private CountDownLatch latch; + private AtomicInteger sucessTask; + public MigrateDumpRunner(MigrateTask task, CountDownLatch latch, AtomicInteger sucessTask) { + this.task = task; + this.latch = latch; + this.sucessTask=sucessTask; + } + + @Override public void run() { + try { + String mysqldump = "?mysqldump -h? -P? -u? -p? ? ? --single-transaction -q --default-character-set=utf8mb4 --hex-blob --where=\"?\" --master-data=1 -T \"?\" --fields-enclosed-by=\\\" --fields-terminated-by=, --lines-terminated-by=\\n --fields-escaped-by=\\\\ "; + PhysicalDBPool dbPool = MycatServer.getInstance().getConfig().getDataNodes().get(task.getFrom()).getDbPool(); + PhysicalDatasource datasource = dbPool.getSources()[dbPool.getActivedIndex()]; + DBHostConfig config = datasource.getConfig(); + File file = null; + String spath= querySecurePath(config); + if(Strings.isNullOrEmpty(spath)||"NULL".equalsIgnoreCase(spath)||"empty".equalsIgnoreCase(spath)) { + file = new File(SystemConfig.getHomePath() + File.separator + "temp", "dump" ); + // task.getFrom() + "_" + task.getTo() + Thread.currentThread().getId() + System.currentTimeMillis() + ""); + } else { + spath+= Thread.currentThread().getId() + System.currentTimeMillis(); + file=new File(spath); + } + file.mkdirs(); + + String encose= OperatingSystem.isWindows()?"\\":""; + String finalCmd = DataMigratorUtil + .paramsAssignment(mysqldump,"?", "", config.getIp(), String.valueOf(config.getPort()), config.getUser(), + 
config.getPassword(),MigrateUtils.getDatabaseFromDataNode(task.getFrom()), task.getTable() , makeWhere(task), file.getPath()); + List args= Arrays.asList("mysqldump", "-h"+config.getIp(), "-P"+String.valueOf(config.getPort()), "-u"+config.getUser(), + "-p"+config.getPassword(), MigrateUtils.getDatabaseFromDataNode(task.getFrom()), task.getTable(), "--single-transaction","-q","--default-character-set=utf8mb4","--hex-blob","--where="+makeWhere(task), "--master-data=1","-T"+file.getPath() + + ,"--fields-enclosed-by="+encose+"\"","--fields-terminated-by=,", "--lines-terminated-by=\\n", "--fields-escaped-by=\\\\"); + String result= ProcessUtil.execReturnString(args); + int logIndex = result.indexOf("MASTER_LOG_FILE='"); + int logPosIndex = result.indexOf("MASTER_LOG_POS="); + String logFile=result.substring(logIndex +17,logIndex +17+result.substring(logIndex +17).indexOf("'")) ; + String logPos=result.substring(logPosIndex +15,logPosIndex +15+result.substring(logPosIndex +15).indexOf(";")) ; + task.setBinlogFile(logFile); + task.setPos(Integer.parseInt(logPos)); + File dataFile = new File(file, task.getTable() + ".txt"); + + File sqlFile = new File(file, task.getTable() + ".sql"); + List createTable= Files.readLines(sqlFile,Charset.forName("UTF-8")) ; + + exeCreateTableToDn(extractCreateSql(createTable),task.getTo(),task.getTable()); + if(dataFile.length()>0) { + + loaddataToDn(dataFile, task.getTo(), task.getTable()); + } + pushMsgToZK(task.getZkpath(),task.getFrom()+"-"+task.getTo(),1,"sucess",logFile,logPos); + DataMigratorUtil.deleteDir(file); + sucessTask.getAndIncrement(); + } catch (Exception e) { + try { + pushMsgToZK(task.getZkpath(),task.getFrom()+"-"+task.getTo(),0,e.getMessage(),"",""); + } catch (Exception e1) { + } + LOGGER.error("error:",e); + } finally { + latch.countDown(); + } + + + } + + private String extractCreateSql(List lines){ + StringBuilder sb=new StringBuilder(); + boolean isAdd=false; + for (String line : lines) { + 
if(Strings.isNullOrEmpty(line)||line.startsWith("--")||line.startsWith("/*")||line.startsWith("DROP")) { + isAdd=false; + continue; + } + if(line.startsWith("CREATE")) { + isAdd=true; + } + + if(isAdd){ + sb.append(line).append("\n"); + } + } + String rtn=sb.toString(); + if(rtn.endsWith(";\n")){ + rtn= rtn.substring(0,rtn.length()-2); + } + return rtn.replace("CREATE TABLE","CREATE TABLE IF not EXISTS "); + } + + private void exeCreateTableToDn(String sql,String toDn,String table) throws SQLException { + PhysicalDBNode dbNode = MycatServer.getInstance().getConfig().getDataNodes().get(toDn); + PhysicalDBPool dbPool = dbNode.getDbPool(); + PhysicalDatasource datasource = dbPool.getSources()[dbPool.getActivedIndex()]; + DBHostConfig config = datasource.getConfig(); + Connection con = null; + try { + con = DriverManager.getConnection("jdbc:mysql://"+config.getUrl()+"/"+dbNode.getDatabase(),config.getUser(),config.getPassword()); + JdbcUtils.execute(con,sql, new ArrayList<>()); + } finally{ + JdbcUtils.close(con); + } + } + + + private void pushMsgToZK(String rootZkPath,String child,int status,String msg,String binlogFile,String pos) throws Exception { + String path = rootZkPath + "/" + child; + TaskStatus taskStatus=new TaskStatus(); + taskStatus.setMsg(msg); + taskStatus.setStatus(status); + task.setStatus(status); + taskStatus.setBinlogFile(binlogFile); + taskStatus.setPos(Long.parseLong(pos)); + + if(ZKUtils.getConnection().checkExists().forPath(path)==null ) + { + ZKUtils.getConnection().create().forPath(path, JSON.toJSONBytes(taskStatus)) ; + } else{ + ZKUtils.getConnection().setData().forPath(path, JSON.toJSONBytes(taskStatus)) ; + } + } + + private void loaddataToDn(File loaddataFile,String toDn,String table) throws SQLException, IOException { + PhysicalDBNode dbNode = MycatServer.getInstance().getConfig().getDataNodes().get(toDn); + PhysicalDBPool dbPool = dbNode.getDbPool(); + PhysicalDatasource datasource = dbPool.getSources()[dbPool.getActivedIndex()]; + 
DBHostConfig config = datasource.getConfig(); + Connection con = null; + try { + con = DriverManager.getConnection("jdbc:mysql://"+config.getUrl()+"/"+dbNode.getDatabase(),config.getUser(),config.getPassword()); + String sql = "load data local infile '"+loaddataFile.getPath().replace("\\","//")+"' replace into table "+table+" character set 'utf8mb4' fields terminated by ',' enclosed by '\"' ESCAPED BY '\\\\' lines terminated by '\\n'"; + JdbcUtils.execute(con,sql, new ArrayList<>()); + } finally{ + JdbcUtils.close(con); + } + } + + private String makeWhere(MigrateTask task) { + List whereList = new ArrayList<>(); + List slotRanges = task.getSlots(); + for (Range slotRange : slotRanges) { + if (slotRange.start == slotRange.end) { + whereList.add("_slot =" + slotRange.start); + } else { + whereList.add("(_slot >=" + slotRange.start + " and _slot <=" + slotRange.end+")"); + } + } + + return Joiner.on(" or ").join(whereList); + } + + private static String querySecurePath(DBHostConfig config ) { + List> list=null; + String path = null; + Connection con = null; + try { + con = DriverManager.getConnection("jdbc:mysql://"+config.getUrl(),config.getUser(),config.getPassword()); + list = executeQuery(con, "show variables like 'secure_file_priv'"); + if(list!=null&&list.size()==1) + path = (String) list.get(0).get("Value"); + } catch (SQLException e) { + throw new RuntimeException(e); + }finally{ + JdbcUtils.close(con); + } + return path; + } + + public static void main(String[] args) { + String result="\n" + "--\n" + "-- Position to start replication or point-in-time recovery from\n" + "--\n" + + "\n" + "CHANGE MASTER TO MASTER_LOG_FILE='NANGE-PC-bin.000021', MASTER_LOG_POS=154;\n"; + int logIndex = result.indexOf("MASTER_LOG_FILE='"); + int logPosIndex = result.indexOf("MASTER_LOG_POS="); + String logFile=result.substring(logIndex +17,logIndex +17+result.substring(logIndex +17).indexOf("'")) ; + String logPos=result.substring(logPosIndex +15,logPosIndex 
+15+result.substring(logPosIndex +15).indexOf(";")) ; + System.out.println(); + } +} diff --git a/src/main/java/io/mycat/migrate/MigrateMainRunner.java b/src/main/java/io/mycat/migrate/MigrateMainRunner.java new file mode 100644 index 000000000..a57ec77ef --- /dev/null +++ b/src/main/java/io/mycat/migrate/MigrateMainRunner.java @@ -0,0 +1,86 @@ +package io.mycat.migrate; + +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.config.model.DBHostConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Created by magicdoom on 2016/12/8. + */ +public class MigrateMainRunner implements Runnable { + private static final Logger LOGGER = LoggerFactory.getLogger(MigrateMainRunner.class); + private String dataHost; + private List migrateTaskList; + + public MigrateMainRunner(String dataHost, List migrateTaskList) { + this.dataHost = dataHost; + this.migrateTaskList = migrateTaskList; + } + + @Override public void run() { + AtomicInteger sucessTask=new AtomicInteger(0); + CountDownLatch downLatch=new CountDownLatch(migrateTaskList.size()) ; + for (MigrateTask migrateTask : migrateTaskList) { + MycatServer.getInstance().getBusinessExecutor().submit( new MigrateDumpRunner(migrateTask,downLatch,sucessTask)) ; + } + try { + downLatch.await(2, TimeUnit.HOURS) ; + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + //同一个dataHost的任务合并执行,避免过多流量浪费 + if(sucessTask.get()==migrateTaskList.size()) + { + long binlogFileNum=-1; + String binlogFile=""; + long pos=-1; + for (MigrateTask migrateTask : migrateTaskList) { + if(binlogFileNum==-1){ + 
binlogFileNum=Integer.parseInt(migrateTask.getBinlogFile().substring(migrateTask.getBinlogFile().lastIndexOf(".")+1)) ; + binlogFile=migrateTask.getBinlogFile(); + pos=migrateTask.getPos(); + } else{ + int tempBinlogFileNum=Integer.parseInt(migrateTask.getBinlogFile().substring(migrateTask.getBinlogFile().lastIndexOf(".")+1)) ; + if(tempBinlogFileNum<=binlogFileNum&&migrateTask.getPos()<=pos) { + binlogFileNum=tempBinlogFileNum; + binlogFile=migrateTask.getBinlogFile(); + pos=migrateTask.getPos(); + } + } + } + String taskPath=migrateTaskList.get(0).getZkpath(); + taskPath=taskPath.substring(0,taskPath.lastIndexOf("/")); + String taskID=taskPath.substring(taskPath.lastIndexOf('/')+1,taskPath.length()); + + //开始增量数据迁移 + PhysicalDBPool dbPool= MycatServer.getInstance().getConfig().getDataHosts().get(dataHost); + PhysicalDatasource datasource = dbPool.getSources()[dbPool.getActivedIndex()]; + DBHostConfig config = datasource.getConfig(); + BinlogStream stream=new BinlogStream(config.getUrl().substring(0,config.getUrl().indexOf(":")),config.getPort(),config.getUser(),config.getPassword()); + try { + stream.setSlaveID(migrateTaskList.get(0).getSlaveId()); + stream.setBinglogFile(binlogFile); + stream.setBinlogPos(pos); + stream.setMigrateTaskList(migrateTaskList); + BinlogStreamHoder.binlogStreamMap.put(taskID,stream); + stream.connect(); + + } catch (IOException e) { + LOGGER.error("error:",e); + } + + } + } + + + +} diff --git a/src/main/java/io/mycat/migrate/MigrateTask.java b/src/main/java/io/mycat/migrate/MigrateTask.java new file mode 100644 index 000000000..5bb060652 --- /dev/null +++ b/src/main/java/io/mycat/migrate/MigrateTask.java @@ -0,0 +1,177 @@ +package io.mycat.migrate; + +import io.mycat.route.function.PartitionByCRC32PreSlot; +import io.mycat.route.function.PartitionByCRC32PreSlot.Range; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +/** + * Created by magicdoom on 2016/9/15. 
+ */ +public class MigrateTask implements Serializable { + + private String from; + private String to; + private String table; + private List slots=new ArrayList<>(); + + private String method; + private String fclass=PartitionByCRC32PreSlot.class.getName(); + + private String schema; + + + private int slaveId; + + private transient String zkpath; + private transient String binlogFile; + private transient int pos; + private transient volatile Date lastBinlogDate; + private transient volatile boolean haserror=false; + private transient volatile int status; + + private transient volatile boolean hasExecute=false; + + public int getStatus() { + return status; + } + + public void setStatus(int status) { + this.status = status; + } + + public boolean isHaserror() { + return haserror; + } + + public void setHaserror(boolean haserror) { + this.haserror = haserror; + } + + public List getSlots() { + return slots; + } + + public void setSlots(List slots) { + this.slots = slots; + } + + public int getSize() + { int size=0; + for (Range slot : slots) { + size=size+slot.size; + } + return size; + } + + public boolean isHasExecute() { + return hasExecute; + } + + public void setHasExecute(boolean hasExecute) { + this.hasExecute = hasExecute; + } + + public String getBinlogFile() { + return binlogFile; + } + + public void setBinlogFile(String binlogFile) { + this.binlogFile = binlogFile; + } + + public int getPos() { + return pos; + } + + public void setPos(int pos) { + this.pos = pos; + } + + public String getFrom() { + return from; + } + + public Date getLastBinlogDate() { + return lastBinlogDate; + } + + public void setLastBinlogDate(Date lastBinlogDate) { + this.lastBinlogDate = lastBinlogDate; + } + + public void setFrom(String from) { + this.from = from; + } + + public String getTo() { + return to; + } + + public void setTo(String to) { + this.to = to; + } + + public String getTable() { + return table; + } + + public void setTable(String table) { + this.table = table; + } 
+ + public String getMethod() { + return method; + } + + public void setMethod(String method) { + this.method = method; + } + + public String getFclass() { + return fclass; + } + + public void setFclass(String fclass) { + this.fclass = fclass; + } + + public String getSchema() { + return schema; + } + + public void setSchema(String schema) { + this.schema = schema; + } + + public int getSlaveId() { + return slaveId; + } + + public void setSlaveId(int slaveId) { + this.slaveId = slaveId; + } + + public String getZkpath() { + return zkpath; + } + + public void setZkpath(String zkpath) { + this.zkpath = zkpath; + } + + public void addSlots(Range range) + { + slots.add(range); + } + + public void addSlots(List ranges) + { + slots.addAll(ranges); + } + + +} diff --git a/src/main/java/io/mycat/migrate/MigrateTaskWatch.java b/src/main/java/io/mycat/migrate/MigrateTaskWatch.java new file mode 100644 index 000000000..68adb58fa --- /dev/null +++ b/src/main/java/io/mycat/migrate/MigrateTaskWatch.java @@ -0,0 +1,210 @@ +package io.mycat.migrate; + +import com.alibaba.fastjson.JSON; +import com.alibaba.fastjson.JSONArray; +import com.google.common.base.Splitter; +import com.google.common.base.Strings; +import com.google.common.collect.Lists; +import io.mycat.MycatServer; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.util.ZKUtils; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.recipes.cache.PathChildrenCache; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; +import org.apache.curator.framework.recipes.locks.InterProcessMutex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.*; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +/** + * ......./migrate/schemal/taskid/datahost 
[任务数据] + * Created by magicdoom on 2016/9/28. + */ +public class MigrateTaskWatch { + private static final Logger LOGGER = LoggerFactory.getLogger(MigrateTaskWatch.class); + public static void start() + { + ZKUtils.addChildPathCache(ZKUtils.getZKBasePath() + "migrate", new PathChildrenCacheListener() { + @Override public void childEvent(CuratorFramework curatorFramework, + PathChildrenCacheEvent fevent) throws Exception { + + switch (fevent.getType()) { + case CHILD_ADDED: + LOGGER.info("table CHILD_ADDED: " + fevent.getData().getPath()); + ZKUtils.addChildPathCache(fevent.getData().getPath(),new TaskPathChildrenCacheListener()) ; + break; + default: + break; + } + } + }); + + } + + + + + + + private static class TaskPathChildrenCacheListener implements PathChildrenCacheListener { + @Override public void childEvent(CuratorFramework curatorFramework, + PathChildrenCacheEvent event) throws Exception { + switch (event.getType()) { + case CHILD_ADDED: + if(isTaskErrorOrSucess(event))break; + addOrUpdate(event); + String path = event.getData().getPath() + "/_prepare"; + if( curatorFramework.checkExists().forPath(path)==null){ + curatorFramework.create().creatingParentsIfNeeded().forPath(path); + } + ZKUtils.addChildPathCache(path,new SwitchPrepareListener()); + + String commitPath = event.getData().getPath() + "/_commit"; + if( curatorFramework.checkExists().forPath(commitPath)==null){ + curatorFramework.create().creatingParentsIfNeeded().forPath(commitPath); + } + ZKUtils.addChildPathCache(commitPath,new SwitchCommitListener()); + + + String cleanPath = event.getData().getPath() + "/_clean"; + if( curatorFramework.checkExists().forPath(cleanPath)==null){ + curatorFramework.create().creatingParentsIfNeeded().forPath(cleanPath); + } + ZKUtils.addChildPathCache(cleanPath,new SwitchCleanListener()); + LOGGER.info("table CHILD_ADDED: " + event.getData().getPath()); + break; + case CHILD_UPDATED: + if(isTaskErrorOrSucess(event))break; + addOrUpdate(event); + 
LOGGER.info("CHILD_UPDATED: " + event.getData().getPath()); + break; + default: + break; + } + } + + private boolean isTaskErrorOrSucess(PathChildrenCacheEvent event){ + try { + TaskNode pTaskNode= JSON.parseObject(event.getData().getData(),TaskNode.class); + if(pTaskNode.getStatus()>=4) { + return true; + } + } catch (Exception e){ + + } + + return false; + } + + private void addOrUpdate(PathChildrenCacheEvent event) throws Exception { + + InterProcessMutex taskLock =null; + try{ + String tpath= event.getData().getPath(); + String taskID=tpath.substring(tpath.lastIndexOf("/")+1,tpath.length()); + String lockPath= ZKUtils.getZKBasePath()+"lock/"+taskID+".lock"; + taskLock= new InterProcessMutex(ZKUtils.getConnection(), lockPath); + taskLock.acquire(20, TimeUnit.SECONDS); + String text = new String(ZKUtils.getConnection().getData().forPath(event.getData().getPath()), "UTF-8"); + + List dataNodeList= ZKUtils.getConnection().getChildren().forPath(event.getData().getPath()); + if(!dataNodeList.isEmpty()) { + if ((!Strings.isNullOrEmpty(text) )&& text.startsWith("{")) { + TaskNode taskNode = JSON.parseObject(text, TaskNode.class); + if (taskNode.getStatus() == 0) { + String boosterDataHosts = ZkConfig.getInstance().getValue(ZkParamCfg.MYCAT_BOOSTER_DATAHOSTS); + Set dataNodes = new HashSet<>(Splitter.on(",").trimResults().omitEmptyStrings().splitToList(boosterDataHosts)); + List finalMigrateList = new ArrayList<>(); + for (String s : dataNodeList) { + if ("_prepare".equals(s)) + continue; + if (dataNodes.contains(s)) { + String zkpath = event.getData().getPath() + "/" + s; + String data = new String(ZKUtils.getConnection().getData().forPath(zkpath), "UTF-8"); + List migrateTaskList = JSONArray.parseArray(data, MigrateTask.class); + for (MigrateTask migrateTask : migrateTaskList) { + migrateTask.setZkpath(zkpath); + } + finalMigrateList.addAll(migrateTaskList); + } + } + + Map> taskMap = mergerTaskForDataHost(finalMigrateList); + for (Map.Entry> stringListEntry : 
taskMap.entrySet()) { + String key = stringListEntry.getKey(); + List value = stringListEntry.getValue(); + MycatServer.getInstance().getBusinessExecutor().submit(new MigrateMainRunner(key, value)); + } + + // + taskNode.setStatus(1); + ZKUtils.getConnection().setData().forPath(event.getData().getPath(), JSON.toJSONBytes(taskNode)); + } else if (taskNode.getStatus() == 2) { + //start switch + + ScheduledExecutorService scheduledExecutorService = MycatServer.getInstance().getScheduler(); + Set allRunnerSet = SwitchPrepareCheckRunner.allSwitchRunnerSet; + if (!allRunnerSet.contains(taskID)) { + List dataHosts = ZKUtils.getConnection().getChildren().forPath(tpath); + List allTaskList = MigrateUtils.queryAllTask(tpath, removeStatusNode(dataHosts)); + allRunnerSet.add(taskID); + scheduledExecutorService.schedule(new SwitchPrepareCheckRunner(taskID, tpath, taskNode, + MigrateUtils.convertAllTask(allTaskList)), 1, TimeUnit.SECONDS); + + } + } + } + } + }finally { + if(taskLock!=null){ + taskLock.release(); + } + } + } + + private List removeStatusNode(List dataHosts){ + List resultList=new ArrayList<>(); + for (String dataHost : dataHosts) { + if("_prepare".equals(dataHost)||"_commit".equals(dataHost)||"_clean".equals(dataHost)) + { + continue; + } + resultList.add(dataHost); + } + + + + return resultList; + } + + + private static String getDataHostNameFromNode(String dataNode){ + return MycatServer.getInstance().getConfig().getDataNodes().get(dataNode).getDbPool().getHostName(); + } + + private static Map > mergerTaskForDataHost ( List migrateTaskList) + { + Map > taskMap=new HashMap<>(); + for (MigrateTask migrateTask : migrateTaskList) { + String dataHost=getDataHostNameFromNode(migrateTask.getFrom()); + if(taskMap.containsKey(dataHost)) { + taskMap.get(dataHost).add(migrateTask); + } else + { + taskMap.put(dataHost, Lists.newArrayList(migrateTask)) ; + } + } + + + return taskMap; + } + + + + } +} diff --git a/src/main/java/io/mycat/migrate/MigrateUtils.java 
b/src/main/java/io/mycat/migrate/MigrateUtils.java new file mode 100644 index 000000000..252d747f7 --- /dev/null +++ b/src/main/java/io/mycat/migrate/MigrateUtils.java @@ -0,0 +1,276 @@ +package io.mycat.migrate; + +import com.alibaba.druid.util.JdbcUtils; +import com.alibaba.fastjson.JSON; +import com.google.common.base.Joiner; +import com.google.common.base.Splitter; +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.config.model.DBHostConfig; +import io.mycat.route.function.PartitionByCRC32PreSlot; +import io.mycat.util.ZKUtils; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.*; + +import static io.mycat.route.function.PartitionByCRC32PreSlot.*; + +/** + * Created by magicdoom on 2016/9/16. + */ +public class MigrateUtils { + + /** + *扩容计算,以每一个源节点到一个目标节点为一个任务 + * @param table + * @param integerListMap 会进行修改,所以传入前请自己clone一份 + * @param oldDataNodes + * @param newDataNodes + * @param slotsTotalNum + * @return + */ + public static Map> balanceExpand(String table, Map> integerListMap, List oldDataNodes, + List newDataNodes,int slotsTotalNum) { + + int newNodeSize = oldDataNodes.size() + newDataNodes.size(); + int newSlotPerNode =slotsTotalNum / newNodeSize; + Map> newNodeTask = new HashMap<>(); + int gb=slotsTotalNum-newSlotPerNode*(newNodeSize); + for (int i = 0; i < integerListMap.size(); i++) { + + List rangeList = integerListMap.get(i); + int needMoveNum = getCurTotalSize(rangeList) - newSlotPerNode; + List allMoveList = getPartAndRemove(rangeList, needMoveNum); + for (int i1 = 0; i1 < newDataNodes.size(); i1++) { + String newDataNode = newDataNodes.get(i1); + if (allMoveList.size() == 0) + break; + List curRangeList = newNodeTask.get(newDataNode); + if (curRangeList == null) + curRangeList = new ArrayList<>(); + int 
hasSlots = getCurTotalSizeForTask(curRangeList); + int needMove =( i1==0)?newSlotPerNode - hasSlots+gb:newSlotPerNode - hasSlots; + if (needMove > 0) { + List moveList = getPartAndRemove(allMoveList, needMove); + MigrateTask task = new MigrateTask(); + task.setFrom( oldDataNodes.get(i)); + task.setTo( newDataNode); + task.setTable(table); + task.setSlots( moveList); + curRangeList.add(task); + newNodeTask.put(newDataNode, curRangeList); + } + } + + if(allMoveList.size()>0) + { + throw new RuntimeException("some slot has not moved to") ; + } + + + } + + return newNodeTask; + } + + + + private static List getPartAndRemove(List rangeList, int size) { + List result = new ArrayList<>(); + + for (int i = 0; i < rangeList.size(); i++) { + + Range range = rangeList.get(i); + if (range == null) + continue; + if (range.size == size) { + result.add(new Range(range.start, range.end)); + rangeList.set(i,null); + break; + } else if (range.size < size) { + result.add(new Range(range.start, range.end)); + size = size - range.size; + rangeList.set(i,null); + } else if (range.size > size) { + result.add(new Range(range.start, range.start+ size - 1)); + rangeList.set(i, new Range(range.start+ size, range.end)); + break; + } + + } + + for (int i = rangeList.size() - 1; i >= 0; i--) { + Range range = rangeList.get(i); + if (range == null) + rangeList.remove(i) ; + } + return result; + } + + private static int getCurTotalSizeForTask(List rangeList) { + int size = 0; + for (MigrateTask task : rangeList) { + size = size + getCurTotalSize(task.getSlots()); + } + return size; + } + + + public static List removeAndGetRemain(List oriRangeList, List rangeList) { + for (Range range : rangeList) { + oriRangeList=removeAndGetRemain(oriRangeList,range) ; + } + return oriRangeList; + } + + private static List removeAndGetRemain(List oriRangeList, Range newRange){ + List result=new ArrayList<>(); + for (Range range : oriRangeList) { + result.addAll(removeAndGetRemain(range,newRange)); + } + return 
result; + } + + private static List removeAndGetRemain(Range oriRange, Range newRange) { + + List result=new ArrayList<>(); + if(newRange.start>oriRange.end||newRange.end=oriRange.end){ + return result; + } else if(newRange.start>oriRange.start&&newRange.endoriRange.start&&newRange.end>=oriRange.end){ + result.add(new Range(oriRange.start,newRange.start-1)) ; + } + + + return result; + } + public static String convertRangeListToString(List rangeList) + { List rangeStringList=new ArrayList<>(); + for (Range range : rangeList) { + if(range.start==range.end){ + rangeStringList.add(String.valueOf(range.start)) ; + } else{ + rangeStringList.add(range.start+"-"+range.end) ; + } + } + return Joiner.on(',').join(rangeStringList); + } + + public static List convertRangeStringToList(String rangeStr){ + List ranges = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(rangeStr); + List rangeList = new ArrayList<>(); + for (String range : ranges) { + List vv = Splitter.on("-").omitEmptyStrings().trimResults().splitToList(range); + if (vv.size() == 2) { + Range ran = new Range(Integer.parseInt(vv.get(0)), Integer.parseInt(vv.get(1))); + rangeList.add(ran); + + } else if (vv.size() == 1) { + Range ran = new Range(Integer.parseInt(vv.get(0)), Integer.parseInt(vv.get(0))); + rangeList.add(ran); + + } else { + throw new RuntimeException("load crc32slot datafile error:dn=value=" + range); + } + } + return rangeList; + } + + public static int getCurTotalSize(List rangeList) { + int size = 0; + for (Range range : rangeList) { + size = size + range.size; + } + return size; + } + + public static String getDatabaseFromDataNode(String dn){ + return MycatServer.getInstance().getConfig().getDataNodes().get(dn).getDatabase(); + } + public static String getDataHostFromDataNode(String dn){ + return MycatServer.getInstance().getConfig().getDataNodes().get(dn).getDbPool().getHostName(); + } + public static List convertAllTask(List allTasks){ + List resutlList=new ArrayList<>(); + for 
(MigrateTask allTask : allTasks) { + resutlList.addAll(allTask.getSlots()); + } + return resutlList; + } + public static List queryAllTask(String basePath, List dataHost) throws Exception { + List resutlList=new ArrayList<>(); + for (String dataHostName : dataHost) { + if("_prepare".equals(dataHostName)||"_commit".equals(dataHostName)||"_clean".equals(dataHostName)) + continue; + resutlList.addAll( JSON + .parseArray(new String(ZKUtils.getConnection().getData().forPath(basePath+"/"+dataHostName),"UTF-8") ,MigrateTask.class)); + } + return resutlList; + } + + public static String makeCountSql(MigrateTask task){ + StringBuilder sb=new StringBuilder(); + sb.append("select count(*) as count from "); + sb.append(task.getTable()).append(" where "); + List slots = task.getSlots(); + for (int i = 0; i < slots.size(); i++) { + Range range = slots.get(i); + if(i!=0) + sb.append(" or "); + if(range.start==range.end){ + sb.append(" _slot=").append(range.start); + } else { + sb.append(" (_slot>=").append(range.start); + sb.append(" and _slot<=").append(range.end).append(")"); + } + } + return sb.toString(); + } + + public static void execulteSql(String sql,String toDn) throws SQLException, IOException { + PhysicalDBNode dbNode = MycatServer.getInstance().getConfig().getDataNodes().get(toDn); + PhysicalDBPool dbPool = dbNode.getDbPool(); + PhysicalDatasource datasource = dbPool.getSources()[dbPool.getActivedIndex()]; + DBHostConfig config = datasource.getConfig(); + Connection con = null; + try { + con = DriverManager + .getConnection("jdbc:mysql://"+config.getUrl()+"/"+dbNode.getDatabase(),config.getUser(),config.getPassword()); + + JdbcUtils.execute(con,sql, new ArrayList<>()); + + } finally{ + JdbcUtils.close(con); + } + + } + public static long execulteCount(String sql,String toDn) throws SQLException, IOException { + PhysicalDBNode dbNode = MycatServer.getInstance().getConfig().getDataNodes().get(toDn); + PhysicalDBPool dbPool = dbNode.getDbPool(); + PhysicalDatasource 
datasource = dbPool.getSources()[dbPool.getActivedIndex()]; + DBHostConfig config = datasource.getConfig(); + Connection con = null; + try { + con = DriverManager.getConnection("jdbc:mysql://"+config.getUrl()+"/"+dbNode.getDatabase(),config.getUser(),config.getPassword()); + + List> result= JdbcUtils.executeQuery(con,sql, new ArrayList<>()); + if(result.size()==1){ + return (long) result.get(0).get("count"); + } + } finally{ + JdbcUtils.close(con); + } + return 0; + } +} diff --git a/src/main/java/io/mycat/migrate/SqlExecuteListener.java b/src/main/java/io/mycat/migrate/SqlExecuteListener.java new file mode 100644 index 000000000..3fa2f18d7 --- /dev/null +++ b/src/main/java/io/mycat/migrate/SqlExecuteListener.java @@ -0,0 +1,85 @@ +package io.mycat.migrate; + +import com.alibaba.fastjson.JSON; +import io.mycat.sqlengine.SQLJob; +import io.mycat.sqlengine.SQLQueryResult; +import io.mycat.sqlengine.SQLQueryResultListener; +import io.mycat.util.ZKUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Map; +import java.util.concurrent.Semaphore; + +/** + * Created by nange on 2016/12/13. 
+ */ +public class SqlExecuteListener implements SQLQueryResultListener>> { + private static final Logger LOGGER = LoggerFactory.getLogger(SqlExecuteListener.class); + private MigrateTask task; + private String sql ; + private BinlogStream binlogStream; + private Semaphore semaphore; + private volatile SQLJob sqlJob; + + public SQLJob getSqlJob() { + return sqlJob; + } + + public void setSqlJob(SQLJob sqlJob) { + this.sqlJob = sqlJob; + } + + public SqlExecuteListener(MigrateTask task, String sql, BinlogStream binlogStream,Semaphore semaphore) { + this.task = task; + this.sql = sql; + this.binlogStream = binlogStream; + this.semaphore=semaphore; + } + + @Override public void onResult(SQLQueryResult> result) { + try { + if (!result.isSuccess()) { + try { + task.setHaserror(true); + pushMsgToZK(task.getZkpath(), task.getFrom() + "-" + task.getTo(), 2, "sql:" + sql + ";" + result.getErrMsg()); + close("sucess"); + binlogStream.disconnect(); + } catch (Exception e) { + LOGGER.error("error:", e); + close(e.getMessage()); + } + } else { + close("sucess"); + } + + task.setHasExecute(false); + }finally { + semaphore.release(); + } + } + + + + private void pushMsgToZK(String rootZkPath,String child,int status,String msg) throws Exception { + String path = rootZkPath + "/" + child; + TaskStatus taskStatus=new TaskStatus(); + taskStatus.setMsg(msg); + taskStatus.setStatus(status); + task.setStatus(status); + + if(ZKUtils.getConnection().checkExists().forPath(path)==null ) + { + ZKUtils.getConnection().create().forPath(path, JSON.toJSONBytes(taskStatus)) ; + } else{ + ZKUtils.getConnection().setData().forPath(path, JSON.toJSONBytes(taskStatus)) ; + } + } + public void close(String msg) { + SQLJob curJob = sqlJob; + if (curJob != null) { + curJob.teminate(msg); + sqlJob = null; + } + } +} diff --git a/src/main/java/io/mycat/migrate/SwitchCleanListener.java b/src/main/java/io/mycat/migrate/SwitchCleanListener.java new file mode 100644 index 000000000..dbb84321e --- /dev/null +++ 
b/src/main/java/io/mycat/migrate/SwitchCleanListener.java @@ -0,0 +1,118 @@ +package io.mycat.migrate; + +import com.alibaba.fastjson.JSON; +import com.google.common.base.Splitter; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.config.loader.zkprocess.zookeeper.ClusterInfo; +import io.mycat.route.RouteCheckRule; +import io.mycat.util.ZKUtils; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; +import org.apache.curator.framework.recipes.locks.InterProcessMutex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** 清理本地的阻止写的规则 slaveID relese create table + * Ceated by magicdoom on 2016/12/19. + */ +public class SwitchCleanListener implements PathChildrenCacheListener { + private static final Logger LOGGER = LoggerFactory.getLogger(SwitchCleanListener.class); + @Override public void childEvent(CuratorFramework curatorFramework, + PathChildrenCacheEvent event) throws Exception { + switch (event.getType()) { + case CHILD_ADDED: + checkSwitch(event); + break; + default: + break; + } + } + private void checkSwitch(PathChildrenCacheEvent event) { + InterProcessMutex taskLock =null; + try { + + String path=event.getData().getPath(); + String taskPath=path.substring(0,path.lastIndexOf("/_clean/")) ; + String taskID=taskPath.substring(taskPath.lastIndexOf('/')+1,taskPath.length()); + String lockPath= ZKUtils.getZKBasePath()+"lock/"+taskID+".lock"; + List sucessDataHost= ZKUtils.getConnection().getChildren().forPath(path.substring(0,path.lastIndexOf('/'))); + TaskNode pTaskNode= JSON.parseObject(ZKUtils.getConnection().getData().forPath(taskPath),TaskNode.class); + + String custerName = 
ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_CLUSTERID); + ClusterInfo clusterInfo= JSON.parseObject(ZKUtils.getConnection().getData().forPath("/mycat/"+custerName) , ClusterInfo.class); + List clusterNodeList= Splitter.on(',').omitEmptyStrings().splitToList(clusterInfo.getClusterNodes()); + if(sucessDataHost.size()==clusterNodeList.size()) { + + RouteCheckRule.migrateRuleMap.remove(pTaskNode.getSchema().toUpperCase()); + + List needToCloseWatch=new ArrayList<>(); + List dataHosts= ZKUtils.getConnection().getChildren().forPath(taskPath); + for (String dataHostName : dataHosts) { + if ("_prepare".equals(dataHostName) || "_commit".equals(dataHostName) || "_clean".equals(dataHostName)) + { + needToCloseWatch.add( taskPath+"/"+dataHostName ); + } + } + ZKUtils.closeWatch(needToCloseWatch); + + + taskLock= new InterProcessMutex(ZKUtils.getConnection(), lockPath); + taskLock.acquire(20, TimeUnit.SECONDS); + TaskNode taskNode= JSON.parseObject(ZKUtils.getConnection().getData().forPath(taskPath),TaskNode.class); + if(taskNode.getStatus()==3){ + taskNode.setStatus(5); //clean sucess + + //释放slaveIDs + + + + for (String dataHostName : dataHosts) { + if("_prepare".equals(dataHostName)||"_commit".equals(dataHostName)||"_clean".equals(dataHostName)) + continue; + List migrateTaskList= JSON + .parseArray(new String(ZKUtils.getConnection().getData().forPath(taskPath+"/"+dataHostName),"UTF-8") ,MigrateTask.class); + int slaveId= migrateTaskList.get(0).getSlaveId(); + String slavePath=ZKUtils.getZKBasePath()+"slaveIDs/"+dataHostName+"/"+slaveId; + if( ZKUtils.getConnection().checkExists().forPath(slavePath)!=null) { + ZKUtils.getConnection().delete().forPath(slavePath); + } + } + + + + + + + ZKUtils.getConnection().setData().forPath(taskPath,JSON.toJSONBytes(taskNode)) ; + LOGGER.info("task end",new Date()); + } + + } + + } catch (Exception e) { + LOGGER.error("error:",e); + } + finally { + if(taskLock!=null){ + try { + taskLock.release(); + } catch (Exception ignored) { + + } 
+ } + } + } + + + + + + +} diff --git a/src/main/java/io/mycat/migrate/SwitchCommitListener.java b/src/main/java/io/mycat/migrate/SwitchCommitListener.java new file mode 100644 index 000000000..1afff3515 --- /dev/null +++ b/src/main/java/io/mycat/migrate/SwitchCommitListener.java @@ -0,0 +1,315 @@ +package io.mycat.migrate; + +import com.alibaba.druid.util.JdbcUtils; +import com.alibaba.fastjson.JSON; +import com.alibaba.fastjson.JSONArray; +import com.alibaba.fastjson.JSONObject; +import com.google.common.base.Joiner; +import com.google.common.base.Splitter; +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.config.loader.zkprocess.zookeeper.ClusterInfo; +import io.mycat.config.model.DBHostConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.config.model.rule.RuleConfig; +import io.mycat.route.function.PartitionByCRC32PreSlot.Range; +import io.mycat.route.function.TableRuleAware; +import io.mycat.util.ZKUtils; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.api.transaction.CuratorTransactionFinal; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; +import org.apache.curator.framework.recipes.locks.InterProcessMutex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.*; +import java.util.concurrent.TimeUnit; + + + +/** + * Created by magicdoom on 2016/12/19. 
+ */ +public class SwitchCommitListener implements PathChildrenCacheListener { + private static final Logger LOGGER = LoggerFactory.getLogger(SwitchCommitListener.class); + @Override public void childEvent(CuratorFramework curatorFramework, + PathChildrenCacheEvent event) throws Exception { + switch (event.getType()) { + case CHILD_ADDED: + checkCommit(event); + break; + default: + break; + } + } + private void checkCommit(PathChildrenCacheEvent event) { + InterProcessMutex taskLock =null; + try { + + String path=event.getData().getPath(); + String taskPath=path.substring(0,path.lastIndexOf("/_commit/")) ; + String taskID=taskPath.substring(taskPath.lastIndexOf('/')+1,taskPath.length()); + String lockPath= ZKUtils.getZKBasePath()+"lock/"+taskID+".lock"; + List sucessDataHost= ZKUtils.getConnection().getChildren().forPath(path.substring(0,path.lastIndexOf('/'))); + String custerName = ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_CLUSTERID); + ClusterInfo clusterInfo= JSON.parseObject(ZKUtils.getConnection().getData().forPath("/mycat/"+custerName) , ClusterInfo.class); + List clusterNodeList= Splitter.on(',').omitEmptyStrings().splitToList(clusterInfo.getClusterNodes()); + if(sucessDataHost.size()==clusterNodeList.size()){ + + List taskDataHost= ZKUtils.getConnection().getChildren().forPath(taskPath); + List allTaskList=MigrateUtils.queryAllTask(taskPath,taskDataHost); + taskLock= new InterProcessMutex(ZKUtils.getConnection(), lockPath); + taskLock.acquire(120, TimeUnit.SECONDS); + TaskNode taskNode= JSON.parseObject(ZKUtils.getConnection().getData().forPath(taskPath),TaskNode.class); + if(taskNode.getStatus()==2){ + taskNode.setStatus(3); + //开始切换 且个节点已经禁止写入并且无原有写入在执行 + try { + + + CuratorTransactionFinal transactionFinal=null; + check(taskID,allTaskList); + SchemaConfig schemaConfig = MycatServer.getInstance().getConfig().getSchemas().get(taskNode.getSchema()); + TableConfig tableConfig = schemaConfig.getTables().get(taskNode.getTable().toUpperCase()); + List 
newDataNodes = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(taskNode.getAdd()); + List allNewDataNodes = tableConfig.getDataNodes(); + allNewDataNodes.addAll(newDataNodes); + //先修改rule config + InterProcessMutex ruleLock = new InterProcessMutex(ZKUtils.getConnection(), ZKUtils.getZKBasePath()+"lock/rules.lock");; + try { + ruleLock.acquire(30, TimeUnit.SECONDS); + transactionFinal= modifyZkRules(transactionFinal,tableConfig.getRule().getFunctionName(),newDataNodes); + transactionFinal= modifyTableConfigRules(transactionFinal,taskNode.getSchema(),taskNode.getTable(),newDataNodes); + } + finally { + ruleLock.release(); + } + + transactionFinal= modifyRuleData(transactionFinal,allTaskList,tableConfig,allNewDataNodes); + transactionFinal.setData().forPath(taskPath,JSON.toJSONBytes(taskNode)); + + clean(taskID,allTaskList); + transactionFinal.commit() ; + + forceTableRuleToLocal(); + pushACKToClean(taskPath); + } catch (Exception e){ + //todo 异常to Zk + LOGGER.error("error:",e); + } + //todo 清理规则 顺利拉下ruledata保证一定更新到本地 + + }else if(taskNode.getStatus()==3){ + forceTableRuleToLocal(); + pushACKToClean(taskPath); + } + + } + + + + } catch (Exception e) { + LOGGER.error("error:",e); + } + finally { + if(taskLock!=null){ + try { + taskLock.release(); + } catch (Exception ignored) { + + } + } + } + } + + private void forceTableRuleToLocal(){} + + private void pushACKToClean(String taskPath) throws Exception { + String myID= ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_MYID); + String path=taskPath+"/_clean/"+myID; + if(ZKUtils.getConnection().checkExists().forPath(path)==null ){ + ZKUtils.getConnection().create().creatingParentsIfNeeded().forPath(path); + } + } + + + private void clean( String taskID,List allTaskList) throws IOException, SQLException { + + //clean + for (MigrateTask migrateTask : allTaskList) { + String sql=makeCleanSql(migrateTask); + MigrateUtils.execulteSql(sql,migrateTask.getFrom()); + } + } + + private void check(String taskID, List 
allTaskList) throws SQLException, IOException { + for (MigrateTask migrateTask : allTaskList) { + String sql= MigrateUtils.makeCountSql(migrateTask); + long oldCount=MigrateUtils.execulteCount(sql,migrateTask.getFrom()); + long newCount=MigrateUtils.execulteCount(sql,migrateTask.getTo()); + if(oldCount!=newCount){ + throw new RuntimeException("migrate task ("+taskID+") check fail,because fromNode:" + +migrateTask.getFrom()+"("+oldCount+")"+" and toNode:"+migrateTask.getTo()+"("+newCount+") and sql is "+sql); + } + } + } + + private String makeCleanSql(MigrateTask task){ + StringBuilder sb=new StringBuilder(); + sb.append("delete from "); + sb.append(task.getTable()).append(" where "); + List slots = task.getSlots(); + for (int i = 0; i < slots.size(); i++) { + Range range = slots.get(i); + if(i!=0) + sb.append(" or "); + if(range.start==range.end){ + sb.append(" _slot=").append(range.start); + } else { + sb.append("( _slot>=").append(range.start); + sb.append(" and _slot<=").append(range.end).append(")"); + } + } + return sb.toString(); + } + + + + private CuratorTransactionFinal modifyRuleData( CuratorTransactionFinal transactionFinal, List allTaskList, TableConfig tableConfig,List allNewDataNodes ) + throws Exception { + + InterProcessMutex ruleDataLock =null; + try { + String path= ZKUtils.getZKBasePath()+"lock/ruledata.lock"; + ruleDataLock= new InterProcessMutex(ZKUtils.getConnection(), path); + ruleDataLock.acquire(30, TimeUnit.SECONDS); + RuleConfig ruleConfig= tableConfig.getRule(); + String ruleName=((TableRuleAware)ruleConfig.getRuleAlgorithm()).getRuleName()+".properties"; + String rulePath=ZKUtils.getZKBasePath()+"ruledata/"+ruleName; + CuratorFramework zk = ZKUtils.getConnection(); + byte[] ruleData=zk.getData().forPath(rulePath); + Properties prop = new Properties(); + prop.load(new ByteArrayInputStream(ruleData)); + for (MigrateTask migrateTask : allTaskList) { + modifyRuleData(prop,migrateTask,allNewDataNodes); + } + ByteArrayOutputStream out=new 
ByteArrayOutputStream(); + prop.store(out, "WARNING !!!Please do not modify or delete this file!!!"); + if(transactionFinal==null){ + transactionFinal= ZKUtils.getConnection().inTransaction().setData().forPath(rulePath, out.toByteArray()).and(); + } else { + transactionFinal.setData().forPath(rulePath, out.toByteArray()); + } + }finally { + try { + if(ruleDataLock!=null) + ruleDataLock.release(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + return transactionFinal; + } + + private void modifyRuleData( Properties prop ,MigrateTask task ,List allNewDataNodes){ + int fromIndex=-1; + int toIndex=-1; + List dataNodes= allNewDataNodes; + for (int i = 0; i < dataNodes.size(); i++) { + String dataNode = dataNodes.get(i); + if(dataNode.equalsIgnoreCase(task.getFrom())){ + fromIndex=i; + } else + if(dataNode.equalsIgnoreCase(task.getTo())){ + toIndex=i; + } + } + String from= prop.getProperty(String.valueOf(fromIndex)) ; + String to= prop.getProperty(String.valueOf(toIndex)) ; + String fromRemain=removeRangesRemain(from,task.getSlots()); + String taskRanges = MigrateUtils.convertRangeListToString(task.getSlots()); + String newTo=to==null? 
taskRanges : to+","+taskRanges; + prop.setProperty(String.valueOf(fromIndex),fromRemain); + prop.setProperty(String.valueOf(toIndex),newTo); + } + + private String removeRangesRemain(String ori,List rangeList){ + List ranges=MigrateUtils.convertRangeStringToList(ori); + List ramain= MigrateUtils.removeAndGetRemain(ranges,rangeList); + return MigrateUtils.convertRangeListToString(ramain); + } + + + + private static CuratorTransactionFinal modifyZkRules( CuratorTransactionFinal transactionFinal,String ruleName ,List newDataNodes ) + throws Exception { + CuratorFramework client= ZKUtils.getConnection(); + String rulePath= ZKUtils.getZKBasePath() + "rules/function"; + JSONArray jsonArray= JSON.parseArray(new String(client.getData().forPath(rulePath) ,"UTF-8")) ; + for (Object obj: jsonArray) { + JSONObject func= (JSONObject) obj; + if(ruleName.equalsIgnoreCase(func.getString("name"))) { + JSONArray property= func.getJSONArray("property") ; + for (Object o : property) { + JSONObject count= (JSONObject) o; + if("count".equals(count.getString("name"))){ + Integer xcount=Integer.parseInt( count.getString("value")) ; + count.put("value",String.valueOf(xcount+newDataNodes.size())) ; + + if(transactionFinal==null){ + transactionFinal= ZKUtils.getConnection().inTransaction().setData().forPath(rulePath, JSON.toJSONBytes(jsonArray)).and(); + } else { + transactionFinal.setData().forPath(rulePath, JSON.toJSONBytes(jsonArray)); + } + } + } + } + + } + return transactionFinal; + } + + private static CuratorTransactionFinal modifyTableConfigRules( CuratorTransactionFinal transactionFinal,String schemal,String table ,List newDataNodes ) + throws Exception { + CuratorFramework client= ZKUtils.getConnection(); + String rulePath= ZKUtils.getZKBasePath() + "schema/schema"; + JSONArray jsonArray= JSON.parseArray(new String(client.getData().forPath(rulePath) ,"UTF-8")) ; + for (Object obj: jsonArray) { + JSONObject func= (JSONObject) obj; + 
if(schemal.equalsIgnoreCase(func.getString("name"))) { + + JSONArray property = func.getJSONArray("table"); + for (Object o : property) { + JSONObject tt= (JSONObject) o; + String tableName = tt.getString("name"); + String dataNode = tt.getString("dataNode"); + if (table.equalsIgnoreCase(tableName)) { + List allDataNodes = new ArrayList<>(); + allDataNodes.add(dataNode); + allDataNodes.addAll(newDataNodes); + tt.put("dataNode", Joiner.on(",").join(allDataNodes)); + if(transactionFinal==null){ + transactionFinal= ZKUtils.getConnection().inTransaction().setData().forPath(rulePath, JSON.toJSONBytes(jsonArray)).and(); + } else { + transactionFinal.setData().forPath(rulePath, JSON.toJSONBytes(jsonArray)); + } + } + + } + } + } + return transactionFinal; + } +} diff --git a/src/main/java/io/mycat/migrate/SwitchPrepareCheckRunner.java b/src/main/java/io/mycat/migrate/SwitchPrepareCheckRunner.java new file mode 100644 index 000000000..affa74e13 --- /dev/null +++ b/src/main/java/io/mycat/migrate/SwitchPrepareCheckRunner.java @@ -0,0 +1,160 @@ +package io.mycat.migrate; + +import com.alibaba.fastjson.JSON; +import com.google.common.collect.Sets; +import io.mycat.MycatServer; +import io.mycat.backend.BackendConnection; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.net.NIOProcessor; +import io.mycat.route.RouteCheckRule; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteResultsetNode; +import io.mycat.route.function.PartitionByCRC32PreSlot; +import io.mycat.util.ZKUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Collection; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +/** + * Created by nange on 2016/12/20. 
+ */ +public class SwitchPrepareCheckRunner implements Runnable { + private static final Logger LOGGER = LoggerFactory.getLogger(SwitchPrepareListener.class); + public static Set allSwitchRunnerSet= Sets.newConcurrentHashSet(); + + private String taskID; + private String taskPath; + private TaskNode taskNode; + private List rangeList; + + public SwitchPrepareCheckRunner( String taskID, String taskPath, + TaskNode taskNode,List rangeList) { + this.taskID = taskID; + this.taskPath = taskPath; + this.taskNode = taskNode; + this.rangeList=rangeList; + } + + @Override public void run() { + if(!allSwitchRunnerSet.contains(taskID)){ + return; + } + ScheduledExecutorService scheduledExecutorService= MycatServer.getInstance().getScheduler(); + ConcurrentMap>> migrateRuleMap = RouteCheckRule.migrateRuleMap; + String schemal = taskNode.getSchema().toUpperCase(); + if(!migrateRuleMap.containsKey(schemal)||!migrateRuleMap.get( + schemal).containsKey(taskNode.getTable().toUpperCase())){ + scheduledExecutorService.schedule(this,3, TimeUnit.SECONDS); + return; + } + boolean isHasInTransation=false; + NIOProcessor[] processors=MycatServer.getInstance().getProcessors(); + for (NIOProcessor processor : processors) { + Collection backendConnections= processor.getBackends().values(); + for (BackendConnection backendConnection : backendConnections) { + isHasInTransation= checkIsInTransation(backendConnection); + if(isHasInTransation){ + scheduledExecutorService.schedule(this,3, TimeUnit.SECONDS); + return; + } + } + } + + for (BackendConnection backendConnection : NIOProcessor.backends_old) { + isHasInTransation= checkIsInTransation(backendConnection); + if(isHasInTransation){ + scheduledExecutorService.schedule(this,3, TimeUnit.SECONDS); + return; + } + } + + //增加判断binlog完成 + if(!isHasInTransation){ + try { + + //先判断后端binlog都完成了才算本任务完成 + boolean allIncrentmentSucess=true; + List dataHosts= ZKUtils.getConnection().getChildren().forPath(taskPath); + for (String dataHostName : dataHosts) 
{ + if("_prepare".equals(dataHostName)||"_commit".equals(dataHostName)||"_clean".equals(dataHostName)) + continue; + List migrateTaskList= JSON + .parseArray(new String(ZKUtils.getConnection().getData().forPath(taskPath+"/"+dataHostName),"UTF-8") ,MigrateTask.class); + for (MigrateTask migrateTask : migrateTaskList) { + String zkPath =taskPath+"/"+dataHostName+ "/" + migrateTask.getFrom() + "-" + migrateTask.getTo(); + if (ZKUtils.getConnection().checkExists().forPath(zkPath) != null) { + TaskStatus taskStatus = JSON.parseObject( + new String(ZKUtils.getConnection().getData().forPath(zkPath), "UTF-8"), TaskStatus.class); + if (taskStatus.getStatus() != 3) { + allIncrentmentSucess=false; + break; + } + }else{ + allIncrentmentSucess=false; + break; + } + } + } + if(allIncrentmentSucess) { + //需要关闭binlog,不然后续的清楚老数据会删除数据 + BinlogStream stream= BinlogStreamHoder.binlogStreamMap.get(taskID); + if(stream!=null){ + BinlogStreamHoder.binlogStreamMap.remove(taskID); + stream.disconnect(); + } + + String myID = ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_MYID); + String path = taskPath + "/_commit/" + myID; + if (ZKUtils.getConnection().checkExists().forPath(path) == null) { + ZKUtils.getConnection().create().creatingParentsIfNeeded().forPath(path); + } + allSwitchRunnerSet.remove(taskID); + } else { + scheduledExecutorService.schedule(this,3, TimeUnit.SECONDS); + } + } catch (Exception e) { + LOGGER.error("error:",e); + } + } + + } + + + + + private boolean checkIsInTransation(BackendConnection backendConnection) { + if(!taskNode.getSchema().equalsIgnoreCase(backendConnection.getSchema())) + return false; + + Object attach= backendConnection.getAttachment(); + if(attach instanceof RouteResultsetNode) { + RouteResultsetNode resultsetNode= (RouteResultsetNode) attach; + RouteResultset rrs= resultsetNode.getSource(); + for (String table : rrs.getTables()) { + if(table.equalsIgnoreCase(taskNode.getTable())) { + int slot = resultsetNode.getSlot(); + if(slot 
<0&&resultsetNode.isUpdateSql()) + { + return true; + + } else if(resultsetNode.isUpdateSql()) { + for (PartitionByCRC32PreSlot.Range range : rangeList) { + if(slot>=range.start&&slot<=range.end){ + return true; + } + } + } + } + } + } + return false; + } + +} diff --git a/src/main/java/io/mycat/migrate/SwitchPrepareListener.java b/src/main/java/io/mycat/migrate/SwitchPrepareListener.java new file mode 100644 index 000000000..24010b4c7 --- /dev/null +++ b/src/main/java/io/mycat/migrate/SwitchPrepareListener.java @@ -0,0 +1,105 @@ +package io.mycat.migrate; + +import com.alibaba.fastjson.JSON; +import io.mycat.route.RouteCheckRule; +import io.mycat.route.function.PartitionByCRC32PreSlot; +import io.mycat.util.ZKUtils; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; +import org.apache.curator.framework.recipes.locks.InterProcessMutex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; + +/** + * Created by magicdoom on 2016/12/19. 
+ */ +public class SwitchPrepareListener implements PathChildrenCacheListener { + private static final Logger LOGGER = LoggerFactory.getLogger(SwitchPrepareListener.class); + @Override public void childEvent(CuratorFramework curatorFramework, + PathChildrenCacheEvent event) throws Exception { + switch (event.getType()) { + case CHILD_ADDED: + checkSwitch(event); + break; + default: + break; + } + } + private void checkSwitch(PathChildrenCacheEvent event) { + InterProcessMutex taskLock =null; + try { + + String path=event.getData().getPath(); + String taskPath=path.substring(0,path.lastIndexOf("/_prepare/")) ; + String taskID=taskPath.substring(taskPath.lastIndexOf('/')+1,taskPath.length()); + String lockPath= ZKUtils.getZKBasePath()+"lock/"+taskID+".lock"; + List sucessDataHost= ZKUtils.getConnection().getChildren().forPath(path.substring(0,path.lastIndexOf('/'))); + List allTaskList=MigrateUtils.queryAllTask(taskPath,sucessDataHost); + TaskNode pTaskNode= JSON.parseObject(ZKUtils.getConnection().getData().forPath(taskPath),TaskNode.class); + + ConcurrentMap> tableRuleMap= + RouteCheckRule.migrateRuleMap.containsKey(pTaskNode.getSchema().toUpperCase()) ? 
+ RouteCheckRule.migrateRuleMap.get(pTaskNode.getSchema().toUpperCase()) : + new ConcurrentHashMap(); + tableRuleMap.put(pTaskNode.getTable().toUpperCase(),MigrateUtils.convertAllTask(allTaskList)); + RouteCheckRule.migrateRuleMap.put(pTaskNode.getSchema().toUpperCase(),tableRuleMap); + + + taskLock= new InterProcessMutex(ZKUtils.getConnection(), lockPath); + taskLock.acquire(20, TimeUnit.SECONDS); + + List dataHost= ZKUtils.getConnection().getChildren().forPath(taskPath) ; + if(getRealSize(dataHost)==sucessDataHost.size()){ + TaskNode taskNode= JSON.parseObject(ZKUtils.getConnection().getData().forPath(taskPath),TaskNode.class); + if(taskNode.getStatus()==1){ + taskNode.setStatus(2); //prepare switch + LOGGER.info("task switch:",new Date()); + ZKUtils.getConnection().setData().forPath(taskPath,JSON.toJSONBytes(taskNode)) ; + } + } + } catch (Exception e) { + LOGGER.error("error:",e); + } + finally { + if(taskLock!=null){ + try { + taskLock.release(); + } catch (Exception ignored) { + + } + } + } + } + + private int getRealSize(List dataHosts){ + int size=dataHosts.size(); + Set set=new HashSet(dataHosts); + if(set.contains("_prepare")) { + size=size-1; + } + if(set.contains("_commit")) { + size=size-1; + } + if(set.contains("_clean")) { + size=size-1; + } + return size; + } + + + + + + + + +} diff --git a/src/main/java/io/mycat/migrate/TaskNode.java b/src/main/java/io/mycat/migrate/TaskNode.java new file mode 100644 index 000000000..db797696e --- /dev/null +++ b/src/main/java/io/mycat/migrate/TaskNode.java @@ -0,0 +1,54 @@ +package io.mycat.migrate; + +import java.io.Serializable; + +/** + * Created by magicdoom on 2016/9/28. 
+ */ +public class TaskNode implements Serializable { + private String sql; + private int status ; //0=init 1=start 2=prepare switch 3=commit sucess 4=error 5=clean sucess 6=error process end + private String schema; + private String table; + private String add; + + public String getSql() { + return sql; + } + + public String getTable() { + return table; + } + + public void setTable(String table) { + this.table = table; + } + + public String getAdd() { + return add; + } + + public void setAdd(String add) { + this.add = add; + } + + public void setSql(String sql) { + this.sql = sql; + } + + public int getStatus() { + return status; + } + + public void setStatus(int status) { + this.status = status; + } + + public String getSchema() { + return schema; + } + + public void setSchema(String schema) { + this.schema = schema; + } +} diff --git a/src/main/java/io/mycat/migrate/TaskStatus.java b/src/main/java/io/mycat/migrate/TaskStatus.java new file mode 100644 index 000000000..be390bb36 --- /dev/null +++ b/src/main/java/io/mycat/migrate/TaskStatus.java @@ -0,0 +1,46 @@ +package io.mycat.migrate; + +import java.io.Serializable; + +/** + * Created by nange on 2016/12/7. 
+ */ +public class TaskStatus implements Serializable { + private int status; //0= dump error 1=dump sucess 2=increnment error 3=increment sucess 4=other error + private String msg; + private String binlogFile; + private long pos; + private String lastDate; + + public int getStatus() { + return status; + } + + public void setStatus(int status) { + this.status = status; + } + + public String getMsg() { + return msg; + } + + public void setMsg(String msg) { + this.msg = msg; + } + + public String getBinlogFile() { + return binlogFile; + } + + public void setBinlogFile(String binlogFile) { + this.binlogFile = binlogFile; + } + + public long getPos() { + return pos; + } + + public void setPos(long pos) { + this.pos = pos; + } +} diff --git a/src/main/java/io/mycat/net/AIOAcceptor.java b/src/main/java/io/mycat/net/AIOAcceptor.java new file mode 100644 index 000000000..64c772d5e --- /dev/null +++ b/src/main/java/io/mycat/net/AIOAcceptor.java @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.StandardSocketOptions; +import java.nio.channels.AsynchronousChannelGroup; +import java.nio.channels.AsynchronousServerSocketChannel; +import java.nio.channels.AsynchronousSocketChannel; +import java.nio.channels.CompletionHandler; +import java.nio.channels.NetworkChannel; +import java.util.concurrent.atomic.AtomicLong; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.net.factory.FrontendConnectionFactory; + +/** + * @author mycat + */ +public final class AIOAcceptor implements SocketAcceptor, + CompletionHandler { + private static final Logger LOGGER = LoggerFactory.getLogger(AIOAcceptor.class); + private static final AcceptIdGenerator ID_GENERATOR = new AcceptIdGenerator(); + + private final int port; + private final AsynchronousServerSocketChannel serverChannel; + private final FrontendConnectionFactory factory; + + private long acceptCount; + private final String name; + + public AIOAcceptor(String name, String ip, int port, + FrontendConnectionFactory factory, AsynchronousChannelGroup group) + throws IOException { + this.name = name; + this.port = port; + this.factory = factory; + serverChannel = AsynchronousServerSocketChannel.open(group); + /** 设置TCP属性 */ + serverChannel.setOption(StandardSocketOptions.SO_REUSEADDR, true); + serverChannel.setOption(StandardSocketOptions.SO_RCVBUF, 1024 * 16 * 2); + // backlog=100 + serverChannel.bind(new InetSocketAddress(ip, port), 100); + } + + public String getName() { + return name; + } + + public void start() { + this.pendingAccept(); + } + + public int getPort() { + return port; + } + + public long getAcceptCount() { + return acceptCount; + } + + private void accept(NetworkChannel channel, Long id) { + try { + 
FrontendConnection c = factory.make(channel); + c.setAccepted(true); + c.setId(id); + NIOProcessor processor = MycatServer.getInstance().nextProcessor(); + c.setProcessor(processor); + c.register(); + } catch (Exception e) { + LOGGER.error("AioAcceptorError", e); + closeChannel(channel); + } + } + + private void pendingAccept() { + if (serverChannel.isOpen()) { + serverChannel.accept(ID_GENERATOR.getId(), this); + } else { + throw new IllegalStateException( + "MyCAT Server Channel has been closed"); + } + + } + + @Override + public void completed(AsynchronousSocketChannel result, Long id) { + accept(result, id); + // next pending waiting + pendingAccept(); + + } + + @Override + public void failed(Throwable exc, Long id) { + LOGGER.info("acception connect failed:" + exc); + // next pending waiting + pendingAccept(); + + } + + private static void closeChannel(NetworkChannel channel) { + if (channel == null) { + return; + } + try { + channel.close(); + } catch (IOException e) { + LOGGER.error("AioAcceptorError", e); + } + } + + /** + * 前端连接ID生成器 + * + * @author mycat + */ + private static class AcceptIdGenerator { + + private static final long MAX_VALUE = 0xffffffffL; + + private AtomicLong acceptId = new AtomicLong(); + private final Object lock = new Object(); + + private long getId() { + long newValue = acceptId.getAndIncrement(); + if (newValue >= MAX_VALUE) { + synchronized (lock) { + newValue = acceptId.getAndIncrement(); + if (newValue >= MAX_VALUE) { + acceptId.set(0); + } + } + return acceptId.getAndDecrement(); + } else { + return newValue; + } + } + } +} diff --git a/src/main/java/io/mycat/net/AIOConnector.java b/src/main/java/io/mycat/net/AIOConnector.java new file mode 100644 index 000000000..194601fdd --- /dev/null +++ b/src/main/java/io/mycat/net/AIOConnector.java @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net; + +import java.nio.channels.CompletionHandler; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author mycat + */ +public final class AIOConnector implements SocketConnector, + CompletionHandler { + private static final Logger LOGGER = LoggerFactory.getLogger(AIOConnector.class); + private static final ConnectIdGenerator ID_GENERATOR = new ConnectIdGenerator(); + + public AIOConnector() { + + } + + @Override + public void completed(Void result, BackendAIOConnection attachment) { + finishConnect(attachment); + } + + @Override + public void failed(Throwable exc, BackendAIOConnection conn) { + conn.onConnectFailed(exc); + } + + private void finishConnect(BackendAIOConnection c) { + try { + if (c.finishConnect()) { + c.setId(ID_GENERATOR.getId()); + NIOProcessor processor = MycatServer.getInstance() + .nextProcessor(); + c.setProcessor(processor); + c.register(); + } + } catch (Exception e) { + 
c.onConnectFailed(e); + LOGGER.info("connect err " , e); + c.close(e.toString()); + } + } + + /** + * 后端连接ID生成器 + * + * @author mycat + */ + private static class ConnectIdGenerator { + + private static final long MAX_VALUE = Long.MAX_VALUE; + + private AtomicLong connectId = new AtomicLong(0); + + private long getId() { + return connectId.incrementAndGet(); + } + } +} diff --git a/src/main/java/io/mycat/net/AIOSocketWR.java b/src/main/java/io/mycat/net/AIOSocketWR.java new file mode 100644 index 000000000..fcade53d9 --- /dev/null +++ b/src/main/java/io/mycat/net/AIOSocketWR.java @@ -0,0 +1,222 @@ +package io.mycat.net; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.AsynchronousSocketChannel; +import java.nio.channels.CompletionHandler; + +import java.util.concurrent.atomic.AtomicBoolean; + +import io.mycat.util.TimeUtil; + +public class AIOSocketWR extends SocketWR +{ + private static final AIOReadHandler aioReadHandler = new AIOReadHandler(); + private static final AIOWriteHandler aioWriteHandler = new AIOWriteHandler(); + private final AsynchronousSocketChannel channel; + protected final AbstractConnection con; + protected final AtomicBoolean writing = new AtomicBoolean(false); + + + public AIOSocketWR(AbstractConnection conn) + { + channel = (AsynchronousSocketChannel) conn.getChannel(); + this.con = conn; + } + + @Override + public void asynRead() + { + ByteBuffer theBuffer = con.readBuffer; + if (theBuffer == null) + { + theBuffer = con.processor.getBufferPool().allocate(con.processor.getBufferPool().getChunkSize()); + con.readBuffer = theBuffer; + channel.read(theBuffer, this, aioReadHandler); + + } else if (theBuffer.hasRemaining()) + { + channel.read(theBuffer, this, aioReadHandler); + } else + { + throw new java.lang.IllegalArgumentException("full buffer to read "); + } + + } + + private void asynWrite(final ByteBuffer buffer) + { + + buffer.flip(); + this.channel.write(buffer, this, aioWriteHandler); + + + } + +// 
public int flushChannel(final AsynchronousSocketChannel channel, +// final ByteBuffer bb, final long writeTimeout) +// { +// +// if (!bb.hasRemaining()) +// { +// return 0; +// } +// int nWrite = bb.limit(); +// try +// { +// while (bb.hasRemaining()) +// { +// channel.write(bb).get(writeTimeout, TimeUnit.SECONDS); +// } +// } catch (Exception ie) +// { +// con.close("write failed " + ie); +// +// } +// return nWrite; +// } + + + /** + * return true ,means no more data + * + * @return + */ + private boolean write0() + { + if (!writing.compareAndSet(false, true)) + { + return false; + } + ByteBuffer theBuffer = con.writeBuffer; + if (theBuffer == null || !theBuffer.hasRemaining()) + {// writeFinished,但要区分bufer是否NULL,不NULL,要回收 + if (theBuffer != null) + { + con.recycle(theBuffer); + con.writeBuffer = null; + + } + // poll again + ByteBuffer buffer = con.writeQueue.poll(); + // more data + if (buffer != null) + { + if (buffer.limit() == 0) + { + con.recycle(buffer); + con.writeBuffer = null; + con.close("quit cmd"); + writing.set(false); + return true; + } else + { + con.writeBuffer = buffer; + asynWrite(buffer); + return false; + } + } else + { + // no buffer + writing.set(false); + return true; + } + } else + { + theBuffer.compact(); + asynWrite(theBuffer); + return false; + } + + } + + protected void onWriteFinished(int result) + { + + con.netOutBytes += result; + con.processor.addNetOutBytes(result); + con.lastWriteTime = TimeUtil.currentTimeMillis(); + boolean noMoreData = this.write0(); + if (noMoreData) + { + this.doNextWriteCheck(); + } + + } + + public void doNextWriteCheck() + { + + boolean noMoreData = false; + noMoreData = this.write0(); + if (noMoreData + && !con.writeQueue.isEmpty()) + { + this.write0(); + } + + + } +} + +class AIOWriteHandler implements CompletionHandler { + + @Override + public void completed(final Integer result, final AIOSocketWR wr) { + try { + + wr.writing.set(false); + + if (result >= 0) { + wr.onWriteFinished(result); + } else { 
+ wr.con.close("write erro " + result); + } + } catch (Exception e) { + AbstractConnection.LOGGER.warn("caught aio process err:", e); + } + + } + + @Override + public void failed(Throwable exc, AIOSocketWR wr) { + wr.writing.set(false); + wr.con.close("write failed " + exc); + } + +} + + +class AIOReadHandler implements CompletionHandler +{ + @Override + public void completed(final Integer i, final AIOSocketWR wr) + { + // con.getProcessor().getExecutor().execute(new Runnable() { + // public void run() { + if (i > 0) + { + try + { + wr.con.onReadData(i); + wr.con.asynRead(); + } catch (IOException e) + { + wr.con.close("handle err:" + e); + } + } else if (i == -1) + { + // System.out.println("read -1 xxxxxxxxx "+con); + wr.con.close("client closed"); + } + // } + // }); + } + + @Override + public void failed(Throwable exc, AIOSocketWR wr) + { + wr.con.close(exc.toString()); + + } +} diff --git a/src/main/java/io/mycat/net/AbstractConnection.java b/src/main/java/io/mycat/net/AbstractConnection.java new file mode 100644 index 000000000..8b4b5b358 --- /dev/null +++ b/src/main/java/io/mycat/net/AbstractConnection.java @@ -0,0 +1,616 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net; + +import java.io.IOException; +import java.net.Socket; +import java.nio.ByteBuffer; +import java.nio.channels.AsynchronousChannel; +import java.nio.channels.NetworkChannel; +import java.nio.channels.SocketChannel; +import java.util.List; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicBoolean; + +import com.google.common.base.Strings; + +import io.mycat.backend.mysql.CharsetUtil; +import io.mycat.util.CompressUtil; +import io.mycat.util.TimeUtil; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +/** + * @author mycat + */ +public abstract class AbstractConnection implements NIOConnection { + + protected static final Logger LOGGER = LoggerFactory.getLogger(AbstractConnection.class); + + protected String host; + protected int localPort; + protected int port; + protected long id; + protected volatile String charset; + protected volatile int charsetIndex; + + protected final NetworkChannel channel; + protected NIOProcessor processor; + protected NIOHandler handler; + + protected int packetHeaderSize; + protected int maxPacketSize; + protected volatile ByteBuffer readBuffer; + protected volatile ByteBuffer writeBuffer; + + protected final ConcurrentLinkedQueue writeQueue = new ConcurrentLinkedQueue(); + + protected volatile int readBufferOffset; + protected long lastLargeMessageTime; + protected final AtomicBoolean isClosed; + protected boolean isSocketClosed; + protected long startupTime; + protected long lastReadTime; + protected long lastWriteTime; + protected long netInBytes; + protected long netOutBytes; + protected int 
writeAttempts; + + protected volatile boolean isSupportCompress = false; + protected final ConcurrentLinkedQueue decompressUnfinishedDataQueue = new ConcurrentLinkedQueue(); + protected final ConcurrentLinkedQueue compressUnfinishedDataQueue = new ConcurrentLinkedQueue(); + + private long idleTimeout; + + private final SocketWR socketWR; + + public AbstractConnection(NetworkChannel channel) { + this.channel = channel; + boolean isAIO = (channel instanceof AsynchronousChannel); + if (isAIO) { + socketWR = new AIOSocketWR(this); + } else { + socketWR = new NIOSocketWR(this); + } + this.isClosed = new AtomicBoolean(false); + this.startupTime = TimeUtil.currentTimeMillis(); + this.lastReadTime = startupTime; + this.lastWriteTime = startupTime; + } + + public String getCharset() { + return charset; + } + + public boolean setCharset(String charset) { + + // 修复PHP字符集设置错误, 如: set names 'utf8' + if (charset != null) { + charset = charset.replace("'", ""); + } + + int ci = CharsetUtil.getIndex(charset); + if (ci > 0) { + this.charset = charset.equalsIgnoreCase("utf8mb4") ? 
"utf8" : charset; + this.charsetIndex = ci; + return true; + } else { + return false; + } + } + + public boolean isSupportCompress() { + return isSupportCompress; + } + + public void setSupportCompress(boolean isSupportCompress) { + this.isSupportCompress = isSupportCompress; + } + + public int getCharsetIndex() { + return charsetIndex; + } + + public long getIdleTimeout() { + return idleTimeout; + } + + public SocketWR getSocketWR() { + return socketWR; + } + + public void setIdleTimeout(long idleTimeout) { + this.idleTimeout = idleTimeout; + } + + public int getLocalPort() { + return localPort; + } + + public String getHost() { + return host; + } + + public void setHost(String host) { + this.host = host; + } + + public int getPort() { + return port; + } + + public void setPort(int port) { + this.port = port; + } + + public void setLocalPort(int localPort) { + this.localPort = localPort; + } + + public long getId() { + return id; + } + + public void setId(long id) { + this.id = id; + } + + public boolean isIdleTimeout() { + return TimeUtil.currentTimeMillis() > Math.max(lastWriteTime, lastReadTime) + idleTimeout; + } + + public NetworkChannel getChannel() { + return channel; + } + + public int getPacketHeaderSize() { + return packetHeaderSize; + } + + public void setPacketHeaderSize(int packetHeaderSize) { + this.packetHeaderSize = packetHeaderSize; + } + + public int getMaxPacketSize() { + return maxPacketSize; + } + + public void setMaxPacketSize(int maxPacketSize) { + this.maxPacketSize = maxPacketSize; + } + + public long getStartupTime() { + return startupTime; + } + + public long getLastReadTime() { + return lastReadTime; + } + + public void setProcessor(NIOProcessor processor) { + this.processor = processor; + int size = processor.getBufferPool().getChunkSize(); + this.readBuffer = processor.getBufferPool().allocate(size); + } + + public long getLastWriteTime() { + return lastWriteTime; + } + + public void setLastWriteTime(long lasttime){ + 
this.lastWriteTime = lasttime; + } + + public long getNetInBytes() { + return netInBytes; + } + + public long getNetOutBytes() { + return netOutBytes; + } + + public int getWriteAttempts() { + return writeAttempts; + } + + public NIOProcessor getProcessor() { + return processor; + } + + public ByteBuffer getReadBuffer() { + return readBuffer; + } + + public ByteBuffer allocate() { + int size = this.processor.getBufferPool().getChunkSize(); + ByteBuffer buffer = this.processor.getBufferPool().allocate(size); + return buffer; + } + + public final void recycle(ByteBuffer buffer) { + this.processor.getBufferPool().recycle(buffer); + } + + public void setHandler(NIOHandler handler) { + this.handler = handler; + } + + @Override + public void handle(byte[] data) { + if (isSupportCompress()) { + List packs = CompressUtil.decompressMysqlPacket(data, decompressUnfinishedDataQueue); + for (byte[] pack : packs) { + if (pack.length != 0) { + handler.handle(pack); + } + } + } else { + handler.handle(data); + } + } + + @Override + public void register() throws IOException { + + } + + public void asynRead() throws IOException { + this.socketWR.asynRead(); + } + + public void doNextWriteCheck() throws IOException { + this.socketWR.doNextWriteCheck(); + } + + /** + * 读取可能的Socket字节流 + */ + public void onReadData(int got) throws IOException { + + if (isClosed.get()) { + return; + } + + lastReadTime = TimeUtil.currentTimeMillis(); + if (got < 0) { + this.close("stream closed"); + return; + } else if (got == 0 + && !this.channel.isOpen()) { + this.close("socket closed"); + return; + } + netInBytes += got; + processor.addNetInBytes(got); + + // 循环处理字节信息 + int offset = readBufferOffset, length = 0, position = readBuffer.position(); + for (;;) { + length = getPacketLength(readBuffer, offset); + if (length == -1) { + if (offset != 0) { + this.readBuffer = compactReadBuffer(readBuffer, offset); + } else if (readBuffer != null && !readBuffer.hasRemaining()) { + throw new RuntimeException( 
"invalid readbuffer capacity ,too little buffer size " + + readBuffer.capacity()); + } + break; + } + + if (position >= offset + length && readBuffer != null) { + + // handle this package + readBuffer.position(offset); + byte[] data = new byte[length]; + readBuffer.get(data, 0, length); + handle(data); + + // maybe handle stmt_close + if(isClosed()) { + return ; + } + + // offset to next position + offset += length; + + // reached end + if (position == offset) { + // if cur buffer is temper none direct byte buffer and not + // received large message in recent 30 seconds + // then change to direct buffer for performance + if (readBuffer != null && !readBuffer.isDirect() + && lastLargeMessageTime < lastReadTime - 30 * 1000L) { // used temp heap + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("change to direct con read buffer ,cur temp buf size :" + readBuffer.capacity()); + } + recycle(readBuffer); + readBuffer = processor.getBufferPool().allocate(processor.getBufferPool().getConReadBuferChunk()); + } else { + if (readBuffer != null) { + readBuffer.clear(); + } + } + // no more data ,break + readBufferOffset = 0; + break; + } else { + // try next package parse + readBufferOffset = offset; + if(readBuffer != null) { + readBuffer.position(position); + } + continue; + } + + + + } else { + // not read whole message package ,so check if buffer enough and + // compact readbuffer + if (!readBuffer.hasRemaining()) { + readBuffer = ensureFreeSpaceOfReadBuffer(readBuffer, offset, length); + } + break; + } + } + } + + private boolean isConReadBuffer(ByteBuffer buffer) { + return buffer.capacity() == processor.getBufferPool().getConReadBuferChunk() && buffer.isDirect(); + } + + private ByteBuffer ensureFreeSpaceOfReadBuffer(ByteBuffer buffer, + int offset, final int pkgLength) { + // need a large buffer to hold the package + if (pkgLength > maxPacketSize) { + throw new IllegalArgumentException("Packet size over the limit."); + } else if (buffer.capacity() < pkgLength) { + + 
ByteBuffer newBuffer = processor.getBufferPool().allocate(pkgLength); + lastLargeMessageTime = TimeUtil.currentTimeMillis(); + buffer.position(offset); + newBuffer.put(buffer); + readBuffer = newBuffer; + + recycle(buffer); + readBufferOffset = 0; + return newBuffer; + + } else { + if (offset != 0) { + // compact bytebuffer only + return compactReadBuffer(buffer, offset); + } else { + throw new RuntimeException(" not enough space"); + } + } + } + + private ByteBuffer compactReadBuffer(ByteBuffer buffer, int offset) { + if(buffer == null) { + return null; + } + buffer.limit(buffer.position()); + buffer.position(offset); + buffer = buffer.compact(); + readBufferOffset = 0; + return buffer; + } + + public void write(byte[] data) { + ByteBuffer buffer = allocate(); + buffer = writeToBuffer(data, buffer); + write(buffer); + + } + + private final void writeNotSend(ByteBuffer buffer) { + if (isSupportCompress()) { + ByteBuffer newBuffer = CompressUtil.compressMysqlPacket(buffer, this, compressUnfinishedDataQueue); + writeQueue.offer(newBuffer); + + } else { + writeQueue.offer(buffer); + } + } + + + @Override + public final void write(ByteBuffer buffer) { + + if (isSupportCompress()) { + ByteBuffer newBuffer = CompressUtil.compressMysqlPacket(buffer, this, compressUnfinishedDataQueue); + writeQueue.offer(newBuffer); + } else { + writeQueue.offer(buffer); + } + + // if ansyn write finishe event got lock before me ,then writing + // flag is set false but not start a write request + // so we check again + try { + this.socketWR.doNextWriteCheck(); + } catch (Exception e) { + LOGGER.warn("write err:", e); + this.close("write err:" + e); + } + } + + + public ByteBuffer checkWriteBuffer(ByteBuffer buffer, int capacity, boolean writeSocketIfFull) { + if (capacity > buffer.remaining()) { + if (writeSocketIfFull) { + writeNotSend(buffer); + return processor.getBufferPool().allocate(capacity); + } else {// Relocate a larger buffer + buffer.flip(); + ByteBuffer newBuf = 
processor.getBufferPool().allocate(capacity + buffer.limit() + 1); + newBuf.put(buffer); + this.recycle(buffer); + return newBuf; + } + } else { + return buffer; + } + } + + public ByteBuffer writeToBuffer(byte[] src, ByteBuffer buffer) { + int offset = 0; + int length = src.length; + int remaining = buffer.remaining(); + while (length > 0) { + if (remaining >= length) { + buffer.put(src, offset, length); + break; + } else { + buffer.put(src, offset, remaining); + writeNotSend(buffer); + buffer = allocate(); + offset += remaining; + length -= remaining; + remaining = buffer.remaining(); + continue; + } + } + return buffer; + } + + @Override + public void close(String reason) { + if (!isClosed.get()) { + closeSocket(); + isClosed.set(true); + if (processor != null) { + processor.removeConnection(this); + } + this.cleanup(); + isSupportCompress = false; + + // ignore null information + if (Strings.isNullOrEmpty(reason)) { + return; + } + LOGGER.info("close connection,reason:" + reason + " ," + this); + if (reason.contains("connection,reason:java.net.ConnectException")) { + throw new RuntimeException(" errr"); + } + } else { + // make sure cleanup again + // Fix issue#1616 + this.cleanup(); + } + } + + public boolean isClosed() { + return isClosed.get(); + } + + public void idleCheck() { + if (isIdleTimeout()) { + LOGGER.info(toString() + " idle timeout"); + close(" idle "); + } + } + + /** + * 清理资源 + */ + protected void cleanup() { + + // 清理资源占用 + if (readBuffer != null) { + this.recycle(readBuffer); + this.readBuffer = null; + this.readBufferOffset = 0; + } + + if (writeBuffer != null) { + recycle(writeBuffer); + this.writeBuffer = null; + } + + if (!decompressUnfinishedDataQueue.isEmpty()) { + decompressUnfinishedDataQueue.clear(); + } + + if (!compressUnfinishedDataQueue.isEmpty()) { + compressUnfinishedDataQueue.clear(); + } + + ByteBuffer buffer = null; + while ((buffer = writeQueue.poll()) != null) { + recycle(buffer); + } + } + + protected int 
getPacketLength(ByteBuffer buffer, int offset) { + int headerSize = getPacketHeaderSize(); + if ( isSupportCompress() ) { + headerSize = 7; + } + + if (buffer.position() < offset + headerSize) { + return -1; + } else { + int length = buffer.get(offset) & 0xff; + length |= (buffer.get(++offset) & 0xff) << 8; + length |= (buffer.get(++offset) & 0xff) << 16; + return length + headerSize; + } + } + + public ConcurrentLinkedQueue getWriteQueue() { + return writeQueue; + } + + private void closeSocket() { + if (channel != null) { + if (channel instanceof SocketChannel) { + Socket socket = ((SocketChannel) channel).socket(); + if (socket != null) { + try { + socket.close(); + } catch (IOException e) { + LOGGER.error("closeChannelError", e); + } + } + } + + boolean isSocketClosed = true; + try { + channel.close(); + } catch (Exception e) { + LOGGER.error("AbstractConnectionCloseError", e); + } + + boolean closed = isSocketClosed && (!channel.isOpen()); + if (closed == false) { + LOGGER.warn("close socket of connnection failed " + this); + } + } + } + public void onConnectfinish() { + LOGGER.debug("连接后台真正完成"); + } +} diff --git a/src/main/java/io/mycat/net/BIOConnection.java b/src/main/java/io/mycat/net/BIOConnection.java new file mode 100644 index 000000000..7ab798a91 --- /dev/null +++ b/src/main/java/io/mycat/net/BIOConnection.java @@ -0,0 +1,5 @@ +package io.mycat.net; + +public interface BIOConnection extends ClosableConnection{ + +} diff --git a/src/main/java/io/mycat/server/packet/BinaryPacket.java b/src/main/java/io/mycat/net/BackendAIOConnection.java similarity index 50% rename from src/main/java/io/mycat/server/packet/BinaryPacket.java rename to src/main/java/io/mycat/net/BackendAIOConnection.java index 5dacfec23..c968d01f8 100644 --- a/src/main/java/io/mycat/server/packet/BinaryPacket.java +++ b/src/main/java/io/mycat/net/BackendAIOConnection.java @@ -21,57 +21,64 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.packet; - -import io.mycat.net.BufferArray; -import io.mycat.server.packet.util.BufferUtil; -import io.mycat.server.packet.util.StreamUtil; +package io.mycat.net; import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; +import java.net.InetSocketAddress; +import java.nio.channels.NetworkChannel; + +import io.mycat.backend.BackendConnection; /** * @author mycat */ -public class BinaryPacket extends MySQLPacket { - public static final byte OK = 1; - public static final byte ERROR = 2; - public static final byte HEADER = 3; - public static final byte FIELD = 4; - public static final byte FIELD_EOF = 5; - public static final byte ROW = 6; - public static final byte PACKET_EOF = 7; +public abstract class BackendAIOConnection extends AbstractConnection implements + BackendConnection { - public byte[] data; + + + protected boolean isFinishConnect; - public void read(InputStream in) throws IOException { - packetLength = StreamUtil.readUB3(in); - packetId = StreamUtil.read(in); - byte[] ab = new byte[packetLength]; - StreamUtil.read(in, ab, 0, ab.length); - data = ab; + public BackendAIOConnection(NetworkChannel channel) { + super(channel); } - @Override - public void write(BufferArray bufferArray) { - int size = calcPacketSize(); - ByteBuffer buffer = bufferArray.checkWriteBuffer(packetHeaderSize - + size); - BufferUtil.writeUB3(buffer, size); - buffer.put(packetId); - bufferArray.write(data); + public void register() throws IOException { + this.asynRead(); + } + + public void setHost(String host) { + this.host = host; } - @Override - public int calcPacketSize() { - return data == null ? 
0 : data.length; + + public void setPort(int port) { + this.port = port; } - @Override - protected String getPacketInfo() { - return "MySQL Binary Packet"; + + + + public void discardClose(String reason){ + //跨节点处理,中断后端连接时关闭 + } + public abstract void onConnectFailed(Throwable e); + + public boolean finishConnect() throws IOException { + localPort = ((InetSocketAddress) channel.getLocalAddress()).getPort(); + isFinishConnect = true; + return true; } + public void setProcessor(NIOProcessor processor) { + super.setProcessor(processor); + processor.addBackend(this); + } + + @Override + public String toString() { + return "BackendConnection [id=" + id + ", host=" + host + ", port=" + + port + ", localPort=" + localPort + "]"; + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/net/BufferPool.java b/src/main/java/io/mycat/net/BufferPool.java deleted file mode 100644 index 9b765c51c..000000000 --- a/src/main/java/io/mycat/net/BufferPool.java +++ /dev/null @@ -1,191 +0,0 @@ -package io.mycat.net; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.concurrent.ConcurrentLinkedQueue; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * @author wuzh - */ -public final class BufferPool { - // this value not changed ,isLocalCacheThread use it - public static final String LOCAL_BUF_THREAD_PREX = "$_"; - private final ThreadLocalBufferPool localBufferPool; - private static final Logger LOGGER = LoggerFactory - .getLogger(BufferPool.class); - private final int chunkSize; - private final int conReadBuferChunk; - private final ConcurrentLinkedQueue items = new ConcurrentLinkedQueue(); - /** - * 只用于Connection读取Socket事件,每个Connection一个ByteBuffer(Direct), - * 此ByteBufer通常应该能容纳2-N个 应用消息的报文长度, - * 对于超出的报文长度,则由BufferPool单独份分配临时的堆内ByteBuffer - */ - private final ConcurrentLinkedQueue conReadBuferQueue = new ConcurrentLinkedQueue(); - private long sharedOptsCount; - private int newCreated; - private final long threadLocalCount; - 
private final long capactiy; - - public BufferPool(long bufferSize, int chunkSize, int conReadBuferChunk, - int threadLocalPercent) { - this.chunkSize = chunkSize; - this.conReadBuferChunk = conReadBuferChunk; - long size = bufferSize / chunkSize; - size = (bufferSize % chunkSize == 0) ? size : size + 1; - this.capactiy = size; - threadLocalCount = threadLocalPercent * capactiy / 100; - for (int i = 0; i < capactiy; i++) { - items.offer(createDirectBuffer(chunkSize)); - } - localBufferPool = new ThreadLocalBufferPool(threadLocalCount); - } - - private static final boolean isLocalCacheThread() { - final String thname = Thread.currentThread().getName(); - return (thname.length() < LOCAL_BUF_THREAD_PREX.length()) ? false - : (thname.charAt(0) == '$' && thname.charAt(1) == '_'); - - } - - public int getConReadBuferChunk() { - return conReadBuferChunk; - } - - public int getChunkSize() { - return chunkSize; - } - - public long getSharedOptsCount() { - return sharedOptsCount; - } - - public long size() { - return this.items.size(); - } - - public long capacity() { - return capactiy + newCreated; - } - - public ByteBuffer allocateConReadBuffer() { - ByteBuffer result = conReadBuferQueue.poll(); - if (result != null) { - return result; - } else { - return createDirectBuffer(conReadBuferChunk); - } - - } - - public BufferArray allocateArray() { - return new BufferArray(this); - } - - public ByteBuffer allocate() { - ByteBuffer node = null; - if (isLocalCacheThread()) { - // allocate from threadlocal - node = localBufferPool.get().poll(); - if (node != null) { - return node; - } - } - node = items.poll(); - if (node == null) { - newCreated++; - node = this.createDirectBuffer(chunkSize); - } - return node; - } - - private boolean checkValidBuffer(ByteBuffer buffer) { - // 拒绝回收null和容量大于chunkSize的缓存 - if (buffer == null || !buffer.isDirect()) { - return false; - } else if (buffer.capacity() != chunkSize) { - LOGGER.warn("cant' recycle a buffer not equals my pool chunksize " - + 
chunkSize + " he is " + buffer.capacity()); - throw new RuntimeException("bad size"); - - // return false; - } - buffer.clear(); - return true; - } - - public void recycleConReadBuffer(ByteBuffer buffer) { - if (buffer == null || !buffer.isDirect()) { - return; - } else if (buffer.capacity() != conReadBuferChunk) { - LOGGER.warn("cant' recycle a buffer not equals my pool con read chunksize " - + buffer.capacity()); - } else { - buffer.clear(); - this.conReadBuferQueue.add(buffer); - } - } - - public void recycle(ByteBuffer buffer) { - if (!checkValidBuffer(buffer)) { - return; - } - if (isLocalCacheThread()) { - BufferQueue localQueue = localBufferPool.get(); - if (localQueue.snapshotSize() < threadLocalCount) { - localQueue.put(buffer); - } else { - // recyle 3/4 thread local buffer - items.addAll(localQueue.removeItems(threadLocalCount * 3 / 4)); - items.offer(buffer); - sharedOptsCount++; - } - } else { - sharedOptsCount++; - items.offer(buffer); - } - - } - - public boolean testIfDuplicate(ByteBuffer buffer) { - for (ByteBuffer exists : items) { - if (exists == buffer) { - return true; - } - } - return false; - - } - - private ByteBuffer createDirectBuffer(int size) { - // for performance - return ByteBuffer.allocateDirect(size); - } - - public ByteBuffer allocate(int size) { - if (size <= this.chunkSize) { - return allocate(); - } else { - LOGGER.warn("allocate buffer size large than default chunksize:" - + this.chunkSize + " he want " + size); - throw new RuntimeException("execuddd"); - // return createTempBuffer(size); - } - } - - public static void main(String[] args) { - BufferPool pool = new BufferPool(1024 * 5, 1024, 1024 * 3, 2); - long i = pool.capacity(); - ArrayList all = new ArrayList(); - for (int j = 0; j <= i; j++) { - all.add(pool.allocate()); - } - for (ByteBuffer buf : all) { - pool.recycle(buf); - } - System.out.println(pool.size()); - } -} diff --git a/src/main/java/io/mycat/net/BufferQueue.java b/src/main/java/io/mycat/net/BufferQueue.java 
deleted file mode 100644 index f7ac17a02..000000000 --- a/src/main/java/io/mycat/net/BufferQueue.java +++ /dev/null @@ -1,67 +0,0 @@ -package io.mycat.net; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.LinkedList; - -/** - * @author wuzh - */ -public final class BufferQueue { - private final long total; - private final LinkedList items = new LinkedList(); - - public BufferQueue(long capacity) { - this.total = capacity; - } - - /** - * used for statics - * - * @return - */ - public long snapshotSize() { - return this.items.size(); - } - - public Collection removeItems(long count) { - - ArrayList removed = new ArrayList(); - Iterator itor = items.iterator(); - while (itor.hasNext()) { - removed.add(itor.next()); - itor.remove(); - if (removed.size() >= count) { - break; - } - } - return removed; - } - - /** - * - * @param buffer - * @throws InterruptedException - */ - public void put(ByteBuffer buffer) { - this.items.offer(buffer); - if (items.size() > total) { - throw new java.lang.RuntimeException( - "bufferQueue size exceeded ,maybe sql returned too many records ,cursize:" - + items.size()); - - } - } - - public ByteBuffer poll() { - ByteBuffer buf = items.poll(); - return buf; - } - - public boolean isEmpty() { - return items.isEmpty(); - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/ConnectIdGenerator.java b/src/main/java/io/mycat/net/ConnectIdGenerator.java deleted file mode 100644 index bee4f8bd0..000000000 --- a/src/main/java/io/mycat/net/ConnectIdGenerator.java +++ /dev/null @@ -1,27 +0,0 @@ -package io.mycat.net; - -/** - * 连接ID生成器 - * - * @author mycat - */ -public class ConnectIdGenerator { - - private static final long MAX_VALUE = Long.MAX_VALUE; - private static ConnectIdGenerator instance=new ConnectIdGenerator(); - public static ConnectIdGenerator getINSTNCE() - { - return instance; - } - private long connectId = 0L; - private final 
Object lock = new Object(); - - public long getId() { - synchronized (lock) { - if (connectId >= MAX_VALUE) { - connectId = 0L; - } - return ++connectId; - } - } -} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/Connection.java b/src/main/java/io/mycat/net/Connection.java deleted file mode 100644 index 7c66ce4b4..000000000 --- a/src/main/java/io/mycat/net/Connection.java +++ /dev/null @@ -1,716 +0,0 @@ -package io.mycat.net; - -import io.mycat.util.TimeUtil; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.SelectionKey; -import java.nio.channels.Selector; -import java.nio.channels.SocketChannel; -import java.util.List; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReentrantLock; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * @author wuzh - */ -public abstract class Connection implements ClosableConnection{ - public static Logger LOGGER = LoggerFactory.getLogger(Connection.class); - protected String host; - protected int port; - protected int localPort; - protected long id; - - public enum State { - connecting, connected, closing, closed, failed - } - - private State state = State.connecting; - - // 连接的方向,in表示是客户端连接过来的,out表示自己作为客户端去连接对端Sever - public enum Direction { - in, out - } - - private Direction direction = Direction.in; - - protected final SocketChannel channel; - - private SelectionKey processKey; - private static final int OP_NOT_READ = ~SelectionKey.OP_READ; - private static final int OP_NOT_WRITE = ~SelectionKey.OP_WRITE; - private ByteBuffer readBuffer; - private ByteBuffer writeBuffer; - private final ConcurrentLinkedQueue writeQueue = new ConcurrentLinkedQueue(); - private final ReentrantLock writeQueueLock = new ReentrantLock(); - private int readBufferOffset; - private long lastLargeMessageTime; - protected boolean isClosed; - protected boolean isSocketClosed; - protected long startupTime; 
- protected long lastReadTime; - protected long lastWriteTime; - protected int netInBytes; - protected int netOutBytes; - protected int pkgTotalSize; - protected int pkgTotalCount; - private long idleTimeout; - private long lastPerfCollectTime; - @SuppressWarnings("rawtypes") - protected NIOHandler handler; - private int maxPacketSize; - private int packetHeaderSize; - - public Connection(SocketChannel channel) { - this.channel = channel; - this.isClosed = false; - this.startupTime = TimeUtil.currentTimeMillis(); - this.lastReadTime = startupTime; - this.lastWriteTime = startupTime; - this.lastPerfCollectTime = startupTime; - } - - public void resetPerfCollectTime() { - netInBytes = 0; - netOutBytes = 0; - pkgTotalCount = 0; - pkgTotalSize = 0; - lastPerfCollectTime = TimeUtil.currentTimeMillis(); - } - - public long getLastPerfCollectTime() { - return lastPerfCollectTime; - } - - public long getIdleTimeout() { - return idleTimeout; - } - - public void setIdleTimeout(long idleTimeout) { - this.idleTimeout = idleTimeout; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public int getPort() { - return port; - } - - public void setPort(int port) { - this.port = port; - } - - public long getId() { - return id; - } - - public int getLocalPort() { - return localPort; - } - - public void setLocalPort(int localPort) { - this.localPort = localPort; - } - - public void setId(long id) { - this.id = id; - } - - public boolean isIdleTimeout() { - return TimeUtil.currentTimeMillis() > Math.max(lastWriteTime, - lastReadTime) + idleTimeout; - - } - - public SocketChannel getChannel() { - return channel; - } - - public long getStartupTime() { - return startupTime; - } - - public long getLastReadTime() { - return lastReadTime; - } - - public long getLastWriteTime() { - return lastWriteTime; - } - - public long getNetInBytes() { - return netInBytes; - } - - public long getNetOutBytes() { - return netOutBytes; - } 
- - public ByteBuffer getReadBuffer() { - return readBuffer; - } - - private ByteBuffer allocate() { - ByteBuffer buffer = NetSystem.getInstance().getBufferPool().allocate(); - return buffer; - } - - private final void recycle(ByteBuffer buffer) { - NetSystem.getInstance().getBufferPool().recycle(buffer); - } - - public void setHandler(NIOHandler handler) { - this.handler = handler; - - } - - @SuppressWarnings("rawtypes") - public NIOHandler getHandler() { - return this.handler; - } - - @SuppressWarnings("unchecked") - public void handle(final ByteBuffer data, final int start, - final int readedLength) { - handler.handle(this, data, start, readedLength); - } - - /** - * 读取可能的Socket字节流 - * - * @param got - * @throws IOException - */ - public void onReadData(int got) throws IOException { - if (isClosed) { - return; - } - lastReadTime = TimeUtil.currentTimeMillis(); - if (got < 0) { - this.close("stream closed"); - return; - } else if (got == 0) { - if (!this.channel.isOpen()) { - this.close("socket closed"); - return; - } - } - netInBytes += got; - // System.out.println("readed new size "+got); - NetSystem.getInstance().addNetInBytes(got); - - // 循环处理字节信息 - int offset = readBufferOffset, length = 0, position = readBuffer.position(); - while(readBuffer != null && !isClosed) { - length = getPacketLength(readBuffer, offset, position); - // LOGGER.info("message lenth "+length+" offset "+offset+" positon "+position+" capactiy "+readBuffer.capacity()); - // System.out.println("message lenth "+length+" offset "+offset+" positon "+position); - if (length == -1) { - if (offset != 0) { - this.readBuffer = compactReadBuffer(readBuffer, offset); - } else if (readBuffer != null && !readBuffer.hasRemaining()) { - throw new RuntimeException( - "invalid readbuffer capacity ,too little buffer size " - + readBuffer.capacity()); - } - break; - } - pkgTotalCount++; - pkgTotalSize += length; - // check if a complete message packge received - if (offset + length <= position && readBuffer 
!= null) { - // handle this package - readBuffer.position(offset); - handle(readBuffer, offset, length); - - // maybe handle stmt_close - if(isClosed()) { - return ; - } - - // offset to next position - offset += length; - // reached end - if (position == offset) { - // if cur buffer is temper none direct byte buffer and not - // received large message in recent 30 seconds - // then change to direct buffer for performance - if (readBuffer != null && !readBuffer.isDirect() - && lastLargeMessageTime < lastReadTime - 30 * 1000L) {// used - // temp - // heap - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("change to direct con read buffer ,cur temp buf size :" - + readBuffer.capacity()); - } - recycle(readBuffer); - readBuffer = NetSystem.getInstance().getBufferPool() - .allocateConReadBuffer(); - } else { - if (readBuffer != null) - readBuffer.clear(); - } - // no more data ,break - readBufferOffset = 0; - break; - } else { - // try next package parse - readBufferOffset = offset; - if(readBuffer != null) - readBuffer.position(position); - continue; - } - } else { - // not read whole message package ,so check if buffer enough and - // compact readbuffer - if (!readBuffer.hasRemaining()) { - readBuffer = ensureFreeSpaceOfReadBuffer(readBuffer, - offset, length); - } - break; - } - } - } - - public boolean isConnected() { - return (this.state == Connection.State.connected); - } - - private boolean isConReadBuffer(ByteBuffer buffer) { - return buffer.capacity() == NetSystem.getInstance().getBufferPool() - .getConReadBuferChunk() - && buffer.isDirect(); - } - - private ByteBuffer ensureFreeSpaceOfReadBuffer(ByteBuffer buffer, - int offset, final int pkgLength) { - // need a large buffer to hold the package - if (pkgLength > maxPacketSize) { - throw new IllegalArgumentException("Packet size over the limit."); - } else if (buffer.capacity() < pkgLength) { - ByteBuffer newBuffer = NetSystem.getInstance().getBufferPool() - .allocate(pkgLength); - lastLargeMessageTime = 
TimeUtil.currentTimeMillis(); - buffer.position(offset); - newBuffer.put(buffer); - readBuffer = newBuffer; - if (isConReadBuffer(buffer)) { - NetSystem.getInstance().getBufferPool() - .recycleConReadBuffer(buffer); - } else { - recycle(buffer); - } - readBufferOffset = 0; - return newBuffer; - - } else { - if (offset != 0) { - // compact bytebuffer only - return compactReadBuffer(buffer, offset); - } else { - throw new RuntimeException(" not enough space"); - } - } - } - - private ByteBuffer compactReadBuffer(ByteBuffer buffer, int offset) { - if(buffer == null) return null; - buffer.limit(buffer.position()); - buffer.position(offset); - buffer = buffer.compact(); - readBufferOffset = 0; - return buffer; - } - - public void write(byte[] src) { - try { - writeQueueLock.lock(); - ByteBuffer buffer = this.allocate(); - int offset = 0; - int remains = src.length; - while (remains > 0) { - int writeable = buffer.remaining(); - if (writeable >= remains) { - // can write whole srce - buffer.put(src, offset, remains); - this.writeQueue.offer(buffer); - break; - } else { - // can write partly - buffer.put(src, offset, writeable); - offset += writeable; - remains -= writeable; - writeQueue.offer(buffer); - buffer = allocate(); - continue; - } - - } - } finally { - writeQueueLock.unlock(); - } - this.enableWrite(true); - } - - /** - * note only use this method when the input buffer is shared - * - * @param buffer - * @param from - * @param lenth - */ - public final void write(ByteBuffer buffer, int from, int lenth) { - try { - writeQueueLock.lock(); - buffer.position(from); - int remainByts = lenth; - while (remainByts > 0) { - ByteBuffer newBuf = allocate(); - int batchSize = newBuf.capacity(); - for (int i = 0; i < batchSize & remainByts > 0; i++) { - newBuf.put(buffer.get()); - remainByts--; - } - writeQueue.offer(newBuf); - } - } finally { - writeQueueLock.unlock(); - } - this.enableWrite(true); - - } - - public final void write(ByteBuffer buffer) { - try { - 
writeQueueLock.lock(); - writeQueue.offer(buffer); - } finally { - writeQueueLock.unlock(); - } - this.enableWrite(true); - } - - @SuppressWarnings("unchecked") - public void close(String reason) { - if (!isClosed) { - closeSocket(); - this.cleanup(); - isClosed = true; - NetSystem.getInstance().removeConnection(this); - LOGGER.info("close connection,reason:" + reason + " ," + this); - if (handler != null) { - handler.onClosed(this, reason); - } - } - } - - /** - * asyn close (executed later in thread) - * 该函数使用多线程异步关闭 Connection,会存在并发安全问题,暂时注释 - * @param reason - */ -// public void asynClose(final String reason) { -// Runnable runn = new Runnable() { -// public void run() { -// Connection.this.close(reason); -// } -// }; -// NetSystem.getInstance().getTimer().schedule(runn, 1, TimeUnit.SECONDS); -// -// } - - public boolean isClosed() { - return isClosed; - } - - public void idleCheck() { - if (isIdleTimeout()) { - LOGGER.info(toString() + " idle timeout"); - close(" idle "); - } - } - - /** - * 清理资源 - */ - - protected void cleanup() { - - // 清理资源占用 - if (readBuffer != null) { - if (isConReadBuffer(readBuffer)) { - NetSystem.getInstance().getBufferPool() - .recycleConReadBuffer(readBuffer); - - } else { - this.recycle(readBuffer); - } - this.readBuffer = null; - this.readBufferOffset = 0; - } - if (writeBuffer != null) { - recycle(writeBuffer); - this.writeBuffer = null; - } - - ByteBuffer buffer = null; - while ((buffer = writeQueue.poll()) != null) { - recycle(buffer); - } - } - - protected final int getPacketLength(ByteBuffer buffer, int offset, - int position) { - if (position < offset + packetHeaderSize) { - return -1; - } else { - int length = buffer.get(offset) & 0xff; - length |= (buffer.get(++offset) & 0xff) << 8; - length |= (buffer.get(++offset) & 0xff) << 16; - return length + packetHeaderSize; - } - } - - public ConcurrentLinkedQueue getWriteQueue() { - return writeQueue; - } - - @SuppressWarnings("unchecked") - public void register(Selector selector) 
throws IOException { - processKey = channel.register(selector, SelectionKey.OP_READ, this); - NetSystem.getInstance().addConnection(this); - readBuffer = NetSystem.getInstance().getBufferPool() - .allocateConReadBuffer(); - this.handler.onConnected(this); - - } - - public void doWriteQueue() { - try { - boolean noMoreData = write0(); - lastWriteTime = TimeUtil.currentTimeMillis(); - if (noMoreData && writeQueue.isEmpty()) { - if ((processKey.isValid() && (processKey.interestOps() & SelectionKey.OP_WRITE) != 0)) { - disableWrite(); - } - - } else { - - if ((processKey.isValid() && (processKey.interestOps() & SelectionKey.OP_WRITE) == 0)) { - enableWrite(false); - } - } - - } catch (IOException e) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("caught err:", e); - } - close("err:" + e); - } - - } - - public void write(BufferArray bufferArray) { - try { - writeQueueLock.lock(); - List blockes = bufferArray.getWritedBlockLst(); - if (!bufferArray.getWritedBlockLst().isEmpty()) { - for (ByteBuffer curBuf : blockes) { - writeQueue.offer(curBuf); - } - } - ByteBuffer curBuf = bufferArray.getCurWritingBlock(); - if (curBuf.position() == 0) {// empty - this.recycle(curBuf); - } else { - writeQueue.offer(curBuf); - } - } finally { - writeQueueLock.unlock(); - bufferArray.clear(); - } - this.enableWrite(true); - - } - - private boolean write0() throws IOException { - - int written = 0; - ByteBuffer buffer = writeBuffer; - if (buffer != null) { - while (buffer.hasRemaining()) { - written = channel.write(buffer); - if (written > 0) { - netOutBytes += written; - NetSystem.getInstance().addNetOutBytes(written); - - } else { - break; - } - } - - if (buffer.hasRemaining()) { - return false; - } else { - writeBuffer = null; - recycle(buffer); - } - } - while ((buffer = writeQueue.poll()) != null) { - if (buffer.limit() == 0) { - recycle(buffer); - close("quit send"); - return true; - } - buffer.flip(); - while (buffer.hasRemaining()) { - written = channel.write(buffer); - if 
(written > 0) { - netOutBytes += written; - NetSystem.getInstance().addNetOutBytes(written); - lastWriteTime = TimeUtil.currentTimeMillis(); - } else { - break; - } - } - if (buffer.hasRemaining()) { - writeBuffer = buffer; - return false; - } else { - recycle(buffer); - } - } - return true; - } - - private void disableWrite() { - try { - SelectionKey key = this.processKey; - key.interestOps(key.interestOps() & OP_NOT_WRITE); - } catch (Exception e) { - LOGGER.warn("can't disable write " + e + " con " + this); - } - - } - - public void enableWrite(boolean wakeup) { - boolean needWakeup = false; - try { - SelectionKey key = this.processKey; - key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); - needWakeup = true; - } catch (Exception e) { - LOGGER.warn("can't enable write " + e); - - } - if (needWakeup && wakeup) { - processKey.selector().wakeup(); - } - } - - public void disableRead() { - - SelectionKey key = this.processKey; - key.interestOps(key.interestOps() & OP_NOT_READ); - } - - public void enableRead() { - - boolean needWakeup = false; - try { - SelectionKey key = this.processKey; - key.interestOps(key.interestOps() | SelectionKey.OP_READ); - needWakeup = true; - } catch (Exception e) { - LOGGER.warn("enable read fail " + e); - } - if (needWakeup) { - processKey.selector().wakeup(); - } - } - - public void setState(State newState) { - this.state = newState; - } - - /** - * 异步读取数据,only nio thread call - * - * @throws IOException - */ - protected void asynRead() throws IOException { - if (this.isClosed) { - return; - } - int got = channel.read(readBuffer); - onReadData(got); - - } - - private void closeSocket() { - - if (channel != null) { - boolean isSocketClosed = true; - try { - processKey.cancel(); - channel.close(); - } catch (Throwable e) { - } - boolean closed = isSocketClosed && (!channel.isOpen()); - if (closed == false) { - LOGGER.warn("close socket of connnection failed " + this); - } - - } - } - - public State getState() { - return state; 
- } - - public Direction getDirection() { - return direction; - } - - public void setDirection(Connection.Direction in) { - this.direction = in; - - } - - public int getPkgTotalSize() { - return pkgTotalSize; - } - - public int getPkgTotalCount() { - return pkgTotalCount; - } - - @Override - public String toString() { - return "Connection [host=" + host + ", port=" + port + ", id=" + id - + ", state=" + state + ", direction=" + direction - + ", startupTime=" + startupTime + ", lastReadTime=" - + lastReadTime + ", lastWriteTime=" + lastWriteTime + "]"; - } - - public void setMaxPacketSize(int maxPacketSize) { - this.maxPacketSize = maxPacketSize; - - } - - public void setPacketHeaderSize(int packetHeaderSize) { - this.packetHeaderSize = packetHeaderSize; - - } - -} diff --git a/src/main/java/io/mycat/net/ConnectionException.java b/src/main/java/io/mycat/net/ConnectionException.java index 85a6f4710..4cc0e1f82 100644 --- a/src/main/java/io/mycat/net/ConnectionException.java +++ b/src/main/java/io/mycat/net/ConnectionException.java @@ -1,3 +1,26 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ package io.mycat.net; public class ConnectionException extends RuntimeException { diff --git a/src/main/java/io/mycat/net/ConnectionFactory.java b/src/main/java/io/mycat/net/ConnectionFactory.java deleted file mode 100644 index 6a55a6892..000000000 --- a/src/main/java/io/mycat/net/ConnectionFactory.java +++ /dev/null @@ -1,92 +0,0 @@ -package io.mycat.net; - -import java.io.IOException; -import java.net.StandardSocketOptions; -import java.nio.ByteBuffer; -import java.nio.channels.SocketChannel; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * @author wuzh - */ -public abstract class ConnectionFactory { - - /** - * 创建一个具体的连接 - * - * @param channel - * @return Connection - * @throws IOException - */ - protected abstract Connection makeConnection(SocketChannel channel) - throws IOException; - - /** - * NIOHandler是无状态的,多个连接共享一个,因此建议作为 Factory的私有变量 - * - * @return NIOHandler - */ - @SuppressWarnings("rawtypes") - protected abstract NIOHandler getNIOHandler(); - - @SuppressWarnings("unchecked") - public Connection make(SocketChannel channel) throws IOException { - channel.setOption(StandardSocketOptions.SO_REUSEADDR, true); - // 子类完成具体连接创建工作 - Connection c = makeConnection(channel); - // 设置连接的参数 - NetSystem.getInstance().setSocketParams(c,true); - // 设置NIOHandler - c.setHandler(getNIOHandler()); - return c; - } -} - -@SuppressWarnings("rawtypes") -class NIOHandlerWrap implements NIOHandler { - protected static final Logger LOGGER = LoggerFactory - .getLogger(NIOHandlerWrap.class); - private final NIOHandler handler; - - public NIOHandlerWrap(NIOHandler handler) { - super(); - this.handler = 
handler; - } - - @SuppressWarnings("unchecked") - @Override - public void onConnected(Connection con) throws IOException { - con.setState(Connection.State.connecting); - String info = con.getDirection() == Connection.Direction.in ? "remote peer connected to me " - + con - : " connected to remote peer " + con; - LOGGER.info(info); - handler.onConnected(con); - - } - - @SuppressWarnings("unchecked") - @Override - public void onConnectFailed(Connection con, Throwable e) { - LOGGER.warn("connection failed: " + e + " con " + con); - handler.onConnectFailed(con, e); - - } - - @SuppressWarnings("unchecked") - @Override - public void handle(Connection con, ByteBuffer data, int start, - int readeLength) { - handler.handle(con, data, start, readeLength); - } - - @SuppressWarnings("unchecked") - @Override - public void onClosed(Connection con, String reason) { - handler.onClosed(con, reason); - - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/ExecutorUtil.java b/src/main/java/io/mycat/net/ExecutorUtil.java deleted file mode 100644 index 861e8aba5..000000000 --- a/src/main/java/io/mycat/net/ExecutorUtil.java +++ /dev/null @@ -1,24 +0,0 @@ -package io.mycat.net; - -import java.util.concurrent.LinkedTransferQueue; - -/** - * 生成一个有名字的(Nameable)Executor,容易进行跟踪和监控 - * - * @author wuzh - */ -public class ExecutorUtil { - - public static final NameableExecutor create(String name, int size) { - NameableThreadFactory factory = new NameableThreadFactory(name, true); - return new NameableExecutor(name, size, - new LinkedTransferQueue(), factory); - } - - public static final NamebleScheduledExecutor createSheduledExecute( - String name, int size) { - NameableThreadFactory factory = new NameableThreadFactory(name, true); - return new NamebleScheduledExecutor(name, size, factory); - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/FrontendConnection.java b/src/main/java/io/mycat/net/FrontendConnection.java new file mode 100644 index 
000000000..0fcab2ae1 --- /dev/null +++ b/src/main/java/io/mycat/net/FrontendConnection.java @@ -0,0 +1,569 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.net; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.CharsetUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.config.Capabilities; +import io.mycat.config.ErrorCode; +import io.mycat.config.Versions; +import io.mycat.net.handler.*; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.HandshakePacket; +import io.mycat.net.mysql.HandshakeV10Packet; +import io.mycat.net.mysql.MySQLPacket; +import io.mycat.net.mysql.OkPacket; +import io.mycat.util.CompressUtil; +import io.mycat.util.RandomUtil; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.InetSocketAddress; +import java.nio.channels.AsynchronousSocketChannel; +import java.nio.channels.NetworkChannel; +import java.nio.channels.SocketChannel; +import java.util.List; +import java.util.Set; + +/** + * @author mycat + */ +public abstract class FrontendConnection extends AbstractConnection { + + private static final Logger LOGGER = LoggerFactory.getLogger(FrontendConnection.class); + + protected long id; + protected String host; + protected int port; + protected int localPort; + protected long idleTimeout; + protected byte[] seed; + protected String user; + protected String schema; + protected String executeSql; + + protected FrontendPrivileges privileges; + protected FrontendQueryHandler queryHandler; + protected FrontendPrepareHandler prepareHandler; + protected LoadDataInfileHandler loadDataInfileHandler; + protected boolean isAccepted; + protected boolean isAuthenticated; + + public FrontendConnection(NetworkChannel channel) throws IOException { + super(channel); + InetSocketAddress localAddr = (InetSocketAddress) channel.getLocalAddress(); + InetSocketAddress remoteAddr = null; + if (channel instanceof SocketChannel) { + remoteAddr = (InetSocketAddress) ((SocketChannel) channel).getRemoteAddress(); + + } else if (channel instanceof 
AsynchronousSocketChannel) { + remoteAddr = (InetSocketAddress) ((AsynchronousSocketChannel) channel).getRemoteAddress(); + } + + this.host = remoteAddr.getHostString(); + this.port = localAddr.getPort(); + this.localPort = remoteAddr.getPort(); + this.handler = new FrontendAuthenticator(this); + } + + public long getId() { + return id; + } + + public void setId(long id) { + this.id = id; + } + + public String getHost() { + return host; + } + + public void setHost(String host) { + this.host = host; + } + + public int getPort() { + return port; + } + + public void setPort(int port) { + this.port = port; + } + + public int getLocalPort() { + return localPort; + } + + public void setLocalPort(int localPort) { + this.localPort = localPort; + } + + public void setAccepted(boolean isAccepted) { + this.isAccepted = isAccepted; + } + + public void setProcessor(NIOProcessor processor) { + super.setProcessor(processor); + processor.addFrontend(this); + } + + public LoadDataInfileHandler getLoadDataInfileHandler() { + return loadDataInfileHandler; + } + + public void setLoadDataInfileHandler(LoadDataInfileHandler loadDataInfileHandler) { + this.loadDataInfileHandler = loadDataInfileHandler; + } + + public void setQueryHandler(FrontendQueryHandler queryHandler) { + this.queryHandler = queryHandler; + } + + public void setPrepareHandler(FrontendPrepareHandler prepareHandler) { + this.prepareHandler = prepareHandler; + } + + public void setAuthenticated(boolean isAuthenticated) { + this.isAuthenticated = isAuthenticated; + } + + public FrontendPrivileges getPrivileges() { + return privileges; + } + + public void setPrivileges(FrontendPrivileges privileges) { + this.privileges = privileges; + } + + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + public String getSchema() { + return schema; + } + + public void setSchema(String schema) { + this.schema = schema; + } + + public String getExecuteSql() { + return 
executeSql; + } + + public void setExecuteSql(String executeSql) { + this.executeSql = executeSql; + } + + public byte[] getSeed() { + return seed; + } + + public boolean setCharsetIndex(int ci) { + String charset = CharsetUtil.getCharset(ci); + if (charset != null) { + return setCharset(charset); + } else { + return false; + } + } + + public void writeErrMessage(int errno, String msg) { + writeErrMessage((byte) 1, errno, msg); + } + + public void writeErrMessage(byte id, int errno, String msg) { + ErrorPacket err = new ErrorPacket(); + err.packetId = id; + err.errno = errno; + err.message = encodeString(msg, charset); + err.write(this); + } + + public void initDB(byte[] data) { + + MySQLMessage mm = new MySQLMessage(data); + mm.position(5); + String db = mm.readString(); + + // 检查schema的有效性 + if (db == null || !privileges.schemaExists(db)) { + writeErrMessage(ErrorCode.ER_BAD_DB_ERROR, "Unknown database '" + db + "'"); + return; + } + + if (!privileges.userExists(user, host)) { + writeErrMessage(ErrorCode.ER_ACCESS_DENIED_ERROR, "Access denied for user '" + user + "'"); + return; + } + + Set schemas = privileges.getUserSchemas(user); + if (schemas == null || schemas.size() == 0 || schemas.contains(db)) { + this.schema = db; + write(writeToBuffer(OkPacket.OK, allocate())); + } else { + String s = "Access denied for user '" + user + "' to database '" + db + "'"; + writeErrMessage(ErrorCode.ER_DBACCESS_DENIED_ERROR, s); + } + } + + + public void loadDataInfileStart(String sql) { + if (loadDataInfileHandler != null) { + try { + loadDataInfileHandler.start(sql); + } catch (Exception e) { + LOGGER.error("load data error", e); + writeErrMessage(ErrorCode.ERR_HANDLE_DATA, e.getMessage()); + } + + } else { + writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "load data infile sql is not unsupported!"); + } + } + + public void loadDataInfileData(byte[] data) { + if (loadDataInfileHandler != null) { + try { + loadDataInfileHandler.handle(data); + } catch (Exception e) { + 
LOGGER.error("load data error", e); + writeErrMessage(ErrorCode.ERR_HANDLE_DATA, e.getMessage()); + } + } else { + writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "load data infile data is not unsupported!"); + } + + } + + public void loadDataInfileEnd(byte packID) { + if (loadDataInfileHandler != null) { + try { + loadDataInfileHandler.end(packID); + } catch (Exception e) { + LOGGER.error("load data error", e); + writeErrMessage(ErrorCode.ERR_HANDLE_DATA, e.getMessage()); + } + } else { + writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "load data infile end is not unsupported!"); + } + } + + + public void query(String sql) { + + if (sql == null || sql.length() == 0) { + writeErrMessage(ErrorCode.ER_NOT_ALLOWED_COMMAND, "Empty SQL"); + return; + } + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(new StringBuilder().append(this).append(" ").append(sql).toString()); + } + + // remove last ';' + if (sql.endsWith(";")) { + sql = sql.substring(0, sql.length() - 1); + } + + // 记录SQL + this.setExecuteSql(sql); + + // 防火墙策略( SQL 黑名单/ 注入攻击) + if ( !privileges.checkFirewallSQLPolicy( user, sql ) ) { + writeErrMessage(ErrorCode.ERR_WRONG_USED, + "The statement is unsafe SQL, reject for user '" + user + "'"); + return; + } + + // DML 权限检查 + try { + boolean isPassed = privileges.checkDmlPrivilege(user, schema, sql); + if ( !isPassed ) { + writeErrMessage(ErrorCode.ERR_WRONG_USED, + "The statement DML privilege check is not passed, reject for user '" + user + "'"); + return; + } + } catch( com.alibaba.druid.sql.parser.ParserException e1) { + writeErrMessage(ErrorCode.ERR_WRONG_USED, e1.getMessage()); + LOGGER.error("parse exception", e1 ); + return; + } + + // 执行查询 + if (queryHandler != null) { + queryHandler.setReadOnly(privileges.isReadOnly(user)); + queryHandler.query(sql); + + } else { + writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Query unsupported!"); + } + } + + public void query(byte[] data) { + + // 取得语句 + String sql = null; + try { + MySQLMessage mm = new 
MySQLMessage(data); + mm.position(5); + sql = mm.readString(charset); + } catch (UnsupportedEncodingException e) { + writeErrMessage(ErrorCode.ER_UNKNOWN_CHARACTER_SET, "Unknown charset '" + charset + "'"); + return; + } + + this.query( sql ); + } + + public void stmtPrepare(byte[] data) { + if (prepareHandler != null) { + // 取得语句 + MySQLMessage mm = new MySQLMessage(data); + mm.position(5); + String sql = null; + try { + sql = mm.readString(charset); + } catch (UnsupportedEncodingException e) { + writeErrMessage(ErrorCode.ER_UNKNOWN_CHARACTER_SET, + "Unknown charset '" + charset + "'"); + return; + } + if (sql == null || sql.length() == 0) { + writeErrMessage(ErrorCode.ER_NOT_ALLOWED_COMMAND, "Empty SQL"); + return; + } + + // 记录SQL + this.setExecuteSql(sql); + + // 执行预处理 + prepareHandler.prepare(sql); + } else { + writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Prepare unsupported!"); + } + } + + public void stmtSendLongData(byte[] data) { + if(prepareHandler != null) { + prepareHandler.sendLongData(data); + } else { + writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Prepare unsupported!"); + } + } + + public void stmtReset(byte[] data) { + if(prepareHandler != null) { + prepareHandler.reset(data); + } else { + writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Prepare unsupported!"); + } + } + + public void stmtExecute(byte[] data) { + if (prepareHandler != null) { + prepareHandler.execute(data); + } else { + writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Prepare unsupported!"); + } + } + + public void stmtClose(byte[] data) { + if (prepareHandler != null) { + prepareHandler.close( data ); + } else { + writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Prepare unsupported!"); + } + } + + public void ping() { + write(writeToBuffer(OkPacket.OK, allocate())); + } + + public void heartbeat(byte[] data) { + write(writeToBuffer(OkPacket.OK, allocate())); + } + + public void kill(byte[] data) { + writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Unknown command"); + } + 
+ public void unknown(byte[] data) { + writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Unknown command"); + } + + @Override + public void register() throws IOException { + if (!isClosed.get()) { + + // 生成认证数据 + byte[] rand1 = RandomUtil.randomBytes(8); + byte[] rand2 = RandomUtil.randomBytes(12); + + // 保存认证数据 + byte[] seed = new byte[rand1.length + rand2.length]; + System.arraycopy(rand1, 0, seed, 0, rand1.length); + System.arraycopy(rand2, 0, seed, rand1.length, rand2.length); + this.seed = seed; + + // 发送握手数据包 + boolean useHandshakeV10 = MycatServer.getInstance().getConfig().getSystem().getUseHandshakeV10() == 1; + if(useHandshakeV10) { + HandshakeV10Packet hs = new HandshakeV10Packet(); + hs.packetId = 0; + hs.protocolVersion = Versions.PROTOCOL_VERSION; + hs.serverVersion = Versions.SERVER_VERSION; + hs.threadId = id; + hs.seed = rand1; + hs.serverCapabilities = getServerCapabilities(); + hs.serverCharsetIndex = (byte) (charsetIndex & 0xff); + hs.serverStatus = 2; + hs.restOfScrambleBuff = rand2; + hs.write(this); + } else { + HandshakePacket hs = new HandshakePacket(); + hs.packetId = 0; + hs.protocolVersion = Versions.PROTOCOL_VERSION; + hs.serverVersion = Versions.SERVER_VERSION; + hs.threadId = id; + hs.seed = rand1; + hs.serverCapabilities = getServerCapabilities(); + hs.serverCharsetIndex = (byte) (charsetIndex & 0xff); + hs.serverStatus = 2; + hs.restOfScrambleBuff = rand2; + hs.write(this); + } + + // asynread response + this.asynRead(); + } + } + + @Override + public void handle(final byte[] data) { + + if (isSupportCompress()) { + List packs = CompressUtil.decompressMysqlPacket(data, decompressUnfinishedDataQueue); + for (byte[] pack : packs) { + if (pack.length != 0) { + rawHandle(pack); + } + } + + } else { + rawHandle(data); + } + } + + public void rawHandle(final byte[] data) { + + //load data infile 客户端会发空包 长度为4 + if (data.length == 4 && data[0] == 0 && data[1] == 0 && data[2] == 0) { + // load in data空包 + loadDataInfileEnd(data[3]); + return; 
+ } + //修改quit的判断,当load data infile 分隔符为\001 时可能会出现误判断的bug. + if (data.length>4 && data[0] == 1 && data[1] == 0 && data[2]== 0 && data[3] == 0 &&data[4] == MySQLPacket.COM_QUIT) { + this.getProcessor().getCommands().doQuit(); + this.close("quit cmd"); + return; + } + handler.handle(data); + } + + protected int getServerCapabilities() { + int flag = 0; + flag |= Capabilities.CLIENT_LONG_PASSWORD; + flag |= Capabilities.CLIENT_FOUND_ROWS; + flag |= Capabilities.CLIENT_LONG_FLAG; + flag |= Capabilities.CLIENT_CONNECT_WITH_DB; + // flag |= Capabilities.CLIENT_NO_SCHEMA; + boolean usingCompress= MycatServer.getInstance().getConfig().getSystem().getUseCompression()==1 ; + if (usingCompress) { + flag |= Capabilities.CLIENT_COMPRESS; + } + + flag |= Capabilities.CLIENT_ODBC; + flag |= Capabilities.CLIENT_LOCAL_FILES; + flag |= Capabilities.CLIENT_IGNORE_SPACE; + flag |= Capabilities.CLIENT_PROTOCOL_41; + flag |= Capabilities.CLIENT_INTERACTIVE; + // flag |= Capabilities.CLIENT_SSL; + flag |= Capabilities.CLIENT_IGNORE_SIGPIPE; + flag |= Capabilities.CLIENT_TRANSACTIONS; + // flag |= ServerDefs.CLIENT_RESERVED; + flag |= Capabilities.CLIENT_SECURE_CONNECTION; + flag |= Capabilities.CLIENT_MULTI_STATEMENTS; + flag |= Capabilities.CLIENT_MULTI_RESULTS; + boolean useHandshakeV10 = MycatServer.getInstance().getConfig().getSystem().getUseHandshakeV10() == 1; + if(useHandshakeV10) { + flag |= Capabilities.CLIENT_PLUGIN_AUTH; + } + return flag; + } + + protected boolean isConnectionReset(Throwable t) { + if (t instanceof IOException) { + String msg = t.getMessage(); + return (msg != null && msg.contains("Connection reset by peer")); + } + return false; + } + + @Override + public String toString() { + return new StringBuilder().append("[thread=") + .append(Thread.currentThread().getName()).append(",class=") + .append(getClass().getSimpleName()).append(",id=").append(id) + .append(",host=").append(host).append(",port=").append(port) + 
.append(",schema=").append(schema).append(']').toString(); + } + + private final static byte[] encodeString(String src, String charset) { + if (src == null) { + return null; + } + if (charset == null) { + return src.getBytes(); + } + try { + return src.getBytes(charset); + } catch (UnsupportedEncodingException e) { + return src.getBytes(); + } + } + + @Override + public void close(String reason) { + super.close(isAuthenticated ? reason : ""); + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/NIOAcceptor.java b/src/main/java/io/mycat/net/NIOAcceptor.java index 2ce351ab1..859072c8a 100644 --- a/src/main/java/io/mycat/net/NIOAcceptor.java +++ b/src/main/java/io/mycat/net/NIOAcceptor.java @@ -1,3 +1,26 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ package io.mycat.net; import java.io.IOException; @@ -10,23 +33,28 @@ import java.nio.channels.SocketChannel; import java.util.Set; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import io.mycat.util.SelectorUtil; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.net.factory.FrontendConnectionFactory; /** - * @author wuzh + * @author mycat */ -public final class NIOAcceptor extends Thread { +public final class NIOAcceptor extends Thread implements SocketAcceptor{ private static final Logger LOGGER = LoggerFactory.getLogger(NIOAcceptor.class); + private static final AcceptIdGenerator ID_GENERATOR = new AcceptIdGenerator(); + private final int port; - private final Selector selector; + private volatile Selector selector; private final ServerSocketChannel serverChannel; - private final ConnectionFactory factory; + private final FrontendConnectionFactory factory; private long acceptCount; private final NIOReactorPool reactorPool; - public NIOAcceptor(String name, String bindIp, int port, - ConnectionFactory factory, NIOReactorPool reactorPool) + public NIOAcceptor(String name, String bindIp,int port, + FrontendConnectionFactory factory, NIOReactorPool reactorPool) throws IOException { super.setName(name); this.port = port; @@ -53,51 +81,67 @@ public long getAcceptCount() { @Override public void run() { - final Selector selector = this.selector; + int invalidSelectCount = 0; for (;;) { + final Selector tSelector = this.selector; ++acceptCount; try { - selector.select(1000L); - Set keys = selector.selectedKeys(); - try { - for (SelectionKey key : keys) { - if (key.isValid() && key.isAcceptable()) { - accept(); - } else { - key.cancel(); + long start = System.nanoTime(); + tSelector.select(1000L); + long end = System.nanoTime(); + Set keys = tSelector.selectedKeys(); + if (keys.size() == 0 && (end - start) < SelectorUtil.MIN_SELECT_TIME_IN_NANO_SECONDS ) + { + invalidSelectCount++; + } + else + 
{ + try { + for (SelectionKey key : keys) { + if (key.isValid() && key.isAcceptable()) { + accept(); + } else { + key.cancel(); + } } + } finally { + keys.clear(); + invalidSelectCount = 0; } - } finally { - keys.clear(); } - } catch (Throwable e) { + if (invalidSelectCount > SelectorUtil.REBUILD_COUNT_THRESHOLD) + { + final Selector rebuildSelector = SelectorUtil.rebuildSelector(this.selector); + if (rebuildSelector != null) + { + this.selector = rebuildSelector; + } + invalidSelectCount = 0; + } + } catch (Exception e) { LOGGER.warn(getName(), e); } } } - /** - * 接受新连接 - */ private void accept() { SocketChannel channel = null; try { channel = serverChannel.accept(); channel.configureBlocking(false); - Connection c = factory.make(channel); - c.setDirection(Connection.Direction.in); - c.setId(ConnectIdGenerator.getINSTNCE().getId()); - InetSocketAddress remoteAddr = (InetSocketAddress) channel - .getRemoteAddress(); - c.setHost(remoteAddr.getHostString()); - c.setPort(remoteAddr.getPort()); - // 派发此连接到某个Reactor处理 + FrontendConnection c = factory.make(channel); + c.setAccepted(true); + c.setId(ID_GENERATOR.getId()); + NIOProcessor processor = (NIOProcessor) MycatServer.getInstance() + .nextProcessor(); + c.setProcessor(processor); + NIOReactor reactor = reactorPool.getNextReactor(); reactor.postRegister(c); - } catch (Throwable e) { + } catch (Exception e) { + LOGGER.warn(getName(), e); closeChannel(channel); - LOGGER.warn(getName(), e); } } @@ -110,12 +154,36 @@ private static void closeChannel(SocketChannel channel) { try { socket.close(); } catch (IOException e) { + LOGGER.error("closeChannelError", e); } } try { channel.close(); } catch (IOException e) { + LOGGER.error("closeChannelError", e); + } + } + + /** + * 前端连接ID生成器 + * + * @author mycat + */ + private static class AcceptIdGenerator { + + private static final long MAX_VALUE = 0xffffffffL; + + private long acceptId = 0L; + private final Object lock = new Object(); + + private long getId() { + synchronized 
(lock) { + if (acceptId >= MAX_VALUE) { + acceptId = 0L; + } + return ++acceptId; + } } } -} \ No newline at end of file +} diff --git a/src/main/java/io/mycat/server/sqlhandler/BeginHandler.java b/src/main/java/io/mycat/net/NIOConnection.java similarity index 74% rename from src/main/java/io/mycat/server/sqlhandler/BeginHandler.java rename to src/main/java/io/mycat/net/NIOConnection.java index 23674f74d..0ba3e0869 100644 --- a/src/main/java/io/mycat/server/sqlhandler/BeginHandler.java +++ b/src/main/java/io/mycat/net/NIOConnection.java @@ -21,24 +21,31 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.sqlhandler; +package io.mycat.net; -import io.mycat.server.MySQLFrontConnection; +import java.io.IOException; +import java.nio.ByteBuffer; /** * @author mycat */ -public final class BeginHandler { - private static final byte[] AC_OFF = new byte[] { 7, 0, 0, 1, 0, 0, 0, 0, - 0, 0, 0 }; +public interface NIOConnection extends ClosableConnection{ - public static void handle(String stmt, MySQLFrontConnection c) { - if (c.isAutocommit()) { - c.setAutocommit(false); - c.write(AC_OFF); - } else { - c.getSession2().commit(); - } - } + /** + * connected + */ + void register() throws IOException; + /** + * 处理数据 + */ + void handle(byte[] data); + + /** + * 写出一块缓存数据 + */ + void write(ByteBuffer buffer); + + + } \ No newline at end of file diff --git a/src/main/java/io/mycat/net/NIOConnector.java b/src/main/java/io/mycat/net/NIOConnector.java index 782b75a32..3c3339c70 100644 --- a/src/main/java/io/mycat/net/NIOConnector.java +++ b/src/main/java/io/mycat/net/NIOConnector.java @@ -1,151 +1,196 @@ -package io.mycat.net; - -import io.mycat.backend.postgresql.PostgreSQLBackendConnection; -import io.mycat.backend.postgresql.utils.PacketUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.nio.channels.SelectionKey; -import 
java.nio.channels.Selector; -import java.nio.channels.SocketChannel; -import java.util.Set; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.LinkedBlockingQueue; - -/** - * NIO 连接器,用于连接对方Sever - * - * @author wuzh - */ -public final class NIOConnector extends Thread { - private static final Logger LOGGER = LoggerFactory.getLogger(NIOConnector.class); - - - private final String name; - private final Selector selector; - private final BlockingQueue connectQueue; - private long connectCount; - private final NIOReactorPool reactorPool; - - public NIOConnector(String name, NIOReactorPool reactorPool) - throws IOException { - super.setName(name); - this.name = name; - this.selector = Selector.open(); - this.reactorPool = reactorPool; - this.connectQueue = new LinkedBlockingQueue(); - } - - public long getConnectCount() { - return connectCount; - } - - /** - * 添加一个需要异步连接的Connection到队列中,等待连接 - * - * @param Connection - */ - public void postConnect(Connection c) { - connectQueue.offer(c); - selector.wakeup(); - } - - @Override - public void run() { - final Selector selector = this.selector; - for (;;) { - ++connectCount; - try { - selector.select(1000L); - connect(selector); - Set keys = selector.selectedKeys(); - try { - for (SelectionKey key : keys) { - Object att = key.attachment(); - if (att != null && key.isValid() && key.isConnectable()) { - finishConnect(key, att); - if (att instanceof PostgreSQLBackendConnection){//ONLY PG SENG - SocketChannel sc = (SocketChannel) key.channel(); - sendStartupPacket(sc,att); - } - } else { - key.cancel(); - } - } - } finally { - keys.clear(); - } - } catch (Throwable e) { - LOGGER.warn(name, e); - } - } - } - - //TODO COOLLF 暂时为权宜之计,后续要进行代码结构封调整. 
- private static void sendStartupPacket(SocketChannel socketChannel, Object _att) throws IOException { - PostgreSQLBackendConnection att = (PostgreSQLBackendConnection) _att; - ByteBuffer buffer = PacketUtils.makeStartUpPacket(att.getUser(), att.getSchema()); - buffer.flip(); - socketChannel.write(buffer); - } - - - private void connect(Selector selector) { - Connection c = null; - while ((c = connectQueue.poll()) != null) { - try { - SocketChannel channel = (SocketChannel) c.getChannel(); - channel.register(selector, SelectionKey.OP_CONNECT, c); - channel.connect(new InetSocketAddress(c.host, c.port)); - } catch (Throwable e) { - c.close("connect failed:" + e.toString()); - } - } - } - - @SuppressWarnings("unchecked") - private void finishConnect(SelectionKey key, Object att) { - Connection c = (Connection) att; - try { - if (finishConnect(c, (SocketChannel) c.channel)) { - clearSelectionKey(key); - c.setId(ConnectIdGenerator.getINSTNCE().getId()); - System.out.println("----------------ConnectIdGenerator.getINSTNCE().getId()-----------------"+ConnectIdGenerator.getINSTNCE().getId()); - NIOReactor reactor = reactorPool.getNextReactor(); - reactor.postRegister(c); - - } - } catch (Throwable e) { - clearSelectionKey(key); - c.close(e.toString()); - c.getHandler().onConnectFailed(c, e); - - } - } - - private boolean finishConnect(Connection c, SocketChannel channel) - throws IOException { - System.out.println("----------------finishConnect-----------------"); - if (channel.isConnectionPending()) { - System.out.println("----------------finishConnect-isConnectionPending-----------------"); - channel.finishConnect(); - // c.setLocalPort(channel.socket().getLocalPort()); - return true; - } else { - return false; - } - } - - private void clearSelectionKey(SelectionKey key) { - if (key.isValid()) { - key.attach(null); - key.cancel(); - } - } - - - -} \ No newline at end of file +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.net; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import java.util.concurrent.atomic.AtomicLong; + +import io.mycat.util.SelectorUtil; + +/** + * @author mycat + */ +public final class NIOConnector extends Thread implements SocketConnector { + private static final Logger LOGGER = LoggerFactory.getLogger(NIOConnector.class); + public static final ConnectIdGenerator ID_GENERATOR = new ConnectIdGenerator(); + + private final String name; + private volatile Selector selector; + private final BlockingQueue connectQueue; + private long connectCount; + private final NIOReactorPool reactorPool; + + public NIOConnector(String name, NIOReactorPool reactorPool) + throws IOException { + super.setName(name); + this.name = name; + this.selector = Selector.open(); + this.reactorPool = reactorPool; + this.connectQueue = new LinkedBlockingQueue(); + } + + public long getConnectCount() { + return connectCount; + } + + public void postConnect(AbstractConnection c) { + connectQueue.offer(c); + selector.wakeup(); + } + + @Override + public void run() { + int invalidSelectCount = 0; + for (;;) { + final Selector tSelector = this.selector; + ++connectCount; + try { + long start = System.nanoTime(); + tSelector.select(1000L); + long end = System.nanoTime(); + connect(tSelector); + Set keys = tSelector.selectedKeys(); + if (keys.size() == 0 && (end - start) < SelectorUtil.MIN_SELECT_TIME_IN_NANO_SECONDS ) + { + invalidSelectCount++; + } + else + { + try { + for (SelectionKey key : keys) + { + Object att = key.attachment(); + if (att != null && key.isValid() && key.isConnectable()) + { + finishConnect(key, att); + } else 
+ { + key.cancel(); + } + } + } finally + { + invalidSelectCount = 0; + keys.clear(); + } + } + if (invalidSelectCount > SelectorUtil.REBUILD_COUNT_THRESHOLD) + { + final Selector rebuildSelector = SelectorUtil.rebuildSelector(this.selector); + if (rebuildSelector != null) + { + this.selector = rebuildSelector; + } + invalidSelectCount = 0; + } + } catch (Exception e) { + LOGGER.warn(name, e); + } + } + } + + private void connect(Selector selector) { + AbstractConnection c = null; + while ((c = connectQueue.poll()) != null) { + try { + SocketChannel channel = (SocketChannel) c.getChannel(); + channel.register(selector, SelectionKey.OP_CONNECT, c); + channel.connect(new InetSocketAddress(c.host, c.port)); + + } catch (Exception e) { + LOGGER.error("error:",e); + c.close(e.toString()); + } + } + } + + private void finishConnect(SelectionKey key, Object att) { + BackendAIOConnection c = (BackendAIOConnection) att; + try { + if (finishConnect(c, (SocketChannel) c.channel)) { + clearSelectionKey(key); + c.setId(ID_GENERATOR.getId()); + NIOProcessor processor = MycatServer.getInstance() + .nextProcessor(); + c.setProcessor(processor); + NIOReactor reactor = reactorPool.getNextReactor(); + reactor.postRegister(c); + c.onConnectfinish(); + } + } catch (Exception e) { + clearSelectionKey(key); + LOGGER.error("error:",e); + c.close(e.toString()); + c.onConnectFailed(e); + + } + } + + private boolean finishConnect(AbstractConnection c, SocketChannel channel) + throws IOException { + if (channel.isConnectionPending()) { + channel.finishConnect(); + + c.setLocalPort(channel.socket().getLocalPort()); + return true; + } else { + return false; + } + } + + private void clearSelectionKey(SelectionKey key) { + if (key.isValid()) { + key.attach(null); + key.cancel(); + } + } + + /** + * 后端连接ID生成器 + * + * @author mycat + */ + public static class ConnectIdGenerator { + + private static final long MAX_VALUE = Long.MAX_VALUE; + private AtomicLong connectId = new AtomicLong(0); + + public 
long getId() { + return connectId.incrementAndGet(); + } + } + +} diff --git a/src/main/java/io/mycat/net/NIOHandler.java b/src/main/java/io/mycat/net/NIOHandler.java index db74a4415..0acb4d0a7 100644 --- a/src/main/java/io/mycat/net/NIOHandler.java +++ b/src/main/java/io/mycat/net/NIOHandler.java @@ -1,49 +1,33 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ package io.mycat.net; -import java.io.IOException; -import java.nio.ByteBuffer; - /** - * NIOHandler是无状态的,多个连接共享一个,用于处理连接的事件,每个方法需要不阻塞,尽快返回结果 - * - * @author wuzh + * @author mycat */ -public interface NIOHandler { - - /** - * 连接建立成功的通知事件 - * - * @param con - * 当前连接 - */ - public void onConnected(T con) throws IOException; - - /** - * 连接失败 - * - * @param con - * 失败的连接 - * @param e - * 连接异常 - */ - public void onConnectFailed(T con, Throwable e); - - /** - * 连接关闭通知 - * @param con - * @throws IOException - */ - public void onClosed(T con,String reason); +public interface NIOHandler { - /** - * 收到数据需要处理 - * - * @param con - * 当前连接 - * @param data - * 收到的数据包 - * @param readedLength 数据包的长度 - */ - void handle(T con, ByteBuffer data,int start,int readedLength); + void handle(byte[] data); } \ No newline at end of file diff --git a/src/main/java/io/mycat/net/NIOProcessor.java b/src/main/java/io/mycat/net/NIOProcessor.java new file mode 100644 index 000000000..9d12f048e --- /dev/null +++ b/src/main/java/io/mycat/net/NIOProcessor.java @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net; + +import java.io.IOException; +import java.util.Iterator; +import java.util.Map.Entry; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicInteger; + +import io.mycat.buffer.BufferPool; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.BackendConnection; +import io.mycat.statistic.CommandCount; +import io.mycat.util.NameableExecutor; +import io.mycat.util.TimeUtil; + +/** + * @author mycat + */ +public final class NIOProcessor { + + private static final Logger LOGGER = LoggerFactory.getLogger("NIOProcessor"); + + private final String name; + private final BufferPool bufferPool; + private final NameableExecutor executor; + private final ConcurrentMap frontends; + private final ConcurrentMap backends; + private final CommandCount commands; + private long netInBytes; + private long netOutBytes; + + // TODO: add by zhuam + // reload @@config_all 后, 老的backends 全部移往 backends_old, 待检测任务进行销毁 + public final static ConcurrentLinkedQueue backends_old = new ConcurrentLinkedQueue(); + + //前端已连接数 + private AtomicInteger frontendsLength = new AtomicInteger(0); + + public NIOProcessor(String name, BufferPool bufferPool, + NameableExecutor executor) throws IOException { + this.name = name; + this.bufferPool = bufferPool; + this.executor = executor; + this.frontends = new ConcurrentHashMap(); + this.backends = new ConcurrentHashMap(); + this.commands = new CommandCount(); + } + + public String getName() { 
+ return name; + } + + public BufferPool getBufferPool() { + return bufferPool; + } + + public int getWriteQueueSize() { + int total = 0; + for (FrontendConnection fron : frontends.values()) { + total += fron.getWriteQueue().size(); + } + for (BackendConnection back : backends.values()) { + if (back instanceof BackendAIOConnection) { + total += ((BackendAIOConnection) back).getWriteQueue().size(); + } + } + return total; + + } + + public NameableExecutor getExecutor() { + return this.executor; + } + + public CommandCount getCommands() { + return this.commands; + } + + public long getNetInBytes() { + return this.netInBytes; + } + + public void addNetInBytes(long bytes) { + this.netInBytes += bytes; + } + + public long getNetOutBytes() { + return this.netOutBytes; + } + + public void addNetOutBytes(long bytes) { + this.netOutBytes += bytes; + } + + public void addFrontend(FrontendConnection c) { + this.frontends.put(c.getId(), c); + this.frontendsLength.incrementAndGet(); + } + + public ConcurrentMap getFrontends() { + return this.frontends; + } + + public int getForntedsLength(){ + return this.frontendsLength.get(); + } + + public void addBackend(BackendConnection c) { + this.backends.put(c.getId(), c); + } + + public ConcurrentMap getBackends() { + return this.backends; + } + + /** + * 定时执行该方法,回收部分资源。 + */ + public void checkBackendCons() { + backendCheck(); + } + + /** + * 定时执行该方法,回收部分资源。 + */ + public void checkFrontCons() { + frontendCheck(); + } + + // 前端连接检查 + private void frontendCheck() { + Iterator> it = frontends.entrySet() + .iterator(); + while (it.hasNext()) { + FrontendConnection c = it.next().getValue(); + + // 删除空连接 + if (c == null) { + it.remove(); + this.frontendsLength.decrementAndGet(); + continue; + } + + // 清理已关闭连接,否则空闲检查。 + if (c.isClosed()) { + // 此处在高并发情况下会存在并发问题, fixed #1072 极有可能解决了 #700 + //c.cleanup(); + it.remove(); + this.frontendsLength.decrementAndGet(); + } else { + // very important ,for some data maybe not sent + 
checkConSendQueue(c); + c.idleCheck(); + } + } + } + + private void checkConSendQueue(AbstractConnection c) { + // very important ,for some data maybe not sent + if (!c.writeQueue.isEmpty()) { + c.getSocketWR().doNextWriteCheck(); + } + } + + // 后端连接检查 + private void backendCheck() { + long sqlTimeout = MycatServer.getInstance().getConfig().getSystem().getSqlExecuteTimeout() * 1000L; + Iterator> it = backends.entrySet().iterator(); + while (it.hasNext()) { + BackendConnection c = it.next().getValue(); + + // 删除空连接 + if (c == null) { + it.remove(); + continue; + } + // SQL执行超时的连接关闭 + if (c.isBorrowed() && c.getLastTime() < TimeUtil.currentTimeMillis() - sqlTimeout) { + LOGGER.warn("found backend connection SQL timeout ,close it " + c); + c.close("sql timeout"); + } + + // 清理已关闭连接,否则空闲检查。 + if (c.isClosed()) { + it.remove(); + + } else { + // very important ,for some data maybe not sent + if (c instanceof AbstractConnection) { + checkConSendQueue((AbstractConnection) c); + } + c.idleCheck(); + } + } + } + + public void removeConnection(AbstractConnection con) { + if (con instanceof BackendConnection) { + this.backends.remove(con.getId()); + } else { + this.frontends.remove(con.getId()); + this.frontendsLength.decrementAndGet(); + } + + } + //jdbc连接用这个释放 + public void removeConnection(BackendConnection con){ + this.backends.remove(con.getId()); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/NIOReactor.java b/src/main/java/io/mycat/net/NIOReactor.java index d43f70714..41018d174 100644 --- a/src/main/java/io/mycat/net/NIOReactor.java +++ b/src/main/java/io/mycat/net/NIOReactor.java @@ -1,3 +1,26 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ package io.mycat.net; import java.io.IOException; @@ -8,17 +31,22 @@ import java.util.Set; import java.util.concurrent.ConcurrentLinkedQueue; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import io.mycat.util.SelectorUtil; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * 网络事件反应器 * - * @author wuzh + *

+ * Catch exceptions such as OOM so that the reactor can keep running for response client! + *

+ * @since 2016-03-30 + * + * @author mycat, Uncle-pan + * */ public final class NIOReactor { - private static final Logger LOGGER = LoggerFactory - .getLogger(NIOReactor.class); + private static final Logger LOGGER = LoggerFactory.getLogger(NIOReactor.class); private final String name; private final RW reactorR; @@ -31,12 +59,12 @@ final void startup() { new Thread(reactorR, name + "-RW").start(); } - final void postRegister(Connection c) { + final void postRegister(AbstractConnection c) { reactorR.registerQueue.offer(c); reactorR.selector.wakeup(); } - final Queue getRegisterQueue() { + final Queue getRegisterQueue() { return reactorR.registerQueue; } @@ -45,86 +73,118 @@ final long getReactCount() { } private final class RW implements Runnable { - private final Selector selector; - private final ConcurrentLinkedQueue registerQueue; + private volatile Selector selector; + private final ConcurrentLinkedQueue registerQueue; private long reactCount; private RW() throws IOException { this.selector = Selector.open(); - this.registerQueue = new ConcurrentLinkedQueue(); + this.registerQueue = new ConcurrentLinkedQueue(); } @Override public void run() { - final Selector selector = this.selector; + int invalidSelectCount = 0; Set keys = null; for (;;) { ++reactCount; try { - selector.select(500L); - register(selector); - keys = selector.selectedKeys(); - for (SelectionKey key : keys) { - Connection con = null; - try { - Object att = key.attachment(); - if (att != null && key.isValid()) { - con = (Connection) att; - if (key.isReadable()) { - try { - con.asynRead(); - } catch (Throwable e) { - if (!(e instanceof java.io.IOException)) { - LOGGER.warn("caught err: "+con, e); + final Selector tSelector = this.selector; + long start = System.nanoTime(); + tSelector.select(500L); + long end = System.nanoTime(); + register(tSelector); + keys = tSelector.selectedKeys(); + if (keys.size() == 0 && (end - start) < SelectorUtil.MIN_SELECT_TIME_IN_NANO_SECONDS ) + { + 
invalidSelectCount++; + } + else + { + invalidSelectCount = 0; + for (SelectionKey key : keys) { + AbstractConnection con = null; + try { + Object att = key.attachment(); + if (att != null) { + con = (AbstractConnection) att; + if (key.isValid() && key.isReadable()) { + try { + con.asynRead(); + } catch (IOException e) { + con.close("program err:" + e.toString()); + continue; + } catch (Exception e) { + LOGGER.warn("caught err:", e); + con.close("program err:" + e.toString()); + continue; } - con.close("program err:" + e.toString()); - continue; } + if (key.isValid() && key.isWritable()) { + con.doNextWriteCheck(); + } + } else { + key.cancel(); } - if (key.isWritable()) { - con.doWriteQueue(); - } - } else { - key.cancel(); - } - } catch (Throwable e) { - if (e instanceof CancelledKeyException) { + } catch (CancelledKeyException e) { if (LOGGER.isDebugEnabled()) { LOGGER.debug(con + " socket key canceled"); } - } else { + } catch (Exception e) { LOGGER.warn(con + " " + e); + } catch (final Throwable e) { + // Catch exceptions such as OOM and close connection if exists + //so that the reactor can keep running! + // @author Uncle-pan + // @since 2016-03-30 + if (con != null) { + con.close("Bad: " + e); + } + LOGGER.error("caught err: ", e); + continue; } - } - } - } catch (Throwable e) { + if (invalidSelectCount > SelectorUtil.REBUILD_COUNT_THRESHOLD) + { + final Selector rebuildSelector = SelectorUtil.rebuildSelector(this.selector); + if (rebuildSelector != null) + { + this.selector = rebuildSelector; + } + invalidSelectCount = 0; + } + } catch (Exception e) { LOGGER.warn(name, e); + } catch (final Throwable e){ + // Catch exceptions such as OOM so that the reactor can keep running! 
+ // @author Uncle-pan + // @since 2016-03-30 + LOGGER.error("caught err: ", e); } finally { if (keys != null) { keys.clear(); } + } } } private void register(Selector selector) { - + AbstractConnection c = null; if (registerQueue.isEmpty()) { return; } - Connection c = null; while ((c = registerQueue.poll()) != null) { try { - c.register(selector); - } catch (Throwable e) { - LOGGER.warn("register error ", e); - c.close("register err"); + ((NIOSocketWR) c.getSocketWR()).register(selector); + c.register(); + } catch (Exception e) { + c.close("register err" + e.toString()); } } } } -} \ No newline at end of file +} diff --git a/src/main/java/io/mycat/net/NIOReactorPool.java b/src/main/java/io/mycat/net/NIOReactorPool.java index 32c256ff3..f9fc8ae69 100644 --- a/src/main/java/io/mycat/net/NIOReactorPool.java +++ b/src/main/java/io/mycat/net/NIOReactorPool.java @@ -16,10 +16,15 @@ public NIOReactorPool(String name, int poolSize) throws IOException { } public NIOReactor getNextReactor() { +// if (++nextReactor == reactors.length) { +// nextReactor = 0; +// } +// return reactors[nextReactor]; + int i = ++nextReactor; if (i >= reactors.length) { - i=nextReactor = 0; - } - return reactors[i]; + i=nextReactor = 0; + } + return reactors[i]; } } diff --git a/src/main/java/io/mycat/net/NIOSocketWR.java b/src/main/java/io/mycat/net/NIOSocketWR.java new file mode 100644 index 000000000..a61e2c99d --- /dev/null +++ b/src/main/java/io/mycat/net/NIOSocketWR.java @@ -0,0 +1,199 @@ +package io.mycat.net; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; +import java.util.concurrent.atomic.AtomicBoolean; + +import io.mycat.util.TimeUtil; + +public class NIOSocketWR extends SocketWR { + private SelectionKey processKey; + private static final int OP_NOT_READ = ~SelectionKey.OP_READ; + private static final int OP_NOT_WRITE = ~SelectionKey.OP_WRITE; + private 
final AbstractConnection con; + private final SocketChannel channel; + private final AtomicBoolean writing = new AtomicBoolean(false); + + public NIOSocketWR(AbstractConnection con) { + this.con = con; + this.channel = (SocketChannel) con.channel; + } + + public void register(Selector selector) throws IOException { + try { + processKey = channel.register(selector, SelectionKey.OP_READ, con); + } finally { + if (con.isClosed.get()) { + clearSelectionKey(); + } + } + } + + public void doNextWriteCheck() { + + if (!writing.compareAndSet(false, true)) { + return; + } + + try { + boolean noMoreData = write0(); + writing.set(false); + if (noMoreData && con.writeQueue.isEmpty()) { + if ((processKey.isValid() && (processKey.interestOps() & SelectionKey.OP_WRITE) != 0)) { + disableWrite(); + } + + } else { + + if ((processKey.isValid() && (processKey.interestOps() & SelectionKey.OP_WRITE) == 0)) { + enableWrite(false); + } + } + + } catch (IOException e) { + if (AbstractConnection.LOGGER.isDebugEnabled()) { + AbstractConnection.LOGGER.debug("caught err:", e); + } + con.close("err:" + e); + } + + } + + private boolean write0() throws IOException { + + int written = 0; + ByteBuffer buffer = con.writeBuffer; + if (buffer != null) { + while (buffer.hasRemaining()) { + written = channel.write(buffer); + if (written > 0) { + con.netOutBytes += written; + con.processor.addNetOutBytes(written); + con.lastWriteTime = TimeUtil.currentTimeMillis(); + } else { + break; + } + } + + if (buffer.hasRemaining()) { + con.writeAttempts++; + return false; + } else { + con.writeBuffer = null; + con.recycle(buffer); + } + } + while ((buffer = con.writeQueue.poll()) != null) { + if (buffer.limit() == 0) { + con.recycle(buffer); + con.close("quit send"); + return true; + } + + buffer.flip(); + try { + while (buffer.hasRemaining()) { + written = channel.write(buffer);// java.io.IOException: + // Connection reset by peer + if (written > 0) { + con.lastWriteTime = TimeUtil.currentTimeMillis(); + 
con.netOutBytes += written; + con.processor.addNetOutBytes(written); + con.lastWriteTime = TimeUtil.currentTimeMillis(); + } else { + break; + } + } + } catch (IOException e) { + con.recycle(buffer); + throw e; + } + if (buffer.hasRemaining()) { + con.writeBuffer = buffer; + con.writeAttempts++; + return false; + } else { + con.recycle(buffer); + } + } + return true; + } + + private void disableWrite() { + try { + SelectionKey key = this.processKey; + key.interestOps(key.interestOps() & OP_NOT_WRITE); + } catch (Exception e) { + AbstractConnection.LOGGER.warn("can't disable write " + e + " con " + + con); + } + + } + + private void enableWrite(boolean wakeup) { + boolean needWakeup = false; + try { + SelectionKey key = this.processKey; + key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); + needWakeup = true; + } catch (Exception e) { + AbstractConnection.LOGGER.warn("can't enable write " + e); + + } + if (needWakeup && wakeup) { + processKey.selector().wakeup(); + } + } + + public void disableRead() { + + SelectionKey key = this.processKey; + key.interestOps(key.interestOps() & OP_NOT_READ); + } + + public void enableRead() { + + boolean needWakeup = false; + try { + SelectionKey key = this.processKey; + key.interestOps(key.interestOps() | SelectionKey.OP_READ); + needWakeup = true; + } catch (Exception e) { + AbstractConnection.LOGGER.warn("enable read fail " + e); + } + if (needWakeup) { + processKey.selector().wakeup(); + } + } + + private void clearSelectionKey() { + try { + SelectionKey key = this.processKey; + if (key != null && key.isValid()) { + key.attach(null); + key.cancel(); + } + } catch (Exception e) { + AbstractConnection.LOGGER.warn("clear selector keys err:" + e); + } + } + + @Override + public void asynRead() throws IOException { + ByteBuffer theBuffer = con.readBuffer; + if (theBuffer == null) { + + theBuffer = con.processor.getBufferPool().allocate(con.processor.getBufferPool().getChunkSize()); + + con.readBuffer = theBuffer; + } + + 
int got = channel.read(theBuffer); + + con.onReadData(got); + } + +} diff --git a/src/main/java/io/mycat/net/NameableExecutor.java b/src/main/java/io/mycat/net/NameableExecutor.java deleted file mode 100644 index c4a78bd98..000000000 --- a/src/main/java/io/mycat/net/NameableExecutor.java +++ /dev/null @@ -1,23 +0,0 @@ -package io.mycat.net; - -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -/** - * @author wuzh - */ -public class NameableExecutor extends ThreadPoolExecutor implements NameableExecutorService{ - protected final String name; - - public NameableExecutor(String name, int size, BlockingQueue queue, ThreadFactory factory) { - super(size, size, Long.MAX_VALUE, TimeUnit.NANOSECONDS, queue, factory); - this.name = name; - } - - public String getName() { - return name; - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/NameableExecutorService.java b/src/main/java/io/mycat/net/NameableExecutorService.java deleted file mode 100644 index abb62c22d..000000000 --- a/src/main/java/io/mycat/net/NameableExecutorService.java +++ /dev/null @@ -1,11 +0,0 @@ -package io.mycat.net; - -import java.util.concurrent.ExecutorService; - -/** - * @author wuzh - */ -public interface NameableExecutorService extends ExecutorService { - - public String getName(); -} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/NamebleScheduledExecutor.java b/src/main/java/io/mycat/net/NamebleScheduledExecutor.java deleted file mode 100644 index e82e4c074..000000000 --- a/src/main/java/io/mycat/net/NamebleScheduledExecutor.java +++ /dev/null @@ -1,21 +0,0 @@ -package io.mycat.net; - -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.ThreadFactory; - -public class NamebleScheduledExecutor extends ScheduledThreadPoolExecutor - implements NameableExecutorService { - private final String name; 
- - public NamebleScheduledExecutor(String name, int corePoolSize, - ThreadFactory threadFactory) { - super(corePoolSize, threadFactory); - this.name = name; - } - - @Override - public String getName() { - return name; - } - -} diff --git a/src/main/java/io/mycat/net/NetSystem.java b/src/main/java/io/mycat/net/NetSystem.java deleted file mode 100644 index 0b28214e8..000000000 --- a/src/main/java/io/mycat/net/NetSystem.java +++ /dev/null @@ -1,202 +0,0 @@ -package io.mycat.net; - -import io.mycat.MycatServer; -import io.mycat.backend.BackendConnection; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.util.TimeUtil; - -import java.io.IOException; -import java.net.StandardSocketOptions; -import java.nio.channels.NetworkChannel; -import java.util.Iterator; -import java.util.Map.Entry; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * 存放当前所有连接的信息,包括客户端和服务端等,以及Network部分所使用共用对象 - * - * @author wuzhih - * - */ -public class NetSystem { - private static final Logger LOGGER = LoggerFactory - .getLogger(NetSystem.class); - public static final int RUNNING = 0; - public static final int SHUTING_DOWN = -1; - // private static final Logger LOGGER = Logger.getLogger("NetSystem"); - private static NetSystem INSTANCE; - private final BufferPool bufferPool; - // 用来执行那些耗时的任务 - private final NameableExecutor executor; - // 用来执行定时任务 - private final NamebleScheduledExecutor timer; - private final ConcurrentMap allConnections; - private long netInBytes; - private long netOutBytes; - private SystemConfig netConfig; - private NIOConnector connector; - - public static NetSystem getInstance() { - return INSTANCE; - } - - public NetSystem(BufferPool bufferPool, NameableExecutor executor, - NamebleScheduledExecutor timer) throws IOException { - this.bufferPool = bufferPool; - this.executor = executor; - this.timer = timer; - this.allConnections = new 
ConcurrentHashMap(); - INSTANCE = this; - } - - public BufferPool getBufferPool() { - return bufferPool; - } - - public NIOConnector getConnector() { - return connector; - } - - public void setConnector(NIOConnector connector) { - this.connector = connector; - } - - public int getWriteQueueSize() { - int total = 0; - for (Connection con : allConnections.values()) { - total += con.getWriteQueue().size(); - } - - return total; - - } - - public SystemConfig getNetConfig() { - return netConfig; - } - - public void setNetConfig(SystemConfig netConfig) { - this.netConfig = netConfig; - } - - public NameableExecutor getExecutor() { - return executor; - } - - public NamebleScheduledExecutor getTimer() { - return timer; - } - - public long getNetInBytes() { - return netInBytes; - } - - public void addNetInBytes(long bytes) { - netInBytes += bytes; - } - - public long getNetOutBytes() { - return netOutBytes; - } - - public void addNetOutBytes(long bytes) { - netOutBytes += bytes; - } - - /** - * 添加一个连接到系统中被监控 - * - * @param c - */ - public void addConnection(Connection c) { - allConnections.put(c.getId(), c); - } - - public ConcurrentMap getAllConnectios() { - return allConnections; - } - - /** - * 定时执行该方法,回收部分资源。 - */ - public void checkConnections() { - Iterator> it = allConnections.entrySet() - .iterator(); - while (it.hasNext()) { - Connection c = it.next().getValue(); - - // 删除空连接 - if (c == null) { - it.remove(); - continue; - } - - // 清理已关闭连接,否则空闲检查。 - if (c.isClosed()) { - c.cleanup(); - it.remove(); - } else { - // very important ,for some data maybe not sent - checkConSendQueue(c); - if (c instanceof BackendConnection) { - long sqlTimeOut = MycatServer.getInstance().getConfig() - .getSystem().getSqlExecuteTimeout() * 1000L; - BackendConnection backCon = (BackendConnection) c; - // SQL执行超时的连接关闭 - if (backCon.isBorrowed() - && backCon.getLastTime() < TimeUtil - .currentTimeMillis() - sqlTimeOut) { - LOGGER.warn("found backend connection SQL timeout ,close it " - + 
c); - c.close("sql timeout"); - } - } - c.idleCheck(); - } - } - } - - private void checkConSendQueue(Connection c) { - // very important ,for some data maybe not sent - if (!c.getWriteQueue().isEmpty()) { - c.enableWrite(true); - } - } - - public void removeConnection(Connection con) { - this.allConnections.remove(con.getId()); - - } - - public void setSocketParams(Connection con, boolean isFrontChannel) - throws IOException { - int sorcvbuf = 0; - int sosndbuf = 0; - int soNoDelay = 0; - if (isFrontChannel) { - sorcvbuf = netConfig.getFrontsocketsorcvbuf(); - sosndbuf = netConfig.getFrontsocketsosndbuf(); - soNoDelay = netConfig.getFrontSocketNoDelay(); - } else { - sorcvbuf = netConfig.getBacksocketsorcvbuf(); - sosndbuf = netConfig.getBacksocketsosndbuf(); - soNoDelay = netConfig.getBackSocketNoDelay(); - } - NetworkChannel channel = con.getChannel(); - channel.setOption(StandardSocketOptions.SO_RCVBUF, sorcvbuf); - channel.setOption(StandardSocketOptions.SO_SNDBUF, sosndbuf); - channel.setOption(StandardSocketOptions.TCP_NODELAY, soNoDelay == 1); - channel.setOption(StandardSocketOptions.SO_REUSEADDR, true); - channel.setOption(StandardSocketOptions.SO_KEEPALIVE, true); - - con.setMaxPacketSize(netConfig.getMaxPacketSize()); - con.setPacketHeaderSize(netConfig.getPacketHeaderSize()); - con.setIdleTimeout(netConfig.getIdleTimeout()); - - } - -} diff --git a/src/main/java/io/mycat/net/SocketAcceptor.java b/src/main/java/io/mycat/net/SocketAcceptor.java new file mode 100644 index 000000000..f35664897 --- /dev/null +++ b/src/main/java/io/mycat/net/SocketAcceptor.java @@ -0,0 +1,11 @@ +package io.mycat.net; + +public interface SocketAcceptor { + + void start(); + + String getName(); + + int getPort(); + +} diff --git a/src/main/java/io/mycat/net/SocketConnector.java b/src/main/java/io/mycat/net/SocketConnector.java new file mode 100644 index 000000000..36647ffcb --- /dev/null +++ b/src/main/java/io/mycat/net/SocketConnector.java @@ -0,0 +1,5 @@ +package 
io.mycat.net; + +public interface SocketConnector { + +} diff --git a/src/main/java/io/mycat/net/SocketWR.java b/src/main/java/io/mycat/net/SocketWR.java new file mode 100644 index 000000000..47b087b87 --- /dev/null +++ b/src/main/java/io/mycat/net/SocketWR.java @@ -0,0 +1,9 @@ +package io.mycat.net; + +import java.io.IOException; + + +public abstract class SocketWR { + public abstract void asynRead() throws IOException; + public abstract void doNextWriteCheck() ; +} diff --git a/src/main/java/io/mycat/net/ThreadLocalBufferPool.java b/src/main/java/io/mycat/net/ThreadLocalBufferPool.java deleted file mode 100644 index 3b5753314..000000000 --- a/src/main/java/io/mycat/net/ThreadLocalBufferPool.java +++ /dev/null @@ -1,14 +0,0 @@ -package io.mycat.net; - - -public class ThreadLocalBufferPool extends ThreadLocal { - private final long size; - - public ThreadLocalBufferPool(long size) { - this.size = size; - } - - protected synchronized BufferQueue initialValue() { - return new BufferQueue(size); - } -} diff --git a/src/main/java/io/mycat/net/WriteEventCheckRunner.java b/src/main/java/io/mycat/net/WriteEventCheckRunner.java new file mode 100644 index 000000000..207f18ebe --- /dev/null +++ b/src/main/java/io/mycat/net/WriteEventCheckRunner.java @@ -0,0 +1,21 @@ +package io.mycat.net; + +public class WriteEventCheckRunner implements Runnable { + private final SocketWR socketWR; + private volatile boolean finshed = true; + + public WriteEventCheckRunner(SocketWR socketWR) { + this.socketWR = socketWR; + } + + public boolean isFinished() { + return finshed; + } + + @Override + public void run() { + finshed = false; + socketWR.doNextWriteCheck(); + finshed = true; + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/factory/BackendConnectionFactory.java b/src/main/java/io/mycat/net/factory/BackendConnectionFactory.java new file mode 100644 index 000000000..1a28da745 --- /dev/null +++ b/src/main/java/io/mycat/net/factory/BackendConnectionFactory.java @@ 
-0,0 +1,52 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.net.factory; + +import java.io.IOException; +import java.nio.channels.AsynchronousSocketChannel; +import java.nio.channels.NetworkChannel; +import java.nio.channels.SocketChannel; + +import io.mycat.MycatServer; + +/** + * @author mycat + */ +public abstract class BackendConnectionFactory { + + protected NetworkChannel openSocketChannel(boolean isAIO) + throws IOException { + if (isAIO) { + return AsynchronousSocketChannel + .open(MycatServer.getInstance().getNextAsyncChannelGroup()); + } else { + SocketChannel channel = null; + channel = SocketChannel.open(); + channel.configureBlocking(false); + return channel; + } + + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/factory/FrontendConnectionFactory.java b/src/main/java/io/mycat/net/factory/FrontendConnectionFactory.java new file mode 100644 index 000000000..8f3ee03a7 --- /dev/null +++ b/src/main/java/io/mycat/net/factory/FrontendConnectionFactory.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.factory; + +import java.io.IOException; +import java.net.StandardSocketOptions; +import java.nio.channels.NetworkChannel; + +import io.mycat.MycatServer; +import io.mycat.net.FrontendConnection; + +/** + * @author mycat + */ +public abstract class FrontendConnectionFactory { + protected abstract FrontendConnection getConnection(NetworkChannel channel) + throws IOException; + + public FrontendConnection make(NetworkChannel channel) throws IOException { + channel.setOption(StandardSocketOptions.SO_REUSEADDR, true); + channel.setOption(StandardSocketOptions.SO_KEEPALIVE, true); + + FrontendConnection c = getConnection(channel); + MycatServer.getInstance().getConfig().setSocketParams(c, true); + return c; + } + + + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/handler/BackendAsyncHandler.java b/src/main/java/io/mycat/net/handler/BackendAsyncHandler.java new file mode 100644 index 000000000..4928b427c --- /dev/null +++ b/src/main/java/io/mycat/net/handler/BackendAsyncHandler.java @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.handler; + +import java.util.concurrent.Executor; + +import io.mycat.net.NIOHandler; + +/** + * @author mycat + */ +public abstract class BackendAsyncHandler implements NIOHandler { + + protected void offerData(byte[] data, Executor executor) { + handleData(data); + + // if (dataQueue.offer(data)) { + // handleQueue(executor); + // } else { + // offerDataError(); + // } + } + + protected abstract void offerDataError(); + + protected abstract void handleData(byte[] data); + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/handler/FrontendAuthenticator.java b/src/main/java/io/mycat/net/handler/FrontendAuthenticator.java new file mode 100644 index 000000000..b0470d8a9 --- /dev/null +++ b/src/main/java/io/mycat/net/handler/FrontendAuthenticator.java @@ -0,0 +1,244 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.handler; + +import java.nio.ByteBuffer; +import java.security.NoSuchAlgorithmException; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.SecurityUtil; +import io.mycat.config.Capabilities; +import io.mycat.config.ErrorCode; +import io.mycat.config.model.UserConfig; +import io.mycat.net.FrontendConnection; +import io.mycat.net.NIOHandler; +import io.mycat.net.NIOProcessor; +import io.mycat.net.mysql.AuthPacket; +import io.mycat.net.mysql.MySQLPacket; +import io.mycat.net.mysql.QuitPacket; + +/** + * 前端认证处理器 + * + * @author mycat + */ +public class FrontendAuthenticator implements NIOHandler { + + private static final Logger LOGGER = LoggerFactory.getLogger(FrontendAuthenticator.class); + private static final byte[] AUTH_OK = new byte[] { 7, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0 }; + + protected final FrontendConnection source; + + public FrontendAuthenticator(FrontendConnection source) { + this.source = source; + } + + @Override + public void handle(byte[] data) { + // check quit packet + if (data.length == QuitPacket.QUIT.length && data[4] == MySQLPacket.COM_QUIT) { + source.close("quit packet"); + return; + } + + AuthPacket auth = new AuthPacket(); + auth.read(data); + + //huangyiming add + int nopassWordLogin = MycatServer.getInstance().getConfig().getSystem().getNonePasswordLogin(); + //如果无密码登陆则跳过密码验证这个步骤 + boolean skipPassWord = false; + String defaultUser = ""; + if(nopassWordLogin == 1){ + skipPassWord = true; + Map userMaps = 
MycatServer.getInstance().getConfig().getUsers(); + if(!userMaps.isEmpty()){ + setDefaultAccount(auth, userMaps); + } + } + // check user + if (!checkUser(auth.user, source.getHost())) { + failure(ErrorCode.ER_ACCESS_DENIED_ERROR, "Access denied for user '" + auth.user + "' with host '" + source.getHost()+ "'"); + return; + } + // check password + if (!skipPassWord && !checkPassword(auth.password, auth.user)) { + failure(ErrorCode.ER_ACCESS_DENIED_ERROR, "Access denied for user '" + auth.user + "', because password is error "); + return; + } + + // check degrade + if ( isDegrade( auth.user ) ) { + failure(ErrorCode.ER_ACCESS_DENIED_ERROR, "Access denied for user '" + auth.user + "', because service be degraded "); + return; + } + + // check schema + switch (checkSchema(auth.database, auth.user)) { + case ErrorCode.ER_BAD_DB_ERROR: + failure(ErrorCode.ER_BAD_DB_ERROR, "Unknown database '" + auth.database + "'"); + break; + case ErrorCode.ER_DBACCESS_DENIED_ERROR: + String s = "Access denied for user '" + auth.user + "' to database '" + auth.database + "'"; + failure(ErrorCode.ER_DBACCESS_DENIED_ERROR, s); + break; + default: + success(auth); + } + } + + /** + * 设置了无密码登陆的情况下把客户端传过来的用户账号改变为默认账户 + * @param auth + * @param userMaps + */ + private void setDefaultAccount(AuthPacket auth, Map userMaps) { + String defaultUser; + Iterator items = userMaps.values().iterator(); + while(items.hasNext()){ + UserConfig userConfig = items.next(); + if(userConfig.isDefaultAccount()){ + defaultUser = userConfig.getName(); + auth.user = defaultUser; + } + } + } + + //TODO: add by zhuam + //前端 connection 达到该用户设定的阀值后, 立马降级拒绝连接 + protected boolean isDegrade(String user) { + + int benchmark = source.getPrivileges().getBenchmark(user); + if ( benchmark > 0 ) { + + int forntedsLength = 0; + NIOProcessor[] processors = MycatServer.getInstance().getProcessors(); + for (NIOProcessor p : processors) { + forntedsLength += p.getForntedsLength(); + } + + if ( forntedsLength >= benchmark ) { + 
return true; + } + } + + return false; + } + + protected boolean checkUser(String user, String host) { + return source.getPrivileges().userExists(user, host); + } + + protected boolean checkPassword(byte[] password, String user) { + String pass = source.getPrivileges().getPassword(user); + + // check null + if (pass == null || pass.length() == 0) { + if (password == null || password.length == 0) { + return true; + } else { + return false; + } + } + if (password == null || password.length == 0) { + return false; + } + + // encrypt + byte[] encryptPass = null; + try { + encryptPass = SecurityUtil.scramble411(pass.getBytes(), source.getSeed()); + } catch (NoSuchAlgorithmException e) { + LOGGER.warn(source.toString(), e); + return false; + } + if (encryptPass != null && (encryptPass.length == password.length)) { + int i = encryptPass.length; + while (i-- != 0) { + if (encryptPass[i] != password[i]) { + return false; + } + } + } else { + return false; + } + + return true; + } + + protected int checkSchema(String schema, String user) { + if (schema == null) { + return 0; + } + FrontendPrivileges privileges = source.getPrivileges(); + if (!privileges.schemaExists(schema)) { + return ErrorCode.ER_BAD_DB_ERROR; + } + Set schemas = privileges.getUserSchemas(user); + if (schemas == null || schemas.size() == 0 || schemas.contains(schema)) { + return 0; + } else { + return ErrorCode.ER_DBACCESS_DENIED_ERROR; + } + } + + protected void success(AuthPacket auth) { + source.setAuthenticated(true); + source.setUser(auth.user); + source.setSchema(auth.database); + source.setCharsetIndex(auth.charsetIndex); + source.setHandler(new FrontendCommandHandler(source)); + + if (LOGGER.isInfoEnabled()) { + StringBuilder s = new StringBuilder(); + s.append(source).append('\'').append(auth.user).append("' login success"); + byte[] extra = auth.extra; + if (extra != null && extra.length > 0) { + s.append(",extra:").append(new String(extra)); + } + LOGGER.info(s.toString()); + } + + ByteBuffer 
buffer = source.allocate(); + source.write(source.writeToBuffer(AUTH_OK, buffer)); + boolean clientCompress = Capabilities.CLIENT_COMPRESS==(Capabilities.CLIENT_COMPRESS & auth.clientFlags); + boolean usingCompress= MycatServer.getInstance().getConfig().getSystem().getUseCompression()==1 ; + if(clientCompress&&usingCompress) + { + source.setSupportCompress(true); + } + } + + protected void failure(int errno, String info) { + LOGGER.error(source.toString() + info); + source.writeErrMessage((byte) 2, errno, info); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/handler/FrontendCommandHandler.java b/src/main/java/io/mycat/net/handler/FrontendCommandHandler.java new file mode 100644 index 000000000..41d21422e --- /dev/null +++ b/src/main/java/io/mycat/net/handler/FrontendCommandHandler.java @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.net.handler; + +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.config.ErrorCode; +import io.mycat.net.FrontendConnection; +import io.mycat.net.NIOHandler; +import io.mycat.net.mysql.MySQLPacket; +import io.mycat.statistic.CommandCount; + +/** + * 前端命令处理器 + * + * @author mycat + */ +public class FrontendCommandHandler implements NIOHandler +{ + + protected final FrontendConnection source; + protected final CommandCount commands; + + public FrontendCommandHandler(FrontendConnection source) + { + this.source = source; + this.commands = source.getProcessor().getCommands(); + } + + @Override + public void handle(byte[] data) + { + if(source.getLoadDataInfileHandler()!=null&&source.getLoadDataInfileHandler().isStartLoadData()) + { + MySQLMessage mm = new MySQLMessage(data); + int packetLength = mm.readUB3(); + if(packetLength+4==data.length) + { + source.loadDataInfileData(data); + } + return; + } + switch (data[4]) + { + case MySQLPacket.COM_INIT_DB: + commands.doInitDB(); + source.initDB(data); + break; + case MySQLPacket.COM_QUERY: + commands.doQuery(); + source.query(data); + break; + case MySQLPacket.COM_PING: + commands.doPing(); + source.ping(); + break; + case MySQLPacket.COM_QUIT: + commands.doQuit(); + source.close("quit cmd"); + break; + case MySQLPacket.COM_PROCESS_KILL: + commands.doKill(); + source.kill(data); + break; + case MySQLPacket.COM_STMT_PREPARE: + commands.doStmtPrepare(); + source.stmtPrepare(data); + break; + case MySQLPacket.COM_STMT_SEND_LONG_DATA: + commands.doStmtSendLongData(); + source.stmtSendLongData(data); + break; + case MySQLPacket.COM_STMT_RESET: + commands.doStmtReset(); + source.stmtReset(data); + break; + case MySQLPacket.COM_STMT_EXECUTE: + commands.doStmtExecute(); + source.stmtExecute(data); + break; + case MySQLPacket.COM_STMT_CLOSE: + commands.doStmtClose(); + source.stmtClose(data); + break; + case MySQLPacket.COM_HEARTBEAT: + commands.doHeartbeat(); + source.heartbeat(data); + break; 
+ default: + commands.doOther(); + source.writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, + "Unknown command"); + + } + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/FrontendPrepareHandler.java b/src/main/java/io/mycat/net/handler/FrontendPrepareHandler.java similarity index 90% rename from src/main/java/io/mycat/server/FrontendPrepareHandler.java rename to src/main/java/io/mycat/net/handler/FrontendPrepareHandler.java index 67240d99e..e070242a4 100644 --- a/src/main/java/io/mycat/server/FrontendPrepareHandler.java +++ b/src/main/java/io/mycat/net/handler/FrontendPrepareHandler.java @@ -21,20 +21,25 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server; +package io.mycat.net.handler; /** * SQL预处理处理器 * - * @author mycat + * @author mycat, CrazyPig */ public interface FrontendPrepareHandler { - + void prepare(String sql); + + void sendLongData(byte[] data); + void reset(byte[] data); + void execute(byte[] data); void close(byte[] data); void clear(); + } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/FrontendPrivileges.java b/src/main/java/io/mycat/net/handler/FrontendPrivileges.java similarity index 69% rename from src/main/java/io/mycat/server/FrontendPrivileges.java rename to src/main/java/io/mycat/net/handler/FrontendPrivileges.java index 8328a4ad9..a6c12c3ec 100644 --- a/src/main/java/io/mycat/server/FrontendPrivileges.java +++ b/src/main/java/io/mycat/net/handler/FrontendPrivileges.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server; +package io.mycat.net.handler; import java.util.Set; @@ -57,6 +57,38 @@ public interface FrontendPrivileges { * @param user * @return */ - boolean isReadOnly(String user); + Boolean isReadOnly(String user); + + /** + * 获取设定的系统最大连接数的降级阀值 + * @param user + * @return + */ + int getBenchmark(String user); + + + /** + * 检查防火墙策略 + * (白名单策略) + * @param user + * @param host + * @return + */ + boolean checkFirewallWhiteHostPolicy(String user, String host); + + /** + * 检查防火墙策略 + * (SQL黑名单及注入策略) + * @param sql + * @return + */ + boolean checkFirewallSQLPolicy(String user, String sql); + + + /** + * 检查 SQL 语句的 DML 权限 + * @return + */ + boolean checkDmlPrivilege(String user, String schema, String sql); } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/FrontendQueryHandler.java b/src/main/java/io/mycat/net/handler/FrontendQueryHandler.java similarity index 97% rename from src/main/java/io/mycat/server/FrontendQueryHandler.java rename to src/main/java/io/mycat/net/handler/FrontendQueryHandler.java index 6fdf6809d..e04088f8c 100644 --- a/src/main/java/io/mycat/server/FrontendQueryHandler.java +++ b/src/main/java/io/mycat/net/handler/FrontendQueryHandler.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server; +package io.mycat.net.handler; /** * 查询处理器 diff --git a/src/main/java/io/mycat/server/LoadDataInfileHandler.java b/src/main/java/io/mycat/net/handler/LoadDataInfileHandler.java similarity index 97% rename from src/main/java/io/mycat/server/LoadDataInfileHandler.java rename to src/main/java/io/mycat/net/handler/LoadDataInfileHandler.java index 62d145528..82378bb23 100644 --- a/src/main/java/io/mycat/server/LoadDataInfileHandler.java +++ b/src/main/java/io/mycat/net/handler/LoadDataInfileHandler.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server; +package io.mycat.net.handler; /** * load data infile diff --git a/src/main/java/io/mycat/net/mysql/AuthPacket.java b/src/main/java/io/mycat/net/mysql/AuthPacket.java new file mode 100644 index 000000000..9f6fad9cc --- /dev/null +++ b/src/main/java/io/mycat/net/mysql/AuthPacket.java @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.mysql; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.backend.mysql.StreamUtil; +import io.mycat.config.Capabilities; +import io.mycat.net.BackendAIOConnection; + +/** + * From client to server during initial handshake. + * + *
+ * Bytes                        Name
+ * -----                        ----
+ * 4                            client_flags
+ * 4                            max_packet_size
+ * 1                            charset_number
+ * 23                           (filler) always 0x00...
+ * n (Null-Terminated String)   user
+ * n (Length Coded Binary)      scramble_buff (1 + x bytes)
+ * n (Null-Terminated String)   databasename (optional)
+ * 
+ * @see http://forge.mysql.com/wiki/MySQL_Internals_ClientServer_Protocol#Client_Authentication_Packet
+ * 
+ * + * @author mycat + */ +public class AuthPacket extends MySQLPacket { + private static final byte[] FILLER = new byte[23]; + + public long clientFlags; + public long maxPacketSize; + public int charsetIndex; + public byte[] extra;// from FILLER(23) + public String user; + public byte[] password; + public String database; + + public void read(byte[] data) { + MySQLMessage mm = new MySQLMessage(data); + packetLength = mm.readUB3(); + packetId = mm.read(); + clientFlags = mm.readUB4(); + maxPacketSize = mm.readUB4(); + charsetIndex = (mm.read() & 0xff); + // read extra + int current = mm.position(); + int len = (int) mm.readLength(); + if (len > 0 && len < FILLER.length) { + byte[] ab = new byte[len]; + System.arraycopy(mm.bytes(), mm.position(), ab, 0, len); + this.extra = ab; + } + mm.position(current + FILLER.length); + user = mm.readStringWithNull(); + password = mm.readBytesWithLength(); + if (((clientFlags & Capabilities.CLIENT_CONNECT_WITH_DB) != 0) && mm.hasRemaining()) { + database = mm.readStringWithNull(); + } + } + + public void write(OutputStream out) throws IOException { + StreamUtil.writeUB3(out, calcPacketSize()); + StreamUtil.write(out, packetId); + StreamUtil.writeUB4(out, clientFlags); + StreamUtil.writeUB4(out, maxPacketSize); + StreamUtil.write(out, (byte) charsetIndex); + out.write(FILLER); + if (user == null) { + StreamUtil.write(out, (byte) 0); + } else { + StreamUtil.writeWithNull(out, user.getBytes()); + } + if (password == null) { + StreamUtil.write(out, (byte) 0); + } else { + StreamUtil.writeWithLength(out, password); + } + if (database == null) { + StreamUtil.write(out, (byte) 0); + } else { + StreamUtil.writeWithNull(out, database.getBytes()); + } + } + + @Override + public void write(BackendAIOConnection c) { + ByteBuffer buffer = c.allocate(); + BufferUtil.writeUB3(buffer, calcPacketSize()); + buffer.put(packetId); + BufferUtil.writeUB4(buffer, clientFlags); + BufferUtil.writeUB4(buffer, maxPacketSize); + buffer.put((byte) 
charsetIndex); + buffer = c.writeToBuffer(FILLER, buffer); + if (user == null) { + buffer = c.checkWriteBuffer(buffer, 1,true); + buffer.put((byte) 0); + } else { + byte[] userData = user.getBytes(); + buffer = c.checkWriteBuffer(buffer, userData.length + 1,true); + BufferUtil.writeWithNull(buffer, userData); + } + if (password == null) { + buffer = c.checkWriteBuffer(buffer, 1,true); + buffer.put((byte) 0); + } else { + buffer = c.checkWriteBuffer(buffer, BufferUtil.getLength(password),true); + BufferUtil.writeWithLength(buffer, password); + } + if (database == null) { + buffer = c.checkWriteBuffer(buffer, 1,true); + buffer.put((byte) 0); + } else { + byte[] databaseData = database.getBytes(); + buffer = c.checkWriteBuffer(buffer, databaseData.length + 1,true); + BufferUtil.writeWithNull(buffer, databaseData); + } + c.write(buffer); + } + + @Override + public int calcPacketSize() { + int size = 32;// 4+4+1+23; + size += (user == null) ? 1 : user.length() + 1; + size += (password == null) ? 1 : BufferUtil.getLength(password); + size += (database == null) ? 1 : database.length() + 1; + return size; + } + + @Override + protected String getPacketInfo() { + return "MySQL Authentication Packet"; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/mysql/BinaryPacket.java b/src/main/java/io/mycat/net/mysql/BinaryPacket.java new file mode 100644 index 000000000..530cede09 --- /dev/null +++ b/src/main/java/io/mycat/net/mysql/BinaryPacket.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.mysql; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.StreamUtil; +import io.mycat.net.BackendAIOConnection; +import io.mycat.net.FrontendConnection; + +/** + * @author mycat + */ +public class BinaryPacket extends MySQLPacket { + public static final byte OK = 1; + public static final byte ERROR = 2; + public static final byte HEADER = 3; + public static final byte FIELD = 4; + public static final byte FIELD_EOF = 5; + public static final byte ROW = 6; + public static final byte PACKET_EOF = 7; + + public byte[] data; + + public void read(InputStream in) throws IOException { + packetLength = StreamUtil.readUB3(in); + packetId = StreamUtil.read(in); + byte[] ab = new byte[packetLength]; + StreamUtil.read(in, ab, 0, ab.length); + data = ab; + } + + @Override + public ByteBuffer write(ByteBuffer buffer, FrontendConnection c,boolean writeSocketIfFull) { + buffer = c.checkWriteBuffer(buffer, c.getPacketHeaderSize(),writeSocketIfFull); + BufferUtil.writeUB3(buffer, calcPacketSize()); + buffer.put(packetId); + buffer = c.writeToBuffer(data, buffer); + return buffer; + } + @Override + public void write(BackendAIOConnection c) { + ByteBuffer buffer = 
c.allocate(); + buffer= c.checkWriteBuffer(buffer,c.getPacketHeaderSize()+calcPacketSize(),false); + BufferUtil.writeUB3(buffer, calcPacketSize()); + buffer.put(packetId); + buffer.put(data); + c.write(buffer); + } + + @Override + public int calcPacketSize() { + return data == null ? 0 : data.length; + } + + @Override + protected String getPacketInfo() { + return "MySQL Binary Packet"; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/mysql/BinaryRowDataPacket.java b/src/main/java/io/mycat/net/mysql/BinaryRowDataPacket.java new file mode 100644 index 000000000..fae8e5b88 --- /dev/null +++ b/src/main/java/io/mycat/net/mysql/BinaryRowDataPacket.java @@ -0,0 +1,396 @@ +package io.mycat.net.mysql; + + +import java.nio.ByteBuffer; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.config.Fields; +import io.mycat.memory.unsafe.row.UnsafeRow; +import io.mycat.net.FrontendConnection; +import io.mycat.util.ByteUtil; +import io.mycat.util.DateUtil; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * ProtocolBinary::ResultsetRow: + * row of a binary resultset (COM_STMT_EXECUTE) + + * Payload + * 1 packet header [00] + * string[$len] NULL-bitmap, length: (column_count + 7 + 2) / 8 + * string[$len] values + * + * A Binary Protocol Resultset Row is made up of the NULL bitmap + * containing as many bits as we have columns in the resultset + 2 + * and the values for columns that are not NULL in the Binary Protocol Value format. 
+ * + * @see @http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html#packet-ProtocolBinary::ResultsetRow + * @see @http://dev.mysql.com/doc/internals/en/binary-protocol-value.html + * @author CrazyPig + * + */ +public class BinaryRowDataPacket extends MySQLPacket { + private static final Logger LOGGER = LoggerFactory.getLogger(BinaryRowDataPacket.class); + public int fieldCount; + public List fieldValues; + public byte packetHeader = (byte) 0; + public byte[] nullBitMap; + + public List fieldPackets; + + public BinaryRowDataPacket() {} + + /** + * 从UnsafeRow转换成BinaryRowDataPacket + * + * 说明: 当开启isOffHeapuseOffHeapForMerge参数时,会使用UnsafeRow封装数据, + * 因此需要从这个对象里面将数据封装成BinaryRowDataPacket + * + * @param fieldPackets + * @param unsafeRow + */ + public void read(List fieldPackets, UnsafeRow unsafeRow) { + this.fieldPackets = fieldPackets; + this.fieldCount = unsafeRow.numFields(); + this.fieldValues = new ArrayList(fieldCount); + this.packetId = unsafeRow.packetId; + this.nullBitMap = new byte[(fieldCount + 7 + 2) / 8]; + + for(int i = 0; i < this.fieldCount; i++) { + byte[] fv = unsafeRow.getBinary(i); + FieldPacket fieldPk = fieldPackets.get(i); + if(fv == null) { + storeNullBitMap(i); + this.fieldValues.add(fv); + } else { + convert(fv, fieldPk); + } + } + } + + /** + * 从RowDataPacket转换成BinaryRowDataPacket + * @param fieldPackets 字段包集合 + * @param rowDataPk 文本协议行数据包 + */ + public void read(List fieldPackets, RowDataPacket rowDataPk) { + this.fieldPackets = fieldPackets; + this.fieldCount = rowDataPk.fieldCount; + this.fieldValues = new ArrayList(fieldCount); + this.packetId = rowDataPk.packetId; + this.nullBitMap = new byte[(fieldCount + 7 + 2) / 8]; + + List _fieldValues = rowDataPk.fieldValues; + for (int i = 0; i < fieldCount; i++) { + byte[] fv = _fieldValues.get(i); + FieldPacket fieldPk = fieldPackets.get(i); + if (fv == null) { // 字段值为null,根据协议规定存储nullBitMap + storeNullBitMap(i); + this.fieldValues.add(fv); + } else { + convert(fv, fieldPk); + } 
+ } + } + + private void storeNullBitMap(int i) { + int bitMapPos = (i + 2) / 8; + int bitPos = (i + 2) % 8; + this.nullBitMap[bitMapPos] |= (byte) (1 << bitPos); + } + + /** + * 从RowDataPacket的fieldValue的数据转化成BinaryRowDataPacket的fieldValue数据 + * @param fv + * @param fieldPk + */ + private void convert(byte[] fv, FieldPacket fieldPk) { + + int fieldType = fieldPk.type; + switch (fieldType) { + case Fields.FIELD_TYPE_STRING: + case Fields.FIELD_TYPE_VARCHAR: + case Fields.FIELD_TYPE_VAR_STRING: + case Fields.FIELD_TYPE_ENUM: + case Fields.FIELD_TYPE_SET: + case Fields.FIELD_TYPE_LONG_BLOB: + case Fields.FIELD_TYPE_MEDIUM_BLOB: + case Fields.FIELD_TYPE_BLOB: + case Fields.FIELD_TYPE_TINY_BLOB: + case Fields.FIELD_TYPE_GEOMETRY: + case Fields.FIELD_TYPE_BIT: + case Fields.FIELD_TYPE_DECIMAL: + case Fields.FIELD_TYPE_NEW_DECIMAL: + // Fields + // value (lenenc_str) -- string + + // Example + // 03 66 6f 6f -- string = "foo" + this.fieldValues.add(fv); + break; + case Fields.FIELD_TYPE_LONGLONG: + // Fields + // value (8) -- integer + + // Example + // 01 00 00 00 00 00 00 00 -- int64 = 1 + long longVar = ByteUtil.getLong(fv); + this.fieldValues.add(ByteUtil.getBytes(longVar)); + break; + case Fields.FIELD_TYPE_LONG: + case Fields.FIELD_TYPE_INT24: + // Fields + // value (4) -- integer + + // Example + // 01 00 00 00 -- int32 = 1 + int intVar = ByteUtil.getInt(fv); + this.fieldValues.add(ByteUtil.getBytes(intVar)); + break; + case Fields.FIELD_TYPE_SHORT: + case Fields.FIELD_TYPE_YEAR: + // Fields + // value (2) -- integer + + // Example + // 01 00 -- int16 = 1 + short shortVar = ByteUtil.getShort(fv); + this.fieldValues.add(ByteUtil.getBytes(shortVar)); + break; + case Fields.FIELD_TYPE_TINY: + // Fields + // value (1) -- integer + + // Example + // 01 -- int8 = 1 + int tinyVar = ByteUtil.getInt(fv); + byte[] bytes = new byte[1]; + bytes[0] = (byte)tinyVar; + this.fieldValues.add(bytes); + break; + case Fields.FIELD_TYPE_DOUBLE: + // Fields + // value (string.fix_len) 
-- (len=8) double + + // Example + // 66 66 66 66 66 66 24 40 -- double = 10.2 + double doubleVar = ByteUtil.getDouble(fv); + this.fieldValues.add(ByteUtil.getBytes(doubleVar)); + break; + case Fields.FIELD_TYPE_FLOAT: + // Fields + // value (string.fix_len) -- (len=4) float + + // Example + // 33 33 23 41 -- float = 10.2 + float floatVar = ByteUtil.getFloat(fv); + this.fieldValues.add(ByteUtil.getBytes(floatVar)); + break; + case Fields.FIELD_TYPE_DATE: + try { + Date dateVar = DateUtil.parseDate(ByteUtil.getDate(fv), DateUtil.DATE_PATTERN_ONLY_DATE); + this.fieldValues.add(ByteUtil.getBytes(dateVar, false)); + } catch(org.joda.time.IllegalFieldValueException e1) { + // 当时间为 0000-00-00 00:00:00 的时候, 默认返回 1970-01-01 08:00:00.0 + this.fieldValues.add(ByteUtil.getBytes(new Date(0L), false)); + } catch (ParseException e) { + LOGGER.error("error",e); + } + break; + case Fields.FIELD_TYPE_DATETIME: + case Fields.FIELD_TYPE_TIMESTAMP: + String dateStr = ByteUtil.getDate(fv); + Date dateTimeVar = null; + try { + if (dateStr.indexOf(".") > 0) { + dateTimeVar = DateUtil.parseDate(dateStr, DateUtil.DATE_PATTERN_FULL); + this.fieldValues.add(ByteUtil.getBytes(dateTimeVar, false)); + } else { + dateTimeVar = DateUtil.parseDate(dateStr, DateUtil.DEFAULT_DATE_PATTERN); + this.fieldValues.add(ByteUtil.getBytes(dateTimeVar, false)); + } + } catch(org.joda.time.IllegalFieldValueException e1) { + // 当时间为 0000-00-00 00:00:00 的时候, 默认返回 1970-01-01 08:00:00.0 + this.fieldValues.add(ByteUtil.getBytes(new Date(0L), false)); + + } catch (ParseException e) { + LOGGER.error("error",e); + } + break; + case Fields.FIELD_TYPE_TIME: + String timeStr = ByteUtil.getTime(fv); + Date timeVar = null; + try { + if (timeStr.indexOf(".") > 0) { + timeVar = DateUtil.parseDate(timeStr, DateUtil.TIME_PATTERN_FULL); + this.fieldValues.add(ByteUtil.getBytes(timeVar, true)); + } else { + timeVar = DateUtil.parseDate(timeStr, DateUtil.DEFAULT_TIME_PATTERN); + this.fieldValues.add(ByteUtil.getBytes(timeVar, 
true)); + } + + } catch(org.joda.time.IllegalFieldValueException e1) { + // 当时间为 0000-00-00 00:00:00 的时候, 默认返回 1970-01-01 08:00:00.0 + this.fieldValues.add(ByteUtil.getBytes(new Date(0L), true)); + + } catch (ParseException e) { + LOGGER.error("error",e); + } + break; + } + + } + + public void write(FrontendConnection conn) { + + int size = calcPacketSize(); + int packetHeaderSize = conn.getPacketHeaderSize(); + int totalSize = size + packetHeaderSize; + ByteBuffer bb = null; + + bb = conn.getProcessor().getBufferPool().allocate(totalSize); + + BufferUtil.writeUB3(bb, calcPacketSize()); + bb.put(packetId); + bb.put(packetHeader); // packet header [00] + bb.put(nullBitMap); // NULL-Bitmap + for(int i = 0; i < fieldCount; i++) { // values + byte[] fv = fieldValues.get(i); + if(fv != null) { + FieldPacket fieldPk = this.fieldPackets.get(i); + int fieldType = fieldPk.type; + switch(fieldType) { + case Fields.FIELD_TYPE_STRING: + case Fields.FIELD_TYPE_VARCHAR: + case Fields.FIELD_TYPE_VAR_STRING: + case Fields.FIELD_TYPE_ENUM: + case Fields.FIELD_TYPE_SET: + case Fields.FIELD_TYPE_LONG_BLOB: + case Fields.FIELD_TYPE_MEDIUM_BLOB: + case Fields.FIELD_TYPE_BLOB: + case Fields.FIELD_TYPE_TINY_BLOB: + case Fields.FIELD_TYPE_GEOMETRY: + case Fields.FIELD_TYPE_BIT: + case Fields.FIELD_TYPE_DECIMAL: + case Fields.FIELD_TYPE_NEW_DECIMAL: + // 长度编码的字符串需要一个字节来存储长度(0表示空字符串) + BufferUtil.writeLength(bb, fv.length); + break; + default: + break; + } + if(fv.length > 0) { + bb.put(fv); + } + } + } + conn.write(bb); + + } + + @Override + public ByteBuffer write(ByteBuffer bb, FrontendConnection c, + boolean writeSocketIfFull) { + int size = calcPacketSize(); + int packetHeaderSize = c.getPacketHeaderSize(); + int totalSize = size + packetHeaderSize; + bb = c.checkWriteBuffer(bb, totalSize, writeSocketIfFull); + BufferUtil.writeUB3(bb, size); + bb.put(packetId); + bb.put(packetHeader); // packet header [00] + bb.put(nullBitMap); // NULL-Bitmap + for(int i = 0; i < fieldCount; i++) { // 
values + byte[] fv = fieldValues.get(i); + if(fv != null) { + FieldPacket fieldPk = this.fieldPackets.get(i); + int fieldType = fieldPk.type; + switch(fieldType) { + case Fields.FIELD_TYPE_STRING: + case Fields.FIELD_TYPE_VARCHAR: + case Fields.FIELD_TYPE_VAR_STRING: + case Fields.FIELD_TYPE_ENUM: + case Fields.FIELD_TYPE_SET: + case Fields.FIELD_TYPE_LONG_BLOB: + case Fields.FIELD_TYPE_MEDIUM_BLOB: + case Fields.FIELD_TYPE_BLOB: + case Fields.FIELD_TYPE_TINY_BLOB: + case Fields.FIELD_TYPE_GEOMETRY: + case Fields.FIELD_TYPE_BIT: + case Fields.FIELD_TYPE_DECIMAL: + case Fields.FIELD_TYPE_NEW_DECIMAL: + // 长度编码的字符串需要一个字节来存储长度(0表示空字符串) + BufferUtil.writeLength(bb, fv.length); + break; + default: + break; + } + if(fv.length > 0) { + bb.put(fv); + } + } + } + return bb; + } + + @Override + public int calcPacketSize() { + int size = 0; + size = size + 1 + nullBitMap.length; + for(int i = 0, n = fieldValues.size(); i < n; i++) { + byte[] value = fieldValues.get(i); + if(value != null) { + FieldPacket fieldPk = this.fieldPackets.get(i); + int fieldType = fieldPk.type; + switch(fieldType) { + case Fields.FIELD_TYPE_STRING: + case Fields.FIELD_TYPE_VARCHAR: + case Fields.FIELD_TYPE_VAR_STRING: + case Fields.FIELD_TYPE_ENUM: + case Fields.FIELD_TYPE_SET: + case Fields.FIELD_TYPE_LONG_BLOB: + case Fields.FIELD_TYPE_MEDIUM_BLOB: + case Fields.FIELD_TYPE_BLOB: + case Fields.FIELD_TYPE_TINY_BLOB: + case Fields.FIELD_TYPE_GEOMETRY: + case Fields.FIELD_TYPE_BIT: + case Fields.FIELD_TYPE_DECIMAL: + case Fields.FIELD_TYPE_NEW_DECIMAL: + /* + * 长度编码的字符串需要计算存储长度, 根据mysql协议文档描述 + * To convert a length-encoded integer into its numeric value, check the first byte: + * If it is < 0xfb, treat it as a 1-byte integer. + * If it is 0xfc, it is followed by a 2-byte integer. + * If it is 0xfd, it is followed by a 3-byte integer. + * If it is 0xfe, it is followed by a 8-byte integer. 
+ * + */ + if(value.length != 0) { + /* + * 长度编码的字符串需要计算存储长度,不能简单默认只有1个字节是表示长度,当数据足够长,占用的就不止1个字节 + */ +// size = size + 1 + value.length; + size = size + BufferUtil.getLength(value); + } else { + size = size + 1; // 处理空字符串,只计算长度1个字节 + } + break; + default: + size = size + value.length; + break; + } + } + } + return size; + } + + @Override + protected String getPacketInfo() { + return "MySQL Binary RowData Packet"; + } +} diff --git a/src/main/java/io/mycat/server/packet/CommandPacket.java b/src/main/java/io/mycat/net/mysql/CommandPacket.java similarity index 72% rename from src/main/java/io/mycat/server/packet/CommandPacket.java rename to src/main/java/io/mycat/net/mysql/CommandPacket.java index c88dabf60..a889b6063 100644 --- a/src/main/java/io/mycat/server/packet/CommandPacket.java +++ b/src/main/java/io/mycat/net/mysql/CommandPacket.java @@ -21,15 +21,17 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet; - -import io.mycat.net.BufferArray; -import io.mycat.net.Connection; -import io.mycat.net.NetSystem; -import io.mycat.server.packet.util.BufferUtil; +package io.mycat.net.mysql; +import java.io.IOException; +import java.io.OutputStream; import java.nio.ByteBuffer; +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.backend.mysql.StreamUtil; +import io.mycat.net.BackendAIOConnection; + /** * From client to server whenever the client wants the server to do something. 
* @@ -89,57 +91,55 @@ */ public class CommandPacket extends MySQLPacket { - public byte command; - public byte[] arg; - - public void read(byte[] data) { - MySQLMessage mm = new MySQLMessage(data); - packetLength = mm.readUB3(); - packetId = mm.read(); - command = mm.read(); - arg = mm.readBytes(); - } + public byte command; + public byte[] arg; + public void read(byte[] data) { + MySQLMessage mm = new MySQLMessage(data); + packetLength = mm.readUB3(); + packetId = mm.read(); + command = mm.read(); + arg = mm.readBytes(); + } - public void write(BufferArray bufferArray) { - int size = calcPacketSize(); - ByteBuffer buffer = bufferArray.checkWriteBuffer(packetHeaderSize - + size); - BufferUtil.writeUB3(buffer, calcPacketSize()); - buffer.put(packetId); - buffer.put(command); - buffer = bufferArray.write(arg); - } - public void write(Connection conn) { - int size = calcPacketSize(); - int totalSize = size + packetHeaderSize; - if (NetSystem.getInstance().getBufferPool().getChunkSize() >= totalSize) { - ByteBuffer buffer = NetSystem.getInstance().getBufferPool() - .allocate(); - BufferUtil.writeUB3(buffer, size); - buffer.put(packetId); - buffer.put(command); - buffer.put(arg); - conn.write(buffer); - } else { - BufferArray bufArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - write(bufArray); - conn.write(bufArray); - } + public void write(OutputStream out) throws IOException { + StreamUtil.writeUB3(out, calcPacketSize()); + StreamUtil.write(out, packetId); + StreamUtil.write(out, command); + out.write(arg); + } - } + @Override + public void write(BackendAIOConnection c) { + ByteBuffer buffer = c.allocate(); + try { + BufferUtil.writeUB3(buffer, calcPacketSize()); + buffer.put(packetId); + buffer.put(command); + buffer = c.writeToBuffer(arg, buffer); + c.write(buffer); + } catch(java.nio.BufferOverflowException e1) { + //fixed issues #98 #1072 + buffer = c.checkWriteBuffer(buffer, c.getPacketHeaderSize() + calcPacketSize(), false); + 
BufferUtil.writeUB3(buffer, calcPacketSize()); + buffer.put(packetId); + buffer.put(command); + buffer = c.writeToBuffer(arg, buffer); + c.write(buffer); + } + } - @Override - public int calcPacketSize() { - return 1 + arg.length; - } + @Override + public int calcPacketSize() { + return 1 + arg.length; + } - @Override - protected String getPacketInfo() { - return "MySQL Command Packet"; - } + @Override + protected String getPacketInfo() { + return "MySQL Command Packet"; + } + } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/packet/EOFPacket.java b/src/main/java/io/mycat/net/mysql/EOFPacket.java similarity index 62% rename from src/main/java/io/mycat/server/packet/EOFPacket.java rename to src/main/java/io/mycat/net/mysql/EOFPacket.java index cadef9b3d..c2ba29e6f 100644 --- a/src/main/java/io/mycat/server/packet/EOFPacket.java +++ b/src/main/java/io/mycat/net/mysql/EOFPacket.java @@ -21,15 +21,15 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet; - -import io.mycat.net.BufferArray; -import io.mycat.net.Connection; -import io.mycat.net.NetSystem; -import io.mycat.server.packet.util.BufferUtil; +package io.mycat.net.mysql; import java.nio.ByteBuffer; +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.buffer.BufferArray; +import io.mycat.net.FrontendConnection; + /** * From Server To Client, at the end of a series of Field Packets, and at the * end of a series of Data Packets.With prepared statements, EOF Packet can also @@ -48,22 +48,43 @@ * @author mycat */ public class EOFPacket extends MySQLPacket { - public static final byte FIELD_COUNT = (byte) 0xfe; + public static final byte FIELD_COUNT = (byte) 0xfe; - public byte fieldCount = FIELD_COUNT; - public int warningCount; - public int status = 2; + public byte fieldCount = FIELD_COUNT; + public int warningCount; + public int status = 2; - public void read(byte[] data) { - MySQLMessage mm = new 
MySQLMessage(data); - packetLength = mm.readUB3(); - packetId = mm.read(); - fieldCount = mm.read(); - warningCount = mm.readUB2(); - status = mm.readUB2(); - } + public void read(byte[] data) { + MySQLMessage mm = new MySQLMessage(data); + packetLength = mm.readUB3(); + packetId = mm.read(); + fieldCount = mm.read(); + warningCount = mm.readUB2(); + status = mm.readUB2(); + } + + @Override + public ByteBuffer write(ByteBuffer buffer, FrontendConnection c,boolean writeSocketIfFull) { + int size = calcPacketSize(); + buffer = c.checkWriteBuffer(buffer, c.getPacketHeaderSize() + size,writeSocketIfFull); + BufferUtil.writeUB3(buffer, size); + buffer.put(packetId); + buffer.put(fieldCount); + BufferUtil.writeUB2(buffer, warningCount); + BufferUtil.writeUB2(buffer, status); + return buffer; + } + + @Override + public int calcPacketSize() { + return 5;// 1+2+2; + } + + @Override + protected String getPacketInfo() { + return "MySQL EOF Packet"; + } - @Override public void write(BufferArray bufferArray) { int size = calcPacketSize(); ByteBuffer buffer = bufferArray.checkWriteBuffer(packetHeaderSize @@ -75,25 +96,4 @@ public void write(BufferArray bufferArray) { BufferUtil.writeUB2(buffer, status); } - public void write(Connection con) { - int size = calcPacketSize(); - ByteBuffer buffer = NetSystem.getInstance().getBufferPool().allocate(); - BufferUtil.writeUB3(buffer, size); - buffer.put(packetId); - buffer.put(fieldCount); - BufferUtil.writeUB2(buffer, warningCount); - BufferUtil.writeUB2(buffer, status); - con.write(buffer); - } - - @Override - public int calcPacketSize() { - return 5;// 1+2+2; - } - - @Override - protected String getPacketInfo() { - return "MySQL EOF Packet"; - } - } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/packet/EmptyPacket.java b/src/main/java/io/mycat/net/mysql/EmptyPacket.java similarity index 97% rename from src/main/java/io/mycat/server/packet/EmptyPacket.java rename to 
src/main/java/io/mycat/net/mysql/EmptyPacket.java index 57758c765..b9ce941a1 100644 --- a/src/main/java/io/mycat/server/packet/EmptyPacket.java +++ b/src/main/java/io/mycat/net/mysql/EmptyPacket.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet; +package io.mycat.net.mysql; /** * @author mycat暂时只发现在load data infile时用到 diff --git a/src/main/java/io/mycat/server/packet/ErrorPacket.java b/src/main/java/io/mycat/net/mysql/ErrorPacket.java similarity index 80% rename from src/main/java/io/mycat/server/packet/ErrorPacket.java rename to src/main/java/io/mycat/net/mysql/ErrorPacket.java index 38570028e..c2efd06cc 100644 --- a/src/main/java/io/mycat/server/packet/ErrorPacket.java +++ b/src/main/java/io/mycat/net/mysql/ErrorPacket.java @@ -21,14 +21,15 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet; - -import io.mycat.net.NetSystem; -import io.mycat.server.GenalMySQLConnection; -import io.mycat.server.packet.util.BufferUtil; +package io.mycat.net.mysql; +import java.io.ByteArrayOutputStream; import java.nio.ByteBuffer; +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.net.FrontendConnection; + /** * From server to client in response to command, if error. 
* @@ -83,8 +84,17 @@ public void read(byte[] data) { message = mm.readBytes(); } + public byte[] writeToBytes(FrontendConnection c) { + ByteBuffer buffer = c.allocate(); + buffer = write(buffer, c, false); + buffer.flip(); + byte[] data = new byte[buffer.limit()]; + buffer.get(data); + c.recycle(buffer); + return data; + } public byte[] writeToBytes() { - ByteBuffer buffer = ByteBuffer.allocate(calcPacketSize() + 4); + ByteBuffer buffer = ByteBuffer.allocate(calcPacketSize()+4); int size = calcPacketSize(); BufferUtil.writeUB3(buffer, size); buffer.put(packetId); @@ -101,10 +111,12 @@ public byte[] writeToBytes() { return data; } - - public void write(GenalMySQLConnection c) { - ByteBuffer buffer = NetSystem.getInstance().getBufferPool().allocate(); + @Override + public ByteBuffer write(ByteBuffer buffer, FrontendConnection c, + boolean writeSocketIfFull) { int size = calcPacketSize(); + buffer = c.checkWriteBuffer(buffer, c.getPacketHeaderSize() + size, + writeSocketIfFull); BufferUtil.writeUB3(buffer, size); buffer.put(packetId); buffer.put(fieldCount); @@ -112,11 +124,17 @@ public void write(GenalMySQLConnection c) { buffer.put(mark); buffer.put(sqlState); if (message != null) { - buffer.put(message); - + buffer = c.writeToBuffer(message, buffer); } - c.write(buffer); + return buffer; + } + + + public void write(FrontendConnection c) { + ByteBuffer buffer = c.allocate(); + buffer = this.write(buffer, c, true); + c.write(buffer); } @Override diff --git a/src/main/java/io/mycat/server/packet/ExecutePacket.java b/src/main/java/io/mycat/net/mysql/ExecutePacket.java similarity index 88% rename from src/main/java/io/mycat/server/packet/ExecutePacket.java rename to src/main/java/io/mycat/net/mysql/ExecutePacket.java index 6863a6f68..4343f39a8 100644 --- a/src/main/java/io/mycat/server/packet/ExecutePacket.java +++ b/src/main/java/io/mycat/net/mysql/ExecutePacket.java @@ -21,14 +21,15 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.packet; - -import io.mycat.server.packet.util.BindValue; -import io.mycat.server.packet.util.BindValueUtil; -import io.mycat.server.packet.util.PreparedStatement; +package io.mycat.net.mysql; import java.io.UnsupportedEncodingException; +import io.mycat.backend.mysql.BindValue; +import io.mycat.backend.mysql.BindValueUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.backend.mysql.PreparedStatement; + /** *
  *  Bytes                      Name
@@ -75,10 +76,10 @@
  * 
  *  values:                    for all non-NULL values, each parameters appends its value
  *                             as described in Row Data Packet: Binary (column values)
- * @see http://dev.mysql.com/doc/internals/en/execute-packet.html
+ * @see https://dev.mysql.com/doc/internals/en/com-stmt-execute.html
  * 
* - * @author mycat + * @author mycat, CrazyPig */ public class ExecutePacket extends MySQLPacket { @@ -107,17 +108,15 @@ public void read(byte[] data, String charset) throws UnsupportedEncodingExceptio // 读取NULL指示器数据 int parameterCount = values.length; - nullBitMap = new byte[(parameterCount + 7) / 8]; - for (int i = 0; i < nullBitMap.length; i++) { - nullBitMap[i] = mm.read(); + if(parameterCount > 0) { + nullBitMap = new byte[(parameterCount + 7) / 8]; + for (int i = 0; i < nullBitMap.length; i++) { + nullBitMap[i] = mm.read(); + } + + // 当newParameterBoundFlag==1时,更新参数类型。 + newParameterBoundFlag = mm.read(); } - - if(parameterCount <= 0) { - return ; - } - - // 当newParameterBoundFlag==1时,更新参数类型。 - newParameterBoundFlag = mm.read(); if (newParameterBoundFlag == (byte) 1) { for (int i = 0; i < parameterCount; i++) { pstmt.getParametersType()[i] = mm.readUB2(); @@ -133,6 +132,9 @@ public void read(byte[] data, String charset) throws UnsupportedEncodingExceptio bv.isNull = true; } else { BindValueUtil.read(mm, bv, charset); + if(bv.isLongData) { + bv.value = pstmt.getLongData(i); + } } values[i] = bv; } diff --git a/src/main/java/io/mycat/server/packet/FieldPacket.java b/src/main/java/io/mycat/net/mysql/FieldPacket.java similarity index 91% rename from src/main/java/io/mycat/server/packet/FieldPacket.java rename to src/main/java/io/mycat/net/mysql/FieldPacket.java index 70fc67709..23d362781 100644 --- a/src/main/java/io/mycat/server/packet/FieldPacket.java +++ b/src/main/java/io/mycat/net/mysql/FieldPacket.java @@ -21,13 +21,15 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.packet; - -import io.mycat.net.BufferArray; -import io.mycat.server.packet.util.BufferUtil; +package io.mycat.net.mysql; import java.nio.ByteBuffer; +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.buffer.BufferArray; +import io.mycat.net.FrontendConnection; + /** * From Server To Client, part of Result Set Packets. One for each column in the * result set. Thus, if the value of field_columns in the Result Set Header @@ -93,12 +95,15 @@ public void read(BinaryPacket bin) { } @Override - public void write(BufferArray bufferArray) { + public ByteBuffer write(ByteBuffer buffer, FrontendConnection c, + boolean writeSocketIfFull) { int size = calcPacketSize(); - ByteBuffer buffer = bufferArray.checkWriteBuffer(packetHeaderSize + size); + buffer = c.checkWriteBuffer(buffer, c.getPacketHeaderSize() + size, + writeSocketIfFull); BufferUtil.writeUB3(buffer, size); buffer.put(packetId); writeBody(buffer); + return buffer; } @Override @@ -162,4 +167,12 @@ private void writeBody(ByteBuffer buffer) { } } + public void write(BufferArray bufferArray) { + int size = calcPacketSize(); + ByteBuffer buffer = bufferArray.checkWriteBuffer(packetHeaderSize + size); + BufferUtil.writeUB3(buffer, size); + buffer.put(packetId); + writeBody(buffer); + } + } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/packet/HandshakePacket.java b/src/main/java/io/mycat/net/mysql/HandshakePacket.java similarity index 93% rename from src/main/java/io/mycat/server/packet/HandshakePacket.java rename to src/main/java/io/mycat/net/mysql/HandshakePacket.java index 3907369e6..cabb27fa2 100644 --- a/src/main/java/io/mycat/server/packet/HandshakePacket.java +++ b/src/main/java/io/mycat/net/mysql/HandshakePacket.java @@ -21,14 +21,14 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.packet; - -import io.mycat.net.NetSystem; -import io.mycat.server.GenalMySQLConnection; -import io.mycat.server.packet.util.BufferUtil; +package io.mycat.net.mysql; import java.nio.ByteBuffer; +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.net.FrontendConnection; + /** * From server to client during initial handshake. * @@ -93,8 +93,8 @@ public void read(byte[] data) { restOfScrambleBuff = mm.readBytesWithNull(); } - public void write(GenalMySQLConnection c) { - ByteBuffer buffer =NetSystem.getInstance().getBufferPool().allocate(); + public void write(FrontendConnection c) { + ByteBuffer buffer = c.allocate(); BufferUtil.writeUB3(buffer, calcPacketSize()); buffer.put(packetId); buffer.put(protocolVersion); diff --git a/src/main/java/io/mycat/net/mysql/HandshakeV10Packet.java b/src/main/java/io/mycat/net/mysql/HandshakeV10Packet.java new file mode 100644 index 000000000..2a1b8f0e1 --- /dev/null +++ b/src/main/java/io/mycat/net/mysql/HandshakeV10Packet.java @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.mysql; + +import java.nio.ByteBuffer; + +import io.mycat.config.Capabilities; +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.net.FrontendConnection; + +/** + * From mycat server to client during initial handshake. + * + *
+ * Bytes                        Name
+ * -----                        ----
+ * 1                            protocol_version (always 0x0a)
+ * n (string[NULL])             server_version
+ * 4                            thread_id
+ * 8 (string[8])                auth-plugin-data-part-1
+ * 1                            (filler) always 0x00
+ * 2                            capability flags (lower 2 bytes)
+ *   if more data in the packet:
+ * 1                            character set
+ * 2                            status flags
+ * 2                            capability flags (upper 2 bytes)
+ *   if capabilities & CLIENT_PLUGIN_AUTH {
+ * 1                            length of auth-plugin-data
+ *   } else {
+ * 1                            0x00
+ *   }
+ * 10 (string[10])              reserved (all 0x00)
+ *   if capabilities & CLIENT_SECURE_CONNECTION {
+ * string[$len]   auth-plugin-data-part-2 ($len=MAX(13, length of auth-plugin-data - 8))
+ *   }
+ *   if capabilities & CLIENT_PLUGIN_AUTH {
+ * string[NUL]    auth-plugin name
+ *   }
+ * 
+ * @see http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#Protocol::HandshakeV10
+ * 
+ * + * @author CrazyPig + * @since 2016-11-13 + * + */ +public class HandshakeV10Packet extends MySQLPacket { + private static final byte[] FILLER_10 = new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + private static final byte[] DEFAULT_AUTH_PLUGIN_NAME = "mysql_native_password".getBytes(); + + public byte protocolVersion; + public byte[] serverVersion; + public long threadId; + public byte[] seed; // auth-plugin-data-part-1 + public int serverCapabilities; + public byte serverCharsetIndex; + public int serverStatus; + public byte[] restOfScrambleBuff; // auth-plugin-data-part-2 + public byte[] authPluginName = DEFAULT_AUTH_PLUGIN_NAME; + + public void write(FrontendConnection c) { + + ByteBuffer buffer = c.allocate(); + BufferUtil.writeUB3(buffer, calcPacketSize()); + buffer.put(packetId); + buffer.put(protocolVersion); + BufferUtil.writeWithNull(buffer, serverVersion); + BufferUtil.writeUB4(buffer, threadId); + buffer.put(seed); + buffer.put((byte)0); // [00] filler + BufferUtil.writeUB2(buffer, serverCapabilities); // capability flags (lower 2 bytes) + buffer.put(serverCharsetIndex); + BufferUtil.writeUB2(buffer, serverStatus); + BufferUtil.writeUB2(buffer, (serverCapabilities >> 16)); // capability flags (upper 2 bytes) + if((serverCapabilities & Capabilities.CLIENT_PLUGIN_AUTH) != 0) { + if(restOfScrambleBuff.length <= 13) { + buffer.put((byte) (seed.length + 13)); + } else { + buffer.put((byte) (seed.length + restOfScrambleBuff.length)); + } + } else { + buffer.put((byte) 0); + } + buffer.put(FILLER_10); + if((serverCapabilities & Capabilities.CLIENT_SECURE_CONNECTION) != 0) { + buffer.put(restOfScrambleBuff); + // restOfScrambleBuff.length always to be 12 + if(restOfScrambleBuff.length < 13) { + for(int i = 13 - restOfScrambleBuff.length; i > 0; i--) { + buffer.put((byte)0); + } + } + } + if((serverCapabilities & Capabilities.CLIENT_PLUGIN_AUTH) != 0) { + BufferUtil.writeWithNull(buffer, authPluginName); + } + c.write(buffer); + } + + @Override + public int 
calcPacketSize() { + int size = 1; // protocol version + size += (serverVersion.length + 1); // server version + size += 4; // connection id + size += seed.length; + size += 1; // [00] filler + size += 2; // capability flags (lower 2 bytes) + size += 1; // character set + size += 2; // status flags + size += 2; // capability flags (upper 2 bytes) + size += 1; + size += 10; // reserved (all [00]) + if((serverCapabilities & Capabilities.CLIENT_SECURE_CONNECTION) != 0) { + // restOfScrambleBuff.length always to be 12 + if(restOfScrambleBuff.length <= 13) { + size += 13; + } else { + size += restOfScrambleBuff.length; + } + } + if((serverCapabilities & Capabilities.CLIENT_PLUGIN_AUTH) != 0) { + size += (authPluginName.length + 1); // auth-plugin name + } + return size; + } + + @Override + protected String getPacketInfo() { + return "MySQL HandshakeV10 Packet"; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/packet/HeartbeatPacket.java b/src/main/java/io/mycat/net/mysql/HeartbeatPacket.java similarity index 87% rename from src/main/java/io/mycat/server/packet/HeartbeatPacket.java rename to src/main/java/io/mycat/net/mysql/HeartbeatPacket.java index 79d0dba44..5a04c3d8c 100644 --- a/src/main/java/io/mycat/server/packet/HeartbeatPacket.java +++ b/src/main/java/io/mycat/net/mysql/HeartbeatPacket.java @@ -21,14 +21,14 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet; - -import io.mycat.net.NetSystem; -import io.mycat.server.GenalMySQLConnection; -import io.mycat.server.packet.util.BufferUtil; +package io.mycat.net.mysql; import java.nio.ByteBuffer; +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.net.BackendAIOConnection; + /** * From client to server when the client do heartbeat between mycat cluster. 
* @@ -53,8 +53,9 @@ public void read(byte[] data) { id = mm.readLength(); } - public void write(GenalMySQLConnection c) { - ByteBuffer buffer =NetSystem.getInstance().getBufferPool().allocate(); + @Override + public void write(BackendAIOConnection c) { + ByteBuffer buffer = c.allocate(); BufferUtil.writeUB3(buffer, calcPacketSize()); buffer.put(packetId); buffer.put(command); diff --git a/src/main/java/io/mycat/net/mysql/LongDataPacket.java b/src/main/java/io/mycat/net/mysql/LongDataPacket.java new file mode 100644 index 000000000..03c0a2206 --- /dev/null +++ b/src/main/java/io/mycat/net/mysql/LongDataPacket.java @@ -0,0 +1,72 @@ +package io.mycat.net.mysql; + +import io.mycat.backend.mysql.MySQLMessage; + +/** + * + *
+ * 
+ * COM_STMT_SEND_LONG_DATA sends the data for a column. Sending it repeatedly appends the data to the parameter.
+ * No response is sent back to the client.
+
+ * COM_STMT_SEND_LONG_DATA:
+ * COM_STMT_SEND_LONG_DATA
+ * direction: client -> server
+ * response: none
+
+ * payload:
+ *   1              [18] COM_STMT_SEND_LONG_DATA
+ *   4              statement-id
+ *   2              param-id
+ *   n              data
+ * 
+ * 
+ * + * @see https://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html + * + * @author CrazyPig + * @since 2016-09-08 + * + */ +public class LongDataPacket extends MySQLPacket { + + private static final byte PACKET_FALG = (byte) 24; + private long pstmtId; + private long paramId; + private byte[] longData = new byte[0]; + + public void read(byte[] data) { + MySQLMessage mm = new MySQLMessage(data); + packetLength = mm.readUB3(); + packetId = mm.read(); + byte code = mm.read(); + assert code == PACKET_FALG; + pstmtId = mm.readUB4(); + paramId = mm.readUB2(); + this.longData = mm.readBytes(packetLength - (1 + 4 + 2)); + } + + @Override + public int calcPacketSize() { + return 1 + 4 + 2 + this.longData.length; + } + + @Override + protected String getPacketInfo() { + return "MySQL Long Data Packet"; + } + + public long getPstmtId() { + return pstmtId; + } + + public long getParamId() { + return paramId; + } + + public byte[] getLongData() { + return longData; + } + + +} diff --git a/src/main/java/io/mycat/net/mysql/MySQLPacket.java b/src/main/java/io/mycat/net/mysql/MySQLPacket.java new file mode 100644 index 000000000..8adaf44aa --- /dev/null +++ b/src/main/java/io/mycat/net/mysql/MySQLPacket.java @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.mysql; + +import java.nio.ByteBuffer; + +import io.mycat.net.BackendAIOConnection; +import io.mycat.net.FrontendConnection; + +/** + * @author mycat + */ +public abstract class MySQLPacket { + /** + * none, this is an internal thread state + */ + public static final byte COM_SLEEP = 0; + + /** + * mysql_close + */ + public static final byte COM_QUIT = 1; + + /** + * mysql_select_db + */ + public static final byte COM_INIT_DB = 2; + + /** + * mysql_real_query + */ + public static final byte COM_QUERY = 3; + + /** + * mysql_list_fields + */ + public static final byte COM_FIELD_LIST = 4; + + /** + * mysql_create_db (deprecated) + */ + public static final byte COM_CREATE_DB = 5; + + /** + * mysql_drop_db (deprecated) + */ + public static final byte COM_DROP_DB = 6; + + /** + * mysql_refresh + */ + public static final byte COM_REFRESH = 7; + + /** + * mysql_shutdown + */ + public static final byte COM_SHUTDOWN = 8; + + /** + * mysql_stat + */ + public static final byte COM_STATISTICS = 9; + + /** + * mysql_list_processes + */ + public static final byte COM_PROCESS_INFO = 10; + + /** + * none, this is an internal thread state + */ + public static final byte COM_CONNECT = 11; + + /** + * mysql_kill + */ + public static final byte COM_PROCESS_KILL = 12; + + /** + * mysql_dump_debug_info + */ + public static final byte COM_DEBUG = 13; + + /** + * mysql_ping + */ + public static final byte COM_PING = 14; + + /** + * none, this is an internal thread state + */ + public static final byte COM_TIME = 15; + + /** + * none, this is an internal thread state + */ + public static final byte 
COM_DELAYED_INSERT = 16; + + /** + * mysql_change_user + */ + public static final byte COM_CHANGE_USER = 17; + + /** + * used by slave server mysqlbinlog + */ + public static final byte COM_BINLOG_DUMP = 18; + + /** + * used by slave server to get master table + */ + public static final byte COM_TABLE_DUMP = 19; + + /** + * used by slave to log connection to master + */ + public static final byte COM_CONNECT_OUT = 20; + + /** + * used by slave to register to master + */ + public static final byte COM_REGISTER_SLAVE = 21; + + /** + * mysql_stmt_prepare + */ + public static final byte COM_STMT_PREPARE = 22; + + /** + * mysql_stmt_execute + */ + public static final byte COM_STMT_EXECUTE = 23; + + /** + * mysql_stmt_send_long_data + */ + public static final byte COM_STMT_SEND_LONG_DATA = 24; + + /** + * mysql_stmt_close + */ + public static final byte COM_STMT_CLOSE = 25; + + /** + * mysql_stmt_reset + */ + public static final byte COM_STMT_RESET = 26; + + /** + * mysql_set_server_option + */ + public static final byte COM_SET_OPTION = 27; + + /** + * mysql_stmt_fetch + */ + public static final byte COM_STMT_FETCH = 28; + + /** + * Mycat heartbeat + */ + public static final byte COM_HEARTBEAT = 64; + + //包头大小 + public static final int packetHeaderSize = 4; + + + public int packetLength; + public byte packetId; + + /** + * 把数据包写到buffer中,如果buffer满了就把buffer通过前端连接写出 (writeSocketIfFull=true)。 + */ + public ByteBuffer write(ByteBuffer buffer, FrontendConnection c,boolean writeSocketIfFull) { + throw new UnsupportedOperationException(); + } + + /** + * 把数据包通过后端连接写出,一般使用buffer机制来提高写的吞吐量。 + */ + public void write(BackendAIOConnection c) { + throw new UnsupportedOperationException(); + } + + /** + * 计算数据包大小,不包含包头长度。 + */ + public abstract int calcPacketSize(); + + /** + * 取得数据包信息 + */ + protected abstract String getPacketInfo(); + + @Override + public String toString() { + return new StringBuilder().append(getPacketInfo()).append("{length=").append(packetLength).append(",id=") + 
.append(packetId).append('}').toString(); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/packet/OkPacket.java b/src/main/java/io/mycat/net/mysql/OkPacket.java similarity index 80% rename from src/main/java/io/mycat/server/packet/OkPacket.java rename to src/main/java/io/mycat/net/mysql/OkPacket.java index 5759eb0c3..d497473bf 100644 --- a/src/main/java/io/mycat/server/packet/OkPacket.java +++ b/src/main/java/io/mycat/net/mysql/OkPacket.java @@ -21,14 +21,15 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet; - -import io.mycat.net.NetSystem; -import io.mycat.server.GenalMySQLConnection; -import io.mycat.server.packet.util.BufferUtil; +package io.mycat.net.mysql; import java.nio.ByteBuffer; +import io.mycat.MycatServer; +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.net.FrontendConnection; + /** * From server to client in response to command, if no error and no result set. 
* @@ -87,27 +88,21 @@ public void read(byte[] data) { } } - public byte[] writeToBytes() { - ByteBuffer buffer=NetSystem.getInstance().getBufferPool().allocate(); - BufferUtil.writeUB3(buffer, calcPacketSize()); - buffer.put(packetId); - buffer.put(fieldCount); - BufferUtil.writeLength(buffer, affectedRows); - BufferUtil.writeLength(buffer, insertId); - BufferUtil.writeUB2(buffer, serverStatus); - BufferUtil.writeUB2(buffer, warningCount); - if (message != null) { - BufferUtil.writeWithLength(buffer, message); - } - buffer.flip(); - byte[] data = new byte[buffer.limit()]; - buffer.get(data); + public byte[] writeToBytes(FrontendConnection c) { + ByteBuffer buffer = c.allocate(); + this.write(buffer, c); + buffer.flip(); + byte[] data = new byte[buffer.limit()]; + buffer.get(data); + c.recycle(buffer); + return data; + } - return data; - } + private ByteBuffer write(ByteBuffer buffer, FrontendConnection c) { - public void write(GenalMySQLConnection c) { - ByteBuffer buffer=NetSystem.getInstance().getBufferPool().allocate(); + int size = calcPacketSize(); + buffer = c.checkWriteBuffer(buffer, c.getPacketHeaderSize() + size, + true); BufferUtil.writeUB3(buffer, calcPacketSize()); buffer.put(packetId); buffer.put(fieldCount); @@ -118,10 +113,16 @@ public void write(GenalMySQLConnection c) { if (message != null) { BufferUtil.writeWithLength(buffer, message); } + + return buffer; + + } + + public void write(FrontendConnection c) { + ByteBuffer buffer = write(c.allocate(), c); c.write(buffer); } - @Override public int calcPacketSize() { int i = 1; @@ -139,4 +140,25 @@ protected String getPacketInfo() { return "MySQL OK Packet"; } + public byte[] writeToBytes() { + + int totalSize = calcPacketSize() + packetHeaderSize; + ByteBuffer buffer=MycatServer.getInstance().getBufferPool().allocate(totalSize); + BufferUtil.writeUB3(buffer, calcPacketSize()); + buffer.put(packetId); + buffer.put(fieldCount); + BufferUtil.writeLength(buffer, affectedRows); + 
BufferUtil.writeLength(buffer, insertId); + BufferUtil.writeUB2(buffer, serverStatus); + BufferUtil.writeUB2(buffer, warningCount); + if (message != null) { + BufferUtil.writeWithLength(buffer, message); + } + buffer.flip(); + byte[] data = new byte[buffer.limit()]; + buffer.get(data); + MycatServer.getInstance().getBufferPool().recycle(buffer); + return data; + } + } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/packet/PingPacket.java b/src/main/java/io/mycat/net/mysql/PingPacket.java similarity index 97% rename from src/main/java/io/mycat/server/packet/PingPacket.java rename to src/main/java/io/mycat/net/mysql/PingPacket.java index 9f89528d2..34054bdf4 100644 --- a/src/main/java/io/mycat/server/packet/PingPacket.java +++ b/src/main/java/io/mycat/net/mysql/PingPacket.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet; +package io.mycat.net.mysql; /** * @author mycat diff --git a/src/main/java/io/mycat/server/packet/PreparedOkPacket.java b/src/main/java/io/mycat/net/mysql/PreparedOkPacket.java similarity index 77% rename from src/main/java/io/mycat/server/packet/PreparedOkPacket.java rename to src/main/java/io/mycat/net/mysql/PreparedOkPacket.java index dffe9d123..e93f76a86 100644 --- a/src/main/java/io/mycat/server/packet/PreparedOkPacket.java +++ b/src/main/java/io/mycat/net/mysql/PreparedOkPacket.java @@ -21,15 +21,13 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet; - -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.GenalMySQLConnection; -import io.mycat.server.packet.util.BufferUtil; +package io.mycat.net.mysql; import java.nio.ByteBuffer; +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.net.FrontendConnection; + /** *
  * From server to client, in response to prepared statement initialization packet. 
@@ -72,9 +70,10 @@ public PreparedOkPacket() {
         this.filler = 0;
     }
 
-    public void write(GenalMySQLConnection c) {
-        ByteBuffer buffer =NetSystem.getInstance().getBufferPool().allocate();
+    @Override
+    public ByteBuffer write(ByteBuffer buffer, FrontendConnection c,boolean writeSocketIfFull) {
         int size = calcPacketSize();
+        buffer = c.checkWriteBuffer(buffer, c.getPacketHeaderSize() + size,writeSocketIfFull);
         BufferUtil.writeUB3(buffer, size);
         buffer.put(packetId);
         buffer.put(flag);
@@ -83,21 +82,7 @@ public void write(GenalMySQLConnection c) {
         BufferUtil.writeUB2(buffer, parametersNumber);
         buffer.put(filler);
         BufferUtil.writeUB2(buffer, warningCount);
-        c.write(buffer);
-    }
-    
-    public void write(BufferArray bufferArray) {
-    	int size = calcPacketSize();
-    	int totalSize = size + packetHeaderSize;
-    	ByteBuffer buffer = bufferArray.checkWriteBuffer(totalSize);
-    	BufferUtil.writeUB3(buffer, size);
-    	buffer.put(packetId);
-        buffer.put(flag);
-        BufferUtil.writeUB4(buffer, statementId);
-        BufferUtil.writeUB2(buffer, columnsNumber);
-        BufferUtil.writeUB2(buffer, parametersNumber);
-        buffer.put(filler);
-        BufferUtil.writeUB2(buffer, warningCount);
+        return buffer;
     }
 
     @Override
diff --git a/src/main/java/io/mycat/server/packet/QuitPacket.java b/src/main/java/io/mycat/net/mysql/QuitPacket.java
similarity index 97%
rename from src/main/java/io/mycat/server/packet/QuitPacket.java
rename to src/main/java/io/mycat/net/mysql/QuitPacket.java
index 9ff2c6f3d..d187b31a1 100644
--- a/src/main/java/io/mycat/server/packet/QuitPacket.java
+++ b/src/main/java/io/mycat/net/mysql/QuitPacket.java
@@ -21,7 +21,7 @@
  * https://code.google.com/p/opencloudb/.
  *
  */
-package io.mycat.server.packet;
+package io.mycat.net.mysql;
 
 /**
  * @author mycat
diff --git a/src/main/java/io/mycat/server/packet/Reply323Packet.java b/src/main/java/io/mycat/net/mysql/Reply323Packet.java
similarity index 86%
rename from src/main/java/io/mycat/server/packet/Reply323Packet.java
rename to src/main/java/io/mycat/net/mysql/Reply323Packet.java
index 31811eb7f..7c5c44361 100644
--- a/src/main/java/io/mycat/server/packet/Reply323Packet.java
+++ b/src/main/java/io/mycat/net/mysql/Reply323Packet.java
@@ -21,17 +21,16 @@
  * https://code.google.com/p/opencloudb/.
  *
  */
-package io.mycat.server.packet;
-
-import io.mycat.net.NetSystem;
-import io.mycat.server.GenalMySQLConnection;
-import io.mycat.server.packet.util.BufferUtil;
-import io.mycat.server.packet.util.StreamUtil;
+package io.mycat.net.mysql;
 
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
 
+import io.mycat.backend.mysql.BufferUtil;
+import io.mycat.backend.mysql.StreamUtil;
+import io.mycat.net.BackendAIOConnection;
+
 /**
  * @author mycat
  */
@@ -49,8 +48,9 @@ public void write(OutputStream out) throws IOException {
         }
     }
 
-    public void write(GenalMySQLConnection c) {
-        ByteBuffer buffer =NetSystem.getInstance().getBufferPool().allocate();
+    @Override
+    public void write(BackendAIOConnection c) {
+        ByteBuffer buffer = c.allocate();
         BufferUtil.writeUB3(buffer, calcPacketSize());
         buffer.put(packetId);
         if (seed == null) {
diff --git a/src/main/java/io/mycat/server/packet/RequestFilePacket.java b/src/main/java/io/mycat/net/mysql/RequestFilePacket.java
similarity index 84%
rename from src/main/java/io/mycat/server/packet/RequestFilePacket.java
rename to src/main/java/io/mycat/net/mysql/RequestFilePacket.java
index d8aef6676..12785bdd2 100644
--- a/src/main/java/io/mycat/server/packet/RequestFilePacket.java
+++ b/src/main/java/io/mycat/net/mysql/RequestFilePacket.java
@@ -21,14 +21,13 @@
  * https://code.google.com/p/opencloudb/.
  *
  */
-package io.mycat.server.packet;
-
-import io.mycat.net.NetSystem;
-import io.mycat.server.MySQLFrontConnection;
-import io.mycat.server.packet.util.BufferUtil;
+package io.mycat.net.mysql;
 
 import java.nio.ByteBuffer;
 
+import io.mycat.backend.mysql.BufferUtil;
+import io.mycat.net.FrontendConnection;
+
 /**
  * load data local infile 向客户端请求发送文件用
  */
@@ -39,11 +38,11 @@ public class RequestFilePacket extends MySQLPacket
     public byte[] fileName;
 
 
-    public void write(MySQLFrontConnection c)
+    @Override
+    public ByteBuffer write(ByteBuffer buffer, FrontendConnection c, boolean writeSocketIfFull)
     {
         int size = calcPacketSize();
-        ByteBuffer buffer = NetSystem.getInstance().getBufferPool()
-				.allocate();
+        buffer = c.checkWriteBuffer(buffer, c.getPacketHeaderSize() + size, writeSocketIfFull);
         BufferUtil.writeUB3(buffer, size);
         buffer.put(packetId);
         buffer.put(command);
@@ -55,6 +54,8 @@ public void write(MySQLFrontConnection c)
         }
 
         c.write(buffer);
+
+        return buffer;
     }
 
     @Override
diff --git a/src/main/java/io/mycat/net/mysql/ResetPacket.java b/src/main/java/io/mycat/net/mysql/ResetPacket.java
new file mode 100644
index 000000000..874a711b6
--- /dev/null
+++ b/src/main/java/io/mycat/net/mysql/ResetPacket.java
@@ -0,0 +1,55 @@
+package io.mycat.net.mysql;
+
+import io.mycat.backend.mysql.MySQLMessage;
+
+/**
+ * 
+ * 
+ * COM_STMT_RESET resets the data of a prepared statement which was accumulated with COM_STMT_SEND_LONG_DATA commands and closes the cursor if it was opened with COM_STMT_EXECUTE
+
+ * The server will send a OK_Packet if the statement could be reset, a ERR_Packet if not.
+ * 
+ * COM_STMT_RESET:
+ * COM_STMT_RESET
+ * direction: client -> server
+ * response: OK or ERR
+
+ * payload:
+ *   1              [1a] COM_STMT_RESET
+ *   4              statement-id
+ * 
+ * 
+ * + * @author CrazyPig + * @since 2016-09-08 + * + */ +public class ResetPacket extends MySQLPacket { + + private static final byte PACKET_FALG = (byte) 26; + private long pstmtId; + + public void read(byte[] data) { + MySQLMessage mm = new MySQLMessage(data); + packetLength = mm.readUB3(); + packetId = mm.read(); + byte code = mm.read(); + assert code == PACKET_FALG; + pstmtId = mm.readUB4(); + } + + @Override + public int calcPacketSize() { + return 1 + 4; + } + + @Override + protected String getPacketInfo() { + return "MySQL Reset Packet"; + } + + public long getPstmtId() { + return pstmtId; + } + +} diff --git a/src/main/java/io/mycat/server/packet/ResultSetHeaderPacket.java b/src/main/java/io/mycat/net/mysql/ResultSetHeaderPacket.java similarity index 62% rename from src/main/java/io/mycat/server/packet/ResultSetHeaderPacket.java rename to src/main/java/io/mycat/net/mysql/ResultSetHeaderPacket.java index 22c95b480..5f0977192 100644 --- a/src/main/java/io/mycat/server/packet/ResultSetHeaderPacket.java +++ b/src/main/java/io/mycat/net/mysql/ResultSetHeaderPacket.java @@ -21,13 +21,15 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet; - -import io.mycat.net.BufferArray; -import io.mycat.server.packet.util.BufferUtil; +package io.mycat.net.mysql; import java.nio.ByteBuffer; +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.buffer.BufferArray; +import io.mycat.net.FrontendConnection; + /** * From server to client after command, if no error and result set -- that is, * if the command was a query which returned a result set. 
The Result Set Header @@ -53,21 +55,33 @@ */ public class ResultSetHeaderPacket extends MySQLPacket { - public int fieldCount; - public long extra; + public int fieldCount; + public long extra; - public void read(byte[] data) { - MySQLMessage mm = new MySQLMessage(data); - this.packetLength = mm.readUB3(); - this.packetId = mm.read(); - this.fieldCount = (int) mm.readLength(); - if (mm.hasRemaining()) { - this.extra = mm.readLength(); - } - } + public void read(byte[] data) { + MySQLMessage mm = new MySQLMessage(data); + this.packetLength = mm.readUB3(); + this.packetId = mm.read(); + this.fieldCount = (int) mm.readLength(); + if (mm.hasRemaining()) { + this.extra = mm.readLength(); + } + } - @Override - public void write(BufferArray bufferArray) { + @Override + public ByteBuffer write(ByteBuffer buffer, FrontendConnection c,boolean writeSocketIfFull) { + int size = calcPacketSize(); + buffer = c.checkWriteBuffer(buffer, c.getPacketHeaderSize() + size,writeSocketIfFull); + BufferUtil.writeUB3(buffer, size); + buffer.put(packetId); + BufferUtil.writeLength(buffer, fieldCount); + if (extra > 0) { + BufferUtil.writeLength(buffer, extra); + } + return buffer; + } + + public void write(BufferArray bufferArray) { int size = calcPacketSize(); ByteBuffer buffer = bufferArray .checkWriteBuffer(packetHeaderSize + size); @@ -79,18 +93,20 @@ public void write(BufferArray bufferArray) { } } - @Override - public int calcPacketSize() { - int size = BufferUtil.getLength(fieldCount); - if (extra > 0) { - size += BufferUtil.getLength(extra); - } - return size; - } + @Override + public int calcPacketSize() { + int size = BufferUtil.getLength(fieldCount); + if (extra > 0) { + size += BufferUtil.getLength(extra); + } + return size; + } - @Override - protected String getPacketInfo() { - return "MySQL ResultSetHeader Packet"; - } + @Override + protected String getPacketInfo() { + return "MySQL ResultSetHeader Packet"; + } + + } \ No newline at end of file diff --git 
a/src/main/java/io/mycat/server/packet/RowDataPacket.java b/src/main/java/io/mycat/net/mysql/RowDataPacket.java similarity index 76% rename from src/main/java/io/mycat/server/packet/RowDataPacket.java rename to src/main/java/io/mycat/net/mysql/RowDataPacket.java index 41379b288..3de6c5b60 100644 --- a/src/main/java/io/mycat/server/packet/RowDataPacket.java +++ b/src/main/java/io/mycat/net/mysql/RowDataPacket.java @@ -21,17 +21,17 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.packet; - -import io.mycat.net.BufferArray; -import io.mycat.net.Connection; -import io.mycat.net.NetSystem; -import io.mycat.server.packet.util.BufferUtil; +package io.mycat.net.mysql; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.buffer.BufferArray; +import io.mycat.net.FrontendConnection; + /** * From server to client. One packet for each row in the result set. 
* @@ -56,7 +56,9 @@ */ public class RowDataPacket extends MySQLPacket { private static final byte NULL_MARK = (byte) 251; - private static final byte EMPTY_MARK = (byte) 0; + private static final byte EMPTY_MARK = (byte) 0; + + public byte[] value; public int fieldCount; public final List fieldValues; @@ -66,16 +68,16 @@ public RowDataPacket(int fieldCount) { } public void add(byte[] value) { - // 这里应该修改value + //这里应该修改value fieldValues.add(value); } - public void addFieldCount(int add) { - // 这里应该修改field - fieldCount = fieldCount + add; + //这里应该修改field + fieldCount=fieldCount+add; } - + public void read(byte[] data) { + value = data; MySQLMessage mm = new MySQLMessage(data); packetLength = mm.readUB3(); packetId = mm.read(); @@ -85,56 +87,28 @@ public void read(byte[] data) { } @Override - public void write(BufferArray bufferArray) { - int size = calcPacketSize(); - ByteBuffer buffer = bufferArray.checkWriteBuffer(packetHeaderSize - + size); - BufferUtil.writeUB3(buffer, size); - buffer.put(packetId); + public ByteBuffer write(ByteBuffer bb, FrontendConnection c, + boolean writeSocketIfFull) { + bb = c.checkWriteBuffer(bb, c.getPacketHeaderSize(), writeSocketIfFull); + BufferUtil.writeUB3(bb, calcPacketSize()); + bb.put(packetId); for (int i = 0; i < fieldCount; i++) { byte[] fv = fieldValues.get(i); - if (fv == null) { - buffer = bufferArray.checkWriteBuffer(1); - buffer.put(RowDataPacket.NULL_MARK); - } else if (fv.length == 0) { - buffer = bufferArray.checkWriteBuffer(1); - buffer.put(RowDataPacket.EMPTY_MARK); - } else { - buffer = bufferArray.checkWriteBuffer(BufferUtil - .getLength(fv.length)); - BufferUtil.writeLength(buffer, fv.length); - bufferArray.write(fv); + if (fv == null ) { + bb = c.checkWriteBuffer(bb, 1, writeSocketIfFull); + bb.put(RowDataPacket.NULL_MARK); + }else if (fv.length == 0) { + bb = c.checkWriteBuffer(bb, 1, writeSocketIfFull); + bb.put(RowDataPacket.EMPTY_MARK); + } + else { + bb = c.checkWriteBuffer(bb, BufferUtil.getLength(fv), + 
writeSocketIfFull); + BufferUtil.writeLength(bb, fv.length); + bb = c.writeToBuffer(fv, bb); } } - } - - public void write(Connection conn) { - int size = calcPacketSize(); - int totalSize = size + packetHeaderSize; - if (NetSystem.getInstance().getBufferPool().getChunkSize() >= totalSize) { - ByteBuffer buffer = NetSystem.getInstance().getBufferPool() - .allocate(); - BufferUtil.writeUB3(buffer, size); - buffer.put(packetId); - for (int i = 0; i < fieldCount; i++) { - byte[] fv = fieldValues.get(i); - if (fv == null) { - buffer.put(RowDataPacket.NULL_MARK); - } else if (fv.length == 0) { - buffer.put(RowDataPacket.EMPTY_MARK); - } else { - BufferUtil.writeLength(buffer, fv.length); - buffer.put(fv); - } - } - conn.write(buffer); - } else { - BufferArray bufArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - write(bufArray); - conn.write(bufArray); - } - + return bb; } @Override @@ -152,4 +126,26 @@ protected String getPacketInfo() { return "MySQL RowData Packet"; } + public void write(BufferArray bufferArray) { + int size = calcPacketSize(); + ByteBuffer buffer = bufferArray.checkWriteBuffer(packetHeaderSize + size); + BufferUtil.writeUB3(buffer, size); + buffer.put(packetId); + for (int i = 0; i < fieldCount; i++) { + byte[] fv = fieldValues.get(i); + if (fv == null) { + buffer = bufferArray.checkWriteBuffer(1); + buffer.put(RowDataPacket.NULL_MARK); + } else if (fv.length == 0) { + buffer = bufferArray.checkWriteBuffer(1); + buffer.put(RowDataPacket.EMPTY_MARK); + } else { + buffer = bufferArray.checkWriteBuffer(BufferUtil + .getLength(fv.length)); + BufferUtil.writeLength(buffer, fv.length); + bufferArray.write(fv); + } + } + } + } \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/AuthenticationCleartextPassword.java b/src/main/java/io/mycat/net/postgres/AuthenticationCleartextPassword.java new file mode 100644 index 000000000..4e6256d1b --- /dev/null +++ 
b/src/main/java/io/mycat/net/postgres/AuthenticationCleartextPassword.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * AuthenticationCleartextPassword (B)
+ * Byte1('R') Identifies the message as an authentication request. 
+ * Int32(8) Length of message contents in bytes, including self. 
+ * Int32(3) Specifies that a clear-text password is required.
+ * 
+ * + * @author mycat + */ +public class AuthenticationCleartextPassword extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/AuthenticationGSS.java b/src/main/java/io/mycat/net/postgres/AuthenticationGSS.java new file mode 100644 index 000000000..dc3b2b79f --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/AuthenticationGSS.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * AuthenticationGSS (B) 
+ * Byte1('R') Identifies the message as an authentication request. 
+ * Int32(8) Length of message contents in bytes, including self. 
+ * Int32(7) Specifies that GSSAPI authentication is required.
+ * 
+ * + * @author mycat + */ +public class AuthenticationGSS extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/AuthenticationGSSContinue.java b/src/main/java/io/mycat/net/postgres/AuthenticationGSSContinue.java new file mode 100644 index 000000000..68d71feb8 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/AuthenticationGSSContinue.java @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * AuthenticationGSSContinue (B) 
+ * Byte1('R') Identifies the message as an authentication request. 
+ * Int32 Length of message contents in bytes, including self. 
+ * Int32(8) Specifies that this message contains GSSAPI or SSPI data. 
+ * Byten GSSAPI or SSPI authentication data.
+ * 
+ * + * @author mycat + */ +public class AuthenticationGSSContinue extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/AuthenticationKerberosV5.java b/src/main/java/io/mycat/net/postgres/AuthenticationKerberosV5.java new file mode 100644 index 000000000..c3089f8e8 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/AuthenticationKerberosV5.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * AuthenticationKerberosV5 (B) 
+ * Byte1('R') Identifies the message as an authentication request. 
+ * Int32(8) Length of message contents in bytes, including self. 
+ * Int32(2) Specifies that Kerberos V5 authentication is required.
+ * 
+ * + * @author mycat + */ +public class AuthenticationKerberosV5 extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/AuthenticationMD5Password.java b/src/main/java/io/mycat/net/postgres/AuthenticationMD5Password.java new file mode 100644 index 000000000..5ae6c7526 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/AuthenticationMD5Password.java @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * AuthenticationMD5Password (B)
+ * Byte1('R') Identifies the message as an authentication request. 
+ * Int32(12) Length of message contents in bytes, including self. 
+ * Int32(5) Specifies that an MD5-encrypted password is required. 
+ * Byte4 The salt to use when encrypting the password.
+ * 
+ * + * @author mycat + */ +public class AuthenticationMD5Password extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/AuthenticationOk.java b/src/main/java/io/mycat/net/postgres/AuthenticationOk.java new file mode 100644 index 000000000..fc964550f --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/AuthenticationOk.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * AuthenticationOk (B) 
+ * Byte1('R') Identifies the message as an authentication request. 
+ * Int32(8) Length of message contents in bytes, including self. 
+ * Int32(0) Specifies that the authentication was successful.
+ * 
+ * + * @author mycat + */ +public class AuthenticationOk extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/AuthenticationSCMCredential.java b/src/main/java/io/mycat/net/postgres/AuthenticationSCMCredential.java new file mode 100644 index 000000000..fb06eb308 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/AuthenticationSCMCredential.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * AuthenticationSCMCredential (B) 
+ * Byte1('R') Identifies the message as an authentication request. 
+ * Int32(8) Length of message contents in bytes, including self. 
+ * Int32(6) Specifies that an SCM credentials message is required.
+ * 
+ * + * @author mycat + */ +public class AuthenticationSCMCredential extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/AuthenticationSSPI.java b/src/main/java/io/mycat/net/postgres/AuthenticationSSPI.java new file mode 100644 index 000000000..68dfad2e2 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/AuthenticationSSPI.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * AuthenticationSSPI (B) 
+ * Byte1('R') Identifies the message as an authentication request. 
+ * Int32(8) Length of message contents in bytes, including self. 
+ * Int32(9) Specifies that SSPI authentication is required.
+ * 
+ * + * @author mycat + */ +public class AuthenticationSSPI extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/BackendKeyData.java b/src/main/java/io/mycat/net/postgres/BackendKeyData.java new file mode 100644 index 000000000..5108807d3 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/BackendKeyData.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * BackendKeyData (B) 
+ * Byte1('K') Identifies the message as cancellation key data. 
+ *            The frontend must save these values if it wishes to be able to
+ *            issue CancelRequest messages later. 
+ * Int32(12) Length of message contents in bytes, including self. 
+ * Int32 The process ID of this backend. 
+ * Int32 The secret key of this backend.
+ * 
+ * + * @author mycat + */ +public class BackendKeyData extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/Bind.java b/src/main/java/io/mycat/net/postgres/Bind.java new file mode 100644 index 000000000..c5699b88d --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/Bind.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * Bind (F) 
+ * Byte1('B') Identifies the message as a Bind command. 
+ * Int32 Length of message contents in bytes, including self. 
+ * String The name of the destination portal (an empty string selects the unnamed portal).
+ * String The name of the source prepared statement (an empty string selects the unnamed 
+ *        prepared statement). 
+ * Int16 The number of parameter format codes that follow (denoted C below). 
+ *       This can be zero to indicate that there are no parameters or that the parameters 
+ *       all use the default format (text); or one, in which case the specified format code 
+ *       is applied to all parameters; or it can equal the actual number of parameters. 
+ * Int16[C] The parameter format codes. Each must presently be zero (text) or one (binary). 
+ * Int16 The number of parameter values that follow (possibly zero). This must match the 
+ *       number of parameters needed by the query. Next, the following pair of fields appear 
+ *       for each parameter: 
+ * Int32 The length of the parameter value, in bytes (this count does not include
+ *       itself). Can be zero. As a special case, -1 indicates a NULL parameter
+ *       value. No value bytes follow in the NULL case. 
+ * Byten The value of the parameter, in the format indicated by the associated format code. 
+ *       n is the above length. After the last parameter, the following fields appear:
+ * Int16 The number of result-column format codes that follow (denoted R
+ *       below). This can be zero to indicate that there are no result columns or
+ *       that the result columns should all use the default format (text); or one,
+ *       in which case the specified format code is applied to all result columns
+ *       (if any); or it can equal the actual number of result columns of the query. 
+ * Int16[R] The result-column format codes. Each must presently be zero (text) or one (binary).
+ * 
+ * + * @author mycat + */ +public class Bind extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/config/node/QuarantineConfig.java b/src/main/java/io/mycat/net/postgres/BindComplete.java similarity index 74% rename from src/main/java/io/mycat/server/config/node/QuarantineConfig.java rename to src/main/java/io/mycat/net/postgres/BindComplete.java index 27a26b133..0d827f6c8 100644 --- a/src/main/java/io/mycat/server/config/node/QuarantineConfig.java +++ b/src/main/java/io/mycat/net/postgres/BindComplete.java @@ -21,27 +21,17 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.config.node; - -import java.util.HashMap; -import java.util.Map; -import java.util.Set; +package io.mycat.net.postgres; /** - * 隔离区配置定义 + *
+ * BindComplete (B) 
+ * Byte1('2') Identifies the message as a Bind-complete indicator. 
+ * Int32(4) Length of message contents in bytes, including self.
+ * 
* * @author mycat */ -public final class QuarantineConfig { - - private final Map> hosts; - - public QuarantineConfig() { - hosts = new HashMap>(); - } - - public Map> getHosts() { - return hosts; - } +public class BindComplete extends PostgresPacket { } \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/CancelRequest.java b/src/main/java/io/mycat/net/postgres/CancelRequest.java new file mode 100644 index 000000000..a140d839f --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/CancelRequest.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * CancelRequest (F) 
+ * Int32(16) Length of message contents in bytes, including self. 
+ * Int32(80877102) The cancel request code. The value is chosen to 
+ *                 contain 1234 in the most significant 16 bits, and 
+ *                 5678 in the least significant 16 bits. (To avoid 
+ *                 confusion, this code must not be the same as any 
+ *                 protocol version number.) 
+ * Int32 The process ID of the target backend. 
+ * Int32 The secret key for the target backend.
+ * 
+ * + * @author mycat + */ +public class CancelRequest extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/Close.java b/src/main/java/io/mycat/net/postgres/Close.java new file mode 100644 index 000000000..1577fa9d9 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/Close.java @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * Close (F) 
+ * Byte1('C') Identifies the message as a Close command. 
+ * Int32 Length of message contents in bytes, including self. 
+ * Byte1 'S' to close a prepared statement; or 'P' to close a portal. 
+ * String The name of the prepared statement or portal to close (an 
+ *        empty string selects the unnamed prepared statement or portal).
+ * 
+ * + * @author mycat + */ +public class Close extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/CloseComplete.java b/src/main/java/io/mycat/net/postgres/CloseComplete.java new file mode 100644 index 000000000..4906727bd --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/CloseComplete.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * CloseComplete (B) 
+ * Byte1('3') Identifies the message as a Close-complete indicator. 
+ * Int32(4) Length of message contents in bytes, including self.
+ * 
+ * + * @author mycat + */ +public class CloseComplete extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/CommandComplete.java b/src/main/java/io/mycat/net/postgres/CommandComplete.java new file mode 100644 index 000000000..ba5324856 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/CommandComplete.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * CommandComplete (B)
+ * Byte1('C') Identifies the message as a command-completed response.     
+ * Int32 Length of message contents in bytes, including self.     
+ * String The command tag. This is usually a single word that identifies which SQL command was completed. 
+ *        For an INSERT command, the tag is INSERT oid rows, where rows is the number of rows inserted. 
+ *        oid is the object ID of the inserted row if rows is 1 and the target table has OIDs; otherwise oid is 0.  
+ *        For a DELETE command, the tag is DELETE rows where rows is the number of rows deleted. 
+ *        For an UPDATE command, the tag is UPDATE rows where rows is the number of rows updated. 
+ *        For a SELECT or CREATE TABLE AS command, the tag is SELECT rows where rows is the number of rows retrieved. 
+ *        For a MOVE command, the tag is MOVE rows where rows is the number of rows the cursor's position has been changed by. 
+ *        For a FETCH command, the tag is FETCH rows where rows is the number of rows that have been retrieved from the cursor. 
+ *        For a COPY command, the tag is COPY rows where rows is the number of rows copied. 
+ *        (Note: the row count appears only in PostgreSQL 8.2 and later.)
+ * 
+ * + * @author mycat + */ +public class CommandComplete extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/CopyBothResponse.java b/src/main/java/io/mycat/net/postgres/CopyBothResponse.java new file mode 100644 index 000000000..096994143 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/CopyBothResponse.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * CopyBothResponse (B) 
+ * Byte1('W') Identifies the message as a Start Copy Both response. 
+ *            This message is used only for Streaming Replication. 
+ * Int32 Length of message contents in bytes, including self. 
+ * Int8 0 indicates the overall COPY format is textual (rows separated 
+ *      by newlines, columns separated by separator characters, etc). 
+ *      1 indicates the overall copy format is binary (similar to DataRow 
+ *      format). See COPY for more information. 
+ * Int16 The number of columns in the data to be copied (denoted N below). 
+ * Int16[N] The format codes to be used for each column. Each must presently 
+ *          be zero (text) or one (binary). All must be zero if the overall 
+ *          copy format is textual.
+ * 
+ * + * @author mycat + */ +public class CopyBothResponse extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/CopyData.java b/src/main/java/io/mycat/net/postgres/CopyData.java new file mode 100644 index 000000000..64476d658 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/CopyData.java @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * CopyData (F & B) 
+ * Byte1('d') Identifies the message as COPY data. 
+ * Int32 Length of message contents in bytes, including self. 
+ * Byten Data that forms part of a COPY data stream. Messages sent from the backend will
+ *       always correspond to single data rows, but messages sent by frontends
+ *       might divide the data stream arbitrarily.
+ * 
+ * + * @author mycat + */ +public class CopyData extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/CopyDone.java b/src/main/java/io/mycat/net/postgres/CopyDone.java new file mode 100644 index 000000000..140e943bb --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/CopyDone.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * CopyDone (F & B) 
+ * Byte1('c') Identifies the message as a COPY-complete indicator. 
+ * Int32(4) Length of message contents in bytes, including self.
+ * 
+ * + * @author mycat + */ +public class CopyDone extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/CopyFail.java b/src/main/java/io/mycat/net/postgres/CopyFail.java new file mode 100644 index 000000000..73205fdf4 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/CopyFail.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * CopyFail (F) 
+ * Byte1('f') Identifies the message as a COPY-failure indicator. 
+ * Int32 Length of message contents in bytes, including self.
+ * String An error message to report as the cause of failure.
+ * 
+ * + * @author mycat + */ +public class CopyFail extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/backend/MySQLDataSource.java b/src/main/java/io/mycat/net/postgres/CopyInResponse.java similarity index 55% rename from src/main/java/io/mycat/backend/MySQLDataSource.java rename to src/main/java/io/mycat/net/postgres/CopyInResponse.java index 809029530..15376428a 100644 --- a/src/main/java/io/mycat/backend/MySQLDataSource.java +++ b/src/main/java/io/mycat/net/postgres/CopyInResponse.java @@ -21,43 +21,27 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.backend; - -import java.io.IOException; - -import io.mycat.backend.heartbeat.DBHeartbeat; -import io.mycat.backend.heartbeat.MySQLHeartbeat; -import io.mycat.backend.nio.MySQLBackendConnectionFactory; -import io.mycat.server.config.node.DBHostConfig; -import io.mycat.server.config.node.DataHostConfig; -import io.mycat.server.executors.ResponseHandler; +package io.mycat.net.postgres; /** + *
+ * CopyInResponse (B)     
+ * Byte1('G') Identifies the message as a Start Copy In response. 
+ *            The frontend must now send copy-in data (if not prepared 
+ *            to do so, send a CopyFail message).
+ * Int32 Length of message contents in bytes, including self.
+ * Int8 0 indicates the overall COPY format is textual (rows separated 
+ *      by newlines, columns separated by separator characters, etc). 
+ *      1 indicates the overall copy format is binary (similar to DataRow 
+ *      format). See COPY for more information.
+ * Int16 The number of columns in the data to be copied (denoted N below).
+ * Int16[N] The format codes to be used for each column. Each must presently 
+ *          be zero (text) or one (binary). All must be zero if the overall 
+ *          copy format is textual.
+ * 
+ * * @author mycat */ -public class MySQLDataSource extends PhysicalDatasource { - - private final MySQLBackendConnectionFactory factory; - - public MySQLDataSource(DBHostConfig config, DataHostConfig hostConfig, - boolean isReadNode) { - super(config, hostConfig, isReadNode); - this.factory = new MySQLBackendConnectionFactory(); - - } - - @Override - public void createNewConnection(ResponseHandler handler,String schema) throws IOException { - factory.make(this, handler,schema); - } - - @Override - public DBHeartbeat createHeartBeat() { - return new MySQLHeartbeat(this); - } - - - - +public class CopyInResponse extends PostgresPacket { } \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/CopyOutResponse.java b/src/main/java/io/mycat/net/postgres/CopyOutResponse.java new file mode 100644 index 000000000..67e4f776a --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/CopyOutResponse.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * CopyOutResponse (B) 
+ * Byte1('H') Identifies the message as a Start Copy Out response. 
+ *            This message will be followed by copy-out data. Int32 Length of
+ *            message contents in bytes, including self. 
+ * Int8 0 indicates the overall COPY format is textual (rows separated by 
+ *      newlines, columns separated by separator characters, etc). 1 indicates 
+ *      the overall copy format is binary (similar to DataRow format). 
+ *      See COPY for more information. 
+ * Int16 The number of columns in the data to be copied (denoted N below). 
+ * Int16[N] The format codes to be used for each column. Each must presently 
+ *          be zero (text) or one (binary). All must be zero if the overall 
+ *          copy format is textual.
+ * 
+ * + * @author mycat + */ +public class CopyOutResponse extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/DataRow.java b/src/main/java/io/mycat/net/postgres/DataRow.java new file mode 100644 index 000000000..2f7c60a2c --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/DataRow.java @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * DataRow (B) 
+ * Byte1('D') Identifies the message as a data row. 
+ * Int32 Length of message contents in bytes, including self. 
+ * Int16 The number of column values that follow (possibly zero). 
+ *       Next, the following pair of fields appear for each column: 
+ * Int32 The length of the column value, in bytes (this count does not 
+ *       include itself). Can be zero. As a special case, -1 indicates 
+ *       a NULL column value. No value bytes follow in the NULL case.
+ * Byten The value of the column, in the format indicated by the associated
+ *       format code. n is the above length.
+ * 
+ * + * @author mycat + */ +public class DataRow extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/Describe.java b/src/main/java/io/mycat/net/postgres/Describe.java new file mode 100644 index 000000000..5a1f448c0 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/Describe.java @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * Describe (F) 
+ * Byte1('D') Identifies the message as a Describe command.
+ * Int32 Length of message contents in bytes, including self. 
+ * Byte1 'S' to describe a prepared statement; or 'P' to describe a portal. 
+ * String The name of the prepared statement or portal to describe (an empty 
+ *        string selects the unnamed prepared statement or portal).
+ * 
+ * + * @author mycat + */ +public class Describe extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/EmptyQueryResponse.java b/src/main/java/io/mycat/net/postgres/EmptyQueryResponse.java new file mode 100644 index 000000000..dc85141df --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/EmptyQueryResponse.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * EmptyQueryResponse (B) 
+ * Byte1('I') Identifies the message as a response to an empty query 
+ *            string. (This substitutes for CommandComplete.) 
+ * Int32(4) Length of message contents in bytes, including self.
+ * 
+ * + * @author mycat + */ +public class EmptyQueryResponse extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/ErrorResponse.java b/src/main/java/io/mycat/net/postgres/ErrorResponse.java new file mode 100644 index 000000000..cf6080d00 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/ErrorResponse.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * ErrorResponse (B) 
+ * Byte1('E') Identifies the message as an error. 
+ * Int32 Length of message contents in bytes, including self. 
+ *       The message body consists of one or more identified fields, 
+ *       followed by a zero byte as a terminator. Fields can appear 
+ *       in any order. For each field there is the following: 
+ * Byte1 A code identifying the field type; if zero, this is the
+ *       message terminator and no string follows. The presently defined 
+ *       field types are listed in Section 46.6. Since more field types 
+ *       might be added in future, frontends should silently ignore 
+ *       fields of unrecognized type.
+ * String The field value.
+ * 
+ * + * @author mycat + */ +public class ErrorResponse extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/Execute.java b/src/main/java/io/mycat/net/postgres/Execute.java new file mode 100644 index 000000000..72f0da2f5 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/Execute.java @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * Execute (F) 
+ * Byte1('E') Identifies the message as an Execute command.
+ * Int32 Length of message contents in bytes, including self. 
+ * String The name of the portal to execute (an empty string 
+ *        selects the unnamed portal). 
+ * Int32 Maximum number of rows to return, if portal contains a
+ *       query that returns rows (ignored otherwise). 
+ *       Zero denotes "no limit".
+ * 
+ * + * @author mycat + */ +public class Execute extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/Flush.java b/src/main/java/io/mycat/net/postgres/Flush.java new file mode 100644 index 000000000..38d89b1da --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/Flush.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * Flush (F) 
+ * Byte1('H') Identifies the message as a Flush command. 
+ * Int32(4) Length of message contents in bytes, including self.
+ * 
+ * + * @author mycat + */ +public class Flush extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/FunctionCall.java b/src/main/java/io/mycat/net/postgres/FunctionCall.java new file mode 100644 index 000000000..4b6fc3f72 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/FunctionCall.java @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * FunctionCall (F) 
+ * Byte1('F') Identifies the message as a function call.
+ * Int32 Length of message contents in bytes, including self. 
+ * Int32 Specifies the object ID of the function to call. 
+ * Int16 The number of argument format codes that follow (denoted C below). 
+ *       This can be zero to indicate that there are no arguments or that 
+ *       the arguments all use the default format (text); or one, in which 
+ *       case the specified format code is applied to all arguments; or it 
+ *       can equal the actual number of arguments.
+ * Int16[C] The argument format codes. Each must presently be zero (text) or
+ *          one (binary). 
+ * Int16 Specifies the number of arguments being supplied to the function. 
+ *       Next, the following pair of fields appear for each argument: 
+ * Int32 The length of the argument value, in bytes (this count does not include 
+ *       itself). Can be zero. As a special case, -1 indicates a NULL argument 
+ *       value. No value bytes follow in the NULL case. 
+ * Byten The value of the argument, in the format indicated by the associated 
+ *       format code. n is the above length. After the last argument, the 
+ *       following field appears: 
+ * Int16 The format code for the function result. Must presently be zero (text) 
+ *       or one (binary).
+ * 
+ * + * @author mycat + */ +public class FunctionCall extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/FunctionCallResponse.java b/src/main/java/io/mycat/net/postgres/FunctionCallResponse.java new file mode 100644 index 000000000..f9b2e9029 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/FunctionCallResponse.java @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * FunctionCallResponse (B) 
+ * Byte1('V') Identifies the message as a function call result. 
+ * Int32 Length of message contents in bytes, including self.
+ * Int32 The length of the function result value, in bytes (this count does
+ *       not include itself). Can be zero. As a special case, -1 indicates a 
+ *       NULL function result. No value bytes follow in the NULL case. 
+ * Byten The value of the function result, in the format indicated by the 
+ *       associated format code. n is the above length.
+ * 
+ * + * @author mycat + */ +public class FunctionCallResponse extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/NoData.java b/src/main/java/io/mycat/net/postgres/NoData.java new file mode 100644 index 000000000..975c9375b --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/NoData.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * NoData (B) 
+ * Byte1('n') Identifies the message as a no-data indicator.
+ * Int32(4) Length of message contents in bytes, including self.
+ * 
+ * + * @author mycat + */ +public class NoData extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/NoticeResponse.java b/src/main/java/io/mycat/net/postgres/NoticeResponse.java new file mode 100644 index 000000000..596eda180 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/NoticeResponse.java @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * NoticeResponse (B) 
+ * Byte1('N') Identifies the message as a notice. 
+ * Int32 Length of message contents in bytes, including self. The message 
+ *       body consists of one or more identified fields, followed by a zero 
+ *       byte as a terminator. Fields can appear in any order. For each 
+ *       field there is the following: 
+ * Byte1 A code identifying the field type; if zero, this is the message 
+ *       terminator and no string follows. The presently defined field types 
+ *       are listed in Section 46.6. Since more field types might be added
+ *       in future, frontends should silently ignore fields of unrecognized 
+ *       type.
+ * String The field value.
+ * 
+ * + * @author mycat + */ +public class NoticeResponse extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/NotificationResponse.java b/src/main/java/io/mycat/net/postgres/NotificationResponse.java new file mode 100644 index 000000000..b97a0e359 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/NotificationResponse.java @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * NotificationResponse (B) 
+ * Byte1('A') Identifies the message as a notification response. 
+ * Int32 Length of message contents in bytes, including self. 
+ * Int32 The process ID of the notifying backend process.
+ * String The name of the channel that the notify has been raised on. 
+ * String The "payload" string passed from the notifying process.
+ * 
+ * + * @author mycat + */ +public class NotificationResponse extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/ParameterDescription.java b/src/main/java/io/mycat/net/postgres/ParameterDescription.java new file mode 100644 index 000000000..655001e84 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/ParameterDescription.java @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * ParameterDescription (B) 
+ * Byte1('t') Identifies the message as a parameter description. 
+ * Int32 Length of message contents in bytes, including self.
+ * Int16 The number of parameters used by the statement (can be zero). 
+ *       Then, for each parameter, there is the following: 
+ * Int32 Specifies the object ID of the parameter data type.
+ * 
+ * + * @author mycat + */ +public class ParameterDescription extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/ParameterStatus.java b/src/main/java/io/mycat/net/postgres/ParameterStatus.java new file mode 100644 index 000000000..39c06f2d4 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/ParameterStatus.java @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * ParameterStatus (B) 
+ * Byte1('S') Identifies the message as a run-time parameter status report. 
+ * Int32 Length of message contents in bytes, including self. 
+ * String The name of the run-time parameter being reported.
+ * String The current value of the parameter.
+ * 
+ * + * @author mycat + */ +public class ParameterStatus extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/Parse.java b/src/main/java/io/mycat/net/postgres/Parse.java new file mode 100644 index 000000000..7890c23e1 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/Parse.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * Parse (F) 
+ * Byte1('P') Identifies the message as a Parse command. 
+ * Int32 Length of message contents in bytes, including self. 
+ * String The name of the destination prepared statement (an empty string 
+ *        selects the unnamed prepared statement). 
+ * String The query string to be parsed. 
+ * Int16 The number of parameter data types specified (can be zero). Note 
+ *       that this is not an indication of the number of parameters that 
+ *       might appear in the query string, only the number that the frontend 
+ *       wants to prespecify types for. Then, for each parameter, there is 
+ *       the following: 
+ * Int32 Specifies the object ID of the parameter data type. Placing a zero 
+ *       here is equivalent to leaving the type unspecified.
+ * 
+ * + * @author mycat + */ +public class Parse extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/ParseComplete.java b/src/main/java/io/mycat/net/postgres/ParseComplete.java new file mode 100644 index 000000000..c652ea426 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/ParseComplete.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * ParseComplete (B) 
+ * Byte1('1') Identifies the message as a Parse-complete indicator. 
+ * Int32(4) Length of message contents in bytes, including self.
+ * 
+ * + * @author mycat + */ +public class ParseComplete extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/PasswordMessage.java b/src/main/java/io/mycat/net/postgres/PasswordMessage.java new file mode 100644 index 000000000..32a933d3a --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/PasswordMessage.java @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * PasswordMessage (F) 
+ * Byte1('p') Identifies the message as a password response. Note that this is 
+ *            also used for GSSAPI and SSPI response messages (which is really
+ *            a design error, since the contained data is not a null-terminated 
+ *            string in that case, but can be arbitrary binary data).
+ * Int32 Length of message contents in bytes, including self. 
+ * String The password (encrypted, if requested).
+ * 
+ * + * @author mycat + */ +public class PasswordMessage extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/PortalSuspended.java b/src/main/java/io/mycat/net/postgres/PortalSuspended.java new file mode 100644 index 000000000..da99a85ff --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/PortalSuspended.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * PortalSuspended (B) 
+ * Byte1('s') Identifies the message as a portal-suspended indicator. Note this 
+ *            only appears if an Execute message's row-count limit was reached. 
+ * Int32(4) Length of message contents in bytes, including self.
+ * 
+ * + * @author mycat + */ +public class PortalSuspended extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/PostgresPacket.java b/src/main/java/io/mycat/net/postgres/PostgresPacket.java new file mode 100644 index 000000000..6b25e1107 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/PostgresPacket.java @@ -0,0 +1,248 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + * @see http://www.postgresql.org/docs/9.1/interactive/protocol.html + * @author mycat + */ +public abstract class PostgresPacket { + /** + *
+     * AuthenticationOk (B)   
+     * AuthenticationKerberosV5 (B)       
+     * AuthenticationCleartextPassword (B)    
+     * AuthenticationMD5Password (B)    
+     * AuthenticationSCMCredential (B)     
+     * AuthenticationGSS (B)     
+     * AuthenticationSSPI (B)      
+     * AuthenticationGSSContinue (B)
+     * 
+ */ + public static final byte AUTHENTICATION = (byte) 'R'; + + /** + * BackendKeyData (B) + */ + public static final byte BACKEND_KEY_DATA = (byte) 'K'; + + /** + * Bind (F) + */ + public static final byte BIND = (byte) 'B'; + + /** + * BindComplete (B) + */ + public static final byte BIND_COMPLETE = (byte) '2'; + + /** + * CancelRequest (F) + */ + + /** + * Close (F) + */ + public static final byte CLOSE = (byte) 'C'; + + /** + * CloseComplete (B) + */ + public static final byte CLOSE_COMPLETE = (byte) '3'; + + /** + * CommandComplete (B) + */ + public static final byte COMMAND_COMPLETE = (byte) 'C'; + + /** + * CopyData (F & B) + */ + public static final byte COPY_DATA = (byte) 'd'; + + /** + * CopyDone (F & B) + */ + public static final byte COPY_DONE = (byte) 'c'; + + /** + * CopyFail (F) + */ + public static final byte COPY_FAIL = (byte) 'f'; + + /** + * CopyInResponse (B) + */ + public static final byte COPY_IN_RESPONSE = (byte) 'G'; + + /** + * CopyOutResponse (B) + */ + public static final byte COPY_OUT_RESPONSE = (byte) 'H'; + + /** + * CopyBothResponse (B) + */ + public static final byte COPY_BOTH_RESPONSE = (byte) 'W'; + + /** + * DataRow (B) + */ + public static final byte DATA_ROW = (byte) 'D'; + + /** + * Describe (F) + */ + public static final byte DESCRIBE = (byte) 'D'; + + /** + * EmptyQueryResponse (B) + */ + public static final byte EMPTY_QUERY_RESPONSE = (byte) 'I'; + + /** + * ErrorResponse (B) + */ + public static final byte ERROR_RESPONSE = (byte) 'E'; + + /** + * Execute (F) + */ + public static final byte EXECUTE = (byte) 'E'; + + /** + * Flush (F) + */ + public static final byte FLUSH = (byte) 'H'; + + /** + * FunctionCall (F) + */ + public static final byte FUNCTION_CALL = (byte) 'F'; + + /** + * FunctionCallResponse (B) + */ + public static final byte FUNCTION_CALL_RESPONSE = (byte) 'V'; + + /** + * NoData (B) + */ + public static final byte NO_DATA = (byte) 'n'; + + /** + * NoticeResponse (B) + */ + public static final byte 
NOTICE_RESPONSE = (byte) 'N'; + + /** + * NotificationResponse (B) + */ + public static final byte NOTIFICATION_RESPONSE = (byte) 'A'; + + /** + * ParameterDescription (B) + */ + public static final byte PARAMETER_DESCRIPTION = (byte) 't'; + + /** + * ParameterStatus (B) + */ + public static final byte PARAMETER_STATUS = (byte) 'S'; + + /** + * Parse (F) + */ + public static final byte PARSE = (byte) 'P'; + + /** + * ParseComplete (B) + */ + public static final byte PARSE_COMPLETE = (byte) '1'; + + /** + * PasswordMessage (F) + */ + public static final byte PASSWORD_MESSAGE = (byte) 'p'; + + /** + * PortalSuspended (B) + */ + public static final byte PORTAL_SUSPENDED = (byte) 's'; + + /** + * Query (F) + */ + public static final byte QUERY = (byte) 'Q'; + + /** + * ReadyForQuery (B) + */ + public static final byte READY_FOR_QUERY = (byte) 'Z'; + + /** + * RowDescription (B) + */ + public static final byte ROW_DESCRIPTION = (byte) 'T'; + + /** + * SSLRequest (F) + */ + + /** + * StartupMessage (F) + */ + + /** + * Sync (F) + */ + public static final byte SYNC = (byte) 'S'; + + /** + * Terminate (F) + */ + public static final byte TERMINATE = (byte) 'X'; + + private byte type; + private int length; + + public byte getType() { + return type; + } + + public void setType(byte type) { + this.type = type; + } + + public int getLength() { + return length; + } + + public void setLength(int length) { + this.length = length; + } + + + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/Query.java b/src/main/java/io/mycat/net/postgres/Query.java new file mode 100644 index 000000000..32e9b6031 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/Query.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * Query (F) 
+ * Byte1('Q') Identifies the message as a simple query. 
+ * Int32 Length of message contents in bytes, including self. 
+ * String The query string itself.
+ * 
+ * + * @author mycat + */ +public class Query extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/ReadyForQuery.java b/src/main/java/io/mycat/net/postgres/ReadyForQuery.java new file mode 100644 index 000000000..6a5fd28dc --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/ReadyForQuery.java @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * ReadyForQuery (B) 
+ * Byte1('Z') Identifies the message type. ReadyForQuery is sent whenever the 
+ *            backend is ready for a new query cycle. 
+ * Int32(5) Length of message contents in bytes, including self. 
+ * Byte1 Current backend transaction status indicator. Possible values are 'I' 
+ *       if idle (not in a transaction block); 'T' if in a transaction block; 
+ *       or 'E' if in a failed transaction block (queries will be rejected until
+ *       block is ended).
+ * 
+ * + * @author mycat + */ +public class ReadyForQuery extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/RowDescription.java b/src/main/java/io/mycat/net/postgres/RowDescription.java new file mode 100644 index 000000000..7b9ee86b1 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/RowDescription.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * RowDescription (B) 
+ * Byte1('T') Identifies the message as a row description. 
+ * Int32 Length of message contents in bytes, including self.
+ * Int16 Specifies the number of fields in a row (can be zero). Then, for
+ *       each field, there is the following: String The field name. 
+ * Int32 If the field can be identified as a column of a specific table, 
+ *       the object ID of the table; otherwise zero. 
+ * Int16 If the field can be identified as a column of a specific table, the 
+ *       attribute number of the column; otherwise zero. 
+ * Int32 The object ID of the field's data type. 
+ * Int16 The data type size (see pg_type.typlen). Note that negative values 
+ *       denote variable-width types. 
+ * Int32 The type modifier (see pg_attribute.atttypmod). The meaning of the 
+ *       modifier is type-specific.
+ * Int16 The format code being used for the field. Currently will be zero
+ *       (text) or one (binary). In a RowDescription returned from the 
+ *       statement variant of Describe, the format code is not yet known and 
+ *       will always be zero.
+ * 
+ * + * @author mycat + */ +public class RowDescription extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/SSLRequest.java b/src/main/java/io/mycat/net/postgres/SSLRequest.java new file mode 100644 index 000000000..ac74de952 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/SSLRequest.java @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * SSLRequest (F) 
+ * Int32(8) Length of message contents in bytes, including self. 
+ * Int32(80877103) The SSL request code. The value is chosen to contain 1234 in 
+ *                 the most significant 16 bits, and 5679 in the least significant 16 
+ *                 bits. (To avoid confusion, this code must not be the same as any 
+ *                 protocol version number.)
+ * 
+ * + * @author mycat + */ +public class SSLRequest extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/StartupMessage.java b/src/main/java/io/mycat/net/postgres/StartupMessage.java new file mode 100644 index 000000000..cccb51181 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/StartupMessage.java @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * StartupMessage (F) 
+ * Int32 Length of message contents in bytes, including self. 
+ * Int32(196608) The protocol version number. The most significant 16 bits are 
+ *               the major version number (3 for the protocol described here).
+ *               The least significant 16 bits are the minor version number (0 
+ *               for the protocol described here). The protocol version number 
+ *               is followed by one or more pairs of parameter name and value 
+ *               strings. A zero byte is required as a terminator after the 
+ *               last name/value pair. Parameters can appear in any order. user 
+ *               is required, others are optional. Each parameter is specified as: 
+ * String The parameter name. Currently recognized names are: 
+ *        user The database user name to connect as. Required; there is no default. 
+ *        database The database to connect to. Defaults to the user name. 
+ *        options Command-line arguments for the backend. (This is deprecated in 
+ *                favor of setting individual run-time parameters.) In addition to
+ *                the above, any run-time parameter that can be set at backend start 
+ *                time might be listed. Such settings will be applied during backend 
+ *                start (after parsing the command-line options if any). The values 
+ *                will act as session defaults. 
+ * String The parameter value.
+ * 
+ * + * @author mycat + */ +public class StartupMessage extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/Sync.java b/src/main/java/io/mycat/net/postgres/Sync.java new file mode 100644 index 000000000..c15b1a847 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/Sync.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * Sync (F) 
+ * Byte1('S') Identifies the message as a Sync command. 
+ * Int32(4) Length of message contents in bytes, including self.
+ * 
+ * + * @author mycat + */ +public class Sync extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/postgres/Terminate.java b/src/main/java/io/mycat/net/postgres/Terminate.java new file mode 100644 index 000000000..4f4a5c2e3 --- /dev/null +++ b/src/main/java/io/mycat/net/postgres/Terminate.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.net.postgres; + +/** + *
+ * Terminate (F) 
+ * Byte1('X') Identifies the message as a termination.
+ * Int32(4) Length of message contents in bytes, including self.
+ * 
+ * + * @author mycat + */ +public class Terminate extends PostgresPacket { + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/route/MyCATSequnceProcessor.java b/src/main/java/io/mycat/route/MyCATSequnceProcessor.java index e2f171c98..0d8c4edca 100644 --- a/src/main/java/io/mycat/route/MyCATSequnceProcessor.java +++ b/src/main/java/io/mycat/route/MyCATSequnceProcessor.java @@ -1,119 +1,60 @@ package io.mycat.route; -import io.mycat.MycatServer; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.route.parser.druid.DruidSequenceHandler; -import io.mycat.server.ErrorCode; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; +import io.mycat.MycatServer; +import io.mycat.config.ErrorCode; +import io.mycat.route.parser.druid.DruidSequenceHandler; public class MyCATSequnceProcessor { - private static final Logger LOGGER = LoggerFactory - .getLogger(MyCATSequnceProcessor.class); - private LinkedBlockingQueue seqSQLQueue = new LinkedBlockingQueue(); - private volatile boolean running = true; - - public MyCATSequnceProcessor() { - new ExecuteThread().start(); + private static final Logger LOGGER = LoggerFactory.getLogger(MyCATSequnceProcessor.class); + + //使用Druid解析器实现sequence处理 @兵临城下 + private static final DruidSequenceHandler sequenceHandler = new DruidSequenceHandler(MycatServer + .getInstance().getConfig().getSystem().getSequnceHandlerType()); + + private static class InnerMyCATSequnceProcessor{ + private static MyCATSequnceProcessor INSTANCE = new MyCATSequnceProcessor(); } - - public void addNewSql(SessionSQLPair pair) { - seqSQLQueue.add(pair); + + public static 
MyCATSequnceProcessor getInstance(){ + return InnerMyCATSequnceProcessor.INSTANCE; } - - private void outRawData(MySQLFrontConnection sc, String value) { - byte packetId = 0; - int fieldCount = 1; - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - - ResultSetHeaderPacket headerPkg = new ResultSetHeaderPacket(); - headerPkg.fieldCount = fieldCount; - headerPkg.packetId = ++packetId; - - headerPkg.write(bufferArray); - FieldPacket fieldPkg = new FieldPacket(); - fieldPkg.packetId = ++packetId; - fieldPkg.name = StringUtil.encode("SEQUNCE", sc.getCharset()); - fieldPkg.write(bufferArray); - EOFPacket eofPckg = new EOFPacket(); - eofPckg.packetId = ++packetId; - eofPckg.write(bufferArray); - - RowDataPacket rowDataPkg = new RowDataPacket(fieldCount); - rowDataPkg.packetId = ++packetId; - rowDataPkg.add(StringUtil.encode(value, sc.getCharset())); - rowDataPkg.write(bufferArray); - // write last eof - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - - // write buffer - sc.write(bufferArray); + + private MyCATSequnceProcessor() { } - private void executeSeq(SessionSQLPair pair) { + /** + * 锁的粒度控制到序列级别.一个序列一把锁. + * 如果是 db 方式, 可以 给 mycat_sequence表的 name 列 加索引.可以借助mysql 行级锁 提高并发 + * @param pair + */ + public void executeSeq(SessionSQLPair pair) { try { - /* - * // @micmiu 扩展NodeToString实现自定义全局序列号 NodeToString strHandler = new - * ExtNodeToString4SEQ(MycatServer - * .getInstance().getConfig().getSystem() .getSequnceHandlerType()); - * // 如果存在sequence 转化sequence为实际数值 String charset = - * pair.session.getSource().getCharset(); QueryTreeNode ast = - * SQLParserDelegate.parse(pair.sql, charset == null ? 
"utf-8" : - * charset); String sql = strHandler.toString(ast); if - * (sql.toUpperCase().startsWith("SELECT")) { String - * value=sql.substring("SELECT".length()).trim(); - * outRawData(pair.session.getSource(),value); return; } - */ - - // 使用Druid解析器实现sequence处理 @兵临城下 - DruidSequenceHandler sequenceHandler = new DruidSequenceHandler( - MycatServer.getInstance().getConfig().getSystem() - .getSequnceHandlerType()); - + /*// @micmiu 扩展NodeToString实现自定义全局序列号 + NodeToString strHandler = new ExtNodeToString4SEQ(MycatServer + .getInstance().getConfig().getSystem() + .getSequnceHandlerType()); + // 如果存在sequence 转化sequence为实际数值 String charset = pair.session.getSource().getCharset(); - String executeSql = sequenceHandler.getExecuteSql(pair.sql, + QueryTreeNode ast = SQLParserDelegate.parse(pair.sql, charset == null ? "utf-8" : charset); - - pair.session.getSource().routeEndExecuteSQL(executeSql, pair.type, - pair.schema); + String sql = strHandler.toString(ast); + if (sql.toUpperCase().startsWith("SELECT")) { + String value=sql.substring("SELECT".length()).trim(); + outRawData(pair.session.getSource(),value); + return; + }*/ + + String charset = pair.session.getSource().getCharset(); + String executeSql = sequenceHandler.getExecuteSql(pair,charset == null ? "utf-8":charset); + + pair.session.getSource().routeEndExecuteSQL(executeSql, pair.type,pair.schema); } catch (Exception e) { - LOGGER.error("MyCATSequenceProcessor.executeSeq(SesionSQLPair)", e); - pair.session.getSource().writeErrMessage(ErrorCode.ER_YES, - "mycat sequnce err." + e); + LOGGER.error("MyCATSequenceProcessor.executeSeq(SesionSQLPair)",e); + pair.session.getSource().writeErrMessage(ErrorCode.ER_YES,"mycat sequnce err." 
+ e); return; } } - - public void shutdown() { - running = false; - } - - class ExecuteThread extends Thread { - public void run() { - while (running) { - try { - SessionSQLPair pair = seqSQLQueue.poll(100, - TimeUnit.MILLISECONDS); - if (pair != null) { - executeSeq(pair); - } - } catch (Exception e) { - LOGGER.warn("MyCATSequenceProcessor$ExecutorThread", e); - } - } - } - } } diff --git a/src/main/java/io/mycat/route/Procedure.java b/src/main/java/io/mycat/route/Procedure.java new file mode 100644 index 000000000..4f0e5117a --- /dev/null +++ b/src/main/java/io/mycat/route/Procedure.java @@ -0,0 +1,183 @@ +package io.mycat.route; + +import com.google.common.base.*; + +import java.io.Serializable; +import java.sql.Types; +import java.util.*; + +/** + * Created by magicdoom on 2016/3/24. + * + * + * 1.no return + + ok + + + 2.simple + + ok + row + eof + + + 3.list + + + row + row + row + row + eof + ok + + */ +public class Procedure implements Serializable +{ + private String originSql; + private String name; + private String callSql; + private String setSql ; + private String selectSql; + private Set selectColumns=new LinkedHashSet<>(); + private Set listFields=new LinkedHashSet<>(); + private boolean isResultList=false; + + public boolean isResultList() + { + return isResultList; + } + public boolean isResultSimpleValue() + { + return selectSql!=null&&!isResultList; + } + public boolean isResultNothing() + { + return selectSql==null&&!isResultList; + } + public void setResultList(boolean resultList) + { + isResultList = resultList; + } + + public String toPreCallSql(String dbType) + { + StringBuilder sb=new StringBuilder(); + sb.append("{ call ") ; + sb.append(this.getName()).append("(") ; + Collection paramters= this.getParamterMap().values(); + int j=0; + for (ProcedureParameter paramter : paramters) + { + + String name="?"; + String joinStr= j==this.getParamterMap().size()-1?name:name+"," ; + sb.append(joinStr); + j++; + } + sb.append(")}") ; + return 
sb.toString(); + } + + public String toChangeCallSql(String dbType) + { + StringBuilder sb=new StringBuilder(); + sb.append("call ") ; + sb.append(this.getName()).append("(") ; + Collection paramters= this.getParamterMap().values(); + int j=0; + for (ProcedureParameter paramter : paramters) + { + Object value=paramter.getValue()!=null&& Types.VARCHAR==paramter.getJdbcType() ?"'"+paramter.getValue()+"'":paramter.getValue(); + String name=paramter.getValue()==null?paramter.getName():String.valueOf(value); + String joinStr= j==this.getParamterMap().size()-1?name:name+"," ; + sb.append(joinStr); + j++; + } + sb.append(")") ; + if(isResultSimpleValue()) + { + sb.append(";select "); + sb.append( Joiner.on(",").join(selectColumns) ); + } + return sb.toString(); + } + + public Set getListFields() + { + return listFields; + } + + public void setListFields(Set listFields) + { + this.listFields = listFields; + } + + public Set getSelectColumns() + { + return selectColumns; + } + + public String getSetSql() + { + return setSql; + } + + public void setSetSql(String setSql) + { + this.setSql = setSql; + } + + public String getSelectSql() + { + return selectSql; + } + + public void setSelectSql(String selectSql) + { + this.selectSql = selectSql; + } + + private Map paramterMap=new LinkedHashMap<>(); + + public String getOriginSql() + { + return originSql; + } + + public void setOriginSql(String originSql) + { + this.originSql = originSql; + } + + public Map getParamterMap() + { + return paramterMap; + } + + public void setParamterMap(Map paramterMap) + { + this.paramterMap = paramterMap; + } + + public String getName() + { + return name; + } + + public void setName(String name) + { + this.name = name; + } + + public String getCallSql() + { + return callSql; + } + + public void setCallSql(String callSql) + { + this.callSql = callSql; + } +} diff --git a/src/main/java/io/mycat/route/ProcedureParameter.java b/src/main/java/io/mycat/route/ProcedureParameter.java new file mode 100644 
index 000000000..19520fa20 --- /dev/null +++ b/src/main/java/io/mycat/route/ProcedureParameter.java @@ -0,0 +1,77 @@ +package io.mycat.route; + +import java.io.Serializable; +import java.sql.Types; + +/** + * Created by magicdoom on 2016/3/24. + */ +public class ProcedureParameter implements Serializable +{ + public static final String IN="in"; + public static final String OUT="out"; + public static final String INOUT="inout"; + + + private int index; + private String name; + + //in out inout + private String parameterType; + + //java.sql.Types + private int jdbcType= Types.VARCHAR; + + private Object value; + + + public Object getValue() + { + return value; + } + + public void setValue(Object value) + { + this.value = value; + } + + public int getIndex() + { + return index; + } + + public void setIndex(int index) + { + this.index = index; + } + + public String getName() + { + return name; + } + + public void setName(String name) + { + this.name = name; + } + + public String getParameterType() + { + return parameterType; + } + + public void setParameterType(String parameterType) + { + this.parameterType = parameterType; + } + + public int getJdbcType() + { + return jdbcType; + } + + public void setJdbcType(int jdbcType) + { + this.jdbcType = jdbcType; + } +} diff --git a/src/main/java/io/mycat/route/RouteCheckRule.java b/src/main/java/io/mycat/route/RouteCheckRule.java new file mode 100644 index 000000000..7690b07af --- /dev/null +++ b/src/main/java/io/mycat/route/RouteCheckRule.java @@ -0,0 +1,17 @@ +package io.mycat.route; + +import io.mycat.route.function.PartitionByCRC32PreSlot; +import io.mycat.route.function.PartitionByCRC32PreSlot.Range; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * 迁移切换时准备切换阶段需要禁止写操作和读所有分片的sql + */ +public class RouteCheckRule { + public static ConcurrentMap>> migrateRuleMap=new ConcurrentHashMap<>(); + +} diff --git 
a/src/main/java/io/mycat/route/RouteResultset.java b/src/main/java/io/mycat/route/RouteResultset.java index a877906c9..9b3b0a182 100644 --- a/src/main/java/io/mycat/route/RouteResultset.java +++ b/src/main/java/io/mycat/route/RouteResultset.java @@ -1,344 +1,443 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.route; - -import io.mycat.route.util.PageSQLUtil; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.sqlengine.mpp.HavingCols; -import io.mycat.util.FormatUtil; - -import java.io.Serializable; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.Map; - -/** - * @author mycat - */ -public final class RouteResultset implements Serializable { - private String statement; // 原始语句 - private final int sqlType; - private RouteResultsetNode[] nodes; // 路由结果节点 - - private int limitStart; - private boolean cacheAble; - // used to store table's ID->datanodes cache - // format is table.primaryKey - private String primaryKey; - // limit output total - private int limitSize; - private SQLMerge sqlMerge; - - private boolean callStatement = false; // 处理call关键字 - - // 是否为全局表,只有在insert、update、delete、ddl里会判断并修改。默认不是全局表,用于修正全局表修改数据的反馈。 - private boolean globalTableFlag = false; - - //是否完成了路由 - private boolean isFinishedRoute = false; - - //是否自动提交,此属性主要用于记录ServerConnection上的autocommit状态 - private boolean autocommit = true; - - private boolean isLoadData=false; - - //是否可以在从库运行,此属性主要供RouteResultsetNode获取 - private Boolean canRunInReadDB; - - public boolean isLoadData() - { - return isLoadData; - } - - public void setLoadData(boolean isLoadData) - { - this.isLoadData = isLoadData; - } - - public boolean isFinishedRoute() { - return isFinishedRoute; - } - - public void setFinishedRoute(boolean isFinishedRoute) { - this.isFinishedRoute = isFinishedRoute; - } - - public boolean isGlobalTable() { - return globalTableFlag; - } - - public void setGlobalTable(boolean globalTableFlag) { - this.globalTableFlag = globalTableFlag; - } - - public RouteResultset(String stmt, int sqlType) { - this.statement = stmt; - this.limitSize = -1; - this.sqlType = sqlType; - } - - public void resetNodes() { - if (nodes != null) { - for (RouteResultsetNode node : nodes) { - node.resetStatement(); - } - } - } - - public void copyLimitToNodes() { - 
- if(nodes!=null) - { - for (RouteResultsetNode node : nodes) - { - if(node.getLimitSize()==-1&&node.getLimitStart()==0) - { - node.setLimitStart(limitStart); - node.setLimitSize(limitSize); - } - } - - } - } - - - public SQLMerge getSqlMerge() { - return sqlMerge; - } - - public boolean isCacheAble() { - return cacheAble; - } - - public void setCacheAble(boolean cacheAble) { - this.cacheAble = cacheAble; - } - - public boolean needMerge() { - return limitSize > 0 || sqlMerge != null; - } - - public int getSqlType() { - return sqlType; - } - - public boolean isHasAggrColumn() { - return (sqlMerge != null) && sqlMerge.isHasAggrColumn(); - } - - public int getLimitStart() { - return limitStart; - } - - public String[] getGroupByCols() { - return (sqlMerge != null) ? sqlMerge.getGroupByCols() : null; - } - - private SQLMerge createSQLMergeIfNull() { - if (sqlMerge == null) { - sqlMerge = new SQLMerge(); - } - return sqlMerge; - } - - public Map getMergeCols() { - return (sqlMerge != null) ? 
sqlMerge.getMergeCols() : null; - } - - public void setLimitStart(int limitStart) { - this.limitStart = limitStart; - } - - public String getPrimaryKey() { - return primaryKey; - } - - public boolean hasPrimaryKeyToCache() { - return primaryKey != null; - } - - public void setPrimaryKey(String primaryKey) { - if (!primaryKey.contains(".")) { - throw new java.lang.IllegalArgumentException( - "must be table.primarykey fomat :" + primaryKey); - } - this.primaryKey = primaryKey; - } - - /** - * return primary key items ,first is table name ,seconds is primary key - * - * @return - */ - public String[] getPrimaryKeyItems() { - return primaryKey.split("\\."); - } - - public void setOrderByCols(LinkedHashMap orderByCols) { - if (orderByCols != null && !orderByCols.isEmpty()) { - createSQLMergeIfNull().setOrderByCols(orderByCols); - } - } - - public void setHasAggrColumn(boolean hasAggrColumn) { - if (hasAggrColumn) { - createSQLMergeIfNull().setHasAggrColumn(true); - } - } - - public void setGroupByCols(String[] groupByCols) { - if (groupByCols != null && groupByCols.length > 0) { - createSQLMergeIfNull().setGroupByCols(groupByCols); - } - } - - public void setMergeCols(Map mergeCols) { - if (mergeCols != null && !mergeCols.isEmpty()) { - createSQLMergeIfNull().setMergeCols(mergeCols); - } - - } - - public LinkedHashMap getOrderByCols() { - return (sqlMerge != null) ? 
sqlMerge.getOrderByCols() : null; - - } - - public String getStatement() { - return statement; - } - - public RouteResultsetNode[] getNodes() { - return nodes; - } - - public void setNodes(RouteResultsetNode[] nodes) { - if(nodes!=null) - { - int nodeSize=nodes.length; - for (RouteResultsetNode node : nodes) - { - node.setTotalNodeSize(nodeSize); - } - - } - this.nodes = nodes; - } - - /** - * @return -1 if no limit - */ - public int getLimitSize() { - return limitSize; - } - - public void setLimitSize(int limitSize) { - this.limitSize = limitSize; - } - - public void setStatement(String statement) { - this.statement = statement; - } - - public boolean isCallStatement() { - return callStatement; - } - - public void setCallStatement(boolean callStatement) { - this.callStatement = callStatement; - } - - public void changeNodeSqlAfterAddLimit(SchemaConfig schemaConfig, String sourceDbType, String sql, int offset, int count, boolean isNeedConvert) { - if (nodes != null) - { - - Map dataNodeDbTypeMap = schemaConfig.getDataNodeDbTypeMap(); - Map sqlMapCache = new HashMap<>(); - for (RouteResultsetNode node : nodes) - { - String dbType = dataNodeDbTypeMap.get(node.getName()); - if (sourceDbType.equalsIgnoreCase("mysql")) - { - node.setStatement(sql); //mysql之前已经加好limit - } else if (sqlMapCache.containsKey(dbType)) - { - node.setStatement(sqlMapCache.get(dbType)); - } else if(isNeedConvert) - { - String nativeSql = PageSQLUtil.convertLimitToNativePageSql(dbType, sql, offset, count); - sqlMapCache.put(dbType, nativeSql); - node.setStatement(nativeSql); - } else { - node.setStatement(sql); - } - - node.setLimitStart(offset); - node.setLimitSize(count); - } - - - } - } - - public boolean isAutocommit() { - return autocommit; - } - - public void setAutocommit(boolean autocommit) { - this.autocommit = autocommit; - } - - public Boolean getCanRunInReadDB() { - return canRunInReadDB; - } - - public void setCanRunInReadDB(Boolean canRunInReadDB) { - this.canRunInReadDB = 
canRunInReadDB; - } - - public HavingCols getHavingCols() { - return (sqlMerge != null) ? sqlMerge.getHavingCols() : null; - } - - public void setHavings(HavingCols havings) { - if (havings != null) { - createSQLMergeIfNull().setHavingCols(havings); - } - } - - @Override - public String toString() { - StringBuilder s = new StringBuilder(); - s.append(statement).append(", route={"); - if (nodes != null) { - for (int i = 0; i < nodes.length; ++i) { - s.append("\n ").append(FormatUtil.format(i + 1, 3)); - s.append(" -> ").append(nodes[i]); - } - } - s.append("\n}"); - return s.toString(); - } - -} \ No newline at end of file +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.route; + +import com.alibaba.druid.sql.ast.SQLStatement; + +import io.mycat.MycatServer; +import io.mycat.config.MycatConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.route.parser.util.PageSQLUtil; +import io.mycat.sqlengine.mpp.HavingCols; +import io.mycat.util.FormatUtil; + +import java.io.Serializable; +import java.util.*; + +/** + * @author mycat + */ +public final class RouteResultset implements Serializable { + private String statement; // 原始语句 + private final int sqlType; + private RouteResultsetNode[] nodes; // 路由结果节点 + private Set subTables; + private SQLStatement sqlStatement; + + + private int limitStart; + private boolean cacheAble; + // used to store table's ID->datanodes cache + // format is table.primaryKey + private String primaryKey; + // limit output total + private int limitSize; + private SQLMerge sqlMerge; + + private boolean callStatement = false; // 处理call关键字 + + // 是否为全局表,只有在insert、update、delete、ddl里会判断并修改。默认不是全局表,用于修正全局表修改数据的反馈。 + private boolean globalTableFlag = false; + + //是否完成了路由 + private boolean isFinishedRoute = false; + + //是否自动提交,此属性主要用于记录ServerConnection上的autocommit状态 + private boolean autocommit = true; + + private boolean isLoadData=false; + + //是否可以在从库运行,此属性主要供RouteResultsetNode获取 + private Boolean canRunInReadDB; + + // 强制走 master,可以通过 RouteResultset的属性canRunInReadDB=false + // 传给 RouteResultsetNode 来实现,但是 强制走 slave需要增加一个属性来实现: + private Boolean runOnSlave = null; // 默认null表示不施加影响 + + //key=dataNode value=slot + private Map dataNodeSlotMap=new HashMap<>(); + + private boolean selectForUpdate; + + public boolean isSelectForUpdate() { + return selectForUpdate; + } + + public void setSelectForUpdate(boolean selectForUpdate) { + this.selectForUpdate = selectForUpdate; + } + + + private List tables; + + public List getTables() { + return tables; + } + + public void setTables(List tables) { + this.tables = tables; + } + + public Map getDataNodeSlotMap() { + return dataNodeSlotMap; + } 
+ + public void setDataNodeSlotMap(Map dataNodeSlotMap) { + this.dataNodeSlotMap = dataNodeSlotMap; + } + + public Boolean getRunOnSlave() { + return runOnSlave; + } + + public void setRunOnSlave(Boolean runOnSlave) { + this.runOnSlave = runOnSlave; + } + private Procedure procedure; + + public Procedure getProcedure() + { + return procedure; + } + + public void setProcedure(Procedure procedure) + { + this.procedure = procedure; + } + + public boolean isLoadData() + { + return isLoadData; + } + + public void setLoadData(boolean isLoadData) + { + this.isLoadData = isLoadData; + } + + public boolean isFinishedRoute() { + return isFinishedRoute; + } + + public void setFinishedRoute(boolean isFinishedRoute) { + this.isFinishedRoute = isFinishedRoute; + } + + public boolean isGlobalTable() { + return globalTableFlag; + } + + public void setGlobalTable(boolean globalTableFlag) { + this.globalTableFlag = globalTableFlag; + } + + public RouteResultset(String stmt, int sqlType) { + this.statement = stmt; + this.limitSize = -1; + this.sqlType = sqlType; + } + + public void resetNodes() { + if (nodes != null) { + for (RouteResultsetNode node : nodes) { + node.resetStatement(); + } + } + } + + public void copyLimitToNodes() { + + if(nodes!=null) + { + for (RouteResultsetNode node : nodes) + { + if(node.getLimitSize()==-1&&node.getLimitStart()==0) + { + node.setLimitStart(limitStart); + node.setLimitSize(limitSize); + } + } + + } + } + + + public SQLMerge getSqlMerge() { + return sqlMerge; + } + + public boolean isCacheAble() { + return cacheAble; + } + + public void setCacheAble(boolean cacheAble) { + this.cacheAble = cacheAble; + } + + public boolean needMerge() { + return limitSize > 0 || sqlMerge != null; + } + + public int getSqlType() { + return sqlType; + } + + public boolean isHasAggrColumn() { + return (sqlMerge != null) && sqlMerge.isHasAggrColumn(); + } + + public int getLimitStart() { + return limitStart; + } + + public String[] getGroupByCols() { + return (sqlMerge 
!= null) ? sqlMerge.getGroupByCols() : null; + } + + private SQLMerge createSQLMergeIfNull() { + if (sqlMerge == null) { + sqlMerge = new SQLMerge(); + } + return sqlMerge; + } + + public Map getMergeCols() { + return (sqlMerge != null) ? sqlMerge.getMergeCols() : null; + } + + public void setLimitStart(int limitStart) { + this.limitStart = limitStart; + } + + public String getPrimaryKey() { + return primaryKey; + } + + public boolean hasPrimaryKeyToCache() { + return primaryKey != null; + } + + public void setPrimaryKey(String primaryKey) { + if (!primaryKey.contains(".")) { + throw new java.lang.IllegalArgumentException( + "must be table.primarykey fomat :" + primaryKey); + } + this.primaryKey = primaryKey; + } + + /** + * return primary key items ,first is table name ,seconds is primary key + * + * @return + */ + public String[] getPrimaryKeyItems() { + return primaryKey.split("\\."); + } + + public void setOrderByCols(LinkedHashMap orderByCols) { + if (orderByCols != null && !orderByCols.isEmpty()) { + createSQLMergeIfNull().setOrderByCols(orderByCols); + } + } + + public void setHasAggrColumn(boolean hasAggrColumn) { + if (hasAggrColumn) { + createSQLMergeIfNull().setHasAggrColumn(true); + } + } + + public void setGroupByCols(String[] groupByCols) { + if (groupByCols != null && groupByCols.length > 0) { + createSQLMergeIfNull().setGroupByCols(groupByCols); + } + } + + public void setMergeCols(Map mergeCols) { + if (mergeCols != null && !mergeCols.isEmpty()) { + createSQLMergeIfNull().setMergeCols(mergeCols); + } + + } + + public LinkedHashMap getOrderByCols() { + return (sqlMerge != null) ? 
sqlMerge.getOrderByCols() : null; + + } + + public String getStatement() { + return statement; + } + + public RouteResultsetNode[] getNodes() { + return nodes; + } + + public void setNodes(RouteResultsetNode[] nodes) { + if(nodes!=null) + { + int nodeSize=nodes.length; + for (RouteResultsetNode node : nodes) + { + node.setTotalNodeSize(nodeSize); + } + + } + this.nodes = nodes; + } + + /** + * @return -1 if no limit + */ + public int getLimitSize() { + return limitSize; + } + + public void setLimitSize(int limitSize) { + this.limitSize = limitSize; + } + + public void setStatement(String statement) { + this.statement = statement; + } + + public boolean isCallStatement() { + return callStatement; + } + + public void setCallStatement(boolean callStatement) { + this.callStatement = callStatement; + if(nodes!=null) + { + for (RouteResultsetNode node : nodes) + { + node.setCallStatement(callStatement); + } + + } + } + + public void changeNodeSqlAfterAddLimit(SchemaConfig schemaConfig, String sourceDbType, String sql, int offset, int count, boolean isNeedConvert) { + if (nodes != null) + { + + Map dataNodeDbTypeMap = schemaConfig.getDataNodeDbTypeMap(); + Map sqlMapCache = new HashMap<>(); + for (RouteResultsetNode node : nodes) + { + String dbType = dataNodeDbTypeMap.get(node.getName()); + if (dbType.equalsIgnoreCase("mysql")) + { + node.setStatement(sql); //mysql之前已经加好limit + } else if (sqlMapCache.containsKey(dbType)) + { + node.setStatement(sqlMapCache.get(dbType)); + } else if(isNeedConvert) + { + String nativeSql = PageSQLUtil.convertLimitToNativePageSql(dbType, sql, offset, count); + sqlMapCache.put(dbType, nativeSql); + node.setStatement(nativeSql); + } else { + node.setStatement(sql); + } + + node.setLimitStart(offset); + node.setLimitSize(count); + } + + + } + } + + public boolean isAutocommit() { + return autocommit; + } + + public void setAutocommit(boolean autocommit) { + this.autocommit = autocommit; + } + + public Boolean getCanRunInReadDB() { + return 
canRunInReadDB; + } + + public void setCanRunInReadDB(Boolean canRunInReadDB) { + this.canRunInReadDB = canRunInReadDB; + } + + public HavingCols getHavingCols() { + return (sqlMerge != null) ? sqlMerge.getHavingCols() : null; + } + + public void setSubTables(Set subTables) { + this.subTables = subTables; + } + + public void setHavings(HavingCols havings) { + if (havings != null) { + createSQLMergeIfNull().setHavingCols(havings); + } + } + + // Added by winbill, 20160314, for having clause, Begin ==> + public void setHavingColsName(Object[] names) { + if (names != null && names.length > 0) { + createSQLMergeIfNull().setHavingColsName(names); + } + } + // Added by winbill, 20160314, for having clause, End <== + + public SQLStatement getSqlStatement() { + return this.sqlStatement; + } + + public void setSqlStatement(SQLStatement sqlStatement) { + this.sqlStatement = sqlStatement; + } + + public Set getSubTables() { + return this.subTables; + } + + public boolean isDistTable(){ + if(this.getSubTables()!=null && !this.getSubTables().isEmpty() ){ + return true; + } + return false; + } + + @Override + public String toString() { + StringBuilder s = new StringBuilder(); + s.append(statement).append(", route={"); + if (nodes != null) { + for (int i = 0; i < nodes.length; ++i) { + s.append("\n ").append(FormatUtil.format(i + 1, 3)); + s.append(" -> ").append(nodes[i]); + } + } + s.append("\n}"); + return s.toString(); + } + +} diff --git a/src/main/java/io/mycat/route/RouteResultsetNode.java b/src/main/java/io/mycat/route/RouteResultsetNode.java index 3584714b6..ae5ed2172 100644 --- a/src/main/java/io/mycat/route/RouteResultsetNode.java +++ b/src/main/java/io/mycat/route/RouteResultsetNode.java @@ -23,11 +23,13 @@ */ package io.mycat.route; +import java.io.Serializable; +import java.util.Map; +import java.util.Set; + import io.mycat.server.parser.ServerParse; import io.mycat.sqlengine.mpp.LoadData; -import java.io.Serializable; - /** * @author mycat */ @@ -42,13 +44,23 @@ 
public final class RouteResultsetNode implements Serializable , Comparable 0) { - int hintLength = isMatchOldHint ? OLD_MYCAT_HINT.length() : NEW_MYCAT_HINT.length(); - + if (endPos > 0) { // 用!mycat:内部的语句来做路由分析 - String hint = stmt.substring(hintLength, endPos).trim(); - int firstSplitPos = hint.indexOf(HINT_SPLIT); - +// int hintLength = isMatchOldHint ? OLD_MYCAT_HINT.length() : NEW_MYCAT_HINT.length(); + String hint = stmt.substring(hintLength, endPos).trim(); + + int firstSplitPos = hint.indexOf(HINT_SPLIT); if(firstSplitPos > 0 ){ - String hintType = hint.substring(0,firstSplitPos).trim().toLowerCase(Locale.US); - String hintValue = hint.substring(firstSplitPos + HINT_SPLIT.length()).trim(); - if(hintValue.length()==0){ - LOGGER.warn("comment int sql must meet :/*!mycat:type=value*/ or /*#mycat:type=value*/: "+stmt); - throw new SQLSyntaxErrorException("comment int sql must meet :/*!mycat:type=value*/ or /*#mycat:type=value*/: "+stmt); + Map hintMap= parseHint(hint); + String hintType = (String) hintMap.get(MYCAT_HINT_TYPE); + String hintSql = (String) hintMap.get(hintType); + if( hintSql.length() == 0 ) { + LOGGER.warn("comment int sql must meet :/*!mycat:type=value*/ or /*#mycat:type=value*/ or /*mycat:type=value*/: "+stmt); + throw new SQLSyntaxErrorException("comment int sql must meet :/*!mycat:type=value*/ or /*#mycat:type=value*/ or /*mycat:type=value*/: "+stmt); } String realSQL = stmt.substring(endPos + "*/".length()).trim(); HintHandler hintHandler = HintHandlerFactory.getHintHandler(hintType); - if(hintHandler != null){ - rrs = hintHandler.route(sysconf,schema,sqlType,realSQL,charset,sc,tableId2DataNodeCache,hintValue); + if( hintHandler != null ) { + + if ( hintHandler instanceof HintSQLHandler) { + /** + * 修复 注解SQL的 sqlType 与 实际SQL的 sqlType 不一致问题, 如: hint=SELECT,real=INSERT + * fixed by zhuam + */ + int hintSqlType = ServerParse.parse( hintSql ) & 0xff; + rrs = hintHandler.route(sysconf, schema, sqlType, realSQL, charset, sc, 
tableId2DataNodeCache, hintSql,hintSqlType,hintMap); + + } else { + rrs = hintHandler.route(sysconf, schema, sqlType, realSQL, charset, sc, tableId2DataNodeCache, hintSql,sqlType,hintMap); + } + }else{ LOGGER.warn("TODO , support hint sql type : " + hintType); } + }else{//fixed by runfriends@126.com - LOGGER.warn("comment in sql must meet :/*!mycat:type=value*/ or /*#mycat:type=value*/: "+stmt); - throw new SQLSyntaxErrorException("comment in sql must meet :/*!mcat:type=value*/ or /*#mycat:type=value*/: "+stmt); + LOGGER.warn("comment in sql must meet :/*!mycat:type=value*/ or /*#mycat:type=value*/ or /*mycat:type=value*/: "+stmt); + throw new SQLSyntaxErrorException("comment in sql must meet :/*!mcat:type=value*/ or /*#mycat:type=value*/ or /*mycat:type=value*/: "+stmt); } } } else { @@ -113,10 +139,122 @@ public RouteResultset route(SystemConfig sysconf, SchemaConfig schema, charset, sc, tableId2DataNodeCache); } - if (rrs!=null && sqlType == ServerParse.SELECT && rrs.isCacheAble()) { + if (rrs != null && sqlType == ServerParse.SELECT && rrs.isCacheAble()) { sqlRouteCache.putIfAbsent(cacheKey, rrs); } + checkMigrateRule(schema.getName(),rrs,sqlType); return rrs; } -} \ No newline at end of file + //数据迁移的切换准备阶段,需要拒绝写操作和所有的跨多节点写操作 + private void checkMigrateRule(String schemal,RouteResultset rrs,int sqlType ) throws SQLNonTransientException { + if(rrs!=null&&rrs.getTables()!=null){ + boolean isUpdate=isUpdateSql(sqlType); + if(!isUpdate)return; + ConcurrentMap> tableRules= RouteCheckRule.migrateRuleMap.get(schemal.toUpperCase()) ; + if(tableRules!=null){ + for (String table : rrs.getTables()) { + List rangeList= tableRules.get(table.toUpperCase()) ; + if(rangeList!=null&&!rangeList.isEmpty()){ + if(rrs.getNodes().length>1&&isUpdate){ + throw new SQLNonTransientException ("schema:"+schemal+",table:"+table+",sql:"+rrs.getStatement()+" is not allowed,because table is migrate switching,please wait for a moment"); + } + for (PartitionByCRC32PreSlot.Range range : 
rangeList) { + RouteResultsetNode[] routeResultsetNodes= rrs.getNodes(); + for (RouteResultsetNode routeResultsetNode : routeResultsetNodes) { + int slot=routeResultsetNode.getSlot(); + if(isUpdate&&slot>=range.start&&slot<=range.end){ + throw new SQLNonTransientException ("schema:"+schemal+",table:"+table+",sql:"+rrs.getStatement()+" is not allowed,because table is migrate switching,please wait for a moment"); + + } + } + } + } + } + } + } + } + + + private boolean isUpdateSql(int type) { + return ServerParse.INSERT==type||ServerParse.UPDATE==type||ServerParse.DELETE==type||ServerParse.DDL==type; + } + + public static int isHintSql(String sql){ + int j = 0; + int len = sql.length(); + if(sql.charAt(j++) == '/' && sql.charAt(j++) == '*'){ + char c = sql.charAt(j); + // 过滤掉 空格 和 * 两种字符, 支持: "/** !mycat: */" 和 "/** #mycat: */" 形式的注解 + while(j < len && c != '!' && c != '#' && (c == ' ' || c == '*')){ + c = sql.charAt(++j); + } + //注解支持的'!'不被mysql单库兼容, + //注解支持的'#'不被mybatis兼容 + //注解支持的':'不被hibernate兼容 + //考虑用mycat字符前缀标志Hintsql:"/** mycat: */" + if(sql.charAt(j)=='m'){ + j--; + } + if(j + 6 >= len) {// prevent the following sql.charAt overflow + return -1; // false + } + if(sql.charAt(++j) == 'm' && sql.charAt(++j) == 'y' && sql.charAt(++j) == 'c' + && sql.charAt(++j) == 'a' && sql.charAt(++j) == 't' && (sql.charAt(++j) == ':' || sql.charAt(j) == '#')) { + return j + 1; // true,同时返回注解部分的长度 + } + } + return -1; // false + } + + private Map parseHint( String sql) + { + Map map=new HashMap(); + int y=0; + int begin=0; + for(int i=0;i orderByCols; private HavingCols havingCols; + private Object[] havingColsName; // Added by winbill, 20160314, for having clause private Map mergeCols; private String[] groupByCols; private boolean hasAggrColumn; @@ -75,4 +76,12 @@ public HavingCols getHavingCols() { public void setHavingCols(HavingCols havingCols) { this.havingCols = havingCols; } + + public Object[] getHavingColsName() { + return havingColsName; + } + + public void 
setHavingColsName(Object[] havingColsName) { + this.havingColsName = havingColsName; + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/route/SessionSQLPair.java b/src/main/java/io/mycat/route/SessionSQLPair.java index b595d26f8..9ae712a9c 100644 --- a/src/main/java/io/mycat/route/SessionSQLPair.java +++ b/src/main/java/io/mycat/route/SessionSQLPair.java @@ -1,7 +1,7 @@ package io.mycat.route; +import io.mycat.config.model.SchemaConfig; import io.mycat.server.NonBlockingSession; -import io.mycat.server.config.node.SchemaConfig; public class SessionSQLPair { public final NonBlockingSession session; diff --git a/src/main/java/io/mycat/route/factory/RouteStrategyFactory.java b/src/main/java/io/mycat/route/factory/RouteStrategyFactory.java index fc52cca22..d922a9e29 100644 --- a/src/main/java/io/mycat/route/factory/RouteStrategyFactory.java +++ b/src/main/java/io/mycat/route/factory/RouteStrategyFactory.java @@ -1,12 +1,15 @@ package io.mycat.route.factory; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + import io.mycat.MycatServer; +import io.mycat.config.model.SystemConfig; import io.mycat.route.RouteStrategy; import io.mycat.route.impl.DruidMycatRouteStrategy; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; - /** * 路由策略工厂类 * @author wang.dw @@ -14,37 +17,48 @@ */ public class RouteStrategyFactory { private static RouteStrategy defaultStrategy = null; - private static boolean isInit = false; + private static volatile boolean isInit = false; private static ConcurrentMap strategyMap = new ConcurrentHashMap(); - - private RouteStrategyFactory() { - - } - - private static void init() { - String defaultSqlParser = MycatServer.getInstance().getConfig().getSystem().getDefaultSqlParser(); + public static void init() { + SystemConfig config = MycatServer.getInstance().getConfig().getSystem(); + + String defaultSqlParser 
= config.getDefaultSqlParser(); defaultSqlParser = defaultSqlParser == null ? "" : defaultSqlParser; //修改为ConcurrentHashMap,避免并发问题 strategyMap.putIfAbsent("druidparser", new DruidMycatRouteStrategy()); - + defaultStrategy = strategyMap.get(defaultSqlParser); if(defaultStrategy == null) { defaultStrategy = strategyMap.get("druidparser"); + defaultSqlParser = "druidparser"; } + config.setDefaultSqlParser(defaultSqlParser); + isInit = true; + } + private RouteStrategyFactory() { + } + + public static RouteStrategy getRouteStrategy() { - if(!isInit) { - init(); - isInit = true; - } +// if(!isInit) { +// synchronized(RouteStrategyFactory.class){ +// if(!isInit){ +// init(); +// } +// } +// } return defaultStrategy; } public static RouteStrategy getRouteStrategy(String parserType) { - if(!isInit) { - init(); - isInit = true; - } +// if(!isInit) { +// synchronized(RouteStrategyFactory.class){ +// if(!isInit){ +// init(); +// } +// } +// } return strategyMap.get(parserType); } } diff --git a/src/main/java/io/mycat/route/function/AbstractPartitionAlgorithm.java b/src/main/java/io/mycat/route/function/AbstractPartitionAlgorithm.java index ac70b66a1..9abfc45dd 100644 --- a/src/main/java/io/mycat/route/function/AbstractPartitionAlgorithm.java +++ b/src/main/java/io/mycat/route/function/AbstractPartitionAlgorithm.java @@ -1,9 +1,10 @@ package io.mycat.route.function; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.Map; +import io.mycat.config.model.TableConfig; +import io.mycat.config.model.rule.RuleAlgorithm; +import java.io.Serializable; +import java.util.List; /** * 路由分片函数抽象类 @@ -12,11 +13,11 @@ * @author lxy * */ -public abstract class AbstractPartitionAlgorithm implements RuleAlgorithm { - private Map config = new LinkedHashMap(); +public abstract class AbstractPartitionAlgorithm implements RuleAlgorithm ,Serializable { @Override - public void init() { } + public void init() { + } /** * 返回所有被路由到的节点的编号 @@ -24,10 +25,10 @@ public void init() { } 
* 返回null表示没有节点被路由到 */ @Override - public Integer[] calculateRange(String beginValue, String endValue) { + public Integer[] calculateRange(String beginValue, String endValue) { return new Integer[0]; } - + /** * 对于存储数据按顺序存放的字段做范围路由,可以使用这个函数 * @param algorithm @@ -35,7 +36,7 @@ public Integer[] calculateRange(String beginValue, String endValue) { * @param endValue * @return */ - public static Integer[] calculateSequenceRange(AbstractPartitionAlgorithm algorithm, String beginValue, String endValue) { + public static Integer[] calculateSequenceRange(AbstractPartitionAlgorithm algorithm, String beginValue, String endValue) { Integer begin = 0, end = 0; begin = algorithm.calculate(beginValue); end = algorithm.calculate(endValue); @@ -43,26 +44,59 @@ public static Integer[] calculateSequenceRange(AbstractPartitionAlgorithm algori if(begin == null || end == null){ return new Integer[0]; } - + if (end >= begin) { int len = end-begin+1; Integer [] re = new Integer[len]; - + for(int i =0;i getConfig() { - return config; + + /** + * + * 分片表所跨的节点数与分片算法分区数一致性校验 + * @param tableConf + * @return + * -1 if table datanode size < rule function partition size + * 0 if table datanode size == rule function partition size + * 1 if table datanode size > rule function partition size + */ + public final int suitableFor(TableConfig tableConf) { + int nPartition = getPartitionNum(); + if(nPartition > 0) { // 对于有限制分区数的规则,进行检查 + int dnSize = tableConf.getDataNodes().size(); + boolean distTable = tableConf.isDistTable(); + List tables = tableConf.getDistTables(); + if(distTable){ + if(tables.size() < nPartition){ + return -1; + } else if(dnSize > nPartition) { + return 1; + } + }else{ + if(dnSize < nPartition) { + return -1; + } else if(dnSize > nPartition) { + return 1; + } + } + } + return 0; } - public void setConfig(Map config) { - this.config = config; + + /** + * 返回分区数, 返回-1表示分区数没有限制 + * @return + */ + public int getPartitionNum() { + return -1; // 表示没有限制 } - + } diff --git 
a/src/main/java/io/mycat/route/function/AutoPartitionByLong.java b/src/main/java/io/mycat/route/function/AutoPartitionByLong.java index 5005a1db6..b33d33b29 100644 --- a/src/main/java/io/mycat/route/function/AutoPartitionByLong.java +++ b/src/main/java/io/mycat/route/function/AutoPartitionByLong.java @@ -1,104 +1,168 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.route.function; - - -import java.util.Set; - -/** - * auto partition by Long ,can be used in auto increment primary key partition - * - * @author wuzhi - */ -public class AutoPartitionByLong extends AbstractPartitionAlgorithm implements RuleAlgorithm{ - - private LongRange[] longRongs; - private int defaultNode = -1; - - - @Override - public void init() { - initialize(); - } - - @Override - public Integer calculate(String columnValue) { - long value = Long.valueOf(columnValue); - Integer rst = null; - for (LongRange longRang : this.longRongs) { - if (value <= longRang.valueEnd && value >= longRang.valueStart) { - return longRang.nodeIndx; - } - } - //数据超过范围,暂时使用配置的默认节点 - if(rst ==null && defaultNode>=0){ - return defaultNode ; - } - return rst; - } - - @Override - public Integer[] calculateRange(String beginValue, String endValue) { - return AbstractPartitionAlgorithm.calculateSequenceRange(this, beginValue, endValue); - } - - private void initialize() { - if (this.getConfig().isEmpty()) { - throw new RuntimeException("can't find range config, like 0 "); - } - longRongs = new LongRange[this.getConfig().size()]; - Set keys = this.getConfig().keySet(); - int i=0; - for(String key : keys){ - String pairs[] = key.trim().split("-"); - long longStart = NumberParseUtil.parseLong(pairs[0].trim()); - long longEnd = NumberParseUtil.parseLong(pairs[1].trim()); - int nodeId = Integer.parseInt(String.valueOf(this.getConfig().get(key))); - longRongs[i] = new LongRange(nodeId, longStart, longEnd); - i++; - } - } - - public int getDefaultNode() { - return defaultNode; - } - - public void setDefaultNode(int defaultNode) { - this.defaultNode = defaultNode; - } - - static class LongRange { - public final int nodeIndx; - public final long valueStart; - public final long valueEnd; - - public LongRange(int nodeIndx, long valueStart, long valueEnd) { - super(); - this.nodeIndx = nodeIndx; - this.valueStart = valueStart; - this.valueEnd = valueEnd; - } - - } +/* 
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.route.function; + +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.Set; + +import io.mycat.config.model.rule.RuleAlgorithm; + +/** + * auto partition by Long ,can be used in auto increment primary key partition + * + * @author wuzhi + */ +public class AutoPartitionByLong extends AbstractPartitionAlgorithm implements RuleAlgorithm{ + + private String mapFile; + private LongRange[] longRongs; + + private int defaultNode = -1; + @Override + public void init() { + + initialize(); + } + + public void setMapFile(String mapFile) { + this.mapFile = mapFile; + } + + @Override + public Integer calculate(String columnValue) { +// columnValue = NumberParseUtil.eliminateQoute(columnValue); + try { + long value = Long.parseLong(columnValue); + Integer rst = null; + for (LongRange longRang : this.longRongs) { + if (value <= longRang.valueEnd && value >= longRang.valueStart) { + return longRang.nodeIndx; + } + } + //数据超过范围,暂时使用配置的默认节点 + if (rst == null && defaultNode >= 0) { + return defaultNode; + } + return rst; + } catch (NumberFormatException e){ + throw new IllegalArgumentException(new StringBuilder().append("columnValue:").append(columnValue).append(" Please eliminate any quote and non number within it.").toString(),e); + } + } + + @Override + public Integer[] calculateRange(String beginValue, String endValue) { + return AbstractPartitionAlgorithm.calculateSequenceRange(this, beginValue, endValue); + } + + @Override + public int getPartitionNum() { +// int nPartition = longRongs.length; + + /* + * fix #1284 这里的统计应该统计Range的nodeIndex的distinct总数 + */ + Set distNodeIdxSet = new HashSet(); + for(LongRange range : longRongs) { + distNodeIdxSet.add(range.nodeIndx); + } + int nPartition = distNodeIdxSet.size(); + return nPartition; + } + + private void initialize() { + BufferedReader in = null; + try { + // FileInputStream fin = new 
FileInputStream(new File(fileMapPath)); + InputStream fin = this.getClass().getClassLoader() + .getResourceAsStream(mapFile); + if (fin == null) { + throw new RuntimeException("can't find class resource file " + + mapFile); + } + in = new BufferedReader(new InputStreamReader(fin)); + LinkedList longRangeList = new LinkedList(); + + for (String line = null; (line = in.readLine()) != null;) { + line = line.trim(); + if (line.startsWith("#") || line.startsWith("//")) { + continue; + } + int ind = line.indexOf('='); + if (ind < 0) { + System.out.println(" warn: bad line int " + mapFile + " :" + + line); + continue; + } + String pairs[] = line.substring(0, ind).trim().split("-"); + long longStart = NumberParseUtil.parseLong(pairs[0].trim()); + long longEnd = NumberParseUtil.parseLong(pairs[1].trim()); + int nodeId = Integer.parseInt(line.substring(ind + 1) + .trim()); + longRangeList + .add(new LongRange(nodeId, longStart, longEnd)); + + } + longRongs = longRangeList.toArray(new LongRange[longRangeList + .size()]); + } catch (Exception e) { + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } else { + throw new RuntimeException(e); + } + + } finally { + try { + in.close(); + } catch (Exception e2) { + } + } + } + + public int getDefaultNode() { + return defaultNode; + } + + public void setDefaultNode(int defaultNode) { + this.defaultNode = defaultNode; + } + + static class LongRange { + public final int nodeIndx; + public final long valueStart; + public final long valueEnd; + + public LongRange(int nodeIndx, long valueStart, long valueEnd) { + super(); + this.nodeIndx = nodeIndx; + this.valueStart = valueStart; + this.valueEnd = valueEnd; + } + + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/route/function/LatestMonthPartion.java b/src/main/java/io/mycat/route/function/LatestMonthPartion.java index 7a234a382..f871162ad 100644 --- a/src/main/java/io/mycat/route/function/LatestMonthPartion.java +++ 
b/src/main/java/io/mycat/route/function/LatestMonthPartion.java @@ -33,17 +33,20 @@ public void setSplitOneDay(int split) { } @Override - public Integer calculate(String columnValue) { - int valueLen = columnValue.length(); - int day = Integer.valueOf(columnValue.substring(valueLen - 4, - valueLen - 2)); - int hour = Integer.valueOf(columnValue.substring(valueLen - 2)); - int dnIndex = (day - 1) * splitOneDay + hour / hourSpan; - return dnIndex; - + public Integer calculate(String columnValue) { + try { + int valueLen = columnValue.length(); + int day = Integer.parseInt(columnValue.substring(valueLen - 4, + valueLen - 2)); + int hour = Integer.parseInt(columnValue.substring(valueLen - 2)); + int dnIndex = (day - 1) * splitOneDay + hour / hourSpan; + return dnIndex; + }catch (NumberFormatException e){ + throw new IllegalArgumentException(new StringBuilder().append("columnValue:").append(columnValue).append(" Please check if the format satisfied.").toString(),e); + } } - public Integer[] calculateRange(String beginValue, String endValue) { + public Integer[] calculateRange(String beginValue, String endValue) { return calculateSequenceRange(this,beginValue, endValue); } diff --git a/src/main/java/io/mycat/route/function/NumberParseUtil.java b/src/main/java/io/mycat/route/function/NumberParseUtil.java index a4b7b3e69..3ff729a62 100644 --- a/src/main/java/io/mycat/route/function/NumberParseUtil.java +++ b/src/main/java/io/mycat/route/function/NumberParseUtil.java @@ -24,6 +24,30 @@ package io.mycat.route.function; public class NumberParseUtil { + /** + * 只去除开头结尾的引号,而且是结对去除,语法不对的话通不过 + * @param number + * @return + */ + public static String eliminateQoute(String number){ + number = number.trim(); + if(number.contains("\"")){ + if(number.charAt(0)=='\"'){ + number = number.substring(1); + if(number.charAt(number.length()-1)=='\"'){ + number = number.substring(0,number.length()-1); + } + } + }else if(number.contains("\'")){ + if(number.charAt(0)=='\''){ + number = 
number.substring(1); + if(number.charAt(number.length()-1)=='\''){ + number = number.substring(0,number.length()-1); + } + } + } + return number; + } /** * can parse values like 200M ,200K,200M1(2000001) diff --git a/src/main/java/io/mycat/route/function/PartitionByCRC32PreSlot.java b/src/main/java/io/mycat/route/function/PartitionByCRC32PreSlot.java new file mode 100644 index 000000000..43c17abc9 --- /dev/null +++ b/src/main/java/io/mycat/route/function/PartitionByCRC32PreSlot.java @@ -0,0 +1,310 @@ +package io.mycat.route.function; + +import com.google.common.base.Joiner; +import com.google.common.base.Splitter; +import com.google.common.io.Files; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.rule.RuleAlgorithm; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.*; +import java.nio.charset.Charset; +import java.util.*; + +/** + * 自动迁移御用分片算法,预分slot 102400个,映射到dn上,再conf下会保存映射文件,请不要修改 + * + * @author nange magicdoom@gmail.com + */ +public class PartitionByCRC32PreSlot extends AbstractPartitionAlgorithm + implements RuleAlgorithm, TableRuleAware, SlotFunction,ReloadFunction { + + private static final Logger LOGGER = LoggerFactory.getLogger("PartitionByCRC32PreSlot"); + + public static final int DEFAULT_SLOTS_NUM = 102400; + + private static final Charset DEFAULT_CHARSET = Charset.forName("UTF-8"); + private Map> rangeMap = new TreeMap<>(); + + private int count; + //slot:index + private int[] rangeMap2 = new int[DEFAULT_SLOTS_NUM]; + private int slot = -1; + + public Map> getRangeMap() { + return rangeMap; + } + + public void saveSlotMapping(Map> rangeMap) { + this.rangeMap = rangeMap; + + Properties prop = new Properties(); + File file = new File(SystemConfig.getHomePath(), "conf" + File.separator +"ruledata"+ File.separator + ruleName + ".properties"); + if (file.exists()) + file.delete(); + for (Map.Entry> integerListEntry : rangeMap.entrySet()) { + String key = String.valueOf(integerListEntry.getKey()); + List 
values = new ArrayList<>(); + for (Range range : integerListEntry.getValue()) { + values.add(range.start + "-" + range.end); + } + prop.setProperty(key, Joiner.on(",").join(values)); + } + try { + Files.createParentDirs(file); + } catch (IOException e) { + throw new RuntimeException(e); + } + try (FileOutputStream out = new FileOutputStream(file)) { + prop.store(out, "WARNING !!!Please do not modify or delete this file!!!"); + } catch (IOException e) { + throw new RuntimeException(e); + } + + } + + private Properties loadProps(String name, boolean forceNew) { + Properties prop = new Properties(); + File file = new File(SystemConfig.getHomePath(), "conf" + File.separator +"ruledata"+ File.separator + ruleName + ".properties"); + if (file.exists() && forceNew) + file.delete(); + if (!file.exists()) { + prop = genarateP(); + try { + Files.createParentDirs(file); + } catch (IOException e) { + throw new RuntimeException(e); + } + try (FileOutputStream out = new FileOutputStream(file)) { + prop.store(out, "WARNING !!!Please do not modify or delete this file!!!"); + } catch (IOException e) { + throw new RuntimeException(e); + } + return prop; + } + + try (FileInputStream filein = new FileInputStream(file)) { + prop.load(filein); + } catch (Exception e) { + throw new RuntimeException(e); + } + return prop; + } + + private Properties genarateP() { + int slotSize = DEFAULT_SLOTS_NUM / count; + Properties prop = new Properties(); + for (int i = 0; i < count; i++) { + if (i == count - 1) { + prop.put(String.valueOf(i), i * slotSize + "-" + (DEFAULT_SLOTS_NUM - 1)); + } else { + prop.put(String.valueOf(i), i * slotSize + "-" + ((i + 1) * slotSize - 1)); + } + + } + + return prop; + } + + private Map> convertToMap(Properties p) { + Map> map = new TreeMap<>(); + for (Object o : p.keySet()) { + String k = (String) o; + String v = p.getProperty(k); + List ranges = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(v); + List rangeList = new ArrayList<>(); + for (String 
range : ranges) { + List vv = Splitter.on("-").omitEmptyStrings().trimResults().splitToList(range); + if (vv.size() == 2) { + Range ran = new Range(Integer.parseInt(vv.get(0)), Integer.parseInt(vv.get(1))); + rangeList.add(ran); + + } else if (vv.size() == 1) { + Range ran = new Range(Integer.parseInt(vv.get(0)), Integer.parseInt(vv.get(0))); + rangeList.add(ran); + + } else { + throw new RuntimeException("load crc32slot datafile error:dn=" + k + ",value=" + range); + } + } + map.put(Integer.parseInt(k), rangeList); + } + + return map; + } + + @Override public void init() { + + super.init(); + if (ruleName != null) { + Properties p = loadProps(ruleName, false); + rangeMap = convertToMap(p); + hack(); + } + } + + public void reInit() { + + if (ruleName != null) { + Properties p = loadProps(ruleName, true); + rangeMap = convertToMap(p); + hack(); + } + } + + + private void hack( ) + { + //todo 优化 + Iterator>> iterator = rangeMap.entrySet().iterator(); + while (iterator + .hasNext()) { + Map.Entry> rangeEntry = iterator.next(); + List range = rangeEntry.getValue(); + for (Range range1 : range) { + for(int i=range1.start;i<=range1.end;i++) + { + rangeMap2[i]=rangeEntry.getKey() ; + } + } + + } + } + /** + * 节点的数量 + * + * @param count + */ + public void setCount(int count) { + this.count = count; + } + + @Override public Integer calculate(String columnValue) { + if (ruleName == null) + throw new RuntimeException(); + PureJavaCrc32 crc32 = new PureJavaCrc32(); + byte[] bytes = columnValue.getBytes(DEFAULT_CHARSET); + crc32.update(bytes, 0, bytes.length); + long x = crc32.getValue(); + int slot = (int) (x % DEFAULT_SLOTS_NUM); + this.slot = slot; + return rangeMap2[slot]; +// //todo 优化 +// for (Map.Entry> rangeEntry : rangeMap.entrySet()) { +// List range = rangeEntry.getValue(); +// for (Range range1 : range) { +// if (slot >= range1.start && slot <= range1.end) { +// this.slot = slot; +// return rangeEntry.getKey(); +// } +// } +// +// } +// this.slot = slot; +// int 
slotSize = DEFAULT_SLOTS_NUM / count; +// +// int index = slot / slotSize; +// if (slotSize * count != DEFAULT_SLOTS_NUM && index > count - 1) { +// index = (count - 1); +// } +// return index; + } + + @Override public int getPartitionNum() { + return this.count; + } + + private static void hashTest() throws IOException { + PartitionByCRC32PreSlot hash = new PartitionByCRC32PreSlot(); + hash.setRuleName("test"); + hash.count = 1024;//分片数 + + hash.reInit(); + long start = System.currentTimeMillis(); + int[] bucket = new int[hash.count]; + + Map> hashed = new HashMap<>(); + + int total = 1000_0000;//数据量 + int c = 0; + for (int i = 100_0000; i < total + 100_0000; i++) {//假设分片键从100万开始 + c++; + int h = hash.calculate(Integer.toString(i)); + if (h >= hash.count) { + System.out.println("error:" + h); + } + bucket[h]++; + List list = hashed.get(h); + if (list == null) { + list = new ArrayList<>(); + hashed.put(h, list); + } + list.add(i); + } + System.out.println(c + " " + total); + double d = 0; + c = 0; + int idx = 0; + System.out.println("index bucket ratio"); + for (int i : bucket) { + d += i / (double) total; + c += i; + System.out.println(idx++ + " " + i + " " + (i / (double) total)); + } + System.out.println(d + " " + c); + + long used = System.currentTimeMillis() - start; + + System.out.println("tps " + total * 1000.0 / used); + System.out.println("****************************************************"); + + } + + public static void main(String[] args) throws IOException { + hashTest(); + } + + private String tableName; + private String ruleName; + + @Override public void setTableName(String tableName) { + this.tableName = tableName; + } + + @Override public void setRuleName(String ruleName) { + this.ruleName = ruleName; + } + + @Override public String getTableName() { + return tableName; + } + + @Override public String getRuleName() { + return ruleName; + } + + @Override public int slotValue() { + return slot; + } + + @Override public void reload() { + init(); + } 
+ + public static class Range implements Serializable { + public Range(int start, int end) { + this.start = start; + this.end = end; + size = end - start + 1; + } + + public Range() { + } + + public int start; + public int end; + + public int size; + } +} diff --git a/src/main/java/io/mycat/route/function/PartitionByDate.java b/src/main/java/io/mycat/route/function/PartitionByDate.java index a25479c22..be8008f8c 100644 --- a/src/main/java/io/mycat/route/function/PartitionByDate.java +++ b/src/main/java/io/mycat/route/function/PartitionByDate.java @@ -1,11 +1,12 @@ package io.mycat.route.function; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.text.ParseException; import java.text.SimpleDateFormat; +import java.util.*; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import io.mycat.config.model.rule.RuleAlgorithm; /** * 例子 按日期列分区 格式 between操作解析的范例 @@ -26,6 +27,7 @@ public class PartitionByDate extends AbstractPartitionAlgorithm implements RuleA private long endDate; private int nCount; + private ThreadLocal formatter; private static final long oneDay = 86400000; @@ -40,15 +42,21 @@ public void init() { endDate = new SimpleDateFormat(dateFormat).parse(sEndDate).getTime(); nCount = (int) ((endDate - beginDate) / partionTime) + 1; } + formatter = new ThreadLocal() { + @Override + protected SimpleDateFormat initialValue() { + return new SimpleDateFormat(dateFormat); + } + }; } catch (ParseException e) { throw new java.lang.IllegalArgumentException(e); } } @Override - public Integer calculate(String columnValue) { + public Integer calculate(String columnValue) { try { - long targetTime = new SimpleDateFormat(dateFormat).parse(columnValue).getTime(); + long targetTime = formatter.get().parse(columnValue).getTime(); int targetPartition = (int) ((targetTime - beginDate) / partionTime); if(targetTime>endDate && nCount!=0){ @@ -57,14 +65,42 @@ public Integer calculate(String columnValue) { return targetPartition; } catch (ParseException 
e) { - throw new java.lang.IllegalArgumentException(e); - + throw new IllegalArgumentException(new StringBuilder().append("columnValue:").append(columnValue).append(" Please check if the format satisfied.").toString(),e); } } @Override - public Integer[] calculateRange(String beginValue, String endValue) { - return AbstractPartitionAlgorithm.calculateSequenceRange(this, beginValue, endValue); + public Integer[] calculateRange(String beginValue, String endValue) { + SimpleDateFormat format = new SimpleDateFormat(this.dateFormat); + try { + Date beginDate = format.parse(beginValue); + Date endDate = format.parse(endValue); + Calendar cal = Calendar.getInstance(); + List list = new ArrayList(); + while(beginDate.getTime() <= endDate.getTime()){ + Integer nodeValue = this.calculate(format.format(beginDate)); + if(Collections.frequency(list, nodeValue) < 1) list.add(nodeValue); + cal.setTime(beginDate); + cal.add(Calendar.DATE, 1); + beginDate = cal.getTime(); + } + + Integer[] nodeArray = new Integer[list.size()]; + for (int i=0;i 0 ? count : -1; } public void setsBeginDate(String sBeginDate) { diff --git a/src/main/java/io/mycat/route/function/PartitionByFileMap.java b/src/main/java/io/mycat/route/function/PartitionByFileMap.java index 6a3f3411c..dd2850eb7 100644 --- a/src/main/java/io/mycat/route/function/PartitionByFileMap.java +++ b/src/main/java/io/mycat/route/function/PartitionByFileMap.java @@ -1,110 +1,161 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.route.function; - - -import java.util.HashMap; -import java.util.Map; -import java.util.Set; - -/** - * - * @author mycat - */ -public class PartitionByFileMap extends AbstractPartitionAlgorithm implements RuleAlgorithm { - - private Map app2Partition; - /** - * Map app2Partition中key值的类型:默认值为0,0表示Integer,非零表示String - */ - private int type; - - /** - * 默认节点在map中的key - */ - private static final String DEFAULT_NODE = "DEFAULT_NODE"; - - /** - * 默认节点:小于0表示不设置默认节点,大于等于0表示设置默认节点 - * - * 默认节点的作用:枚举分片时,如果碰到不识别的枚举值,就让它路由到默认节点 - * 如果不配置默认节点(defaultNode值小于0表示不配置默认节点),碰到 - * 不识别的枚举值就会报错, - * like this:can't find datanode for sharding column:column_name val:ffffffff - */ - private int defaultNode = -1; - - @Override - public void init() { - initialize(); - } - - public void setType(int type) { - this.type = type; - } - public void setDefaultNode(int defaultNode) { - this.defaultNode = defaultNode; - } - public int getType() { - return type; - } - public int getDefaultNode() { - return defaultNode; - } - - @Override - public Integer calculate(String columnValue) { - Object value = columnValue; - if(type == 0) { - value = Integer.valueOf(columnValue); - } - Integer rst = null; - Integer pid = app2Partition.get(value); - if (pid != null) { - rst = pid; - } else { - rst 
=app2Partition.get(DEFAULT_NODE); - } - return rst; - } - - private void initialize() { - if (this.getConfig().isEmpty()) { - throw new RuntimeException("can't find enum config, like 01 "); - } - Set keys = this.getConfig().keySet(); - app2Partition = new HashMap(); - for(String key : keys){ - if(type == 0) { - app2Partition.put(Integer.valueOf(key), Integer.valueOf(String.valueOf(this.getConfig().get(key)))); - }else { - app2Partition.put(key, Integer.valueOf((String)this.getConfig().get(key))); - } - } - //设置默认节点 - if(defaultNode >= 0) { - app2Partition.put(DEFAULT_NODE, defaultNode); - } - } +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.route.function; + +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import io.mycat.config.model.rule.RuleAlgorithm; + +/** + * + * @author mycat + */ +public class PartitionByFileMap extends AbstractPartitionAlgorithm implements RuleAlgorithm { + + private String mapFile; + private Map app2Partition; + /** + * Map app2Partition中key值的类型:默认值为0,0表示Integer,非零表示String + */ + private int type; + + /** + * 默认节点在map中的key + */ + private static final String DEFAULT_NODE = "DEFAULT_NODE"; + + /** + * 默认节点:小于0表示不设置默认节点,大于等于0表示设置默认节点 + * + * 默认节点的作用:枚举分片时,如果碰到不识别的枚举值,就让它路由到默认节点 + * 如果不配置默认节点(defaultNode值小于0表示不配置默认节点),碰到 + * 不识别的枚举值就会报错, + * like this:can't find datanode for sharding column:column_name val:ffffffff + */ + private int defaultNode = -1; + + @Override + public void init() { + + initialize(); + } + + public void setMapFile(String mapFile) { + this.mapFile = mapFile; + } + + public void setType(int type) { + this.type = type; + } + + public void setDefaultNode(int defaultNode) { + this.defaultNode = defaultNode; + } + + @Override + public Integer calculate(String columnValue) { + try { + Object value = columnValue; + if (type == 0) { + value = Integer.valueOf(columnValue); + } + Integer rst = null; + Integer pid = app2Partition.get(value); + if (pid != null) { + rst = pid; + } else { + rst = app2Partition.get(DEFAULT_NODE); + } + return rst; + } catch (NumberFormatException e){ + throw new IllegalArgumentException(new StringBuilder().append("columnValue:").append(columnValue).append(" Please check if the format satisfied.").toString(),e); + } + } + + @Override + public int getPartitionNum() { + Set set = new HashSet(app2Partition.values()); + int count = set.size(); + return count; + } + + private void initialize() { + BufferedReader in = null; + try { + // FileInputStream fin = new 
FileInputStream(new File(fileMapPath)); + InputStream fin = this.getClass().getClassLoader() + .getResourceAsStream(mapFile); + if (fin == null) { + throw new RuntimeException("can't find class resource file " + + mapFile); + } + in = new BufferedReader(new InputStreamReader(fin)); + + app2Partition = new HashMap(); + + for (String line = null; (line = in.readLine()) != null;) { + line = line.trim(); + if (line.startsWith("#") || line.startsWith("//")) { + continue; + } + int ind = line.indexOf('='); + if (ind < 0) { + continue; + } + try { + String key = line.substring(0, ind).trim(); + int pid = Integer.parseInt(line.substring(ind + 1).trim()); + if(type == 0) { + app2Partition.put(Integer.parseInt(key), pid); + } else { + app2Partition.put(key, pid); + } + } catch (Exception e) { + } + } + //设置默认节点 + if(defaultNode >= 0) { + app2Partition.put(DEFAULT_NODE, defaultNode); + } + } catch (Exception e) { + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } else { + throw new RuntimeException(e); + } + + } finally { + try { + in.close(); + } catch (Exception e2) { + } + } + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/route/function/PartitionByHashMod.java b/src/main/java/io/mycat/route/function/PartitionByHashMod.java new file mode 100644 index 000000000..ca3251580 --- /dev/null +++ b/src/main/java/io/mycat/route/function/PartitionByHashMod.java @@ -0,0 +1,65 @@ +package io.mycat.route.function; + +import io.mycat.config.model.rule.RuleAlgorithm; + +import java.math.BigInteger; + +/** + * 哈希值取模 + * 根据分片列的哈希值对分片个数取模,哈希算法为Wang/Jenkins + * 用法和简单取模相似,规定分片个数和分片列即可。 + * + * @author Hash Zhang + */ +public class PartitionByHashMod extends AbstractPartitionAlgorithm implements RuleAlgorithm { + private boolean watch = false; + private int count; + + public void setCount(int count) { + this.count = count; + if ((count & (count - 1)) == 0) { + watch = true; + } + } + + /** + * Using Wang/Jenkins Hash + * + * @param key + * @return 
hash value + */ + protected int hash(int key) { + key = (~key) + (key << 21); // key = (key << 21) - key - 1; + key = key ^ (key >> 24); + key = (key + (key << 3)) + (key << 8); // key * 265 + key = key ^ (key >> 14); + key = (key + (key << 2)) + (key << 4); // key * 21 + key = key ^ (key >> 28); + key = key + (key << 31); + return key; + } + + @Override + public Integer calculate(String columnValue) { +// columnValue = columnValue.replace("\'", " "); +// columnValue = columnValue.trim(); + BigInteger bigNum = new BigInteger(hash(columnValue.hashCode()) + "").abs(); + // if count==2^n, then m%count == m&(count-1) + if (watch) { + return bigNum.intValue() & (count - 1); + } + return (bigNum.mod(BigInteger.valueOf(count))).intValue(); + } + + @Override + public void init() { + super.init(); + } + + @Override + public int getPartitionNum() { + int count = this.count; + return count; + } + +} diff --git a/src/main/java/io/mycat/route/function/PartitionByHotDate.java b/src/main/java/io/mycat/route/function/PartitionByHotDate.java new file mode 100644 index 000000000..216a454d4 --- /dev/null +++ b/src/main/java/io/mycat/route/function/PartitionByHotDate.java @@ -0,0 +1,149 @@ +package io.mycat.route.function; + +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Calendar; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.config.model.rule.RuleAlgorithm; + +/** + * 根据日期查询日志数据 冷热数据分布 ,最近n个月的到实时交易库查询,超过n个月的按照m天分片 + * + * @author sw + * + * + + create_time + sharding-by-hotdate + + + + yyyy-MM-dd + 10 + 30 + + */ +public class PartitionByHotDate extends AbstractPartitionAlgorithm implements RuleAlgorithm { + private static final Logger LOGGER = LoggerFactory.getLogger(PartitionByHotDate.class); + + private String dateFormat; + private String sLastDay; + private String sPartionDay; + + private long sLastTime; + private long partionTime; + private ThreadLocal formatter; + + private long beginDate; + + private 
static final long oneDay = 86400000; + + @Override + public void init() { + try { + formatter = new ThreadLocal() { + @Override + protected SimpleDateFormat initialValue() { + return new SimpleDateFormat(dateFormat); + } + }; + sLastTime = Integer.valueOf(sLastDay); + partionTime = Integer.parseInt(sPartionDay) * oneDay; + } catch (Exception e) { + throw new java.lang.IllegalArgumentException(e); + } + } + + @Override + public Integer calculate(String columnValue) { + Integer targetPartition = -1; + try { + long targetTime = formatter.get().parse(columnValue).getTime(); + Calendar now = Calendar.getInstance(); + long nowTime = now.getTimeInMillis(); + + beginDate = nowTime - sLastTime * oneDay; + + long diffDays = (nowTime - targetTime) / (1000 * 60 * 60 * 24) + 1; + if(diffDays-sLastTime <= 0 || diffDays<0 ){ + targetPartition = 0; + }else{ + targetPartition = (int) ((beginDate - targetTime) / partionTime) + 1; + } + + LOGGER.debug("PartitionByHotDate calculate for " + columnValue + " return " + targetPartition); + return targetPartition; + } catch (ParseException e) { + throw new IllegalArgumentException(new StringBuilder().append("columnValue:").append(columnValue).append(" Please check if the format satisfied.").toString(),e); + } + } + + @Override + public Integer[] calculateRange(String beginValue, String endValue) { + Integer[] targetPartition = null; + try { + long startTime = formatter.get().parse(beginValue).getTime(); + long endTime = formatter.get().parse(endValue).getTime(); + Calendar now = Calendar.getInstance(); + long nowTime = now.getTimeInMillis(); + + long limitDate = nowTime - sLastTime * oneDay; + long diffDays = (nowTime - startTime) / (1000 * 60 * 60 * 24) + 1; + if(diffDays-sLastTime <= 0 || diffDays<0 ){ + Integer [] re = new Integer[1]; + re[0] = 0; + targetPartition = re ; + }else{ + Integer [] re = null; + Integer begin = 0, end = 0; + end = this.calculate(beginValue); + boolean hasLimit = false; + if(endTime-limitDate > 0){ + endTime = 
limitDate; + hasLimit = true; + } + begin = this.calculate(formatter.get().format(endTime)); + if(begin == null || end == null){ + return re; + } + if (end >= begin) { + int len = end-begin+1; + if(hasLimit){ + re = new Integer[len+1]; + re[0] = 0; + for(int i =0;i> hashed=new HashMap<>(); - + int total=1000_0000;//数据量 int c=0; for(int i=100_0000;i partition){ + private static void rehashTest(List partition) { PartitionByMod hash=new PartitionByMod(); hash.count=110;//分片数 hash.init(); - + int[] bucket=new int[hash.count]; - + int total=partition.size();//数据量 int c=0; for(int i:partition){//假设分片键从100万开始 @@ -118,7 +131,11 @@ private static void rehashTest(List partition){ System.out.println(idx+++" "+i+" "+(i/(double)total)); } } - public static void main(String[] args) { - hashTest(); + public static void main(String[] args) { +// hashTest(); + PartitionByMod partitionByMod = new PartitionByMod(); + partitionByMod.count=8; + partitionByMod.calculate("\"6\""); + partitionByMod.calculate("\'6\'"); } } \ No newline at end of file diff --git a/src/main/java/io/mycat/route/function/PartitionByMonth.java b/src/main/java/io/mycat/route/function/PartitionByMonth.java index 76b84b769..dab413804 100644 --- a/src/main/java/io/mycat/route/function/PartitionByMonth.java +++ b/src/main/java/io/mycat/route/function/PartitionByMonth.java @@ -1,9 +1,14 @@ package io.mycat.route.function; - import java.text.ParseException; import java.text.SimpleDateFormat; +import java.util.ArrayList; import java.util.Calendar; +import java.util.Collections; +import java.util.List; + +import io.mycat.config.model.rule.RuleAlgorithm; +import org.apache.log4j.Logger; /** * 例子 按月份列分区 ,每个自然月一个分片,格式 between操作解析的范例 @@ -13,9 +18,15 @@ */ public class PartitionByMonth extends AbstractPartitionAlgorithm implements RuleAlgorithm { + private static final Logger LOGGER = Logger.getLogger(PartitionByDate.class); private String sBeginDate; private String dateFormat; + private String sEndDate; private Calendar 
beginDate; + private Calendar endDate; + private int nPartition; + + private ThreadLocal formatter; @Override public void init() { @@ -23,29 +34,108 @@ public void init() { beginDate = Calendar.getInstance(); beginDate.setTime(new SimpleDateFormat(dateFormat) .parse(sBeginDate)); + formatter = new ThreadLocal() { + @Override + protected SimpleDateFormat initialValue() { + return new SimpleDateFormat(dateFormat); + } + }; + if(sEndDate!=null&&!sEndDate.equals("")) { + endDate = Calendar.getInstance(); + endDate.setTime(new SimpleDateFormat(dateFormat).parse(sEndDate)); + nPartition = ((endDate.get(Calendar.YEAR) - beginDate.get(Calendar.YEAR)) * 12 + + endDate.get(Calendar.MONTH) - beginDate.get(Calendar.MONTH)) + 1; + + if (nPartition <= 0) { + throw new java.lang.IllegalArgumentException("Incorrect time range for month partitioning!"); + } + } else { + nPartition = -1; + } } catch (ParseException e) { throw new java.lang.IllegalArgumentException(e); } } + /** + * For circulatory partition, calculated value of target partition needs to be + * rotated to fit the partition range + */ + private int reCalculatePartition(int targetPartition) { + /** + * If target date is previous of start time of partition setting, shift + * the delta range between target and start date to be positive value + */ + if (targetPartition < 0) { + targetPartition = nPartition - (-targetPartition) % nPartition; + } + + if (targetPartition >= nPartition) { + targetPartition = targetPartition % nPartition; + } + + return targetPartition; + } + @Override - public Integer calculate(String columnValue) { + public Integer calculate(String columnValue) { try { + int targetPartition; Calendar curTime = Calendar.getInstance(); - curTime.setTime(new SimpleDateFormat(dateFormat).parse(columnValue)); - return (curTime.get(Calendar.YEAR) - beginDate.get(Calendar.YEAR)) + curTime.setTime(formatter.get().parse(columnValue)); + targetPartition = ((curTime.get(Calendar.YEAR) - beginDate.get(Calendar.YEAR)) * 
12 + curTime.get(Calendar.MONTH) - - beginDate.get(Calendar.MONTH); + - beginDate.get(Calendar.MONTH)); + + /** + * For circulatory partition, calculated value of target partition needs to be + * rotated to fit the partition range + */ + if (nPartition > 0) { + targetPartition = reCalculatePartition(targetPartition); + } + return targetPartition; } catch (ParseException e) { - throw new java.lang.IllegalArgumentException(e); + throw new IllegalArgumentException(new StringBuilder().append("columnValue:").append(columnValue).append(" Please check if the format satisfied.").toString(),e); } } @Override public Integer[] calculateRange(String beginValue, String endValue) { - return AbstractPartitionAlgorithm.calculateSequenceRange(this, - beginValue, endValue); + try { + int startPartition, endPartition; + Calendar partitionTime = Calendar.getInstance(); + SimpleDateFormat format = new SimpleDateFormat(dateFormat); + partitionTime.setTime(format.parse(beginValue)); + startPartition = ((partitionTime.get(Calendar.YEAR) - beginDate.get(Calendar.YEAR)) + * 12 + partitionTime.get(Calendar.MONTH) + - beginDate.get(Calendar.MONTH)); + partitionTime.setTime(format.parse(endValue)); + endPartition = ((partitionTime.get(Calendar.YEAR) - beginDate.get(Calendar.YEAR)) + * 12 + partitionTime.get(Calendar.MONTH) + - beginDate.get(Calendar.MONTH)); + + List list = new ArrayList<>(); + + while (startPartition <= endPartition) { + Integer nodeValue = reCalculatePartition(startPartition); + if (Collections.frequency(list, nodeValue) < 1) + list.add(nodeValue); + startPartition++; + } + int size = list.size(); + return (list.toArray(new Integer[size])); + } catch (ParseException e) { + LOGGER.error("error",e); + return new Integer[0]; + } + } + + @Override + public int getPartitionNum() { + int nPartition = this.nPartition; + return nPartition; } public void setsBeginDate(String sBeginDate) { @@ -56,4 +146,8 @@ public void setDateFormat(String dateFormat) { this.dateFormat = dateFormat; 
} + public void setsEndDate(String sEndDate) { + this.sEndDate = sEndDate; + } + } diff --git a/src/main/java/io/mycat/route/function/PartitionByMurmurHash.java b/src/main/java/io/mycat/route/function/PartitionByMurmurHash.java index d617032a5..e43b8afc1 100644 --- a/src/main/java/io/mycat/route/function/PartitionByMurmurHash.java +++ b/src/main/java/io/mycat/route/function/PartitionByMurmurHash.java @@ -1,180 +1,272 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.route.function; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.SortedMap; -import java.util.TreeMap; - -import com.google.common.hash.HashFunction; -import com.google.common.hash.Hashing; - -import io.mycat.server.exception.MurmurHashException; - -/** - * consistancy hash, murmur hash - * implemented by Guava - * @author wuzhih - * - */ -public class PartitionByMurmurHash extends AbstractPartitionAlgorithm implements RuleAlgorithm { - private static final int DEFAULT_VIRTUAL_BUCKET_TIMES=160; - - private int seed; - private int count; - private int virtualBucketTimes=DEFAULT_VIRTUAL_BUCKET_TIMES; - - private HashFunction hash; - - private SortedMap bucketMap; - @Override - public void init() { - try{ - bucketMap=new TreeMap<>(); - generateBucketMap(); - }catch(Exception e){ - throw new MurmurHashException(e); - } - } - - private void generateBucketMap(){ - hash=Hashing.murmur3_32(seed);//计算一致性哈希的对象 - for(int i=0;i tail = bucketMap.tailMap(hash.hashUnencodedChars(columnValue).asInt()); - if (tail.isEmpty()) { - return bucketMap.get(bucketMap.firstKey()); - } - return tail.get(tail.firstKey()); - } - - private static void hashTest() throws IOException{ - PartitionByMurmurHash hash=new PartitionByMurmurHash(); - hash.count=1000;//分片数 - hash.init(); - - int[] bucket=new int[hash.count]; - - Map> hashed=new HashMap<>(); - - int total=1000_0000;//数据量 - int c=0; - for(int i=100_0000;i list=hashed.get(h); - if(list==null){ - list=new ArrayList<>(); - hashed.put(h, list); - } - list.add(i); - } - System.out.println(c+" "+total); - double d=0; - c=0; - int idx=0; - System.out.println("index bucket ratio"); - for(int i:bucket){ - d+=i/(double)total; - c+=i; - System.out.println(idx+++" "+i+" "+(i/(double)total)); - } - System.out.println(d+" 
"+c); - - Properties props=new Properties(); - for(Map.Entry entry:hash.bucketMap.entrySet()){ - props.setProperty(entry.getKey().toString(), entry.getValue().toString()); - } - ByteArrayOutputStream out=new ByteArrayOutputStream(); - props.store(out, null); - - props.clear(); - props.load(new ByteArrayInputStream(out.toByteArray())); - System.out.println(props); - System.out.println("****************************************************"); -// rehashTest(hashed.get(0)); - } - private static void rehashTest(List partition){ - PartitionByMurmurHash hash=new PartitionByMurmurHash(); - hash.count=12;//分片数 - hash.init(); - - int[] bucket=new int[hash.count]; - - int total=partition.size();//数据量 - int c=0; - for(int i:partition){//假设分片键从100万开始 - c++; - int h=hash.calculate(Integer.toString(i)); - bucket[h]++; - } - System.out.println(c+" "+total); - c=0; - int idx=0; - System.out.println("index bucket ratio"); - for(int i:bucket){ - c+=i; - System.out.println(idx+++" "+i+" "+(i/(double)total)); - } - } - public static void main(String[] args) throws IOException { - hashTest(); - } -} +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.route.function; + +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.nio.charset.Charset; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.SortedMap; +import java.util.TreeMap; + +import com.google.common.hash.HashFunction; +import com.google.common.hash.Hashing; + +import io.mycat.config.model.rule.RuleAlgorithm; +import io.mycat.util.exception.MurmurHashException; + +/** + * consistancy hash, murmur hash + * implemented by Guava + * @author wuzhih + * + */ +public class PartitionByMurmurHash extends AbstractPartitionAlgorithm implements RuleAlgorithm { + private static final int DEFAULT_VIRTUAL_BUCKET_TIMES=160; + private static final int DEFAULT_WEIGHT=1; + private static final Charset DEFAULT_CHARSET=Charset.forName("UTF-8"); + + private int seed; + private int count; + private int virtualBucketTimes=DEFAULT_VIRTUAL_BUCKET_TIMES; + private Map weightMap=new HashMap<>(); +// private String bucketMapPath; + + private HashFunction hash; + + private SortedMap bucketMap; + @Override + public void init() { + try{ + bucketMap=new TreeMap<>(); +// boolean serializableBucketMap=bucketMapPath!=null && bucketMapPath.length()>0; +// if(serializableBucketMap){ +// File 
bucketMapFile=new File(bucketMapPath); +// if(bucketMapFile.exists() && bucketMapFile.length()>0){ +// loadBucketMapFile(); +// return; +// } +// } + generateBucketMap(); +// if(serializableBucketMap){ +// storeBucketMap(); +// } + }catch(Exception e){ + throw new MurmurHashException(e); + } + } + + private void generateBucketMap(){ + hash=Hashing.murmur3_32(seed);//计算一致性哈希的对象 + for(int i=0;i0?weight:1); + } + } + } +// /** +// * 保存一致性hash的虚拟节点文件路径。 +// * 如果这个文件不存在或是空文件就按照指定的count, weightMapFile等构造新的MurmurHash数据结构并保存到这个路径的文件里。 +// * 如果这个文件已存在且不是空文件就加载这个文件里的内容作为MurmurHash数据结构,此时其它参数都忽略。 +// * 除第一次以外在之后增加节点时可以直接修改这个文件,不过不推荐这么做。如果节点数量变化了,推荐删除这个文件。 +// * 可以不指定这个路径,不指定路径时不会保存murmur hash +// * @param bucketMapPath +// */ +// public void setBucketMapPath(String bucketMapPath){ +// this.bucketMapPath=bucketMapPath; +// } + @Override + public Integer calculate(String columnValue) { + SortedMap tail = bucketMap.tailMap(hash.hashUnencodedChars(columnValue).asInt()); + if (tail.isEmpty()) { + return bucketMap.get(bucketMap.firstKey()); + } + return tail.get(tail.firstKey()); + } + + @Override + public int getPartitionNum() { + int nPartition = this.count; + return nPartition; + } + + private static void hashTest() throws IOException{ + PartitionByMurmurHash hash=new PartitionByMurmurHash(); + hash.count=10;//分片数 + hash.init(); + + int[] bucket=new int[hash.count]; + + Map> hashed=new HashMap<>(); + + int total=1000_0000;//数据量 + int c=0; + for(int i=100_0000;i list=hashed.get(h); + if(list==null){ + list=new ArrayList<>(); + hashed.put(h, list); + } + list.add(i); + } + System.out.println(c+" "+total); + double d=0; + c=0; + int idx=0; + System.out.println("index bucket ratio"); + for(int i:bucket){ + d+=i/(double)total; + c+=i; + System.out.println(idx+++" "+i+" "+(i/(double)total)); + } + System.out.println(d+" "+c); + + Properties props=new Properties(); + for(Map.Entry entry:hash.bucketMap.entrySet()){ + props.setProperty(entry.getKey().toString(), 
entry.getValue().toString()); + } + ByteArrayOutputStream out=new ByteArrayOutputStream(); + props.store(out, null); + + props.clear(); + props.load(new ByteArrayInputStream(out.toByteArray())); + System.out.println(props); + System.out.println("****************************************************"); +// rehashTest(hashed.get(0)); + } + private static void rehashTest(List partition){ + PartitionByMurmurHash hash=new PartitionByMurmurHash(); + hash.count=12;//分片数 + hash.init(); + + int[] bucket=new int[hash.count]; + + int total=partition.size();//数据量 + int c=0; + for(int i:partition){//假设分片键从100万开始 + c++; + int h=hash.calculate(Integer.toString(i)); + bucket[h]++; + } + System.out.println(c+" "+total); + c=0; + int idx=0; + System.out.println("index bucket ratio"); + for(int i:bucket){ + c+=i; + System.out.println(idx+++" "+i+" "+(i/(double)total)); + } + } + public static void main(String[] args) throws IOException { + hashTest(); + } +} diff --git a/src/main/java/io/mycat/route/function/PartitionByPattern.java b/src/main/java/io/mycat/route/function/PartitionByPattern.java index 3c6ef7f5b..7e822a951 100644 --- a/src/main/java/io/mycat/route/function/PartitionByPattern.java +++ b/src/main/java/io/mycat/route/function/PartitionByPattern.java @@ -1,104 +1,165 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.route.function; - - -import java.util.Set; -import java.util.regex.Pattern; - -/** - * auto partition by Long - * - * @author hexiaobin - */ -public class PartitionByPattern extends AbstractPartitionAlgorithm implements RuleAlgorithm { - private static final int PARTITION_LENGTH = 1024; - private int patternValue = PARTITION_LENGTH;// 分区长度,取模数值 - private LongRange[] longRongs; - private int defaultNode = 0;// 包含非数值字符,默认存储节点 - private static final Pattern pattern = Pattern.compile("[0-9]*");; - - @Override - public void init() { - initialize(); - } - - public void setPatternValue(int patternValue) { - this.patternValue = patternValue; - } - public void setDefaultNode(int defaultNode) { - this.defaultNode = defaultNode; - } - - @Override - public Integer calculate(String columnValue) { - if (!isNumeric(columnValue)) { - return defaultNode; - } - long value = Long.valueOf(columnValue); - Integer rst = null; - for (LongRange longRang : this.longRongs) { - long hash = value % patternValue; - if (hash <= longRang.valueEnd && hash >= longRang.valueStart) { - return longRang.nodeIndx; - } - } - return rst; - } - - public static boolean isNumeric(String str) { - return pattern.matcher(str).matches(); - } - - private void initialize() { - if (this.getConfig().isEmpty()) { - throw new RuntimeException("can't find ByPattern config, like 0 "); - } - longRongs = new LongRange[this.getConfig().size()]; - Set keys = this.getConfig().keySet(); - int i=0; - for(String key : 
keys){ - String pairs[] = key.trim().split("-"); - long longStart = NumberParseUtil.parseLong(pairs[0].trim()); - long longEnd = NumberParseUtil.parseLong(pairs[1].trim()); - int nodeId = Integer.parseInt((String)this.getConfig().get(key)); - longRongs[i] = new LongRange(nodeId, longStart, longEnd); - i++; - } - } - - static class LongRange { - public final int nodeIndx; - public final long valueStart; - public final long valueEnd; - - public LongRange(int nodeIndx, long valueStart, long valueEnd) { - super(); - this.nodeIndx = nodeIndx; - this.valueStart = valueStart; - this.valueEnd = valueEnd; - } - - } +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.route.function; + +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.Set; +import java.util.regex.Pattern; + +import io.mycat.config.model.rule.RuleAlgorithm; +import io.mycat.route.function.PartitionByPrefixPattern.LongRange; + +/** + * auto partition by Long + * + * @author hexiaobin + */ +public class PartitionByPattern extends AbstractPartitionAlgorithm implements RuleAlgorithm { + private static final int PARTITION_LENGTH = 1024; + private int patternValue = PARTITION_LENGTH;// 分区长度,取模数值 + private String mapFile; + private LongRange[] longRongs; + private int defaultNode = 0;// 包含非数值字符,默认存储节点 + private static final Pattern pattern = Pattern.compile("[0-9]*");; + + @Override + public void init() { + + initialize(); + } + + public void setMapFile(String mapFile) { + this.mapFile = mapFile; + } + + public void setPatternValue(int patternValue) { + this.patternValue = patternValue; + } + + public void setDefaultNode(int defaultNode) { + this.defaultNode = defaultNode; + } + + @Override + public Integer calculate(String columnValue) { + if (!isNumeric(columnValue)) { + return defaultNode; + } + long value = Long.parseLong(columnValue); + Integer rst = null; + for (LongRange longRang : this.longRongs) { + long hash = value % patternValue; + if (hash <= longRang.valueEnd && hash >= longRang.valueStart) { + return longRang.nodeIndx; + } + } + return rst; + } + + @Override + public int getPartitionNum() { +// int nPartition = this.longRongs.length; + /* + * fix #1284 这里的统计应该统计Range的nodeIndex的distinct总数 + */ + Set distNodeIdxSet = new HashSet(); + for(LongRange range : longRongs) { + distNodeIdxSet.add(range.nodeIndx); + } + int nPartition = distNodeIdxSet.size(); + return nPartition; + } + + public static boolean isNumeric(String str) { + return pattern.matcher(str).matches(); + } + + private void initialize() { + 
BufferedReader in = null; + try { + // FileInputStream fin = new FileInputStream(new File(fileMapPath)); + InputStream fin = this.getClass().getClassLoader() + .getResourceAsStream(mapFile); + if (fin == null) { + throw new RuntimeException("can't find class resource file " + + mapFile); + } + in = new BufferedReader(new InputStreamReader(fin)); + LinkedList longRangeList = new LinkedList(); + + for (String line = null; (line = in.readLine()) != null;) { + line = line.trim(); + if (line.startsWith("#") || line.startsWith("//")) { + continue; + } + int ind = line.indexOf('='); + if (ind < 0) { + System.out.println(" warn: bad line int " + mapFile + " :" + + line); + continue; + } + String pairs[] = line.substring(0, ind).trim().split("-"); + long longStart = Long.parseLong(pairs[0].trim()); + long longEnd = Long.parseLong(pairs[1].trim()); + int nodeId = Integer.parseInt(line.substring(ind + 1) + .trim()); + longRangeList + .add(new LongRange(nodeId, longStart, longEnd)); + + } + longRongs = longRangeList.toArray(new LongRange[longRangeList + .size()]); + } catch (Exception e) { + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } else { + throw new RuntimeException(e); + } + + } finally { + try { + in.close(); + } catch (Exception e2) { + } + } + } + + static class LongRange { + public final int nodeIndx; + public final long valueStart; + public final long valueEnd; + + public LongRange(int nodeIndx, long valueStart, long valueEnd) { + super(); + this.nodeIndx = nodeIndx; + this.valueStart = valueStart; + this.valueEnd = valueEnd; + } + + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/route/function/PartitionByPrefixPattern.java b/src/main/java/io/mycat/route/function/PartitionByPrefixPattern.java index 46e2b89f9..e0514c39d 100644 --- a/src/main/java/io/mycat/route/function/PartitionByPrefixPattern.java +++ b/src/main/java/io/mycat/route/function/PartitionByPrefixPattern.java @@ -1,102 +1,166 @@ -/* - * Copyright (c) 2013, 
OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.route.function; - - -import java.util.Set; - -/** - * partition by Prefix length ,can be used in String partition - * - * @author hexiaobin - */ -public class PartitionByPrefixPattern extends AbstractPartitionAlgorithm implements RuleAlgorithm { - private static final int PARTITION_LENGTH = 1024; - private int patternValue = PARTITION_LENGTH;// 分区长度,取模数值(默认为1024) - private int prefixLength;// 字符前几位进行ASCII码取和 - private LongRange[] longRongs; - - @Override - public void init() { - initialize(); - } - public void setPatternValue(int patternValue) { - this.patternValue = patternValue; - } - - public void setPrefixLength(int prefixLength) { - this.prefixLength = prefixLength; - } - - @Override - public Integer calculate(String columnValue) { - int pattern = Integer.valueOf(patternValue); - int Length = Integer.valueOf(prefixLength); - - Length = columnValue.length() < Length ? 
columnValue.length() : Length; - int sum = 0; - for (int i = 0; i < Length; i++) { - sum = sum + columnValue.charAt(i); - } - Integer rst = null; - for (LongRange longRang : this.longRongs) { - long hash = sum % patternValue; - if (hash <= longRang.valueEnd && hash >= longRang.valueStart) { - return longRang.nodeIndx; - } - } - return rst; - } - - private void initialize() { - if (this.getConfig().isEmpty()) { - throw new RuntimeException("can't find PrefixPattern config, like 0 "); - } - longRongs = new LongRange[this.getConfig().size()]; - Set keys = this.getConfig().keySet(); - int i=0; - for(String key : keys){ - String pairs[] = key.trim().split("-"); - long longStart = NumberParseUtil.parseLong(pairs[0].trim()); - long longEnd = NumberParseUtil.parseLong(pairs[1].trim()); - int nodeId = Integer.parseInt((String)this.getConfig().get(key)); - longRongs[i] = new LongRange(nodeId, longStart, longEnd); - i++; - } - } - - static class LongRange { - public final int nodeIndx; - public final long valueStart; - public final long valueEnd; - - public LongRange(int nodeIndx, long valueStart, long valueEnd) { - super(); - this.nodeIndx = nodeIndx; - this.valueStart = valueStart; - this.valueEnd = valueEnd; - } - - } +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.route.function; + +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.Set; + +import io.mycat.config.model.rule.RuleAlgorithm; +import io.mycat.route.function.AutoPartitionByLong.LongRange; + +/** + * partition by Prefix length ,can be used in String partition + * + * @author hexiaobin + */ +public class PartitionByPrefixPattern extends AbstractPartitionAlgorithm implements RuleAlgorithm { + private static final int PARTITION_LENGTH = 1024; + private int patternValue = PARTITION_LENGTH;// 分区长度,取模数值(默认为1024) + private int prefixLength;// 字符前几位进行ASCII码取和 + private String mapFile; + private LongRange[] longRongs; + + @Override + public void init() { + + initialize(); + } + + public void setMapFile(String mapFile) { + this.mapFile = mapFile; + } + + public void setPatternValue(int patternValue) { + this.patternValue = patternValue; + } + + public void setPrefixLength(int prefixLength) { + this.prefixLength = prefixLength; + } + + @Override + public Integer calculate(String columnValue) { + try { + int Length = Integer.valueOf(prefixLength); + + Length = columnValue.length() < Length ? 
columnValue.length() : Length; + int sum = 0; + for (int i = 0; i < Length; i++) { + sum = sum + columnValue.charAt(i); + } + Integer rst = null; + for (LongRange longRang : this.longRongs) { + long hash = sum % patternValue; + if (hash <= longRang.valueEnd && hash >= longRang.valueStart) { + return longRang.nodeIndx; + } + } + return rst; + } catch (NumberFormatException e){ + throw new IllegalArgumentException(new StringBuilder().append("columnValue:").append(columnValue).append(" Please eliminate any quote and non number within it.").toString(),e); + } + } + + @Override + public int getPartitionNum() { +// int nPartition = this.longRongs.length; + /* + * fix #1284 这里的统计应该统计Range的nodeIndex的distinct总数 + */ + Set distNodeIdxSet = new HashSet(); + for(LongRange range : longRongs) { + distNodeIdxSet.add(range.nodeIndx); + } + int nPartition = distNodeIdxSet.size(); + return nPartition; + } + + private void initialize() { + BufferedReader in = null; + try { + // FileInputStream fin = new FileInputStream(new File(fileMapPath)); + InputStream fin = this.getClass().getClassLoader() + .getResourceAsStream(mapFile); + if (fin == null) { + throw new RuntimeException("can't find class resource file " + + mapFile); + } + in = new BufferedReader(new InputStreamReader(fin)); + LinkedList longRangeList = new LinkedList(); + + for (String line = null; (line = in.readLine()) != null;) { + line = line.trim(); + if (line.startsWith("#") || line.startsWith("//")) { + continue; + } + int ind = line.indexOf('='); + if (ind < 0) { + System.out.println(" warn: bad line int " + mapFile + " :" + + line); + continue; + } + String pairs[] = line.substring(0, ind).trim().split("-"); + long longStart = NumberParseUtil.parseLong(pairs[0].trim()); + long longEnd = NumberParseUtil.parseLong(pairs[1].trim()); + int nodeId = Integer.parseInt(line.substring(ind + 1) + .trim()); + longRangeList + .add(new LongRange(nodeId, longStart, longEnd)); + + } + longRongs = longRangeList.toArray(new 
LongRange[longRangeList + .size()]); + } catch (Exception e) { + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } else { + throw new RuntimeException(e); + } + + } finally { + try { + in.close(); + } catch (Exception e2) { + } + } + } + + static class LongRange { + public final int nodeIndx; + public final long valueStart; + public final long valueEnd; + + public LongRange(int nodeIndx, long valueStart, long valueEnd) { + super(); + this.nodeIndx = nodeIndx; + this.valueStart = valueStart; + this.valueEnd = valueEnd; + } + + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/route/function/PartitionByRangeDateHash.java b/src/main/java/io/mycat/route/function/PartitionByRangeDateHash.java index cd751422d..62f456918 100644 --- a/src/main/java/io/mycat/route/function/PartitionByRangeDateHash.java +++ b/src/main/java/io/mycat/route/function/PartitionByRangeDateHash.java @@ -1,8 +1,10 @@ package io.mycat.route.function; import com.google.common.hash.Hashing; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import io.mycat.config.model.rule.RuleAlgorithm; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.text.ParseException; import java.text.SimpleDateFormat; @@ -31,6 +33,8 @@ public class PartitionByRangeDateHash extends AbstractPartitionAlgorithm impleme private String groupPartionSize; private int intGroupPartionSize; + private ThreadLocal formatter; + @Override public void init() { @@ -39,6 +43,12 @@ public void init() beginDate = new SimpleDateFormat(dateFormat).parse(sBeginDate) .getTime(); intGroupPartionSize = Integer.parseInt(groupPartionSize); + formatter = new ThreadLocal() { + @Override + protected SimpleDateFormat initialValue() { + return new SimpleDateFormat(dateFormat); + } + }; if (intGroupPartionSize <= 0) { throw new RuntimeException("groupPartionSize must >0,but cur is " + intGroupPartionSize); @@ -51,11 +61,10 @@ public void init() } @Override - public Integer calculate(String 
columnValue) - { + public Integer calculate(String columnValue) { try { - long targetTime = new SimpleDateFormat(dateFormat).parse( + long targetTime = formatter.get().parse( columnValue).getTime(); int targetPartition = (int) ((targetTime - beginDate) / partionTime); int innerIndex = Hashing.consistentHash(targetTime,intGroupPartionSize); @@ -63,8 +72,7 @@ public Integer calculate(String columnValue) } catch (ParseException e) { - throw new IllegalArgumentException(e); - + throw new IllegalArgumentException(new StringBuilder().append("columnValue:").append(columnValue).append(" Please check if the format satisfied.").toString(),e); } } diff --git a/src/main/java/io/mycat/route/function/PartitionByRangeMod.java b/src/main/java/io/mycat/route/function/PartitionByRangeMod.java index 9b220d348..d29e44c9a 100644 --- a/src/main/java/io/mycat/route/function/PartitionByRangeMod.java +++ b/src/main/java/io/mycat/route/function/PartitionByRangeMod.java @@ -2,8 +2,8 @@ * Copyright (c) 2015, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,57 +16,80 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://github.com/MyCATApache/Mycat-Server. 
* */ package io.mycat.route.function; - +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; import java.math.BigInteger; -import java.util.Set; +import java.util.LinkedList; + +import io.mycat.config.model.rule.RuleAlgorithm; /** * 先进行范围分片计算出分片组,组内再取模 * 优点可以避免扩容时的数据迁移,又可以一定程度上避免范围分片的热点问题 - * + * * @author wuzhi */ public class PartitionByRangeMod extends AbstractPartitionAlgorithm implements RuleAlgorithm{ + private String mapFile; private LongRange[] longRanges; + private int defaultNode = -1; - @Override public void init() { + initialize(); } + public void setMapFile(String mapFile) { + this.mapFile = mapFile; + } + @Override - public Integer calculate(String columnValue) { - long value = Long.valueOf(columnValue); - Integer rst = null; - int nodeIndex=0; - for (LongRange longRang : this.longRanges) { - if (value <= longRang.valueEnd && value >= longRang.valueStart) { - BigInteger bigNum = new BigInteger(columnValue).abs(); - int innerIndex= (bigNum.mod(BigInteger.valueOf(longRang.groupSize))).intValue(); - return nodeIndex+innerIndex; - } else - { - nodeIndex+= longRang.groupSize; - } + public Integer calculate(String columnValue) { +// columnValue = NumberParseUtil.eliminateQoute(columnValue); + try { + long value = Long.parseLong(columnValue); + Integer rst = null; + int nodeIndex = 0; + for (LongRange longRang : this.longRanges) { + if (value <= longRang.valueEnd && value >= longRang.valueStart) { + BigInteger bigNum = new BigInteger(columnValue).abs(); + int innerIndex = (bigNum.mod(BigInteger.valueOf(longRang.groupSize))).intValue(); + return nodeIndex + innerIndex; + } else { + nodeIndex += longRang.groupSize; + } + } + //数据超过范围,暂时使用配置的默认节点 + if (rst == null && defaultNode >= 0) { + return defaultNode; + } + return rst; + } catch (NumberFormatException e) { + throw new IllegalArgumentException(new StringBuilder().append("columnValue:").append(columnValue).append(" Please eliminate any quote and non number within 
it.").toString(), e); } - //数据超过范围,暂时使用配置的默认节点 - if(rst ==null && defaultNode>=0){ - return defaultNode ; + } + + @Override + public int getPartitionNum() { + int nPartition = 0; + for(LongRange longRange : this.longRanges) { + nPartition += longRange.groupSize; } - return rst; + return nPartition; } - public Integer calculateStart(String columnValue) { - long value = Long.valueOf(columnValue); + public Integer calculateStart(String columnValue) { + long value = Long.parseLong(columnValue); Integer rst = null; int nodeIndex=0; for (LongRange longRang : this.longRanges) { @@ -85,7 +108,7 @@ public Integer calculateStart(String columnValue) { return rst; } public Integer calculateEnd(String columnValue) { - long value = Long.valueOf(columnValue); + long value = Long.parseLong(columnValue); Integer rst = null; int nodeIndex=0; for (LongRange longRang : this.longRanges) { @@ -103,7 +126,7 @@ public Integer calculateEnd(String columnValue) { } return rst; } - + @Override public Integer[] calculateRange(String beginValue, String endValue) { Integer begin = 0, end = 0; @@ -131,22 +154,53 @@ public Integer[] calculateRange(String beginValue, String endValue) { private void initialize() { - if (this.getConfig().isEmpty()) { - throw new RuntimeException("can't find PartitionByRangeMod config, like 0 "); - } - longRanges = new LongRange[this.getConfig().size()]; - Set keys = this.getConfig().keySet(); - int i=0; - for(String key : keys){ - String pairs[] = key.trim().split("-"); - long longStart = NumberParseUtil.parseLong(pairs[0].trim()); - long longEnd = NumberParseUtil.parseLong(pairs[1].trim()); - int nodeId = Integer.parseInt((String)this.getConfig().get(key)); - longRanges[i] = new LongRange(nodeId, longStart, longEnd); - i++; + BufferedReader in = null; + try { + InputStream fin = this.getClass().getClassLoader() + .getResourceAsStream(mapFile); + if (fin == null) { + throw new RuntimeException("can't find class resource file " + + mapFile); + } + in = new 
BufferedReader(new InputStreamReader(fin)); + LinkedList longRangeList = new LinkedList(); + + for (String line = null; (line = in.readLine()) != null;) { + line = line.trim(); + if (line.startsWith("#") || line.startsWith("//")) { + continue; + } + int ind = line.indexOf('='); + if (ind < 0) { + System.out.println(" warn: bad line int " + mapFile + " :" + + line); + continue; + } + String pairs[] = line.substring(0, ind).trim().split("-"); + long longStart = NumberParseUtil.parseLong(pairs[0].trim()); + long longEnd = NumberParseUtil.parseLong(pairs[1].trim()); + int nodeId = Integer.parseInt(line.substring(ind + 1) + .trim()); + longRangeList + .add(new LongRange(nodeId, longStart, longEnd)); + + } + longRanges = longRangeList.toArray(new LongRange[longRangeList.size()]); + } catch (Exception e) { + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } else { + throw new RuntimeException(e); + } + + } finally { + try { + in.close(); + } catch (Exception e2) { + } } } - + public int getDefaultNode() { return defaultNode; } diff --git a/src/main/java/io/mycat/route/function/PartitionByString.java b/src/main/java/io/mycat/route/function/PartitionByString.java index ae8c2520e..7767a261b 100644 --- a/src/main/java/io/mycat/route/function/PartitionByString.java +++ b/src/main/java/io/mycat/route/function/PartitionByString.java @@ -1,117 +1,127 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.route.function; - -import io.mycat.route.util.PartitionUtil; -import io.mycat.util.Pair; -import io.mycat.util.StringUtil; - -/** - * @author
yangwenx - */ -public final class PartitionByString extends AbstractPartitionAlgorithm implements RuleAlgorithm { - - private int hashSliceStart = 0; - /** 0 means str.length(), -1 means str.length()-1 */ - private int hashSliceEnd = 8; - protected int[] count; - protected int[] length; - protected PartitionUtil partitionUtil; - - public void setPartitionCount(String partitionCount) { - this.count = toIntArray(partitionCount); - } - - public void setPartitionLength(String partitionLength) { - this.length = toIntArray(partitionLength); - } - - - public void setHashLength(int hashLength) { - setHashSlice(String.valueOf(hashLength)); - } - - public void setHashSlice(String hashSlice) { - Pair p = sequenceSlicing(hashSlice); - hashSliceStart = p.getKey(); - hashSliceEnd = p.getValue(); - } - - - /** - * "2" -> (0,2)
- * "1:2" -> (1,2)
- * "1:" -> (1,0)
- * "-1:" -> (-1,0)
- * ":-1" -> (0,-1)
- * ":" -> (0,0)
- */ - public static Pair sequenceSlicing(String slice) { - int ind = slice.indexOf(':'); - if (ind < 0) { - int i = Integer.parseInt(slice.trim()); - if (i >= 0) { - return new Pair(0, i); - } else { - return new Pair(i, 0); - } - } - String left = slice.substring(0, ind).trim(); - String right = slice.substring(1 + ind).trim(); - int start, end; - if (left.length() <= 0) { - start = 0; - } else { - start = Integer.parseInt(left); - } - if (right.length() <= 0) { - end = 0; - } else { - end = Integer.parseInt(right); - } - return new Pair(start, end); - } - - @Override - public void init() { - partitionUtil = new PartitionUtil(count,length); - - } - private static int[] toIntArray(String string) { - String[] strs = io.mycat.util.SplitUtil.split(string, ',', true); - int[] ints = new int[strs.length]; - for (int i = 0; i < strs.length; ++i) { - ints[i] = Integer.parseInt(strs[i]); - } - return ints; - } - @Override - public Integer calculate(String key) { - int start = hashSliceStart >= 0 ? hashSliceStart : key.length() + hashSliceStart; - int end = hashSliceEnd > 0 ? hashSliceEnd : key.length() + hashSliceEnd; - long hash = StringUtil.hash(key, start, end); - return partitionUtil.partition(hash); - } - +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.route.function; + +import io.mycat.config.model.rule.RuleAlgorithm; +import io.mycat.route.parser.util.Pair; +import io.mycat.route.util.PartitionUtil; +import io.mycat.util.StringUtil; + +/** + * @author yangwenx + */ +public final class PartitionByString extends AbstractPartitionAlgorithm implements RuleAlgorithm { + + private int hashSliceStart = 0; + /** 0 means str.length(), -1 means str.length()-1 */ + private int hashSliceEnd = 8; + protected int[] count; + protected int[] length; + protected PartitionUtil partitionUtil; + + public void setPartitionCount(String partitionCount) { + this.count = toIntArray(partitionCount); + } + + public void setPartitionLength(String partitionLength) { + this.length = toIntArray(partitionLength); + } + + + public void setHashLength(int hashLength) { + setHashSlice(String.valueOf(hashLength)); + } + + public void setHashSlice(String hashSlice) { + Pair p = sequenceSlicing(hashSlice); + hashSliceStart = p.getKey(); + hashSliceEnd = p.getValue(); + } + + + /** + * "2" -> (0,2)
+ * "1:2" -> (1,2)
+ * "1:" -> (1,0)
+ * "-1:" -> (-1,0)
+ * ":-1" -> (0,-1)
+ * ":" -> (0,0)
+ */ + public static Pair sequenceSlicing(String slice) { + int ind = slice.indexOf(':'); + if (ind < 0) { + int i = Integer.parseInt(slice.trim()); + if (i >= 0) { + return new Pair(0, i); + } else { + return new Pair(i, 0); + } + } + String left = slice.substring(0, ind).trim(); + String right = slice.substring(1 + ind).trim(); + int start, end; + if (left.length() <= 0) { + start = 0; + } else { + start = Integer.parseInt(left); + } + if (right.length() <= 0) { + end = 0; + } else { + end = Integer.parseInt(right); + } + return new Pair(start, end); + } + + @Override + public void init() { + partitionUtil = new PartitionUtil(count,length); + + } + private static int[] toIntArray(String string) { + String[] strs = io.mycat.util.SplitUtil.split(string, ',', true); + int[] ints = new int[strs.length]; + for (int i = 0; i < strs.length; ++i) { + ints[i] = Integer.parseInt(strs[i]); + } + return ints; + } + @Override + public Integer calculate(String key) { + int start = hashSliceStart >= 0 ? hashSliceStart : key.length() + hashSliceStart; + int end = hashSliceEnd > 0 ? 
hashSliceEnd : key.length() + hashSliceEnd; + long hash = StringUtil.hash(key, start, end); + return partitionUtil.partition(hash); + } + + @Override + public int getPartitionNum() { + int nPartition = 0; + for(int i = 0; i < count.length; i++) { + nPartition += count[i]; + } + return nPartition; + } + } \ No newline at end of file diff --git a/src/main/java/io/mycat/route/function/PartitionByVelocity.java b/src/main/java/io/mycat/route/function/PartitionByVelocity.java deleted file mode 100644 index f9180eccd..000000000 --- a/src/main/java/io/mycat/route/function/PartitionByVelocity.java +++ /dev/null @@ -1,45 +0,0 @@ -package io.mycat.route.function; - -import io.mycat.route.util.VelocityUtil; - - -/** - * 根据Velocity模板语言,分库分表规则更加灵活,例如一共100个分库,字段中包含时间信息,取时间的月份与天,hashCode再对100取余 - * - * id - * - - * - * @author yan.yan@huawei.com - */ -public class PartitionByVelocity extends AbstractPartitionAlgorithm implements RuleAlgorithm { - //分片字段名 - private String columnName; - //规则 - private String rule; - - - public void setColumnName(String str) { - columnName = str; - } - - public void setRule(String str) { - rule = str; - } - - @Override - public void init() { - - } - - @Override - public Integer calculate(String columnValue) { - String partitionSubString = VelocityUtil.evalDBRule(columnName, columnValue, rule); - int partition = Integer.parseInt(partitionSubString); - return partition; - } -} diff --git a/src/main/java/io/mycat/route/function/PartitionDirectBySubString.java b/src/main/java/io/mycat/route/function/PartitionDirectBySubString.java index 4b13a9a70..f34f96db3 100644 --- a/src/main/java/io/mycat/route/function/PartitionDirectBySubString.java +++ b/src/main/java/io/mycat/route/function/PartitionDirectBySubString.java @@ -1,51 +1,59 @@ -package io.mycat.route.function; - - -/** - * 直接根据字符子串(必须是数字)计算分区号(由应用传递参数,显式指定分区号)。 - * - * 9 - * 2 - * 8 - * 0 - * - */ -public class PartitionDirectBySubString extends AbstractPartitionAlgorithm implements RuleAlgorithm { - 
// 字符子串起始索引(zero-based) - private int startIndex; - // 字串长度 - private int size; - // 分区数量 - private int partitionCount; - // 默认分区(在分区数量定义时,字串标示的分区编号不在分区数量内时,使用默认分区) - private int defaultPartition; - - public void setStartIndex(String str) { - startIndex = Integer.parseInt(str); - } - - public void setSize(String str) { - size = Integer.parseInt(str); - } - - public void setPartitionCount(String partitionCount) { - this.partitionCount = Integer.parseInt(partitionCount); - } - - public void setDefaultPartition(String defaultPartition) { - this.defaultPartition = Integer.parseInt(defaultPartition); - } - - @Override - public void init() { - - } - - @Override - public Integer calculate(String columnValue) { - String partitionSubString = columnValue.substring(startIndex, startIndex + size); - int partition = Integer.parseInt(partitionSubString, 10); - return partitionCount > 0 && partition >= partitionCount - ? defaultPartition : partition; - } -} +package io.mycat.route.function; + +import io.mycat.config.model.rule.RuleAlgorithm; + +/** + * 直接根据字符子串(必须是数字)计算分区号(由应用传递参数,显式指定分区号)。 + * + * 9 + * 2 + * 8 + * 0 + * + */ +public class PartitionDirectBySubString extends AbstractPartitionAlgorithm implements RuleAlgorithm { + // 字符子串起始索引(zero-based) + private int startIndex; + // 字串长度 + private int size; + // 分区数量 + private int partitionCount; + // 默认分区(在分区数量定义时,字串标示的分区编号不在分区数量内时,使用默认分区) + private int defaultPartition; + + public void setStartIndex(String str) { + startIndex = Integer.parseInt(str); + } + + public void setSize(String str) { + size = Integer.parseInt(str); + } + + public void setPartitionCount(String partitionCount) { + this.partitionCount = Integer.parseInt(partitionCount); + } + + public void setDefaultPartition(String defaultPartition) { + this.defaultPartition = Integer.parseInt(defaultPartition); + } + + @Override + public void init() { + + } + + @Override + public Integer calculate(String columnValue) { + String partitionSubString = 
columnValue.substring(startIndex, startIndex + size); + int partition = Integer.parseInt(partitionSubString, 10); + return partitionCount > 0 && partition >= partitionCount + ? defaultPartition : partition; + } + + @Override + public int getPartitionNum() { + int nPartition = this.partitionCount; + return nPartition; + } + +} diff --git a/src/main/java/io/mycat/route/function/PureJavaCrc32.java b/src/main/java/io/mycat/route/function/PureJavaCrc32.java new file mode 100644 index 000000000..0f663a9f9 --- /dev/null +++ b/src/main/java/io/mycat/route/function/PureJavaCrc32.java @@ -0,0 +1,667 @@ +package io.mycat.route.function; + +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + import java.util.HashSet; + import java.util.Set; + import java.util.zip.Checksum; + +/** + * A pure-java implementation of the CRC32 checksum that uses + * the same polynomial as the built-in native CRC32. + * + * This is to avoid the JNI overhead for certain uses of Checksumming + * where many small pieces of data are checksummed in succession. 
+ * + * The current version is ~10x to 1.8x as fast as Sun's native + * java.util.zip.CRC32 in Java 1.6 + * + * @see java.util.zip.CRC32 + * + * This class is copied from hadoop-commons project. + * (The initial patch added PureJavaCrc32 was HADOOP-6148) + */ +public class PureJavaCrc32 implements Checksum { + + /** the current CRC value, bit-flipped */ + private int crc; + + /** Create a new PureJavaCrc32 object. */ + public PureJavaCrc32() { + reset(); + } + + @Override + public long getValue() { + return (~crc) & 0xffffffffL; + } + + @Override + public void reset() { + crc = 0xffffffff; + } + + @Override + public void update(byte[] b, int off, int len) { + int localCrc = crc; + + while(len > 7) { + final int c0 =(b[off+0] ^ localCrc) & 0xff; + final int c1 =(b[off+1] ^ (localCrc >>>= 8)) & 0xff; + final int c2 =(b[off+2] ^ (localCrc >>>= 8)) & 0xff; + final int c3 =(b[off+3] ^ (localCrc >>>= 8)) & 0xff; + localCrc = (T[T8_7_start + c0] ^ T[T8_6_start + c1]) + ^ (T[T8_5_start + c2] ^ T[T8_4_start + c3]); + + final int c4 = b[off+4] & 0xff; + final int c5 = b[off+5] & 0xff; + final int c6 = b[off+6] & 0xff; + final int c7 = b[off+7] & 0xff; + + localCrc ^= (T[T8_3_start + c4] ^ T[T8_2_start + c5]) + ^ (T[T8_1_start + c6] ^ T[T8_0_start + c7]); + + off += 8; + len -= 8; + } + + /* loop unroll - duff's device style */ + switch(len) { + case 7: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)]; + case 6: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)]; + case 5: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)]; + case 4: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)]; + case 3: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)]; + case 2: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)]; + case 1: localCrc = (localCrc >>> 8) ^ T[T8_0_start + ((localCrc ^ b[off++]) & 0xff)]; + default: + /* nothing */ + 
} + + // Publish crc out to object + crc = localCrc; + } + + @Override + final public void update(int b) { + crc = (crc >>> 8) ^ T[T8_0_start + ((crc ^ b) & 0xff)]; + } + + /* + * CRC-32 lookup tables generated by the polynomial 0xEDB88320. + * See also TestPureJavaCrc32.Table. + */ + private static final int T8_0_start = 0*256; + private static final int T8_1_start = 1*256; + private static final int T8_2_start = 2*256; + private static final int T8_3_start = 3*256; + private static final int T8_4_start = 4*256; + private static final int T8_5_start = 5*256; + private static final int T8_6_start = 6*256; + private static final int T8_7_start = 7*256; + + private static final int[] T = new int[] { + /* T8_0 */ + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, + 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, + 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, + 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, + 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, + 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, + 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, + 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, + 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, + 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, + 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, + 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, + 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, + 0x4ADFA541, 0x3DD895D7, 
0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, + 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, + 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, + 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, + 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, + 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, + 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, + 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, + 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, + 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, + 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, + 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, + 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, + 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, + 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, + 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, + 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, + 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, + 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, + 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D, + /* T8_1 */ + 0x00000000, 0x191B3141, 0x32366282, 0x2B2D53C3, + 
0x646CC504, 0x7D77F445, 0x565AA786, 0x4F4196C7, + 0xC8D98A08, 0xD1C2BB49, 0xFAEFE88A, 0xE3F4D9CB, + 0xACB54F0C, 0xB5AE7E4D, 0x9E832D8E, 0x87981CCF, + 0x4AC21251, 0x53D92310, 0x78F470D3, 0x61EF4192, + 0x2EAED755, 0x37B5E614, 0x1C98B5D7, 0x05838496, + 0x821B9859, 0x9B00A918, 0xB02DFADB, 0xA936CB9A, + 0xE6775D5D, 0xFF6C6C1C, 0xD4413FDF, 0xCD5A0E9E, + 0x958424A2, 0x8C9F15E3, 0xA7B24620, 0xBEA97761, + 0xF1E8E1A6, 0xE8F3D0E7, 0xC3DE8324, 0xDAC5B265, + 0x5D5DAEAA, 0x44469FEB, 0x6F6BCC28, 0x7670FD69, + 0x39316BAE, 0x202A5AEF, 0x0B07092C, 0x121C386D, + 0xDF4636F3, 0xC65D07B2, 0xED705471, 0xF46B6530, + 0xBB2AF3F7, 0xA231C2B6, 0x891C9175, 0x9007A034, + 0x179FBCFB, 0x0E848DBA, 0x25A9DE79, 0x3CB2EF38, + 0x73F379FF, 0x6AE848BE, 0x41C51B7D, 0x58DE2A3C, + 0xF0794F05, 0xE9627E44, 0xC24F2D87, 0xDB541CC6, + 0x94158A01, 0x8D0EBB40, 0xA623E883, 0xBF38D9C2, + 0x38A0C50D, 0x21BBF44C, 0x0A96A78F, 0x138D96CE, + 0x5CCC0009, 0x45D73148, 0x6EFA628B, 0x77E153CA, + 0xBABB5D54, 0xA3A06C15, 0x888D3FD6, 0x91960E97, + 0xDED79850, 0xC7CCA911, 0xECE1FAD2, 0xF5FACB93, + 0x7262D75C, 0x6B79E61D, 0x4054B5DE, 0x594F849F, + 0x160E1258, 0x0F152319, 0x243870DA, 0x3D23419B, + 0x65FD6BA7, 0x7CE65AE6, 0x57CB0925, 0x4ED03864, + 0x0191AEA3, 0x188A9FE2, 0x33A7CC21, 0x2ABCFD60, + 0xAD24E1AF, 0xB43FD0EE, 0x9F12832D, 0x8609B26C, + 0xC94824AB, 0xD05315EA, 0xFB7E4629, 0xE2657768, + 0x2F3F79F6, 0x362448B7, 0x1D091B74, 0x04122A35, + 0x4B53BCF2, 0x52488DB3, 0x7965DE70, 0x607EEF31, + 0xE7E6F3FE, 0xFEFDC2BF, 0xD5D0917C, 0xCCCBA03D, + 0x838A36FA, 0x9A9107BB, 0xB1BC5478, 0xA8A76539, + 0x3B83984B, 0x2298A90A, 0x09B5FAC9, 0x10AECB88, + 0x5FEF5D4F, 0x46F46C0E, 0x6DD93FCD, 0x74C20E8C, + 0xF35A1243, 0xEA412302, 0xC16C70C1, 0xD8774180, + 0x9736D747, 0x8E2DE606, 0xA500B5C5, 0xBC1B8484, + 0x71418A1A, 0x685ABB5B, 0x4377E898, 0x5A6CD9D9, + 0x152D4F1E, 0x0C367E5F, 0x271B2D9C, 0x3E001CDD, + 0xB9980012, 0xA0833153, 0x8BAE6290, 0x92B553D1, + 0xDDF4C516, 0xC4EFF457, 0xEFC2A794, 0xF6D996D5, + 0xAE07BCE9, 0xB71C8DA8, 0x9C31DE6B, 0x852AEF2A, + 
0xCA6B79ED, 0xD37048AC, 0xF85D1B6F, 0xE1462A2E, + 0x66DE36E1, 0x7FC507A0, 0x54E85463, 0x4DF36522, + 0x02B2F3E5, 0x1BA9C2A4, 0x30849167, 0x299FA026, + 0xE4C5AEB8, 0xFDDE9FF9, 0xD6F3CC3A, 0xCFE8FD7B, + 0x80A96BBC, 0x99B25AFD, 0xB29F093E, 0xAB84387F, + 0x2C1C24B0, 0x350715F1, 0x1E2A4632, 0x07317773, + 0x4870E1B4, 0x516BD0F5, 0x7A468336, 0x635DB277, + 0xCBFAD74E, 0xD2E1E60F, 0xF9CCB5CC, 0xE0D7848D, + 0xAF96124A, 0xB68D230B, 0x9DA070C8, 0x84BB4189, + 0x03235D46, 0x1A386C07, 0x31153FC4, 0x280E0E85, + 0x674F9842, 0x7E54A903, 0x5579FAC0, 0x4C62CB81, + 0x8138C51F, 0x9823F45E, 0xB30EA79D, 0xAA1596DC, + 0xE554001B, 0xFC4F315A, 0xD7626299, 0xCE7953D8, + 0x49E14F17, 0x50FA7E56, 0x7BD72D95, 0x62CC1CD4, + 0x2D8D8A13, 0x3496BB52, 0x1FBBE891, 0x06A0D9D0, + 0x5E7EF3EC, 0x4765C2AD, 0x6C48916E, 0x7553A02F, + 0x3A1236E8, 0x230907A9, 0x0824546A, 0x113F652B, + 0x96A779E4, 0x8FBC48A5, 0xA4911B66, 0xBD8A2A27, + 0xF2CBBCE0, 0xEBD08DA1, 0xC0FDDE62, 0xD9E6EF23, + 0x14BCE1BD, 0x0DA7D0FC, 0x268A833F, 0x3F91B27E, + 0x70D024B9, 0x69CB15F8, 0x42E6463B, 0x5BFD777A, + 0xDC656BB5, 0xC57E5AF4, 0xEE530937, 0xF7483876, + 0xB809AEB1, 0xA1129FF0, 0x8A3FCC33, 0x9324FD72, + /* T8_2 */ + 0x00000000, 0x01C26A37, 0x0384D46E, 0x0246BE59, + 0x0709A8DC, 0x06CBC2EB, 0x048D7CB2, 0x054F1685, + 0x0E1351B8, 0x0FD13B8F, 0x0D9785D6, 0x0C55EFE1, + 0x091AF964, 0x08D89353, 0x0A9E2D0A, 0x0B5C473D, + 0x1C26A370, 0x1DE4C947, 0x1FA2771E, 0x1E601D29, + 0x1B2F0BAC, 0x1AED619B, 0x18ABDFC2, 0x1969B5F5, + 0x1235F2C8, 0x13F798FF, 0x11B126A6, 0x10734C91, + 0x153C5A14, 0x14FE3023, 0x16B88E7A, 0x177AE44D, + 0x384D46E0, 0x398F2CD7, 0x3BC9928E, 0x3A0BF8B9, + 0x3F44EE3C, 0x3E86840B, 0x3CC03A52, 0x3D025065, + 0x365E1758, 0x379C7D6F, 0x35DAC336, 0x3418A901, + 0x3157BF84, 0x3095D5B3, 0x32D36BEA, 0x331101DD, + 0x246BE590, 0x25A98FA7, 0x27EF31FE, 0x262D5BC9, + 0x23624D4C, 0x22A0277B, 0x20E69922, 0x2124F315, + 0x2A78B428, 0x2BBADE1F, 0x29FC6046, 0x283E0A71, + 0x2D711CF4, 0x2CB376C3, 0x2EF5C89A, 0x2F37A2AD, + 0x709A8DC0, 0x7158E7F7, 0x731E59AE, 
0x72DC3399, + 0x7793251C, 0x76514F2B, 0x7417F172, 0x75D59B45, + 0x7E89DC78, 0x7F4BB64F, 0x7D0D0816, 0x7CCF6221, + 0x798074A4, 0x78421E93, 0x7A04A0CA, 0x7BC6CAFD, + 0x6CBC2EB0, 0x6D7E4487, 0x6F38FADE, 0x6EFA90E9, + 0x6BB5866C, 0x6A77EC5B, 0x68315202, 0x69F33835, + 0x62AF7F08, 0x636D153F, 0x612BAB66, 0x60E9C151, + 0x65A6D7D4, 0x6464BDE3, 0x662203BA, 0x67E0698D, + 0x48D7CB20, 0x4915A117, 0x4B531F4E, 0x4A917579, + 0x4FDE63FC, 0x4E1C09CB, 0x4C5AB792, 0x4D98DDA5, + 0x46C49A98, 0x4706F0AF, 0x45404EF6, 0x448224C1, + 0x41CD3244, 0x400F5873, 0x4249E62A, 0x438B8C1D, + 0x54F16850, 0x55330267, 0x5775BC3E, 0x56B7D609, + 0x53F8C08C, 0x523AAABB, 0x507C14E2, 0x51BE7ED5, + 0x5AE239E8, 0x5B2053DF, 0x5966ED86, 0x58A487B1, + 0x5DEB9134, 0x5C29FB03, 0x5E6F455A, 0x5FAD2F6D, + 0xE1351B80, 0xE0F771B7, 0xE2B1CFEE, 0xE373A5D9, + 0xE63CB35C, 0xE7FED96B, 0xE5B86732, 0xE47A0D05, + 0xEF264A38, 0xEEE4200F, 0xECA29E56, 0xED60F461, + 0xE82FE2E4, 0xE9ED88D3, 0xEBAB368A, 0xEA695CBD, + 0xFD13B8F0, 0xFCD1D2C7, 0xFE976C9E, 0xFF5506A9, + 0xFA1A102C, 0xFBD87A1B, 0xF99EC442, 0xF85CAE75, + 0xF300E948, 0xF2C2837F, 0xF0843D26, 0xF1465711, + 0xF4094194, 0xF5CB2BA3, 0xF78D95FA, 0xF64FFFCD, + 0xD9785D60, 0xD8BA3757, 0xDAFC890E, 0xDB3EE339, + 0xDE71F5BC, 0xDFB39F8B, 0xDDF521D2, 0xDC374BE5, + 0xD76B0CD8, 0xD6A966EF, 0xD4EFD8B6, 0xD52DB281, + 0xD062A404, 0xD1A0CE33, 0xD3E6706A, 0xD2241A5D, + 0xC55EFE10, 0xC49C9427, 0xC6DA2A7E, 0xC7184049, + 0xC25756CC, 0xC3953CFB, 0xC1D382A2, 0xC011E895, + 0xCB4DAFA8, 0xCA8FC59F, 0xC8C97BC6, 0xC90B11F1, + 0xCC440774, 0xCD866D43, 0xCFC0D31A, 0xCE02B92D, + 0x91AF9640, 0x906DFC77, 0x922B422E, 0x93E92819, + 0x96A63E9C, 0x976454AB, 0x9522EAF2, 0x94E080C5, + 0x9FBCC7F8, 0x9E7EADCF, 0x9C381396, 0x9DFA79A1, + 0x98B56F24, 0x99770513, 0x9B31BB4A, 0x9AF3D17D, + 0x8D893530, 0x8C4B5F07, 0x8E0DE15E, 0x8FCF8B69, + 0x8A809DEC, 0x8B42F7DB, 0x89044982, 0x88C623B5, + 0x839A6488, 0x82580EBF, 0x801EB0E6, 0x81DCDAD1, + 0x8493CC54, 0x8551A663, 0x8717183A, 0x86D5720D, + 0xA9E2D0A0, 0xA820BA97, 0xAA6604CE, 
0xABA46EF9, + 0xAEEB787C, 0xAF29124B, 0xAD6FAC12, 0xACADC625, + 0xA7F18118, 0xA633EB2F, 0xA4755576, 0xA5B73F41, + 0xA0F829C4, 0xA13A43F3, 0xA37CFDAA, 0xA2BE979D, + 0xB5C473D0, 0xB40619E7, 0xB640A7BE, 0xB782CD89, + 0xB2CDDB0C, 0xB30FB13B, 0xB1490F62, 0xB08B6555, + 0xBBD72268, 0xBA15485F, 0xB853F606, 0xB9919C31, + 0xBCDE8AB4, 0xBD1CE083, 0xBF5A5EDA, 0xBE9834ED, + /* T8_3 */ + 0x00000000, 0xB8BC6765, 0xAA09C88B, 0x12B5AFEE, + 0x8F629757, 0x37DEF032, 0x256B5FDC, 0x9DD738B9, + 0xC5B428EF, 0x7D084F8A, 0x6FBDE064, 0xD7018701, + 0x4AD6BFB8, 0xF26AD8DD, 0xE0DF7733, 0x58631056, + 0x5019579F, 0xE8A530FA, 0xFA109F14, 0x42ACF871, + 0xDF7BC0C8, 0x67C7A7AD, 0x75720843, 0xCDCE6F26, + 0x95AD7F70, 0x2D111815, 0x3FA4B7FB, 0x8718D09E, + 0x1ACFE827, 0xA2738F42, 0xB0C620AC, 0x087A47C9, + 0xA032AF3E, 0x188EC85B, 0x0A3B67B5, 0xB28700D0, + 0x2F503869, 0x97EC5F0C, 0x8559F0E2, 0x3DE59787, + 0x658687D1, 0xDD3AE0B4, 0xCF8F4F5A, 0x7733283F, + 0xEAE41086, 0x525877E3, 0x40EDD80D, 0xF851BF68, + 0xF02BF8A1, 0x48979FC4, 0x5A22302A, 0xE29E574F, + 0x7F496FF6, 0xC7F50893, 0xD540A77D, 0x6DFCC018, + 0x359FD04E, 0x8D23B72B, 0x9F9618C5, 0x272A7FA0, + 0xBAFD4719, 0x0241207C, 0x10F48F92, 0xA848E8F7, + 0x9B14583D, 0x23A83F58, 0x311D90B6, 0x89A1F7D3, + 0x1476CF6A, 0xACCAA80F, 0xBE7F07E1, 0x06C36084, + 0x5EA070D2, 0xE61C17B7, 0xF4A9B859, 0x4C15DF3C, + 0xD1C2E785, 0x697E80E0, 0x7BCB2F0E, 0xC377486B, + 0xCB0D0FA2, 0x73B168C7, 0x6104C729, 0xD9B8A04C, + 0x446F98F5, 0xFCD3FF90, 0xEE66507E, 0x56DA371B, + 0x0EB9274D, 0xB6054028, 0xA4B0EFC6, 0x1C0C88A3, + 0x81DBB01A, 0x3967D77F, 0x2BD27891, 0x936E1FF4, + 0x3B26F703, 0x839A9066, 0x912F3F88, 0x299358ED, + 0xB4446054, 0x0CF80731, 0x1E4DA8DF, 0xA6F1CFBA, + 0xFE92DFEC, 0x462EB889, 0x549B1767, 0xEC277002, + 0x71F048BB, 0xC94C2FDE, 0xDBF98030, 0x6345E755, + 0x6B3FA09C, 0xD383C7F9, 0xC1366817, 0x798A0F72, + 0xE45D37CB, 0x5CE150AE, 0x4E54FF40, 0xF6E89825, + 0xAE8B8873, 0x1637EF16, 0x048240F8, 0xBC3E279D, + 0x21E91F24, 0x99557841, 0x8BE0D7AF, 0x335CB0CA, + 0xED59B63B, 
0x55E5D15E, 0x47507EB0, 0xFFEC19D5, + 0x623B216C, 0xDA874609, 0xC832E9E7, 0x708E8E82, + 0x28ED9ED4, 0x9051F9B1, 0x82E4565F, 0x3A58313A, + 0xA78F0983, 0x1F336EE6, 0x0D86C108, 0xB53AA66D, + 0xBD40E1A4, 0x05FC86C1, 0x1749292F, 0xAFF54E4A, + 0x322276F3, 0x8A9E1196, 0x982BBE78, 0x2097D91D, + 0x78F4C94B, 0xC048AE2E, 0xD2FD01C0, 0x6A4166A5, + 0xF7965E1C, 0x4F2A3979, 0x5D9F9697, 0xE523F1F2, + 0x4D6B1905, 0xF5D77E60, 0xE762D18E, 0x5FDEB6EB, + 0xC2098E52, 0x7AB5E937, 0x680046D9, 0xD0BC21BC, + 0x88DF31EA, 0x3063568F, 0x22D6F961, 0x9A6A9E04, + 0x07BDA6BD, 0xBF01C1D8, 0xADB46E36, 0x15080953, + 0x1D724E9A, 0xA5CE29FF, 0xB77B8611, 0x0FC7E174, + 0x9210D9CD, 0x2AACBEA8, 0x38191146, 0x80A57623, + 0xD8C66675, 0x607A0110, 0x72CFAEFE, 0xCA73C99B, + 0x57A4F122, 0xEF189647, 0xFDAD39A9, 0x45115ECC, + 0x764DEE06, 0xCEF18963, 0xDC44268D, 0x64F841E8, + 0xF92F7951, 0x41931E34, 0x5326B1DA, 0xEB9AD6BF, + 0xB3F9C6E9, 0x0B45A18C, 0x19F00E62, 0xA14C6907, + 0x3C9B51BE, 0x842736DB, 0x96929935, 0x2E2EFE50, + 0x2654B999, 0x9EE8DEFC, 0x8C5D7112, 0x34E11677, + 0xA9362ECE, 0x118A49AB, 0x033FE645, 0xBB838120, + 0xE3E09176, 0x5B5CF613, 0x49E959FD, 0xF1553E98, + 0x6C820621, 0xD43E6144, 0xC68BCEAA, 0x7E37A9CF, + 0xD67F4138, 0x6EC3265D, 0x7C7689B3, 0xC4CAEED6, + 0x591DD66F, 0xE1A1B10A, 0xF3141EE4, 0x4BA87981, + 0x13CB69D7, 0xAB770EB2, 0xB9C2A15C, 0x017EC639, + 0x9CA9FE80, 0x241599E5, 0x36A0360B, 0x8E1C516E, + 0x866616A7, 0x3EDA71C2, 0x2C6FDE2C, 0x94D3B949, + 0x090481F0, 0xB1B8E695, 0xA30D497B, 0x1BB12E1E, + 0x43D23E48, 0xFB6E592D, 0xE9DBF6C3, 0x516791A6, + 0xCCB0A91F, 0x740CCE7A, 0x66B96194, 0xDE0506F1, + /* T8_4 */ + 0x00000000, 0x3D6029B0, 0x7AC05360, 0x47A07AD0, + 0xF580A6C0, 0xC8E08F70, 0x8F40F5A0, 0xB220DC10, + 0x30704BC1, 0x0D106271, 0x4AB018A1, 0x77D03111, + 0xC5F0ED01, 0xF890C4B1, 0xBF30BE61, 0x825097D1, + 0x60E09782, 0x5D80BE32, 0x1A20C4E2, 0x2740ED52, + 0x95603142, 0xA80018F2, 0xEFA06222, 0xD2C04B92, + 0x5090DC43, 0x6DF0F5F3, 0x2A508F23, 0x1730A693, + 0xA5107A83, 0x98705333, 0xDFD029E3, 0xE2B00053, 
+ 0xC1C12F04, 0xFCA106B4, 0xBB017C64, 0x866155D4, + 0x344189C4, 0x0921A074, 0x4E81DAA4, 0x73E1F314, + 0xF1B164C5, 0xCCD14D75, 0x8B7137A5, 0xB6111E15, + 0x0431C205, 0x3951EBB5, 0x7EF19165, 0x4391B8D5, + 0xA121B886, 0x9C419136, 0xDBE1EBE6, 0xE681C256, + 0x54A11E46, 0x69C137F6, 0x2E614D26, 0x13016496, + 0x9151F347, 0xAC31DAF7, 0xEB91A027, 0xD6F18997, + 0x64D15587, 0x59B17C37, 0x1E1106E7, 0x23712F57, + 0x58F35849, 0x659371F9, 0x22330B29, 0x1F532299, + 0xAD73FE89, 0x9013D739, 0xD7B3ADE9, 0xEAD38459, + 0x68831388, 0x55E33A38, 0x124340E8, 0x2F236958, + 0x9D03B548, 0xA0639CF8, 0xE7C3E628, 0xDAA3CF98, + 0x3813CFCB, 0x0573E67B, 0x42D39CAB, 0x7FB3B51B, + 0xCD93690B, 0xF0F340BB, 0xB7533A6B, 0x8A3313DB, + 0x0863840A, 0x3503ADBA, 0x72A3D76A, 0x4FC3FEDA, + 0xFDE322CA, 0xC0830B7A, 0x872371AA, 0xBA43581A, + 0x9932774D, 0xA4525EFD, 0xE3F2242D, 0xDE920D9D, + 0x6CB2D18D, 0x51D2F83D, 0x167282ED, 0x2B12AB5D, + 0xA9423C8C, 0x9422153C, 0xD3826FEC, 0xEEE2465C, + 0x5CC29A4C, 0x61A2B3FC, 0x2602C92C, 0x1B62E09C, + 0xF9D2E0CF, 0xC4B2C97F, 0x8312B3AF, 0xBE729A1F, + 0x0C52460F, 0x31326FBF, 0x7692156F, 0x4BF23CDF, + 0xC9A2AB0E, 0xF4C282BE, 0xB362F86E, 0x8E02D1DE, + 0x3C220DCE, 0x0142247E, 0x46E25EAE, 0x7B82771E, + 0xB1E6B092, 0x8C869922, 0xCB26E3F2, 0xF646CA42, + 0x44661652, 0x79063FE2, 0x3EA64532, 0x03C66C82, + 0x8196FB53, 0xBCF6D2E3, 0xFB56A833, 0xC6368183, + 0x74165D93, 0x49767423, 0x0ED60EF3, 0x33B62743, + 0xD1062710, 0xEC660EA0, 0xABC67470, 0x96A65DC0, + 0x248681D0, 0x19E6A860, 0x5E46D2B0, 0x6326FB00, + 0xE1766CD1, 0xDC164561, 0x9BB63FB1, 0xA6D61601, + 0x14F6CA11, 0x2996E3A1, 0x6E369971, 0x5356B0C1, + 0x70279F96, 0x4D47B626, 0x0AE7CCF6, 0x3787E546, + 0x85A73956, 0xB8C710E6, 0xFF676A36, 0xC2074386, + 0x4057D457, 0x7D37FDE7, 0x3A978737, 0x07F7AE87, + 0xB5D77297, 0x88B75B27, 0xCF1721F7, 0xF2770847, + 0x10C70814, 0x2DA721A4, 0x6A075B74, 0x576772C4, + 0xE547AED4, 0xD8278764, 0x9F87FDB4, 0xA2E7D404, + 0x20B743D5, 0x1DD76A65, 0x5A7710B5, 0x67173905, + 0xD537E515, 0xE857CCA5, 0xAFF7B675, 0x92979FC5, 
+ 0xE915E8DB, 0xD475C16B, 0x93D5BBBB, 0xAEB5920B, + 0x1C954E1B, 0x21F567AB, 0x66551D7B, 0x5B3534CB, + 0xD965A31A, 0xE4058AAA, 0xA3A5F07A, 0x9EC5D9CA, + 0x2CE505DA, 0x11852C6A, 0x562556BA, 0x6B457F0A, + 0x89F57F59, 0xB49556E9, 0xF3352C39, 0xCE550589, + 0x7C75D999, 0x4115F029, 0x06B58AF9, 0x3BD5A349, + 0xB9853498, 0x84E51D28, 0xC34567F8, 0xFE254E48, + 0x4C059258, 0x7165BBE8, 0x36C5C138, 0x0BA5E888, + 0x28D4C7DF, 0x15B4EE6F, 0x521494BF, 0x6F74BD0F, + 0xDD54611F, 0xE03448AF, 0xA794327F, 0x9AF41BCF, + 0x18A48C1E, 0x25C4A5AE, 0x6264DF7E, 0x5F04F6CE, + 0xED242ADE, 0xD044036E, 0x97E479BE, 0xAA84500E, + 0x4834505D, 0x755479ED, 0x32F4033D, 0x0F942A8D, + 0xBDB4F69D, 0x80D4DF2D, 0xC774A5FD, 0xFA148C4D, + 0x78441B9C, 0x4524322C, 0x028448FC, 0x3FE4614C, + 0x8DC4BD5C, 0xB0A494EC, 0xF704EE3C, 0xCA64C78C, + /* T8_5 */ + 0x00000000, 0xCB5CD3A5, 0x4DC8A10B, 0x869472AE, + 0x9B914216, 0x50CD91B3, 0xD659E31D, 0x1D0530B8, + 0xEC53826D, 0x270F51C8, 0xA19B2366, 0x6AC7F0C3, + 0x77C2C07B, 0xBC9E13DE, 0x3A0A6170, 0xF156B2D5, + 0x03D6029B, 0xC88AD13E, 0x4E1EA390, 0x85427035, + 0x9847408D, 0x531B9328, 0xD58FE186, 0x1ED33223, + 0xEF8580F6, 0x24D95353, 0xA24D21FD, 0x6911F258, + 0x7414C2E0, 0xBF481145, 0x39DC63EB, 0xF280B04E, + 0x07AC0536, 0xCCF0D693, 0x4A64A43D, 0x81387798, + 0x9C3D4720, 0x57619485, 0xD1F5E62B, 0x1AA9358E, + 0xEBFF875B, 0x20A354FE, 0xA6372650, 0x6D6BF5F5, + 0x706EC54D, 0xBB3216E8, 0x3DA66446, 0xF6FAB7E3, + 0x047A07AD, 0xCF26D408, 0x49B2A6A6, 0x82EE7503, + 0x9FEB45BB, 0x54B7961E, 0xD223E4B0, 0x197F3715, + 0xE82985C0, 0x23755665, 0xA5E124CB, 0x6EBDF76E, + 0x73B8C7D6, 0xB8E41473, 0x3E7066DD, 0xF52CB578, + 0x0F580A6C, 0xC404D9C9, 0x4290AB67, 0x89CC78C2, + 0x94C9487A, 0x5F959BDF, 0xD901E971, 0x125D3AD4, + 0xE30B8801, 0x28575BA4, 0xAEC3290A, 0x659FFAAF, + 0x789ACA17, 0xB3C619B2, 0x35526B1C, 0xFE0EB8B9, + 0x0C8E08F7, 0xC7D2DB52, 0x4146A9FC, 0x8A1A7A59, + 0x971F4AE1, 0x5C439944, 0xDAD7EBEA, 0x118B384F, + 0xE0DD8A9A, 0x2B81593F, 0xAD152B91, 0x6649F834, + 0x7B4CC88C, 0xB0101B29, 
0x36846987, 0xFDD8BA22, + 0x08F40F5A, 0xC3A8DCFF, 0x453CAE51, 0x8E607DF4, + 0x93654D4C, 0x58399EE9, 0xDEADEC47, 0x15F13FE2, + 0xE4A78D37, 0x2FFB5E92, 0xA96F2C3C, 0x6233FF99, + 0x7F36CF21, 0xB46A1C84, 0x32FE6E2A, 0xF9A2BD8F, + 0x0B220DC1, 0xC07EDE64, 0x46EAACCA, 0x8DB67F6F, + 0x90B34FD7, 0x5BEF9C72, 0xDD7BEEDC, 0x16273D79, + 0xE7718FAC, 0x2C2D5C09, 0xAAB92EA7, 0x61E5FD02, + 0x7CE0CDBA, 0xB7BC1E1F, 0x31286CB1, 0xFA74BF14, + 0x1EB014D8, 0xD5ECC77D, 0x5378B5D3, 0x98246676, + 0x852156CE, 0x4E7D856B, 0xC8E9F7C5, 0x03B52460, + 0xF2E396B5, 0x39BF4510, 0xBF2B37BE, 0x7477E41B, + 0x6972D4A3, 0xA22E0706, 0x24BA75A8, 0xEFE6A60D, + 0x1D661643, 0xD63AC5E6, 0x50AEB748, 0x9BF264ED, + 0x86F75455, 0x4DAB87F0, 0xCB3FF55E, 0x006326FB, + 0xF135942E, 0x3A69478B, 0xBCFD3525, 0x77A1E680, + 0x6AA4D638, 0xA1F8059D, 0x276C7733, 0xEC30A496, + 0x191C11EE, 0xD240C24B, 0x54D4B0E5, 0x9F886340, + 0x828D53F8, 0x49D1805D, 0xCF45F2F3, 0x04192156, + 0xF54F9383, 0x3E134026, 0xB8873288, 0x73DBE12D, + 0x6EDED195, 0xA5820230, 0x2316709E, 0xE84AA33B, + 0x1ACA1375, 0xD196C0D0, 0x5702B27E, 0x9C5E61DB, + 0x815B5163, 0x4A0782C6, 0xCC93F068, 0x07CF23CD, + 0xF6999118, 0x3DC542BD, 0xBB513013, 0x700DE3B6, + 0x6D08D30E, 0xA65400AB, 0x20C07205, 0xEB9CA1A0, + 0x11E81EB4, 0xDAB4CD11, 0x5C20BFBF, 0x977C6C1A, + 0x8A795CA2, 0x41258F07, 0xC7B1FDA9, 0x0CED2E0C, + 0xFDBB9CD9, 0x36E74F7C, 0xB0733DD2, 0x7B2FEE77, + 0x662ADECF, 0xAD760D6A, 0x2BE27FC4, 0xE0BEAC61, + 0x123E1C2F, 0xD962CF8A, 0x5FF6BD24, 0x94AA6E81, + 0x89AF5E39, 0x42F38D9C, 0xC467FF32, 0x0F3B2C97, + 0xFE6D9E42, 0x35314DE7, 0xB3A53F49, 0x78F9ECEC, + 0x65FCDC54, 0xAEA00FF1, 0x28347D5F, 0xE368AEFA, + 0x16441B82, 0xDD18C827, 0x5B8CBA89, 0x90D0692C, + 0x8DD55994, 0x46898A31, 0xC01DF89F, 0x0B412B3A, + 0xFA1799EF, 0x314B4A4A, 0xB7DF38E4, 0x7C83EB41, + 0x6186DBF9, 0xAADA085C, 0x2C4E7AF2, 0xE712A957, + 0x15921919, 0xDECECABC, 0x585AB812, 0x93066BB7, + 0x8E035B0F, 0x455F88AA, 0xC3CBFA04, 0x089729A1, + 0xF9C19B74, 0x329D48D1, 0xB4093A7F, 0x7F55E9DA, + 0x6250D962, 0xA90C0AC7, 
0x2F987869, 0xE4C4ABCC, + /* T8_6 */ + 0x00000000, 0xA6770BB4, 0x979F1129, 0x31E81A9D, + 0xF44F2413, 0x52382FA7, 0x63D0353A, 0xC5A73E8E, + 0x33EF4E67, 0x959845D3, 0xA4705F4E, 0x020754FA, + 0xC7A06A74, 0x61D761C0, 0x503F7B5D, 0xF64870E9, + 0x67DE9CCE, 0xC1A9977A, 0xF0418DE7, 0x56368653, + 0x9391B8DD, 0x35E6B369, 0x040EA9F4, 0xA279A240, + 0x5431D2A9, 0xF246D91D, 0xC3AEC380, 0x65D9C834, + 0xA07EF6BA, 0x0609FD0E, 0x37E1E793, 0x9196EC27, + 0xCFBD399C, 0x69CA3228, 0x582228B5, 0xFE552301, + 0x3BF21D8F, 0x9D85163B, 0xAC6D0CA6, 0x0A1A0712, + 0xFC5277FB, 0x5A257C4F, 0x6BCD66D2, 0xCDBA6D66, + 0x081D53E8, 0xAE6A585C, 0x9F8242C1, 0x39F54975, + 0xA863A552, 0x0E14AEE6, 0x3FFCB47B, 0x998BBFCF, + 0x5C2C8141, 0xFA5B8AF5, 0xCBB39068, 0x6DC49BDC, + 0x9B8CEB35, 0x3DFBE081, 0x0C13FA1C, 0xAA64F1A8, + 0x6FC3CF26, 0xC9B4C492, 0xF85CDE0F, 0x5E2BD5BB, + 0x440B7579, 0xE27C7ECD, 0xD3946450, 0x75E36FE4, + 0xB044516A, 0x16335ADE, 0x27DB4043, 0x81AC4BF7, + 0x77E43B1E, 0xD19330AA, 0xE07B2A37, 0x460C2183, + 0x83AB1F0D, 0x25DC14B9, 0x14340E24, 0xB2430590, + 0x23D5E9B7, 0x85A2E203, 0xB44AF89E, 0x123DF32A, + 0xD79ACDA4, 0x71EDC610, 0x4005DC8D, 0xE672D739, + 0x103AA7D0, 0xB64DAC64, 0x87A5B6F9, 0x21D2BD4D, + 0xE47583C3, 0x42028877, 0x73EA92EA, 0xD59D995E, + 0x8BB64CE5, 0x2DC14751, 0x1C295DCC, 0xBA5E5678, + 0x7FF968F6, 0xD98E6342, 0xE86679DF, 0x4E11726B, + 0xB8590282, 0x1E2E0936, 0x2FC613AB, 0x89B1181F, + 0x4C162691, 0xEA612D25, 0xDB8937B8, 0x7DFE3C0C, + 0xEC68D02B, 0x4A1FDB9F, 0x7BF7C102, 0xDD80CAB6, + 0x1827F438, 0xBE50FF8C, 0x8FB8E511, 0x29CFEEA5, + 0xDF879E4C, 0x79F095F8, 0x48188F65, 0xEE6F84D1, + 0x2BC8BA5F, 0x8DBFB1EB, 0xBC57AB76, 0x1A20A0C2, + 0x8816EAF2, 0x2E61E146, 0x1F89FBDB, 0xB9FEF06F, + 0x7C59CEE1, 0xDA2EC555, 0xEBC6DFC8, 0x4DB1D47C, + 0xBBF9A495, 0x1D8EAF21, 0x2C66B5BC, 0x8A11BE08, + 0x4FB68086, 0xE9C18B32, 0xD82991AF, 0x7E5E9A1B, + 0xEFC8763C, 0x49BF7D88, 0x78576715, 0xDE206CA1, + 0x1B87522F, 0xBDF0599B, 0x8C184306, 0x2A6F48B2, + 0xDC27385B, 0x7A5033EF, 0x4BB82972, 0xEDCF22C6, + 
0x28681C48, 0x8E1F17FC, 0xBFF70D61, 0x198006D5, + 0x47ABD36E, 0xE1DCD8DA, 0xD034C247, 0x7643C9F3, + 0xB3E4F77D, 0x1593FCC9, 0x247BE654, 0x820CEDE0, + 0x74449D09, 0xD23396BD, 0xE3DB8C20, 0x45AC8794, + 0x800BB91A, 0x267CB2AE, 0x1794A833, 0xB1E3A387, + 0x20754FA0, 0x86024414, 0xB7EA5E89, 0x119D553D, + 0xD43A6BB3, 0x724D6007, 0x43A57A9A, 0xE5D2712E, + 0x139A01C7, 0xB5ED0A73, 0x840510EE, 0x22721B5A, + 0xE7D525D4, 0x41A22E60, 0x704A34FD, 0xD63D3F49, + 0xCC1D9F8B, 0x6A6A943F, 0x5B828EA2, 0xFDF58516, + 0x3852BB98, 0x9E25B02C, 0xAFCDAAB1, 0x09BAA105, + 0xFFF2D1EC, 0x5985DA58, 0x686DC0C5, 0xCE1ACB71, + 0x0BBDF5FF, 0xADCAFE4B, 0x9C22E4D6, 0x3A55EF62, + 0xABC30345, 0x0DB408F1, 0x3C5C126C, 0x9A2B19D8, + 0x5F8C2756, 0xF9FB2CE2, 0xC813367F, 0x6E643DCB, + 0x982C4D22, 0x3E5B4696, 0x0FB35C0B, 0xA9C457BF, + 0x6C636931, 0xCA146285, 0xFBFC7818, 0x5D8B73AC, + 0x03A0A617, 0xA5D7ADA3, 0x943FB73E, 0x3248BC8A, + 0xF7EF8204, 0x519889B0, 0x6070932D, 0xC6079899, + 0x304FE870, 0x9638E3C4, 0xA7D0F959, 0x01A7F2ED, + 0xC400CC63, 0x6277C7D7, 0x539FDD4A, 0xF5E8D6FE, + 0x647E3AD9, 0xC209316D, 0xF3E12BF0, 0x55962044, + 0x90311ECA, 0x3646157E, 0x07AE0FE3, 0xA1D90457, + 0x579174BE, 0xF1E67F0A, 0xC00E6597, 0x66796E23, + 0xA3DE50AD, 0x05A95B19, 0x34414184, 0x92364A30, + /* T8_7 */ + 0x00000000, 0xCCAA009E, 0x4225077D, 0x8E8F07E3, + 0x844A0EFA, 0x48E00E64, 0xC66F0987, 0x0AC50919, + 0xD3E51BB5, 0x1F4F1B2B, 0x91C01CC8, 0x5D6A1C56, + 0x57AF154F, 0x9B0515D1, 0x158A1232, 0xD92012AC, + 0x7CBB312B, 0xB01131B5, 0x3E9E3656, 0xF23436C8, + 0xF8F13FD1, 0x345B3F4F, 0xBAD438AC, 0x767E3832, + 0xAF5E2A9E, 0x63F42A00, 0xED7B2DE3, 0x21D12D7D, + 0x2B142464, 0xE7BE24FA, 0x69312319, 0xA59B2387, + 0xF9766256, 0x35DC62C8, 0xBB53652B, 0x77F965B5, + 0x7D3C6CAC, 0xB1966C32, 0x3F196BD1, 0xF3B36B4F, + 0x2A9379E3, 0xE639797D, 0x68B67E9E, 0xA41C7E00, + 0xAED97719, 0x62737787, 0xECFC7064, 0x205670FA, + 0x85CD537D, 0x496753E3, 0xC7E85400, 0x0B42549E, + 0x01875D87, 0xCD2D5D19, 0x43A25AFA, 0x8F085A64, + 0x562848C8, 0x9A824856, 0x140D4FB5, 
0xD8A74F2B, + 0xD2624632, 0x1EC846AC, 0x9047414F, 0x5CED41D1, + 0x299DC2ED, 0xE537C273, 0x6BB8C590, 0xA712C50E, + 0xADD7CC17, 0x617DCC89, 0xEFF2CB6A, 0x2358CBF4, + 0xFA78D958, 0x36D2D9C6, 0xB85DDE25, 0x74F7DEBB, + 0x7E32D7A2, 0xB298D73C, 0x3C17D0DF, 0xF0BDD041, + 0x5526F3C6, 0x998CF358, 0x1703F4BB, 0xDBA9F425, + 0xD16CFD3C, 0x1DC6FDA2, 0x9349FA41, 0x5FE3FADF, + 0x86C3E873, 0x4A69E8ED, 0xC4E6EF0E, 0x084CEF90, + 0x0289E689, 0xCE23E617, 0x40ACE1F4, 0x8C06E16A, + 0xD0EBA0BB, 0x1C41A025, 0x92CEA7C6, 0x5E64A758, + 0x54A1AE41, 0x980BAEDF, 0x1684A93C, 0xDA2EA9A2, + 0x030EBB0E, 0xCFA4BB90, 0x412BBC73, 0x8D81BCED, + 0x8744B5F4, 0x4BEEB56A, 0xC561B289, 0x09CBB217, + 0xAC509190, 0x60FA910E, 0xEE7596ED, 0x22DF9673, + 0x281A9F6A, 0xE4B09FF4, 0x6A3F9817, 0xA6959889, + 0x7FB58A25, 0xB31F8ABB, 0x3D908D58, 0xF13A8DC6, + 0xFBFF84DF, 0x37558441, 0xB9DA83A2, 0x7570833C, + 0x533B85DA, 0x9F918544, 0x111E82A7, 0xDDB48239, + 0xD7718B20, 0x1BDB8BBE, 0x95548C5D, 0x59FE8CC3, + 0x80DE9E6F, 0x4C749EF1, 0xC2FB9912, 0x0E51998C, + 0x04949095, 0xC83E900B, 0x46B197E8, 0x8A1B9776, + 0x2F80B4F1, 0xE32AB46F, 0x6DA5B38C, 0xA10FB312, + 0xABCABA0B, 0x6760BA95, 0xE9EFBD76, 0x2545BDE8, + 0xFC65AF44, 0x30CFAFDA, 0xBE40A839, 0x72EAA8A7, + 0x782FA1BE, 0xB485A120, 0x3A0AA6C3, 0xF6A0A65D, + 0xAA4DE78C, 0x66E7E712, 0xE868E0F1, 0x24C2E06F, + 0x2E07E976, 0xE2ADE9E8, 0x6C22EE0B, 0xA088EE95, + 0x79A8FC39, 0xB502FCA7, 0x3B8DFB44, 0xF727FBDA, + 0xFDE2F2C3, 0x3148F25D, 0xBFC7F5BE, 0x736DF520, + 0xD6F6D6A7, 0x1A5CD639, 0x94D3D1DA, 0x5879D144, + 0x52BCD85D, 0x9E16D8C3, 0x1099DF20, 0xDC33DFBE, + 0x0513CD12, 0xC9B9CD8C, 0x4736CA6F, 0x8B9CCAF1, + 0x8159C3E8, 0x4DF3C376, 0xC37CC495, 0x0FD6C40B, + 0x7AA64737, 0xB60C47A9, 0x3883404A, 0xF42940D4, + 0xFEEC49CD, 0x32464953, 0xBCC94EB0, 0x70634E2E, + 0xA9435C82, 0x65E95C1C, 0xEB665BFF, 0x27CC5B61, + 0x2D095278, 0xE1A352E6, 0x6F2C5505, 0xA386559B, + 0x061D761C, 0xCAB77682, 0x44387161, 0x889271FF, + 0x825778E6, 0x4EFD7878, 0xC0727F9B, 0x0CD87F05, + 0xD5F86DA9, 0x19526D37, 0x97DD6AD4, 
0x5B776A4A, + 0x51B26353, 0x9D1863CD, 0x1397642E, 0xDF3D64B0, + 0x83D02561, 0x4F7A25FF, 0xC1F5221C, 0x0D5F2282, + 0x079A2B9B, 0xCB302B05, 0x45BF2CE6, 0x89152C78, + 0x50353ED4, 0x9C9F3E4A, 0x121039A9, 0xDEBA3937, + 0xD47F302E, 0x18D530B0, 0x965A3753, 0x5AF037CD, + 0xFF6B144A, 0x33C114D4, 0xBD4E1337, 0x71E413A9, + 0x7B211AB0, 0xB78B1A2E, 0x39041DCD, 0xF5AE1D53, + 0x2C8E0FFF, 0xE0240F61, 0x6EAB0882, 0xA201081C, + 0xA8C40105, 0x646E019B, 0xEAE10678, 0x264B06E6 + }; + + public static void main(String[] args) { + Set xx=new HashSet(); + long start=System.currentTimeMillis(); + int i1 = 100000000; + for (int i = 0; i< i1; i++) + { + PureJavaCrc32 crc32 = new PureJavaCrc32(); + byte[] bytes = String.valueOf(i).getBytes(); + crc32.update(bytes,0,bytes.length); + long x = crc32.getValue(); + x=x%65535; + if(xx.contains(x)) + { + System.out.println(x); + } + + // xx.add(x); + + } + long used=System.currentTimeMillis()-start; + + System.out.println("tps "+i1*1000.0/used); + System.out.println(".............................."+xx.size()); + + } +} diff --git a/src/main/java/io/mycat/route/function/ReloadFunction.java b/src/main/java/io/mycat/route/function/ReloadFunction.java new file mode 100644 index 000000000..879c4052d --- /dev/null +++ b/src/main/java/io/mycat/route/function/ReloadFunction.java @@ -0,0 +1,8 @@ +package io.mycat.route.function; + +/** + * Created by magicdoom on 2016/9/17. + */ +public interface ReloadFunction { + void reload(); +} diff --git a/src/main/java/io/mycat/route/function/SlotFunction.java b/src/main/java/io/mycat/route/function/SlotFunction.java new file mode 100644 index 000000000..28107632a --- /dev/null +++ b/src/main/java/io/mycat/route/function/SlotFunction.java @@ -0,0 +1,8 @@ +package io.mycat.route.function; + +/** + * Created by magicdoom on 2016/9/17. 
+ */ +public interface SlotFunction { + int slotValue(); +} diff --git a/src/main/java/io/mycat/route/function/TableRuleAware.java b/src/main/java/io/mycat/route/function/TableRuleAware.java new file mode 100644 index 000000000..a46d43f46 --- /dev/null +++ b/src/main/java/io/mycat/route/function/TableRuleAware.java @@ -0,0 +1,14 @@ +package io.mycat.route.function; + +/** + * Created by magicdoom on 2016/9/5. + * 考虑一类新分片算法 属于有状态算法 + * 比如PartitionByCRC32PreSlot 如果迁移过数据的话,slot映射规则会进行改变 + * 所以必须对应一张表单独一个实例 实现此接口后会根据不同表自动创建新实例 + */ +public interface TableRuleAware { + void setTableName(String tableName); + void setRuleName(String ruleName); + String getTableName(); + String getRuleName(); +} diff --git a/src/main/java/io/mycat/route/handler/HintCatletHandler.java b/src/main/java/io/mycat/route/handler/HintCatletHandler.java index 7c5e856f6..ce567113f 100644 --- a/src/main/java/io/mycat/route/handler/HintCatletHandler.java +++ b/src/main/java/io/mycat/route/handler/HintCatletHandler.java @@ -1,17 +1,18 @@ package io.mycat.route.handler; +import java.sql.SQLNonTransientException; +import java.util.Map; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import io.mycat.MycatServer; import io.mycat.cache.LayerCachePool; +import io.mycat.catlets.Catlet; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; import io.mycat.route.RouteResultset; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.sqlengine.Catlet; +import io.mycat.server.ServerConnection; import io.mycat.sqlengine.EngineCtx; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.SQLNonTransientException; /** * 处理注释中类型为catlet 的情况,每个catlet为一个用户自定义Java代码类,用于进行复杂查询SQL(只能是查询SQL)的自定义执行过程, @@ -19,8 +20,7 @@ */ public class HintCatletHandler implements HintHandler { - private static final Logger LOGGER = LoggerFactory - 
.getLogger(HintCatletHandler.class); + private static final Logger LOGGER = LoggerFactory.getLogger(HintCatletHandler.class); /** * 从全局的schema列表中查询指定的schema是否存在, 如果存在则替换connection属性中原有的schema, @@ -39,9 +39,9 @@ public class HintCatletHandler implements HintHandler { */ @Override public RouteResultset route(SystemConfig sysConfig, SchemaConfig schema, - int sqlType, String realSQL, String charset, - MySQLFrontConnection sc, LayerCachePool cachePool, - String hintSQLValue) throws SQLNonTransientException { + int sqlType, String realSQL, String charset, ServerConnection sc, + LayerCachePool cachePool, String hintSQLValue,int hintSqlType, Map hintMap) + throws SQLNonTransientException { // sc.setEngineCtx ctx String cateletClass = hintSQLValue; if (LOGGER.isDebugEnabled()) { @@ -51,11 +51,10 @@ public RouteResultset route(SystemConfig sysConfig, SchemaConfig schema, try { Catlet catlet = (Catlet) MycatServer.getInstance() .getCatletClassLoader().getInstanceofClass(cateletClass); - catlet.route(sysConfig, schema, sqlType, realSQL, charset, sc, - cachePool); + catlet.route(sysConfig, schema, sqlType, realSQL,charset, sc, cachePool); catlet.processSQL(realSQL, new EngineCtx(sc.getSession2())); } catch (Exception e) { - LOGGER.warn("catlet error " + e); + LOGGER.warn("catlet error "+e); throw new SQLNonTransientException(e); } return null; diff --git a/src/main/java/io/mycat/route/handler/HintDataNodeHandler.java b/src/main/java/io/mycat/route/handler/HintDataNodeHandler.java index 305d07e10..05ea2be31 100644 --- a/src/main/java/io/mycat/route/handler/HintDataNodeHandler.java +++ b/src/main/java/io/mycat/route/handler/HintDataNodeHandler.java @@ -1,18 +1,18 @@ package io.mycat.route.handler; - import java.sql.SQLNonTransientException; +import java.util.Map; -import org.apache.log4j.Logger; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.mycat.MycatServer; -import io.mycat.backend.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBNode; import 
io.mycat.cache.LayerCachePool; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; import io.mycat.route.RouteResultset; import io.mycat.route.util.RouterUtil; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; +import io.mycat.server.ServerConnection; /** * 处理注释中类型为datanode 的情况 @@ -21,13 +21,11 @@ */ public class HintDataNodeHandler implements HintHandler { - private static final Logger LOGGER = Logger.getLogger(HintSchemaHandler.class); + private static final Logger LOGGER = LoggerFactory.getLogger(HintSchemaHandler.class); @Override - public RouteResultset route(SystemConfig sysConfig, SchemaConfig schema, - int sqlType, String realSQL, String charset, - MySQLFrontConnection sc, LayerCachePool cachePool, - String hintSQLValue) + public RouteResultset route(SystemConfig sysConfig, SchemaConfig schema, int sqlType, String realSQL, + String charset, ServerConnection sc, LayerCachePool cachePool, String hintSQLValue,int hintSqlType, Map hintMap) throws SQLNonTransientException { String stmt = realSQL; @@ -49,4 +47,4 @@ public RouteResultset route(SystemConfig sysConfig, SchemaConfig schema, return rrs; } -} \ No newline at end of file +} diff --git a/src/main/java/io/mycat/route/handler/HintHandler.java b/src/main/java/io/mycat/route/handler/HintHandler.java index ed65e1a07..8ce04d17b 100644 --- a/src/main/java/io/mycat/route/handler/HintHandler.java +++ b/src/main/java/io/mycat/route/handler/HintHandler.java @@ -1,12 +1,13 @@ package io.mycat.route.handler; +import java.sql.SQLNonTransientException; +import java.util.Map; + import io.mycat.cache.LayerCachePool; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; import io.mycat.route.RouteResultset; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; - 
-import java.sql.SQLNonTransientException; +import io.mycat.server.ServerConnection; /** * 按照注释中包含指定类型的内容做路由解析 @@ -15,7 +16,7 @@ public interface HintHandler { public RouteResultset route(SystemConfig sysConfig, SchemaConfig schema, - int sqlType, String realSQL, String charset, - MySQLFrontConnection sc, LayerCachePool cachePool, - String hintSQLValue) throws SQLNonTransientException; + int sqlType, String realSQL, String charset, ServerConnection sc, + LayerCachePool cachePool, String hintSQLValue, int hintSqlType, Map hintMap) + throws SQLNonTransientException; } diff --git a/src/main/java/io/mycat/route/handler/HintHandlerFactory.java b/src/main/java/io/mycat/route/handler/HintHandlerFactory.java index 78ebf5c0d..1a477e1fa 100644 --- a/src/main/java/io/mycat/route/handler/HintHandlerFactory.java +++ b/src/main/java/io/mycat/route/handler/HintHandlerFactory.java @@ -2,9 +2,12 @@ import java.util.HashMap; import java.util.Map; +import java.util.concurrent.FutureTask; public class HintHandlerFactory { - private static boolean isInit = false; + + private static volatile boolean isInit = false; + //sql注释的类型处理handler 集合,现在支持两种类型的处理:sql,schema private static Map hintHandlerMap = new HashMap(); @@ -16,11 +19,22 @@ private static void init() { hintHandlerMap.put("schema",new HintSchemaHandler()); hintHandlerMap.put("datanode",new HintDataNodeHandler()); hintHandlerMap.put("catlet",new HintCatletHandler()); + + // 新增sql hint(注解)/*#mycat:db_type=master*/ 和 /*#mycat:db_type=slave*/ 和 /*mycat:db_type=slave*/ + // 该hint可以和 /*balance*/ 一起使用 + // 实现强制走 master 和 强制走 slave + hintHandlerMap.put("db_type", new HintMasterDBHandler()); + isInit = true; // 修复多次初始化的bug } + // 双重校验锁 fix 线程安全问题 public static HintHandler getHintHandler(String hintType) { if(!isInit) { - init(); + synchronized(HintHandlerFactory.class){ + if(!isInit) { + init(); + } + } } return hintHandlerMap.get(hintType); } diff --git a/src/main/java/io/mycat/route/handler/HintMasterDBHandler.java 
b/src/main/java/io/mycat/route/handler/HintMasterDBHandler.java new file mode 100644 index 000000000..3b809eda4 --- /dev/null +++ b/src/main/java/io/mycat/route/handler/HintMasterDBHandler.java @@ -0,0 +1,89 @@ +package io.mycat.route.handler; + + +import java.sql.SQLNonTransientException; +import java.util.Map; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.RouteResultset; +import io.mycat.route.factory.RouteStrategyFactory; +import io.mycat.server.ServerConnection; +import io.mycat.server.parser.ServerParse; + +/** + * 处理情况 sql hint: mycat:db_type=master/slave
+ * 后期可能会考虑增加 mycat:db_type=slave_newest,实现走延迟最小的slave + * @author digdeep@126.com + */ +// /*#mycat:db_type=master*/ +// /*#mycat:db_type=slave*/ +// /*#mycat:db_type=slave_newest*/ +// 强制走 master 和 强制走 slave +public class HintMasterDBHandler implements HintHandler { + + private static final Logger LOGGER = LoggerFactory.getLogger(HintMasterDBHandler.class); + + @Override + public RouteResultset route(SystemConfig sysConfig, SchemaConfig schema, int sqlType, + String realSQL, String charset, + ServerConnection sc, LayerCachePool cachePool, String hintSQLValue, int hintSqlType, Map hintMap) + throws SQLNonTransientException { + +// LOGGER.debug("realSQL: " + realSQL); // select * from travelrecord limit 1 +// LOGGER.debug("sqlType: " + sqlType); // 7 +// LOGGER.debug("schema.getName(): " + schema.getName()); // TESTDB +// LOGGER.debug("schema.getName(): " + schema.getDataNode()); // null +// LOGGER.debug("hintSQLValue: " + hintSQLValue); // master/slave + + RouteResultset rrs = RouteStrategyFactory.getRouteStrategy() + .route(sysConfig, schema, sqlType, + realSQL, charset, sc, cachePool); + + LOGGER.debug("schema.rrs(): " + rrs); // master + Boolean isRouteToMaster = null; // 默认不施加任何影响 + + LOGGER.debug("hintSQLValue:::::::::" + hintSQLValue); // slave + + if(hintSQLValue != null && !hintSQLValue.trim().equals("")){ + if(hintSQLValue.trim().equalsIgnoreCase("master")) { + isRouteToMaster = true; + } + if(hintSQLValue.trim().equalsIgnoreCase("slave")){ +// if(rrs.getCanRunInReadDB() != null && !rrs.getCanRunInReadDB()){ +// isRouteToMaster = null; +// LOGGER.warn(realSQL + " can not run in slave."); +// }else{ +// isRouteToMaster = false; +// } + if(sqlType == ServerParse.DELETE || sqlType == ServerParse.INSERT + ||sqlType == ServerParse.REPLACE || sqlType == ServerParse.UPDATE + || sqlType == ServerParse.DDL){ + LOGGER.error("should not use hint 'db_type' to route 'delete', 'insert', 'replace', 'update', 'ddl' to a slave db."); + isRouteToMaster = null; // 不施加任何影响 + 
}else{ + isRouteToMaster = false; + } + } + } + + if(isRouteToMaster == null){ // 默认不施加任何影响 + LOGGER.warn(" sql hint 'db_type' error, ignore this hint."); + return rrs; + } + + if(isRouteToMaster) {// 强制走 master + rrs.setRunOnSlave(false); + } + + if(!isRouteToMaster) {// 强制走slave + rrs.setRunOnSlave(true); + } + + LOGGER.debug("rrs.getRunOnSlave():" + rrs.getRunOnSlave()); + return rrs; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/route/handler/HintSQLHandler.java b/src/main/java/io/mycat/route/handler/HintSQLHandler.java index 3778c19f9..78de5159a 100644 --- a/src/main/java/io/mycat/route/handler/HintSQLHandler.java +++ b/src/main/java/io/mycat/route/handler/HintSQLHandler.java @@ -1,21 +1,37 @@ package io.mycat.route.handler; +import java.sql.SQLNonTransientException; +import java.sql.Types; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.alibaba.druid.sql.ast.SQLExpr; +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.expr.SQLIntegerExpr; +import com.alibaba.druid.sql.ast.expr.SQLNumberExpr; +import com.alibaba.druid.sql.ast.expr.SQLTextLiteralExpr; +import com.alibaba.druid.sql.ast.expr.SQLValuableExpr; +import com.alibaba.druid.sql.ast.statement.*; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import com.alibaba.druid.sql.parser.SQLStatementParser; +import com.google.common.base.Splitter; +import com.google.common.base.Strings; import io.mycat.cache.LayerCachePool; -import io.mycat.route.RouteResultset; -import io.mycat.route.RouteResultsetNode; -import io.mycat.route.RouteStrategy; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.*; import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.MySQLFrontConnection; -import 
io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; +import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; -import java.sql.SQLNonTransientException; - /** * 处理注释中 类型为sql的情况 (按照 注释中的sql做路由解析,而不是实际的sql) */ public class HintSQLHandler implements HintHandler { + private RouteStrategy routeStrategy; public HintSQLHandler() { @@ -24,28 +40,156 @@ public HintSQLHandler() { @Override public RouteResultset route(SystemConfig sysConfig, SchemaConfig schema, - int sqlType, String realSQL, String charset, MySQLFrontConnection sc, - LayerCachePool cachePool, String hintSQLValue) - throws SQLNonTransientException { - - RouteResultset rrs = routeStrategy.route(sysConfig, schema, sqlType, + int sqlType, String realSQL, String charset, ServerConnection sc, + LayerCachePool cachePool, String hintSQLValue,int hintSqlType, Map hintMap) + throws SQLNonTransientException { + + RouteResultset rrs = routeStrategy.route(sysConfig, schema, hintSqlType, hintSQLValue, charset, sc, cachePool); + // 替换RRS中的SQL执行 RouteResultsetNode[] oldRsNodes = rrs.getNodes(); RouteResultsetNode[] newRrsNodes = new RouteResultsetNode[oldRsNodes.length]; for (int i = 0; i < newRrsNodes.length; i++) { newRrsNodes[i] = new RouteResultsetNode(oldRsNodes[i].getName(), oldRsNodes[i].getSqlType(), realSQL); + newRrsNodes[i].setSlot(oldRsNodes[i].getSlot()); } rrs.setNodes(newRrsNodes); // 判断是否为调用存储过程的SQL语句,这里不能用SQL解析器来解析判断是否为CALL语句 - int rs = ServerParse.parse(realSQL); - int realSQLType = rs & 0xff; - if (ServerParse.CALL == realSQLType) { + if (ServerParse.CALL == sqlType) { rrs.setCallStatement(true); + + Procedure procedure=parseProcedure(realSQL,hintMap); + rrs.setProcedure(procedure); + // String sql=procedure.toChangeCallSql(null); + String sql=realSQL; + for (RouteResultsetNode node : rrs.getNodes()) + { + node.setProcedure(procedure); + node.setHintMap(hintMap); + node.setStatement(sql); + } + } return rrs; } + + + + private Procedure 
parseProcedure(String sql,Map hintMap) + { + boolean fields = hintMap.containsKey("list_fields"); + boolean isResultList= hintMap != null && ("list".equals(hintMap.get("result_type"))|| fields); + Procedure procedure=new Procedure(); + procedure.setOriginSql(sql); + procedure.setResultList(isResultList); + List sqls= Splitter.on(";").trimResults().splitToList(sql) ; + Set outSet=new HashSet<>(); + for (int i = sqls.size() - 1; i >= 0; i--) + { + String s = sqls.get(i); + if(Strings.isNullOrEmpty(s)) { + continue; + } + SQLStatementParser parser = new MySqlStatementParser(s); + SQLStatement statement = parser.parseStatement(); + if(statement instanceof SQLSelectStatement) + { + MySqlSelectQueryBlock selectQuery= (MySqlSelectQueryBlock) ((SQLSelectStatement) statement).getSelect().getQuery(); + if(selectQuery!=null) + { + List selectItems= selectQuery.getSelectList(); + for (SQLSelectItem selectItem : selectItems) + { + String select = selectItem.toString(); + outSet.add(select) ; + procedure.getSelectColumns().add(select); + } + } + procedure.setSelectSql(s); + } else if(statement instanceof SQLCallStatement) + { + SQLCallStatement sqlCallStatement = (SQLCallStatement) statement; + procedure.setName(sqlCallStatement.getProcedureName().getSimpleName()); + List paramterList= sqlCallStatement.getParameters(); + for (int i1 = 0; i1 < paramterList.size(); i1++) + { + SQLExpr sqlExpr = paramterList.get(i1); + String pName = sqlExpr.toString(); + String pType=outSet.contains(pName)? 
ProcedureParameter.OUT:ProcedureParameter.IN; + ProcedureParameter parameter=new ProcedureParameter(); + parameter.setIndex(i1+1); + parameter.setName(pName); + parameter.setParameterType(pType); + if(pName.startsWith("@")) + { + procedure.getParamterMap().put(pName, parameter); + } else + { + procedure.getParamterMap().put(String.valueOf(i1+1), parameter); + } + + + } + procedure.setCallSql(s); + } else if(statement instanceof SQLSetStatement) + { + procedure.setSetSql(s); + SQLSetStatement setStatement= (SQLSetStatement) statement; + List sets= setStatement.getItems(); + for (SQLAssignItem set : sets) + { + String name=set.getTarget().toString(); + SQLExpr value=set.getValue(); + ProcedureParameter parameter = procedure.getParamterMap().get(name); + if(parameter!=null) + { + if (value instanceof SQLIntegerExpr) + { + parameter.setValue(((SQLIntegerExpr) value).getNumber()); + parameter.setJdbcType(Types.INTEGER); + } else if(value instanceof SQLNumberExpr) + { + parameter.setValue(((SQLNumberExpr) value).getNumber()); + parameter.setJdbcType(Types.NUMERIC); + } + else if(value instanceof SQLTextLiteralExpr) + { + parameter.setValue(((SQLTextLiteralExpr) value).getText()); + parameter.setJdbcType(Types.VARCHAR); + } + else + if (value instanceof SQLValuableExpr) + { + parameter.setValue(((SQLValuableExpr) value).getValue()); + parameter.setJdbcType(Types.VARCHAR); + } + } + } + } + + } + if(fields) + { + String list_fields =(String) hintMap.get("list_fields"); + List listFields = Splitter.on(",").trimResults().splitToList( list_fields); + for (String field : listFields) + { + if(!procedure.getParamterMap().containsKey(field)) + { + ProcedureParameter parameter=new ProcedureParameter(); + parameter.setParameterType(ProcedureParameter.OUT); + parameter.setName(field); + parameter.setJdbcType(-10); + parameter.setIndex(procedure.getParamterMap().size()+1); + procedure.getParamterMap().put(field,parameter); + } + } + procedure.getListFields().addAll(listFields); + } + 
return procedure; + } } diff --git a/src/main/java/io/mycat/route/handler/HintSchemaHandler.java b/src/main/java/io/mycat/route/handler/HintSchemaHandler.java index 66214a3f2..ed94f1c85 100644 --- a/src/main/java/io/mycat/route/handler/HintSchemaHandler.java +++ b/src/main/java/io/mycat/route/handler/HintSchemaHandler.java @@ -1,25 +1,25 @@ package io.mycat.route.handler; +import java.sql.SQLNonTransientException; +import java.util.Map; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import io.mycat.MycatServer; import io.mycat.cache.LayerCachePool; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; import io.mycat.route.RouteResultset; import io.mycat.route.RouteStrategy; import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.SQLNonTransientException; +import io.mycat.server.ServerConnection; /** * 处理注释中类型为schema 的情况(按照指定schema做路由解析) */ public class HintSchemaHandler implements HintHandler { - private static final Logger LOGGER = LoggerFactory - .getLogger(HintSchemaHandler.class); + private static final Logger LOGGER = LoggerFactory.getLogger(HintSchemaHandler.class); private RouteStrategy routeStrategy; @@ -43,10 +43,9 @@ public HintSchemaHandler() { */ @Override public RouteResultset route(SystemConfig sysConfig, SchemaConfig schema, - int sqlType, String realSQL, String charset, MySQLFrontConnection sc, - LayerCachePool cachePool, String hintSQLValue) + int sqlType, String realSQL, String charset, ServerConnection sc, + LayerCachePool cachePool, String hintSQLValue,int hintSqlType, Map hintMap) throws SQLNonTransientException { - SchemaConfig tempSchema = MycatServer.getInstance().getConfig().getSchemas().get(hintSQLValue); if (tempSchema != null) { return routeStrategy.route(sysConfig, 
tempSchema, sqlType, realSQL, charset, sc, cachePool); diff --git a/src/main/java/io/mycat/route/impl/AbstractRouteStrategy.java b/src/main/java/io/mycat/route/impl/AbstractRouteStrategy.java index bf43c59a0..0a93e2591 100644 --- a/src/main/java/io/mycat/route/impl/AbstractRouteStrategy.java +++ b/src/main/java/io/mycat/route/impl/AbstractRouteStrategy.java @@ -1,110 +1,118 @@ package io.mycat.route.impl; +import java.sql.SQLNonTransientException; +import java.sql.SQLSyntaxErrorException; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import io.mycat.MycatServer; import io.mycat.cache.LayerCachePool; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; import io.mycat.route.RouteResultset; import io.mycat.route.RouteStrategy; import io.mycat.route.util.RouterUtil; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; +import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; import io.mycat.sqlengine.mpp.LoadData; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.SQLNonTransientException; -import java.sql.SQLSyntaxErrorException; public abstract class AbstractRouteStrategy implements RouteStrategy { + private static final Logger LOGGER = LoggerFactory.getLogger(AbstractRouteStrategy.class); @Override - public RouteResultset route(SystemConfig sysConfig, SchemaConfig schema,int sqlType, String origSQL, - String charset, MySQLFrontConnection sc, LayerCachePool cachePool) throws SQLNonTransientException { + public RouteResultset route(SystemConfig sysConfig, SchemaConfig schema, int sqlType, String origSQL, + String charset, ServerConnection sc, LayerCachePool cachePool) throws SQLNonTransientException { + + //对应schema标签checkSQLschema属性,把表示schema的字符去掉 + if (schema.isCheckSQLSchema()) { + origSQL = RouterUtil.removeSchema(origSQL, schema.getName()); + } - //process some before 
route logic - if (beforeRouteProcess(schema, sqlType, origSQL, sc)) return null; + /** + * 处理一些路由之前的逻辑 + * 全局序列号,父子表插入 + */ + if ( beforeRouteProcess(schema, sqlType, origSQL, sc) ) { + return null; + } - // user handler + /** + * SQL 语句拦截 + */ String stmt = MycatServer.getInstance().getSqlInterceptor().interceptSQL(origSQL, sqlType); - - if (origSQL != stmt && LOGGER.isDebugEnabled()) { + if (!origSQL.equals(stmt) && LOGGER.isDebugEnabled()) { LOGGER.debug("sql intercepted to " + stmt + " from " + origSQL); } - if (schema.isCheckSQLSchema()) { - stmt = RouterUtil.removeSchema(stmt, schema.getName()); - } + RouteResultset rrs = new RouteResultset(stmt, sqlType); - if ( LOGGER.isDebugEnabled()&&origSQL.startsWith(LoadData.loadDataHint)) - { - rrs.setCacheAble(false);//优化debug loaddata输出cache的日志会极大降低性能 - } + /** + * 优化debug loaddata输出cache的日志会极大降低性能 + */ + if (LOGGER.isDebugEnabled() && origSQL.startsWith(LoadData.loadDataHint)) { + rrs.setCacheAble(false); + } - //rrs携带ServerConnection的autocommit状态用于在sql解析的时候遇到select ... for update的时候动态设定RouteResultsetNode的canRunInReadDB属性 + /** + * rrs携带ServerConnection的autocommit状态用于在sql解析的时候遇到 + * select ... 
for update的时候动态设定RouteResultsetNode的canRunInReadDB属性 + */ if (sc != null ) { rrs.setAutocommit(sc.isAutocommit()); } - //ddl create deal - if(ServerParse.DDL==sqlType){ - return RouterUtil.routeToDDLNode(rrs, sqlType, stmt,schema); + /** + * DDL 语句的路由 + */ + if (ServerParse.DDL == sqlType) { + return RouterUtil.routeToDDLNode(rrs, sqlType, stmt, schema); } - // check if there is sharding in schema + /** + * 检查是否有分片 + */ if (schema.isNoSharding() && ServerParse.SHOW != sqlType) { rrs = RouterUtil.routeToSingleNode(rrs, schema.getDataNode(), stmt); -// return RouterUtil.routeToSingleNode(rrs, schema.getDataNode(), stmt); } else { - RouteResultset returnedSet=routeSystemInfo(schema, sqlType, stmt, rrs); - if(returnedSet==null){ - rrs = routeNormalSqlWithAST(schema, stmt, rrs, charset, cachePool); -// return routeNormalSqlWithAST(schema, stmt, rrs, charset, cachePool); + RouteResultset returnedSet = routeSystemInfo(schema, sqlType, stmt, rrs); + if (returnedSet == null) { + rrs = routeNormalSqlWithAST(schema, stmt, rrs, charset, cachePool,sqlType,sc); } } return rrs; } - private boolean beforeRouteProcess(SchemaConfig schema, int sqlType, String origSQL, MySQLFrontConnection sc) throws SQLNonTransientException { - return RouterUtil.processWithMycatSeq(schema, sqlType, origSQL, sc) || - (sqlType == ServerParse.INSERT && RouterUtil.processERChildTable(schema, origSQL, sc)) || - (sqlType == ServerParse.INSERT && RouterUtil.processInsert(schema, sqlType, origSQL,sc)); + /** + * 路由之前必要的处理 + * 主要是全局序列号插入,还有子表插入 + */ + private boolean beforeRouteProcess(SchemaConfig schema, int sqlType, String origSQL, ServerConnection sc) + throws SQLNonTransientException { + + return RouterUtil.processWithMycatSeq(schema, sqlType, origSQL, sc) + || (sqlType == ServerParse.INSERT && RouterUtil.processERChildTable(schema, origSQL, sc)) + || (sqlType == ServerParse.INSERT && RouterUtil.processInsert(schema, sqlType, origSQL, sc)); } /** * 通过解析AST语法树类来寻找路由 - * @param schema - * @param stmt - * 
@param rrs - * @param charset - * @param cachePool - * @return - * @throws SQLNonTransientException */ - public abstract RouteResultset routeNormalSqlWithAST(SchemaConfig schema,String stmt,RouteResultset rrs,String charset,LayerCachePool cachePool) throws SQLNonTransientException; + public abstract RouteResultset routeNormalSqlWithAST(SchemaConfig schema, String stmt, RouteResultset rrs, + String charset, LayerCachePool cachePool,int sqlType,ServerConnection sc) throws SQLNonTransientException; /** - * - * @param schema - * @param sqlType - * @param stmt - * @param rrs - * @return - * @throws SQLSyntaxErrorException + * 路由信息指令, 如 SHOW、SELECT@@、DESCRIBE */ - public abstract RouteResultset routeSystemInfo(SchemaConfig schema,int sqlType,String stmt,RouteResultset rrs) throws SQLSyntaxErrorException; + public abstract RouteResultset routeSystemInfo(SchemaConfig schema, int sqlType, String stmt, RouteResultset rrs) + throws SQLSyntaxErrorException; /** - * show 之类的语句 - * @param schema - * @param rrs - * @param stmt - * @return - * @throws SQLSyntaxErrorException + * 解析 Show 之类的语句 */ - public abstract RouteResultset analyseShowSQL(SchemaConfig schema,RouteResultset rrs, String stmt) throws SQLNonTransientException; + public abstract RouteResultset analyseShowSQL(SchemaConfig schema, RouteResultset rrs, String stmt) + throws SQLNonTransientException; } diff --git a/src/main/java/io/mycat/route/impl/DruidMycatRouteStrategy.java b/src/main/java/io/mycat/route/impl/DruidMycatRouteStrategy.java index 828b1b07c..ba33f1e0c 100644 --- a/src/main/java/io/mycat/route/impl/DruidMycatRouteStrategy.java +++ b/src/main/java/io/mycat/route/impl/DruidMycatRouteStrategy.java @@ -1,46 +1,110 @@ package io.mycat.route.impl; +import java.sql.SQLNonTransientException; +import java.sql.SQLSyntaxErrorException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; 
+import java.util.TreeSet; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.alibaba.druid.sql.SQLUtils; +import com.alibaba.druid.sql.ast.SQLObject; import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.expr.SQLAllExpr; +import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr; +import com.alibaba.druid.sql.ast.expr.SQLExistsExpr; +import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.druid.sql.ast.expr.SQLInSubQueryExpr; +import com.alibaba.druid.sql.ast.expr.SQLQueryExpr; +import com.alibaba.druid.sql.ast.statement.SQLDeleteStatement; +import com.alibaba.druid.sql.ast.statement.SQLExprTableSource; +import com.alibaba.druid.sql.ast.statement.SQLInsertStatement; +import com.alibaba.druid.sql.ast.statement.SQLSelect; +import com.alibaba.druid.sql.ast.statement.SQLSelectQuery; import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; +import com.alibaba.druid.sql.ast.statement.SQLTableSource; +import com.alibaba.druid.sql.ast.statement.SQLUpdateStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlReplaceStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; import com.alibaba.druid.sql.parser.SQLStatementParser; +import com.alibaba.druid.stat.TableStat.Relationship; import com.google.common.base.Strings; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.nio.handler.MiddlerQueryResultHandler; +import io.mycat.backend.mysql.nio.handler.MiddlerResultHandler; +import io.mycat.backend.mysql.nio.handler.SecondHandler; import io.mycat.cache.LayerCachePool; +import io.mycat.config.ErrorCode; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.config.model.rule.RuleConfig; import io.mycat.route.RouteResultset; import 
io.mycat.route.RouteResultsetNode; -import io.mycat.route.parser.druid.*; +import io.mycat.route.function.SlotFunction; +import io.mycat.route.impl.middlerResultStrategy.BinaryOpResultHandler; +import io.mycat.route.impl.middlerResultStrategy.InSubQueryResultHandler; +import io.mycat.route.impl.middlerResultStrategy.RouteMiddlerReaultHandler; +import io.mycat.route.impl.middlerResultStrategy.SQLAllResultHandler; +import io.mycat.route.impl.middlerResultStrategy.SQLExistsResultHandler; +import io.mycat.route.impl.middlerResultStrategy.SQLQueryResultHandler; +import io.mycat.route.parser.druid.DruidParser; +import io.mycat.route.parser.druid.DruidParserFactory; +import io.mycat.route.parser.druid.DruidShardingParseInfo; +import io.mycat.route.parser.druid.MycatSchemaStatVisitor; +import io.mycat.route.parser.druid.MycatStatementParser; +import io.mycat.route.parser.druid.RouteCalculateUnit; +import io.mycat.route.parser.util.ParseUtil; import io.mycat.route.util.RouterUtil; -import io.mycat.server.config.node.SchemaConfig; +import io.mycat.server.NonBlockingSession; +import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.SQLNonTransientException; -import java.sql.SQLSyntaxErrorException; -import java.util.Iterator; -import java.util.SortedSet; -import java.util.TreeSet; public class DruidMycatRouteStrategy extends AbstractRouteStrategy { - private static final Logger LOGGER = LoggerFactory.getLogger(DruidMycatRouteStrategy.class); + + public static final Logger LOGGER = LoggerFactory.getLogger(DruidMycatRouteStrategy.class); + + private static Map,RouteMiddlerReaultHandler> middlerResultHandler = new HashMap<>(); + + static{ + middlerResultHandler.put(SQLQueryExpr.class, new SQLQueryResultHandler()); + middlerResultHandler.put(SQLBinaryOpExpr.class, new BinaryOpResultHandler()); + middlerResultHandler.put(SQLInSubQueryExpr.class, new InSubQueryResultHandler()); 
+ middlerResultHandler.put(SQLExistsExpr.class, new SQLExistsResultHandler()); + middlerResultHandler.put(SQLAllExpr.class, new SQLAllResultHandler()); + } + @Override public RouteResultset routeNormalSqlWithAST(SchemaConfig schema, - String stmt, RouteResultset rrs, String charset, - LayerCachePool cachePool) throws SQLNonTransientException { - SQLStatementParser parser =null; - if(schema.isNeedSupportMultiDBType()) - { + String stmt, RouteResultset rrs,String charset, + LayerCachePool cachePool,int sqlType,ServerConnection sc) throws SQLNonTransientException { + + /** + * 只有mysql时只支持mysql语法 + */ + SQLStatementParser parser = null; + if (schema.isNeedSupportMultiDBType()) { parser = new MycatStatementParser(stmt); - } else - { - parser = new MySqlStatementParser(stmt); //只有mysql时只支持mysql语法 + } else { + parser = new MySqlStatementParser(stmt); } MycatSchemaStatVisitor visitor = null; SQLStatement statement; - //解析出现问题统一抛SQL语法错误 + + /** + * 解析出现问题统一抛SQL语法错误 + */ try { statement = parser.parseStatement(); visitor = new MycatSchemaStatVisitor(); @@ -49,22 +113,292 @@ public RouteResultset routeNormalSqlWithAST(SchemaConfig schema, throw new SQLSyntaxErrorException(t); } - //检验unsupported statement + /** + * 检验unsupported statement + */ checkUnSupportedStatement(statement); - - DruidParser druidParser = DruidParserFactory.create(schema,statement,visitor); + DruidParser druidParser = DruidParserFactory.create(schema, statement, visitor); druidParser.parser(schema, rrs, statement, stmt,cachePool,visitor); + DruidShardingParseInfo ctx= druidParser.getCtx() ; + rrs.setTables(ctx.getTables()); + + if(visitor.isSubqueryRelationOr()){ + String err = "In subQuery,the or condition is not supported."; + LOGGER.error(err); + throw new SQLSyntaxErrorException(err); + } + + /* 按照以下情况路由 + 1.2.1 可以直接路由. 
+ 1.2.2 两个表夸库join的sql.调用calat + 1.2.3 需要先执行subquery 的sql.把subquery拆分出来.获取结果后,与outerquery + */ + + //add huangyiming 分片规则不一样的且表中带查询条件的则走Catlet + List tables = ctx.getTables(); + SchemaConfig schemaConf = MycatServer.getInstance().getConfig().getSchemas().get(schema.getName()); + int index = 0; + RuleConfig firstRule = null; + boolean directRoute = true; + Set firstDataNodes = new HashSet(); + Map tconfigs = schemaConf==null?null:schemaConf.getTables(); + + Map rulemap = new HashMap<>(); + if(tconfigs!=null){ + for(String tableName : tables){ + TableConfig tc = tconfigs.get(tableName); + if(tc == null){ + //add 别名中取 + Map tableAliasMap = ctx.getTableAliasMap(); + if(tableAliasMap !=null && tableAliasMap.get(tableName) !=null){ + tc = schemaConf.getTables().get(tableAliasMap.get(tableName)); + } + } - //DruidParser解析过程中已完成了路由的直接返回 - if(rrs.isFinishedRoute()) { + if(index == 0){ + if(tc !=null){ + firstRule= tc.getRule(); + //没有指定分片规则时,不做处理 + if(firstRule==null){ + continue; + } + firstDataNodes.addAll(tc.getDataNodes()); + rulemap.put(tc.getName(), firstRule); + } + }else{ + if(tc !=null){ + //ER关系表的时候是可能存在字表中没有tablerule的情况,所以加上判断 + RuleConfig ruleCfg = tc.getRule(); + if(ruleCfg==null){ //没有指定分片规则时,不做处理 + continue; + } + Set dataNodes = new HashSet(); + dataNodes.addAll(tc.getDataNodes()); + rulemap.put(tc.getName(), ruleCfg); + //如果匹配规则不相同或者分片的datanode不相同则需要走子查询处理 + if(firstRule!=null&&((ruleCfg !=null && !ruleCfg.getRuleAlgorithm().equals(firstRule.getRuleAlgorithm()) )||( !dataNodes.equals(firstDataNodes)))){ + directRoute = false; + break; + } + } + } + index++; + } + } + + RouteResultset rrsResult = rrs; + if(directRoute){ //直接路由 + if(!RouterUtil.isAllGlobalTable(ctx, schemaConf)){ + if(rulemap.size()>1&&!checkRuleField(rulemap,visitor)){ + String err = "In case of slice table,there is no rule field in the relationship condition!"; + LOGGER.error(err); + throw new SQLSyntaxErrorException(err); + } + } + rrsResult = 
directRoute(rrs,ctx,schema,druidParser,statement,cachePool); + }else{ + int subQuerySize = visitor.getSubQuerys().size(); + if(subQuerySize==0&&ctx.getTables().size()==2){ //两表关联,考虑使用catlet + if(!visitor.getRelationships().isEmpty()){ + rrs.setCacheAble(false); + rrs.setFinishedRoute(true); + rrsResult = catletRoute(schema,ctx.getSql(),charset,sc); + }else{ + rrsResult = directRoute(rrs,ctx,schema,druidParser,statement,cachePool); + } + }else if(subQuerySize==1){ //只涉及一张表的子查询,使用 MiddlerResultHandler 获取中间结果后,改写原有 sql 继续执行 TODO 后期可能会考虑多个子查询的情况. + SQLSelect sqlselect = visitor.getSubQuerys().iterator().next(); + if(!visitor.getRelationships().isEmpty()){ // 当 inner query 和 outer query 有关联条件时,暂不支持 + String err = "In case of slice table,sql have different rules,the relationship condition is not supported."; + LOGGER.error(err); + throw new SQLSyntaxErrorException(err); + }else{ + SQLSelectQuery sqlSelectQuery = sqlselect.getQuery(); + if(((MySqlSelectQueryBlock)sqlSelectQuery).getFrom() instanceof SQLExprTableSource) { + rrs.setCacheAble(false); + rrs.setFinishedRoute(true); + rrsResult = middlerResultRoute(schema,charset,sqlselect,sqlType,statement,sc); + } + } + }else if(subQuerySize >=2){ + String err = "In case of slice table,sql has different rules,currently only one subQuery is supported."; + LOGGER.error(err); + throw new SQLSyntaxErrorException(err); + } + } + return rrsResult; + } + + /** + * 子查询中存在关联查询的情况下,检查关联字段是否是分片字段 + * @param rulemap + * @param ships + * @return + */ + private boolean checkRuleField(Map rulemap,MycatSchemaStatVisitor visitor){ + + if(!MycatServer.getInstance().getConfig().getSystem().isSubqueryRelationshipCheck()){ + return true; + } + + Set ships = visitor.getRelationships(); + Iterator iter = ships.iterator(); + while(iter.hasNext()){ + Relationship ship = iter.next(); + String lefttable = ship.getLeft().getTable().toUpperCase(); + String righttable = ship.getRight().getTable().toUpperCase(); + // 如果是同一个表中的关联条件,不做处理 + 
if(lefttable.equals(righttable)){ + return true; + } + RuleConfig leftconfig = rulemap.get(lefttable); + RuleConfig rightconfig = rulemap.get(righttable); + + if(null!=leftconfig&&null!=rightconfig + &&leftconfig.equals(rightconfig) + &&leftconfig.getColumn().equals(ship.getLeft().getName().toUpperCase()) + &&rightconfig.getColumn().equals(ship.getRight().getName().toUpperCase())){ + return true; + } + } + return false; + } + + private RouteResultset middlerResultRoute(final SchemaConfig schema,final String charset,final SQLSelect sqlselect, + final int sqlType,final SQLStatement statement,final ServerConnection sc){ + + final String middlesql = SQLUtils.toMySqlString(sqlselect); + + MiddlerResultHandler middlerResultHandler = new MiddlerQueryResultHandler<>(new SecondHandler() { + @Override + public void doExecute(List param) { + sc.getSession2().setMiddlerResultHandler(null); + String sqls = null; + // 路由计算 + RouteResultset rrs = null; + try { + + sqls = buildSql(statement,sqlselect,param); + rrs = MycatServer + .getInstance() + .getRouterservice() + .route(MycatServer.getInstance().getConfig().getSystem(), + schema, sqlType,sqls.toLowerCase(), charset,sc ); + + } catch (Exception e) { + StringBuilder s = new StringBuilder(); + LOGGER.warn(s.append(this).append(sqls).toString() + " err:" + e.toString(),e); + String msg = e.getMessage(); + sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, msg == null ? 
e.getClass().getSimpleName() : msg); + return; + } + NonBlockingSession noBlockSession = new NonBlockingSession(sc.getSession2().getSource()); + noBlockSession.setMiddlerResultHandler(null); + //session的预编译标示传递 + noBlockSession.setPrepared(sc.getSession2().isPrepared()); + if (rrs != null) { + noBlockSession.setCanClose(false); + noBlockSession.execute(rrs, ServerParse.SELECT); + } + } + } ); + sc.getSession2().setMiddlerResultHandler(middlerResultHandler); + sc.getSession2().setCanClose(false); + + // 路由计算 + RouteResultset rrs = null; + try { + rrs = MycatServer + .getInstance() + .getRouterservice() + .route(MycatServer.getInstance().getConfig().getSystem(), + schema, ServerParse.SELECT, middlesql, charset, sc); + + } catch (Exception e) { + StringBuilder s = new StringBuilder(); + LOGGER.warn(s.append(this).append(middlesql).toString() + " err:" + e.toString(),e); + String msg = e.getMessage(); + sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, msg == null ? e.getClass().getSimpleName() : msg); + return null; + } + + if(rrs!=null){ + rrs.setCacheAble(false); + } + return rrs; + } + + /** + * 获取子查询执行结果后,改写原始sql 继续执行. 
+ * @param statement + * @param sqlselect + * @param param + * @return + */ + private String buildSql(SQLStatement statement,SQLSelect sqlselect,List param){ + + SQLObject parent = sqlselect.getParent(); + RouteMiddlerReaultHandler handler = middlerResultHandler.get(parent.getClass()); + if(handler==null){ + throw new UnsupportedOperationException(parent.getClass()+" current is not supported "); + } + return handler.dohandler(statement, sqlselect, parent, param); + } + + /** + * 两个表的情况,catlet + * @param schema + * @param stmt + * @param charset + * @param sc + * @return + */ + private RouteResultset catletRoute(SchemaConfig schema,String stmt,String charset,ServerConnection sc){ + RouteResultset rrs = null; + try { + rrs = MycatServer + .getInstance() + .getRouterservice() + .route(MycatServer.getInstance().getConfig().getSystem(), + schema, ServerParse.SELECT, "/*!mycat:catlet=io.mycat.catlets.ShareJoin */ "+stmt, charset, sc); + + }catch(Exception e){ + + } + return rrs; + } + + /** + * 直接结果路由 + * @param rrs + * @param ctx + * @param schema + * @param druidParser + * @param statement + * @param cachePool + * @return + * @throws SQLNonTransientException + */ + private RouteResultset directRoute(RouteResultset rrs,DruidShardingParseInfo ctx,SchemaConfig schema, + DruidParser druidParser,SQLStatement statement,LayerCachePool cachePool) throws SQLNonTransientException{ + + //改写sql:如insert语句主键自增长, 在直接结果路由的情况下,进行sql 改写处理 + druidParser.changeSql(schema, rrs, statement,cachePool); + + /** + * DruidParser 解析过程中已完成了路由的直接返回 + */ + if ( rrs.isFinishedRoute() ) { return rrs; } -// rrs.setStatement(druidParser.getCtx().getSql()); - //没有from的的select语句或其他 - if(druidParser.getCtx().getTables().size() == 0) { - return RouterUtil.routeToSingleNode(rrs, schema.getRandomDataNode(),druidParser.getCtx().getSql()); + /** + * 没有from的select语句或其他 + */ + if((ctx.getTables() == null || ctx.getTables().size() == 0)&&(ctx.getTableAliasMap()==null||ctx.getTableAliasMap().isEmpty())) + { + 
return RouterUtil.routeToSingleNode(rrs, schema.getRandomDataNode(), druidParser.getCtx().getSql()); } if(druidParser.getCtx().getRouteCalculateUnits().size() == 0) { @@ -73,31 +407,94 @@ public RouteResultset routeNormalSqlWithAST(SchemaConfig schema, } SortedSet nodeSet = new TreeSet(); - for(RouteCalculateUnit unit : druidParser.getCtx().getRouteCalculateUnits()) { + boolean isAllGlobalTable = RouterUtil.isAllGlobalTable(ctx, schema); + for(RouteCalculateUnit unit: druidParser.getCtx().getRouteCalculateUnits()) { RouteResultset rrsTmp = RouterUtil.tryRouteForTables(schema, druidParser.getCtx(), unit, rrs, isSelect(statement), cachePool); - if(rrsTmp != null) { + if(rrsTmp != null&&rrsTmp.getNodes()!=null) { for(RouteResultsetNode node :rrsTmp.getNodes()) { nodeSet.add(node); } } + if(isAllGlobalTable) {//都是全局表时只计算一遍路由 + break; + } } RouteResultsetNode[] nodes = new RouteResultsetNode[nodeSet.size()]; int i = 0; - for (Iterator iterator = nodeSet.iterator(); iterator.hasNext();) { - nodes[i] = (RouteResultsetNode) iterator.next(); + for (RouteResultsetNode aNodeSet : nodeSet) { + nodes[i] = aNodeSet; + if(statement instanceof MySqlInsertStatement &&ctx.getTables().size()==1&&schema.getTables().containsKey(ctx.getTables().get(0))) { + RuleConfig rule = schema.getTables().get(ctx.getTables().get(0)).getRule(); + if(rule!=null&& rule.getRuleAlgorithm() instanceof SlotFunction){ + aNodeSet.setStatement(ParseUtil.changeInsertAddSlot(aNodeSet.getStatement(),aNodeSet.getSlot())); + } + } i++; - + } + rrs.setNodes(nodes); + + //分表 + /** + * subTables="t_order$1-2,t_order3" + *目前分表 1.6 开始支持 幵丏 dataNode 在分表条件下只能配置一个,分表条件下不支持join。 + */ + if(rrs.isDistTable()){ + return this.routeDisTable(statement,rrs); + } + return rrs; + } + + private SQLExprTableSource getDisTable(SQLTableSource tableSource,RouteResultsetNode node) throws SQLSyntaxErrorException{ + if(node.getSubTableName()==null){ + String msg = " sub table not exists for " + node.getName() + " on " + tableSource; + 
LOGGER.error("DruidMycatRouteStrategyError " + msg); + throw new SQLSyntaxErrorException(msg); } - rrs.setNodes(nodes); + SQLIdentifierExpr sqlIdentifierExpr = new SQLIdentifierExpr(); + sqlIdentifierExpr.setParent(tableSource.getParent()); + sqlIdentifierExpr.setName(node.getSubTableName()); + SQLExprTableSource from2 = new SQLExprTableSource(sqlIdentifierExpr); + return from2; + } + + private RouteResultset routeDisTable(SQLStatement statement, RouteResultset rrs) throws SQLSyntaxErrorException{ + SQLTableSource tableSource = null; + if(statement instanceof SQLInsertStatement) { + SQLInsertStatement insertStatement = (SQLInsertStatement) statement; + tableSource = insertStatement.getTableSource(); + for (RouteResultsetNode node : rrs.getNodes()) { + SQLExprTableSource from2 = getDisTable(tableSource, node); + insertStatement.setTableSource(from2); + node.setStatement(insertStatement.toString()); + } + } + if(statement instanceof SQLDeleteStatement) { + SQLDeleteStatement deleteStatement = (SQLDeleteStatement) statement; + tableSource = deleteStatement.getTableSource(); + for (RouteResultsetNode node : rrs.getNodes()) { + SQLExprTableSource from2 = getDisTable(tableSource, node); + deleteStatement.setTableSource(from2); + node.setStatement(deleteStatement.toString()); + } + } + if(statement instanceof SQLUpdateStatement) { + SQLUpdateStatement updateStatement = (SQLUpdateStatement) statement; + tableSource = updateStatement.getTableSource(); + for (RouteResultsetNode node : rrs.getNodes()) { + SQLExprTableSource from2 = getDisTable(tableSource, node); + updateStatement.setTableSource(from2); + node.setStatement(updateStatement.toString()); + } + } - return rrs; } - - + /** + * SELECT 语句 + */ private boolean isSelect(SQLStatement statement) { if(statement instanceof SQLSelectStatement) { return true; @@ -118,11 +515,12 @@ private void checkUnSupportedStatement(SQLStatement statement) throws SQLSyntaxE } /** - * + * 分析 SHOW SQL */ @Override public RouteResultset 
analyseShowSQL(SchemaConfig schema, RouteResultset rrs, String stmt) throws SQLSyntaxErrorException { + String upStmt = stmt.toUpperCase(); int tabInd = upStmt.indexOf(" TABLES"); if (tabInd > 0) {// show tables @@ -135,32 +533,41 @@ public RouteResultset analyseShowSQL(SchemaConfig schema, stmt = "SHOW TABLES" + stmt.substring(end); } } - String defaultNode= schema.getDataNode(); + String defaultNode= schema.getDataNode(); if(!Strings.isNullOrEmpty(defaultNode)) { - return RouterUtil.routeToSingleNode(rrs, defaultNode, stmt); + return RouterUtil.routeToSingleNode(rrs, defaultNode, stmt); } return RouterUtil.routeToMultiNode(false, rrs, schema.getMetaDataNodes(), stmt); } - // show index or column + + /** + * show index or column + */ int[] indx = RouterUtil.getSpecPos(upStmt, 0); if (indx[0] > 0) { - // has table + /** + * has table + */ int[] repPos = { indx[0] + indx[1], 0 }; String tableName = RouterUtil.getShowTableName(stmt, repPos); - // IN DB pattern + /** + * IN DB pattern + */ int[] indx2 = RouterUtil.getSpecPos(upStmt, indx[0] + indx[1] + 1); if (indx2[0] > 0) {// find LIKE OR WHERE repPos[1] = RouterUtil.getSpecEndPos(upStmt, indx2[0] + indx2[1]); } - stmt = stmt.substring(0, indx[0]) + " FROM " + tableName - + stmt.substring(repPos[1]); + stmt = stmt.substring(0, indx[0]) + " FROM " + tableName + stmt.substring(repPos[1]); RouterUtil.routeForTableMeta(rrs, schema, tableName, stmt); return rrs; } - // show create table tableName + + /** + * show create table tableName + */ int[] createTabInd = RouterUtil.getCreateTablePos(upStmt, 0); if (createTabInd[0] > 0) { int tableNameIndex = createTabInd[0] + createTabInd[1]; @@ -179,11 +586,6 @@ public RouteResultset analyseShowSQL(SchemaConfig schema, } - - - - - // /** // * 为一个表进行条件路由 // * @param schema @@ -268,7 +670,8 @@ public RouteResultset routeSystemInfo(SchemaConfig schema, int sqlType, case ServerParse.SHOW:// if origSQL is like show tables return analyseShowSQL(schema, rrs, stmt); case 
ServerParse.SELECT://if origSQL is like select @@ - if(stmt.contains("@@")){ + int index = stmt.indexOf("@@"); + if(index > 0 && "SELECT".equals(stmt.substring(0, index).trim().toUpperCase())){ return analyseDoubleAtSgin(schema, rrs, stmt); } break; @@ -282,20 +685,17 @@ public RouteResultset routeSystemInfo(SchemaConfig schema, int sqlType, /** * 对Desc语句进行分析 返回数据路由集合 - * - * @param schema - * 数据库名 - * @param rrs - * 数据路由集合 - * @param stmt - * 执行语句 - * @param ind - * 第一个' '的位置 - * @return RouteResultset(数据路由集合) + * * + * @param schema 数据库名 + * @param rrs 数据路由集合 + * @param stmt 执行语句 + * @param ind 第一个' '的位置 + * @return RouteResultset (数据路由集合) * @author mycat */ private static RouteResultset analyseDescrSQL(SchemaConfig schema, RouteResultset rrs, String stmt, int ind) { + final String MATCHED_FEATURE = "DESCRIBE "; final String MATCHED2_FEATURE = "DESC "; int pos = 0; @@ -325,8 +725,7 @@ private static RouteResultset analyseDescrSQL(SchemaConfig schema, } // 重置ind坐标。BEN GONG - ind = pos; - + ind = pos; int[] repPos = { ind, 0 }; String tableName = RouterUtil.getTableName(stmt, repPos); @@ -338,26 +737,20 @@ private static RouteResultset analyseDescrSQL(SchemaConfig schema, /** * 根据执行语句判断数据路由 * - * @param schema - * 数据库名 - * @param rrs - * 数据路由集合 - * @param stmt - * 执行sql - * @return RouteResultset数据路由集合 + * @param schema 数据库名 + * @param rrs 数据路由集合 + * @param stmt 执行sql + * @return RouteResultset 数据路由集合 * @throws SQLSyntaxErrorException * @author mycat */ private RouteResultset analyseDoubleAtSgin(SchemaConfig schema, - RouteResultset rrs, String stmt) throws SQLSyntaxErrorException { + RouteResultset rrs, String stmt) throws SQLSyntaxErrorException { String upStmt = stmt.toUpperCase(); - int atSginInd = upStmt.indexOf(" @@"); if (atSginInd > 0) { - return RouterUtil.routeToMultiNode(false, rrs, - schema.getMetaDataNodes(), stmt); + return RouterUtil.routeToMultiNode(false, rrs, schema.getMetaDataNodes(), stmt); } - return RouterUtil.routeToSingleNode(rrs, 
schema.getRandomDataNode(), stmt); } } \ No newline at end of file diff --git a/src/main/java/io/mycat/route/impl/middlerResultStrategy/BinaryOpResultHandler.java b/src/main/java/io/mycat/route/impl/middlerResultStrategy/BinaryOpResultHandler.java new file mode 100644 index 000000000..602b69e26 --- /dev/null +++ b/src/main/java/io/mycat/route/impl/middlerResultStrategy/BinaryOpResultHandler.java @@ -0,0 +1,81 @@ +package io.mycat.route.impl.middlerResultStrategy; + +import java.util.List; + +import com.alibaba.druid.sql.ast.SQLExprImpl; +import com.alibaba.druid.sql.ast.SQLObject; +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr; +import com.alibaba.druid.sql.ast.expr.SQLInListExpr; +import com.alibaba.druid.sql.ast.expr.SQLInSubQueryExpr; +import com.alibaba.druid.sql.ast.expr.SQLListExpr; +import com.alibaba.druid.sql.ast.expr.SQLNullExpr; +import com.alibaba.druid.sql.ast.expr.SQLQueryExpr; +import com.alibaba.druid.sql.ast.statement.SQLSelect; + +public class BinaryOpResultHandler implements RouteMiddlerReaultHandler { + + @Override + public String dohandler(SQLStatement statement, SQLSelect sqlselect, SQLObject parent, List param) { + + SQLBinaryOpExpr pp = (SQLBinaryOpExpr)parent; + if(pp.getLeft() instanceof SQLQueryExpr){ + SQLQueryExpr left = (SQLQueryExpr)pp.getLeft(); + if(left.getSubQuery().equals(sqlselect)){ + SQLExprImpl listExpr = null; + if(null==param||param.isEmpty()){ + listExpr = new SQLNullExpr(); + }else{ + listExpr = new SQLListExpr(); + listExpr.setParent(left.getParent()); + ((SQLListExpr)listExpr).getItems().addAll(param); + } + pp.setLeft(listExpr); + } + }else if(pp.getRight() instanceof SQLQueryExpr){ + SQLQueryExpr right = (SQLQueryExpr)pp.getRight(); + if(right.getSubQuery().equals(sqlselect)){ + SQLExprImpl listExpr = null; + if(null==param||param.isEmpty()){ + listExpr = new SQLNullExpr(); + }else{ + listExpr = new SQLListExpr(); + listExpr.setParent(right.getParent()); + 
((SQLListExpr)listExpr).getItems().addAll(param); + } + pp.setRight(listExpr); + + } + }else if(pp.getLeft() instanceof SQLInSubQueryExpr){ + SQLInSubQueryExpr left = (SQLInSubQueryExpr)pp.getLeft(); + if(left.getSubQuery().equals(sqlselect)){ + SQLExprImpl inlistExpr = null; + if(null==param||param.isEmpty()){ + inlistExpr = new SQLNullExpr(); + }else{ + inlistExpr = new SQLInListExpr(); + ((SQLInListExpr)inlistExpr).setTargetList(param); + ((SQLInListExpr)inlistExpr).setExpr(pp.getRight()); + ((SQLInListExpr)inlistExpr).setNot(left.isNot()); + ((SQLInListExpr)inlistExpr).setParent(left.getParent()); + } + pp.setLeft(inlistExpr); + } + }else if(pp.getRight() instanceof SQLInSubQueryExpr){ + SQLInSubQueryExpr right = (SQLInSubQueryExpr)pp.getRight(); + if(right.getSubQuery().equals(sqlselect)){ + SQLExprImpl listExpr = null; + if(null==param||param.isEmpty()){ + listExpr = new SQLNullExpr(); + }else{ + listExpr = new SQLListExpr(); + ((SQLListExpr)listExpr).getItems().addAll(param); + } + pp.setRight(listExpr); + + } + } + return statement.toString(); + } + +} diff --git a/src/main/java/io/mycat/route/impl/middlerResultStrategy/InSubQueryResultHandler.java b/src/main/java/io/mycat/route/impl/middlerResultStrategy/InSubQueryResultHandler.java new file mode 100644 index 000000000..66501c21a --- /dev/null +++ b/src/main/java/io/mycat/route/impl/middlerResultStrategy/InSubQueryResultHandler.java @@ -0,0 +1,43 @@ +package io.mycat.route.impl.middlerResultStrategy; + +import java.util.List; + +import com.alibaba.druid.sql.ast.SQLExprImpl; +import com.alibaba.druid.sql.ast.SQLObject; +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr; +import com.alibaba.druid.sql.ast.expr.SQLInListExpr; +import com.alibaba.druid.sql.ast.expr.SQLInSubQueryExpr; +import com.alibaba.druid.sql.ast.expr.SQLNullExpr; +import com.alibaba.druid.sql.ast.statement.SQLSelect; +import 
com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; + + +public class InSubQueryResultHandler implements RouteMiddlerReaultHandler { + + @Override + public String dohandler(SQLStatement statement,SQLSelect sqlselect,SQLObject parent,List param) { + SQLExprImpl inlistExpr = null; + if(null==param||param.isEmpty()){ + inlistExpr = new SQLNullExpr(); + }else{ + inlistExpr = new SQLInListExpr(); + ((SQLInListExpr)inlistExpr).setTargetList(param); + ((SQLInListExpr)inlistExpr).setExpr(((SQLInSubQueryExpr)parent).getExpr()); + ((SQLInListExpr)inlistExpr).setNot(((SQLInSubQueryExpr)parent).isNot()); + ((SQLInListExpr)inlistExpr).setParent(sqlselect.getParent()); + } + if(parent.getParent() instanceof MySqlSelectQueryBlock){ + ((MySqlSelectQueryBlock)parent.getParent()).setWhere(inlistExpr); + }else if(parent.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr pp = ((SQLBinaryOpExpr)parent.getParent()); + if(pp.getLeft().equals(parent)){ + pp.setLeft(inlistExpr); + }else if(pp.getRight().equals(parent)){ + pp.setRight(inlistExpr); + } + } + return statement.toString(); + } + +} diff --git a/src/main/java/io/mycat/route/impl/middlerResultStrategy/RouteMiddlerReaultHandler.java b/src/main/java/io/mycat/route/impl/middlerResultStrategy/RouteMiddlerReaultHandler.java new file mode 100644 index 000000000..4880bbaef --- /dev/null +++ b/src/main/java/io/mycat/route/impl/middlerResultStrategy/RouteMiddlerReaultHandler.java @@ -0,0 +1,20 @@ +package io.mycat.route.impl.middlerResultStrategy; + +import java.util.List; + +import com.alibaba.druid.sql.ast.SQLObject; +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.statement.SQLSelect; + +public interface RouteMiddlerReaultHandler { + + /** + * 处理中间结果 + * @param statement + * @param sqlselect + * @param param + * @return + */ + String dohandler(SQLStatement statement,SQLSelect sqlselect,SQLObject parent,List param); + +} diff --git 
a/src/main/java/io/mycat/route/impl/middlerResultStrategy/SQLAllResultHandler.java b/src/main/java/io/mycat/route/impl/middlerResultStrategy/SQLAllResultHandler.java new file mode 100644 index 000000000..40b2044c9 --- /dev/null +++ b/src/main/java/io/mycat/route/impl/middlerResultStrategy/SQLAllResultHandler.java @@ -0,0 +1,79 @@ +package io.mycat.route.impl.middlerResultStrategy; + +import java.util.List; + +import com.alibaba.druid.sql.ast.SQLExpr; +import com.alibaba.druid.sql.ast.SQLExprImpl; +import com.alibaba.druid.sql.ast.SQLObject; +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr; +import com.alibaba.druid.sql.ast.expr.SQLBinaryOperator; +import com.alibaba.druid.sql.ast.expr.SQLNullExpr; +import com.alibaba.druid.sql.ast.expr.SQLValuableExpr; +import com.alibaba.druid.sql.ast.statement.SQLSelect; + +/** + * 对于 = + select * from test where id = all (select id from mytab where xxx) ---> + 改写后 sql为 all: select * from test where id = val1 and id = val2 … + * @author lyj + * + */ +public class SQLAllResultHandler implements RouteMiddlerReaultHandler{ + + @Override + public String dohandler(SQLStatement statement, SQLSelect sqlselect, SQLObject parent, List param) { + if(parent.getParent() instanceof SQLBinaryOpExpr){ + + SQLExprImpl inlistExpr = null; + if(null==param||param.isEmpty()){ + inlistExpr = new SQLNullExpr(); + SQLBinaryOpExpr xp = (SQLBinaryOpExpr)parent.getParent(); + xp.setOperator(SQLBinaryOperator.Is); + if(xp.getRight().equals(parent)){ + xp.setRight(inlistExpr); + }else if(xp.getLeft().equals(parent)){ + xp.setLeft(inlistExpr); + } + }else{ + int len = param.size(); + + SQLBinaryOpExpr xp = (SQLBinaryOpExpr)parent.getParent(); + SQLExpr left = null; + if(xp.getRight().equals(parent)){ + left = xp.getLeft(); + }else if(xp.getLeft().equals(parent)){ + left = xp.getRight(); + } + + SQLBinaryOpExpr p = xp; + for(int i=0;i items = pp.getItems(); + for(int i=0;i ++offset && stmt.charAt(offset) 
== '@') { - if (stmt.length() > offset + "SLOW ".length()) { + if (stmt.length() > ++offset && stmt.charAt(offset) == '@' + &&stmt.length() > offset + "SLOW ".length()) { char c1 = stmt.charAt(++offset); char c2 = stmt.charAt(++offset); char c3 = stmt.charAt(++offset); @@ -76,7 +76,6 @@ static int clear2Check(String stmt, int offset) { } } } - } } return OTHER; } diff --git a/src/main/java/io/mycat/route/parser/ManagerParseHeartbeat.java b/src/main/java/io/mycat/route/parser/ManagerParseHeartbeat.java new file mode 100644 index 000000000..4453175e0 --- /dev/null +++ b/src/main/java/io/mycat/route/parser/ManagerParseHeartbeat.java @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.route.parser; + +import io.mycat.route.parser.util.Pair; + +/** + * @author songwie + */ +public final class ManagerParseHeartbeat { + + public static final int OTHER = -1; + public static final int DATASOURCE = 1; + + // SHOW @@HEARTBEAT + static int show2HeaCheck(String stmt, int offset) { + if (stmt.length() > offset + "RTBEAT".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + if ((c1 == 'R' || c1 == 'r') && (c2 == 'T' || c2 == 't') & (c3 == 'B' || c3 == 'b') + && (c4 == 'E' || c4 == 'e') & (c5 == 'A' || c5 == 'a') && (c6 == 'T' || c6 == 't')) { + if (stmt.length() > offset + ".DETAIL".length()) { + char c7 = stmt.charAt(++offset); + if(c7 == '.'){ + return show2HeaDetailCheck(stmt,offset); + } + } + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return ManagerParseShow.HEARTBEAT; + } + } + return OTHER; + } + // SHOW @@HEARTBEAT.DETAIL + static int show2HeaDetailCheck(String stmt, int offset) { + if (stmt.length() > offset + "DETAIL".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + if ((c1 == 'D' || c1 == 'd') && (c2 == 'E' || c2 == 'e') & (c3 == 'T' || c3 == 't') + && (c4 == 'A' || c4 == 'a') & (c5 == 'I' || c5 == 'i') && (c6 == 'L' || c6 == 'l')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return ManagerParseShow.HEARTBEAT_DETAIL; + } + } + return OTHER; + } + + public static Pair getPair(String stmt) { + int offset = stmt.indexOf("@@"); + String s = stmt.substring(++offset + " heartbeat.detail".length()); + char c = s.charAt(0); + offset = 0; + if(c == ' '){ + char c1 = s.charAt(++offset); + char c2 = 
s.charAt(++offset); + char c3 = s.charAt(++offset); + char c4 = s.charAt(++offset); + char c5 = s.charAt(++offset); + char c6 = s.charAt(++offset); + char c7 = s.charAt(++offset); + char c8 = s.charAt(++offset); + char c9 = s.charAt(++offset); + char c10 = s.charAt(++offset); + char c11 = s.charAt(++offset); + if ((c1 == 'W' || c1 == 'w') && (c2 == 'H' || c2 == 'h') && (c3 == 'E' || c3 == 'e') + && (c4 == 'R' || c4 == 'r') && (c5 == 'E' || c5 == 'e') + && c6 == ' ' && (c7 == 'N' || c7 == 'n') && (c8 == 'A' || c8 == 'a') && (c9 == 'M' || c9 == 'm') + && (c10 == 'E' || c10 == 'e') && (c11 == '=')) { + String name = s.substring(++offset).trim(); + return new Pair("name", name); + } + } + return new Pair("name", ""); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/parser/ManagerParseKill.java b/src/main/java/io/mycat/route/parser/ManagerParseKill.java similarity index 95% rename from src/main/java/io/mycat/server/parser/ManagerParseKill.java rename to src/main/java/io/mycat/route/parser/ManagerParseKill.java index 9d031e8ea..91b636b7f 100644 --- a/src/main/java/io/mycat/server/parser/ManagerParseKill.java +++ b/src/main/java/io/mycat/route/parser/ManagerParseKill.java @@ -21,9 +21,9 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.parser; +package io.mycat.route.parser; -import io.mycat.util.ParseUtil; +import io.mycat.route.parser.util.ParseUtil; /** * @author mycat @@ -54,8 +54,8 @@ public static int parse(String stmt, int offset) { // KILL @@CONNECTION static int kill2Check(String stmt, int offset) { - if (stmt.length() > ++offset && stmt.charAt(offset) == '@') { - if (stmt.length() > offset + 10) { + if (stmt.length() > ++offset && stmt.charAt(offset) == '@' + && stmt.length() > offset + 10) { char c1 = stmt.charAt(++offset); char c2 = stmt.charAt(++offset); char c3 = stmt.charAt(++offset); @@ -75,7 +75,6 @@ static int kill2Check(String stmt, int offset) { } return CONNECTION; } - } } return OTHER; } diff --git a/src/main/java/io/mycat/server/parser/ManagerParseReload.java b/src/main/java/io/mycat/route/parser/ManagerParseReload.java similarity index 61% rename from src/main/java/io/mycat/server/parser/ManagerParseReload.java rename to src/main/java/io/mycat/route/parser/ManagerParseReload.java index d9630d9d3..605d8880e 100644 --- a/src/main/java/io/mycat/server/parser/ManagerParseReload.java +++ b/src/main/java/io/mycat/route/parser/ManagerParseReload.java @@ -1,144 +1,211 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.parser; - -import io.mycat.util.ParseUtil; - -/** - * @author mycat - */ -public final class ManagerParseReload { - - public static final int OTHER = -1; - public static final int CONFIG = 1; - public static final int ROUTE = 2; - public static final int USER = 3; - public static final int CONFIG_ALL = 4; - - public static int parse(String stmt, int offset) { - int i = offset; - for (; i < stmt.length(); i++) { - switch (stmt.charAt(i)) { - case ' ': - continue; - case '/': - case '#': - i = ParseUtil.comment(stmt, i); - continue; - case '@': - return reload2Check(stmt, i); - default: - return OTHER; - } - } - return OTHER; - } - - static int reload2Check(String stmt, int offset) { - if (stmt.length() > ++offset && stmt.charAt(offset) == '@') { - if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case 'C': - case 'c': - return reload2CCheck(stmt, offset); - case 'R': - case 'r': - return reload2RCheck(stmt, offset); - case 'U': - case 'u': - return reload2UCheck(stmt, offset); - default: - return OTHER; - } - } - } - return OTHER; - } - - // RELOAD @@CONFIG - static int reload2CCheck(String stmt, int offset) { - if (stmt.length() > offset + 5) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - if ((c1 == 'O' || c1 == 'o') && (c2 == 'N' || c2 == 'n') && (c3 == 'F' || c3 == 'f') - && (c4 == 'I' || c4 == 'i') && (c5 == 'G' || c5 == 'g')) { - if (stmt.length() > offset + 4) - { - char c6 = stmt.charAt(++offset); - char c7 = stmt.charAt(++offset); - 
char c8 = stmt.charAt(++offset); - char c9 = stmt.charAt(++offset); - if ((c6 == '_' || c6 == '-') && (c7 == 'A' || c7 == 'a') && (c8 == 'L' || c8 == 'l') - && (c9 == 'L' || c9 == 'l') ) { - return CONFIG_ALL; - } - } - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - - return CONFIG; - } - } - return OTHER; - } - - // RELOAD @@ROUTE - static int reload2RCheck(String stmt, int offset) { - if (stmt.length() > offset + 4) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - if ((c1 == 'O' || c1 == 'o') && (c2 == 'U' || c2 == 'u') && (c3 == 'T' || c3 == 't') - && (c4 == 'E' || c4 == 'e')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return ROUTE; - } - } - return OTHER; - } - - // RELOAD @@USER - static int reload2UCheck(String stmt, int offset) { - if (stmt.length() > offset + 3) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - if ((c1 == 'S' || c1 == 's') && (c2 == 'E' || c2 == 'e') && (c3 == 'R' || c3 == 'r')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return USER; - } - } - return OTHER; - } - +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.route.parser; + +import io.mycat.route.parser.util.ParseUtil; + +/** + * @author mycat + */ +public final class ManagerParseReload { + + public static final int OTHER = -1; + public static final int CONFIG = 1; + public static final int ROUTE = 2; + public static final int USER = 3; + public static final int USER_STAT = 4; + public static final int CONFIG_ALL = 5; + public static final int SQL_SLOW = 6; + public static final int QUERY_CF = 8; + + public static int parse(String stmt, int offset) { + int i = offset; + for (; i < stmt.length(); i++) { + switch (stmt.charAt(i)) { + case ' ': + continue; + case '/': + case '#': + i = ParseUtil.comment(stmt, i); + continue; + case '@': + return reload2Check(stmt, i); + default: + return OTHER; + } + } + return OTHER; + } + + static int reload2Check(String stmt, int offset) { + if (stmt.length() > ++offset && stmt.charAt(offset) == '@' + && stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case 'C': + case 'c': + return reload2CCheck(stmt, offset); + case 'R': + case 'r': + return reload2RCheck(stmt, offset); + case 'U': + case 'u': + return reload2UCheck(stmt, offset); + case 'S': + case 's': + return reload2SCheck(stmt, offset); + case 'Q': + case 'q': + return reload2QCheck(stmt, offset); + default: + return OTHER; + } + } + return OTHER; + } + + // RELOAD @@CONFIG + static int reload2CCheck(String stmt, int offset) { + if (stmt.length() > offset + 5) { + char c1 = stmt.charAt(++offset); + char c2 
= stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + if ((c1 == 'O' || c1 == 'o') && (c2 == 'N' || c2 == 'n') && (c3 == 'F' || c3 == 'f') + && (c4 == 'I' || c4 == 'i') && (c5 == 'G' || c5 == 'g')) { + if (stmt.length() > offset + 4) + { + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + char c8 = stmt.charAt(++offset); + char c9 = stmt.charAt(++offset); + if ((c6 == '_' || c6 == '-') && (c7 == 'A' || c7 == 'a') && (c8 == 'L' || c8 == 'l') + && (c9 == 'L' || c9 == 'l') ) { + return CONFIG_ALL; + } + } + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + + return CONFIG; + } + } + return OTHER; + } + + // RELOAD @@ROUTE + static int reload2RCheck(String stmt, int offset) { + if (stmt.length() > offset + 4) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + if ((c1 == 'O' || c1 == 'o') && (c2 == 'U' || c2 == 'u') && (c3 == 'T' || c3 == 't') + && (c4 == 'E' || c4 == 'e')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return ROUTE; + } + } + return OTHER; + } + + // RELOAD @@USER + static int reload2UCheck(String stmt, int offset) { + if (stmt.length() > offset + 3) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + if ((c1 == 'S' || c1 == 's') && (c2 == 'E' || c2 == 'e') && (c3 == 'R' || c3 == 'r')) { + + + if (stmt.length() > offset + 5) + { + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + char c8 = stmt.charAt(++offset); + char c9 = stmt.charAt(++offset); + char c10 = stmt.charAt(++offset); + + if ((c6 == '_' || c6 == '-') && (c7 == 'S' || c7 == 's') && (c8 == 'T' || c8 == 't') + && (c9 == 'A' || c9 == 'a') && (c10 == 'T' || c10 == 't') ) { + return USER_STAT; + } + } + + if (stmt.length() > ++offset && stmt.charAt(offset) 
!= ' ') { + return OTHER; + } + return USER; + } + } + return OTHER; + } + + // RELOAD @@SQL + static int reload2SCheck(String stmt, int offset) { + if (stmt.length() > offset + 4) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + + // reload @@sqlslow + if ((c1 == 'Q' || c1 == 'q') && (c2 == 'L' || c2 == 'l') && (c3 == 's' || c3 == 'S') + && (c4 == 'L' || c4 == 'l') && (c5 == 'O' || c5 == 'o') && (c6 == 'W' || c6 == 'w') + && stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return SQL_SLOW ; + } + + return OTHER; + } + return OTHER; + } + + // RELOAD @@QUERY + static int reload2QCheck(String stmt, int offset) { + if (stmt.length() > offset + 4) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + + if ((c1 == 'U' || c1 == 'u') && (c2 == 'E' || c2 == 'e') && (c3 == 'R' || c3 == 'r') + && (c4 == 'Y' || c4 == 'y') && (c5 == '_' ) && (c6 == 'C' || c6 == 'c') && (c7 == 'F' || c7 == 'f') ) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return QUERY_CF ; + } + return OTHER; + } + } + return OTHER; + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/parser/ManagerParseRollback.java b/src/main/java/io/mycat/route/parser/ManagerParseRollback.java similarity index 97% rename from src/main/java/io/mycat/server/parser/ManagerParseRollback.java rename to src/main/java/io/mycat/route/parser/ManagerParseRollback.java index 18ca47549..466528965 100644 --- a/src/main/java/io/mycat/server/parser/ManagerParseRollback.java +++ b/src/main/java/io/mycat/route/parser/ManagerParseRollback.java @@ -21,9 +21,9 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.parser; +package io.mycat.route.parser; -import io.mycat.util.ParseUtil; +import io.mycat.route.parser.util.ParseUtil; /** * @author mycat @@ -55,8 +55,8 @@ public static int parse(String stmt, int offset) { } static int rollback2Check(String stmt, int offset) { - if (stmt.length() > ++offset && stmt.charAt(offset) == '@') { - if (stmt.length() > ++offset) { + if (stmt.length() > ++offset && stmt.charAt(offset) == '@' + && stmt.length() > ++offset) { switch (stmt.charAt(offset)) { case 'C': case 'c': @@ -70,7 +70,6 @@ static int rollback2Check(String stmt, int offset) { default: return OTHER; } - } } return OTHER; } diff --git a/src/main/java/io/mycat/server/parser/ManagerParseSelect.java b/src/main/java/io/mycat/route/parser/ManagerParseSelect.java similarity index 78% rename from src/main/java/io/mycat/server/parser/ManagerParseSelect.java rename to src/main/java/io/mycat/route/parser/ManagerParseSelect.java index 7b945185f..b76a93c81 100644 --- a/src/main/java/io/mycat/server/parser/ManagerParseSelect.java +++ b/src/main/java/io/mycat/route/parser/ManagerParseSelect.java @@ -21,9 +21,9 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.parser; +package io.mycat.route.parser; -import io.mycat.util.ParseUtil; +import io.mycat.route.parser.util.ParseUtil; /** * @author mycat @@ -33,9 +33,11 @@ public final class ManagerParseSelect { public static final int OTHER = -1; public static final int VERSION_COMMENT = 1; public static final int SESSION_AUTO_INCREMENT = 2; + public static final int SESSION_TX_READ_ONLY = 3; private static final char[] _VERSION_COMMENT = "VERSION_COMMENT".toCharArray(); private static final char[] _SESSION_AUTO_INCREMENT = "SESSION.AUTO_INCREMENT_INCREMENT".toCharArray(); + private static final char[] _SESSION_TX_READ_ONLY = "SESSION.TX_READ_ONLY".toCharArray(); public static int parse(String stmt, int offset) { int i = offset; @@ -57,8 +59,8 @@ public static int parse(String stmt, int offset) { } static int select2Check(String stmt, int offset) { - if (stmt.length() > ++offset && stmt.charAt(offset) == '@') { - if (stmt.length() > ++offset) { + if (stmt.length() > ++offset && stmt.charAt(offset) == '@' + && stmt.length() > ++offset) { switch (stmt.charAt(offset)) { case 'S': case 's': @@ -69,7 +71,6 @@ static int select2Check(String stmt, int offset) { default: return OTHER; } - } } return OTHER; } @@ -77,28 +78,33 @@ static int select2Check(String stmt, int offset) { // VERSION_COMMENT static int select2VCheck(String stmt, int offset) { int length = offset + _VERSION_COMMENT.length; - if (stmt.length() >= length) { - if (ParseUtil.compare(stmt, offset, _VERSION_COMMENT)) { + if (stmt.length() >= length + && ParseUtil.compare(stmt, offset, _VERSION_COMMENT)) { if (stmt.length() > length && stmt.charAt(length) != ' ') { return OTHER; } return VERSION_COMMENT; - } } return OTHER; } - // SESSION.AUTO_INCREMENT_INCREMENT + // SESSION.AUTO_INCREMENT_INCREMENT or SESSION.TX_READ_ONLY static int select2SCheck(String stmt, int offset) { int length = offset + _SESSION_AUTO_INCREMENT.length; - if (stmt.length() >= length) { - if (ParseUtil.compare(stmt, 
offset, _SESSION_AUTO_INCREMENT)) { + if (stmt.length() >= length + && ParseUtil.compare(stmt, offset, _SESSION_AUTO_INCREMENT)) { if (stmt.length() > length && stmt.charAt(length) != ' ') { return OTHER; } return SESSION_AUTO_INCREMENT; + } else if (stmt.length() >= (offset + _SESSION_TX_READ_ONLY.length) + && ParseUtil.compare(stmt, offset, _SESSION_TX_READ_ONLY)) { + if (stmt.length() > length && stmt.charAt(length) != ' ') { + return OTHER; } + return SESSION_TX_READ_ONLY; } + return OTHER; } diff --git a/src/main/java/io/mycat/server/parser/ManagerParseShow.java b/src/main/java/io/mycat/route/parser/ManagerParseShow.java similarity index 65% rename from src/main/java/io/mycat/server/parser/ManagerParseShow.java rename to src/main/java/io/mycat/route/parser/ManagerParseShow.java index 231a163b0..ec3100d2d 100644 --- a/src/main/java/io/mycat/server/parser/ManagerParseShow.java +++ b/src/main/java/io/mycat/route/parser/ManagerParseShow.java @@ -1,1150 +1,1632 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.parser; - -import io.mycat.util.ParseUtil; - -/** - * @author mycat - */ -public final class ManagerParseShow { - - public static final int OTHER = -1; - public static final int COMMAND = 1; - public static final int CONNECTION = 2; - public static final int DATABASE = 3; - public static final int DATANODE = 4; - public static final int DATASOURCE = 5; - public static final int HELP = 6; - public static final int PARSER = 7; - public static final int PROCESSOR = 8; - public static final int ROUTER = 9; - public static final int SERVER = 10; - public static final int SQL = 11; - public static final int SQL_DETAIL = 12; - public static final int SQL_EXECUTE = 13; - public static final int SQL_SLOW = 14; - public static final int THREADPOOL = 15; - public static final int TIME_CURRENT = 16; - public static final int TIME_STARTUP = 17; - public static final int VERSION = 18; - public static final int VARIABLES = 19; - public static final int COLLATION = 20; - public static final int CONNECTION_SQL = 21; - public static final int DATANODE_WHERE = 22; - public static final int DATASOURCE_WHERE = 23; - public static final int HEARTBEAT = 24; - public static final int SLOW_DATANODE = 25; - public static final int SLOW_SCHEMA = 26; - public static final int BACKEND = 27; - public static final int CACHE = 28; - public static final int SESSION = 29; - - public static int parse(String stmt, int offset) { - int i = offset; - for (; i < stmt.length(); i++) { - switch (stmt.charAt(i)) { - case ' ': - continue; - case '/': - case '#': - i = ParseUtil.comment(stmt, i); - continue; - case '@': - return show2Check(stmt, i); - case 'C': - case 'c': - return showCCheck(stmt, i); - case 'd': - case 'D': - return show2DCheck(stmt, i); - case 'V': - case 'v': - return showVCheck(stmt, i); - default: - return OTHER; - } - } - 
return OTHER; - } - - // SHOW @ - static int show2Check(String stmt, int offset) { - if (stmt.length() > ++offset && stmt.charAt(offset) == '@') { - if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case 'B': - case 'b': - return show2BCheck(stmt, offset); - case 'C': - case 'c': - return show2CCheck(stmt, offset); - case 'D': - case 'd': - return show2DCheck(stmt, offset); - case 'H': - case 'h': - return show2HCheck(stmt, offset); - case 'P': - case 'p': - return show2PCheck(stmt, offset); - case 'R': - case 'r': - return show2RCheck(stmt, offset); - case 'S': - case 's': - return show2SCheck(stmt, offset); - case 'T': - case 't': - return show2TCheck(stmt, offset); - case 'V': - case 'v': - return show2VCheck(stmt, offset); - default: - return OTHER; - } - } - } - return OTHER; - } - - // SHOW COLLATION - static int showCCheck(String stmt, int offset) { - if (stmt.length() > offset + "OLLATION".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - char c7 = stmt.charAt(++offset); - char c8 = stmt.charAt(++offset); - if ((c1 == 'O' || c1 == 'o') && (c2 == 'L' || c2 == 'l') && (c3 == 'L' || c3 == 'l') - && (c4 == 'A' || c4 == 'a') && (c5 == 'T' || c5 == 't') && (c6 == 'I' || c6 == 'i') - && (c7 == 'O' || c7 == 'o') && (c8 == 'N' || c8 == 'n')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return COLLATION; - } - } - return OTHER; - } - - // SHOW VARIABLES - static int showVCheck(String stmt, int offset) { - if (stmt.length() > offset + "ARIABLES".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - char c7 = stmt.charAt(++offset); - char c8 = stmt.charAt(++offset); - if 
((c1 == 'A' || c1 == 'a') && (c2 == 'R' || c2 == 'r') && (c3 == 'I' || c3 == 'i') - && (c4 == 'A' || c4 == 'a') && (c5 == 'B' || c5 == 'b') && (c6 == 'L' || c6 == 'l') - && (c7 == 'E' || c7 == 'e') && (c8 == 'S' || c8 == 's')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return VARIABLES; - } - } - return OTHER; - } - - // SHOW @@BACKEND - static int show2BCheck(String stmt, int offset) { - if (stmt.length() > offset + "ACKEND".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - if ((c1 == 'A' || c1 == 'a') && (c2 == 'C' || c2 == 'c') && (c3 == 'K' || c3 == 'k') - && (c4 == 'E' || c4 == 'e') && (c5 == 'N' || c5 == 'n') && (c6 == 'D' || c6 == 'd') - && (stmt.length() == ++offset || ParseUtil.isEOF(stmt.charAt(offset)))) { - return BACKEND; - } - } - return OTHER; - } - - // SHOW @@C - static int show2CCheck(String stmt, int offset) { - if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case 'O': - case 'o': - return show2CoCheck(stmt, offset); - case 'A': - case 'a': - return show2CACheck(stmt, offset); - default: - return OTHER; - } - } - return OTHER; - } - // SHOW @@CACHE - private static int show2CACheck(String stmt, int offset) { - String remain=stmt.substring(offset); - if(remain.equalsIgnoreCase("ACHE")) - { - return CACHE; - } - return OTHER; - } - - // SHOW @@DATA - static int show2DCheck(String stmt, int offset) { - if (stmt.length() > offset + "ATA".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - if ((c1 == 'A' || c1 == 'a') && (c2 == 'T' || c2 == 't') && (c3 == 'A' || c3 == 'a')) { - if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case 'B': - case 'b': - return show2DataBCheck(stmt, offset); - case 'N': - case 'n': - return 
show2DataNCheck(stmt, offset); - case 'S': - case 's': - return show2DataSCheck(stmt, offset); - default: - return OTHER; - } - } - } - } - return OTHER; - } - - // SHOW @@HELP - static int show2HCheck(String stmt, int offset) { - if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case 'E': - case 'e': - return show2HeCheck(stmt, offset); - default: - return OTHER; - } - } - return OTHER; - } - - // SHOW @@HE - static int show2HeCheck(String stmt, int offset) { - if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case 'L': - case 'l': - return show2HelCheck(stmt, offset); - case 'A': - case 'a': - return show2HeaCheck(stmt, offset); - default: - return OTHER; - } - } - return OTHER; - } - - // SHOW @@HELP - static int show2HelCheck(String stmt, int offset) { - if (stmt.length() > offset + "P".length()) { - char c1 = stmt.charAt(++offset); - if ((c1 == 'P' || c1 == 'p')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return HELP; - } - } - return OTHER; - } - - // SHOW @@HEARTBEAT - static int show2HeaCheck(String stmt, int offset) { - if (stmt.length() > offset + "RTBEAT".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - if ((c1 == 'R' || c1 == 'r') && (c2 == 'T' || c2 == 't') & (c3 == 'B' || c3 == 'b') - && (c4 == 'E' || c4 == 'e') & (c5 == 'A' || c5 == 'a') && (c6 == 'T' || c6 == 't')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return HEARTBEAT; - } - } - return OTHER; - } - - // SHOW @@P - static int show2PCheck(String stmt, int offset) { - if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case 'A': - case 'a': - return show2PaCheck(stmt, offset); - case 'R': - case 'r': - return show2PrCheck(stmt, offset); - default: - return OTHER; - } - } - return OTHER; - 
} - - // SHOW @@ROUTER - static int show2RCheck(String stmt, int offset) { - if (stmt.length() > offset + "OUTER".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - if ((c1 == 'O' || c1 == 'o') && (c2 == 'U' || c2 == 'u') && (c3 == 'T' || c3 == 't') - && (c4 == 'E' || c4 == 'e') && (c5 == 'R' || c5 == 'r')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return ROUTER; - } - } - return OTHER; - } - - // SHOW @@S - static int show2SCheck(String stmt, int offset) { - if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case 'E': - case 'e': - return show2SeCheck(stmt, offset); - case 'Q': - case 'q': - return show2SqCheck(stmt, offset); - case 'L': - case 'l': - return show2SlCheck(stmt, offset); - default: - return OTHER; - } - } - return OTHER; - } - - - - // SHOW @@SLOW - static int show2SlCheck(String stmt, int offset) { - if (stmt.length() > offset + "OW ".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - if ((c1 == 'O' || c1 == 'o') && (c2 == 'W' || c2 == 'w') && c3 == ' ') { - while (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case 'W': - case 'w': - return show2SlowWhereCheck(stmt, offset); - default: - return OTHER; - } - } - } - } - return OTHER; - } - - // SHOW @@SLOW WHERE - static int show2SlowWhereCheck(String stmt, int offset) { - if (stmt.length() > offset + "HERE".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - if ((c1 == 'H' || c1 == 'h') && (c2 == 'E' || c2 == 'e') && (c3 == 'R' || c3 == 'r') - && (c4 == 'E' || c4 == 'e')) { - while (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case 'D': - case 'd': - 
return show2SlowWhereDCheck(stmt, offset); - case 'S': - case 's': - return show2SlowWhereSCheck(stmt, offset); - default: - return OTHER; - } - } - } - } - return OTHER; - } - - // SHOW @@SLOW WHERE DATANODE= XXXXXX - static int show2SlowWhereDCheck(String stmt, int offset) { - if (stmt.length() > offset + "ATANODE".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - char c7 = stmt.charAt(++offset); - if ((c1 == 'A' || c1 == 'a') && (c2 == 'T' || c2 == 't') && (c3 == 'A' || c3 == 'a') - && (c4 == 'N' || c4 == 'n') && (c5 == 'O' || c5 == 'o') && (c6 == 'D' || c6 == 'd') - && (c7 == 'E' || c7 == 'e')) { - while (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case '=': - while (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - default: - return (offset << 8) | SLOW_DATANODE; - } - } - return OTHER; - default: - return OTHER; - } - } - } - } - return OTHER; - } - - // SHOW @@SLOW WHERE SCHEMA= XXXXXX - static int show2SlowWhereSCheck(String stmt, int offset) { - if (stmt.length() > offset + "CHEMA".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - if ((c1 == 'C' || c1 == 'c') && (c2 == 'H' || c2 == 'h') && (c3 == 'E' || c3 == 'e') - && (c4 == 'M' || c4 == 'm') && (c5 == 'A' || c5 == 'a')) { - while (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case '=': - while (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - default: - return (offset << 8) | SLOW_SCHEMA; - } - } - return OTHER; - default: - return OTHER; - } - } - } - } - return OTHER; - } - - // SHOW @@T - static int show2TCheck(String stmt, int offset) { 
- if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case 'H': - case 'h': - return show2ThCheck(stmt, offset); - case 'I': - case 'i': - return show2TiCheck(stmt, offset); - default: - return OTHER; - } - } - return OTHER; - } - - // SHOW @@VERSION - static int show2VCheck(String stmt, int offset) { - if (stmt.length() > offset + "ERSION".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - if ((c1 == 'E' || c1 == 'e') && (c2 == 'R' || c2 == 'r') && (c3 == 'S' || c3 == 's') - && (c4 == 'I' || c4 == 'i') && (c5 == 'O' || c5 == 'o') && (c6 == 'N' || c6 == 'n')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return VERSION; - } - } - return OTHER; - } - - // SHOW @@CO - static int show2CoCheck(String stmt, int offset) { - if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case 'M': - case 'm': - return show2ComCheck(stmt, offset); - case 'N': - case 'n': - return show2ConCheck(stmt, offset); - default: - return OTHER; - } - } - return OTHER; - } - - // SHOW @@DATABASE - static int show2DataBCheck(String stmt, int offset) { - if (stmt.length() > offset + "ASE".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - if ((c1 == 'A' || c1 == 'a') && (c2 == 'S' || c2 == 's') && (c3 == 'E' || c3 == 'e')) { -// if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { -// return OTHER; -// } - return DATABASE; - } - } - return OTHER; - } - - // SHOW @@DATANODE - static int show2DataNCheck(String stmt, int offset) { - if (stmt.length() > offset + "ODE".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - if ((c1 == 'O' || c1 == 'o') && (c2 == 'D' || c2 == 'd') && (c3 == 'E' || c3 == 'e')) { - while 
(stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case 'W': - case 'w': - return show2DataNWhereCheck(stmt, offset); - default: - return OTHER; - } - } - return DATANODE; - } - } - return OTHER; - } - - // SHOW @@DATANODE WHERE - static int show2DataNWhereCheck(String stmt, int offset) { - if (stmt.length() > offset + "HERE".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - if ((c1 == 'H' || c1 == 'h') && (c2 == 'E' || c2 == 'e') && (c3 == 'R' || c3 == 'r') - && (c4 == 'E' || c4 == 'e')) { - while (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case 'S': - case 's': - return show2DataNWhereSchemaCheck(stmt, offset); - default: - return OTHER; - } - } - } - } - return OTHER; - } - - // SHOW @@DATANODE WHERE SCHEMA = XXXXXX - static int show2DataNWhereSchemaCheck(String stmt, int offset) { - if (stmt.length() > offset + "CHEMA".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - if ((c1 == 'C' || c1 == 'c') && (c2 == 'H' || c2 == 'h') && (c3 == 'E' || c3 == 'e') - && (c4 == 'M' || c4 == 'm') && (c5 == 'A' || c5 == 'a')) { - while (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case '=': - while (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - default: - return (offset << 8) | DATANODE_WHERE; - } - } - return OTHER; - default: - return OTHER; - } - } - } - } - return OTHER; - } - - // SHOW @@DATASOURCE - static int show2DataSCheck(String stmt, int offset) { - if (stmt.length() > offset + "OURCE".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = 
stmt.charAt(++offset); - if ((c1 == 'O' || c1 == 'o') && (c2 == 'U' || c2 == 'u') && (c3 == 'R' || c3 == 'r') - && (c4 == 'C' || c4 == 'c') && (c5 == 'E' || c5 == 'e')) { - while (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case 'W': - case 'w': - return show2DataSWhereCheck(stmt, offset); - default: - return OTHER; - } - } - - return DATASOURCE; - } - } - return OTHER; - } - - // SHOW @@DATASOURCE WHERE - static int show2DataSWhereCheck(String stmt, int offset) { - if (stmt.length() > offset + "HERE".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - if ((c1 == 'H' || c1 == 'h') && (c2 == 'E' || c2 == 'e') && (c3 == 'R' || c3 == 'r') - && (c4 == 'E' || c4 == 'e')) { - while (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case 'd': - case 'D': - return show2DataSWhereDatanodeCheck(stmt, offset); - default: - return OTHER; - } - } - } - } - return OTHER; - } - - // SHOW @@DATASOURCE WHERE DATANODE = XXXXXX - static int show2DataSWhereDatanodeCheck(String stmt, int offset) { - if (stmt.length() > offset + "ATANODE".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - char c7 = stmt.charAt(++offset); - if ((c1 == 'A' || c1 == 'a') && (c2 == 'T' || c2 == 't') && (c3 == 'A' || c3 == 'a') - && (c4 == 'N' || c4 == 'n') && (c5 == 'O' || c5 == 'o') && (c6 == 'D' || c6 == 'd') - && (c7 == 'E' || c7 == 'e')) { - while (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case '=': - while (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - default: - return (offset << 8) | DATASOURCE_WHERE; - } - } - return OTHER; - default: - return OTHER; - } - } - } - } 
- return OTHER; - } - - // SHOW @@PARSER - static int show2PaCheck(String stmt, int offset) { - if (stmt.length() > offset + "RSER".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - if ((c1 == 'R' || c1 == 'r') && (c2 == 'S' || c2 == 's') && (c3 == 'E' || c3 == 'e') - && (c4 == 'R' || c4 == 'r')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return PARSER; - } - } - return OTHER; - } - - // SHOW @@PROCESSOR - static int show2PrCheck(String stmt, int offset) { - if (stmt.length() > offset + "OCESSOR".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - char c7 = stmt.charAt(++offset); - if ((c1 == 'O' || c1 == 'o') && (c2 == 'C' || c2 == 'c') && (c3 == 'E' || c3 == 'e') - && (c4 == 'S' || c4 == 's') && (c5 == 'S' || c5 == 's') && (c6 == 'O' || c6 == 'o') - && (c7 == 'R' || c7 == 'r')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return PROCESSOR; - } - } - return OTHER; - } - - // SHOW @@SERVER - // SHOW @@SESSION - static int show2SeCheck(String stmt, int offset) { - if (stmt.length() > offset + "SSION".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - if ((c1 == 'S' || c1 == 's') && (c2 == 'S' || c2 == 's') && (c3 == 'I' || c3 == 'i') - && (c4 == 'O' || c4 == 'o') && (c5 == 'N' || c5 == 'n')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return SESSION; - } - } - else if (stmt.length() > offset + "RVER".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = 
stmt.charAt(++offset); - if ((c1 == 'R' || c1 == 'r') && (c2 == 'V' || c2 == 'v') && (c3 == 'E' || c3 == 'e') - && (c4 == 'R' || c4 == 'r')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return SERVER; - } - } - return OTHER; - } - - // SHOW @@THREADPOOL - static int show2ThCheck(String stmt, int offset) { - if (stmt.length() > offset + "READPOOL".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - char c7 = stmt.charAt(++offset); - char c8 = stmt.charAt(++offset); - if ((c1 == 'R' || c1 == 'r') && (c2 == 'E' || c2 == 'e') && (c3 == 'A' || c3 == 'a') - && (c4 == 'D' || c4 == 'd') && (c5 == 'P' || c5 == 'p') && (c6 == 'O' || c6 == 'o') - && (c7 == 'O' || c7 == 'o') && (c8 == 'L' || c8 == 'l')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return THREADPOOL; - } - } - return OTHER; - } - - // SHOW @@TIME. 
- static int show2TiCheck(String stmt, int offset) { - if (stmt.length() > offset + "ME.".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - if ((c1 == 'M' || c1 == 'm') && (c2 == 'E' || c2 == 'e') && (c3 == '.')) { - if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case 'C': - case 'c': - return show2TimeCCheck(stmt, offset); - case 'S': - case 's': - return show2TimeSCheck(stmt, offset); - default: - return OTHER; - } - } - } - } - return OTHER; - } - - // SHOW @@COMMAND - static int show2ComCheck(String stmt, int offset) { - if (stmt.length() > offset + "MAND".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - if ((c1 == 'M' || c1 == 'm') && (c2 == 'A' || c2 == 'a') && (c3 == 'N' || c3 == 'n') - && (c4 == 'D' || c4 == 'd')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return COMMAND; - } - } - return OTHER; - } - - // SHOW @@CONNECTION - static int show2ConCheck(String stmt, int offset) { - if (stmt.length() > offset + "NECTION".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - char c7 = stmt.charAt(++offset); - if ((c1 == 'N' || c1 == 'n') && (c2 == 'E' || c2 == 'e') && (c3 == 'C' || c3 == 'c') - && (c4 == 'T' || c4 == 't') && (c5 == 'I' || c5 == 'i') && (c6 == 'O' || c6 == 'o') - && (c7 == 'N' || c7 == 'n')) { - if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - return CONNECTION; - case '.': - return show2ConnectonSQL(stmt, offset); - default: - return OTHER; - } - } - return CONNECTION; - } - } - return OTHER; - } - - // SHOW @@CONNECTION.SQL - static int show2ConnectonSQL(String stmt, int offset) { - if (stmt.length() > offset + 
"SQL".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - if ((c1 == 'S' || c1 == 's') && (c2 == 'Q' || c2 == 'q') && (c3 == 'L' || c3 == 'l')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return CONNECTION_SQL; - } - } - return OTHER; - } - - // SHOW @@TIME.CURRENT - static int show2TimeCCheck(String stmt, int offset) { - if (stmt.length() > offset + "URRENT".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - if ((c1 == 'U' || c1 == 'u') && (c2 == 'R' || c2 == 'r') && (c3 == 'R' || c3 == 'r') - && (c4 == 'E' || c4 == 'e') && (c5 == 'N' || c5 == 'n') && (c6 == 'T' || c6 == 't')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return TIME_CURRENT; - } - } - return OTHER; - } - - // SHOW @@TIME.STARTUP - static int show2TimeSCheck(String stmt, int offset) { - if (stmt.length() > offset + "TARTUP".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - if ((c1 == 'T' || c1 == 't') && (c2 == 'A' || c2 == 'a') && (c3 == 'R' || c3 == 'r') - && (c4 == 'T' || c4 == 't') && (c5 == 'U' || c5 == 'u') && (c6 == 'P' || c6 == 'p')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return TIME_STARTUP; - } - } - return OTHER; - } - - // SHOW @@SQ - static int show2SqCheck(String stmt, int offset) { - if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case 'L': - case 'l': - return show2SqlCheck(stmt, offset); - default: - return OTHER; - } - } - return OTHER; - } - - // SHOW @@SQL - static int show2SqlCheck(String stmt, int offset) { - if 
(stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case '.': - return show2SqlDotCheck(stmt, offset); - case ' ': - return show2SqlBlankCheck(stmt, offset); - default: - return OTHER; - } - } - return OTHER; - } - - // SHOW @@SQL. - static int show2SqlDotCheck(String stmt, int offset) { - if (stmt.length() > ++offset) { - switch (stmt.charAt(offset)) { - case 'D': - case 'd': - return show2SqlDCheck(stmt, offset); - case 'E': - case 'e': - return show2SqlECheck(stmt, offset); - case 'S': - case 's': - return show2SqlSCheck(stmt, offset); - default: - return OTHER; - } - } - return OTHER; - } - - // SHOW @@SQL WHERE ID = XXXXXX - static int show2SqlBlankCheck(String stmt, int offset) { - for (++offset; stmt.length() > offset; ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case 'W': - case 'w': - if (isWhere(stmt, offset)) { - return SQL; - } else { - return OTHER; - } - default: - return OTHER; - } - } - - return OTHER; - } - - // SHOW @@SQL.DETAIL WHERE ID = XXXXXX - static int show2SqlDCheck(String stmt, int offset) { - if (stmt.length() > offset + "ETAIL".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - if ((c1 == 'E' || c1 == 'e') && (c2 == 'T' || c2 == 't') && (c3 == 'A' || c3 == 'a') - && (c4 == 'I' || c4 == 'i') && (c5 == 'L' || c5 == 'l')) { - for (++offset; stmt.length() > offset; ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case 'W': - case 'w': - if (isWhere(stmt, offset)) { - return SQL_DETAIL; - } else { - return OTHER; - } - default: - return OTHER; - } - } - } - } - return OTHER; - } - - // SHOW @@SQL.EXECUTE - static int show2SqlECheck(String stmt, int offset) { - if (stmt.length() > offset + "XECUTE".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); 
- char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - if ((c1 == 'X' || c1 == 'x') && (c2 == 'E' || c2 == 'e') && (c3 == 'C' || c3 == 'c') - && (c4 == 'U' || c4 == 'u') && (c5 == 'T' || c5 == 't') && (c6 == 'E' || c6 == 'e')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return SQL_EXECUTE; - } - } - return OTHER; - } - - // SHOW @@SQL.SLOW - static int show2SqlSCheck(String stmt, int offset) { - if (stmt.length() > offset + "LOW".length()) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - if ((c1 == 'L' || c1 == 'l') && (c2 == 'O' || c2 == 'o') && (c3 == 'W' || c3 == 'w')) { - if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { - return OTHER; - } - return SQL_SLOW; - } - } - return OTHER; - } - - static boolean isWhere(String stmt, int offset) { - if (stmt.length() > offset + 5) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - if ((c1 == 'H' || c1 == 'h') && (c2 == 'E' || c2 == 'e') && (c3 == 'R' || c3 == 'r') - && (c4 == 'E' || c4 == 'e') && (c5 == ' ')) { - boolean jump1 = false; - for (++offset; stmt.length() > offset && !jump1; ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case 'I': - case 'i': - jump1 = true; - break; - default: - return false; - } - } - if ((stmt.length() > offset) && (stmt.charAt(offset) == 'D' || stmt.charAt(offset) == 'd')) { - boolean jump2 = false; - for (++offset; stmt.length() > offset && !jump2; ++offset) { - switch (stmt.charAt(offset)) { - case ' ': - continue; - case '=': - jump2 = true; - break; - default: - return false; - } - } - return isSqlId(stmt, offset); - } - } - } - return false; - } - - static boolean isSqlId(String stmt, int offset) { - String id = stmt.substring(offset).trim(); - try { - Long.parseLong(id); - } catch (Exception e) { - 
return false; - } - return true; - } - - public static String getWhereParameter(String stmt) { - int offset = stmt.indexOf('='); - ++offset; - return stmt.substring(offset).trim(); - } - +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.route.parser; + +import io.mycat.route.parser.util.ParseUtil; + +/** + * @author mycat + */ +public final class ManagerParseShow { + + public static final int OTHER = -1; + public static final int COMMAND = 1; + public static final int CONNECTION = 2; + public static final int DATABASE = 3; + public static final int DATANODE = 4; + public static final int DATASOURCE = 5; + public static final int HELP = 6; + public static final int PARSER = 7; + public static final int PROCESSOR = 8; + public static final int ROUTER = 9; + public static final int SERVER = 10; + public static final int SQL = 11; + public static final int SQL_DETAIL = 12; + public static final int SQL_EXECUTE = 13; + public static final int SQL_SLOW = 14; + public static final int SQL_SUM_USER = 15; + public static final int SQL_SUM_TABLE = 16; + public static final int SQL_HIGH = 17; + public static final int SQL_CONDITION = 18; + public static final int SQL_LARGE = 19; + public static final int SQL_RESULTSET = 20; + + public static final int THREADPOOL = 21; + public static final int TIME_CURRENT = 22; + public static final int TIME_STARTUP = 23; + public static final int VERSION = 24; + public static final int VARIABLES = 25; + public static final int COLLATION = 26; + public static final int CONNECTION_SQL = 27; + public static final int DATANODE_WHERE = 28; + public static final int DATASOURCE_WHERE = 29; + public static final int HEARTBEAT = 30; + public static final int SLOW_DATANODE = 31; + public static final int SLOW_SCHEMA = 32; + public static final int BACKEND = 33; + public static final int BACKEND_OLD = 34; + + public static final int CACHE = 35; + public static final int SESSION = 36; + public static final int SYSPARAM = 37; + public static final int SYSLOG = 38; + public static final int HEARTBEAT_DETAIL = 39; + public static final int DATASOURCE_SYNC = 40; + public static final int DATASOURCE_SYNC_DETAIL = 41; + public static final int DATASOURCE_CLUSTER = 
42; + + public static final int WHITE_HOST = 43; + public static final int WHITE_HOST_SET = 44; + public static final int DIRECTMEMORY_TOTAL = 45; + public static final int DIRECTMEMORY_DETAILl = 46; + + + public static int parse(String stmt, int offset) { + int i = offset; + for (; i < stmt.length(); i++) { + switch (stmt.charAt(i)) { + case ' ': + continue; + case '/': + case '#': + i = ParseUtil.comment(stmt, i); + continue; + case '@': + return show2Check(stmt, i); + case 'C': + case 'c': + return showCCheck(stmt, i); + case 'd': + case 'D': + return show2DCheck(stmt, i); + case 'V': + case 'v': + return showVCheck(stmt, i); + default: + return OTHER; + } + } + return OTHER; + } + + // SHOW @ + static int show2Check(String stmt, int offset) { + if (stmt.length() > ++offset && stmt.charAt(offset) == '@' + && stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case 'B': + case 'b': + return show2BCheck(stmt, offset); + case 'C': + case 'c': + return show2CCheck(stmt, offset); + case 'D': + case 'd': + return show2DCheck(stmt, offset); + case 'H': + case 'h': + return show2HCheck(stmt, offset); + case 'P': + case 'p': + return show2PCheck(stmt, offset); + case 'R': + case 'r': + return show2RCheck(stmt, offset); + case 'S': + case 's': + return show2SCheck(stmt, offset); + case 'T': + case 't': + return show2TCheck(stmt, offset); + case 'V': + case 'v': + return show2VCheck(stmt, offset); + case 'W': + case 'w': + return show2WCheck(stmt, offset); + default: + return OTHER; + } + } + return OTHER; + } + + // SHOW COLLATION + static int showCCheck(String stmt, int offset) { + if (stmt.length() > offset + "OLLATION".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + char c8 = stmt.charAt(++offset); + if ((c1 == 'O' || c1 == 'o') && (c2 == 'L' || c2 == 
'l') && (c3 == 'L' || c3 == 'l') + && (c4 == 'A' || c4 == 'a') && (c5 == 'T' || c5 == 't') && (c6 == 'I' || c6 == 'i') + && (c7 == 'O' || c7 == 'o') && (c8 == 'N' || c8 == 'n')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return COLLATION; + } + } + return OTHER; + } + + // SHOW VARIABLES + static int showVCheck(String stmt, int offset) { + if (stmt.length() > offset + "ARIABLES".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + char c8 = stmt.charAt(++offset); + if ((c1 == 'A' || c1 == 'a') && (c2 == 'R' || c2 == 'r') && (c3 == 'I' || c3 == 'i') + && (c4 == 'A' || c4 == 'a') && (c5 == 'B' || c5 == 'b') && (c6 == 'L' || c6 == 'l') + && (c7 == 'E' || c7 == 'e') && (c8 == 'S' || c8 == 's')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return VARIABLES; + } + } + return OTHER; + } + + // SHOW @@BACKEND + static int show2BCheck(String stmt, int offset) { + if (stmt.length() > offset + "ACKEND".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + if ((c1 == 'A' || c1 == 'a') && (c2 == 'C' || c2 == 'c') && (c3 == 'K' || c3 == 'k') + && (c4 == 'E' || c4 == 'e') && (c5 == 'N' || c5 == 'n') && (c6 == 'D' || c6 == 'd')) { + + if (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ';': + case ' ': + return BACKEND; + case '.': + return show2BackendOld(stmt, offset); + default: + return OTHER; + } + } + return BACKEND; + + } + } + return OTHER; + } + + static int show2BackendOld(String stmt, int offset) { + if (stmt.length() > offset + "OLD".length()) { + char c1 = stmt.charAt(++offset); + char c2 = 
stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + if ((c1 == 'O' || c1 == 'o') && (c2 == 'L' || c2 == 'l') && (c3 == 'D' || c3 == 'd')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return BACKEND_OLD; + } + } + return OTHER; + } + + // SHOW @@C + static int show2CCheck(String stmt, int offset) { + if (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case 'O': + case 'o': + return show2CoCheck(stmt, offset); + case 'A': + case 'a': + return show2CACheck(stmt, offset); + default: + return OTHER; + } + } + return OTHER; + } + // SHOW @@CACHE + private static int show2CACheck(String stmt, int offset) { + String remain=stmt.substring(offset); + if(remain.equalsIgnoreCase("ACHE")) + { + return CACHE; + } + return OTHER; + } + + // SHOW @@DATA + static int show2DCheck(String stmt, int offset) { + if (stmt.length() > offset + "ATA".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + if ((c1 == 'A' || c1 == 'a') && (c2 == 'T' || c2 == 't') && (c3 == 'A' || c3 == 'a') + && stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case 'B': + case 'b': + return show2DataBCheck(stmt, offset); + case 'N': + case 'n': + return show2DataNCheck(stmt, offset); + case 'S': + case 's': + return show2DataSCheck(stmt, offset); + default: + return OTHER; + } + }else if( (c1 == 'I'|| c1 == 'i') + &&(c2 == 'R' || c2 == 'r') + && (c3 == 'E' || c3 == 'e') + && stmt.length() > ++offset){ /**DIRECTMEMORY**/ + switch (stmt.charAt(offset)) { + case 'C': + case 'c': + return show2DirectMemoryCheck(stmt,offset); + default: + return OTHER; + } + } + } + return OTHER; + } + // SHOW @@DIRECT_MEMORY=1 or 0 + static int show2DirectMemoryCheck(String stmt, int offset) { + if (stmt.length() > offset + "TMEMORY".length()) { + + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + 
char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + char c8 = stmt.charAt(++offset); + + if ((c1 == 'T' || c1 == 't') + && (c2 == 'M' || c2 == 'm') + && (c3 == 'E' || c3 == 'e') + && (c4 == 'M' || c4 == 'm') + && (c5 == 'O' || c5 == 'o') + && (c6 == 'R' || c6 == 'r') + && (c7 == 'Y' || c7 == 'y') + && (c8 == '=' || c8 == '=') + && stmt.length() > ++offset) { + + switch (stmt.charAt(offset)) { + case '1': + return DIRECTMEMORY_TOTAL; + case '2': + return DIRECTMEMORY_DETAILl; + default: + return OTHER; + } + + } + } + + return OTHER; + } + // SHOW @@DataSyn + static int show2DataSynCheck(String stmt, int offset) { + if (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case 'S': + case 's': + if (stmt.length() > offset + "yn".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + if ((c1 == 'Y' || c1 == 'y') && (c2 == 'N' || c2 == 'n')){ + switch (stmt.charAt(++offset)) { + case 'S': + case 's': + return show2SynStatuslCheck(stmt,offset); + case 'D': + case 'd': + return show2SynDetailCheck(stmt,offset); + default: + return OTHER; + } + + }else{ + return OTHER; + } + } + case 'C': + case 'c': + if (stmt.length() > offset + "luster".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + if ((c1 == 'L' || c1 == 'l') && (c2 == 'U' || c2 == 'u') + && (c3 == 'S' || c3 == 's') && (c4 == 'T' || c4 == 't') + && (c5 == 'E' || c5 == 'e')&& (c6 == 'R' || c6 == 'r') ){ + return DATASOURCE_CLUSTER; + }else{ + return OTHER; + } + } + default: + return OTHER; + } + } + return OTHER; + } + //show @@datasource.syndetail + static int show2SynDetailCheck(String stmt, int offset) { + if (stmt.length() > offset + "etail".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = 
stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + + if ((c1 == 'E' || c1 == 'e') && (c2 == 'T' || c2 == 't') && (c3 == 'A' || c3 == 'a') + && (c4 == 'I' || c4 == 'i') && (c5 == 'L' || c5 == 'l')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return DATASOURCE_SYNC_DETAIL; + } + } + return OTHER; + } + //show @@datasource.synstatus + static int show2SynStatuslCheck(String stmt, int offset) { + if (stmt.length() > offset + "tatus".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + + if ((c1 == 'T' || c1 == 't') && (c2 == 'A' || c2 == 'a') && (c3 == 'T' || c3 == 't') + && (c4 == 'U' || c4 == 'u') && (c5 == 'S' || c5 == 's')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return DATASOURCE_SYNC; + } + } + return OTHER; + } + + // SHOW @@HELP + static int show2HCheck(String stmt, int offset) { + if (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case 'E': + case 'e': + return show2HeCheck(stmt, offset); + default: + return OTHER; + } + } + return OTHER; + } + + // SHOW @@HE + static int show2HeCheck(String stmt, int offset) { + if (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case 'L': + case 'l': + return show2HelCheck(stmt, offset); + case 'A': + case 'a': + return ManagerParseHeartbeat.show2HeaCheck(stmt, offset); + default: + return OTHER; + } + } + return OTHER; + } + + // SHOW @@HELP + static int show2HelCheck(String stmt, int offset) { + if (stmt.length() > offset + "P".length()) { + char c1 = stmt.charAt(++offset); + if ((c1 == 'P' || c1 == 'p')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return HELP; + } + } + return OTHER; + } + + + + // SHOW @@P + static int show2PCheck(String stmt, int offset) { + if (stmt.length() > 
++offset) { + switch (stmt.charAt(offset)) { + case 'A': + case 'a': + return show2PaCheck(stmt, offset); + case 'R': + case 'r': + return show2PrCheck(stmt, offset); + default: + return OTHER; + } + } + return OTHER; + } + + // SHOW @@ROUTER + static int show2RCheck(String stmt, int offset) { + if (stmt.length() > offset + "OUTER".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + if ((c1 == 'O' || c1 == 'o') && (c2 == 'U' || c2 == 'u') && (c3 == 'T' || c3 == 't') + && (c4 == 'E' || c4 == 'e') && (c5 == 'R' || c5 == 'r')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return ROUTER; + } + } + return OTHER; + } + + // SHOW @@S + static int show2SCheck(String stmt, int offset) { + if (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case 'E': + case 'e': + return show2SeCheck(stmt, offset); + case 'Q': + case 'q': + return show2SqCheck(stmt, offset); + case 'L': + case 'l': + return show2SlCheck(stmt, offset); + case 'Y': + case 'y': + return show2SyCheck(stmt, offset); + default: + return OTHER; + } + } + return OTHER; + } + + // SHOW @@SLOW + static int show2SlCheck(String stmt, int offset) { + if (stmt.length() > offset + "OW ".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + if ((c1 == 'O' || c1 == 'o') && (c2 == 'W' || c2 == 'w') && c3 == ' ') { + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case 'W': + case 'w': + return show2SlowWhereCheck(stmt, offset); + default: + return OTHER; + } + } + } + } + return OTHER; + } + + // SHOW @@SYSPARAM + static int show2sysparam(String stmt, int offset) { + if (stmt.length() > offset + "ARAM".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 
= stmt.charAt(++offset); + + if ((c1 == 'A' || c1 == 'a') && (c2 == 'R' || c2 == 'r') + && (c3 == 'A' || c3 == 'a') && (c4 == 'M' || c4 == 'm')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return SYSPARAM; + } + } + return OTHER; + } + + static int show2syslog(String stmt, int offset) { + + if (stmt.length() > offset + "SLOG".length()) { + + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + + if ( (c1 == 'O' || c1 == 'o') && (c2 == 'G' || c2 == 'g') && c3 == ' ' ) { + + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + char c8 = stmt.charAt(++offset); + + if ((c4 == 'L' || c4 == 'l') && (c5 == 'I' || c5 == 'i') && (c6 == 'M' || c6 == 'm') + && (c7 == 'I' || c7 == 'i') && (c8 == 'T' || c8 == 't') ) { + + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case '=': + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + default: + return (offset << 8) | SYSLOG; + } + } + return OTHER; + default: + return OTHER; + } + } + } + + return SYSLOG; + } + } + + return OTHER; + } + + // SHOW @@SYSPARAM + // SHOW @@SYSLOG LIMIT=1000 + static int show2SyCheck(String stmt, int offset) { + + if (stmt.length() > offset + "YS".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + if ( c1 == 'S' || c1 == 's' ) { + switch (c2) { + case 'L': + case 'l': + return show2syslog(stmt, offset); + case 'P': + case 'p': + return show2sysparam(stmt, offset); + default: + return OTHER; + } + } + } + return OTHER; + } + + + + // SHOW @@SLOW WHERE + static int show2SlowWhereCheck(String stmt, int offset) { + if (stmt.length() > offset + "HERE".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + if 
((c1 == 'H' || c1 == 'h') && (c2 == 'E' || c2 == 'e') && (c3 == 'R' || c3 == 'r') + && (c4 == 'E' || c4 == 'e')) { + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case 'D': + case 'd': + return show2SlowWhereDCheck(stmt, offset); + case 'S': + case 's': + return show2SlowWhereSCheck(stmt, offset); + default: + return OTHER; + } + } + } + } + return OTHER; + } + + // SHOW @@SLOW WHERE DATANODE= XXXXXX + static int show2SlowWhereDCheck(String stmt, int offset) { + if (stmt.length() > offset + "ATANODE".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + if ((c1 == 'A' || c1 == 'a') && (c2 == 'T' || c2 == 't') && (c3 == 'A' || c3 == 'a') + && (c4 == 'N' || c4 == 'n') && (c5 == 'O' || c5 == 'o') && (c6 == 'D' || c6 == 'd') + && (c7 == 'E' || c7 == 'e')) { + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case '=': + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + default: + return (offset << 8) | SLOW_DATANODE; + } + } + return OTHER; + default: + return OTHER; + } + } + } + } + return OTHER; + } + + // SHOW @@SLOW WHERE SCHEMA= XXXXXX + static int show2SlowWhereSCheck(String stmt, int offset) { + if (stmt.length() > offset + "CHEMA".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + if ((c1 == 'C' || c1 == 'c') && (c2 == 'H' || c2 == 'h') && (c3 == 'E' || c3 == 'e') + && (c4 == 'M' || c4 == 'm') && (c5 == 'A' || c5 == 'a')) { + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case '=': + while (stmt.length() > ++offset) { + switch 
(stmt.charAt(offset)) { + case ' ': + continue; + default: + return (offset << 8) | SLOW_SCHEMA; + } + } + return OTHER; + default: + return OTHER; + } + } + } + } + return OTHER; + } + + // SHOW @@T + static int show2TCheck(String stmt, int offset) { + if (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case 'H': + case 'h': + return show2ThCheck(stmt, offset); + case 'I': + case 'i': + return show2TiCheck(stmt, offset); + default: + return OTHER; + } + } + return OTHER; + } + + // SHOW @@VERSION + static int show2VCheck(String stmt, int offset) { + if (stmt.length() > offset + "ERSION".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + if ((c1 == 'E' || c1 == 'e') && (c2 == 'R' || c2 == 'r') && (c3 == 'S' || c3 == 's') + && (c4 == 'I' || c4 == 'i') && (c5 == 'O' || c5 == 'o') && (c6 == 'N' || c6 == 'n')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return VERSION; + } + } + return OTHER; + } + // SHOW @@White ip白名单 + static int show2WCheck(String stmt, int offset) { + if (stmt.length() > offset + "HITE".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + if ((c1 == 'H' || c1 == 'h') && (c2 == 'I' || c2 == 'i') && (c3 == 'T' || c3 == 't') + && (c4 == 'E' || c4 == 'e') ) { + if (stmt.length() > ++offset && stmt.charAt(offset) == '.') { + return show2WhiteCheck(stmt, offset); + } + return WHITE_HOST; + } + } + return OTHER; + } + static int show2WhiteCheck(String stmt, int offset) { + if (stmt.length() > offset + "set".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + + if ((c1 == 'S' || c1 == 's') && (c2 == 'E' || c2 == 'e') && (c3 == 'T' || c3 == 't')) { + if 
(stmt.length() > ++offset && stmt.charAt(offset) == '=') { + return WHITE_HOST_SET; + } + return OTHER; + } + } + return OTHER; + } + // SHOW @@CO + static int show2CoCheck(String stmt, int offset) { + if (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case 'M': + case 'm': + return show2ComCheck(stmt, offset); + case 'N': + case 'n': + return show2ConCheck(stmt, offset); + default: + return OTHER; + } + } + return OTHER; + } + + // SHOW @@DATABASE + static int show2DataBCheck(String stmt, int offset) { + if (stmt.length() > offset + "ASE".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + if ((c1 == 'A' || c1 == 'a') && (c2 == 'S' || c2 == 's') && (c3 == 'E' || c3 == 'e')) { +// if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { +// return OTHER; +// } + return DATABASE; + } + } + return OTHER; + } + + // SHOW @@DATANODE + static int show2DataNCheck(String stmt, int offset) { + if (stmt.length() > offset + "ODE".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + if ((c1 == 'O' || c1 == 'o') && (c2 == 'D' || c2 == 'd') && (c3 == 'E' || c3 == 'e')) { + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case 'W': + case 'w': + return show2DataNWhereCheck(stmt, offset); + default: + return OTHER; + } + } + return DATANODE; + } + } + return OTHER; + } + + // SHOW @@DATANODE WHERE + static int show2DataNWhereCheck(String stmt, int offset) { + if (stmt.length() > offset + "HERE".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + if ((c1 == 'H' || c1 == 'h') && (c2 == 'E' || c2 == 'e') && (c3 == 'R' || c3 == 'r') + && (c4 == 'E' || c4 == 'e')) { + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case 'S': + case 's': + 
return show2DataNWhereSchemaCheck(stmt, offset); + default: + return OTHER; + } + } + } + } + return OTHER; + } + + // SHOW @@DATANODE WHERE SCHEMA = XXXXXX + static int show2DataNWhereSchemaCheck(String stmt, int offset) { + if (stmt.length() > offset + "CHEMA".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + if ((c1 == 'C' || c1 == 'c') && (c2 == 'H' || c2 == 'h') && (c3 == 'E' || c3 == 'e') + && (c4 == 'M' || c4 == 'm') && (c5 == 'A' || c5 == 'a')) { + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case '=': + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + default: + return (offset << 8) | DATANODE_WHERE; + } + } + return OTHER; + default: + return OTHER; + } + } + } + } + return OTHER; + } + + // SHOW @@DATASOURCE + static int show2DataSCheck(String stmt, int offset) { + if (stmt.length() > offset + "OURCE".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + if ((c1 == 'O' || c1 == 'o') && (c2 == 'U' || c2 == 'u') && (c3 == 'R' || c3 == 'r') + && (c4 == 'C' || c4 == 'c') && (c5 == 'E' || c5 == 'e')) { + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case 'W': + case 'w': + return show2DataSWhereCheck(stmt, offset); + case '.': + return show2DataSynCheck(stmt, offset); + default: + return OTHER; + } + } + + return DATASOURCE; + } + } + return OTHER; + } + + // SHOW @@DATASOURCE WHERE + static int show2DataSWhereCheck(String stmt, int offset) { + if (stmt.length() > offset + "HERE".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + if ((c1 == 'H' || c1 
== 'h') && (c2 == 'E' || c2 == 'e') && (c3 == 'R' || c3 == 'r') + && (c4 == 'E' || c4 == 'e')) { + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case 'd': + case 'D': + return show2DataSWhereDatanodeCheck(stmt, offset); + default: + return OTHER; + } + } + } + } + return OTHER; + } + + // SHOW @@DATASOURCE WHERE DATANODE = XXXXXX + static int show2DataSWhereDatanodeCheck(String stmt, int offset) { + if (stmt.length() > offset + "ATANODE".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + if ((c1 == 'A' || c1 == 'a') && (c2 == 'T' || c2 == 't') && (c3 == 'A' || c3 == 'a') + && (c4 == 'N' || c4 == 'n') && (c5 == 'O' || c5 == 'o') && (c6 == 'D' || c6 == 'd') + && (c7 == 'E' || c7 == 'e')) { + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case '=': + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + default: + return (offset << 8) | DATASOURCE_WHERE; + } + } + return OTHER; + default: + return OTHER; + } + } + } + } + return OTHER; + } + + // SHOW @@PARSER + static int show2PaCheck(String stmt, int offset) { + if (stmt.length() > offset + "RSER".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + if ((c1 == 'R' || c1 == 'r') && (c2 == 'S' || c2 == 's') && (c3 == 'E' || c3 == 'e') + && (c4 == 'R' || c4 == 'r')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return PARSER; + } + } + return OTHER; + } + + // SHOW @@PROCESSOR + static int show2PrCheck(String stmt, int offset) { + if (stmt.length() > offset + "OCESSOR".length()) { + char c1 = stmt.charAt(++offset); + char c2 = 
stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + if ((c1 == 'O' || c1 == 'o') && (c2 == 'C' || c2 == 'c') && (c3 == 'E' || c3 == 'e') + && (c4 == 'S' || c4 == 's') && (c5 == 'S' || c5 == 's') && (c6 == 'O' || c6 == 'o') + && (c7 == 'R' || c7 == 'r')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return PROCESSOR; + } + } + return OTHER; + } + + // SHOW @@SERVER + // SHOW @@SESSION + static int show2SeCheck(String stmt, int offset) { + if (stmt.length() > offset + "SSION".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + if ((c1 == 'S' || c1 == 's') && (c2 == 'S' || c2 == 's') && (c3 == 'I' || c3 == 'i') + && (c4 == 'O' || c4 == 'o') && (c5 == 'N' || c5 == 'n')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return SESSION; + } + } + else if (stmt.length() > offset + "RVER".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + if ((c1 == 'R' || c1 == 'r') && (c2 == 'V' || c2 == 'v') && (c3 == 'E' || c3 == 'e') + && (c4 == 'R' || c4 == 'r')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return SERVER; + } + } + return OTHER; + } + + // SHOW @@THREADPOOL + static int show2ThCheck(String stmt, int offset) { + if (stmt.length() > offset + "READPOOL".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + char c8 = stmt.charAt(++offset); + if ((c1 == 'R' || c1 == 
'r') && (c2 == 'E' || c2 == 'e') && (c3 == 'A' || c3 == 'a') + && (c4 == 'D' || c4 == 'd') && (c5 == 'P' || c5 == 'p') && (c6 == 'O' || c6 == 'o') + && (c7 == 'O' || c7 == 'o') && (c8 == 'L' || c8 == 'l')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return THREADPOOL; + } + } + return OTHER; + } + + // SHOW @@TIME. + static int show2TiCheck(String stmt, int offset) { + if (stmt.length() > offset + "ME.".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + if ((c1 == 'M' || c1 == 'm') && (c2 == 'E' || c2 == 'e') && (c3 == '.') + && (stmt.length() > ++offset)) { + switch (stmt.charAt(offset)) { + case 'C': + case 'c': + return show2TimeCCheck(stmt, offset); + case 'S': + case 's': + return show2TimeSCheck(stmt, offset); + default: + return OTHER; + } + } + } + return OTHER; + } + + // SHOW @@COMMAND + static int show2ComCheck(String stmt, int offset) { + if (stmt.length() > offset + "MAND".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + if ((c1 == 'M' || c1 == 'm') && (c2 == 'A' || c2 == 'a') && (c3 == 'N' || c3 == 'n') + && (c4 == 'D' || c4 == 'd')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return COMMAND; + } + } + return OTHER; + } + + // SHOW @@CONNECTION + static int show2ConCheck(String stmt, int offset) { + if (stmt.length() > offset + "NECTION".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + if ((c1 == 'N' || c1 == 'n') && (c2 == 'E' || c2 == 'e') && (c3 == 'C' || c3 == 'c') + && (c4 == 'T' || c4 == 't') && (c5 == 'I' || c5 == 'i') && (c6 == 'O' || c6 == 'o') + && (c7 == 'N' || c7 == 'n')) { + 
if (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + return CONNECTION; + case '.': + return show2ConnectonSQL(stmt, offset); + default: + return OTHER; + } + } + return CONNECTION; + } + } + return OTHER; + } + + // SHOW @@CONNECTION.SQL + static int show2ConnectonSQL(String stmt, int offset) { + if (stmt.length() > offset + "SQL".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + if ((c1 == 'S' || c1 == 's') && (c2 == 'Q' || c2 == 'q') && (c3 == 'L' || c3 == 'l')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return CONNECTION_SQL; + } + } + return OTHER; + } + + // SHOW @@TIME.CURRENT + static int show2TimeCCheck(String stmt, int offset) { + if (stmt.length() > offset + "URRENT".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + if ((c1 == 'U' || c1 == 'u') && (c2 == 'R' || c2 == 'r') && (c3 == 'R' || c3 == 'r') + && (c4 == 'E' || c4 == 'e') && (c5 == 'N' || c5 == 'n') && (c6 == 'T' || c6 == 't')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return TIME_CURRENT; + } + } + return OTHER; + } + + // SHOW @@TIME.STARTUP + static int show2TimeSCheck(String stmt, int offset) { + if (stmt.length() > offset + "TARTUP".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + if ((c1 == 'T' || c1 == 't') && (c2 == 'A' || c2 == 'a') && (c3 == 'R' || c3 == 'r') + && (c4 == 'T' || c4 == 't') && (c5 == 'U' || c5 == 'u') && (c6 == 'P' || c6 == 'p')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return TIME_STARTUP; + } + } + 
return OTHER; + } + + // SHOW @@SQ + static int show2SqCheck(String stmt, int offset) { + if (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case 'L': + case 'l': + return show2SqlCheck(stmt, offset); + default: + return OTHER; + } + } + return OTHER; + } + + // SHOW @@SQL + static int show2SqlCheck(String stmt, int offset) { + if (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case '.': + return show2SqlDotCheck(stmt, offset); + case ' ': + return show2SqlBlankCheck(stmt, offset); + default: + return SQL; + } + } else { + return SQL; + } + } + + // SHOW @@SQL. + static int show2SqlDotCheck(String stmt, int offset) { + if (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case 'D': + case 'd': + return show2SqlDCheck(stmt, offset); + case 'E': + case 'e': + return show2SqlECheck(stmt, offset); + case 'S': + case 's': + char c1 = stmt.charAt(++offset); + switch (c1) { + case 'L': + case 'l': + return show2SqlSLCheck(stmt, offset); + case 'U': + case 'u': + return show2SqlSUCheck(stmt, offset); + } + case 'H': + case 'h': + return show2SqlHCheck(stmt, offset); + case 'L': + case 'l': + return show2SqlLCheck(stmt, offset); + case 'C': + case 'c': + return show2SqlCCheck(stmt, offset); + case 'R': + case 'r': + return show2SqlRCheck(stmt, offset); + default: + return OTHER; + } + } + return OTHER; + } + + // SHOW @@SQL WHERE ID = XXXXXX + static int show2SqlBlankCheck(String stmt, int offset) { + for (++offset; stmt.length() > offset;) { + switch (stmt.charAt(offset)) { + case ' ': + return SQL; + case 'W': + case 'w': + if (isWhere(stmt, offset)) { + return SQL; + } else { + return OTHER; + } + default: + return (offset << 8) | SQL; + } + } + + return OTHER; + } + + // SHOW @@SQL.DETAIL WHERE ID = XXXXXX + static int show2SqlDCheck(String stmt, int offset) { + if (stmt.length() > offset + "ETAIL".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = 
stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + if ((c1 == 'E' || c1 == 'e') && (c2 == 'T' || c2 == 't') && (c3 == 'A' || c3 == 'a') + && (c4 == 'I' || c4 == 'i') && (c5 == 'L' || c5 == 'l')) { + for (++offset; stmt.length() > offset; ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case 'W': + case 'w': + if (isWhere(stmt, offset)) { + return SQL_DETAIL; + } else { + return OTHER; + } + default: + return OTHER; + } + } + } + } + return OTHER; + } + + // SHOW @@SQL.EXECUTE + static int show2SqlECheck(String stmt, int offset) { + if (stmt.length() > offset + "XECUTE".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + if ((c1 == 'X' || c1 == 'x') && (c2 == 'E' || c2 == 'e') && (c3 == 'C' || c3 == 'c') + && (c4 == 'U' || c4 == 'u') && (c5 == 'T' || c5 == 't') && (c6 == 'E' || c6 == 'e')) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return SQL_EXECUTE; + } + } + return OTHER; + } + + // SHOW @@SQL.SLOW + static int show2SqlSLCheck(String stmt, int offset) { + if (stmt.length() > offset + "OW".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + if ((c1 == 'O' || c1 == 'o') && (c2 == 'W' || c2 == 'w')) { + + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + default: + return (offset << 8) | SQL_SLOW; + } + } + + return SQL_SLOW; + } + } + return OTHER; + } + + // SHOW @@SQL.HIGH + static int show2SqlHCheck(String stmt, int offset) { + + if (stmt.length() > offset + "IGH".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + if ((c1 == 'I' || c1 == 'i') && (c2 == 'G' || c2 == 'g') && (c3 == 'H' || c3 == 'h') ) { + + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + 
case ' ': + continue; + default: + return (offset << 8) | SQL_HIGH; + } + } + + return SQL_HIGH; + } + } + return OTHER; + } + + // SHOW @@SQL.RESULTSET + static int show2SqlRCheck(String stmt, int offset) { + + if (stmt.length() > offset + "ESULTSET".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + char c8 = stmt.charAt(++offset); + if ((c1 == 'E' || c1 == 'e') && (c2 == 'S' || c2 == 's') && (c3 == 'U' || c3 == 'u')&& + (c4 == 'l' || c4 == 'i') && (c5 == 'T' || c5 == 't') && (c6 == 'S' || c6 == 's')&& + (c7 == 'E' || c7 == 'e') && (c8 == 'T' || c8 == 't') ) { + + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + default: + return (offset << 8) | SQL_RESULTSET; + } + } + + return SQL_RESULTSET; + } + } + return OTHER; + } + + // SHOW @@SQL.LARGE + static int show2SqlLCheck(String stmt, int offset) { + + if (stmt.length() > offset + "ARGE".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + if ((c1 == 'A' || c1 == 'a') && (c2 == 'R' || c2 == 'r') && (c3 == 'G' || c3 == 'g') && (c4 == 'E' || c4 == 'e') ) { + + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + default: + return (offset << 8) | SQL_LARGE; + } + } + + return SQL_LARGE; + } + } + return OTHER; + } + + // SHOW @@sql.condition + static int show2SqlCCheck(String stmt, int offset) { + + if (stmt.length() > offset + "ONDITION".length()) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + char c8 = stmt.charAt(++offset); + 
if ( (c1 == 'O' || c1 == 'o') && (c2 == 'N' || c2 == 'n') && (c3 == 'D' || c3 == 'd') && + (c4 == 'I' || c4 == 'i') && (c5 == 'T' || c5 == 't') && (c6 == 'I' || c6 == 'i') && + (c7 == 'O' || c7 == 'o') && (c8 == 'N' || c8 == 'n') ) { + if (stmt.length() > ++offset && stmt.charAt(offset) != ' ') { + return OTHER; + } + return SQL_CONDITION; + } + } + return OTHER; + } + + // SHOW @@SQL.SUM + static int show2SqlSUCheck(String stmt, int offset) { + if (stmt.length() > offset + "M".length()) { + char c1 = stmt.charAt(++offset); + if ( c1 == 'M' || c1 == 'm') { + if (stmt.length() > ++offset && stmt.charAt(offset) == '.') { + + /** + * TODO: modify by zhuam + * + * 兼容之前指令 + * 在保留 SHOW @@SQL.SUM 指令的同时, 扩展支持 SHOW @@SQL.SUM.TABLE 、 SHOW @@SQL.SUM.USER + */ + if ( stmt.length() > (offset+4) ) { + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + + if ( (c2 == 'U' || c2 == 'u') && (c3 == 'S' || c3 == 's') + && (c4 == 'E' || c4 == 'e') && (c5 == 'R' || c5 == 'r') ) { + return SQL_SUM_USER; + + } else if ( (c2 == 'T' || c2 == 't') && (c3 == 'A' || c3 == 'a') + && (c4 == 'B' || c4 == 'b') && (c5 == 'L' || c5 == 'l') + && stmt.length() > (offset+1)) { + + char c6 = stmt.charAt(++offset); + if ( c6 == 'E' || c6 == 'e') { + + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + default: + return (offset << 8) | SQL_SUM_TABLE; + } + } + + return SQL_SUM_TABLE; + } + } + + } + + return OTHER; + } + + while (stmt.length() > ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + default: + return (offset << 8) | SQL_SUM_USER; + } + } + return SQL_SUM_USER; + } + } + return OTHER; + } + + + static boolean isWhere(String stmt, int offset) { + if (stmt.length() > offset + 5) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = 
stmt.charAt(++offset); + if ((c1 == 'H' || c1 == 'h') && (c2 == 'E' || c2 == 'e') && (c3 == 'R' || c3 == 'r') + && (c4 == 'E' || c4 == 'e') && (c5 == ' ')) { + boolean jump1 = false; + for (++offset; stmt.length() > offset && !jump1; ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case 'I': + case 'i': + jump1 = true; + break; + default: + return false; + } + } + if ((stmt.length() > offset) && (stmt.charAt(offset) == 'D' || stmt.charAt(offset) == 'd')) { + boolean jump2 = false; + for (++offset; stmt.length() > offset && !jump2; ++offset) { + switch (stmt.charAt(offset)) { + case ' ': + continue; + case '=': + jump2 = true; + break; + default: + return false; + } + } + return isSqlId(stmt, offset); + } + } + } + return false; + } + + static boolean isSqlId(String stmt, int offset) { + String id = stmt.substring(offset).trim(); + try { + Long.parseLong(id); + } catch (Exception e) { + return false; + } + return true; + } + + public static String getWhereParameter(String stmt) { + int offset = stmt.indexOf('='); + ++offset; + return stmt.substring(offset).trim(); + } + } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/parser/ManagerParseStop.java b/src/main/java/io/mycat/route/parser/ManagerParseStop.java similarity index 95% rename from src/main/java/io/mycat/server/parser/ManagerParseStop.java rename to src/main/java/io/mycat/route/parser/ManagerParseStop.java index 84ba6121e..d604d38b8 100644 --- a/src/main/java/io/mycat/server/parser/ManagerParseStop.java +++ b/src/main/java/io/mycat/route/parser/ManagerParseStop.java @@ -21,10 +21,10 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.parser; +package io.mycat.route.parser; -import io.mycat.util.Pair; -import io.mycat.util.ParseUtil; +import io.mycat.route.parser.util.Pair; +import io.mycat.route.parser.util.ParseUtil; import io.mycat.util.SplitUtil; /** @@ -70,8 +70,8 @@ public static Pair getPair(String stmt) { // HEARTBEAT static int stop2Check(String stmt, int offset) { - if (stmt.length() > ++offset && stmt.charAt(offset) == '@') { - if (stmt.length() > offset + 9) { + if (stmt.length() > ++offset && stmt.charAt(offset) == '@' + && stmt.length() > offset + 9) { char c1 = stmt.charAt(++offset); char c2 = stmt.charAt(++offset); char c3 = stmt.charAt(++offset); @@ -89,7 +89,6 @@ static int stop2Check(String stmt, int offset) { } return HEARTBEAT; } - } } return OTHER; } diff --git a/src/main/java/io/mycat/server/parser/ManagerParseSwitch.java b/src/main/java/io/mycat/route/parser/ManagerParseSwitch.java similarity index 95% rename from src/main/java/io/mycat/server/parser/ManagerParseSwitch.java rename to src/main/java/io/mycat/route/parser/ManagerParseSwitch.java index beb00a7fb..be3757f40 100644 --- a/src/main/java/io/mycat/server/parser/ManagerParseSwitch.java +++ b/src/main/java/io/mycat/route/parser/ManagerParseSwitch.java @@ -21,10 +21,10 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.parser; +package io.mycat.route.parser; -import io.mycat.util.Pair; -import io.mycat.util.ParseUtil; +import io.mycat.route.parser.util.Pair; +import io.mycat.route.parser.util.ParseUtil; import io.mycat.util.SplitUtil; /** @@ -70,8 +70,8 @@ public static Pair getPair(String stmt) { // DATASOURCE static int switch2Check(String stmt, int offset) { - if (stmt.length() > ++offset && stmt.charAt(offset) == '@') { - if (stmt.length() > offset + 10) { + if (stmt.length() > ++offset && stmt.charAt(offset) == '@' + && stmt.length() > offset + 10) { char c1 = stmt.charAt(++offset); char c2 = stmt.charAt(++offset); char c3 = stmt.charAt(++offset); @@ -91,7 +91,6 @@ static int switch2Check(String stmt, int offset) { } return DATASOURCE; } - } } return OTHER; } diff --git a/src/main/java/io/mycat/route/parser/druid/DruidParser.java b/src/main/java/io/mycat/route/parser/druid/DruidParser.java index 5bd2c77e1..8158e1313 100644 --- a/src/main/java/io/mycat/route/parser/druid/DruidParser.java +++ b/src/main/java/io/mycat/route/parser/druid/DruidParser.java @@ -1,13 +1,13 @@ package io.mycat.route.parser.druid; -import io.mycat.cache.LayerCachePool; -import io.mycat.route.RouteResultset; -import io.mycat.server.config.node.SchemaConfig; - import java.sql.SQLNonTransientException; import com.alibaba.druid.sql.ast.SQLStatement; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.model.SchemaConfig; +import io.mycat.route.RouteResultset; + /** * 对SQLStatement解析 * 主要通过visitor解析和statement解析:有些类型的SQLStatement通过visitor解析足够了, diff --git a/src/main/java/io/mycat/route/parser/druid/DruidParserFactory.java b/src/main/java/io/mycat/route/parser/druid/DruidParserFactory.java index 09b226057..204367504 100644 --- a/src/main/java/io/mycat/route/parser/druid/DruidParserFactory.java +++ b/src/main/java/io/mycat/route/parser/druid/DruidParserFactory.java @@ -1,19 +1,5 @@ package io.mycat.route.parser.druid; -import 
io.mycat.route.parser.druid.impl.DefaultDruidParser; -import io.mycat.route.parser.druid.impl.DruidAlterTableParser; -import io.mycat.route.parser.druid.impl.DruidCreateTableParser; -import io.mycat.route.parser.druid.impl.DruidDeleteParser; -import io.mycat.route.parser.druid.impl.DruidInsertParser; -import io.mycat.route.parser.druid.impl.DruidSelectDb2Parser; -import io.mycat.route.parser.druid.impl.DruidSelectOracleParser; -import io.mycat.route.parser.druid.impl.DruidSelectParser; -import io.mycat.route.parser.druid.impl.DruidSelectPostgresqlParser; -import io.mycat.route.parser.druid.impl.DruidSelectSqlServerParser; -import io.mycat.route.parser.druid.impl.DruidUpdateParser; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.TableConfig; - import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -21,14 +7,30 @@ import java.util.Set; import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.statement.SQLAlterTableStatement; import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; -import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlAlterTableStatement; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlDeleteStatement; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlLockTableStatement; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlUpdateStatement; import com.alibaba.druid.sql.visitor.SchemaStatVisitor; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.route.parser.druid.impl.DefaultDruidParser; +import io.mycat.route.parser.druid.impl.DruidAlterTableParser; +import io.mycat.route.parser.druid.impl.DruidCreateTableParser; +import io.mycat.route.parser.druid.impl.DruidDeleteParser; +import 
io.mycat.route.parser.druid.impl.DruidInsertParser; +import io.mycat.route.parser.druid.impl.DruidLockTableParser; +import io.mycat.route.parser.druid.impl.DruidSelectDb2Parser; +import io.mycat.route.parser.druid.impl.DruidSelectOracleParser; +import io.mycat.route.parser.druid.impl.DruidSelectParser; +import io.mycat.route.parser.druid.impl.DruidSelectPostgresqlParser; +import io.mycat.route.parser.druid.impl.DruidSelectSqlServerParser; +import io.mycat.route.parser.druid.impl.DruidUpdateParser; + /** * DruidParser的工厂类 * @@ -64,9 +66,11 @@ public static DruidParser create(SchemaConfig schema, SQLStatement statement, Sc } else if (statement instanceof MySqlUpdateStatement) { parser = new DruidUpdateParser(); - } else if (statement instanceof MySqlAlterTableStatement) + } else if (statement instanceof SQLAlterTableStatement) { parser = new DruidAlterTableParser(); + } else if (statement instanceof MySqlLockTableStatement) { + parser = new DruidLockTableParser(); } else { parser = new DefaultDruidParser(); @@ -79,7 +83,12 @@ private static DruidParser getDruidParserForMultiDB(SchemaConfig schema, SQLStat { DruidParser parser=null; //先解出表,判断表所在db的类型,再根据不同db类型返回不同的解析 - List tables = parseTables(statement, visitor); + /** + * 不能直接使用visitor变量,防止污染后续sql解析 + * @author SvenAugustus + */ + SchemaStatVisitor _visitor = SchemaStatVisitorFactory.create(schema); + List tables = parseTables(statement, _visitor); for (String table : tables) { Set dbTypes =null; @@ -125,10 +134,6 @@ private static List parseTables(SQLStatement stmt, SchemaStatVisitor sch { String key = entry.getKey(); String value = entry.getValue(); - if (key != null && key.indexOf("`") >= 0) - { - key = key.replaceAll("`", ""); - } if (value != null && value.indexOf("`") >= 0) { value = value.replaceAll("`", ""); @@ -136,16 +141,21 @@ private static List parseTables(SQLStatement stmt, SchemaStatVisitor sch //表名前面带database的,去掉 if (key != null) { - int pos = key.indexOf("."); + int pos = key.indexOf("`"); + if 
(pos > 0) + { + key = key.replaceAll("`", ""); + } + pos = key.indexOf("."); if (pos > 0) { key = key.substring(pos + 1); } - } - if (key.equals(value)) - { - tables.add(key.toUpperCase()); + if (key.equals(value)) + { + tables.add(key.toUpperCase()); + } } } diff --git a/src/main/java/io/mycat/route/parser/druid/DruidSequenceHandler.java b/src/main/java/io/mycat/route/parser/druid/DruidSequenceHandler.java index e39ba584e..e77c11899 100644 --- a/src/main/java/io/mycat/route/parser/druid/DruidSequenceHandler.java +++ b/src/main/java/io/mycat/route/parser/druid/DruidSequenceHandler.java @@ -1,80 +1,125 @@ -package io.mycat.route.parser.druid; - -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.sequence.IncrSequenceMySQLHandler; -import io.mycat.server.sequence.IncrSequencePropHandler; -import io.mycat.server.sequence.IncrSequenceTimeHandler; -import io.mycat.server.sequence.SequenceHandler; - -import java.io.UnsupportedEncodingException; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * 使用Druid解析器实现对Sequence处理 - * @author 兵临城下 - * @date 2015/03/13 - */ -public class DruidSequenceHandler { - private final SequenceHandler sequenceHandler; - - /** 获取MYCAT SEQ的匹配语句 */ - private final static String MATCHED_FEATURE = "NEXT VALUE FOR MYCATSEQ_"; - - private final static Pattern pattern = Pattern.compile("(?:(\\s*next\\s+value\\s+for\\s*MYCATSEQ_(\\w+))(,|\\)|\\s)*)+", Pattern.CASE_INSENSITIVE); - - public DruidSequenceHandler(int seqHandlerType) { - switch(seqHandlerType){ - case SystemConfig.SEQUENCEHANDLER_MYSQLDB: - sequenceHandler = IncrSequenceMySQLHandler.getInstance(); - break; - case SystemConfig.SEQUENCEHANDLER_LOCALFILE: - sequenceHandler = IncrSequencePropHandler.getInstance(); - break; - case SystemConfig.SEQUENCEHANDLER_LOCAL_TIME: - sequenceHandler = IncrSequenceTimeHandler.getInstance(); - break; - default: - throw new java.lang.IllegalArgumentException("Invalid sequnce handler type "+seqHandlerType); - } - 
} - - /** - * 根据原sql获取可执行的sql - * @param sql - * @return - * @throws UnsupportedEncodingException - */ - public String getExecuteSql(String sql,String charset) throws UnsupportedEncodingException{ - String executeSql = null; - if (null!=sql && !"".equals(sql)) { - //sql不能转大写,因为sql可能是insert语句会把values也给转换了 - // 获取表名。 - Matcher matcher = pattern.matcher(sql); - if(matcher.find()) - { - String tableName = matcher.group(2); - long value = sequenceHandler.nextId(tableName.toUpperCase()); - - // 将MATCHED_FEATURE+表名替换成序列号。 - executeSql = sql.replace(matcher.group(1), " "+value); - } - - } - return executeSql; - } - - - //just for test - public String getTableName(String sql) { - Matcher matcher = pattern.matcher(sql); - if(matcher.find()) - { - return matcher.group(2); - } - return null; - } - - - -} +package io.mycat.route.parser.druid; + +import java.io.UnsupportedEncodingException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantLock; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import io.mycat.MycatServer; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.SessionSQLPair; +import io.mycat.route.sequence.handler.DistributedSequenceHandler; +import io.mycat.route.sequence.handler.IncrSequenceMySQLHandler; +import io.mycat.route.sequence.handler.IncrSequencePropHandler; +import io.mycat.route.sequence.handler.IncrSequenceTimeHandler; +import io.mycat.route.sequence.handler.IncrSequenceZKHandler; +import io.mycat.route.sequence.handler.SequenceHandler; +import io.mycat.util.TimeUtil; + +/** + * 使用Druid解析器实现对Sequence处理 + * + * @author 兵临城下 + * @date 2015/03/13 + */ +public class DruidSequenceHandler { + private final SequenceHandler sequenceHandler; + + /** + * 分段锁 + */ + private final static Map segmentLock = new ConcurrentHashMap<>(); + + /** + * 获取MYCAT SEQ的匹配语句 + */ + private final static String MATCHED_FEATURE = "NEXT VALUE 
FOR MYCATSEQ_"; + + private final static Pattern pattern = Pattern.compile("(?:(\\s*next\\s+value\\s+for\\s*MYCATSEQ_(\\w+))(,|\\)|\\s)*)+", Pattern.CASE_INSENSITIVE); + + public DruidSequenceHandler(int seqHandlerType) { + switch (seqHandlerType) { + case SystemConfig.SEQUENCEHANDLER_MYSQLDB: + sequenceHandler = IncrSequenceMySQLHandler.getInstance(); + break; + case SystemConfig.SEQUENCEHANDLER_LOCALFILE: + sequenceHandler = IncrSequencePropHandler.getInstance(); + break; + case SystemConfig.SEQUENCEHANDLER_LOCAL_TIME: + sequenceHandler = IncrSequenceTimeHandler.getInstance(); + break; + case SystemConfig.SEQUENCEHANDLER_ZK_DISTRIBUTED: + sequenceHandler = DistributedSequenceHandler.getInstance(MycatServer.getInstance().getConfig().getSystem()); + break; + case SystemConfig.SEQUENCEHANDLER_ZK_GLOBAL_INCREMENT: + sequenceHandler = IncrSequenceZKHandler.getInstance(); + break; + default: + throw new java.lang.IllegalArgumentException("Invalid sequnce handler type " + seqHandlerType); + } + } + + /** + * 根据原sql获取可执行的sql + * + * @param sql + * @return + * @throws UnsupportedEncodingException + */ + public String getExecuteSql(SessionSQLPair pair, String charset) throws UnsupportedEncodingException,InterruptedException { + String executeSql = pair.sql; + if (null != pair.sql && !"".equals(pair.sql)) { + Matcher matcher = pattern.matcher(executeSql); + if(matcher.find()){ + String tableName = matcher.group(2); + ReentrantLock lock = getSegLock(tableName); + lock.lock(); + try { + matcher = pattern.matcher(executeSql); + while(matcher.find()){ + long value = sequenceHandler.nextId(tableName.toUpperCase()); + executeSql = executeSql.replaceFirst(matcher.group(1), " "+Long.toString(value)); + pair.session.getSource().setLastWriteTime(TimeUtil.currentTimeMillis()); + } + } finally { + lock.unlock(); + } + } + } + return executeSql; + } + + /* + * 获取分段锁 + * @param name + * @return + */ + private ReentrantLock getSegLock(String name){ + ReentrantLock lock = 
segmentLock.get(name); + if(lock==null){ + synchronized (segmentLock) { + lock = segmentLock.get(name); + if(lock==null){ + lock = new ReentrantLock(); + segmentLock.put(name, lock); + } + } + } + return lock; + } + + + //just for test + public String getTableName(String sql) { + Matcher matcher = pattern.matcher(sql); + if (matcher.find()) { + return matcher.group(2); + } + return null; + } + + +} diff --git a/src/main/java/io/mycat/route/parser/druid/DruidShardingParseInfo.java b/src/main/java/io/mycat/route/parser/druid/DruidShardingParseInfo.java index f13ba2850..6cf6002f1 100644 --- a/src/main/java/io/mycat/route/parser/druid/DruidShardingParseInfo.java +++ b/src/main/java/io/mycat/route/parser/druid/DruidShardingParseInfo.java @@ -1,9 +1,20 @@ package io.mycat.route.parser.druid; import java.util.ArrayList; +import java.util.HashSet; import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Set; + +import com.alibaba.druid.sql.visitor.SchemaStatVisitor; +import com.alibaba.druid.stat.TableStat; +import com.alibaba.druid.stat.TableStat.Name; + +import io.mycat.route.util.RouterUtil; +import io.mycat.sqlengine.mpp.ColumnRoutePair; +import io.mycat.sqlengine.mpp.RangeValue; /** * druid parser result @@ -33,6 +44,8 @@ public class DruidShardingParseInfo { */ private Map tableAliasMap = new LinkedHashMap(); + private SchemaStatVisitor visitor; + public Map getTableAliasMap() { return tableAliasMap; } @@ -79,5 +92,32 @@ public void clear() { unit.clear(); } } + + public void setVisitor(SchemaStatVisitor visitor) { + + this.visitor = visitor; + } + + public SchemaStatVisitor getVisitor(){ + + return this.visitor; + } + + public void addTables(Map map) { + + int dotIndex; + for(Name _name : map.keySet()){ + + String _tableName = _name.getName().toString().toUpperCase(); + //系统表直接跳过,路由到默认datanode + if(RouterUtil.isSystemSchema(_tableName)){ + continue; + } + if((dotIndex = _tableName.indexOf('.')) 
!= -1){ + _tableName = _tableName.substring(dotIndex + 1); + } + addTable(_tableName); + } + } } diff --git a/src/main/java/io/mycat/route/parser/druid/MycatExprParser.java b/src/main/java/io/mycat/route/parser/druid/MycatExprParser.java index 2b3a275d5..136d3e502 100644 --- a/src/main/java/io/mycat/route/parser/druid/MycatExprParser.java +++ b/src/main/java/io/mycat/route/parser/druid/MycatExprParser.java @@ -1,14 +1,16 @@ package io.mycat.route.parser.druid; +import com.alibaba.druid.sql.ast.statement.SQLSelectItem; import com.alibaba.druid.sql.dialect.mysql.parser.MySqlExprParser; import com.alibaba.druid.sql.parser.Lexer; +import com.alibaba.druid.sql.parser.Token; /** * Created by nange on 2015/3/13. */ public class MycatExprParser extends MySqlExprParser { - public static String[] max_agg_functions = {"AVG", "COUNT", "GROUP_CONCAT", "MAX", "MIN", "STDDEV", "SUM", "ROW_NUMBER"}; + public static final String[] max_agg_functions = {"AVG", "COUNT", "GROUP_CONCAT", "MAX", "MIN", "STDDEV", "SUM", "ROW_NUMBER"}; public MycatExprParser(Lexer lexer) { @@ -22,4 +24,55 @@ public MycatExprParser(String sql) lexer.nextToken(); super.aggregateFunctions = max_agg_functions; } + @Override + public SQLSelectItem parseSelectItem() + { + parseTop(); + return super.parseSelectItem(); + } + public void parseTop() + { + if (lexer.token() == Token.TOP) + { + lexer.nextToken(); + + boolean paren = false; + if (lexer.token() == Token.LPAREN) + { + paren = true; + lexer.nextToken(); + } + + if (paren) + { + accept(Token.RPAREN); + } + + if (lexer.token() == Token.LITERAL_INT) + { + lexer.mark(); + lexer.nextToken(); + } + if (lexer.token() == Token.IDENTIFIER) + { + lexer.nextToken(); + + } + if (lexer.token() == Token.EQ||lexer.token() == Token.DOT) + { + lexer.nextToken(); + } else if(lexer.token() != Token.STAR) + { + lexer.reset(); + } + if (lexer.token() == Token.PERCENT) + { + lexer.nextToken(); + } + + + } + + + } } diff --git 
a/src/main/java/io/mycat/route/parser/druid/MycatLexer.java b/src/main/java/io/mycat/route/parser/druid/MycatLexer.java index 691e2d225..21a019327 100644 --- a/src/main/java/io/mycat/route/parser/druid/MycatLexer.java +++ b/src/main/java/io/mycat/route/parser/druid/MycatLexer.java @@ -1,12 +1,12 @@ package io.mycat.route.parser.druid; -import java.util.HashMap; -import java.util.Map; - import com.alibaba.druid.sql.dialect.mysql.parser.MySqlLexer; import com.alibaba.druid.sql.parser.Keywords; import com.alibaba.druid.sql.parser.Token; +import java.util.HashMap; +import java.util.Map; + /** * Created by magicdoom on 2015/3/13. */ diff --git a/src/main/java/io/mycat/route/parser/druid/MycatSchemaStatVisitor.java b/src/main/java/io/mycat/route/parser/druid/MycatSchemaStatVisitor.java index bf7974890..70598990e 100644 --- a/src/main/java/io/mycat/route/parser/druid/MycatSchemaStatVisitor.java +++ b/src/main/java/io/mycat/route/parser/druid/MycatSchemaStatVisitor.java @@ -1,35 +1,62 @@ package io.mycat.route.parser.druid; -import io.mycat.route.util.RouterUtil; - import java.util.ArrayList; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Map.Entry; +import java.util.Queue; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import com.alibaba.druid.sql.ast.SQLCommentHint; import com.alibaba.druid.sql.ast.SQLExpr; +import com.alibaba.druid.sql.ast.SQLExprImpl; import com.alibaba.druid.sql.ast.SQLName; import com.alibaba.druid.sql.ast.SQLObject; +import com.alibaba.druid.sql.ast.expr.SQLAggregateExpr; +import com.alibaba.druid.sql.ast.expr.SQLAllExpr; +import com.alibaba.druid.sql.ast.expr.SQLAnyExpr; import com.alibaba.druid.sql.ast.expr.SQLBetweenExpr; import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr; import com.alibaba.druid.sql.ast.expr.SQLBinaryOperator; import com.alibaba.druid.sql.ast.expr.SQLCharExpr; 
+import com.alibaba.druid.sql.ast.expr.SQLExistsExpr; import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.druid.sql.ast.expr.SQLInListExpr; +import com.alibaba.druid.sql.ast.expr.SQLInSubQueryExpr; import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr; +import com.alibaba.druid.sql.ast.expr.SQLQueryExpr; +import com.alibaba.druid.sql.ast.expr.SQLSomeExpr; +import com.alibaba.druid.sql.ast.expr.SQLValuableExpr; import com.alibaba.druid.sql.ast.statement.SQLAlterTableItem; import com.alibaba.druid.sql.ast.statement.SQLAlterTableStatement; import com.alibaba.druid.sql.ast.statement.SQLDeleteStatement; import com.alibaba.druid.sql.ast.statement.SQLExprTableSource; import com.alibaba.druid.sql.ast.statement.SQLJoinTableSource; +import com.alibaba.druid.sql.ast.statement.SQLSelect; +import com.alibaba.druid.sql.ast.statement.SQLSelectItem; +import com.alibaba.druid.sql.ast.statement.SQLSelectQueryBlock; import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; +import com.alibaba.druid.sql.ast.statement.SQLSubqueryTableSource; +import com.alibaba.druid.sql.ast.statement.SQLUnionQuery; import com.alibaba.druid.sql.ast.statement.SQLUpdateStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlDeleteStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlHintStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; import com.alibaba.druid.sql.dialect.mysql.visitor.MySqlSchemaStatVisitor; +import com.alibaba.druid.sql.visitor.SchemaStatVisitor; import com.alibaba.druid.stat.TableStat; import com.alibaba.druid.stat.TableStat.Column; import com.alibaba.druid.stat.TableStat.Condition; import com.alibaba.druid.stat.TableStat.Mode; +import com.alibaba.druid.stat.TableStat.Relationship; + +import 
io.mycat.route.util.RouterUtil; /** * Druid解析器中用来从ast语法中提取表名、条件、字段等的vistor @@ -40,6 +67,9 @@ public class MycatSchemaStatVisitor extends MySqlSchemaStatVisitor { private boolean hasOrCondition = false; private List whereUnits = new CopyOnWriteArrayList(); private List storedwhereUnits = new CopyOnWriteArrayList(); + private Queue subQuerys = new LinkedBlockingQueue<>(); //子查询集合 + private boolean hasChange = false; // 是否有改写sql + private boolean subqueryRelationOr = false; //子查询存在关联条件的情况下,是否有 or 条件 private void reset() { this.conditions.clear(); @@ -220,11 +250,17 @@ private String getOwnerTableName(SQLBetweenExpr betweenExpr,String column) { } else if(tableStats.size() == 0) {//一个表都没有,返回空串 return ""; } else {//多个表名 - for(Column col : columns) {//从columns中找表名 + for (Column col : columns.keySet()) + { if(col.getName().equals(column)) { return col.getTable(); } } +// for(Column col : columns) {//从columns中找表名 +// if(col.getName().equals(column)) { +// return col.getTable(); +// } +// } //前面没找到表名的,自己从parent中解析 @@ -256,16 +292,464 @@ else if(parent instanceof SQLUpdateStatement) { return ""; } + private void setSubQueryRelationOrFlag(SQLExprImpl x){ + MycatSubQueryVisitor subQueryVisitor = new MycatSubQueryVisitor(); + x.accept(subQueryVisitor); + if(subQueryVisitor.isRelationOr()){ + subqueryRelationOr = true; + } + } + + /* + * 子查询 + * (non-Javadoc) + * @see com.alibaba.druid.sql.visitor.SQLASTVisitorAdapter#visit(com.alibaba.druid.sql.ast.expr.SQLQueryExpr) + */ + @Override + public boolean visit(SQLQueryExpr x) { + setSubQueryRelationOrFlag(x); + addSubQuerys(x.getSubQuery()); + return super.visit(x); + } + /* + * (non-Javadoc) + * @see com.alibaba.druid.sql.visitor.SchemaStatVisitor#visit(com.alibaba.druid.sql.ast.statement.SQLSubqueryTableSource) + */ + @Override + public boolean visit(SQLSubqueryTableSource x){ + addSubQuerys(x.getSelect()); + return super.visit(x); + } + + /* + * (non-Javadoc) + * @see 
com.alibaba.druid.sql.visitor.SQLASTVisitorAdapter#visit(com.alibaba.druid.sql.ast.expr.SQLExistsExpr) + */ + @Override + public boolean visit(SQLExistsExpr x) { + setSubQueryRelationOrFlag(x); + addSubQuerys(x.getSubQuery()); + return super.visit(x); + } + + @Override + public boolean visit(SQLInListExpr x) { + return super.visit(x); + } + + /* + * 对 in 子查询的处理 + * (non-Javadoc) + * @see com.alibaba.druid.sql.visitor.SchemaStatVisitor#visit(com.alibaba.druid.sql.ast.expr.SQLInSubQueryExpr) + */ + @Override + public boolean visit(SQLInSubQueryExpr x) { + setSubQueryRelationOrFlag(x); + addSubQuerys(x.getSubQuery()); + return super.visit(x); + } + + /* + * 遇到 all 将子查询改写成 SELECT MAX(name) FROM subtest1 + * 例如: + * select * from subtest where id > all (select name from subtest1); + * >/>= all ----> >/>= max + * all ----> not in + * = all ----> id = 1 and id = 2 + * other 不改写 + */ + @Override + public boolean visit(SQLAllExpr x) { + setSubQueryRelationOrFlag(x); + + List itemlist = ((SQLSelectQueryBlock)(x.getSubQuery().getQuery())).getSelectList(); + SQLExpr sexpr = itemlist.get(0).getExpr(); + + if(x.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr parentExpr = (SQLBinaryOpExpr)x.getParent(); + SQLAggregateExpr saexpr = null; + switch (parentExpr.getOperator()) { + case GreaterThan: + case GreaterThanOrEqual: + case NotLessThan: + this.hasChange = true; + if(sexpr instanceof SQLIdentifierExpr + || (sexpr instanceof SQLPropertyExpr&&((SQLPropertyExpr)sexpr).getOwner() instanceof SQLIdentifierExpr)){ + saexpr = new SQLAggregateExpr("MAX"); + saexpr.getArguments().add(sexpr); + saexpr.setParent(itemlist.get(0)); + itemlist.get(0).setExpr(saexpr); + } + SQLQueryExpr maxSubQuery = new SQLQueryExpr(x.getSubQuery()); + x.getSubQuery().setParent(x.getParent()); + // 生成新的SQLQueryExpr 替换当前 SQLAllExpr 节点 + if(x.getParent() instanceof SQLBinaryOpExpr){ + if(((SQLBinaryOpExpr)x.getParent()).getLeft().equals(x)){ + ((SQLBinaryOpExpr)x.getParent()).setLeft(maxSubQuery); + 
}else if(((SQLBinaryOpExpr)x.getParent()).getRight().equals(x)){ + ((SQLBinaryOpExpr)x.getParent()).setRight(maxSubQuery); + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(x.getSubQuery()); + case LessThan: + case LessThanOrEqual: + case NotGreaterThan: + this.hasChange = true; + if(sexpr instanceof SQLIdentifierExpr + || (sexpr instanceof SQLPropertyExpr&&((SQLPropertyExpr)sexpr).getOwner() instanceof SQLIdentifierExpr)){ + saexpr = new SQLAggregateExpr("MIN"); + saexpr.getArguments().add(sexpr); + saexpr.setParent(itemlist.get(0)); + itemlist.get(0).setExpr(saexpr); + + x.subQuery.setParent(x.getParent()); + } + // 生成新的SQLQueryExpr 替换当前 SQLAllExpr 节点 + SQLQueryExpr minSubQuery = new SQLQueryExpr(x.getSubQuery()); + if(x.getParent() instanceof SQLBinaryOpExpr){ + if(((SQLBinaryOpExpr)x.getParent()).getLeft().equals(x)){ + ((SQLBinaryOpExpr)x.getParent()).setLeft(minSubQuery); + }else if(((SQLBinaryOpExpr)x.getParent()).getRight().equals(x)){ + ((SQLBinaryOpExpr)x.getParent()).setRight(minSubQuery); + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(x.getSubQuery()); + case LessThanOrGreater: + case NotEqual: + this.hasChange = true; + SQLInSubQueryExpr notInSubQueryExpr = new SQLInSubQueryExpr(x.getSubQuery()); + x.getSubQuery().setParent(notInSubQueryExpr); + notInSubQueryExpr.setNot(true); + // 生成新的SQLQueryExpr 替换当前 SQLAllExpr 节点 + if(x.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr xp = (SQLBinaryOpExpr)x.getParent(); + + if(xp.getLeft().equals(x)){ + notInSubQueryExpr.setExpr(xp.getRight()); + }else if(xp.getRight().equals(x)){ + notInSubQueryExpr.setExpr(xp.getLeft()); + } + + if(xp.getParent() instanceof MySqlSelectQueryBlock){ + ((MySqlSelectQueryBlock)xp.getParent()).setWhere(notInSubQueryExpr); + }else if(xp.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr pp = ((SQLBinaryOpExpr)xp.getParent()); + if(pp.getLeft().equals(xp)){ + pp.setLeft(notInSubQueryExpr); + }else if(pp.getRight().equals(xp)){ + 
pp.setRight(notInSubQueryExpr); + } + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(notInSubQueryExpr); + default: + break; + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(x); + } + + /* + * 遇到 some 将子查询改写成 SELECT MIN(name) FROM subtest1 + * 例如: + * select * from subtest where id > some (select name from subtest1); + * >/>= some ----> >/>= min + * some ----> not in + * = some ----> in + * other 不改写 + */ + @Override + public boolean visit(SQLSomeExpr x) { + + setSubQueryRelationOrFlag(x); + + List itemlist = ((SQLSelectQueryBlock)(x.getSubQuery().getQuery())).getSelectList(); + SQLExpr sexpr = itemlist.get(0).getExpr(); + + if(x.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr parentExpr = (SQLBinaryOpExpr)x.getParent(); + SQLAggregateExpr saexpr = null; + switch (parentExpr.getOperator()) { + case GreaterThan: + case GreaterThanOrEqual: + case NotLessThan: + this.hasChange = true; + if(sexpr instanceof SQLIdentifierExpr + || (sexpr instanceof SQLPropertyExpr&&((SQLPropertyExpr)sexpr).getOwner() instanceof SQLIdentifierExpr)){ + saexpr = new SQLAggregateExpr("MIN"); + saexpr.getArguments().add(sexpr); + saexpr.setParent(itemlist.get(0)); + itemlist.get(0).setExpr(saexpr); + } + SQLQueryExpr maxSubQuery = new SQLQueryExpr(x.getSubQuery()); + x.getSubQuery().setParent(maxSubQuery); + // 生成新的SQLQueryExpr 替换当前 SQLAllExpr 节点 + if(x.getParent() instanceof SQLBinaryOpExpr){ + if(((SQLBinaryOpExpr)x.getParent()).getLeft().equals(x)){ + ((SQLBinaryOpExpr)x.getParent()).setLeft(maxSubQuery); + }else if(((SQLBinaryOpExpr)x.getParent()).getRight().equals(x)){ + ((SQLBinaryOpExpr)x.getParent()).setRight(maxSubQuery); + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(x.getSubQuery()); + case LessThan: + case LessThanOrEqual: + case NotGreaterThan: + this.hasChange = true; + if(sexpr instanceof SQLIdentifierExpr + || (sexpr instanceof SQLPropertyExpr&&((SQLPropertyExpr)sexpr).getOwner() instanceof SQLIdentifierExpr)){ + saexpr = 
new SQLAggregateExpr("MAX"); + saexpr.getArguments().add(sexpr); + saexpr.setParent(itemlist.get(0)); + itemlist.get(0).setExpr(saexpr); + } + // 生成新的SQLQueryExpr 替换当前 SQLAllExpr 节点 + SQLQueryExpr minSubQuery = new SQLQueryExpr(x.getSubQuery()); + x.getSubQuery().setParent(minSubQuery); + + if(x.getParent() instanceof SQLBinaryOpExpr){ + if(((SQLBinaryOpExpr)x.getParent()).getLeft().equals(x)){ + ((SQLBinaryOpExpr)x.getParent()).setLeft(minSubQuery); + }else if(((SQLBinaryOpExpr)x.getParent()).getRight().equals(x)){ + ((SQLBinaryOpExpr)x.getParent()).setRight(minSubQuery); + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(x.getSubQuery()); + case LessThanOrGreater: + case NotEqual: + this.hasChange = true; + SQLInSubQueryExpr notInSubQueryExpr = new SQLInSubQueryExpr(x.getSubQuery()); + x.getSubQuery().setParent(notInSubQueryExpr); + notInSubQueryExpr.setNot(true); + // 生成新的SQLQueryExpr 替换当前 SQLAllExpr 节点 + if(x.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr xp = (SQLBinaryOpExpr)x.getParent(); + + if(xp.getLeft().equals(x)){ + notInSubQueryExpr.setExpr(xp.getRight()); + }else if(xp.getRight().equals(x)){ + notInSubQueryExpr.setExpr(xp.getLeft()); + } + + if(xp.getParent() instanceof MySqlSelectQueryBlock){ + ((MySqlSelectQueryBlock)xp.getParent()).setWhere(notInSubQueryExpr); + }else if(xp.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr pp = ((SQLBinaryOpExpr)xp.getParent()); + if(pp.getLeft().equals(xp)){ + pp.setLeft(notInSubQueryExpr); + }else if(pp.getRight().equals(xp)){ + pp.setRight(notInSubQueryExpr); + } + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(notInSubQueryExpr); + case Equality: + this.hasChange = true; + SQLInSubQueryExpr inSubQueryExpr = new SQLInSubQueryExpr(x.getSubQuery()); + x.getSubQuery().setParent(inSubQueryExpr); + inSubQueryExpr.setNot(false); + // 生成新的SQLQueryExpr 替换当前 SQLAllExpr 节点 + if(x.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr xp = (SQLBinaryOpExpr)x.getParent(); + 
+ if(xp.getLeft().equals(x)){ + inSubQueryExpr.setExpr(xp.getRight()); + }else if(xp.getRight().equals(x)){ + inSubQueryExpr.setExpr(xp.getLeft()); + } + + if(xp.getParent() instanceof MySqlSelectQueryBlock){ + ((MySqlSelectQueryBlock)xp.getParent()).setWhere(inSubQueryExpr); + }else if(xp.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr pp = ((SQLBinaryOpExpr)xp.getParent()); + if(pp.getLeft().equals(xp)){ + pp.setLeft(inSubQueryExpr); + }else if(pp.getRight().equals(xp)){ + pp.setRight(inSubQueryExpr); + } + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(inSubQueryExpr); + default: + break; + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(x); + } + + /* + * 遇到 any 将子查询改写成 SELECT MIN(name) FROM subtest1 + * 例如: + * select * from subtest where id oper any (select name from subtest1); + * >/>= any ----> >/>= min + * any ----> not in + * = some ----> in + * other 不改写 + */ + @Override + public boolean visit(SQLAnyExpr x) { + + setSubQueryRelationOrFlag(x); + + List itemlist = ((SQLSelectQueryBlock)(x.getSubQuery().getQuery())).getSelectList(); + SQLExpr sexpr = itemlist.get(0).getExpr(); + + if(x.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr parentExpr = (SQLBinaryOpExpr)x.getParent(); + SQLAggregateExpr saexpr = null; + switch (parentExpr.getOperator()) { + case GreaterThan: + case GreaterThanOrEqual: + case NotLessThan: + this.hasChange = true; + if(sexpr instanceof SQLIdentifierExpr + || (sexpr instanceof SQLPropertyExpr&&((SQLPropertyExpr)sexpr).getOwner() instanceof SQLIdentifierExpr)){ + saexpr = new SQLAggregateExpr("MIN"); + saexpr.getArguments().add(sexpr); + saexpr.setParent(itemlist.get(0)); + itemlist.get(0).setExpr(saexpr); + } + SQLQueryExpr maxSubQuery = new SQLQueryExpr(x.getSubQuery()); + x.getSubQuery().setParent(maxSubQuery); + // 生成新的SQLQueryExpr 替换当前 SQLAllExpr 节点 + if(x.getParent() instanceof SQLBinaryOpExpr){ + if(((SQLBinaryOpExpr)x.getParent()).getLeft().equals(x)){ + 
((SQLBinaryOpExpr)x.getParent()).setLeft(maxSubQuery); + }else if(((SQLBinaryOpExpr)x.getParent()).getRight().equals(x)){ + ((SQLBinaryOpExpr)x.getParent()).setRight(maxSubQuery); + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(x.getSubQuery()); + case LessThan: + case LessThanOrEqual: + case NotGreaterThan: + this.hasChange = true; + if(sexpr instanceof SQLIdentifierExpr + || (sexpr instanceof SQLPropertyExpr&&((SQLPropertyExpr)sexpr).getOwner() instanceof SQLIdentifierExpr)){ + saexpr = new SQLAggregateExpr("MAX"); + saexpr.getArguments().add(sexpr); + saexpr.setParent(itemlist.get(0)); + itemlist.get(0).setExpr(saexpr); + } + // 生成新的SQLQueryExpr 替换当前 SQLAllExpr 节点 + SQLQueryExpr minSubQuery = new SQLQueryExpr(x.getSubQuery()); + x.subQuery.setParent(minSubQuery); + if(x.getParent() instanceof SQLBinaryOpExpr){ + if(((SQLBinaryOpExpr)x.getParent()).getLeft().equals(x)){ + ((SQLBinaryOpExpr)x.getParent()).setLeft(minSubQuery); + }else if(((SQLBinaryOpExpr)x.getParent()).getRight().equals(x)){ + ((SQLBinaryOpExpr)x.getParent()).setRight(minSubQuery); + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(x.getSubQuery()); + case LessThanOrGreater: + case NotEqual: + this.hasChange = true; + SQLInSubQueryExpr notInSubQueryExpr = new SQLInSubQueryExpr(x.getSubQuery()); + x.getSubQuery().setParent(notInSubQueryExpr); + notInSubQueryExpr.setNot(true); + // 生成新的SQLQueryExpr 替换当前 SQLAllExpr 节点 + if(x.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr xp = (SQLBinaryOpExpr)x.getParent(); + + if(xp.getLeft().equals(x)){ + notInSubQueryExpr.setExpr(xp.getRight()); + }else if(xp.getRight().equals(x)){ + notInSubQueryExpr.setExpr(xp.getLeft()); + } + + if(xp.getParent() instanceof MySqlSelectQueryBlock){ + ((MySqlSelectQueryBlock)xp.getParent()).setWhere(notInSubQueryExpr); + }else if(xp.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr pp = ((SQLBinaryOpExpr)xp.getParent()); + if(pp.getLeft().equals(xp)){ + pp.setLeft(notInSubQueryExpr); 
+ }else if(pp.getRight().equals(xp)){ + pp.setRight(notInSubQueryExpr); + } + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(notInSubQueryExpr); + case Equality: + this.hasChange = true; + SQLInSubQueryExpr inSubQueryExpr = new SQLInSubQueryExpr(x.getSubQuery()); + x.getSubQuery().setParent(inSubQueryExpr); + inSubQueryExpr.setNot(false); + // 生成新的SQLQueryExpr 替换当前 SQLAllExpr 节点 + if(x.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr xp = (SQLBinaryOpExpr)x.getParent(); + + if(xp.getLeft().equals(x)){ + inSubQueryExpr.setExpr(xp.getRight()); + }else if(xp.getRight().equals(x)){ + inSubQueryExpr.setExpr(xp.getLeft()); + } + + if(xp.getParent() instanceof MySqlSelectQueryBlock){ + ((MySqlSelectQueryBlock)xp.getParent()).setWhere(inSubQueryExpr); + }else if(xp.getParent() instanceof SQLBinaryOpExpr){ + SQLBinaryOpExpr pp = ((SQLBinaryOpExpr)xp.getParent()); + if(pp.getLeft().equals(xp)){ + pp.setLeft(inSubQueryExpr); + }else if(pp.getRight().equals(xp)){ + pp.setRight(inSubQueryExpr); + } + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(inSubQueryExpr); + default: + break; + } + } + addSubQuerys(x.getSubQuery()); + return super.visit(x); + } + @Override public boolean visit(SQLBinaryOpExpr x) { x.getLeft().setParent(x); x.getRight().setParent(x); - + + /* + * fix bug 当 selectlist 存在多个子查询时, 主表没有别名的情况下.主表的查询条件 被错误的附加到子查询上. + * eg. select (select id from subtest2 where id = 1), (select id from subtest3 where id = 2) from subtest1 where id =4; + * 像这样的子查询, subtest1 的 过滤条件 id = 4 . 被 加入到 subtest3 上. 加别名的情况下正常,不加别名,就会存在这个问题. + * 这里设置好操作的是哪张表后,再进行判断. 
+ */ + String currenttable = x.getParent()==null?null: (String) x.getParent().getAttribute(SchemaStatVisitor.ATTR_TABLE); + if(currenttable!=null){ + this.setCurrentTable(currenttable); + } + switch (x.getOperator()) { case Equality: case LessThanOrEqualOrGreaterThan: case Is: case IsNot: + case GreaterThan: + case GreaterThanOrEqual: + case LessThan: + case LessThanOrEqual: + case NotLessThan: + case LessThanOrGreater: + case NotEqual: + case NotGreaterThan: handleCondition(x.getLeft(), x.getOperator().name, x.getRight()); handleCondition(x.getRight(), x.getOperator().name, x.getLeft()); handleRelationship(x.getLeft(), x.getOperator().name, x.getRight()); @@ -291,11 +775,6 @@ public boolean visit(SQLBinaryOpExpr x) { return false; case Like: case NotLike: - case NotEqual: - case GreaterThan: - case GreaterThanOrEqual: - case LessThan: - case LessThanOrEqual: default: break; } @@ -426,24 +905,63 @@ private List> getMergedConditionList(List whereUnitLi * @param list2 * @return */ - private List> merge(List> list1, List> list2) { - if(list1.size() == 0) { - return list2; - } else if (list2.size() == 0) { - return list1; - } - + private List> merge(List> list1, List> list2) { + if(list1.size() == 0) { + return list2; + } else if (list2.size() == 0) { + return list1; + } + List> retList = new ArrayList>(); for(int i = 0; i < list1.size(); i++) { for(int j = 0; j < list2.size(); j++) { - List listTmp = new ArrayList(); - listTmp.addAll(list1.get(i)); - listTmp.addAll(list2.get(j)); - retList.add(listTmp); +// List listTmp = new ArrayList(); +// listTmp.addAll(list1.get(i)); +// listTmp.addAll(list2.get(j)); +// retList.add(listTmp); + /** + * 单纯做笛卡尔积运算,会导致非常多不必要的条件列表,
+ * 当whereUnit和条件相对多时,会急剧增长条件列表项,内存直线上升,导致假死状态
+ * 因此,修改算法为
+ * 1、先合并两个条件列表的元素为一个条件列表
+ * 2、计算合并后的条件列表,在结果retList中:
+ *  2-1、如果当前的条件列表 是 另外一个条件列表的 超集,更新,并标识已存在
+ *  2-2、如果当前的条件列表 是 另外一个条件列表的 子集,标识已存在
+ * 3、最后,如果被标识不存在,加入结果retList,否则丢弃。
+ * + * @author SvenAugustus + */ + // 合并两个条件列表的元素为一个条件列表 + List listTmp = mergeSqlConditionList(list1.get(i), list2.get(j)); + + // 判定当前的条件列表 是否 另外一个条件列表的 子集 + boolean exists = false; + Iterator> it = retList.iterator(); + while (it.hasNext()) { + List result = (List) it.next(); + if (result != null && listTmp != null && listTmp.size() > result.size()) { + // 如果当前的条件列表 是 另外一个条件列表的 超集,更新,并标识已存在 + if (sqlConditionListInOther(result, listTmp)) { + result.clear(); + result.addAll(listTmp); + exists = true; + break; + } + } else { + // 如果当前的条件列表 是 另外一个条件列表的 子集,标识已存在 + if (sqlConditionListInOther(listTmp, result)) { + exists = true; + break; + } + } + } + if (!exists) {// 被标识不存在,加入 + retList.add(listTmp); + } // 否则丢弃 } } - return retList; - } + return retList; + } private void getConditionsFromWhereUnit(WhereUnit whereUnit) { List> retList = new ArrayList>(); @@ -454,9 +972,14 @@ private void getConditionsFromWhereUnit(WhereUnit whereUnit) { this.conditions.clear(); for(SQLExpr sqlExpr : whereUnit.getSplitedExprList()) { sqlExpr.accept(this); - List conditions = new ArrayList(); - conditions.addAll(getConditions()); - conditions.addAll(outSideCondition); +// List conditions = new ArrayList(); +// conditions.addAll(getConditions()); conditions.addAll(outSideCondition); + /** + * 合并两个条件列表的元素为一个条件列表,减少不必要多的条件项
+ * + * @author SvenAugustus + */ + List conditions = mergeSqlConditionList(getConditions(), outSideCondition); retList.add(conditions); this.conditions.clear(); } @@ -520,7 +1043,32 @@ public boolean visit(SQLAlterTableStatement x) { return false; } - + public boolean visit(MySqlCreateTableStatement x) { + SQLName sqlName= x.getName(); + if(sqlName!=null) + { + String table = sqlName.toString(); + if(table.startsWith("`")) + { + table=table.substring(1,table.length()-1); + } + setCurrentTable(table); + } + return false; + } + public boolean visit(MySqlInsertStatement x) { + SQLName sqlName= x.getTableName(); + if(sqlName!=null) + { + String table = sqlName.toString(); + if(table.startsWith("`")) + { + table=table.substring(1,table.length()-1); + } + setCurrentTable(sqlName.toString()); + } + return false; + } // DUAL public boolean visit(MySqlDeleteStatement x) { setAliasMap(); @@ -550,4 +1098,304 @@ public boolean visit(MySqlDeleteStatement x) { public void endVisit(MySqlDeleteStatement x) { } + + public boolean visit(SQLUpdateStatement x) { + setAliasMap(); + + setMode(x, Mode.Update); + + SQLName identName = x.getTableName(); + if (identName != null) { + String ident = identName.toString(); + String alias = x.getTableSource().getAlias(); + setCurrentTable(ident); + + TableStat stat = getTableStat(ident); + stat.incrementUpdateCount(); + + Map aliasMap = getAliasMap(); + + aliasMap.put(ident, ident); + if(alias != null) { + aliasMap.put(alias, ident); + } + } else { + x.getTableSource().accept(this); + } + + accept(x.getItems()); + accept(x.getWhere()); + + return false; + } + + @Override + public void endVisit(MySqlHintStatement x) { + super.endVisit(x); + } + + @Override + public boolean visit(MySqlHintStatement x) { + List hits = x.getHints(); + if(hits != null && !hits.isEmpty()) { + String schema = parseSchema(hits); + if(schema != null ) { + setCurrentTable(x, schema + "."); + return true; + } + } + return true; + } + + private String parseSchema(List 
hits) { + String regx = "\\!mycat:schema\\s*=([\\s\\w]*)$"; + for(SQLCommentHint hit : hits ) { + Pattern pattern = Pattern.compile(regx); + Matcher m = pattern.matcher(hit.getText()); + if(m.matches()) { + return m.group(1).trim(); + } + } + return null; + } + + public Queue getSubQuerys() { + return subQuerys; + } + + private void addSubQuerys(SQLSelect sqlselect){ + /* 多个 sqlselect 之间 , equals 和 hashcode 是相同的.去重时 都被过滤掉了. */ + if(subQuerys.isEmpty()){ + subQuerys.add(sqlselect); + return; + } + boolean exists = false; + Iterator iter = subQuerys.iterator(); + while(iter.hasNext()){ + SQLSelect ss = iter.next(); + if(ss.getQuery() instanceof SQLSelectQueryBlock + &&sqlselect.getQuery() instanceof SQLSelectQueryBlock){ + SQLSelectQueryBlock current = (SQLSelectQueryBlock)sqlselect.getQuery(); + SQLSelectQueryBlock ssqb = (SQLSelectQueryBlock)ss.getQuery(); +// if(!sqlSelectQueryBlockEquals(ssqb,current)){ +// subQuerys.add(sqlselect); +// } + /** + * 修正判定逻辑,应改为全不在subQuerys中才加入
+ * + * @author SvenAugustus + */ + if(sqlSelectQueryBlockEquals(current,ssqb)){ + exists = true; + break; + } + } + } + if(!exists) { + subQuerys.add(sqlselect); + } + } + + /* 多个 sqlselect 之间 , equals 和 hashcode 是相同的.去重时 使用 SQLSelectQueryBlock equals 方法 */ + private boolean sqlSelectQueryBlockEquals(SQLSelectQueryBlock obj1,SQLSelectQueryBlock obj2) { + if (obj1 == obj2) return true; + if (obj2 == null) return false; + if (obj1.getClass() != obj2.getClass()) return false; + if (obj1.isParenthesized() ^ obj2.isParenthesized()) return false; + if (obj1.getDistionOption() != obj2.getDistionOption()) return false; + if (obj1.getFrom() == null) { + if (obj2.getFrom() != null) return false; + } else if (!obj1.getFrom().equals(obj2.getFrom())) return false; + if (obj1.getGroupBy() == null) { + if (obj2.getGroupBy() != null) return false; + } else if (!obj1.getGroupBy().equals(obj2.getGroupBy())) return false; + if (obj1.getInto() == null) { + if (obj2.getInto() != null) return false; + } else if (!obj1.getInto().equals(obj2.getInto())) return false; + if (obj1.getSelectList() == null) { + if (obj2.getSelectList() != null) return false; + } else if (!obj1.getSelectList().equals(obj2.getSelectList())) return false; + if (obj1.getWhere() == null) { + if (obj2.getWhere() != null) return false; + } else if (!obj1.getWhere().equals(obj2.getWhere())) return false; + return true; + } + + public boolean isHasChange() { + return hasChange; + } + + public boolean isSubqueryRelationOr() { + return subqueryRelationOr; + } + + /** + * 判定当前的条件列表 是否 另外一个条件列表的 子集 + * + * @author SvenAugustus + * @param current 当前的条件列表 + * @param other 另外一个条件列表 + * @return + */ + private boolean sqlConditionListInOther(List current, List other) { + if (current == null) { + if (other != null) { + return false; + } + return true; + } + if (current.size() > other.size()) { + return false; + } + if (other.size() == current.size()) { + // 判定两个条件列表的元素是否内容相等 + return sqlConditionListEquals(current, other); + } 
+ for (int j = 0; j < current.size(); j++) { + boolean exists = false; + for (int i = 0; i < other.size(); i++) { + // 判定两个条件是否相等 + if (sqlConditionEquals(current.get(j), other.get(i))) { + exists = true; + break; + } + } + if (!exists) { + return false; + } + } + return true; + } + + /** + * 判定两个条件列表的元素是否内容相等 + * + * @author SvenAugustus + * @param list1 + * @param list2 + * @return + */ + private boolean sqlConditionListEquals(List list1, List list2) { + if (list1 == null) { + if (list2 != null) { + return false; + } + return true; + } + if (list2.size() != list1.size()) { + return false; + } + int len = list1.size(); + for (int j = 0; j < len; j++) { + boolean exists = false; + for (int i = 0; i < len; i++) { + // 判定两个条件是否相等 + if (sqlConditionEquals(list2.get(j), list1.get(i))) { + exists = true; + break; + } + } + if (!exists) { + return false; + } + } + return true; + } + + /** + * 合并两个条件列表的元素为一个条件列表 + * + * @author SvenAugustus + * @param list1 条件列表1 + * @param list2 条件列表2 + * @return + */ + private List mergeSqlConditionList(List list1, List list2) { + if (list1 == null) { + list1 = new ArrayList(); + } + if (list2 == null) { + list2 = new ArrayList(); + } + List retList = new ArrayList(); + if (!list1.isEmpty() && !(list1.get(0) instanceof Condition)) { + return retList; + } + if (!list2.isEmpty() && !(list2.get(0) instanceof Condition)) { + return retList; + } + retList.addAll(list1); + for (int j = 0; j < list2.size(); j++) { + boolean exists = false; + for (int i = 0; i < list1.size(); i++) { + if (sqlConditionEquals(list2.get(j), list1.get(i))) { + exists = true; + break; + } + } + if (!exists) { + retList.add(list2.get(j)); + } + } + return retList; + } + + /** + * 判定两个条件是否相等 + * + * @author SvenAugustus + * @param obj1 + * @param obj2 + * @return + */ + private boolean sqlConditionEquals(Condition obj1, Condition obj2) { + if (obj1 == obj2) { + return true; + } + if (obj2 == null) { + return false; + } + if (obj1.getClass() != obj2.getClass()) { + 
return false; + } + Condition other = (Condition) obj2; + if (obj1.getColumn() == null) { + if (other.getColumn() != null) { + return false; + } + } else if (!obj1.getColumn().equals(other.getColumn())) { + return false; + } + if (obj1.getOperator() == null) { + if (other.getOperator() != null) { + return false; + } + } else if (!obj1.getOperator().equals(other.getOperator())) { + return false; + } + if (obj1.getValues() == null) { + if (other.getValues() != null) { + return false; + } + } else { + boolean notEquals=false; + for (Object val1: obj1.getValues()) { + for (Object val2: obj2.getValues()) { + if(val1==null) { + if(val2!=null) { + notEquals=true; + break; + } + }else if(!val1.equals(val2)) { + notEquals=true; + break; + } + } + if(notEquals)break; + } + if(notEquals) + return false; + } + return true; + } } diff --git a/src/main/java/io/mycat/route/parser/druid/MycatSelectParser.java b/src/main/java/io/mycat/route/parser/druid/MycatSelectParser.java index 0af0bd578..92e730d16 100644 --- a/src/main/java/io/mycat/route/parser/druid/MycatSelectParser.java +++ b/src/main/java/io/mycat/route/parser/druid/MycatSelectParser.java @@ -1,6 +1,7 @@ package io.mycat.route.parser.druid; import com.alibaba.druid.sql.ast.statement.SQLSelectItem; +import com.alibaba.druid.sql.ast.statement.SQLSelectQuery; import com.alibaba.druid.sql.dialect.mysql.parser.MySqlSelectParser; import com.alibaba.druid.sql.parser.SQLExprParser; import com.alibaba.druid.sql.parser.Token; @@ -20,13 +21,12 @@ public MycatSelectParser(String sql) super(sql); } - @Override - protected SQLSelectItem parseSelectItem() - { - parseTop(); - return super.parseSelectItem(); - } +//public SQLSelectQuery query() +//{ +// parseTop(); +// return super.query(); +//} public void parseTop() { diff --git a/src/main/java/io/mycat/route/parser/druid/MycatStatementParser.java b/src/main/java/io/mycat/route/parser/druid/MycatStatementParser.java index 4ebf3c37c..f66573dbb 100644 --- 
a/src/main/java/io/mycat/route/parser/druid/MycatStatementParser.java +++ b/src/main/java/io/mycat/route/parser/druid/MycatStatementParser.java @@ -6,10 +6,7 @@ import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlLoadDataInFileStatement; import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; -import com.alibaba.druid.sql.parser.Lexer; -import com.alibaba.druid.sql.parser.ParserException; -import com.alibaba.druid.sql.parser.SQLExprParser; -import com.alibaba.druid.sql.parser.Token; +import com.alibaba.druid.sql.parser.*; import com.alibaba.druid.util.JdbcConstants; /** diff --git a/src/main/java/io/mycat/route/parser/druid/MycatSubQueryVisitor.java b/src/main/java/io/mycat/route/parser/druid/MycatSubQueryVisitor.java new file mode 100644 index 000000000..b2a44a44f --- /dev/null +++ b/src/main/java/io/mycat/route/parser/druid/MycatSubQueryVisitor.java @@ -0,0 +1,44 @@ +package io.mycat.route.parser.druid; + +import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr; +import com.alibaba.druid.sql.dialect.mysql.visitor.MySqlSchemaStatVisitor; + +/** + * 子查询访问器 + */ +public class MycatSubQueryVisitor extends MySqlSchemaStatVisitor{ + + private boolean relationOr; + + @Override + public boolean visit(SQLBinaryOpExpr x) { + + switch (x.getOperator()) { + case Equality: + case LessThanOrEqualOrGreaterThan: + case GreaterThan: + case GreaterThanOrEqual: + case LessThan: + case LessThanOrEqual: + case NotLessThan: + case LessThanOrGreater: + case NotEqual: + case NotGreaterThan: + break; + case BooleanOr: + relationOr = true; + break; + case Like: + case NotLike: + default: + break; + } + return true; + } + + public boolean isRelationOr() { + return relationOr; + } + + +} diff --git a/src/main/java/io/mycat/route/parser/druid/RouteCalculateUnit.java b/src/main/java/io/mycat/route/parser/druid/RouteCalculateUnit.java index cab49c54f..fa3353c0b 100644 --- 
a/src/main/java/io/mycat/route/parser/druid/RouteCalculateUnit.java +++ b/src/main/java/io/mycat/route/parser/druid/RouteCalculateUnit.java @@ -1,13 +1,13 @@ package io.mycat.route.parser.druid; -import io.mycat.sqlengine.mpp.ColumnRoutePair; -import io.mycat.sqlengine.mpp.RangeValue; - import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.Map; import java.util.Set; +import io.mycat.sqlengine.mpp.ColumnRoutePair; +import io.mycat.sqlengine.mpp.RangeValue; + /** * 路由计算单元 * diff --git a/src/main/java/io/mycat/route/parser/druid/SchemaStatVisitorFactory.java b/src/main/java/io/mycat/route/parser/druid/SchemaStatVisitorFactory.java new file mode 100644 index 000000000..81d1ae40e --- /dev/null +++ b/src/main/java/io/mycat/route/parser/druid/SchemaStatVisitorFactory.java @@ -0,0 +1,26 @@ +package io.mycat.route.parser.druid; + +import com.alibaba.druid.sql.visitor.SchemaStatVisitor; +import io.mycat.config.model.SchemaConfig; + +/** + * 为防止SchemaStatVisitor被污染,采用factory创建 + * + * Date:2017年12月1日 + * + * @author SvenAugustus + * @version 1.0 + * @since JDK 1.7 + */ +public class SchemaStatVisitorFactory { + + /** + * 创建 + * + * @return + */ + public static SchemaStatVisitor create(SchemaConfig schema) { + SchemaStatVisitor visitor = new MycatSchemaStatVisitor(); + return visitor; + } +} diff --git a/src/main/java/io/mycat/route/parser/druid/impl/DefaultDruidParser.java b/src/main/java/io/mycat/route/parser/druid/impl/DefaultDruidParser.java index 0976648c3..0b16a968f 100644 --- a/src/main/java/io/mycat/route/parser/druid/impl/DefaultDruidParser.java +++ b/src/main/java/io/mycat/route/parser/druid/impl/DefaultDruidParser.java @@ -1,25 +1,30 @@ package io.mycat.route.parser.druid.impl; +import java.sql.SQLNonTransientException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import com.alibaba.druid.sql.ast.SQLStatement; +import 
com.alibaba.druid.sql.ast.statement.SQLSelectQuery; +import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; import com.alibaba.druid.sql.visitor.SchemaStatVisitor; import com.alibaba.druid.stat.TableStat.Condition; + import io.mycat.cache.LayerCachePool; +import io.mycat.config.model.SchemaConfig; import io.mycat.route.RouteResultset; import io.mycat.route.parser.druid.DruidParser; import io.mycat.route.parser.druid.DruidShardingParseInfo; import io.mycat.route.parser.druid.MycatSchemaStatVisitor; import io.mycat.route.parser.druid.RouteCalculateUnit; -import io.mycat.server.config.node.SchemaConfig; import io.mycat.sqlengine.mpp.RangeValue; import io.mycat.util.StringUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.SQLNonTransientException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; /** * 对SQLStatement解析 @@ -30,8 +35,7 @@ * */ public class DefaultDruidParser implements DruidParser { - protected static final Logger LOGGER = LoggerFactory - .getLogger(DefaultDruidParser.class); + protected static final Logger LOGGER = LoggerFactory.getLogger(DefaultDruidParser.class); /** * 解析得到的结果 */ @@ -60,11 +64,19 @@ public void parser(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt, S ctx.setSql(originSql); //通过visitor解析 visitorParse(rrs,stmt,schemaStatVisitor); + //通过Statement解析 statementParse(schema, rrs, stmt); - - //改写sql:如insert语句主键自增长的可以 - changeSql(schema, rrs, stmt,cachePool); + } + + /** + * 是否终止解析,子类可覆盖此方法控制解析进程. 
+ * 存在子查询的情况下,如果子查询需要先执行获取返回结果后,进一步改写sql后,再执行 在这种情况下,不再需要statement 和changeSql 解析。增加此模板方法 + * @param schemaStatVisitor + * @return + */ + public boolean afterVisitorParser(RouteResultset rrs, SQLStatement stmt,MycatSchemaStatVisitor schemaStatVisitor){ + return false; } /** @@ -94,7 +106,17 @@ public void changeSql(SchemaConfig schema, RouteResultset rrs, public void visitorParse(RouteResultset rrs, SQLStatement stmt,MycatSchemaStatVisitor visitor) throws SQLNonTransientException{ stmt.accept(visitor); - + ctx.setVisitor(visitor); + + if(stmt instanceof SQLSelectStatement){ + SQLSelectQuery query = ((SQLSelectStatement) stmt).getSelect().getQuery(); + if(query instanceof MySqlSelectQueryBlock){ + if(((MySqlSelectQueryBlock)query).isForUpdate()){ + rrs.setSelectForUpdate(true); + } + } + } + List> mergedConditionList = new ArrayList>(); if(visitor.hasOrCondition()) {//包含or语句 //TODO @@ -104,6 +126,11 @@ public void visitorParse(RouteResultset rrs, SQLStatement stmt,MycatSchemaStatVi mergedConditionList.add(visitor.getConditions()); } + if(visitor.isHasChange()){ // 在解析的过程中子查询被改写了.需要更新ctx. 
+ ctx.setSql(stmt.toString()); + rrs.setStatement(ctx.getSql()); + } + if(visitor.getAliasMap() != null) { for(Map.Entry entry : visitor.getAliasMap().entrySet()) { String key = entry.getKey(); @@ -120,14 +147,19 @@ public void visitorParse(RouteResultset rrs, SQLStatement stmt,MycatSchemaStatVi if(pos> 0) { key = key.substring(pos + 1); } + + tableAliasMap.put(key.toUpperCase(), value); } - if(key.equals(value)) { - ctx.addTable(key.toUpperCase()); - } else { - tableAliasMap.put(key, value); - } + +// else { +// tableAliasMap.put(key, value); +// } + } + ctx.addTables(visitor.getTables()); + + visitor.getAliasMap().putAll(tableAliasMap); ctx.setTableAliasMap(tableAliasMap); } ctx.setRouteCalculateUnits(this.buildRouteCalculateUnits(visitor, mergedConditionList)); @@ -141,12 +173,18 @@ private List buildRouteCalculateUnits(SchemaStatVisitor visi for(Condition condition : conditionList.get(i)) { List values = condition.getValues(); if(values.size() == 0) { - break; + continue; } if(checkConditionValues(values)) { String columnName = StringUtil.removeBackquote(condition.getColumn().getName().toUpperCase()); String tableName = StringUtil.removeBackquote(condition.getColumn().getTable().toUpperCase()); - if(visitor.getAliasMap() != null && visitor.getAliasMap().get(condition.getColumn().getTable()) == null) {//子查询的别名条件忽略掉,不参数路由计算,否则后面找不到表 + + if(visitor.getAliasMap() != null && visitor.getAliasMap().get(tableName) != null + && !visitor.getAliasMap().get(tableName).equals(tableName)) { + tableName = visitor.getAliasMap().get(tableName); + } + + if(visitor.getAliasMap() != null && visitor.getAliasMap().get(StringUtil.removeBackquote(condition.getColumn().getTable().toUpperCase())) == null) {//子查询的别名条件忽略掉,不参数路由计算,否则后面找不到表 continue; } diff --git a/src/main/java/io/mycat/route/parser/druid/impl/DruidAlterTableParser.java b/src/main/java/io/mycat/route/parser/druid/impl/DruidAlterTableParser.java index eb6a06f9d..5922fc711 100644 --- 
a/src/main/java/io/mycat/route/parser/druid/impl/DruidAlterTableParser.java +++ b/src/main/java/io/mycat/route/parser/druid/impl/DruidAlterTableParser.java @@ -1,14 +1,18 @@ package io.mycat.route.parser.druid.impl; -import io.mycat.route.RouteResultset; -import io.mycat.route.parser.druid.MycatSchemaStatVisitor; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.util.StringUtil; - import java.sql.SQLNonTransientException; import com.alibaba.druid.sql.ast.SQLStatement; -import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlAlterTableStatement; + + +import com.alibaba.druid.sql.ast.statement.SQLAlterTableStatement; + +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import com.alibaba.druid.sql.parser.SQLStatementParser; +import io.mycat.config.model.SchemaConfig; +import io.mycat.route.RouteResultset; +import io.mycat.route.parser.druid.MycatSchemaStatVisitor; +import io.mycat.util.StringUtil; /** * alter table 语句解析 @@ -22,10 +26,19 @@ public void visitorParse(RouteResultset rrs, SQLStatement stmt,MycatSchemaStatVi } @Override public void statementParse(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt) throws SQLNonTransientException { - MySqlAlterTableStatement alterTable = (MySqlAlterTableStatement)stmt; - String tableName = StringUtil.removeBackquote(alterTable.getTableSource().toString().toUpperCase()); - - ctx.addTable(tableName); + SQLAlterTableStatement alterTable = (SQLAlterTableStatement)stmt; + String tableName = StringUtil.removeBackquote(alterTable.getTableSource().toString().toUpperCase()); +// + ctx.addTable(tableName); } + +// public static void main(String[] args) +// { +// String s="SELECT Customer,SUM(OrderPrice) FROM Orders\n" + +// "GROUP BY Customer"; +// SQLStatementParser parser = new MySqlStatementParser(s); +// SQLStatement statement = parser.parseStatement(); +// System.out.println(); +// } } diff --git a/src/main/java/io/mycat/route/parser/druid/impl/DruidCreateTableParser.java 
b/src/main/java/io/mycat/route/parser/druid/impl/DruidCreateTableParser.java index fe5ac9e30..58bd5a9ca 100644 --- a/src/main/java/io/mycat/route/parser/druid/impl/DruidCreateTableParser.java +++ b/src/main/java/io/mycat/route/parser/druid/impl/DruidCreateTableParser.java @@ -1,15 +1,24 @@ package io.mycat.route.parser.druid.impl; -import io.mycat.route.RouteResultset; -import io.mycat.route.parser.druid.MycatSchemaStatVisitor; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.util.StringUtil; - import java.sql.SQLNonTransientException; import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.expr.SQLCharExpr; +import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.druid.sql.ast.statement.SQLCharacterDataType; +import com.alibaba.druid.sql.ast.statement.SQLColumnDefinition; +import com.alibaba.druid.sql.ast.statement.SQLCreateTableStatement; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.route.RouteResultset; +import io.mycat.route.function.AbstractPartitionAlgorithm; +import io.mycat.route.function.SlotFunction; +import io.mycat.route.parser.druid.MycatSchemaStatVisitor; +import io.mycat.util.StringUtil; + + public class DruidCreateTableParser extends DefaultDruidParser { @Override @@ -25,6 +34,20 @@ public void statementParse(SchemaConfig schema, RouteResultset rrs, SQLStatement throw new SQLNonTransientException(msg); } String tableName = StringUtil.removeBackquote(createStmt.getTableSource().toString().toUpperCase()); + if(schema.getTables().containsKey(tableName)) { + TableConfig tableConfig = schema.getTables().get(tableName); + AbstractPartitionAlgorithm algorithm = tableConfig.getRule().getRuleAlgorithm(); + if(algorithm instanceof SlotFunction){ + SQLColumnDefinition column = new SQLColumnDefinition(); + column.setDataType(new SQLCharacterDataType("int")); 
+ column.setName(new SQLIdentifierExpr("_slot")); + column.setComment(new SQLCharExpr("自动迁移算法slot,禁止修改")); + ((SQLCreateTableStatement)stmt).getTableElementList().add(column); + String sql = createStmt.toString(); + rrs.setStatement(sql); + ctx.setSql(sql); + } + } ctx.addTable(tableName); } diff --git a/src/main/java/io/mycat/route/parser/druid/impl/DruidDeleteParser.java b/src/main/java/io/mycat/route/parser/druid/impl/DruidDeleteParser.java index 97b7545e4..87ef7c0b3 100644 --- a/src/main/java/io/mycat/route/parser/druid/impl/DruidDeleteParser.java +++ b/src/main/java/io/mycat/route/parser/druid/impl/DruidDeleteParser.java @@ -1,14 +1,14 @@ package io.mycat.route.parser.druid.impl; -import io.mycat.route.RouteResultset; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.util.StringUtil; - import java.sql.SQLNonTransientException; import com.alibaba.druid.sql.ast.SQLStatement; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlDeleteStatement; +import io.mycat.config.model.SchemaConfig; +import io.mycat.route.RouteResultset; +import io.mycat.util.StringUtil; + public class DruidDeleteParser extends DefaultDruidParser { @Override public void statementParse(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt) throws SQLNonTransientException { diff --git a/src/main/java/io/mycat/route/parser/druid/impl/DruidInsertParser.java b/src/main/java/io/mycat/route/parser/druid/impl/DruidInsertParser.java index 6e21363c0..ff69cb164 100644 --- a/src/main/java/io/mycat/route/parser/druid/impl/DruidInsertParser.java +++ b/src/main/java/io/mycat/route/parser/druid/impl/DruidInsertParser.java @@ -1,17 +1,5 @@ package io.mycat.route.parser.druid.impl; -import io.mycat.route.RouteResultset; -import io.mycat.route.RouteResultsetNode; -import io.mycat.route.function.AbstractPartitionAlgorithm; -import io.mycat.route.parser.druid.MycatSchemaStatVisitor; -import io.mycat.route.parser.druid.RouteCalculateUnit; -import io.mycat.route.util.RouterUtil; 
-import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.TableConfig; -import io.mycat.server.executors.FetchStoreNodeOfChildTableHandler; -import io.mycat.server.parser.ServerParse; -import io.mycat.util.StringUtil; - import java.sql.SQLNonTransientException; import java.sql.SQLSyntaxErrorException; import java.util.ArrayList; @@ -27,270 +15,293 @@ import com.alibaba.druid.sql.ast.statement.SQLInsertStatement.ValuesClause; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement; -public class DruidInsertParser extends DefaultDruidParser { - @Override - public void visitorParse(RouteResultset rrs, SQLStatement stmt, MycatSchemaStatVisitor visitor) throws SQLNonTransientException { - - } - - /** - * 考虑因素:isChildTable、批量、是否分片 - */ - @Override - public void statementParse(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt) throws SQLNonTransientException { - MySqlInsertStatement insert = (MySqlInsertStatement)stmt; - String tableName = StringUtil.removeBackquote(insert.getTableName().getSimpleName()).toUpperCase(); - - ctx.addTable(tableName); - if(RouterUtil.isNoSharding(schema,tableName)) {//整个schema都不分库或者该表不拆分 - RouterUtil.routeForTableMeta(rrs, schema, tableName, rrs.getStatement()); - rrs.setFinishedRoute(true); - return; - } - - TableConfig tc = schema.getTables().get(tableName); - if(tc == null) { - String msg = "can't find table define in schema " - + tableName + " schema:" + schema.getName(); - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } else { - //childTable的insert直接在解析过程中完成路由 - if (tc.isChildTable()) { - parserChildTable(schema, rrs, tableName, insert); - return; - } +import io.mycat.backend.mysql.nio.handler.FetchStoreNodeOfChildTableHandler; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteResultsetNode; +import io.mycat.route.function.AbstractPartitionAlgorithm; +import 
io.mycat.route.function.SlotFunction; +import io.mycat.route.parser.druid.MycatSchemaStatVisitor; +import io.mycat.route.parser.druid.RouteCalculateUnit; +import io.mycat.route.parser.util.ParseUtil; +import io.mycat.route.util.RouterUtil; +import io.mycat.server.parser.ServerParse; +import io.mycat.util.StringUtil; - String partitionColumn = tc.getPartitionColumn(); +public class DruidInsertParser extends DefaultDruidParser { + @Override + public void visitorParse(RouteResultset rrs, SQLStatement stmt, MycatSchemaStatVisitor visitor) throws SQLNonTransientException { + + } + + /** + * 考虑因素:isChildTable、批量、是否分片 + */ + @Override + public void statementParse(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt) throws SQLNonTransientException { + MySqlInsertStatement insert = (MySqlInsertStatement)stmt; + String tableName = StringUtil.removeBackquote(insert.getTableName().getSimpleName()).toUpperCase(); - if(partitionColumn != null) {//分片表 - //拆分表必须给出column list,否则无法寻找分片字段的值 - if(insert.getColumns() == null || insert.getColumns().size() == 0) { - throw new SQLSyntaxErrorException("partition table, insert must provide ColumnList"); - } + ctx.addTable(tableName); + if(RouterUtil.isNoSharding(schema,tableName)) {//整个schema都不分库或者该表不拆分 + RouterUtil.routeForTableMeta(rrs, schema, tableName, rrs.getStatement()); + rrs.setFinishedRoute(true); + return; + } - //批量insert - if(isMultiInsert(insert)) { + TableConfig tc = schema.getTables().get(tableName); + if(tc == null) { + String msg = "can't find table define in schema " + + tableName + " schema:" + schema.getName(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } else { + //childTable的insert直接在解析过程中完成路由 + if (tc.isChildTable()) { + parserChildTable(schema, rrs, tableName, insert); + return; + } + + String partitionColumn = tc.getPartitionColumn(); + + if(partitionColumn != null) {//分片表 + //拆分表必须给出column list,否则无法寻找分片字段的值 + if(insert.getColumns() == null || insert.getColumns().size() == 0) { + throw 
new SQLSyntaxErrorException("partition table, insert must provide ColumnList"); + } + + //批量insert + if(isMultiInsert(insert)) { // String msg = "multi insert not provided" ; // LOGGER.warn(msg); // throw new SQLNonTransientException(msg); - parserBatchInsert(schema, rrs, partitionColumn, tableName, insert); - } else { - parserSingleInsert(schema, rrs, partitionColumn, tableName, insert); - } - - } - } - } - - /** - * 寻找joinKey的索引 - * @param columns - * @param joinKey - * @return -1表示没找到,>=0表示找到了 - */ - private int getJoinKeyIndex(List columns, String joinKey) { - for(int i = 0; i < columns.size(); i++) { - String col = StringUtil.removeBackquote(columns.get(i).toString()).toUpperCase(); - if(col.equals(joinKey)) { - return i; - } - } - return -1; - } - - /** - * 是否为批量插入:insert into ...values (),()...或 insert into ...select..... - * @param insertStmt - * @return - */ - private boolean isMultiInsert(MySqlInsertStatement insertStmt) { - return (insertStmt.getValuesList() != null && insertStmt.getValuesList().size() > 1) || insertStmt.getQuery() != null; - } - - private RouteResultset parserChildTable(SchemaConfig schema, RouteResultset rrs, - String tableName, MySqlInsertStatement insertStmt) throws SQLNonTransientException { - TableConfig tc = schema.getTables().get(tableName); - - String joinKey = tc.getJoinKey(); - int joinKeyIndex = getJoinKeyIndex(insertStmt.getColumns(), joinKey); - if(joinKeyIndex == -1) { - String inf = "joinKey not provided :" + tc.getJoinKey()+ "," + insertStmt; - LOGGER.warn(inf); - throw new SQLNonTransientException(inf); - } - if(isMultiInsert(insertStmt)) { - String msg = "ChildTable multi insert not provided" ; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - - String joinKeyVal = insertStmt.getValues().getValues().get(joinKeyIndex).toString(); - - - String sql = insertStmt.toString(); - - // try to route by ER parent partion key - RouteResultset theRrs = RouterUtil.routeByERParentKey(null,schema, 
ServerParse.INSERT,sql, rrs, tc,joinKeyVal); - if (theRrs != null) { - rrs.setFinishedRoute(true); - return theRrs; - } - - // route by sql query root parent's datanode - String findRootTBSql = tc.getLocateRTableKeySql().toLowerCase() + joinKeyVal; - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("find root parent's node sql "+ findRootTBSql); - } - FetchStoreNodeOfChildTableHandler fetchHandler = new FetchStoreNodeOfChildTableHandler(); - String dn = fetchHandler.execute(schema.getName(),findRootTBSql, tc.getRootParent().getDataNodes()); - if (dn == null) { - throw new SQLNonTransientException("can't find (root) parent sharding node for sql:"+ sql); - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("found partion node for child table to insert "+ dn + " sql :" + sql); - } - return RouterUtil.routeToSingleNode(rrs, dn, sql); - } - - /** - * 单条insert(非批量) - * @param schema - * @param rrs - * @param partitionColumn - * @param tableName - * @param insertStmt - * @throws SQLNonTransientException - */ - private void parserSingleInsert(SchemaConfig schema, RouteResultset rrs, String partitionColumn, - String tableName, MySqlInsertStatement insertStmt) throws SQLNonTransientException { - boolean isFound = false; - for(int i = 0; i < insertStmt.getColumns().size(); i++) { - if(partitionColumn.equalsIgnoreCase(StringUtil.removeBackquote(insertStmt.getColumns().get(i).toString()))) {//找到分片字段 - isFound = true; - String column = StringUtil.removeBackquote(insertStmt.getColumns().get(i).toString()); - - String value = StringUtil.removeBackquote(insertStmt.getValues().getValues().get(i).toString()); - - RouteCalculateUnit routeCalculateUnit = new RouteCalculateUnit(); - routeCalculateUnit.addShardingExpr(tableName, column, value); - ctx.addRouteCalculateUnit(routeCalculateUnit); - //mycat是单分片键,找到了就返回 - break; - } - } - if(!isFound) {//分片表的 - String msg = "bad insert sql (sharding column:"+ partitionColumn + " not provided," + insertStmt; - LOGGER.warn(msg); - throw new 
SQLNonTransientException(msg); - } - // insert into .... on duplicateKey - //such as :INSERT INTO TABLEName (a,b,c) VALUES (1,2,3) ON DUPLICATE KEY UPDATE b=VALUES(b); - //INSERT INTO TABLEName (a,b,c) VALUES (1,2,3) ON DUPLICATE KEY UPDATE c=c+1; - if(insertStmt.getDuplicateKeyUpdate() != null) { - List updateList = insertStmt.getDuplicateKeyUpdate(); - for(SQLExpr expr : updateList) { - SQLBinaryOpExpr opExpr = (SQLBinaryOpExpr)expr; - String column = StringUtil.removeBackquote(opExpr.getLeft().toString().toUpperCase()); - if(column.equals(partitionColumn)) { - String msg = "partion key can't be updated: " + tableName + " -> " + partitionColumn; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - } - } - } - - /** - * insert into .... select .... 或insert into table() values (),(),.... - * @param schema - * @param rrs - * @param insertStmt - * @throws SQLNonTransientException - */ - private void parserBatchInsert(SchemaConfig schema, RouteResultset rrs, String partitionColumn, - String tableName, MySqlInsertStatement insertStmt) throws SQLNonTransientException { - //insert into table() values (),(),.... 
- if(insertStmt.getValuesList().size() > 1) { - //字段列数 - int columnNum = insertStmt.getColumns().size(); - int shardingColIndex = getSharingColIndex(insertStmt, partitionColumn); - if(shardingColIndex == -1) { - String msg = "bad insert sql (sharding column:"+ partitionColumn + " not provided," + insertStmt; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } else { - List valueClauseList = insertStmt.getValuesList(); - - Map> nodeValuesMap = new HashMap>(); - TableConfig tableConfig = schema.getTables().get(tableName); - AbstractPartitionAlgorithm algorithm = tableConfig.getRule().getRuleAlgorithm(); - for(ValuesClause valueClause : valueClauseList) { - if(valueClause.getValues().size() != columnNum) { - String msg = "bad insert sql columnSize != valueSize:" - + columnNum + " != " + valueClause.getValues().size() - + "values:" + valueClause; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - SQLExpr expr = valueClause.getValues().get(shardingColIndex); - String shardingValue = null; - if(expr instanceof SQLIntegerExpr) { - SQLIntegerExpr intExpr = (SQLIntegerExpr)expr; - shardingValue = intExpr.getNumber() + ""; - } else if (expr instanceof SQLCharExpr) { - SQLCharExpr charExpr = (SQLCharExpr)expr; - shardingValue = charExpr.getText(); - } + parserBatchInsert(schema, rrs, partitionColumn, tableName, insert); + } else { + parserSingleInsert(schema, rrs, partitionColumn, tableName, insert); + } + + } + } + } + + /** + * 寻找joinKey的索引 + * @param columns + * @param joinKey + * @return -1表示没找到,>=0表示找到了 + */ + private int getJoinKeyIndex(List columns, String joinKey) { + for(int i = 0; i < columns.size(); i++) { + String col = StringUtil.removeBackquote(columns.get(i).toString()).toUpperCase(); + if(col.equals(joinKey)) { + return i; + } + } + return -1; + } + + /** + * 是否为批量插入:insert into ...values (),()...或 insert into ...select..... 
+ * @param insertStmt + * @return + */ + private boolean isMultiInsert(MySqlInsertStatement insertStmt) { + return (insertStmt.getValuesList() != null && insertStmt.getValuesList().size() > 1) || insertStmt.getQuery() != null; + } + + private RouteResultset parserChildTable(SchemaConfig schema, RouteResultset rrs, + String tableName, MySqlInsertStatement insertStmt) throws SQLNonTransientException { + TableConfig tc = schema.getTables().get(tableName); + + String joinKey = tc.getJoinKey(); + int joinKeyIndex = getJoinKeyIndex(insertStmt.getColumns(), joinKey); + if(joinKeyIndex == -1) { + String inf = "joinKey not provided :" + tc.getJoinKey()+ "," + insertStmt; + LOGGER.warn(inf); + throw new SQLNonTransientException(inf); + } + if(isMultiInsert(insertStmt)) { + String msg = "ChildTable multi insert not provided" ; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + + String joinKeyVal = insertStmt.getValues().getValues().get(joinKeyIndex).toString(); - Integer nodeIndex = algorithm.calculate(shardingValue); - //没找到插入的分片 - if(nodeIndex == null) { - String msg = "can't find any valid datanode :" + tableName - + " -> " + partitionColumn + " -> " + shardingValue; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - if(nodeValuesMap.get(nodeIndex) == null) { - nodeValuesMap.put(nodeIndex, new ArrayList()); - } - nodeValuesMap.get(nodeIndex).add(valueClause); - System.out.println(); - } + + String sql = insertStmt.toString(); + + // try to route by ER parent partion key + RouteResultset theRrs = RouterUtil.routeByERParentKey(null,schema, ServerParse.INSERT,sql, rrs, tc,joinKeyVal); + if (theRrs != null) { + rrs.setFinishedRoute(true); + return theRrs; + } - RouteResultsetNode[] nodes = new RouteResultsetNode[nodeValuesMap.size()]; - int count = 0; - for(Map.Entry> node : nodeValuesMap.entrySet()) { - Integer nodeIndex = node.getKey(); - List valuesList = node.getValue(); - insertStmt.setValuesList(valuesList); - nodes[count++] = new 
RouteResultsetNode(tableConfig.getDataNodes().get(nodeIndex), - rrs.getSqlType(),insertStmt.toString()); - } - rrs.setNodes(nodes); - rrs.setFinishedRoute(true); - } - } else if(insertStmt.getQuery() != null) { // insert into .... select .... - String msg = "TODO:insert into .... select .... not supported!"; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - } + // route by sql query root parent's datanode + String findRootTBSql = tc.getLocateRTableKeySql().toLowerCase() + joinKeyVal; + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("find root parent's node sql "+ findRootTBSql); + } + FetchStoreNodeOfChildTableHandler fetchHandler = new FetchStoreNodeOfChildTableHandler(); + String dn = fetchHandler.execute(schema.getName(),findRootTBSql, tc.getRootParent().getDataNodes()); + if (dn == null) { + throw new SQLNonTransientException("can't find (root) parent sharding node for sql:"+ sql); + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("found partion node for child table to insert "+ dn + " sql :" + sql); + } + return RouterUtil.routeToSingleNode(rrs, dn, sql); + } + + /** + * 单条insert(非批量) + * @param schema + * @param rrs + * @param partitionColumn + * @param tableName + * @param insertStmt + * @throws SQLNonTransientException + */ + private void parserSingleInsert(SchemaConfig schema, RouteResultset rrs, String partitionColumn, + String tableName, MySqlInsertStatement insertStmt) throws SQLNonTransientException { + boolean isFound = false; + for(int i = 0; i < insertStmt.getColumns().size(); i++) { + if(partitionColumn.equalsIgnoreCase(StringUtil.removeBackquote(insertStmt.getColumns().get(i).toString()))) {//找到分片字段 + isFound = true; + String column = StringUtil.removeBackquote(insertStmt.getColumns().get(i).toString()); + + String value = StringUtil.removeBackquote(insertStmt.getValues().getValues().get(i).toString()); + + RouteCalculateUnit routeCalculateUnit = new RouteCalculateUnit(); + routeCalculateUnit.addShardingExpr(tableName, column, 
value); + ctx.addRouteCalculateUnit(routeCalculateUnit); + //mycat是单分片键,找到了就返回 + break; + } + } + if(!isFound) {//分片表的 + String msg = "bad insert sql (sharding column:"+ partitionColumn + " not provided," + insertStmt; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + // insert into .... on duplicateKey + //such as :INSERT INTO TABLEName (a,b,c) VALUES (1,2,3) ON DUPLICATE KEY UPDATE b=VALUES(b); + //INSERT INTO TABLEName (a,b,c) VALUES (1,2,3) ON DUPLICATE KEY UPDATE c=c+1; + if(insertStmt.getDuplicateKeyUpdate() != null) { + List updateList = insertStmt.getDuplicateKeyUpdate(); + for(SQLExpr expr : updateList) { + SQLBinaryOpExpr opExpr = (SQLBinaryOpExpr)expr; + String column = StringUtil.removeBackquote(opExpr.getLeft().toString().toUpperCase()); + if(column.equals(partitionColumn)) { + String msg = "Sharding column can't be updated: " + tableName + " -> " + partitionColumn; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + } + } + } + + /** + * insert into .... select .... 或insert into table() values (),(),.... + * @param schema + * @param rrs + * @param insertStmt + * @throws SQLNonTransientException + */ + private void parserBatchInsert(SchemaConfig schema, RouteResultset rrs, String partitionColumn, + String tableName, MySqlInsertStatement insertStmt) throws SQLNonTransientException { + //insert into table() values (),(),.... 
+ if(insertStmt.getValuesList().size() > 1) { + //字段列数 + int columnNum = insertStmt.getColumns().size(); + int shardingColIndex = getShardingColIndex(insertStmt, partitionColumn); + if(shardingColIndex == -1) { + String msg = "bad insert sql (sharding column:"+ partitionColumn + " not provided," + insertStmt; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } else { + List valueClauseList = insertStmt.getValuesList(); + + Map> nodeValuesMap = new HashMap>(); + Map slotsMap = new HashMap<>(); + TableConfig tableConfig = schema.getTables().get(tableName); + AbstractPartitionAlgorithm algorithm = tableConfig.getRule().getRuleAlgorithm(); + for(ValuesClause valueClause : valueClauseList) { + if(valueClause.getValues().size() != columnNum) { + String msg = "bad insert sql columnSize != valueSize:" + + columnNum + " != " + valueClause.getValues().size() + + "values:" + valueClause; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + SQLExpr expr = valueClause.getValues().get(shardingColIndex); + String shardingValue = null; + if(expr instanceof SQLIntegerExpr) { + SQLIntegerExpr intExpr = (SQLIntegerExpr)expr; + shardingValue = intExpr.getNumber() + ""; + } else if (expr instanceof SQLCharExpr) { + SQLCharExpr charExpr = (SQLCharExpr)expr; + shardingValue = charExpr.getText(); + } + + Integer nodeIndex = algorithm.calculate(shardingValue); + if(algorithm instanceof SlotFunction){ + slotsMap.put(nodeIndex,((SlotFunction) algorithm).slotValue()) ; + } + //没找到插入的分片 + if(nodeIndex == null) { + String msg = "can't find any valid datanode :" + tableName + + " -> " + partitionColumn + " -> " + shardingValue; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + if(nodeValuesMap.get(nodeIndex) == null) { + nodeValuesMap.put(nodeIndex, new ArrayList()); + } + nodeValuesMap.get(nodeIndex).add(valueClause); + } + + RouteResultsetNode[] nodes = new RouteResultsetNode[nodeValuesMap.size()]; + int count = 0; + for(Map.Entry> node : 
nodeValuesMap.entrySet()) { + Integer nodeIndex = node.getKey(); + List valuesList = node.getValue(); + insertStmt.setValuesList(valuesList); + nodes[count] = new RouteResultsetNode(tableConfig.getDataNodes().get(nodeIndex), + rrs.getSqlType(),insertStmt.toString()); + if(algorithm instanceof SlotFunction) { + nodes[count].setSlot(slotsMap.get(nodeIndex)); + nodes[count].setStatement(ParseUtil.changeInsertAddSlot(nodes[count].getStatement(),nodes[count].getSlot())); + } + nodes[count++].setSource(rrs); - /** - * 寻找拆分字段在 columnList中的索引 - * @param insertStmt - * @param partitionColumn - * @return - */ - private int getSharingColIndex(MySqlInsertStatement insertStmt,String partitionColumn) { - int shardingColIndex = -1; - for(int i = 0; i < insertStmt.getColumns().size(); i++) { - if(partitionColumn.equalsIgnoreCase(StringUtil.removeBackquote(insertStmt.getColumns().get(i).toString()))) {//找到分片字段 - shardingColIndex = i; - return shardingColIndex; - } - } - return shardingColIndex; - } + } + rrs.setNodes(nodes); + rrs.setFinishedRoute(true); + } + } else if(insertStmt.getQuery() != null) { // insert into .... select .... + String msg = "TODO:insert into .... select .... 
not supported!"; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + } + + /** + * 寻找拆分字段在 columnList中的索引 + * @param insertStmt + * @param partitionColumn + * @return + */ + private int getShardingColIndex(MySqlInsertStatement insertStmt,String partitionColumn) { + int shardingColIndex = -1; + for(int i = 0; i < insertStmt.getColumns().size(); i++) { + if(partitionColumn.equalsIgnoreCase(StringUtil.removeBackquote(insertStmt.getColumns().get(i).toString()))) {//找到分片字段 + shardingColIndex = i; + return shardingColIndex; + } + } + return shardingColIndex; + } } diff --git a/src/main/java/io/mycat/route/parser/druid/impl/DruidLockTableParser.java b/src/main/java/io/mycat/route/parser/druid/impl/DruidLockTableParser.java new file mode 100644 index 000000000..c08935a5c --- /dev/null +++ b/src/main/java/io/mycat/route/parser/druid/impl/DruidLockTableParser.java @@ -0,0 +1,78 @@ +package io.mycat.route.parser.druid.impl; + +import java.sql.SQLNonTransientException; +import java.util.List; + +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlLockTableStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlLockTableStatement.LockType; + +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteResultsetNode; +import io.mycat.route.parser.druid.DruidParser; +import io.mycat.route.parser.druid.MycatSchemaStatVisitor; +import io.mycat.server.parser.ServerParse; +import io.mycat.util.SplitUtil; + +/** + * lock tables [table] [write|read]语句解析器 + * @author songdabin + */ +public class DruidLockTableParser extends DefaultDruidParser implements DruidParser { + @Override + public void statementParse(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt) + throws SQLNonTransientException { + MySqlLockTableStatement lockTableStat = (MySqlLockTableStatement)stmt; + String table = 
lockTableStat.getTableSource().toString().toUpperCase(); + TableConfig tableConfig = schema.getTables().get(table); + if (tableConfig == null) { + String msg = "can't find table define of " + table + " in schema:" + schema.getName(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + LockType lockType = lockTableStat.getLockType(); + if (LockType.WRITE != lockType && LockType.READ != lockType) { + String msg = "lock type must be write or read"; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + List dataNodes = tableConfig.getDataNodes(); + RouteResultsetNode[] nodes = new RouteResultsetNode[dataNodes.size()]; + for (int i = 0; i < dataNodes.size(); i ++) { + nodes[i] = new RouteResultsetNode(dataNodes.get(i), ServerParse.LOCK, stmt.toString()); + } + rrs.setNodes(nodes); + rrs.setFinishedRoute(true); + } + + @Override + public void visitorParse(RouteResultset rrs, SQLStatement stmt, MycatSchemaStatVisitor visitor) + throws SQLNonTransientException { + // 对于lock tables table1 write, table2 read类型的多表锁语句,DruidParser只能解析出table1, + // 由于多表锁在分布式场景处理逻辑繁琐,且应用场景较少,因此在此处对这种锁表语句进行拦截。 + // 多表锁的语句在语义上会有",",这里以此为判断依据 + String sql = rrs.getStatement(); + sql = sql.replaceAll("\n", " ").replaceAll("\t", " "); + String[] stmts = SplitUtil.split(sql, ',', true); + // 如果命令中存在",",则按多表锁的语句来处理 + if (stmts.length > 1) { + String tmpStmt = null; + String tmpWords[] = null; + for (int i = 1; i < stmts.length; i ++) { + tmpStmt = stmts[i]; + tmpWords = SplitUtil.split(tmpStmt, ' ', true); + if (tmpWords.length==2 && ("READ".equalsIgnoreCase(tmpWords[1]) || "WRITE".equalsIgnoreCase(tmpWords[1]))) { + // 如果符合多表锁的语法,则继续,并在最后提示不能多表锁! + continue; + } else { + // 如果不符合多表锁的语法,则提示语法错误和不能多表锁! 
+ throw new SQLNonTransientException("You have an error in your SQL syntax, don't support lock multi tables!"); + } + } + LOGGER.error("can't lock multi-table"); + throw new SQLNonTransientException("can't lock multi-table"); + } + } +} diff --git a/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectDb2Parser.java b/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectDb2Parser.java index 1c6754ab5..9684cae01 100644 --- a/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectDb2Parser.java +++ b/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectDb2Parser.java @@ -1,8 +1,5 @@ package io.mycat.route.parser.druid.impl; -import io.mycat.route.RouteResultset; -import io.mycat.server.config.node.SchemaConfig; - import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -22,6 +19,9 @@ import com.alibaba.druid.sql.dialect.oracle.ast.stmt.OracleSelectQueryBlock; import com.alibaba.druid.util.JdbcConstants; +import io.mycat.config.model.SchemaConfig; +import io.mycat.route.RouteResultset; + /** * 由于druid的db2解析部分不够完整,且使用oracle的解析基本能满足需求 @@ -70,7 +70,9 @@ protected void parseNativePageSql(SQLStatement stmt, RouteResultset rrs, OracleS { SQLIntegerExpr right = (SQLIntegerExpr) one.getRight(); int firstrownum = right.getNumber().intValue(); - if (operator == SQLBinaryOperator.LessThan&&firstrownum!=0) firstrownum = firstrownum - 1; + if (operator == SQLBinaryOperator.LessThan&&firstrownum!=0) { + firstrownum = firstrownum - 1; + } if (subSelect instanceof OracleSelectQueryBlock) { rrs.setLimitStart(0); @@ -98,26 +100,34 @@ protected void parseNativePageSql(SQLStatement stmt, RouteResultset rrs, OracleS { small=leftE; firstrownum=((SQLIntegerExpr) leftE.getRight()).getNumber().intValue(); - if(leftE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual &&firstrownum!=0) firstrownum = firstrownum - 1; + if(leftE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual &&firstrownum!=0) { + firstrownum = firstrownum - 1; + } } else 
if(leftE.getRight() instanceof SQLIntegerExpr&&(leftE.getOperator()==SQLBinaryOperator.LessThan||leftE.getOperator()==SQLBinaryOperator.LessThanOrEqual)) { larger=leftE; lastrownum=((SQLIntegerExpr) leftE.getRight()).getNumber().intValue(); - if(leftE.getOperator()==SQLBinaryOperator.LessThan&&lastrownum!=0) lastrownum = lastrownum - 1; + if(leftE.getOperator()==SQLBinaryOperator.LessThan&&lastrownum!=0) { + lastrownum = lastrownum - 1; + } } if(rightE.getRight() instanceof SQLIntegerExpr&&(rightE.getOperator()==SQLBinaryOperator.GreaterThan||rightE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual)) { small=rightE; firstrownum=((SQLIntegerExpr) rightE.getRight()).getNumber().intValue(); - if(rightE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual&&firstrownum!=0) firstrownum = firstrownum - 1; + if(rightE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual&&firstrownum!=0) { + firstrownum = firstrownum - 1; + } } else if(rightE.getRight() instanceof SQLIntegerExpr&&(rightE.getOperator()==SQLBinaryOperator.LessThan||rightE.getOperator()==SQLBinaryOperator.LessThanOrEqual)) { larger=rightE; lastrownum=((SQLIntegerExpr) rightE.getRight()).getNumber().intValue(); - if(rightE.getOperator()==SQLBinaryOperator.LessThan&&lastrownum!=0) lastrownum = lastrownum - 1; + if(rightE.getOperator()==SQLBinaryOperator.LessThan&&lastrownum!=0) { + lastrownum = lastrownum - 1; + } } if(small!=null&&larger!=null) { @@ -138,13 +148,7 @@ protected void parseNativePageSql(SQLStatement stmt, RouteResultset rrs, OracleS { parseNativeSql(stmt,rrs,mysqlSelectQuery,schema); } - - - } - - - } else { diff --git a/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectOracleParser.java b/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectOracleParser.java index 4d7d423c0..bd2c7435c 100644 --- a/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectOracleParser.java +++ b/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectOracleParser.java @@ -1,8 +1,5 @@ package 
io.mycat.route.parser.druid.impl; -import io.mycat.route.RouteResultset; -import io.mycat.server.config.node.SchemaConfig; - import java.util.List; import java.util.Map; @@ -26,6 +23,9 @@ import com.alibaba.druid.sql.dialect.oracle.parser.OracleStatementParser; import com.alibaba.druid.util.JdbcConstants; +import io.mycat.config.model.SchemaConfig; +import io.mycat.route.RouteResultset; + public class DruidSelectOracleParser extends DruidSelectParser { @Override @@ -98,7 +98,9 @@ protected void parseNativePageSql(SQLStatement stmt, RouteResultset rrs, OracleS { SQLIntegerExpr right = (SQLIntegerExpr) one.getRight(); int firstrownum = right.getNumber().intValue(); - if (operator == SQLBinaryOperator.LessThan&&firstrownum!=0) firstrownum = firstrownum - 1; + if (operator == SQLBinaryOperator.LessThan&&firstrownum!=0) { + firstrownum = firstrownum - 1; + } SQLSelectQuery subSelect = ((SQLSubqueryTableSource) from).getSelect().getQuery(); if (subSelect instanceof OracleSelectQueryBlock) { @@ -146,7 +148,9 @@ protected void parseNativePageSql(SQLStatement stmt, RouteResultset rrs, OracleS { SQLIntegerExpr right = (SQLIntegerExpr) one.getRight(); int firstrownum = right.getNumber().intValue(); - if (operator == SQLBinaryOperator.LessThan&&firstrownum!=0) firstrownum = firstrownum - 1; + if (operator == SQLBinaryOperator.LessThan&&firstrownum!=0) { + firstrownum = firstrownum - 1; + } if (subSelect instanceof OracleSelectQueryBlock) { rrs.setLimitStart(0); @@ -174,26 +178,34 @@ protected void parseNativePageSql(SQLStatement stmt, RouteResultset rrs, OracleS { small=leftE; firstrownum=((SQLIntegerExpr) leftE.getRight()).getNumber().intValue(); - if(leftE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual &&firstrownum!=0) firstrownum = firstrownum - 1; + if(leftE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual &&firstrownum!=0) { + firstrownum = firstrownum - 1; + } } else if(leftE.getRight() instanceof 
SQLIntegerExpr&&(leftE.getOperator()==SQLBinaryOperator.LessThan||leftE.getOperator()==SQLBinaryOperator.LessThanOrEqual)) { larger=leftE; lastrownum=((SQLIntegerExpr) leftE.getRight()).getNumber().intValue(); - if(leftE.getOperator()==SQLBinaryOperator.LessThan&&lastrownum!=0) lastrownum = lastrownum - 1; + if(leftE.getOperator()==SQLBinaryOperator.LessThan&&lastrownum!=0) { + lastrownum = lastrownum - 1; + } } if(rightE.getRight() instanceof SQLIntegerExpr&&(rightE.getOperator()==SQLBinaryOperator.GreaterThan||rightE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual)) { small=rightE; firstrownum=((SQLIntegerExpr) rightE.getRight()).getNumber().intValue(); - if(rightE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual&&firstrownum!=0) firstrownum = firstrownum - 1; + if(rightE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual&&firstrownum!=0) { + firstrownum = firstrownum - 1; + } } else if(rightE.getRight() instanceof SQLIntegerExpr&&(rightE.getOperator()==SQLBinaryOperator.LessThan||rightE.getOperator()==SQLBinaryOperator.LessThanOrEqual)) { larger=rightE; lastrownum=((SQLIntegerExpr) rightE.getRight()).getNumber().intValue(); - if(rightE.getOperator()==SQLBinaryOperator.LessThan&&lastrownum!=0) lastrownum = lastrownum - 1; + if(rightE.getOperator()==SQLBinaryOperator.LessThan&&lastrownum!=0) { + lastrownum = lastrownum - 1; + } } if(small!=null&&larger!=null) { @@ -245,7 +257,9 @@ private void parseThreeLevelPageSql(SQLStatement stmt, RouteResultset rrs, Schem { SQLIntegerExpr right = (SQLIntegerExpr) one.getRight(); int firstrownum = right.getNumber().intValue(); - if (operator == SQLBinaryOperator.GreaterThanOrEqual&&firstrownum!=0) firstrownum = firstrownum - 1; + if (operator == SQLBinaryOperator.GreaterThanOrEqual&&firstrownum!=0) { + firstrownum = firstrownum - 1; + } SQLSelectQuery subSelect = from.getSelect().getQuery(); if (subSelect instanceof OracleSelectQueryBlock) { //第二层子查询 @@ -258,7 +272,9 @@ private void parseThreeLevelPageSql(SQLStatement 
stmt, RouteResultset rrs, Schem if (isRowNum && twoWhere.getRight() instanceof SQLIntegerExpr && isLess) { int lastrownum = ((SQLIntegerExpr) twoWhere.getRight()).getNumber().intValue(); - if (operator == SQLBinaryOperator.LessThan&&lastrownum!=0) lastrownum = lastrownum - 1; + if (operator == SQLBinaryOperator.LessThan&&lastrownum!=0) { + lastrownum = lastrownum - 1; + } SQLSelectQuery finalQuery = ((SQLSubqueryTableSource) twoSubSelect.getFrom()).getSelect().getQuery(); if (finalQuery instanceof OracleSelectQueryBlock) { @@ -278,6 +294,6 @@ private void parseThreeLevelPageSql(SQLStatement stmt, RouteResultset rrs, Schem - + } diff --git a/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectParser.java b/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectParser.java index 9dc096abe..66770a8db 100644 --- a/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectParser.java +++ b/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectParser.java @@ -1,22 +1,8 @@ package io.mycat.route.parser.druid.impl; -import io.mycat.MycatServer; -import io.mycat.cache.LayerCachePool; -import io.mycat.route.RouteResultset; -import io.mycat.route.RouteResultsetNode; -import io.mycat.route.parser.druid.RouteCalculateUnit; -import io.mycat.route.util.RouterUtil; -import io.mycat.server.ErrorCode; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.TableConfig; -import io.mycat.sqlengine.mpp.ColumnRoutePair; -import io.mycat.sqlengine.mpp.HavingCols; -import io.mycat.sqlengine.mpp.MergeCol; -import io.mycat.sqlengine.mpp.OrderCol; -import io.mycat.util.ObjectUtil; -import io.mycat.util.StringUtil; - import java.sql.SQLNonTransientException; +import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; @@ -26,6 +12,7 @@ import java.util.SortedSet; import java.util.TreeSet; +import com.alibaba.druid.sql.SQLUtils; import com.alibaba.druid.sql.ast.SQLExpr; 
import com.alibaba.druid.sql.ast.SQLName; import com.alibaba.druid.sql.ast.SQLOrderingSpecification; @@ -40,79 +27,119 @@ import com.alibaba.druid.sql.ast.expr.SQLNumericLiteralExpr; import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr; import com.alibaba.druid.sql.ast.expr.SQLTextLiteralExpr; +import com.alibaba.druid.sql.ast.statement.SQLExprTableSource; import com.alibaba.druid.sql.ast.statement.SQLSelectGroupByClause; import com.alibaba.druid.sql.ast.statement.SQLSelectItem; import com.alibaba.druid.sql.ast.statement.SQLSelectOrderByItem; import com.alibaba.druid.sql.ast.statement.SQLSelectQuery; import com.alibaba.druid.sql.ast.statement.SQLSelectQueryBlock; import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; -import com.alibaba.druid.sql.dialect.mysql.ast.expr.MySqlSelectGroupByExpr; +import com.alibaba.druid.sql.ast.statement.SQLTableSource; +import com.alibaba.druid.sql.dialect.db2.ast.stmt.DB2SelectQueryBlock; +import com.alibaba.druid.sql.dialect.db2.visitor.DB2OutputVisitor; +import com.alibaba.druid.sql.dialect.mysql.ast.expr.MySqlOrderingExpr; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock.Limit; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlUnionQuery; +import com.alibaba.druid.sql.dialect.mysql.visitor.MySqlOutputVisitor; +import com.alibaba.druid.sql.dialect.mysql.visitor.MySqlSchemaStatVisitor; +import com.alibaba.druid.sql.dialect.oracle.ast.stmt.OracleSelectQueryBlock; +import com.alibaba.druid.sql.dialect.oracle.visitor.OracleOutputVisitor; +import com.alibaba.druid.sql.dialect.postgresql.ast.stmt.PGSelectQueryBlock; +import com.alibaba.druid.sql.dialect.postgresql.visitor.PGOutputVisitor; +import com.alibaba.druid.sql.dialect.sqlserver.ast.SQLServerSelectQueryBlock; +import com.alibaba.druid.sql.visitor.SQLASTOutputVisitor; import com.alibaba.druid.util.JdbcConstants; import 
com.alibaba.druid.wall.spi.WallVisitorUtils; +import io.mycat.MycatServer; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.ErrorCode; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteResultsetNode; +import io.mycat.route.parser.druid.MycatSchemaStatVisitor; +import io.mycat.route.parser.druid.RouteCalculateUnit; +import io.mycat.route.util.RouterUtil; +import io.mycat.sqlengine.mpp.ColumnRoutePair; +import io.mycat.sqlengine.mpp.HavingCols; +import io.mycat.sqlengine.mpp.MergeCol; +import io.mycat.sqlengine.mpp.OrderCol; +import io.mycat.util.ObjectUtil; +import io.mycat.util.StringUtil; + public class DruidSelectParser extends DefaultDruidParser { protected boolean isNeedParseOrderAgg=true; @Override - public void statementParse(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt) { - SQLSelectStatement selectStmt = (SQLSelectStatement)stmt; - SQLSelectQuery sqlSelectQuery = selectStmt.getSelect().getQuery(); - if(sqlSelectQuery instanceof MySqlSelectQueryBlock) { - MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery(); - - parseOrderAggGroupMysql(schema, stmt,rrs, mysqlSelectQuery); - //更改canRunInReadDB属性 - if ((mysqlSelectQuery.isForUpdate() || mysqlSelectQuery.isLockInShareMode()) && rrs.isAutocommit() == false) - { - rrs.setCanRunInReadDB(false); - } - - } else if (sqlSelectQuery instanceof MySqlUnionQuery) { + public void statementParse(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt) { + SQLSelectStatement selectStmt = (SQLSelectStatement)stmt; + SQLSelectQuery sqlSelectQuery = selectStmt.getSelect().getQuery(); + if(sqlSelectQuery instanceof MySqlSelectQueryBlock) { + MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery(); + + parseOrderAggGroupMysql(schema, stmt,rrs, mysqlSelectQuery); + //更改canRunInReadDB属性 + if 
((mysqlSelectQuery.isForUpdate() || mysqlSelectQuery.isLockInShareMode()) && rrs.isAutocommit() == false) + { + rrs.setCanRunInReadDB(false); + } + + } else if (sqlSelectQuery instanceof MySqlUnionQuery) { // MySqlUnionQuery unionQuery = (MySqlUnionQuery)sqlSelectQuery; // MySqlSelectQueryBlock left = (MySqlSelectQueryBlock)unionQuery.getLeft(); // MySqlSelectQueryBlock right = (MySqlSelectQueryBlock)unionQuery.getLeft(); // System.out.println(); - } - } - protected void parseOrderAggGroupMysql(SchemaConfig schema, SQLStatement stmt, RouteResultset rrs, MySqlSelectQueryBlock mysqlSelectQuery) - { - if(!isNeedParseOrderAgg) + } + } + protected void parseOrderAggGroupMysql(SchemaConfig schema, SQLStatement stmt, RouteResultset rrs, MySqlSelectQueryBlock mysqlSelectQuery) + { + MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor(); + stmt.accept(visitor); +// rrs.setGroupByCols((String[])visitor.getGroupByColumns().toArray()); + if(!isNeedParseOrderAgg) { return; } - Map aliaColumns = parseAggGroupCommon(schema, stmt, rrs, mysqlSelectQuery); + Map aliaColumns = parseAggGroupCommon(schema, stmt, rrs, mysqlSelectQuery); - //setOrderByCols - if(mysqlSelectQuery.getOrderBy() != null) { - List orderByItems = mysqlSelectQuery.getOrderBy().getItems(); - rrs.setOrderByCols(buildOrderByCols(orderByItems,aliaColumns)); - } + //setOrderByCols + if(mysqlSelectQuery.getOrderBy() != null) { + List orderByItems = mysqlSelectQuery.getOrderBy().getItems(); + rrs.setOrderByCols(buildOrderByCols(orderByItems,aliaColumns)); + } isNeedParseOrderAgg=false; - } - protected Map parseAggGroupCommon(SchemaConfig schema, SQLStatement stmt, RouteResultset rrs, SQLSelectQueryBlock mysqlSelectQuery) - { - Map aliaColumns = new HashMap(); - Map aggrColumns = new HashMap(); - List selectList = mysqlSelectQuery.getSelectList(); + } + protected Map parseAggGroupCommon(SchemaConfig schema, SQLStatement stmt, RouteResultset rrs, SQLSelectQueryBlock mysqlSelectQuery) + { + Map aliaColumns = new 
HashMap(); + Map aggrColumns = new HashMap(); + // Added by winbill, 20160314, for having clause, Begin ==> + List havingColsName = new ArrayList(); + // Added by winbill, 20160314, for having clause, End <== + List selectList = mysqlSelectQuery.getSelectList(); boolean isNeedChangeSql=false; int size = selectList.size(); boolean isDistinct=mysqlSelectQuery.getDistionOption()==2; for (int i = 0; i < size; i++) - { - SQLSelectItem item = selectList.get(i); - - if (item.getExpr() instanceof SQLAggregateExpr) - { - SQLAggregateExpr expr = (SQLAggregateExpr) item.getExpr(); - String method = expr.getMethodName(); - - //只处理有别名的情况,无别名添加别名,否则某些数据库会得不到正确结果处理 - int mergeType = MergeCol.getMergeType(method); + { + SQLSelectItem item = selectList.get(i); + + if (item.getExpr() instanceof SQLAggregateExpr) + { + SQLAggregateExpr expr = (SQLAggregateExpr) item.getExpr(); + String method = expr.getMethodName(); + boolean isHasArgument=!expr.getArguments().isEmpty(); + if(isHasArgument) + { + String aggrColName = method + "(" + expr.getArguments().get(0) + ")"; // Added by winbill, 20160314, for having clause + havingColsName.add(aggrColName); // Added by winbill, 20160314, for having clause + } + //只处理有别名的情况,无别名添加别名,否则某些数据库会得不到正确结果处理 + int mergeType = MergeCol.getMergeType(method); if (MergeCol.MERGE_AVG == mergeType&&isRoutMultiNode(schema,rrs)) { //跨分片avg需要特殊处理,直接avg结果是不对的 String colName = item.getAlias() != null ? 
item.getAlias() : method + i; @@ -120,12 +147,14 @@ protected Map parseAggGroupCommon(SchemaConfig schema, SQLStatem String sumColName = colName + "SUM"; sum.setAlias(sumColName); SQLAggregateExpr sumExp =new SQLAggregateExpr("SUM"); - ObjectUtil.copyProperties(expr, sumExp); + ObjectUtil.copyProperties(expr,sumExp); sumExp.getArguments().addAll(expr.getArguments()); sumExp.setMethodName("SUM"); sum.setExpr(sumExp); selectList.set(i, sum); aggrColumns.put(sumColName, MergeCol.MERGE_SUM); + havingColsName.add(sumColName); // Added by winbill, 20160314, for having clause + havingColsName.add(item.getAlias() != null ? item.getAlias() : ""); // Added by winbill, 20160314, two aliases for AVG SQLSelectItem count =new SQLSelectItem(); String countColName = colName + "COUNT"; @@ -141,38 +170,55 @@ protected Map parseAggGroupCommon(SchemaConfig schema, SQLStatem isNeedChangeSql=true; aggrColumns.put(colName, mergeType); rrs.setHasAggrColumn(true); - } else - if (MergeCol.MERGE_UNSUPPORT != mergeType) - { - if (item.getAlias() != null && item.getAlias().length() > 0) - { - aggrColumns.put(item.getAlias(), mergeType); - } else - { //如果不加,jdbc方式时取不到正确结果 ;修改添加别名 - item.setAlias(method + i); - aggrColumns.put(method + i, mergeType); - isNeedChangeSql=true; - } - rrs.setHasAggrColumn(true); - } - } else - { - if (!(item.getExpr() instanceof SQLAllColumnExpr)) - { - String alia = item.getAlias(); - String field = getFieldName(item); - if (alia == null) - { - alia = field; - } - aliaColumns.put(field, alia); - } - } - - } - if(aggrColumns.size() > 0) { - rrs.setMergeCols(aggrColumns); - } + } else if (MergeCol.MERGE_UNSUPPORT != mergeType){ + String aggColName = null; + StringBuilder sb = new StringBuilder(); + if(mysqlSelectQuery instanceof MySqlSelectQueryBlock) { + expr.accept(new MySqlOutputVisitor(sb)); + } else if(mysqlSelectQuery instanceof OracleSelectQueryBlock) { + expr.accept(new OracleOutputVisitor(sb)); + } else if(mysqlSelectQuery instanceof PGSelectQueryBlock){ + 
expr.accept(new PGOutputVisitor(sb)); + } else if(mysqlSelectQuery instanceof SQLServerSelectQueryBlock) { + expr.accept(new SQLASTOutputVisitor(sb)); + } else if(mysqlSelectQuery instanceof DB2SelectQueryBlock) { + expr.accept(new DB2OutputVisitor(sb)); + } + aggColName = sb.toString(); + + if (item.getAlias() != null && item.getAlias().length() > 0) + { + aggrColumns.put(item.getAlias(), mergeType); + aliaColumns.put(aggColName,item.getAlias()); + } else + { //如果不加,jdbc方式时取不到正确结果 ;修改添加别名 + item.setAlias(method + i); + aggrColumns.put(method + i, mergeType); + aliaColumns.put(aggColName, method + i); + isNeedChangeSql=true; + } + rrs.setHasAggrColumn(true); + havingColsName.add(item.getAlias()); // Added by winbill, 20160314, for having clause + havingColsName.add(""); // Added by winbill, 20160314, one alias for non-AVG + } + } else + { + if (!(item.getExpr() instanceof SQLAllColumnExpr)) + { + String alia = item.getAlias(); + String field = getFieldName(item); + if (alia == null) + { + alia = field; + } + aliaColumns.put(field, alia); + } + } + + } + if(aggrColumns.size() > 0) { + rrs.setMergeCols(aggrColumns); + } //通过优化转换成group by来实现 if(isDistinct) @@ -189,13 +235,14 @@ protected Map parseAggGroupCommon(SchemaConfig schema, SQLStatem //setGroupByCols - if(mysqlSelectQuery.getGroupBy() != null) { - List groupByItems = mysqlSelectQuery.getGroupBy().getItems(); - String[] groupByCols = buildGroupByCols(groupByItems,aliaColumns); - rrs.setGroupByCols(groupByCols); - rrs.setHavings(buildGroupByHaving(mysqlSelectQuery.getGroupBy().getHaving())); - rrs.setHasAggrColumn(true); - } + if(mysqlSelectQuery.getGroupBy() != null) { + List groupByItems = mysqlSelectQuery.getGroupBy().getItems(); + String[] groupByCols = buildGroupByCols(groupByItems,aliaColumns); + rrs.setGroupByCols(groupByCols); + rrs.setHavings(buildGroupByHaving(mysqlSelectQuery.getGroupBy().getHaving(),aliaColumns)); + rrs.setHasAggrColumn(true); + rrs.setHavingColsName(havingColsName.toArray()); // 
Added by winbill, 20160314, for having clause + } if (isNeedChangeSql) @@ -204,364 +251,424 @@ protected Map parseAggGroupCommon(SchemaConfig schema, SQLStatem rrs.changeNodeSqlAfterAddLimit(schema,getCurentDbType(),sql,0,-1, false); getCtx().setSql(sql); } - return aliaColumns; - } - - private HavingCols buildGroupByHaving(SQLExpr having){ - if (having == null) { - return null; - } - - SQLBinaryOpExpr expr = ((SQLBinaryOpExpr) having); - SQLExpr left = expr.getLeft(); - SQLBinaryOperator operator = expr.getOperator(); - SQLExpr right = expr.getRight(); - - String leftValue = null;; - if (left instanceof SQLAggregateExpr) { - leftValue = ((SQLAggregateExpr) left).getMethodName() + "(" - + ((SQLAggregateExpr) left).getArguments().get(0) + ")"; - } else if (left instanceof SQLIdentifierExpr) { - leftValue = ((SQLIdentifierExpr) left).getName(); - } - - String rightValue = null; - if (right instanceof SQLNumericLiteralExpr) { - rightValue = right.toString(); - }else if(right instanceof SQLTextLiteralExpr){ - rightValue = StringUtil.removeBackquote(right.toString()); - } - - return new HavingCols(leftValue,rightValue,operator.getName()); - } - - private boolean isRoutMultiNode(SchemaConfig schema, RouteResultset rrs) - { - if(rrs.getNodes()!=null&&rrs.getNodes().length>1) - { - return true; - } - LayerCachePool tableId2DataNodeCache = (LayerCachePool) MycatServer.getInstance().getCacheService().getCachePool("TableID2DataNodeCache"); - try - { - tryRoute(schema, rrs, tableId2DataNodeCache); - if(rrs.getNodes()!=null&&rrs.getNodes().length>1) - { - return true; - } - } catch (SQLNonTransientException e) - { - throw new RuntimeException(e); - } - return false; - } - - private String getFieldName(SQLSelectItem item){ - if ((item.getExpr() instanceof SQLPropertyExpr)||(item.getExpr() instanceof SQLMethodInvokeExpr) - || (item.getExpr() instanceof SQLIdentifierExpr) || item.getExpr() instanceof SQLBinaryOpExpr) { - return item.getExpr().toString();//字段别名 - } - else - return 
item.toString(); - } - /** - * 改写sql:需要加limit的加上 - */ - @Override - public void changeSql(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt,LayerCachePool cachePool) throws SQLNonTransientException { - - tryRoute(schema, rrs, cachePool); - - rrs.copyLimitToNodes(); - - SQLSelectStatement selectStmt = (SQLSelectStatement)stmt; - SQLSelectQuery sqlSelectQuery = selectStmt.getSelect().getQuery(); - if(sqlSelectQuery instanceof MySqlSelectQueryBlock) { - MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery(); - int limitStart = 0; - int limitSize = schema.getDefaultMaxLimit(); - - //clear group having - SQLSelectGroupByClause groupByClause = mysqlSelectQuery.getGroupBy(); - if(groupByClause != null && groupByClause.getHaving() != null){ - groupByClause.setHaving(null); - } - - Map>> allConditions = getAllConditions(); - boolean isNeedAddLimit = isNeedAddLimit(schema, rrs, mysqlSelectQuery, allConditions); - if(isNeedAddLimit) { - Limit limit = new Limit(); - limit.setRowCount(new SQLIntegerExpr(limitSize)); - mysqlSelectQuery.setLimit(limit); - rrs.setLimitSize(limitSize); - String sql= getSql(rrs, stmt, isNeedAddLimit); - rrs.changeNodeSqlAfterAddLimit(schema, getCurentDbType(), sql, 0, limitSize, true); - - } - Limit limit = mysqlSelectQuery.getLimit(); - if(limit != null&&!isNeedAddLimit) { - SQLIntegerExpr offset = (SQLIntegerExpr)limit.getOffset(); - SQLIntegerExpr count = (SQLIntegerExpr)limit.getRowCount(); - if(offset != null) { - limitStart = offset.getNumber().intValue(); - rrs.setLimitStart(limitStart); - } - if(count != null) { - limitSize = count.getNumber().intValue(); - rrs.setLimitSize(limitSize); - } - - if(isNeedChangeLimit(rrs)) { - Limit changedLimit = new Limit(); - changedLimit.setRowCount(new SQLIntegerExpr(limitStart + limitSize)); - - if(offset != null) { - if(limitStart < 0) { - String msg = "You have an error in your SQL syntax; check the manual that " + - "corresponds to your MySQL server 
version for the right syntax to use near '" + limitStart + "'"; - throw new SQLNonTransientException(ErrorCode.ER_PARSE_ERROR + " - " + msg); - } else { - changedLimit.setOffset(new SQLIntegerExpr(0)); - - } - } - - mysqlSelectQuery.setLimit(changedLimit); + return aliaColumns; + } + + private HavingCols buildGroupByHaving(SQLExpr having,Map aliaColumns ){ + if (having == null) { + return null; + } + + SQLBinaryOpExpr expr = ((SQLBinaryOpExpr) having); + SQLExpr left = expr.getLeft(); + SQLBinaryOperator operator = expr.getOperator(); + SQLExpr right = expr.getRight(); + + String leftValue = null;; + if (left instanceof SQLAggregateExpr) { + leftValue = ((SQLAggregateExpr) left).getMethodName() + "(" + + ((SQLAggregateExpr) left).getArguments().get(0) + ")"; + String aggrColumnAlias = getAliaColumn(aliaColumns,leftValue); + if(aggrColumnAlias != null) { // having聚合函数存在别名 + expr.setLeft(new SQLIdentifierExpr(aggrColumnAlias)); + leftValue = aggrColumnAlias; + } + } else if (left instanceof SQLIdentifierExpr) { + leftValue = ((SQLIdentifierExpr) left).getName(); + } + + String rightValue = null; + if (right instanceof SQLNumericLiteralExpr) { + rightValue = right.toString(); + }else if(right instanceof SQLTextLiteralExpr){ + rightValue = StringUtil.removeBackquote(right.toString()); + } + + return new HavingCols(leftValue,rightValue,operator.getName()); + } + + private boolean isRoutMultiNode(SchemaConfig schema, RouteResultset rrs) + { + if(rrs.getNodes()!=null&&rrs.getNodes().length>1) + { + return true; + } + LayerCachePool tableId2DataNodeCache = (LayerCachePool) MycatServer.getInstance().getCacheService().getCachePool("TableID2DataNodeCache"); + try + { + tryRoute(schema, rrs, tableId2DataNodeCache); + if(rrs.getNodes()!=null&&rrs.getNodes().length>1) + { + return true; + } + } catch (SQLNonTransientException e) + { + throw new RuntimeException(e); + } + return false; + } + + private String getFieldName(SQLSelectItem item){ + if ((item.getExpr() instanceof 
SQLPropertyExpr)||(item.getExpr() instanceof SQLMethodInvokeExpr) + || (item.getExpr() instanceof SQLIdentifierExpr) || item.getExpr() instanceof SQLBinaryOpExpr) { + return item.getExpr().toString();//字段别名 + } + else { + return item.toString(); + } + } + + /** + * 现阶段目标为 有一个只涉及到一张表的子查询时,先执行子查询,获得返回结果后,改写原有sql继续执行,得到最终结果. + * 在这种情况下,原sql不需要继续解析. + * 使用catlet 的情况也不再继续解析. + */ + @Override + public boolean afterVisitorParser(RouteResultset rrs, SQLStatement stmt, MycatSchemaStatVisitor visitor) { + int subQuerySize = visitor.getSubQuerys().size(); + + if(subQuerySize==0&&ctx.getTables().size()==2){ //两表关联,考虑使用catlet + if(ctx.getVisitor().getConditions() !=null && ctx.getVisitor().getConditions().size()>0){ + return true; + } + }else if(subQuerySize==1){ //只涉及一张表的子查询,使用 MiddlerResultHandler 获取中间结果后,改写原有 sql 继续执行 TODO 后期可能会考虑多个. + SQLSelectQuery sqlSelectQuery = visitor.getSubQuerys().iterator().next().getQuery(); + if(((MySqlSelectQueryBlock)sqlSelectQuery).getFrom() instanceof SQLExprTableSource) { + return true; + } + } + + return super.afterVisitorParser(rrs, stmt, visitor); + } + + /** + * 改写sql:需要加limit的加上 + */ + @Override + public void changeSql(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt,LayerCachePool cachePool) throws SQLNonTransientException { + + tryRoute(schema, rrs, cachePool); + + rrs.copyLimitToNodes(); + + SQLSelectStatement selectStmt = (SQLSelectStatement)stmt; + SQLSelectQuery sqlSelectQuery = selectStmt.getSelect().getQuery(); + if(sqlSelectQuery instanceof MySqlSelectQueryBlock) { + MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery(); + int limitStart = 0; + int limitSize = schema.getDefaultMaxLimit(); + + //clear group having + SQLSelectGroupByClause groupByClause = mysqlSelectQuery.getGroupBy(); + // Modified by winbill, 20160614, do NOT include having clause when routing to multiple nodes + if(groupByClause != null && groupByClause.getHaving() != null && isRoutMultiNode(schema,rrs)){ + 
groupByClause.setHaving(null); + } + + Map>> allConditions = getAllConditions(); + boolean isNeedAddLimit = isNeedAddLimit(schema, rrs, mysqlSelectQuery, allConditions); + if(isNeedAddLimit) { + Limit limit = new Limit(); + limit.setRowCount(new SQLIntegerExpr(limitSize)); + mysqlSelectQuery.setLimit(limit); + rrs.setLimitSize(limitSize); + String sql= getSql(rrs, stmt, isNeedAddLimit); + rrs.changeNodeSqlAfterAddLimit(schema, getCurentDbType(), sql, 0, limitSize, true); + + } + Limit limit = mysqlSelectQuery.getLimit(); + if(limit != null&&!isNeedAddLimit) { + SQLIntegerExpr offset = (SQLIntegerExpr)limit.getOffset(); + SQLIntegerExpr count = (SQLIntegerExpr)limit.getRowCount(); + if(offset != null) { + limitStart = offset.getNumber().intValue(); + rrs.setLimitStart(limitStart); + } + if(count != null) { + limitSize = count.getNumber().intValue(); + rrs.setLimitSize(limitSize); + } + + if(isNeedChangeLimit(rrs)) { + Limit changedLimit = new Limit(); + changedLimit.setRowCount(new SQLIntegerExpr(limitStart + limitSize)); + + if(offset != null) { + if(limitStart < 0) { + String msg = "You have an error in your SQL syntax; check the manual that " + + "corresponds to your MySQL server version for the right syntax to use near '" + limitStart + "'"; + throw new SQLNonTransientException(ErrorCode.ER_PARSE_ERROR + " - " + msg); + } else { + changedLimit.setOffset(new SQLIntegerExpr(0)); + + } + } + + mysqlSelectQuery.setLimit(changedLimit); String sql= getSql(rrs, stmt, isNeedAddLimit); - rrs.changeNodeSqlAfterAddLimit(schema,getCurentDbType(),sql,0, limitStart + limitSize, true); - - //设置改写后的sql - ctx.setSql(sql); - - } else - { - - rrs.changeNodeSqlAfterAddLimit(schema,getCurentDbType(),getCtx().getSql(),rrs.getLimitStart(), rrs.getLimitSize(), true); - // ctx.setSql(nativeSql); - - } - - - } - - rrs.setCacheAble(isNeedCache(schema, rrs, mysqlSelectQuery, allConditions)); - } - - } - - /** - * 获取所有的条件:因为可能被or语句拆分成多个RouteCalculateUnit,条件分散了 - * @return - */ - private 
Map>> getAllConditions() { - Map>> map = new HashMap>>(); - for(RouteCalculateUnit unit : ctx.getRouteCalculateUnits()) { - if(unit != null && unit.getTablesAndConditions() != null) { - map.putAll(unit.getTablesAndConditions()); - } - } - - return map; - } - - private void tryRoute(SchemaConfig schema, RouteResultset rrs, LayerCachePool cachePool) throws SQLNonTransientException - { - if(rrs.isFinishedRoute()) - { - return;//避免重复路由 - } - - //无表的select语句直接路由带任一节点 - if(ctx.getTables() == null || ctx.getTables().size() == 0) { - rrs = RouterUtil.routeToSingleNode(rrs, schema.getRandomDataNode(), ctx.getSql()); - rrs.setFinishedRoute(true); - return; - } + rrs.changeNodeSqlAfterAddLimit(schema,getCurentDbType(),sql,0, limitStart + limitSize, true); + + //设置改写后的sql + ctx.setSql(sql); + + } else + { + + rrs.changeNodeSqlAfterAddLimit(schema,getCurentDbType(),getCtx().getSql(),rrs.getLimitStart(), rrs.getLimitSize(), true); + // ctx.setSql(nativeSql); + + } + + + } + + if(rrs.isDistTable()){ + SQLTableSource from = mysqlSelectQuery.getFrom(); + + for (RouteResultsetNode node : rrs.getNodes()) { + SQLIdentifierExpr sqlIdentifierExpr = new SQLIdentifierExpr(); + sqlIdentifierExpr.setParent(from); + sqlIdentifierExpr.setName(node.getSubTableName()); + SQLExprTableSource from2 = new SQLExprTableSource(sqlIdentifierExpr); + from2.setAlias(from.getAlias()); + mysqlSelectQuery.setFrom(from2); + node.setStatement(stmt.toString()); + } + } + + rrs.setCacheAble(isNeedCache(schema, rrs, mysqlSelectQuery, allConditions)); + } + + } + + /** + * 获取所有的条件:因为可能被or语句拆分成多个RouteCalculateUnit,条件分散了 + * @return + */ + private Map>> getAllConditions() { + Map>> map = new HashMap>>(); + for(RouteCalculateUnit unit : ctx.getRouteCalculateUnits()) { + if(unit != null && unit.getTablesAndConditions() != null) { + map.putAll(unit.getTablesAndConditions()); + } + } + + return map; + } + + private void tryRoute(SchemaConfig schema, RouteResultset rrs, LayerCachePool cachePool) throws 
SQLNonTransientException { + if(rrs.isFinishedRoute()) + { + return;//避免重复路由 + } + + //无表的select语句直接路由带任一节点 + if((ctx.getTables() == null || ctx.getTables().size() == 0)&&(ctx.getTableAliasMap()==null||ctx.getTableAliasMap().isEmpty())) { + rrs = RouterUtil.routeToSingleNode(rrs, schema.getRandomDataNode(), ctx.getSql()); + rrs.setFinishedRoute(true); + return; + } // RouterUtil.tryRouteForTables(schema, ctx, rrs, true, cachePool); - SortedSet nodeSet = new TreeSet(); - boolean isAllGlobalTable = RouterUtil.isAllGlobalTable(ctx, schema); - for (RouteCalculateUnit unit : ctx.getRouteCalculateUnits()) { - RouteResultset rrsTmp = RouterUtil.tryRouteForTables(schema, ctx, unit, rrs, true, cachePool); - if (rrsTmp != null) { - for (RouteResultsetNode node : rrsTmp.getNodes()) { - nodeSet.add(node); + SortedSet nodeSet = new TreeSet(); + boolean isAllGlobalTable = RouterUtil.isAllGlobalTable(ctx, schema); + for (RouteCalculateUnit unit : ctx.getRouteCalculateUnits()) { + RouteResultset rrsTmp = RouterUtil.tryRouteForTables(schema, ctx, unit, rrs, true, cachePool); + if (rrsTmp != null&&rrsTmp.getNodes()!=null) { + for (RouteResultsetNode node : rrsTmp.getNodes()) { + nodeSet.add(node); + } + } + if(isAllGlobalTable) {//都是全局表时只计算一遍路由 + break; + } + } + + if(nodeSet.size() == 0) { + + Collection stringCollection= ctx.getTableAliasMap().values() ; + for (String table : stringCollection) + { + if(table!=null&&table.toLowerCase().contains("information_schema.")) + { + rrs = RouterUtil.routeToSingleNode(rrs, schema.getRandomDataNode(), ctx.getSql()); + rrs.setFinishedRoute(true); + return; } } - if(isAllGlobalTable) {//都是全局表时只计算一遍路由 - break; - } - } - - if(nodeSet.size() == 0) { - String msg = " find no Route:" + ctx.getSql(); - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - - RouteResultsetNode[] nodes = new RouteResultsetNode[nodeSet.size()]; - int i = 0; - for (Iterator iterator = nodeSet.iterator(); iterator.hasNext();) { - nodes[i] = 
(RouteResultsetNode) iterator.next(); - i++; - - } - - rrs.setNodes(nodes); - rrs.setFinishedRoute(true); - } - - - protected String getCurentDbType() - { - return JdbcConstants.MYSQL; - } - - - - - protected String getSql( RouteResultset rrs,SQLStatement stmt, boolean isNeedAddLimit) - { - if(getCurentDbType().equalsIgnoreCase("mysql")&&(isNeedChangeLimit(rrs)||isNeedAddLimit)) - { - - return stmt.toString(); - - } - - return getCtx().getSql(); - } - - - - protected boolean isNeedChangeLimit(RouteResultset rrs) { - if(rrs.getNodes() == null) { - return false; - } else { - if(rrs.getNodes().length > 1) { - return true; - } - return false; - - } - } - - private boolean isNeedCache(SchemaConfig schema, RouteResultset rrs, - MySqlSelectQueryBlock mysqlSelectQuery, Map>> allConditions) { - if(ctx.getTables() == null || ctx.getTables().size() == 0 ) { - return false; - } - TableConfig tc = schema.getTables().get(ctx.getTables().get(0)); - if(tc==null ||(ctx.getTables().size() == 1 && tc.isGlobalTable()) - ) {//|| (ctx.getTables().size() == 1) && tc.getRule() == null && tc.getDataNodes().size() == 1 - return false; - } else { - //单表主键查询 - if(ctx.getTables().size() == 1) { - String tableName = ctx.getTables().get(0); - String primaryKey = schema.getTables().get(tableName).getPrimaryKey(); + String msg = " find no Route:" + ctx.getSql(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + + RouteResultsetNode[] nodes = new RouteResultsetNode[nodeSet.size()]; + int i = 0; + for (Iterator iterator = nodeSet.iterator(); iterator.hasNext();) { + nodes[i] = (RouteResultsetNode) iterator.next(); + i++; + + } + + rrs.setNodes(nodes); + rrs.setFinishedRoute(true); + } + + + protected String getCurentDbType() + { + return JdbcConstants.MYSQL; + } + + + + + protected String getSql( RouteResultset rrs,SQLStatement stmt, boolean isNeedAddLimit) + { + if(getCurentDbType().equalsIgnoreCase("mysql")&&(isNeedChangeLimit(rrs)||isNeedAddLimit)) + { + + return 
stmt.toString(); + + } + + return getCtx().getSql(); + } + + + + protected boolean isNeedChangeLimit(RouteResultset rrs) { + if(rrs.getNodes() == null) { + return false; + } else { + if(rrs.getNodes().length > 1) { + return true; + } + return false; + + } + } + + private boolean isNeedCache(SchemaConfig schema, RouteResultset rrs, + MySqlSelectQueryBlock mysqlSelectQuery, Map>> allConditions) { + if(ctx.getTables() == null || ctx.getTables().size() == 0 ) { + return false; + } + TableConfig tc = schema.getTables().get(ctx.getTables().get(0)); + if(tc==null ||(ctx.getTables().size() == 1 && tc.isGlobalTable()) + ) {//|| (ctx.getTables().size() == 1) && tc.getRule() == null && tc.getDataNodes().size() == 1 + return false; + } else { + //单表主键查询 + if(ctx.getTables().size() == 1) { + String tableName = ctx.getTables().get(0); + String primaryKey = schema.getTables().get(tableName).getPrimaryKey(); // schema.getTables().get(ctx.getTables().get(0)).getParentKey() != null; - if(ctx.getRouteCalculateUnit().getTablesAndConditions().get(tableName) != null - && ctx.getRouteCalculateUnit().getTablesAndConditions().get(tableName).get(primaryKey) != null - && tc.getDataNodes().size() > 1) {//有主键条件 - return false; - } - } - return true; - } - } - - /** - * 单表且是全局表 - * 单表且rule为空且nodeNodes只有一个 - * @param schema - * @param rrs - * @param mysqlSelectQuery - * @return - */ - private boolean isNeedAddLimit(SchemaConfig schema, RouteResultset rrs, - MySqlSelectQueryBlock mysqlSelectQuery, Map>> allConditions) { + if(ctx.getRouteCalculateUnit().getTablesAndConditions().get(tableName) != null + && ctx.getRouteCalculateUnit().getTablesAndConditions().get(tableName).get(primaryKey) != null + && tc.getDataNodes().size() > 1) {//有主键条件 + return false; + } + //全局表不缓存 + }else if(RouterUtil.isAllGlobalTable(ctx, schema)){ + return false; + } + return true; + } + } + + /** + * 单表且是全局表 + * 单表且rule为空且nodeNodes只有一个 + * @param schema + * @param rrs + * @param mysqlSelectQuery + * @return + */ + private 
boolean isNeedAddLimit(SchemaConfig schema, RouteResultset rrs, + MySqlSelectQueryBlock mysqlSelectQuery, Map>> allConditions) { // ctx.getTablesAndConditions().get(key)) - if(rrs.getLimitSize()>-1) - { - return false; - }else - if(schema.getDefaultMaxLimit() == -1) { - return false; - } else if (mysqlSelectQuery.getLimit() != null) {//语句中已有limit - return false; - } else if(ctx.getTables().size() == 1) { - String tableName = ctx.getTables().get(0); - TableConfig tableConfig = schema.getTables().get(tableName); - if(tableConfig==null) - { - return schema.getDefaultMaxLimit() > -1; // 找不到则取schema的配置 - } - - boolean isNeedAddLimit= tableConfig.isNeedAddLimit(); - if(!isNeedAddLimit) - { - return false;//优先从配置文件取 - } - - if(schema.getTables().get(tableName).isGlobalTable()) { - return true; - } - - String primaryKey = schema.getTables().get(tableName).getPrimaryKey(); + if(rrs.getLimitSize()>-1) + { + return false; + }else + if(schema.getDefaultMaxLimit() == -1) { + return false; + } else if (mysqlSelectQuery.getLimit() != null) {//语句中已有limit + return false; + } else if(ctx.getTables().size() == 1) { + String tableName = ctx.getTables().get(0); + TableConfig tableConfig = schema.getTables().get(tableName); + if(tableConfig==null) + { + return schema.getDefaultMaxLimit() > -1; // 找不到则取schema的配置 + } + + boolean isNeedAddLimit= tableConfig.isNeedAddLimit(); + if(!isNeedAddLimit) + { + return false;//优先从配置文件取 + } + + if(schema.getTables().get(tableName).isGlobalTable()) { + return true; + } + + String primaryKey = schema.getTables().get(tableName).getPrimaryKey(); // schema.getTables().get(ctx.getTables().get(0)).getParentKey() != null; - if(allConditions.get(tableName) == null) {//无条件 - return true; - } - - if (allConditions.get(tableName).get(primaryKey) != null) {//条件中带主键 - return false; - } - - return true; - } else if(rrs.hasPrimaryKeyToCache() && ctx.getTables().size() == 1){//只有一个表且条件中有主键,不需要limit了,因为主键只能查到一条记录 - return false; - } else {//多表或无表 - return false; - } - 
- } - private String getAliaColumn(Map aliaColumns,String column ){ - String alia=aliaColumns.get(column); - if (alia==null){ - if(column.indexOf(".") < 0) { - String col = "." + column; - String col2 = ".`" + column+"`"; - //展开aliaColumns,将之类的键值对展开成 - for(Map.Entry entry : aliaColumns.entrySet()) { - if(entry.getKey().endsWith(col)||entry.getKey().endsWith(col2)) { - if(entry.getValue() != null && entry.getValue().indexOf(".") > 0) { - return column; - } - return entry.getValue(); - } - } - } - - return column; - } - else { - return alia; - } - } - - private String[] buildGroupByCols(List groupByItems,Map aliaColumns) { - String[] groupByCols = new String[groupByItems.size()]; - for(int i= 0; i < groupByItems.size(); i++) { + if(allConditions.get(tableName) == null) {//无条件 + return true; + } + + if (allConditions.get(tableName).get(primaryKey) != null) {//条件中带主键 + return false; + } + + return true; + } else if(rrs.hasPrimaryKeyToCache() && ctx.getTables().size() == 1){//只有一个表且条件中有主键,不需要limit了,因为主键只能查到一条记录 + return false; + } else {//多表或无表 + return false; + } + + } + private String getAliaColumn(Map aliaColumns,String column ){ + String alia=aliaColumns.get(column); + if (alia==null){ + if(column.indexOf(".") < 0) { + String col = "." 
+ column; + String col2 = ".`" + column+"`"; + //展开aliaColumns,将之类的键值对展开成 + for(Map.Entry entry : aliaColumns.entrySet()) { + if(entry.getKey().endsWith(col)||entry.getKey().endsWith(col2)) { + if(entry.getValue() != null && entry.getValue().indexOf(".") > 0) { + return column; + } + return entry.getValue(); + } + } + } + + return column; + } + else { + return alia; + } + } + + private String[] buildGroupByCols(List groupByItems,Map aliaColumns) { + String[] groupByCols = new String[groupByItems.size()]; + for(int i= 0; i < groupByItems.size(); i++) { SQLExpr sqlExpr = groupByItems.get(i); - String column; + String column = null; if(sqlExpr instanceof SQLIdentifierExpr ) { column=((SQLIdentifierExpr) sqlExpr).getName(); - } else - { - SQLExpr expr = ((MySqlSelectGroupByExpr) sqlExpr).getExpr(); + } else if(sqlExpr instanceof SQLMethodInvokeExpr){ + column = ((SQLMethodInvokeExpr) sqlExpr).toString(); + } else if(sqlExpr instanceof MySqlOrderingExpr){ + //todo czn + SQLExpr expr = ((MySqlOrderingExpr) sqlExpr).getExpr(); if (expr instanceof SQLName) { @@ -570,77 +677,88 @@ private String[] buildGroupByCols(List groupByItems,Map { column = StringUtil.removeBackquote(expr.toString()); } - } - int dotIndex=column.indexOf(".") ; - if(dotIndex!=-1) - { - //此步骤得到的column必须是不带.的,有别名的用别名,无别名的用字段名 - column=column.substring(dotIndex+1) ; - } - groupByCols[i] = getAliaColumn(aliaColumns,column);//column; - } - return groupByCols; - } - - protected LinkedHashMap buildOrderByCols(List orderByItems,Map aliaColumns) { - LinkedHashMap map = new LinkedHashMap(); - for(int i= 0; i < orderByItems.size(); i++) { - SQLOrderingSpecification type = orderByItems.get(i).getType(); + } else if(sqlExpr instanceof SQLPropertyExpr){ + /** + * 针对子查询别名,例如select id from (select h.id from hotnews h union select h.title from hotnews h ) as t1 group by t1.id; + */ + column = sqlExpr.toString(); + } + if(column == null){ + column = sqlExpr.toString(); + } + int dotIndex=column.indexOf(".") ; + int 
bracketIndex=column.indexOf("(") ; + //通过判断含有括号来决定是否为函数列 + if(dotIndex!=-1&&bracketIndex==-1) + { + //此步骤得到的column必须是不带.的,有别名的用别名,无别名的用字段名 + column=column.substring(dotIndex+1) ; + } + groupByCols[i] = getAliaColumn(aliaColumns,column);//column; + } + return groupByCols; + } + + protected LinkedHashMap buildOrderByCols(List orderByItems,Map aliaColumns) { + LinkedHashMap map = new LinkedHashMap(); + for(int i= 0; i < orderByItems.size(); i++) { + SQLOrderingSpecification type = orderByItems.get(i).getType(); //orderColumn只记录字段名称,因为返回的结果集是不带表名的。 - SQLExpr expr = orderByItems.get(i).getExpr(); - String col; - if (expr instanceof SQLName) { - col = ((SQLName)expr).getSimpleName(); - } - else { - col =expr.toString(); - } - if(type == null) { - type = SQLOrderingSpecification.ASC; - } - col=getAliaColumn(aliaColumns,col);//此步骤得到的col必须是不带.的,有别名的用别名,无别名的用字段名 - map.put(col, type == SQLOrderingSpecification.ASC ? OrderCol.COL_ORDER_TYPE_ASC : OrderCol.COL_ORDER_TYPE_DESC); - } - return map; - } - - private boolean isConditionAlwaysTrue(SQLStatement statement) { - SQLSelectStatement selectStmt = (SQLSelectStatement)statement; - SQLSelectQuery sqlSelectQuery = selectStmt.getSelect().getQuery(); - if(sqlSelectQuery instanceof MySqlSelectQueryBlock) { - MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery(); - SQLExpr expr = mysqlSelectQuery.getWhere(); - - Object o = WallVisitorUtils.getValue(expr); - if(Boolean.TRUE.equals(o)) { - return true; - } - return false; - } else {//union - return false; - } - - } - - protected void setLimitIFChange(SQLStatement stmt, RouteResultset rrs, SchemaConfig schema, SQLBinaryOpExpr one, int firstrownum, int lastrownum) - { - rrs.setLimitStart(firstrownum); - rrs.setLimitSize(lastrownum - firstrownum); - LayerCachePool tableId2DataNodeCache = (LayerCachePool) MycatServer.getInstance().getCacheService().getCachePool("TableID2DataNodeCache"); - try - { - tryRoute(schema, rrs, tableId2DataNodeCache); - 
} catch (SQLNonTransientException e) - { - throw new RuntimeException(e); - } - if (isNeedChangeLimit(rrs)) - { - one.setRight(new SQLIntegerExpr(0)); - String sql = stmt.toString(); - rrs.changeNodeSqlAfterAddLimit(schema,getCurentDbType(), sql,0,lastrownum, false); - //设置改写后的sql - getCtx().setSql(sql); - } - } -} \ No newline at end of file + SQLExpr expr = orderByItems.get(i).getExpr(); + String col; + if (expr instanceof SQLName) { + col = ((SQLName)expr).getSimpleName(); + } + else { + col =expr.toString(); + } + if(type == null) { + type = SQLOrderingSpecification.ASC; + } + col=getAliaColumn(aliaColumns,col);//此步骤得到的col必须是不带.的,有别名的用别名,无别名的用字段名 + map.put(col, type == SQLOrderingSpecification.ASC ? OrderCol.COL_ORDER_TYPE_ASC : OrderCol.COL_ORDER_TYPE_DESC); + } + return map; + } + + private boolean isConditionAlwaysTrue(SQLStatement statement) { + SQLSelectStatement selectStmt = (SQLSelectStatement)statement; + SQLSelectQuery sqlSelectQuery = selectStmt.getSelect().getQuery(); + if(sqlSelectQuery instanceof MySqlSelectQueryBlock) { + MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery(); + SQLExpr expr = mysqlSelectQuery.getWhere(); + + Object o = WallVisitorUtils.getValue(expr); + if(Boolean.TRUE.equals(o)) { + return true; + } + return false; + } else {//union + return false; + } + + } + + protected void setLimitIFChange(SQLStatement stmt, RouteResultset rrs, SchemaConfig schema, SQLBinaryOpExpr one, int firstrownum, int lastrownum) + { + rrs.setLimitStart(firstrownum); + rrs.setLimitSize(lastrownum - firstrownum); + LayerCachePool tableId2DataNodeCache = (LayerCachePool) MycatServer.getInstance().getCacheService().getCachePool("TableID2DataNodeCache"); + try + { + tryRoute(schema, rrs, tableId2DataNodeCache); + } catch (SQLNonTransientException e) + { + throw new RuntimeException(e); + } + if (isNeedChangeLimit(rrs)) + { + one.setRight(new SQLIntegerExpr(0)); + String curentDbType 
="db2".equalsIgnoreCase(this.getCurentDbType())?"oracle":getCurentDbType(); + String sql = SQLUtils.toSQLString(stmt, curentDbType);; + rrs.changeNodeSqlAfterAddLimit(schema,getCurentDbType(), sql,0,lastrownum, false); + //设置改写后的sql + getCtx().setSql(sql); + } + } +} diff --git a/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectSqlServerParser.java b/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectSqlServerParser.java index 9e80b23da..bac21508d 100644 --- a/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectSqlServerParser.java +++ b/src/main/java/io/mycat/route/parser/druid/impl/DruidSelectSqlServerParser.java @@ -1,8 +1,5 @@ package io.mycat.route.parser.druid.impl; -import io.mycat.route.RouteResultset; -import io.mycat.server.config.node.SchemaConfig; - import java.util.List; import java.util.Map; @@ -26,10 +23,15 @@ import com.alibaba.druid.sql.dialect.sqlserver.parser.SQLServerStatementParser; import com.alibaba.druid.util.JdbcConstants; -public class DruidSelectSqlServerParser extends DruidSelectParser { +import io.mycat.config.model.SchemaConfig; +import io.mycat.route.RouteResultset; +public class DruidSelectSqlServerParser extends DruidSelectParser { - protected boolean isNeedParseOrderAgg=true; + public DruidSelectSqlServerParser(){ + super(); + isNeedParseOrderAgg=true; + } @Override public void statementParse(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt) { @@ -166,7 +168,9 @@ private void parseSqlServerPageSql(SQLStatement stmt, RouteResultset rrs, SQLSer { SQLIntegerExpr right = (SQLIntegerExpr) one.getRight(); int firstrownum = right.getNumber().intValue(); - if (operator == SQLBinaryOperator.GreaterThanOrEqual&&firstrownum!=0) firstrownum = firstrownum - 1; + if (operator == SQLBinaryOperator.GreaterThanOrEqual&&firstrownum!=0) { + firstrownum = firstrownum - 1; + } int lastrownum =subTop; setLimitIFChange(stmt, rrs, schema, one, firstrownum, lastrownum); if(orderBy!=null) @@ -184,7 +188,9 @@ private void 
parseSqlServerPageSql(SQLStatement stmt, RouteResultset rrs, SQLSer { SQLIntegerExpr right = (SQLIntegerExpr) one.getRight(); int firstrownum = right.getNumber().intValue(); - if (operator == SQLBinaryOperator.LessThan&&firstrownum!=0) firstrownum = firstrownum - 1; + if (operator == SQLBinaryOperator.LessThan&&firstrownum!=0) { + firstrownum = firstrownum - 1; + } if (subSelect instanceof SQLServerSelectQueryBlock) { rrs.setLimitStart(0); @@ -212,26 +218,34 @@ private void parseSqlServerPageSql(SQLStatement stmt, RouteResultset rrs, SQLSer { small=leftE; firstrownum=((SQLIntegerExpr) leftE.getRight()).getNumber().intValue(); - if(leftE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual &&firstrownum!=0) firstrownum = firstrownum - 1; + if(leftE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual &&firstrownum!=0) { + firstrownum = firstrownum - 1; + } } else if(leftE.getRight() instanceof SQLIntegerExpr&&(leftE.getOperator()==SQLBinaryOperator.LessThan||leftE.getOperator()==SQLBinaryOperator.LessThanOrEqual)) { larger=leftE; lastrownum=((SQLIntegerExpr) leftE.getRight()).getNumber().intValue(); - if(leftE.getOperator()==SQLBinaryOperator.LessThan&&lastrownum!=0) lastrownum = lastrownum - 1; + if(leftE.getOperator()==SQLBinaryOperator.LessThan&&lastrownum!=0) { + lastrownum = lastrownum - 1; + } } if(rightE.getRight() instanceof SQLIntegerExpr&&(rightE.getOperator()==SQLBinaryOperator.GreaterThan||rightE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual)) { small=rightE; firstrownum=((SQLIntegerExpr) rightE.getRight()).getNumber().intValue(); - if(rightE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual&&firstrownum!=0) firstrownum = firstrownum - 1; + if(rightE.getOperator()==SQLBinaryOperator.GreaterThanOrEqual&&firstrownum!=0) { + firstrownum = firstrownum - 1; + } } else if(rightE.getRight() instanceof SQLIntegerExpr&&(rightE.getOperator()==SQLBinaryOperator.LessThan||rightE.getOperator()==SQLBinaryOperator.LessThanOrEqual)) { larger=rightE; 
lastrownum=((SQLIntegerExpr) rightE.getRight()).getNumber().intValue(); - if(rightE.getOperator()==SQLBinaryOperator.LessThan&&lastrownum!=0) lastrownum = lastrownum - 1; + if(rightE.getOperator()==SQLBinaryOperator.LessThan&&lastrownum!=0) { + lastrownum = lastrownum - 1; + } } if(small!=null&&larger!=null) { @@ -259,6 +273,6 @@ private void parseSqlServerPageSql(SQLStatement stmt, RouteResultset rrs, SQLSer } - + } diff --git a/src/main/java/io/mycat/route/parser/druid/impl/DruidUpdateParser.java b/src/main/java/io/mycat/route/parser/druid/impl/DruidUpdateParser.java index bcd9e5e47..758a0255d 100644 --- a/src/main/java/io/mycat/route/parser/druid/impl/DruidUpdateParser.java +++ b/src/main/java/io/mycat/route/parser/druid/impl/DruidUpdateParser.java @@ -1,70 +1,57 @@ package io.mycat.route.parser.druid.impl; +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.statement.SQLUpdateSetItem; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlUpdateStatement; +import com.alibaba.druid.sql.ast.SQLExpr; +import com.alibaba.druid.sql.ast.expr.*; +import com.alibaba.druid.sql.ast.statement.SQLUpdateStatement; +import com.alibaba.druid.sql.dialect.mysql.visitor.MySqlSchemaStatVisitor; +import com.alibaba.druid.stat.TableStat; +import com.alibaba.druid.stat.TableStat.Name; + +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; import io.mycat.route.RouteResultset; import io.mycat.route.util.RouterUtil; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.TableConfig; import io.mycat.util.StringUtil; import java.sql.SQLNonTransientException; import java.util.List; - -import com.alibaba.druid.sql.ast.SQLStatement; -import com.alibaba.druid.sql.ast.statement.SQLUpdateSetItem; -import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlUpdateStatement; +import java.util.Map; public class DruidUpdateParser extends DefaultDruidParser { - @Override - public void 
statementParse(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt) throws SQLNonTransientException { - if(ctx.getTables() != null && ctx.getTables().size() > 1 && !schema.isNoSharding()) { - String msg = "multi table related update not supported,tables:" + ctx.getTables(); - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - MySqlUpdateStatement update = (MySqlUpdateStatement)stmt; - String tableName = StringUtil.removeBackquote(update.getTableName().getSimpleName().toUpperCase()); - - List updateSetItem = update.getItems(); - TableConfig tc = schema.getTables().get(tableName); - if (tc == null) { + @Override + public void statementParse(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt) throws SQLNonTransientException { + //这里限制了update分片表的个数只能有一个 + if (ctx.getTables() != null && getUpdateTableCount() > 1 && !schema.isNoSharding()) { + String msg = "multi table related update not supported,tables:" + ctx.getTables(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + MySqlUpdateStatement update = (MySqlUpdateStatement) stmt; + String tableName = StringUtil.removeBackquote(update.getTableName().getSimpleName().toUpperCase()); + + TableConfig tc = schema.getTables().get(tableName); + + if (RouterUtil.isNoSharding(schema, tableName)) {//整个schema都不分库或者该表不拆分 + RouterUtil.routeForTableMeta(rrs, schema, tableName, rrs.getStatement()); + rrs.setFinishedRoute(true); + return; + } + + String partitionColumn = tc.getPartitionColumn(); + String joinKey = tc.getJoinKey(); + if (tc.isGlobalTable() || (partitionColumn == null && joinKey == null)) { + //修改全局表 update 受影响的行数 + RouterUtil.routeToMultiNode(false, rrs, tc.getDataNodes(), rrs.getStatement(), tc.isGlobalTable()); + rrs.setFinishedRoute(true); return; } - if(RouterUtil.isNoSharding(schema,tableName)) {//整个schema都不分库或者该表不拆分 - RouterUtil.routeForTableMeta(rrs, schema, tableName, rrs.getStatement()); - rrs.setFinishedRoute(true); - return; - } - - String partitionColumn = 
tc.getPartitionColumn(); - String joinKey = tc.getJoinKey(); - if(tc.isGlobalTable() || (partitionColumn == null && joinKey == null)) { - //修改全局表 update 受影响的行数 - RouterUtil.routeToMultiNode(false, rrs, tc.getDataNodes(), rrs.getStatement(),tc.isGlobalTable()); - rrs.setFinishedRoute(true); - return; - } - - if(updateSetItem != null && updateSetItem.size() > 0) { - boolean hasParent = (schema.getTables().get(tableName).getParentTC() != null); - for(SQLUpdateSetItem item : updateSetItem) { - String column = StringUtil.removeBackquote(item.getColumn().toString().toUpperCase()); - if(partitionColumn != null && partitionColumn.equals(column)) { - String msg = "partion key can't be updated " + tableName + "->" + partitionColumn; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - if(hasParent) { - if(column.equals(joinKey)) { - String msg = "parent relation column can't be updated " + tableName + "->" + joinKey; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - rrs.setCacheAble(true); - } - } - } - + + confirmShardColumnNotUpdated(update, schema, tableName, partitionColumn, joinKey, rrs); + // if(ctx.getTablesAndConditions().size() > 0) { // Map> map = ctx.getTablesAndConditions().get(tableName); // if(map != null) { @@ -77,9 +64,171 @@ public void statementParse(SchemaConfig schema, RouteResultset rrs, SQLStatement // // } // System.out.println(); - - if(schema.getTables().get(tableName).isGlobalTable() && ctx.getRouteCalculateUnit().getTablesAndConditions().size() > 1) { - throw new SQLNonTransientException("global table not supported multi table related update "+ tableName); - } - } + + if (schema.getTables().get(tableName).isGlobalTable() && ctx.getRouteCalculateUnit().getTablesAndConditions().size() > 1) { + throw new SQLNonTransientException("global table is not supported in multi table related update " + tableName); + } + } + + /** + * 获取更新的表数 + * @author lian + * @date 2016年11月2日 + * @return + */ + private int 
getUpdateTableCount(){ + Map tableMap = this.ctx.getVisitor().getTables(); + int updateTableCount = 0; + for(Name _name : tableMap.keySet()){ + + TableStat ts = tableMap.get(_name); + updateTableCount += ts.getUpdateCount(); + } + return updateTableCount; + } + + /* + * 判断字段是否在SQL AST的节点中,比如 col 在 col = 'A' 中,这里要注意,一些子句中可能会在字段前加上表的别名, + * 比如 t.col = 'A',这两种情况, 操作符(=)左边被druid解析器解析成不同的对象SQLIdentifierExpr(无表别名)和 + * SQLPropertyExpr(有表别名) + */ + private static boolean columnInExpr(SQLExpr sqlExpr, String colName) throws SQLNonTransientException { + String column; + if (sqlExpr instanceof SQLIdentifierExpr) { + column = StringUtil.removeBackquote(((SQLIdentifierExpr) sqlExpr).getName()).toUpperCase(); + } else if (sqlExpr instanceof SQLPropertyExpr) { + column = StringUtil.removeBackquote(((SQLPropertyExpr) sqlExpr).getName()).toUpperCase(); + } else { + throw new SQLNonTransientException("Unhandled SQL AST node type encountered: " + sqlExpr.getClass()); + } + + return column.equals(colName.toUpperCase()); + } + + /* + * 当前节点是不是一个子查询 + * IN (select...), ANY, EXISTS, ALL等关键字, IN (1,2,3...) 
这种对应的是SQLInListExpr + */ + private static boolean isSubQueryClause(SQLExpr sqlExpr) throws SQLNonTransientException { + return (sqlExpr instanceof SQLInSubQueryExpr || sqlExpr instanceof SQLAnyExpr || sqlExpr instanceof SQLAllExpr + || sqlExpr instanceof SQLQueryExpr || sqlExpr instanceof SQLExistsExpr); + } + + /* + * 遍历where子句的AST,寻找是否有与update子句中更新分片字段相同的条件, + * o 如果发现有or或者xor,然后分片字段的条件在or或者xor中的,这种情况update也无法执行,比如 + * update mytab set ptn_col = val, col1 = val1 where col1 = val11 or ptn_col = val; + * 但是下面的这种update是可以执行的 + * update mytab set ptn_col = val, col1 = val1 where ptn_col = val and (col1 = val11 or col2 = val2); + * o 如果没有发现与update子句中更新分片字段相同的条件,则update也无法执行,比如 + * update mytab set ptn_col = val, col1 = val1 where col1 = val11 and col2 = val22; + * o 如果条件之间都是and,且有与更新分片字段相同的条件,这种情况是允许执行的。比如 + * update mytab set ptn_col = val, col1 = val1 where ptn_col = val and col1 = val11 and col2 = val2; + * o 对于一些特殊的运算符,比如between,not,或者子查询,遇到这些子句现在不会去检查分片字段是否在此类子句中, + * 即使分片字段在此类子句中,现在也认为对应的update语句无法执行。 + * + * @param whereClauseExpr where子句的语法树AST + * @param column 分片字段的名字 + * @param value 分片字段要被更新成的值 + * @hasOR 遍历到whereClauseExpr这个节点的时候,其上层路径中是否有OR/XOR关系运算 + * + * @return true,表示update不能执行,false表示可以执行 + */ + private boolean shardColCanBeUpdated(SQLExpr whereClauseExpr, String column, SQLExpr value, boolean hasOR) + throws SQLNonTransientException { + boolean canUpdate = false; + boolean parentHasOR = false; + + if (whereClauseExpr == null) + return false; + + if (whereClauseExpr instanceof SQLBinaryOpExpr) { + SQLBinaryOpExpr nodeOpExpr = (SQLBinaryOpExpr) whereClauseExpr; + /* + * 条件中有or或者xor的,如果分片字段出现在or/xor的一个子句中,则此update + * 语句无法执行 + */ + if ((nodeOpExpr.getOperator() == SQLBinaryOperator.BooleanOr) || + (nodeOpExpr.getOperator() == SQLBinaryOperator.BooleanXor)) { + parentHasOR = true; + } + // 发现类似 col = value 的子句 + if (nodeOpExpr.getOperator() == SQLBinaryOperator.Equality) { + boolean foundCol; + SQLExpr leftExpr = nodeOpExpr.getLeft(); + SQLExpr 
rightExpr = nodeOpExpr.getRight(); + + foundCol = columnInExpr(leftExpr, column); + + // 发现col = value子句,col刚好是分片字段,比较value与update要更新的值是否一样,并且是否在or/xor子句中 + if (foundCol) { + if (rightExpr.getClass() != value.getClass()) { + throw new SQLNonTransientException("SQL AST nodes type mismatch!"); + } + + canUpdate = rightExpr.toString().equals(value.toString()) && (!hasOR) && (!parentHasOR); + } + } else if (nodeOpExpr.getOperator().isLogical()) { + if (nodeOpExpr.getLeft() != null) { + if (nodeOpExpr.getLeft() instanceof SQLBinaryOpExpr) { + canUpdate = shardColCanBeUpdated(nodeOpExpr.getLeft(), column, value, parentHasOR); + } + // else + // 此子语句不是 =,>,<等关系运算符(对应的类是SQLBinaryOpExpr)。比如between X and Y + // 或者 NOT,或者单独的子查询,这些情况,我们不做处理 + } + if ((!canUpdate) && nodeOpExpr.getRight() != null) { + if (nodeOpExpr.getRight() instanceof SQLBinaryOpExpr) { + canUpdate = shardColCanBeUpdated(nodeOpExpr.getRight(), column, value, parentHasOR); + } + // else + // 此子语句不是 =,>,<等关系运算符(对应的类是SQLBinaryOpExpr)。比如between X and Y + // 或者 NOT,或者单独的子查询,这些情况,我们不做处理 + } + } else if (isSubQueryClause(nodeOpExpr)){ + // 对于子查询的检查有点复杂,这里暂时不支持 + return false; + } + // else + // 其他类型的子句,忽略, 如果分片字段在这类子句中,此类情况目前不做处理,将返回false + } + // else + //此处说明update的where只有一个条件,并且不是 =,>,<等关系运算符(对应的类是SQLBinaryOpExpr)。比如between X and Y + // 或者 NOT,或者单独的子查询,这些情况,我们都不做处理 + + return canUpdate; + } + + private void confirmShardColumnNotUpdated(SQLUpdateStatement update,SchemaConfig schema,String tableName,String partitionColumn,String joinKey,RouteResultset rrs) throws SQLNonTransientException { + List updateSetItem = update.getItems(); + if (updateSetItem != null && updateSetItem.size() > 0) { + boolean hasParent = (schema.getTables().get(tableName).getParentTC() != null); + for (SQLUpdateSetItem item : updateSetItem) { + String column = StringUtil.removeBackquote(item.getColumn().toString().toUpperCase()); + //考虑别名,前面已经限制了update分片表的个数只能有一个,所以这里别名只能是分片表的 + if (column.contains(StringUtil.TABLE_COLUMN_SEPARATOR)) { + 
column = column.substring(column.indexOf(".") + 1).trim().toUpperCase(); + } + if (partitionColumn != null && partitionColumn.equals(column)) { + boolean canUpdate; + canUpdate = ((update.getWhere() != null) && shardColCanBeUpdated(update.getWhere(), + partitionColumn, item.getValue(), false)); + + if (!canUpdate) { + String msg = "Sharding column can't be updated " + tableName + "->" + partitionColumn; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + } + if (hasParent) { + if (column.equals(joinKey)) { + String msg = "Parent relevant column can't be updated " + tableName + "->" + joinKey; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + rrs.setCacheAble(true); + } + } + } + } } diff --git a/src/main/java/io/mycat/route/parser/primitive/FunctionParser.java b/src/main/java/io/mycat/route/parser/primitive/FunctionParser.java new file mode 100644 index 000000000..f3d5ae462 --- /dev/null +++ b/src/main/java/io/mycat/route/parser/primitive/FunctionParser.java @@ -0,0 +1,121 @@ +package io.mycat.route.parser.primitive; + +import io.mycat.route.parser.primitive.Model.Commons; +import io.mycat.route.parser.primitive.Model.Field; +import io.mycat.route.parser.primitive.Model.Function; +import io.mycat.route.parser.primitive.Model.Identifier; +import io.mycat.util.StringUtil; + +import java.sql.SQLNonTransientException; +import java.util.LinkedList; +import java.util.List; +import java.util.Stack; + +/** + * @author Hash Zhang + * @version 1.0.0 + * @date 2016/7/26 + */ +public class FunctionParser { + public static Function parseFunction(String function) throws SQLNonTransientException { + StringBuilder buffer = new StringBuilder(); + Stack functions = new Stack<>(); + + int flag = 0; + for (int i = 0; i < function.length(); i++) { + char current = function.charAt(i); + switch (current) { + case Commons.LEFT_BRACKET: + if (flag == 0) { + String currentIdentifier = buffer.toString().trim(); + buffer = new StringBuilder(); + if 
(!StringUtil.isEmpty(currentIdentifier)) { + Function function1 = new Function(currentIdentifier); + if (!functions.empty() && functions.peek() != null) { + functions.peek().getArguments().add(function1); + } + functions.push(function1); + } + break; + } + buffer.append(current); + break; + + case Commons.ARGUMENT_SEPARATOR: + if (flag == 0 || flag == 3) { + String currentIdentifier = buffer.toString().trim(); + buffer = new StringBuilder(); + if (!StringUtil.isEmpty(currentIdentifier)) { + if (flag == 3) { + flag = 0; + Identifier identifier = new Identifier(currentIdentifier); + functions.peek().getArguments().add(identifier); + } else { + Field field = new Field(currentIdentifier); + functions.peek().getArguments().add(field); + } + } + break; + } + buffer.append(current); + break; + case Commons.RIGHT_BRACKET: + if (flag != 1 && flag != 2) { + String currentIdentifier = buffer.toString().trim(); + buffer = new StringBuilder(); + if (!StringUtil.isEmpty(currentIdentifier)) { + if (flag == 3) { + flag = 0; + Identifier identifier = new Identifier(currentIdentifier); + functions.peek().getArguments().add(identifier); + } else { + Field field = new Field(currentIdentifier); + functions.peek().getArguments().add(field); + } + } + if (flag == 0) { + if (functions.size() == 1) { + return functions.pop(); + } else { + functions.pop(); + } + } + break; + } + buffer.append(current); + break; + case Commons.QUOTE: + if (flag == 0) { + flag = 1; + } else if (flag == 1) { + flag = 3; + } + case Commons.DOUBLE_QUOTE: + if (flag == 0) { + flag = 2; + } else if (flag == 2) { + flag = 3; + } + default: + buffer.append(current); + } + } + throw new SQLNonTransientException("Function is not in right format!"); + } + + public static List getFields(Function function){ + List fields = new LinkedList<>(); + for(Identifier identifier : function.getArguments()){ + if(identifier instanceof Field){ + fields.add(identifier.getName()); + } else if (identifier instanceof Function){ + 
fields.addAll(getFields((Function) identifier)); + } + } + return fields; + } + public static void main(String[] args) throws SQLNonTransientException { + Function function = FunctionParser.parseFunction("function1(arg1,a.t,\"ast()\",function2(c.t,function3(x)))"); + System.out.println(getFields(function)); + } +} diff --git a/src/main/java/io/mycat/route/parser/primitive/Model/Commons.java b/src/main/java/io/mycat/route/parser/primitive/Model/Commons.java new file mode 100644 index 000000000..64c588830 --- /dev/null +++ b/src/main/java/io/mycat/route/parser/primitive/Model/Commons.java @@ -0,0 +1,16 @@ +package io.mycat.route.parser.primitive.Model; + +/** + * @author Hash Zhang + * @version 1.0.0 + * @date 2016/7/26 + */ +public class Commons { + public final static char ARGUMENT_SEPARATOR = ','; + public final static char DEPENDENCY_SEPARATOR = '.'; + public final static char LEFT_BRACKET = '('; + public final static char RIGHT_BRACKET = ')'; + public final static char SLASH = '\\'; + public final static char QUOTE = '\''; + public final static char DOUBLE_QUOTE = '\"'; +} diff --git a/src/main/java/io/mycat/route/parser/primitive/Model/Field.java b/src/main/java/io/mycat/route/parser/primitive/Model/Field.java new file mode 100644 index 000000000..6a9b62fb2 --- /dev/null +++ b/src/main/java/io/mycat/route/parser/primitive/Model/Field.java @@ -0,0 +1,12 @@ +package io.mycat.route.parser.primitive.Model; + +/** + * @author Hash Zhang + * @version 1.0.0 + * @date 2016/7/26 + */ +public class Field extends Identifier { + public Field(String name) { + super(name); + } +} diff --git a/src/main/java/io/mycat/route/parser/primitive/Model/Function.java b/src/main/java/io/mycat/route/parser/primitive/Model/Function.java new file mode 100644 index 000000000..bf084690c --- /dev/null +++ b/src/main/java/io/mycat/route/parser/primitive/Model/Function.java @@ -0,0 +1,22 @@ +package io.mycat.route.parser.primitive.Model; + +import java.util.LinkedList; +import java.util.List; 
+ +/** + * @author Hash Zhang + * @version 1.0.0 + * @date 2016/7/26 + */ +public class Function extends Identifier { + private final List arguments; + + public Function(String name) { + super(name); + this.arguments = new LinkedList<>(); + } + + public List getArguments() { + return arguments; + } +} diff --git a/src/main/java/io/mycat/route/parser/primitive/Model/Identifier.java b/src/main/java/io/mycat/route/parser/primitive/Model/Identifier.java new file mode 100644 index 000000000..62657212c --- /dev/null +++ b/src/main/java/io/mycat/route/parser/primitive/Model/Identifier.java @@ -0,0 +1,18 @@ +package io.mycat.route.parser.primitive.Model; + +/** + * @author Hash Zhang + * @version 1.0.0 + * @date 2016/7/26 + */ +public class Identifier { + private final String name; + + public Identifier(String name) { + this.name = name; + } + + public String getName() { + return name; + } +} diff --git a/src/main/java/io/mycat/route/parser/util/ArrayUtil.java b/src/main/java/io/mycat/route/parser/util/ArrayUtil.java new file mode 100644 index 000000000..9200ea9bf --- /dev/null +++ b/src/main/java/io/mycat/route/parser/util/ArrayUtil.java @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.route.parser.util; + +/** + * @author mycat + */ +public class ArrayUtil { + public static boolean equals(String str1, String str2) { + if (str1 == null) { + return str2 == null; + } + return str1.equals(str2); + } + + public static boolean contains(String[] list, String str) { + if (list == null) { + return false; + } + for (String string : list) { + if (equals(str, string)) { + return true; + } + } + return false; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/util/CharTypes.java b/src/main/java/io/mycat/route/parser/util/CharTypes.java similarity index 98% rename from src/main/java/io/mycat/util/CharTypes.java rename to src/main/java/io/mycat/route/parser/util/CharTypes.java index dfb635d7b..df64fcbc6 100644 --- a/src/main/java/io/mycat/util/CharTypes.java +++ b/src/main/java/io/mycat/route/parser/util/CharTypes.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.util; +package io.mycat.route.parser.util; /** * @author mycat * @author mycat diff --git a/src/main/java/io/mycat/route/util/PageSQLUtil.java b/src/main/java/io/mycat/route/parser/util/PageSQLUtil.java similarity index 90% rename from src/main/java/io/mycat/route/util/PageSQLUtil.java rename to src/main/java/io/mycat/route/parser/util/PageSQLUtil.java index 84f0fc95d..545fd49c8 100644 --- a/src/main/java/io/mycat/route/util/PageSQLUtil.java +++ b/src/main/java/io/mycat/route/parser/util/PageSQLUtil.java @@ -1,26 +1,12 @@ -package io.mycat.route.util; - -import java.util.List; +package io.mycat.route.parser.util; import com.alibaba.druid.sql.PagerUtils; import com.alibaba.druid.sql.SQLUtils; import com.alibaba.druid.sql.ast.SQLExpr; import com.alibaba.druid.sql.ast.SQLOrderBy; import com.alibaba.druid.sql.ast.SQLOver; -import com.alibaba.druid.sql.ast.expr.SQLAggregateExpr; -import com.alibaba.druid.sql.ast.expr.SQLAllColumnExpr; -import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr; -import com.alibaba.druid.sql.ast.expr.SQLBinaryOperator; -import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; -import com.alibaba.druid.sql.ast.expr.SQLNumberExpr; -import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr; -import com.alibaba.druid.sql.ast.statement.SQLSelect; -import com.alibaba.druid.sql.ast.statement.SQLSelectItem; -import com.alibaba.druid.sql.ast.statement.SQLSelectQuery; -import com.alibaba.druid.sql.ast.statement.SQLSelectQueryBlock; -import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; -import com.alibaba.druid.sql.ast.statement.SQLSubqueryTableSource; -import com.alibaba.druid.sql.ast.statement.SQLTableSource; +import com.alibaba.druid.sql.ast.expr.*; +import com.alibaba.druid.sql.ast.statement.*; import com.alibaba.druid.sql.dialect.db2.ast.stmt.DB2SelectQueryBlock; import com.alibaba.druid.sql.dialect.db2.parser.DB2StatementParser; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock; @@ -32,6 
+18,8 @@ import com.alibaba.druid.sql.dialect.sqlserver.parser.SQLServerStatementParser; import com.alibaba.druid.util.JdbcConstants; +import java.util.List; + /** * Created by magicdoom on 2015/3/15. */ @@ -43,7 +31,6 @@ public static String convertLimitToNativePageSql(String dbType, String sql, int { OracleStatementParser oracleParser = new OracleStatementParser(sql); SQLSelectStatement oracleStmt = (SQLSelectStatement) oracleParser.parseStatement(); - return PagerUtils.limit(oracleStmt.getSelect(), JdbcConstants.ORACLE, offset, count); } else if (JdbcConstants.SQL_SERVER.equalsIgnoreCase(dbType)) { @@ -85,8 +72,9 @@ else if (JdbcConstants.DB2.equalsIgnoreCase(dbType)) if(query instanceof PGSelectQueryBlock) { PGSelectQueryBlock pgSelectQueryBlock= (PGSelectQueryBlock) query; - pgSelectQueryBlock.setLimit(null); pgSelectQueryBlock.setOffset(null); + pgSelectQueryBlock.setLimit(null); + } return PagerUtils.limit(select, JdbcConstants.POSTGRESQL, offset, count); diff --git a/src/main/java/io/mycat/util/Pair.java b/src/main/java/io/mycat/route/parser/util/Pair.java similarity index 88% rename from src/main/java/io/mycat/util/Pair.java rename to src/main/java/io/mycat/route/parser/util/Pair.java index d811d43a5..630aa4c37 100644 --- a/src/main/java/io/mycat/util/Pair.java +++ b/src/main/java/io/mycat/route/parser/util/Pair.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.util; +package io.mycat.route.parser.util; /** * (created at 2010-7-21) @@ -74,15 +74,23 @@ public int hashCode() { @SuppressWarnings("rawtypes") @Override public boolean equals(Object obj) { - if (this == obj) return true; - if (!(obj instanceof Pair)) return false; + if (this == obj) { + return true; + } + if (!(obj instanceof Pair)) { + return false; + } Pair that = (Pair) obj; return isEquals(this.key, that.key) && isEquals(this.value, that.value); } private boolean isEquals(Object o1, Object o2) { - if (o1 == o2) return true; - if (o1 == null) return o2 == null; + if (o1 == o2) { + return true; + } + if (o1 == null) { + return o2 == null; + } return o1.equals(o2); } diff --git a/src/main/java/io/mycat/util/PairUtil.java b/src/main/java/io/mycat/route/parser/util/PairUtil.java similarity index 98% rename from src/main/java/io/mycat/util/PairUtil.java rename to src/main/java/io/mycat/route/parser/util/PairUtil.java index 77de5ab7a..35add27d7 100644 --- a/src/main/java/io/mycat/util/PairUtil.java +++ b/src/main/java/io/mycat/route/parser/util/PairUtil.java @@ -21,8 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.util; - +package io.mycat.route.parser.util; /** * @author mycat diff --git a/src/main/java/io/mycat/route/parser/util/ParseString.java b/src/main/java/io/mycat/route/parser/util/ParseString.java new file mode 100644 index 000000000..68bc45454 --- /dev/null +++ b/src/main/java/io/mycat/route/parser/util/ParseString.java @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.route.parser.util; + +/** + * @author mycat + */ +public final class ParseString { + + private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; + + public static byte[] hexString2Bytes(char[] hexString, int offset, int length) { + if (hexString == null) { + return null; + } + if (length == 0) { + return EMPTY_BYTE_ARRAY; + } + boolean odd = length << 31 == Integer.MIN_VALUE; + byte[] bs = new byte[odd ? 
(length + 1) >> 1 : length >> 1]; + for (int i = offset, limit = offset + length; i < limit; ++i) { + char high, low; + if (i == offset && odd) { + high = '0'; + low = hexString[i]; + } else { + high = hexString[i]; + low = hexString[++i]; + } + int b; + switch (high) { + case '0': + b = 0; + break; + case '1': + b = 0x10; + break; + case '2': + b = 0x20; + break; + case '3': + b = 0x30; + break; + case '4': + b = 0x40; + break; + case '5': + b = 0x50; + break; + case '6': + b = 0x60; + break; + case '7': + b = 0x70; + break; + case '8': + b = 0x80; + break; + case '9': + b = 0x90; + break; + case 'a': + case 'A': + b = 0xa0; + break; + case 'b': + case 'B': + b = 0xb0; + break; + case 'c': + case 'C': + b = 0xc0; + break; + case 'd': + case 'D': + b = 0xd0; + break; + case 'e': + case 'E': + b = 0xe0; + break; + case 'f': + case 'F': + b = 0xf0; + break; + default: + throw new IllegalArgumentException("illegal hex-string: " + new String(hexString, offset, length)); + } + switch (low) { + case '0': + break; + case '1': + b += 1; + break; + case '2': + b += 2; + break; + case '3': + b += 3; + break; + case '4': + b += 4; + break; + case '5': + b += 5; + break; + case '6': + b += 6; + break; + case '7': + b += 7; + break; + case '8': + b += 8; + break; + case '9': + b += 9; + break; + case 'a': + case 'A': + b += 10; + break; + case 'b': + case 'B': + b += 11; + break; + case 'c': + case 'C': + b += 12; + break; + case 'd': + case 'D': + b += 13; + break; + case 'e': + case 'E': + b += 14; + break; + case 'f': + case 'F': + b += 15; + break; + default: + throw new IllegalArgumentException("illegal hex-string: " + new String(hexString, offset, length)); + } + bs[(i - offset) >> 1] = (byte) b; + } + return bs; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/util/ParseUtil.java b/src/main/java/io/mycat/route/parser/util/ParseUtil.java similarity index 85% rename from src/main/java/io/mycat/util/ParseUtil.java rename to 
src/main/java/io/mycat/route/parser/util/ParseUtil.java index c15ea6215..682ff0c38 100644 --- a/src/main/java/io/mycat/util/ParseUtil.java +++ b/src/main/java/io/mycat/route/parser/util/ParseUtil.java @@ -1,357 +1,377 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.util; - -/** - * @author mycat - */ -public final class ParseUtil { - - public static boolean isEOF(char c) { - return (c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == ';'); - } - - public static long getSQLId(String stmt) { - int offset = stmt.indexOf('='); - if (offset != -1 && stmt.length() > ++offset) { - String id = stmt.substring(offset).trim(); - try { - return Long.parseLong(id); - } catch (NumberFormatException e) { - } - } - return 0L; - } - - /** - * 'abc' - * - * @param offset stmt.charAt(offset) == first ' - */ - private static String parseString(String stmt, int offset) { - StringBuilder sb = new StringBuilder(); - loop: for (++offset; offset < stmt.length(); ++offset) { - char c = stmt.charAt(offset); - if (c == '\\') { - switch (c = stmt.charAt(++offset)) { - case '0': - sb.append('\0'); - break; - case 'b': - sb.append('\b'); - break; - case 'n': - sb.append('\n'); - break; - case 'r': - sb.append('\r'); - break; - case 't': - sb.append('\t'); - break; - case 'Z': - sb.append((char) 26); - break; - default: - sb.append(c); - } - } else if (c == '\'') { - if (offset + 1 < stmt.length() && stmt.charAt(offset + 1) == '\'') { - ++offset; - sb.append('\''); - } else { - break loop; - } - } else { - sb.append(c); - } - } - return sb.toString(); - } - - /** - * "abc" - * - * @param offset stmt.charAt(offset) == first " - */ - private static String parseString2(String stmt, int offset) { - StringBuilder sb = new StringBuilder(); - loop: for (++offset; offset < stmt.length(); ++offset) { - char c = stmt.charAt(offset); - if (c == '\\') { - switch (c = stmt.charAt(++offset)) { - case '0': - sb.append('\0'); - break; - case 'b': - sb.append('\b'); - break; - case 'n': - sb.append('\n'); - break; - case 'r': - sb.append('\r'); - break; - case 't': - sb.append('\t'); - break; - case 'Z': - sb.append((char) 26); - break; - default: - sb.append(c); - } - } else if (c == '"') { - if (offset + 1 < stmt.length() && 
stmt.charAt(offset + 1) == '"') { - ++offset; - sb.append('"'); - } else { - break loop; - } - } else { - sb.append(c); - } - } - return sb.toString(); - } - - /** - * AS `abc` - * - * @param offset stmt.charAt(offset) == first ` - */ - private static String parseIdentifierEscape(String stmt, int offset) { - StringBuilder sb = new StringBuilder(); - loop: for (++offset; offset < stmt.length(); ++offset) { - char c = stmt.charAt(offset); - if (c == '`') { - if (offset + 1 < stmt.length() && stmt.charAt(offset + 1) == '`') { - ++offset; - sb.append('`'); - } else { - break loop; - } - } else { - sb.append(c); - } - } - return sb.toString(); - } - - /** - * @param aliasIndex for AS id, index of 'i' - */ - public static String parseAlias(String stmt, final int aliasIndex) { - if (aliasIndex < 0 || aliasIndex >= stmt.length()) { - return null; - } - switch (stmt.charAt(aliasIndex)) { - case '\'': - return parseString(stmt, aliasIndex); - case '"': - return parseString2(stmt, aliasIndex); - case '`': - return parseIdentifierEscape(stmt, aliasIndex); - default: - int offset = aliasIndex; - for (; offset < stmt.length() && CharTypes.isIdentifierChar(stmt.charAt(offset)); ++offset); - return stmt.substring(aliasIndex, offset); - } - } - - /** - * 注解保留,注释 - * @param stmt - * @param offset - * @return - */ - public static int comment(String stmt, int offset) { - int len = stmt.length(); - int n = offset; - switch (stmt.charAt(n)) { - case '/': - if (len > ++n && stmt.charAt(n++) == '*' && len > n + 1) { - //对两种注解放过:/*!mycat: 和 /*#mycat: - if(stmt.charAt(n) == '!') { - break; - } else if (stmt.charAt(n) == '#') { - if(len > n + 5 && stmt.charAt(n + 1) == 'm' - && stmt.charAt(n + 2) == 'y' - && stmt.charAt(n + 3) == 'c' - && stmt.charAt(n + 4) == 'a' - && stmt.charAt(n + 5) == 't') { - break; - - } - } - for (int i = n; i < len; ++i) { - if (stmt.charAt(i) == '*') { - int m = i + 1; - if (len > m && stmt.charAt(m) == '/') return m; - } - } - } - break; - case '#': - for (int i 
= n + 1; i < len; ++i) { - if (stmt.charAt(i) == '\n') return i; - } - break; - } - return offset; - } - - public static boolean currentCharIsSep(String stmt, int offset) { - if (stmt.length() > offset) { - switch (stmt.charAt(offset)) { - case ' ': - case '\t': - case '\r': - case '\n': - return true; - default: - return false; - } - } - return true; - } - - /***** - * 检查下一个字符是否为分隔符,并把偏移量加1 - */ - public static boolean nextCharIsSep(String stmt, int offset) { - return currentCharIsSep(stmt, ++offset); - } - - /***** - * 检查下一个字符串是否为期望的字符串,并把偏移量移到从offset开始计算,expectValue之后的位置 - * - * @param stmt 被解析的sql - * @param offset 被解析的sql的当前位置 - * @param nextExpectedString 在stmt中准备查找的字符串 - * @param checkSepChar 当找到expectValue值时,是否检查其后面字符为分隔符号 - * @return 如果包含指定的字符串,则移动相应的偏移量,否则返回值=offset - */ - public static int nextStringIsExpectedWithIgnoreSepChar(String stmt, - int offset, - String nextExpectedString, - boolean checkSepChar) { - if (nextExpectedString == null || nextExpectedString.length() < 1) return offset; - int i = offset; - int index = 0; - char expectedChar; - char actualChar; - boolean isSep; - for (; i < stmt.length() && index < nextExpectedString.length(); ++i) { - if (index == 0) { - isSep = currentCharIsSep(stmt, i); - if (isSep) { - continue; - } - } - actualChar = stmt.charAt(i); - expectedChar = nextExpectedString.charAt(index++); - if (actualChar != expectedChar) { - return offset; - } - } - if (index == nextExpectedString.length()) { - boolean ok = true; - if (checkSepChar) { - ok = nextCharIsSep(stmt, i); - } - if (ok) return i; - } - return offset; - } - - private static final String JSON = "json"; - private static final String EQ = "="; - - //private static final String WHERE = "where"; - //private static final String SET = "set"; - - /********** - * 检查下一个字符串是否json= * - * - * @param stmt 被解析的sql - * @param offset 被解析的sql的当前位置 - * @return 如果包含指定的字符串,则移动相应的偏移量,否则返回值=offset - */ - public static int nextStringIsJsonEq(String stmt, int offset) { - int i = 
offset; - - // / drds 之后的符号 - if (!currentCharIsSep(stmt, ++i)) { - return offset; - } - - // json 串 - int k = nextStringIsExpectedWithIgnoreSepChar(stmt, i, JSON, false); - if (k <= i) { - return offset; - } - i = k; - - // 等于符号 - k = nextStringIsExpectedWithIgnoreSepChar(stmt, i, EQ, false); - if (k <= i) { - return offset; - } - return i; - } - - public static int move(String stmt, int offset, int length) { - int i = offset; - for (; i < stmt.length(); ++i) { - switch (stmt.charAt(i)) { - case ' ': - case '\t': - case '\r': - case '\n': - continue; - case '/': - case '#': - i = comment(stmt, i); - continue; - default: - return i + length; - } - } - return i; - } - - public static boolean compare(String s, int offset, char[] keyword) { - if (s.length() >= offset + keyword.length) { - for (int i = 0; i < keyword.length; ++i, ++offset) { - if (Character.toUpperCase(s.charAt(offset)) != keyword[i]) { - return false; - } - } - return true; - } - return false; - } - -} \ No newline at end of file +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.route.parser.util; + +import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.druid.sql.ast.expr.SQLIntegerExpr; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement; +import com.alibaba.druid.sql.parser.SQLStatementParser; +import io.mycat.route.parser.druid.MycatStatementParser; + +/** + * @author mycat + */ +public final class ParseUtil { + + public static boolean isEOF(char c) { + return (c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == ';'); + } + + public static String parseString(String stmt) { + int offset = stmt.indexOf('='); + if (offset != -1 && stmt.length() > ++offset) { + String txt = stmt.substring(offset).trim(); + return txt; + } + return null; + } + + public static long getSQLId(String stmt) { + int offset = stmt.indexOf('='); + if (offset != -1 && stmt.length() > ++offset) { + String id = stmt.substring(offset).trim(); + try { + return Long.parseLong(id); + } catch (NumberFormatException e) { + } + } + return 0L; + } + + public static String changeInsertAddSlot(String sql,int slotValue) + { + SQLStatementParser parser = new MycatStatementParser(sql); + MySqlInsertStatement insert = (MySqlInsertStatement) parser.parseStatement(); + insert.getColumns().add(new SQLIdentifierExpr("_slot") ); + insert.getValues().getValues().add(new SQLIntegerExpr(slotValue)) ; + return insert.toString(); + } + /** + * 'abc' + * + * @param offset stmt.charAt(offset) == first ' + */ + private static String parseString(String stmt, int offset) { + StringBuilder sb = new StringBuilder(); + loop: for (++offset; offset < stmt.length(); ++offset) { + char c = stmt.charAt(offset); + if (c == '\\') { + switch (c = stmt.charAt(++offset)) { + case '0': + sb.append('\0'); + break; + case 'b': + sb.append('\b'); + break; + case 'n': + sb.append('\n'); + break; + case 'r': 
+ sb.append('\r'); + break; + case 't': + sb.append('\t'); + break; + case 'Z': + sb.append((char) 26); + break; + default: + sb.append(c); + } + } else if (c == '\'') { + if (offset + 1 < stmt.length() && stmt.charAt(offset + 1) == '\'') { + ++offset; + sb.append('\''); + } else { + break loop; + } + } else { + sb.append(c); + } + } + return sb.toString(); + } + + /** + * "abc" + * + * @param offset stmt.charAt(offset) == first " + */ + private static String parseString2(String stmt, int offset) { + StringBuilder sb = new StringBuilder(); + loop: for (++offset; offset < stmt.length(); ++offset) { + char c = stmt.charAt(offset); + if (c == '\\') { + switch (c = stmt.charAt(++offset)) { + case '0': + sb.append('\0'); + break; + case 'b': + sb.append('\b'); + break; + case 'n': + sb.append('\n'); + break; + case 'r': + sb.append('\r'); + break; + case 't': + sb.append('\t'); + break; + case 'Z': + sb.append((char) 26); + break; + default: + sb.append(c); + } + } else if (c == '"') { + if (offset + 1 < stmt.length() && stmt.charAt(offset + 1) == '"') { + ++offset; + sb.append('"'); + } else { + break loop; + } + } else { + sb.append(c); + } + } + return sb.toString(); + } + + /** + * AS `abc` + * + * @param offset stmt.charAt(offset) == first ` + */ + private static String parseIdentifierEscape(String stmt, int offset) { + StringBuilder sb = new StringBuilder(); + loop: for (++offset; offset < stmt.length(); ++offset) { + char c = stmt.charAt(offset); + if (c == '`') { + if (offset + 1 < stmt.length() && stmt.charAt(offset + 1) == '`') { + ++offset; + sb.append('`'); + } else { + break loop; + } + } else { + sb.append(c); + } + } + return sb.toString(); + } + + /** + * @param aliasIndex for AS id, index of 'i' + */ + public static String parseAlias(String stmt, final int aliasIndex) { + if (aliasIndex < 0 || aliasIndex >= stmt.length()) { + return null; + } + switch (stmt.charAt(aliasIndex)) { + case '\'': + return parseString(stmt, aliasIndex); + case '"': + return 
parseString2(stmt, aliasIndex); + case '`': + return parseIdentifierEscape(stmt, aliasIndex); + default: + int offset = aliasIndex; + for (; offset < stmt.length() && CharTypes.isIdentifierChar(stmt.charAt(offset)); ++offset) { + ; + } + return stmt.substring(aliasIndex, offset); + } + } + + /** + * 解析注释,返回stmt中注释结尾的index + * @param stmt + * @param offset + * @return + */ + public static int comment(String stmt, int offset) { + int len = stmt.length(); + int n = offset; + switch (stmt.charAt(n)) { + case '/': + if (len > ++n && stmt.charAt(n++) == '*' && len > n + 1) { + for (int i = n; i < len; ++i) { + if (stmt.charAt(i) == '*') { + int m = i + 1; + if (len > m && stmt.charAt(m) == '/') { + return m; + } + } + } + } + break; + case '#': + for (int i = n + 1; i < len; ++i) { + if (stmt.charAt(i) == '\n') { + return i; + } + } + break; + } + return offset; + } + + public static boolean currentCharIsSep(String stmt, int offset) { + if (stmt.length() > offset) { + switch (stmt.charAt(offset)) { + case ' ': + case '\t': + case '\r': + case '\n': + return true; + default: + return false; + } + } + return true; + } + + /***** + * 检查下一个字符是否为分隔符,并把偏移量加1 + */ + public static boolean nextCharIsSep(String stmt, int offset) { + return currentCharIsSep(stmt, ++offset); + } + + /***** + * 检查下一个字符串是否为期望的字符串,并把偏移量移到从offset开始计算,expectValue之后的位置 + * + * @param stmt 被解析的sql + * @param offset 被解析的sql的当前位置 + * @param nextExpectedString 在stmt中准备查找的字符串 + * @param checkSepChar 当找到expectValue值时,是否检查其后面字符为分隔符号 + * @return 如果包含指定的字符串,则移动相应的偏移量,否则返回值=offset + */ + public static int nextStringIsExpectedWithIgnoreSepChar(String stmt, + int offset, + String nextExpectedString, + boolean checkSepChar) { + if (nextExpectedString == null || nextExpectedString.length() < 1) { + return offset; + } + int i = offset; + int index = 0; + char expectedChar; + char actualChar; + boolean isSep; + for (; i < stmt.length() && index < nextExpectedString.length(); ++i) { + if (index == 0) { + isSep = 
currentCharIsSep(stmt, i); + if (isSep) { + continue; + } + } + actualChar = stmt.charAt(i); + expectedChar = nextExpectedString.charAt(index++); + if (actualChar != expectedChar) { + return offset; + } + } + if (index == nextExpectedString.length()) { + boolean ok = true; + if (checkSepChar) { + ok = nextCharIsSep(stmt, i); + } + if (ok) { + return i; + } + } + return offset; + } + + private static final String JSON = "json"; + private static final String EQ = "="; + + //private static final String WHERE = "where"; + //private static final String SET = "set"; + + /********** + * 检查下一个字符串是否json= * + * + * @param stmt 被解析的sql + * @param offset 被解析的sql的当前位置 + * @return 如果包含指定的字符串,则移动相应的偏移量,否则返回值=offset + */ + public static int nextStringIsJsonEq(String stmt, int offset) { + int i = offset; + + // / drds 之后的符号 + if (!currentCharIsSep(stmt, ++i)) { + return offset; + } + + // json 串 + int k = nextStringIsExpectedWithIgnoreSepChar(stmt, i, JSON, false); + if (k <= i) { + return offset; + } + i = k; + + // 等于符号 + k = nextStringIsExpectedWithIgnoreSepChar(stmt, i, EQ, false); + if (k <= i) { + return offset; + } + return i; + } + + public static int move(String stmt, int offset, int length) { + int i = offset; + for (; i < stmt.length(); ++i) { + switch (stmt.charAt(i)) { + case ' ': + case '\t': + case '\r': + case '\n': + continue; + case '/': + case '#': + i = comment(stmt, i); + continue; + default: + return i + length; + } + } + return i; + } + + public static boolean compare(String s, int offset, char[] keyword) { + if (s.length() >= offset + keyword.length) { + for (int i = 0; i < keyword.length; ++i, ++offset) { + if (Character.toUpperCase(s.charAt(offset)) != keyword[i]) { + return false; + } + } + return true; + } + return false; + } + +} diff --git a/src/main/java/io/mycat/route/parser/util/SQLParserUtils.java b/src/main/java/io/mycat/route/parser/util/SQLParserUtils.java new file mode 100644 index 000000000..d29649e6a --- /dev/null +++ 
b/src/main/java/io/mycat/route/parser/util/SQLParserUtils.java @@ -0,0 +1,304 @@ +package io.mycat.route.parser.util; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Scanner; +import java.util.Stack; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.junit.Assert; + +public class SQLParserUtils { + + class State{ + Integer state = -1; + } + + State state = new State(); //当前处于哪个部分 0: select 1: from 2:where + + Stack stateStack = new Stack(); + + Map tables = new HashMap(); + boolean tableFlag = false; //在From部分出现关键字from join 逗号后的是表名 + + public Map parse(String sql){ + tables.clear(); + stateStack.clear(); + state = null; + + tableFlag = false; + + boolean sFlag = false; //单引号 + boolean dFlag = false; //双引号计数器 + Scanner reader=new Scanner(sql); + reader.useDelimiter(" "); + String value; + while(reader.hasNext()){ + value = reader.next().toLowerCase(); + //前面已经出现单引号,在再次出现单引号之前不做任何处理 + if (sFlag){ + if (value.endsWith("'")&& getCount(value,"'")==1){ + sFlag = false; + continue; + }else if (value.indexOf("'")!=-1){ + value = value.substring(value.indexOf("'")+1); + sFlag = false; + }else { + continue; + } + } + //前面已经出现双引号,在再次出现双引号之前不做任何处理 + if (dFlag){ + if (value.endsWith("\"")&& getCount(value,"\"")==1){ + dFlag = false; + continue; + }else if (value.indexOf("\"")!=-1){ + value = value.substring(value.indexOf("\"")+1); + dFlag = false; + }else { + continue; + } + } + //单引号在select,where部分不做处理 + if (state != null && state.state !=1 && getCount(value,"'")%2==1){ + sFlag = true; + continue; + } + if (state != null && state.state !=1 && getCount(value,"\"")%2==1){ + dFlag = true; + continue; + } + + //SELECT关键字 + if (value.equals("select") || value.equals("(select")){ + //if (state != null) + state = new State(); + state.state = 0; + stateStack.push(state); //入栈 + continue; + } + + + //FROM关键字 + if (value.equals("from") || value.equals("into")|| value.equals("join")){ 
+ state.state = 1; + tableFlag = true; + continue; + } + //From部分出现逗号后面是表名 + if (state.state == 1 && value.equals(",")){ + tableFlag = true; + continue; + } + if (state.state == 1 && tableFlag == true){ + getTableName(value); + continue; + } + + if (state.state == 1 && tableFlag == false){ + if (!value.startsWith("),") &&(value.equals(",")|| value.endsWith(","))){ + tableFlag = true; + continue; + }else if (!value.startsWith("),") && value.indexOf(",")!=-1){ + getTableName(value); + continue; + } + + } + + //WHERE关键字 + if (value.equals("where")){ + state.state = 2; + continue; + } + + if (value.endsWith("(select")){ + + stateStack.push(state); + state = new State(); + state.state = 0; + continue; + } + if ( value.equals(")")|| value.startsWith("),")){ + stateStack.pop(); + state = stateStack.peek(); + tableFlag = value.endsWith(",")?true:false; + if (state.state ==1){ + getTableName(value); + } + continue; + } + + + } + + return tables; + } + + private void getTableName(String str){ + String[] t = str.split(","); + for (int i=tableFlag?0:1; i0 && !t[i].trim().equals("(")) { + tables.put(t[i], ""); + } + } + } + if (!str.endsWith(",")) { + tableFlag = false; + } + } + + + + public static int getCount(String str,String match){ + int count = 0; + int index = 0; + while((index=str.indexOf(match,index))!=-1){ + index = index+match.length(); + count++; + } + return count; + } + + + private boolean test(String sql,String[] tables){ + + + Map result = parse(sql); + if (result.size() != tables.length) { + return false; + } + for (String tmp : tables){ + if (result.get(tmp.toLowerCase())==null) { + return false; + } + } + return true; + + } + private static final String sql1 = "select t3.*,ztd3.TypeDetailName as UseStateName\n" + + "from\n" + + "( \n" + + " select t4.*,ztd4.TypeDetailName as AssistantUnitName\n" + + " from\n" + + " (\n" + + " select t2.*,ztd2.TypeDetailName as UnitName \n" + + " from\n" + + " (\n" + + " select t1.*,ztd1.TypeDetailName as 
MaterielAttributeName \n" + + " from \n" + + " (\n" + + " select m.*,r.RoutingName,u.username,mc.MoldClassName\n" + + " from dbo.D_Materiel as m\n" + + " left join dbo.D_Routing as r\n" + + " on m.RoutingID=r.RoutingID\n" + + " left join dbo.D_MoldClass as mc\n" + + " on m.MoldClassID=mc.MoldClassID\n" + + " left join dbo.D_User as u\n" + + " on u.UserId=m.AddUserID\n" + + " )as t1\n" + + " left join dbo.D_Type_Detail as ztd1 \n" + + " on t1.MaterielAttributeID=ztd1.TypeDetailID\n" + + " )as t2\n" + + " left join dbo.D_Type_Detail as ztd2 \n" + + " on t2.UnitID=ztd2.TypeDetailID\n" + + " ) as t4\n" + + " left join dbo.D_Type_Detail as ztd4 \n" + + " on t4.AssistantUnitID=ztd4.TypeDetailID\n" + + ")as t3\n" + + "left join dbo.D_Type_Detail as ztd3 \n" + + "on t3.UseState=ztd3.TypeDetailID"; + public static void main(String[] args) { + SQLParserUtils parser = new SQLParserUtils(); + //parser.parse("select 'select * from C , D',' select * from E' from B"); + //if (true) return; + List list = new ArrayList(); + list.add(new String[]{"select * from B","B"}); + list.add(new String[]{"select * from B,C","B,C"}); + list.add(new String[]{"select * from B ,C","B,C"}); + list.add(new String[]{"select * from B , C","B,C"}); + list.add(new String[]{"select * from B a","B"}); + list.add(new String[]{"select * from B a,C,D","B,C,D"}); + list.add(new String[]{"select * from B a,C e ,D","B,C,D"}); + list.add(new String[]{"select * from B a,C e ,D f","B,C,D"}); + list.add(new String[]{"select * from B,(select * from C),D","B,C,D"}); + list.add(new String[]{"select * from B, (select * from C),D","B,C,D"}); + list.add(new String[]{"select * from B, ( select * from C),D","B,C,D"}); + list.add(new String[]{"select * from B, ( select * from C),D,E","B,C,D,E"}); + list.add(new String[]{"select * from B,(select * from C ),D","B,C,D"}); + list.add(new String[]{"select * from B,(select * from C ) ,D","B,C,D"}); + list.add(new String[]{"select * from B,(select * from C), D","B,C,D"}); + 
list.add(new String[]{"select * from B,(select * from C ) , D","B,C,D"}); + list.add(new String[]{"select * from B,(select * from C ) , (select * from D )","B,C,D"}); + list.add(new String[]{"select * from B,(select * from C ) , (select * from D ),E","B,C,D,E"}); + list.add(new String[]{"select * from B,(select C.ID , D.ID from C ) , (select * from D ),E","B,C,D,E"}); + + list.add(new String[]{"select (select C.ID,D.ID from C ) from B, D","B,C,D"}); + list.add(new String[]{"select (select C.ID,D.ID from C ) , E from B, D","B,C,D"}); + list.add(new String[]{"select (select C.ID,D.ID from C ), E from B, D","B,C,D"}); + list.add(new String[]{"select (select C.ID,D.ID from C ),E from B, D","B,C,D"}); + list.add(new String[]{"select a from t1 union select b from t2","t1,t2"}); + + + list.add(new String[]{"select * from B where C =1","B"}); + list.add(new String[]{"select * from B where C = (select 1 from D)","B,D"}); + list.add(new String[]{"select * from B where C = (select 1 from D) AND E = (select 2 from F)","B,D,F"}); + + + list.add(new String[]{"select * from B INNER JOIN C ON C.ID = D.ID","B,C"}); + list.add(new String[]{"select * from B INNER JOIN C ON C.ID = D.ID INNER JOIN E ON 1=1","B,C,E"}); + list.add(new String[]{"select * from B INNER JOIN C ON C.ID = (select G,H FROM I) INNER JOIN E ON 1=1","B,C,E,I"}); + list.add(new String[]{"select * from B INNER JOIN C ON C.ID = (select G,H FROM I ) INNER JOIN E ON 1=1","B,C,E,I"}); + list.add(new String[]{"select * from B INNER JOIN C ON C.ID = ( select G,H FROM I ) INNER JOIN E ON 1=1","B,C,E,I"}); + + + + list.add(new String[]{"select 'select * from C' from B","B"}); + list.add(new String[]{"select 'select * from C,D' from B","B"}); + list.add(new String[]{"select 'select * from C , D',E from B","B"}); + list.add(new String[]{"select 'select * from C , D',' select * from E' from B","B"}); + list.add(new String[]{"select 'select * from C , D','F',' select * from E' from B","B"}); + list.add(new String[]{"select 
'select * from C , D',' F',' select * from E' from B","B"}); + list.add(new String[]{"select 'select * from C , D',' F ',' select * from E' from B","B"}); + + + list.add(new String[]{"select * from 'B'","'B'"}); + list.add(new String[]{"select * from 'B','C'","'B','C'"}); + + list.add(new String[]{sql1,"dbo.D_Materiel,dbo.D_Routing,dbo.D_MoldClass,dbo.D_Type_Detail,dbo.D_User"}); + //String sql = "select ' form \"' * from \"B\",C where a='c'"; + //String sql = "select ' form \"' * from \"B\",C"; + for (String[] tmp :list){ + + Assert.assertTrue(tmp[0],parser.test(tmp[0],tmp[1].split(","))); + { + System.out.println(tmp[0]+"--->"+tmp[1]); + Map tables = parser.parse(tmp[0]); + System.out.print("表名:"); + for (String key :tables.keySet()) { + System.out.println(key); + } + } + } + // TODO Auto-generated method stub + + } + +} diff --git a/src/main/java/demo/catlets/BatchInsertSequence.java b/src/main/java/io/mycat/route/sequence/BatchInsertSequence.java similarity index 78% rename from src/main/java/demo/catlets/BatchInsertSequence.java rename to src/main/java/io/mycat/route/sequence/BatchInsertSequence.java index 393016bb6..735bd3018 100644 --- a/src/main/java/demo/catlets/BatchInsertSequence.java +++ b/src/main/java/io/mycat/route/sequence/BatchInsertSequence.java @@ -1,137 +1,144 @@ -package demo.catlets; - -import com.alibaba.druid.sql.ast.SQLStatement; -import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; -import com.alibaba.druid.sql.ast.expr.SQLIntegerExpr; -import com.alibaba.druid.sql.ast.statement.SQLInsertStatement.ValuesClause; -import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement; -import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; -import io.mycat.MycatServer; -import io.mycat.cache.LayerCachePool; -import io.mycat.route.RouteResultset; -import io.mycat.route.RouteResultsetNode; -import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.ErrorCode; -import 
io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.config.node.TableConfig; -import io.mycat.server.parser.ServerParse; -import io.mycat.server.sequence.IncrSequenceMySQLHandler; -import io.mycat.server.sequence.IncrSequencePropHandler; -import io.mycat.server.sequence.SequenceHandler; -import io.mycat.sqlengine.Catlet; -import io.mycat.sqlengine.EngineCtx; -import io.mycat.util.StringUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * 执行批量插入sequence Id - * @author 兵临城下 - * @date 2015/03/20 - */ -public class BatchInsertSequence implements Catlet { - private static final Logger LOGGER = LoggerFactory - .getLogger(BatchInsertSequence.class); - - private RouteResultset rrs;//路由结果集 - private String executeSql;//接收执行处理任务的sql - private SequenceHandler sequenceHandler;//sequence处理对象 - - //重新路由使用 - private SystemConfig sysConfig; - private SchemaConfig schema; - private int sqltype; - private String charset; - private MySQLFrontConnection sc; - private LayerCachePool cachePool; - - @Override - public void processSQL(String sql, EngineCtx ctx) { - try { - getRoute(executeSql); - RouteResultsetNode[] nodes = rrs.getNodes(); - if (nodes == null || nodes.length == 0 || nodes[0].getName() == null - || nodes[0].getName().equals("")) { - ctx.getSession().getSource().writeErrMessage(ErrorCode.ER_NO_DB_ERROR, - "No dataNode found ,please check tables defined in schema:" - + ctx.getSession().getSource().getSchema()); - return; - } - - sc.getSession2().execute(rrs, sqltype);//将路由好的数据执行入库 - - } catch (Exception e) { - LOGGER.error("BatchInsertSequence.processSQL(String sql, EngineCtx ctx)",e); - } - } - - @Override - public void route(SystemConfig sysConfig, SchemaConfig schema, int sqlType, - String realSQL, String charset, MySQLFrontConnection sc, - LayerCachePool cachePool) { - int rs = ServerParse.parse(realSQL); - this.sqltype = rs & 0xff; - 
this.sysConfig=sysConfig; - this.schema=schema; - this.charset=charset; - this.sc=sc; - this.cachePool=cachePool; - - try { - MySqlStatementParser parser = new MySqlStatementParser(realSQL); - SQLStatement statement = parser.parseStatement(); - MySqlInsertStatement insert = (MySqlInsertStatement)statement; - if(insert.getValuesList()!=null){ - String tableName = StringUtil.getTableName(realSQL).toUpperCase(); - TableConfig tableConfig = schema.getTables().get(tableName); - String primaryKey = tableConfig.getPrimaryKey();//获得表的主键字段 - - SQLIdentifierExpr sqlIdentifierExpr = new SQLIdentifierExpr(); - sqlIdentifierExpr.setName(primaryKey); - insert.getColumns().add(sqlIdentifierExpr); - - if(sequenceHandler == null){ - int seqHandlerType = MycatServer.getInstance().getConfig().getSystem().getSequnceHandlerType(); - switch(seqHandlerType){ - case SystemConfig.SEQUENCEHANDLER_MYSQLDB: - sequenceHandler = IncrSequenceMySQLHandler.getInstance(); - break; - case SystemConfig.SEQUENCEHANDLER_LOCALFILE: - sequenceHandler = IncrSequencePropHandler.getInstance(); - break; - default: - throw new java.lang.IllegalArgumentException("Invalid sequnce handler type "+seqHandlerType); - } - } - - for(ValuesClause vc : insert.getValuesList()){ - SQLIntegerExpr sqlIntegerExpr = new SQLIntegerExpr(); - long value = sequenceHandler.nextId(tableName.toUpperCase()); - sqlIntegerExpr.setNumber(value);//插入生成的sequence值 - vc.addValue(sqlIntegerExpr); - } - - String insertSql = insert.toString(); - this.executeSql = insertSql; - } - - } catch (Exception e) { - LOGGER.error("BatchInsertSequence.route(......)",e); - } - } - - /** - * 根据sql获得路由执行结果 - * @param sql - */ - private void getRoute(String sql){ - try { - rrs =RouteStrategyFactory.getRouteStrategy().route(sysConfig, schema, sqltype,sql,charset, sc, cachePool); - } catch (Exception e) { - LOGGER.error("BatchInsertSequence.getRoute(String sql)",e); - } - } - -} +package io.mycat.route.sequence; + +import io.mycat.route.sequence.handler.*; 
+import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.druid.sql.ast.expr.SQLIntegerExpr; +import com.alibaba.druid.sql.ast.statement.SQLInsertStatement.ValuesClause; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement; +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; + +import io.mycat.MycatServer; +import io.mycat.cache.LayerCachePool; +import io.mycat.catlets.Catlet; +import io.mycat.config.ErrorCode; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteResultsetNode; +import io.mycat.route.factory.RouteStrategyFactory; +import io.mycat.server.ServerConnection; +import io.mycat.server.parser.ServerParse; +import io.mycat.sqlengine.EngineCtx; +import io.mycat.util.StringUtil; + +/** + * 执行批量插入sequence Id + * @author 兵临城下 + * @date 2015/03/20 + */ +public class BatchInsertSequence implements Catlet { + private static final Logger LOGGER = LoggerFactory.getLogger(BatchInsertSequence.class); + + private RouteResultset rrs;//路由结果集 + private String executeSql;//接收执行处理任务的sql + private SequenceHandler sequenceHandler;//sequence处理对象 + + //重新路由使用 + private SystemConfig sysConfig; + private SchemaConfig schema; + private int sqltype; + private String charset; + private ServerConnection sc; + private LayerCachePool cachePool; + + @Override + public void processSQL(String sql, EngineCtx ctx) { + try { + getRoute(executeSql); + RouteResultsetNode[] nodes = rrs.getNodes(); + if (nodes == null || nodes.length == 0 || nodes[0].getName() == null + || nodes[0].getName().equals("")) { + ctx.getSession().getSource().writeErrMessage(ErrorCode.ER_NO_DB_ERROR, + "No dataNode found ,please check tables defined in schema:" + + 
ctx.getSession().getSource().getSchema()); + return; + } + + sc.getSession2().execute(rrs, sqltype);//将路由好的数据执行入库 + + } catch (Exception e) { + LOGGER.error("BatchInsertSequence.processSQL(String sql, EngineCtx ctx)",e); + } + } + + @Override + public void route(SystemConfig sysConfig, SchemaConfig schema, int sqlType, + String realSQL, String charset, ServerConnection sc, + LayerCachePool cachePool) { + int rs = ServerParse.parse(realSQL); + this.sqltype = rs & 0xff; + this.sysConfig=sysConfig; + this.schema=schema; + this.charset=charset; + this.sc=sc; + this.cachePool=cachePool; + + try { + MySqlStatementParser parser = new MySqlStatementParser(realSQL); + SQLStatement statement = parser.parseStatement(); + MySqlInsertStatement insert = (MySqlInsertStatement)statement; + if(insert.getValuesList()!=null){ + String tableName = StringUtil.getTableName(realSQL).toUpperCase(); + TableConfig tableConfig = schema.getTables().get(tableName); + String primaryKey = tableConfig.getPrimaryKey();//获得表的主键字段 + + SQLIdentifierExpr sqlIdentifierExpr = new SQLIdentifierExpr(); + sqlIdentifierExpr.setName(primaryKey); + insert.getColumns().add(sqlIdentifierExpr); + + if(sequenceHandler == null){ + int seqHandlerType = MycatServer.getInstance().getConfig().getSystem().getSequnceHandlerType(); + switch(seqHandlerType){ + case SystemConfig.SEQUENCEHANDLER_MYSQLDB: + sequenceHandler = IncrSequenceMySQLHandler.getInstance(); + break; + case SystemConfig.SEQUENCEHANDLER_LOCALFILE: + sequenceHandler = IncrSequencePropHandler.getInstance(); + break; + case SystemConfig.SEQUENCEHANDLER_LOCAL_TIME: + sequenceHandler = IncrSequenceTimeHandler.getInstance(); + break; + case SystemConfig.SEQUENCEHANDLER_ZK_DISTRIBUTED: + sequenceHandler = DistributedSequenceHandler.getInstance(MycatServer.getInstance().getConfig().getSystem()); + break; + case SystemConfig.SEQUENCEHANDLER_ZK_GLOBAL_INCREMENT: + sequenceHandler = IncrSequenceZKHandler.getInstance(); + break; + default: + throw new 
java.lang.IllegalArgumentException("Invalid sequnce handler type "+seqHandlerType); + } + } + + for(ValuesClause vc : insert.getValuesList()){ + SQLIntegerExpr sqlIntegerExpr = new SQLIntegerExpr(); + long value = sequenceHandler.nextId(tableName.toUpperCase()); + sqlIntegerExpr.setNumber(value);//插入生成的sequence值 + vc.addValue(sqlIntegerExpr); + } + + String insertSql = insert.toString(); + this.executeSql = insertSql; + } + + } catch (Exception e) { + LOGGER.error("BatchInsertSequence.route(......)",e); + } + } + + /** + * 根据sql获得路由执行结果 + * @param sql + */ + private void getRoute(String sql){ + try { + rrs =RouteStrategyFactory.getRouteStrategy().route(sysConfig, schema, sqltype,sql,charset, sc, cachePool); + } catch (Exception e) { + LOGGER.error("BatchInsertSequence.getRoute(String sql)",e); + } + } + +} diff --git a/src/main/java/io/mycat/route/sequence/handler/DistributedSequenceHandler.java b/src/main/java/io/mycat/route/sequence/handler/DistributedSequenceHandler.java new file mode 100644 index 000000000..b51e2fa99 --- /dev/null +++ b/src/main/java/io/mycat/route/sequence/handler/DistributedSequenceHandler.java @@ -0,0 +1,327 @@ +package io.mycat.route.sequence.handler; + + +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.util.PropertiesUtil; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.CuratorFrameworkFactory; +import org.apache.curator.framework.recipes.leader.CancelLeadershipException; +import org.apache.curator.framework.recipes.leader.LeaderSelector; +import org.apache.curator.framework.recipes.leader.LeaderSelectorListenerAdapter; +import org.apache.curator.framework.state.ConnectionState; +import org.apache.curator.retry.ExponentialBackoffRetry; +import org.apache.curator.utils.CloseableUtils; +import 
org.apache.zookeeper.CreateMode; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +/** + * 基于ZK与本地配置的分布式ID生成器(可以通过ZK获取集群(机房)唯一InstanceID,也可以通过配置文件配置InstanceID) + * ID结构:long 64位,ID最大可占63位 + * |current time millis(微秒时间戳38位,可以使用17年)|clusterId(机房或者ZKid,通过配置文件配置5位)|instanceId(实例ID,可以通过ZK或者配置文件获取,5位)|threadId(线程ID,9位)|increment(自增,6位) + * 一共63位,可以承受单机房单机器单线程1000*(2^6)=640000的并发。 + * 无悲观锁,无强竞争,吞吐量更高 + *

+ * 配置文件:sequence_distributed_conf.properties + * 只要配置里面:INSTANCEID=ZK就是从ZK上获取InstanceID + * + * @author Hash Zhang + * @version 1.0 + * @time 00:08:03 2016/5/3 + */ +public class DistributedSequenceHandler extends LeaderSelectorListenerAdapter implements Closeable, SequenceHandler { + protected static final Logger LOGGER = LoggerFactory.getLogger(DistributedSequenceHandler.class); + private static final String SEQUENCE_DB_PROPS = "sequence_distributed_conf.properties"; + private static DistributedSequenceHandler instance; + + private final long timestampBits = 38L; + private final long clusterIdBits = 5L; + private final long instanceIdBits = 5L; + private final long threadIdBits = 9L; + private final long incrementBits = 6L; + + private final long timestampMask = (1L << timestampBits) - 1L; + + private final long incrementShift = 0L; + private final long threadIdShift = incrementShift + incrementBits; + private final long instanceIdShift = threadIdShift + threadIdBits; + private final long clusterIdShift = instanceIdShift + instanceIdBits; + private final long timestampShift = clusterIdShift + clusterIdBits; + + private final long maxIncrement = 1L << incrementBits; + private final long maxThreadId = 1L << threadIdBits; + private final long maxinstanceId = 1L << instanceIdBits; + private final long maxclusterId = 1L << instanceIdBits; + + private volatile long instanceId; + private long clusterId; + + private ThreadLocal threadInc = new ThreadLocal<>(); + private ThreadLocal threadLastTime = new ThreadLocal<>(); + private ThreadLocal threadID = new ThreadLocal<>(); + private long nextID = 0L; + + private final static String PATH = ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_BASE.getKey() + + io.mycat.config.loader.zkprocess.comm.ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_CLUSTERID) + + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE.getKey(); + // private final static String PATH = "/mycat/sequence"; + private 
final static String INSTANCE_PATH = ZookeeperPath.ZK_SEPARATOR.getKey() + + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_INSTANCE.getKey(); + private final static String LEADER_PATH = ZookeeperPath.ZK_SEPARATOR.getKey() + + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_LEADER.getKey(); + private SystemConfig mycatConfig; + private String ID; + + private int mark[]; + private volatile boolean isLeader = false; + private volatile String slavePath; + // 配置是否载入好 + private volatile boolean ready = false; + + private CuratorFramework client; + + private LeaderSelector leaderSelector; + + private final ScheduledExecutorService timerExecutor = Executors.newSingleThreadScheduledExecutor(); + private ScheduledExecutorService leaderExecutor; + private final long SELF_CHECK_PERIOD = 10L; + + public static DistributedSequenceHandler getInstance(SystemConfig systemConfig) { + if (instance == null) { + instance = new DistributedSequenceHandler(systemConfig); + } + return instance; + } + + public long getClusterId() { + return clusterId; + } + + public void setClusterId(long clusterId) { + this.clusterId = clusterId; + } + + public LeaderSelector getLeaderSelector() { + return leaderSelector; + } + + public long getInstanceId() { + return instanceId; + } + + public void setInstanceId(long instanceId) { + this.instanceId = instanceId; + } + + public CuratorFramework getClient() { + return client; + } + + public void setClient(CuratorFramework client) { + this.client = client; + } + + public DistributedSequenceHandler(SystemConfig mycatConfig) { + this.mycatConfig = mycatConfig; + ID = mycatConfig.getBindIp() + mycatConfig.getServerPort(); + } + + public void load() { + // load sequnce properties + Properties props = PropertiesUtil.loadProps(SEQUENCE_DB_PROPS); + if ("ZK".equals(props.getProperty("INSTANCEID"))) { + initializeZK(ZkConfig.getInstance().getZkURL()); + } else { + this.instanceId = Long.parseLong(props.getProperty("INSTANCEID")); + this.ready = true; + } + this.clusterId = 
Long.valueOf(props.getProperty("CLUSTERID")); + + } + + public void initializeZK(String zkAddress) { + this.client = CuratorFrameworkFactory.newClient(zkAddress, new ExponentialBackoffRetry(1000, 3)); + this.client.start(); + try { + if (client.checkExists().forPath(PATH.concat(INSTANCE_PATH)) == null) { + client.create().creatingParentContainersIfNeeded().forPath(PATH.concat(INSTANCE_PATH)); + } + } catch (Exception e) { + // do nothing + } + this.leaderSelector = new LeaderSelector(client, PATH.concat(LEADER_PATH), this); + this.leaderSelector.autoRequeue(); + this.leaderSelector.start(); + Runnable runnable = new Runnable() { + @Override + public void run() { + try { + while (leaderSelector.getLeader() == null) { + Thread.currentThread().yield(); + } + if (!leaderSelector.hasLeadership()) { + isLeader = false; + if (slavePath != null && client.checkExists().forPath(slavePath) != null) { + return; + } + slavePath = client.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL_SEQUENTIAL) + .forPath(PATH.concat("/instance/node"), "ready".getBytes()); + while ("ready".equals(new String(client.getData().forPath(slavePath)))) { + Thread.currentThread().yield(); + } + instanceId = Long.parseLong(new String(client.getData().forPath(slavePath))); + ready = true; + } + } catch (Exception e) { + LOGGER.warn("Caught exception while handling zk!", e); + } + } + }; + timerExecutor.scheduleAtFixedRate(runnable, 1L, 10L, TimeUnit.SECONDS); + } + + @Override + public long nextId(String prefixName) { + // System.out.println(instanceId); + while (!ready) { + try { + Thread.sleep(50); + } catch (InterruptedException e) { + LOGGER.warn("Unexpected thread interruption!"); + Thread.currentThread().interrupt(); + } + } + long time = System.currentTimeMillis(); + if (threadLastTime.get() == null) { + threadLastTime.set(time); + } + if (threadInc.get() == null) { + threadInc.set(0L); + } + if (threadID.get() == null) { + threadID.set(getNextThreadID()); + } + long a = 
threadInc.get(); + if ((a + 1L) >= maxIncrement) { + if (threadLastTime.get() == time) { + time = blockUntilNextMillis(time); + } + threadInc.set(0L); + } else { + threadInc.set(a + 1L); + } + threadLastTime.set(time); + return ((time & timestampMask) << timestampShift) | (((threadID.get() % maxThreadId) << threadIdShift)) + | (instanceId << instanceIdShift) | (clusterId << clusterIdShift) | a; + } + + private synchronized Long getNextThreadID() { + long i = nextID; + nextID++; + return i; + } + + private long blockUntilNextMillis(long time) { + while (System.currentTimeMillis() == time) { + } + return System.currentTimeMillis(); + } + + @Override + public void stateChanged(CuratorFramework client, ConnectionState newState) { + if (newState == ConnectionState.SUSPENDED || newState == ConnectionState.LOST) { + this.isLeader = false; + leaderExecutor.shutdownNow(); + throw new CancelLeadershipException(); + } + } + + @Override + public void takeLeadership(final CuratorFramework curatorFramework) { + this.isLeader = true; + this.instanceId = 1; + this.ready = true; + this.mark = new int[(int) maxinstanceId]; + List children = null; + try { + if (this.slavePath != null) { + client.delete().forPath(slavePath); + } + if (client.checkExists().forPath(PATH.concat(INSTANCE_PATH)) != null) { + children = client.getChildren().forPath(PATH.concat(INSTANCE_PATH)); + } + if (children != null) { + for (String child : children) { + String data = new String( + client.getData().forPath(PATH.concat(INSTANCE_PATH.concat("/").concat(child)))); + if (!"ready".equals(data)) { + mark[Integer.parseInt(data)] = 1; + } + } + } + } catch (Exception e) { + LOGGER.warn("Caught exception while handling zk!", e); + } + + leaderExecutor = Executors.newSingleThreadScheduledExecutor(); + leaderExecutor.scheduleAtFixedRate(new Runnable() { + @Override + public void run() { + try { + while (!client.isStarted()) { + Thread.currentThread().yield(); + } + List children = 
client.getChildren().forPath(PATH.concat(INSTANCE_PATH)); + int mark2[] = new int[(int) maxinstanceId]; + for (String child : children) { + String data = new String(client.getData().forPath(PATH.concat("/instance/" + child))); + if ("ready".equals(data)) { + int i = nextFree(); + client.setData().forPath(PATH.concat(INSTANCE_PATH.concat("/").concat(child)), + ("" + i).getBytes()); + mark2[i] = 1; + } else { + mark2[Integer.parseInt(data)] = 1; + } + } + mark = mark2; + } catch (Exception e) { + LOGGER.warn("Caught exception while handling zk!", e); + } + } + }, 0L, 3L, TimeUnit.SECONDS); + while (true) { + Thread.currentThread().yield(); + } + } + + private int nextFree() { + for (int i = 0; i < mark.length; i++) { + if (i == 1) { + continue; + } + if (mark[i] != 1) { + mark[i] = 1; + return i; + } + } + return -1; + } + + @Override + public void close() throws IOException { + CloseableUtils.closeQuietly(this.leaderSelector); + CloseableUtils.closeQuietly(this.client); + } +} diff --git a/src/main/java/io/mycat/route/sequence/handler/IncrSequenceBDBHandler.java b/src/main/java/io/mycat/route/sequence/handler/IncrSequenceBDBHandler.java new file mode 100644 index 000000000..b9919f996 --- /dev/null +++ b/src/main/java/io/mycat/route/sequence/handler/IncrSequenceBDBHandler.java @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.route.sequence.handler; + +import java.util.Map; + +/** + * BDB 数据库实现递增序列号 + * + * @author Michael + * @time Create on 2013-12-29 下午11:05:44 + * @version 1.0 + */ +public class IncrSequenceBDBHandler extends IncrSequenceHandler { + + private static class IncrSequenceBDBHandlerHolder { + private static final IncrSequenceBDBHandler instance = new IncrSequenceBDBHandler(); + } + + public static IncrSequenceBDBHandler getInstance() { + return IncrSequenceBDBHandlerHolder.instance; + } + + private IncrSequenceBDBHandler() { + } + + @Override + public Map getParaValMap(String prefixName) { + + return null; + } + + @Override + public Boolean fetchNextPeriod(String prefixName) { + + return null; + } + + @Override + public Boolean updateCURIDVal(String prefixName, Long val) { + + return null; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/sequence/IncrSequenceHandler.java b/src/main/java/io/mycat/route/sequence/handler/IncrSequenceHandler.java similarity index 89% rename from src/main/java/io/mycat/server/sequence/IncrSequenceHandler.java rename to src/main/java/io/mycat/route/sequence/handler/IncrSequenceHandler.java index 0e89193dd..21885ad59 100644 --- a/src/main/java/io/mycat/server/sequence/IncrSequenceHandler.java +++ b/src/main/java/io/mycat/route/sequence/handler/IncrSequenceHandler.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,12 +16,12 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.sequence; +package io.mycat.route.sequence.handler; import java.util.Map; @@ -30,14 +30,15 @@ /** * 递增序列号处理器 - * + * * @author Michael * @time Create on 2013-12-29 下午10:42:39 * @version 1.0 */ -public abstract class IncrSequenceHandler extends SequenceHandler { +public abstract class IncrSequenceHandler implements SequenceHandler { - public static final Logger logger = LoggerFactory.getLogger(IncrSequenceHandler.class); + public static final Logger logger = LoggerFactory + .getLogger(IncrSequenceHandler.class); public static final String FILE_NAME = "sequence_conf.properties"; diff --git a/src/main/java/io/mycat/server/sequence/IncrSequenceMySQLHandler.java b/src/main/java/io/mycat/route/sequence/handler/IncrSequenceMySQLHandler.java similarity index 84% rename from src/main/java/io/mycat/server/sequence/IncrSequenceMySQLHandler.java rename to src/main/java/io/mycat/route/sequence/handler/IncrSequenceMySQLHandler.java index 014cba46c..531a88b50 100644 --- 
a/src/main/java/io/mycat/server/sequence/IncrSequenceMySQLHandler.java +++ b/src/main/java/io/mycat/route/sequence/handler/IncrSequenceMySQLHandler.java @@ -1,60 +1,60 @@ -package io.mycat.server.sequence; +package io.mycat.route.sequence.handler; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +import io.mycat.route.util.PropertiesUtil; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.mycat.MycatServer; import io.mycat.backend.BackendConnection; -import io.mycat.backend.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.config.MycatConfig; +import io.mycat.config.util.ConfigException; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.RowDataPacket; import io.mycat.route.RouteResultsetNode; -import io.mycat.server.config.ConfigException; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.config.node.SequenceConfig; -import io.mycat.server.executors.ResponseHandler; -import io.mycat.server.packet.ErrorPacket; -import io.mycat.server.packet.RowDataPacket; import io.mycat.server.parser.ServerParse; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; +public class IncrSequenceMySQLHandler implements SequenceHandler { -public class IncrSequenceMySQLHandler extends SequenceHandler { - public static final Logger LOGGER = LoggerFactory + protected static final Logger LOGGER = LoggerFactory .getLogger(IncrSequenceMySQLHandler.class); + private static final String SEQUENCE_DB_PROPS = 
"sequence_db_conf.properties"; protected static final String errSeqResult = "-999999999,null"; protected static Map latestErrors = new ConcurrentHashMap(); private final FetchMySQLSequnceHandler mysqlSeqFetcher = new FetchMySQLSequnceHandler(); - /** - * save sequnce -> curval - */ - private ConcurrentHashMap seqValueMap = new ConcurrentHashMap(); private static class IncrSequenceMySQLHandlerHolder { private static final IncrSequenceMySQLHandler instance = new IncrSequenceMySQLHandler(); } - public static SequenceHandler getInstance() { + + public static IncrSequenceMySQLHandler getInstance() { return IncrSequenceMySQLHandlerHolder.instance; } public IncrSequenceMySQLHandler() { + load(); } public void load() { - Properties props = new Properties(); // load sequnce properties - SequenceConfig sequenceConfig = SequenceHandler.getConfig(); - Map data = sequenceConfig.getProps(); - Set keySet = data.keySet(); - for(String key : keySet){ - props.put(key, data.get(key)); - } + Properties props = PropertiesUtil.loadProps(SEQUENCE_DB_PROPS); removeDesertedSequenceVals(props); putNewSequenceVals(props); } + private void removeDesertedSequenceVals(Properties props) { Iterator> i = seqValueMap.entrySet() .iterator(); @@ -78,6 +78,11 @@ private void putNewSequenceVals(Properties props) { } } + /** + * save sequnce -> curval + */ + private ConcurrentHashMap seqValueMap = new ConcurrentHashMap(); + @Override public long nextId(String seqName) { SequenceVal seqVal = seqValueMap.get(seqName); @@ -132,7 +137,6 @@ private long getSeqValueFromDB(SequenceVal seqVal) { } } - } class FetchMySQLSequnceHandler implements ResponseHandler { @@ -221,17 +225,23 @@ public void rowResponse(byte[] row, BackendConnection conn) { @Override public void rowEofResponse(byte[] eof, BackendConnection conn) { - ((SequenceVal) conn.getAttachment()).dbfinished = true; + SequenceVal sequenceVal = ((SequenceVal) conn.getAttachment()); conn.release(); + sequenceVal.dbfinished = true; } private void 
executeException(BackendConnection c, Throwable e) { SequenceVal seqVal = ((SequenceVal) c.getAttachment()); seqVal.dbfinished = true; - String errMgs = e.toString(); + String errMgs=e.toString(); IncrSequenceMySQLHandler.latestErrors.put(seqVal.seqName, errMgs); LOGGER.warn("executeException " + errMgs); - c.close("exception:" + errMgs); + c.close("exception:" +errMgs); + + } + + @Override + public void writeQueueAvailable() { } @@ -275,7 +285,7 @@ public boolean isNexValValid(Long nexVal) { } } - FetchMySQLSequnceHandler seqHandler; +// FetchMySQLSequnceHandler seqHandler; public void setCurValue(long newValue) { curVal.set(newValue); @@ -286,15 +296,18 @@ public Long[] waitFinish() { long start = System.currentTimeMillis(); long end = start + 10 * 1000; while (System.currentTimeMillis() < end) { - if (dbretVal == IncrSequenceMySQLHandler.errSeqResult) { - throw new java.lang.RuntimeException( - "sequnce not found in db table "); - } else if (dbretVal != null) { + + if(dbfinished){ + if (dbretVal == IncrSequenceMySQLHandler.errSeqResult) { + throw new java.lang.RuntimeException( + "sequnce not found in db table "); + } + String[] items = dbretVal.split(","); - Long curVal = Long.valueOf(items[0]); - int span = Integer.valueOf(items[1]); + Long curVal = Long.parseLong(items[0]); + int span = Integer.parseInt(items[1]); return new Long[] { curVal, curVal + span }; - } else { + }else{ try { Thread.sleep(100); } catch (InterruptedException e) { diff --git a/src/main/java/io/mycat/server/sequence/IncrSequencePropHandler.java b/src/main/java/io/mycat/route/sequence/handler/IncrSequencePropHandler.java similarity index 99% rename from src/main/java/io/mycat/server/sequence/IncrSequencePropHandler.java rename to src/main/java/io/mycat/route/sequence/handler/IncrSequencePropHandler.java index a4c0d6f5b..7acdcaf37 100644 --- a/src/main/java/io/mycat/server/sequence/IncrSequencePropHandler.java +++ b/src/main/java/io/mycat/route/sequence/handler/IncrSequencePropHandler.java 
@@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.sequence; +package io.mycat.route.sequence.handler; import java.io.FileInputStream; import java.io.FileOutputStream; diff --git a/src/main/java/io/mycat/server/sequence/IncrSequenceTimeHandler.java b/src/main/java/io/mycat/route/sequence/handler/IncrSequenceTimeHandler.java similarity index 68% rename from src/main/java/io/mycat/server/sequence/IncrSequenceTimeHandler.java rename to src/main/java/io/mycat/route/sequence/handler/IncrSequenceTimeHandler.java index ca6dea952..ae22ba731 100644 --- a/src/main/java/io/mycat/server/sequence/IncrSequenceTimeHandler.java +++ b/src/main/java/io/mycat/route/sequence/handler/IncrSequenceTimeHandler.java @@ -1,36 +1,51 @@ -package io.mycat.server.sequence; +package io.mycat.route.sequence.handler; -import io.mycat.server.config.node.SequenceConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.io.InputStream; +import java.util.Properties; -import java.util.Map; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class IncrSequenceTimeHandler extends SequenceHandler { - protected static final Logger LOGGER = LoggerFactory - .getLogger(IncrSequenceTimeHandler.class); +public class IncrSequenceTimeHandler implements SequenceHandler { + protected static final Logger LOGGER = LoggerFactory.getLogger(IncrSequenceTimeHandler.class); + private static final String SEQUENCE_DB_PROPS = "sequence_time_conf.properties"; private static final IncrSequenceTimeHandler instance = new IncrSequenceTimeHandler(); - private static IdWorker workey = new IdWorker(0,0); + private static IdWorker workey = new IdWorker(1,1); - public static SequenceHandler getInstance() { + + public static IncrSequenceTimeHandler getInstance() { return IncrSequenceTimeHandler.instance; } - static{ - SequenceConfig sequenceConfig = SequenceHandler.getConfig(); - Map props = sequenceConfig.getProps(); - 
if(!props.isEmpty()){ - long workerId = Integer.valueOf((String)props.get("WORKID")) ; - long datacenterId = Integer.valueOf((String)props.get("DATAACENTERID")) ; - IncrSequenceTimeHandler.instance.setWorkey(new IdWorker(workerId,datacenterId) ); - } + public IncrSequenceTimeHandler() { + load(); } - private IncrSequenceTimeHandler() { + public void load(){ + // load sequnce properties + Properties props = loadProps(SEQUENCE_DB_PROPS); + + long workid = Long.parseLong(props.getProperty("WORKID")); + long dataCenterId = Long.parseLong(props.getProperty("DATAACENTERID")); + + workey = new IdWorker(workid,dataCenterId); } + private Properties loadProps(String propsFile){ + Properties props = new Properties(); + InputStream inp = Thread.currentThread().getContextClassLoader().getResourceAsStream(propsFile); + if (inp == null) { + throw new java.lang.RuntimeException("time sequnce properties not found " + propsFile); + } + try { + props.load(inp); + } catch (IOException e) { + throw new java.lang.RuntimeException(e); + } + return props; + } @Override public long nextId(String prefixName) { return workey.nextId(); @@ -55,7 +70,6 @@ static class IdWorker { private final static long sequenceBits = 12L; // 机器ID偏左移12位 private final static long workerIdShift = sequenceBits; - // 数据中心ID左移17位 private final static long datacenterIdShift = sequenceBits + workerIdBits; // 时间毫秒左移22位 private final static long timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits; @@ -85,7 +99,7 @@ public synchronized long nextId() { try { throw new Exception("Clock moved backwards. 
Refusing to generate id for "+ (lastTimestamp - timestamp) + " milliseconds"); } catch (Exception e) { - e.printStackTrace(); + LOGGER.error("error",e); } } @@ -124,10 +138,6 @@ private long timeGen() { } - public void setWorkey(IdWorker workey) { - IncrSequenceTimeHandler.workey = workey; - } - diff --git a/src/main/java/io/mycat/route/sequence/handler/IncrSequenceZKHandler.java b/src/main/java/io/mycat/route/sequence/handler/IncrSequenceZKHandler.java new file mode 100644 index 000000000..6551cc0dd --- /dev/null +++ b/src/main/java/io/mycat/route/sequence/handler/IncrSequenceZKHandler.java @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.route.sequence.handler; + + +import io.mycat.config.loader.console.ZookeeperPath; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import io.mycat.route.util.PropertiesUtil; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.CuratorFrameworkFactory; +import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex; +import org.apache.curator.retry.ExponentialBackoffRetry; +import org.apache.curator.utils.ZKPaths; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.data.Stat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.UnsupportedEncodingException; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.ConcurrentHashMap; + +/** + * zookeeper 实现递增序列号 + * 配置文件:sequence_conf.properties + * 只要配置好ZK地址和表名的如下属性 + * TABLE.MINID 某线程当前区间内最小值 + * TABLE.MAXID 某线程当前区间内最大值 + * TABLE.CURID 某线程当前区间内当前值 + * 文件配置的MAXID以及MINID决定每次取得区间,这个对于每个线程或者进程都有效 + * 文件中的这三个属性配置只对第一个进程的第一个线程有效,其他线程和进程会动态读取ZK + * + * @author Hash Zhang + * @version 1.0 + * @time 23:35 2016/5/6 + */ +public class IncrSequenceZKHandler extends IncrSequenceHandler { + protected static final Logger LOGGER = LoggerFactory.getLogger(IncrSequenceHandler.class); + private final static String PATH = ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_BASE.getKey() + + ZookeeperPath.ZK_SEPARATOR.getKey() + + io.mycat.config.loader.zkprocess.comm.ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_CLUSTERID) + + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE.getKey() + + ZookeeperPath.ZK_SEPARATOR.getKey() + ZookeeperPath.FLOW_ZK_PATH_SEQUENCE_INCREMENT_SEQ.getKey(); + private final static String LOCK = "/lock"; + private final static String SEQ = "/seq"; + private final static IncrSequenceZKHandler instance = 
new IncrSequenceZKHandler(); + + public static IncrSequenceZKHandler getInstance() { + return instance; + } + + private ThreadLocal>> tableParaValMapThreadLocal = new ThreadLocal<>(); + + private CuratorFramework client; + private ThreadLocal interProcessSemaphoreMutexThreadLocal = new ThreadLocal<>(); + private Properties props; + + public void load() { + props = PropertiesUtil.loadProps(FILE_NAME); + String zkAddress = ZkConfig.getInstance().getZkURL(); + try { + initializeZK(props, zkAddress); + } catch (Exception e) { + LOGGER.error("Error caught while initializing ZK:" + e.getCause()); + } + } + + public void threadLocalLoad() throws Exception { + Enumeration enu = props.propertyNames(); + while (enu.hasMoreElements()) { + String key = (String) enu.nextElement(); + if (key.endsWith(KEY_MIN_NAME)) { + handle(key); + } + } + } + + public void initializeZK(Properties props, String zkAddress) throws Exception { + this.client = CuratorFrameworkFactory.newClient(zkAddress, new ExponentialBackoffRetry(1000, 3)); + this.client.start(); + this.props = props; + Enumeration enu = props.propertyNames(); + while (enu.hasMoreElements()) { + String key = (String) enu.nextElement(); + if (key.endsWith(KEY_MIN_NAME)) { + handle(key); + } + } + } + + private void handle(String key) throws Exception { + String table = key.substring(0, key.indexOf(KEY_MIN_NAME)); + InterProcessSemaphoreMutex interProcessSemaphoreMutex = interProcessSemaphoreMutexThreadLocal.get(); + if (interProcessSemaphoreMutex == null) { + interProcessSemaphoreMutex = new InterProcessSemaphoreMutex(client, PATH + "/" + table + SEQ + LOCK); + interProcessSemaphoreMutexThreadLocal.set(interProcessSemaphoreMutex); + } + Map> tableParaValMap = tableParaValMapThreadLocal.get(); + if (tableParaValMap == null) { + tableParaValMap = new HashMap<>(); + tableParaValMapThreadLocal.set(tableParaValMap); + } + Map paraValMap = tableParaValMap.get(table); + if (paraValMap == null) { + paraValMap = new ConcurrentHashMap<>(); 
+ tableParaValMap.put(table, paraValMap); + + String seqPath = PATH + ZookeeperPath.ZK_SEPARATOR.getKey() + table + SEQ; + + Stat stat = this.client.checkExists().forPath(seqPath); + + if (stat == null || (stat.getDataLength() == 0)) { + paraValMap.put(table + KEY_MIN_NAME, props.getProperty(key)); + paraValMap.put(table + KEY_MAX_NAME, props.getProperty(table + KEY_MAX_NAME)); + paraValMap.put(table + KEY_CUR_NAME, props.getProperty(table + KEY_CUR_NAME)); + try { + String val = props.getProperty(table + KEY_MIN_NAME); + client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT) + .forPath(PATH + "/" + table + SEQ, val.getBytes()); + } catch (Exception e) { + LOGGER.debug("Node exists! Maybe other instance is initializing!"); + } + } + fetchNextPeriod(table); + } + } + + @Override + public Map getParaValMap(String prefixName) { + Map> tableParaValMap = tableParaValMapThreadLocal.get(); + if (tableParaValMap == null) { + try { + threadLocalLoad(); + } catch (Exception e) { + LOGGER.error("Error caught while loding configuration within current thread:" + e.getCause()); + } + tableParaValMap = tableParaValMapThreadLocal.get(); + } + Map paraValMap = tableParaValMap.get(prefixName); + return paraValMap; + } + + @Override + public Boolean fetchNextPeriod(String prefixName) { + InterProcessSemaphoreMutex interProcessSemaphoreMutex = interProcessSemaphoreMutexThreadLocal.get(); + try { + if (interProcessSemaphoreMutex == null) { + throw new IllegalStateException("IncrSequenceZKHandler should be loaded first!"); + } + interProcessSemaphoreMutex.acquire(); + Map> tableParaValMap = tableParaValMapThreadLocal.get(); + if (tableParaValMap == null) { + throw new IllegalStateException("IncrSequenceZKHandler should be loaded first!"); + } + Map paraValMap = tableParaValMap.get(prefixName); + if (paraValMap == null) { + throw new IllegalStateException("IncrSequenceZKHandler should be loaded first!"); + } + if (paraValMap.get(prefixName + KEY_MAX_NAME) == null) { + 
paraValMap.put(prefixName + KEY_MAX_NAME, props.getProperty(prefixName + KEY_MAX_NAME)); + } + if (paraValMap.get(prefixName + KEY_MIN_NAME) == null) { + paraValMap.put(prefixName + KEY_MIN_NAME, props.getProperty(prefixName + KEY_MIN_NAME)); + } + if (paraValMap.get(prefixName + KEY_CUR_NAME) == null) { + paraValMap.put(prefixName + KEY_CUR_NAME, props.getProperty(prefixName + KEY_CUR_NAME)); + } + long period = Long.parseLong(paraValMap.get(prefixName + KEY_MAX_NAME)) + - Long.parseLong(paraValMap.get(prefixName + KEY_MIN_NAME)); + long now = Long.parseLong(new String(client.getData().forPath(PATH + "/" + prefixName + SEQ))); + client.setData().forPath(PATH + "/" + prefixName + SEQ, ((now + period + 1) + "").getBytes()); + + paraValMap.put(prefixName + KEY_MAX_NAME, (now + period + 1) + ""); + paraValMap.put(prefixName + KEY_MIN_NAME, (now + 1) + ""); + paraValMap.put(prefixName + KEY_CUR_NAME, (now) + ""); + + } catch (Exception e) { + LOGGER.error("Error caught while updating period from ZK:" + e.getCause()); + } finally { + try { + interProcessSemaphoreMutex.release(); + } catch (Exception e) { + LOGGER.error("Error caught while realeasing distributed lock" + e.getCause()); + } + } + return true; + } + + @Override + public Boolean updateCURIDVal(String prefixName, Long val) { + Map> tableParaValMap = tableParaValMapThreadLocal.get(); + if (tableParaValMap == null) { + throw new IllegalStateException("IncrSequenceZKHandler should be loaded first!"); + } + Map paraValMap = tableParaValMap.get(prefixName); + if (paraValMap == null) { + throw new IllegalStateException("IncrSequenceZKHandler should be loaded first!"); + } + paraValMap.put(prefixName + KEY_CUR_NAME, val + ""); + return true; + } + + public static void main(String[] args) throws UnsupportedEncodingException { + IncrSequenceZKHandler incrSequenceZKHandler = new IncrSequenceZKHandler(); + incrSequenceZKHandler.load(); + System.out.println(incrSequenceZKHandler.nextId("TRAVELRECORD")); + 
System.out.println(incrSequenceZKHandler.nextId("TRAVELRECORD")); + System.out.println(incrSequenceZKHandler.nextId("TRAVELRECORD")); + System.out.println(incrSequenceZKHandler.nextId("TRAVELRECORD")); + } +} diff --git a/src/main/java/io/mycat/server/sequence/SequenceHandler.java b/src/main/java/io/mycat/route/sequence/handler/SequenceHandler.java similarity index 76% rename from src/main/java/io/mycat/server/sequence/SequenceHandler.java rename to src/main/java/io/mycat/route/sequence/handler/SequenceHandler.java index b38db3ca4..891e02dd5 100644 --- a/src/main/java/io/mycat/server/sequence/SequenceHandler.java +++ b/src/main/java/io/mycat/route/sequence/handler/SequenceHandler.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,26 +16,21 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.sequence; - -import io.mycat.MycatServer; -import io.mycat.server.config.node.SequenceConfig; +package io.mycat.route.sequence.handler; /** - * + * * @author Michael * @time Create on 2013-12-20 下午3:35:53 * @version 1.0 */ -public abstract class SequenceHandler { - public abstract long nextId(String prefixName); - public static SequenceConfig getConfig(){ - return MycatServer.getInstance().getConfig().getSequenceConfig(); - }; +public interface SequenceHandler { + + public long nextId(String prefixName); } \ No newline at end of file diff --git a/src/main/java/io/mycat/route/sequence/handler/SnowflakeIdSequenceHandler.java b/src/main/java/io/mycat/route/sequence/handler/SnowflakeIdSequenceHandler.java new file mode 100644 index 000000000..9710dbe4c --- /dev/null +++ b/src/main/java/io/mycat/route/sequence/handler/SnowflakeIdSequenceHandler.java @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.route.sequence.handler; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * 本地默认获取的全局ID(用于单机或者测试)
+ * java for base on https://github.com/twitter/snowflake + * + * @author Michael + * @time Create on 2013-12-22 下午4:52:25 + * @version 1.0 + */ +public class SnowflakeIdSequenceHandler implements SequenceHandler { + + private final static Logger logger = LoggerFactory + .getLogger(SequenceHandler.class); + + private final long workerId; + private final long datacenterId; + private static final long twepoch = 1355285532520L; + + private static final long workerIdBits = 5L; + private static final long datacenterIdBits = 5L; + private static final long maxWorkerId = -1L ^ -1L << workerIdBits; + private static final long maxDatacenterId = -1L ^ -1L << datacenterIdBits; + private static final long sequenceBits = 12L; + private static final long workerIdShift = sequenceBits; + private static final long datacenterIdShift = sequenceBits + workerIdBits; + + private static final long timestampLeftShift = sequenceBits + + workerIdBits; + private static final long sequenceMask = -1L ^ -1L << sequenceBits; + + private long sequence = 0L; + private long lastTimestamp = -1L; + + public SnowflakeIdSequenceHandler(long workerId, long datacenterId) { + super(); + System.out.println("maxWorkerId = " + maxWorkerId); + System.out.println("maxDatacenterId = " + maxDatacenterId); + if (workerId > this.maxWorkerId || workerId < 0) { + throw new IllegalArgumentException(String.format( + "worker Id can't be greater than %d or less than 0", + this.maxWorkerId)); + } + + this.workerId = workerId; + if (datacenterId > maxDatacenterId || datacenterId < 0) { + throw new IllegalArgumentException(String.format( + "datacenter Id can't be greater than %d or less than 0", + maxDatacenterId)); + + } + this.datacenterId = datacenterId; + logger.info(String + .format("worker starting. 
timestamp left shift %d, datacenter id bits %d, worker id bits %d, sequence bits %d, workerid %d", + timestampLeftShift, datacenterIdBits, workerIdBits, + sequenceBits, workerId)); + + } + + public SnowflakeIdSequenceHandler(long workerId) { + this(workerId, 13); + } + + // 默认 + public SnowflakeIdSequenceHandler() { + this(23, 13); + } + + @Override + public synchronized long nextId(String prefixName) { + long timestamp = this.timeGen(); + if (timestamp < this.lastTimestamp) { + logger.error( + "clock is moving backwards. Rejecting requests until %d.", + lastTimestamp); + throw new RuntimeException( + String.format( + "Clock moved backwards. Refusing to generate id for %d milliseconds", + (this.lastTimestamp - timestamp))); + } + if (this.lastTimestamp == timestamp) { + this.sequence = this.sequence + 1 & this.sequenceMask; + if (this.sequence == 0) { + timestamp = this.tilNextMillis(this.lastTimestamp); + } + } else { + this.sequence = 0; + } + + this.lastTimestamp = timestamp; + return timestamp - this.twepoch << this.timestampLeftShift + | this.datacenterId << this.datacenterIdShift + | this.workerId << this.workerIdShift | this.sequence; + } + + private synchronized long tilNextMillis(long lastTimestamp) { + long timestamp = this.timeGen(); + while (timestamp <= lastTimestamp) { + timestamp = this.timeGen(); + } + return timestamp; + } + + private long timeGen() { + return System.currentTimeMillis(); + } + + public static void main(String[] args) { + SnowflakeIdSequenceHandler gen = new SnowflakeIdSequenceHandler(16); + System.out.println("nextId = " + gen.nextId(null)); + + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/sequence/IncrSequenceZKHandler.java b/src/main/java/io/mycat/route/sequence/handler/ThirftClientSequenceHandler.java similarity index 80% rename from src/main/java/io/mycat/server/sequence/IncrSequenceZKHandler.java rename to src/main/java/io/mycat/route/sequence/handler/ThirftClientSequenceHandler.java index 
2fc02da6e..9b910e3bc 100644 --- a/src/main/java/io/mycat/server/sequence/IncrSequenceZKHandler.java +++ b/src/main/java/io/mycat/route/sequence/handler/ThirftClientSequenceHandler.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,29 +16,26 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.sequence; +package io.mycat.route.sequence.handler; /** - * zookeeper 实现递增序列号 - * + * 通过Thirft客户端获取集群中心分配的全局ID + * * @author Michael - * @time Create on 2013-12-29 下午11:04:47 + * @time Create on 2013-12-25 上午12:15:48 * @version 1.0 */ -public class IncrSequenceZKHandler extends SequenceHandler { - - public SequenceHandler getInstance() { - return null; - } +public class ThirftClientSequenceHandler implements SequenceHandler { @Override public long nextId(String prefixName) { + return 0; } diff --git a/src/main/java/io/mycat/route/util/CacheUtil.java b/src/main/java/io/mycat/route/util/CacheUtil.java new file mode 100644 index 000000000..8b569e452 --- /dev/null +++ b/src/main/java/io/mycat/route/util/CacheUtil.java @@ -0,0 +1,48 @@ +package io.mycat.route.util; + +/** + * Created by 862911 on 2016/5/4. + */ +public class CacheUtil { + private static final int RUNS = 10; + private static final int DIMENSION_1 = 1024 * 1024; + private static final int DIMENSION_2 = 6; + + private static long[][] longs; + + public static void main(String[] args) throws Exception { + Thread.sleep(10000); + longs = new long[DIMENSION_1][]; + for (int i = 0; i < DIMENSION_1; i++) { + longs[i] = new long[DIMENSION_2]; + for (int j = 0; j < DIMENSION_2; j++) { + longs[i][j] = 0L; + } + } + System.out.println("starting...."); + + long sum = 0L; + for (int r = 0; r < RUNS; r++) { + + final long start = System.nanoTime(); + +// slow + for (int j = 0; j < DIMENSION_2; j++) { + for (int i = 0; i < DIMENSION_1; i++) { + sum += longs[i][j]; + } + } + + //fast +// for (int i = 0; i < DIMENSION_1; i++) { +// for (int j = 0; j < DIMENSION_2; j++) { +// sum += longs[i][j]; +// } +// } + + System.out.println((System.nanoTime() - start)); + } + + } + +} diff --git a/src/main/java/io/mycat/route/util/DateUtil.java b/src/main/java/io/mycat/route/util/DateUtil.java deleted file mode 100644 index 1202bfc71..000000000 --- a/src/main/java/io/mycat/route/util/DateUtil.java +++ 
/dev/null @@ -1,242 +0,0 @@ - -package io.mycat.route.util; - -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.Calendar; -import java.util.Date; -import java.util.Locale; - -import org.apache.commons.lang.RandomStringUtils; -import org.apache.commons.lang.time.DateUtils; - -/** - * - * @author yan.yan@huawei.com - */ -public final class DateUtil extends DateUtils { - - /** yyyyMMdd */ - public final static String SHORT_FORMAT = "yyyyMMdd"; - - /** yyyyMMddHHmmss */ - public final static String LONG_FORMAT = "yyyyMMddHHmmss"; - - /** yyyy-MM-dd */ - public final static String WEB_FORMAT = "yyyy-MM-dd"; - - /** HHmmss */ - public final static String TIME_FORMAT = "HHmmss"; - - /** yyyyMM */ - public final static String MONTH_FORMAT = "yyyyMM"; - - /** yyyy年MM月dd�? */ - public final static String CHINA_FORMAT = "yyyy年MM月dd日"; - - /** yyyy-MM-dd HH:mm:ss */ - public final static String LONG_WEB_FORMAT = "yyyy-MM-dd HH:mm:ss"; - - /** yyyy-MM-dd HH:mm */ - public final static String LONG_WEB_FORMAT_NO_SEC = "yyyy-MM-dd HH:mm"; - - /** - * 日期对象解析成日期字符串基础方法,可以据此封装出多种便捷的方法直接使用 - * - * @param date 待格式化的日期对 - * @param format 输出的格式 - * @return 格式化的字符 - */ - public static String format(Date date, String format) { - if (date == null || StringUtil.isBlank(format)) { - return StringUtil.EMPTY; - } - - return new SimpleDateFormat(format, Locale.SIMPLIFIED_CHINESE).format(date); - } - - /** - * 格式化当前时间 - * - * @param format 输出的格式 - * @return - */ - public static String formatCurrent(String format) { - if (StringUtil.isBlank(format)) { - return StringUtil.EMPTY; - } - - return format(new Date(), format); - } - - /** - * 日期字符串解析成日期对象基础方法,可以在此封装出多种便捷的方法直接使用 - * - * @param dateStr 日期字符 - * @param format 输入的格式 - * @return 日期对象 - * @throws ParseException - */ - public static Date parse(String dateStr, String format) throws ParseException { - if (StringUtil.isBlank(format)) { - throw new ParseException("format can not be null.", 0); - } - - if 
(dateStr == null || dateStr.length() < format.length()) { - throw new ParseException("date string's length is too small.", 0); - } - - return new SimpleDateFormat(format, Locale.SIMPLIFIED_CHINESE).parse(dateStr); - } - - /** - * 日期字符串格式化基础方法,可以在此封装出多种便捷的方法直接使�? - * - * @param dateStr 日期字符 - * @param formatIn 输入的日期字符串的格式 - * @param formatOut 输出日期字符串的格式 - * @return 已经格式化的字符 - * @throws ParseException - */ - public static String format(String dateStr, String formatIn, String formatOut) - throws ParseException { - - Date date = parse(dateStr, formatIn); - return format(date, formatOut); - } - - /** - * 把日期对象按yyyyMMdd格式解析成字符串 - * - * @param date 待格式化的日期对 - * @return 格式化的字符串 - */ - public static String formatShort(Date date) { - return format(date, SHORT_FORMAT); - } - - /** - * 把日期字符串按照yyyyMMdd格式,进行格式化 - * - * @param dateStr 待格式化的日期字符串 - * @param formatIn 输入的日期字符串的格式 - * @return 格式化的字符 - */ - public static String formatShort(String dateStr, String formatIn) throws ParseException { - return format(dateStr, formatIn, SHORT_FORMAT); - } - - /** - * 把日期对象按yyyy-MM-dd格式解析成字符串 - * - * @param date 待格式化的日期对 - * @return 格式化的字符 - */ - public static String formatWeb(Date date) { - return format(date, WEB_FORMAT); - } - - /** - * 把日期字符串按照yyyy-MM-dd格式,进行格式化 - * - * @param dateStr 待格式化的日期字符串 - * @param formatIn 输入的日期字符串的格式 - * @return 格式化的字符 - * @throws ParseException - */ - public static String formatWeb(String dateStr, String formatIn) throws ParseException { - return format(dateStr, formatIn, WEB_FORMAT); - } - - /** - * 把日期对象按yyyyMM格式解析成字符串 - * - * @param date 待格式化的日期对 - * @return 格式化的字符 - */ - public static String formatMonth(Date date) { - - return format(date, MONTH_FORMAT); - } - - /** - * 把日期对象按HHmmss格式解析成字符串 - * - * @param date 待格式化的日期对 - * @return 格式化的字符 - */ - public static String formatTime(Date date) { - return format(date, TIME_FORMAT); - } - - /** - * 获取yyyyMMddHHmmss+n位随机数格式的时间戳 - * - * @param n 随机数位 - * @return - */ - public static String getTimestamp(int n) { - 
return formatCurrent(LONG_FORMAT) + RandomStringUtils.randomNumeric(n); - } - - /** - * 根据日期格式返回昨日日期 - * - * @param format 日期格式 - * @return - */ - public static String getYesterdayDate(String format) { - return getDateCompareToday(format, -1, 0); - } - - /** - * 把当日日期作为基准,按照格式返回相差定间隔的日期 - * - * @param format 日期格式 - * @param daysAfter 和当日比相差几天,例如3代表3天后,-1代表1天前 - * @param monthAfter 和当日比相差几月,例如2代表2月后,-3代表3月前 - * @return - */ - public static String getDateCompareToday(String format, int daysAfter, int monthAfter) { - Calendar today = Calendar.getInstance(); - if (daysAfter != 0) { - today.add(Calendar.DATE, daysAfter); - } - if (monthAfter != 0) { - today.add(Calendar.MONTH, monthAfter); - } - return format(today.getTime(), format); - } - - /** - * 根据日期格式返回上月的日期 - * - * @param format - * @return - */ - public static String getLastMonth(String format) { - Calendar today = Calendar.getInstance(); - today.add(Calendar.MONTH, -1); - return format(today.getTime(), format); - } - - /** - * 平移当前时间,以分为单元,minutes - * - * @param minutes - * @return - */ - public static Date addCurMin(long minutes) { - return DateUtils.addMinutes(new Date(), (int) minutes); - } - - /** - * 平移当前时间,以秒为单元,minutes - * - * @param secs - * @return - */ - public static Date addCurSeconds(long secs) { - return addSeconds(new Date(), (int) secs); - } -} diff --git a/src/main/java/io/mycat/route/util/PropertiesUtil.java b/src/main/java/io/mycat/route/util/PropertiesUtil.java new file mode 100644 index 000000000..000586768 --- /dev/null +++ b/src/main/java/io/mycat/route/util/PropertiesUtil.java @@ -0,0 +1,29 @@ +package io.mycat.route.util; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Properties; + +/** + * Property文件加载器 + * + * @author Hash Zhang + * @time 00:08:03 2016/5/3 + * @version 1.0 + */ +public class PropertiesUtil { + public static Properties loadProps(String propsFile){ + Properties props = new Properties(); + InputStream inp = 
Thread.currentThread().getContextClassLoader().getResourceAsStream(propsFile); + + if (inp == null) { + throw new java.lang.RuntimeException("time sequnce properties not found " + propsFile); + } + try { + props.load(inp); + } catch (IOException e) { + throw new java.lang.RuntimeException(e); + } + return props; + } +} diff --git a/src/main/java/io/mycat/route/util/RouterUtil.java b/src/main/java/io/mycat/route/util/RouterUtil.java index 84e51edac..b5bb73cb4 100644 --- a/src/main/java/io/mycat/route/util/RouterUtil.java +++ b/src/main/java/io/mycat/route/util/RouterUtil.java @@ -1,6 +1,32 @@ package io.mycat.route.util; +import java.sql.SQLNonTransientException; +import java.sql.SQLSyntaxErrorException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.regex.Pattern; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import com.alibaba.druid.sql.ast.SQLExpr; +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.expr.SQLCharExpr; +import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.druid.sql.ast.statement.SQLCharacterDataType; +import com.alibaba.druid.sql.ast.statement.SQLColumnDefinition; +import com.alibaba.druid.sql.ast.statement.SQLCreateTableStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement; import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; import com.alibaba.druid.wall.spi.WallVisitorUtils; @@ -8,118 +34,256 @@ import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import 
com.google.common.util.concurrent.ListenableFuture; + import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.mysql.nio.handler.FetchStoreNodeOfChildTableHandler; import io.mycat.cache.LayerCachePool; +import io.mycat.config.ErrorCode; +import io.mycat.config.MycatConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.config.model.rule.RuleConfig; import io.mycat.route.RouteResultset; import io.mycat.route.RouteResultsetNode; import io.mycat.route.SessionSQLPair; import io.mycat.route.function.AbstractPartitionAlgorithm; +import io.mycat.route.function.SlotFunction; import io.mycat.route.parser.druid.DruidShardingParseInfo; import io.mycat.route.parser.druid.RouteCalculateUnit; -import io.mycat.server.ErrorCode; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.RuleConfig; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.TableConfig; -import io.mycat.server.executors.FetchStoreNodeOfChildTableHandler; +import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; import io.mycat.sqlengine.mpp.ColumnRoutePair; import io.mycat.sqlengine.mpp.LoadData; import io.mycat.util.StringUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.sql.SQLNonTransientException; -import java.sql.SQLSyntaxErrorException; -import java.util.*; -import java.util.concurrent.Callable; - /** * 从ServerRouterUtil中抽取的一些公用方法,路由解析工具类 * @author wang.dw * */ public class RouterUtil { - private static final Logger LOGGER = LoggerFactory.getLogger(RouterUtil.class); - /** - * 移除执行语句中的数据库名 + + private static final Logger LOGGER = LoggerFactory.getLogger(RouterUtil.class); + + /** + * 移除执行语句中的数据库名 + * + * @param stmt 执行语句 + * @param schema 数据库名 + * @return 执行语句 + * @author mycat 
* - * @param stmt - * 执行语句 - * @param schema - * 数据库名 - * @return 执行语句 - * @author mycat - */ - - public static String removeSchema(String stmt, String schema) { + * @modification 修正移除schema的方法 + * @date 2016/12/29 + * @modifiedBy Hash Zhang + * + */ + public static String removeSchema(String stmt, String schema) { final String upStmt = stmt.toUpperCase(); final String upSchema = schema.toUpperCase() + "."; + final String upSchema2 = new StringBuilder("`").append(schema.toUpperCase()).append("`.").toString(); int strtPos = 0; int indx = 0; - boolean flag = false; - indx = upStmt.indexOf(upSchema, strtPos); + + int indx1 = upStmt.indexOf(upSchema, strtPos); + int indx2 = upStmt.indexOf(upSchema2, strtPos); + boolean flag = indx1 < indx2 ? indx1 == -1 : indx2 != -1; + indx = !flag ? indx1 > 0 ? indx1 : indx2 : indx2 > 0 ? indx2 : indx1; if (indx < 0) { - StringBuilder sb = new StringBuilder("`").append( - schema.toUpperCase()).append("`."); - indx = upStmt.indexOf(sb.toString(), strtPos); - flag = true; - if (indx < 0) { - return stmt; - } + return stmt; } + + int firstE = upStmt.indexOf("'"); + int endE = upStmt.lastIndexOf("'"); + StringBuilder sb = new StringBuilder(); while (indx > 0) { sb.append(stmt.substring(strtPos, indx)); - strtPos = indx + upSchema.length(); + if (flag) { - strtPos += 2; + strtPos = indx + upSchema2.length(); + } else { + strtPos = indx + upSchema.length(); } - indx = upStmt.indexOf(upSchema, strtPos); + if (indx > firstE && indx < endE && countChar(stmt, indx) % 2 == 1) { + sb.append(stmt.substring(indx, indx + schema.length() + 1)); + } + indx1 = upStmt.indexOf(upSchema, strtPos); + indx2 = upStmt.indexOf(upSchema2, strtPos); + flag = indx1 < indx2 ? indx1 == -1 : indx2 != -1; + indx = !flag ? indx1 > 0 ? indx1 : indx2 : indx2 > 0 ? 
indx2 : indx1; } sb.append(stmt.substring(strtPos)); return sb.toString(); } - /** - * 获取第一个节点作为路由 - * - * @param rrs - * 数据路由集合 - * @param dataNode - * 数据库所在节点 - * @param stmt - * 执行语句 - * @return 数据路由集合 - * @author mycat - */ - public static RouteResultset routeToSingleNode(RouteResultset rrs, - String dataNode, String stmt) { - if (dataNode == null) { - return rrs; - } - RouteResultsetNode[] nodes = new RouteResultsetNode[1]; - nodes[0] = new RouteResultsetNode(dataNode, rrs.getSqlType(), stmt);//rrs.getStatement() - rrs.setNodes(nodes); - rrs.setFinishedRoute(true); - if (rrs.getCanRunInReadDB() != null) { - nodes[0].setCanRunInReadDB(rrs.getCanRunInReadDB()); - } - return rrs; - } + private static int countChar(String sql,int end) + { + int count=0; + boolean skipChar = false; + for (int i = 0; i < end; i++) { + if(sql.charAt(i)=='\'' && !skipChar) { + count++; + skipChar = false; + }else if( sql.charAt(i)=='\\'){ + skipChar = true; + }else{ + skipChar = false; + } + } + return count; + } - /** - * 获取table名字 - * - * @param stmt - * 执行语句 - * @param repPos - * 开始位置和位数 - * @return 表名 - * @author AStoneGod - */ - public static String getTableName(String stmt, int[] repPos) { + /** + * 获取第一个节点作为路由 + * + * @param rrs 数据路由集合 + * @param dataNode 数据库所在节点 + * @param stmt 执行语句 + * @return 数据路由集合 + * + * @author mycat + */ + public static RouteResultset routeToSingleNode(RouteResultset rrs, + String dataNode, String stmt) { + if (dataNode == null) { + return rrs; + } + RouteResultsetNode[] nodes = new RouteResultsetNode[1]; + nodes[0] = new RouteResultsetNode(dataNode, rrs.getSqlType(), stmt);//rrs.getStatement() + nodes[0].setSource(rrs); + rrs.setNodes(nodes); + rrs.setFinishedRoute(true); + if(rrs.getDataNodeSlotMap().containsKey(dataNode)){ + nodes[0].setSlot(rrs.getDataNodeSlotMap().get(dataNode)); + } + if (rrs.getCanRunInReadDB() != null) { + nodes[0].setCanRunInReadDB(rrs.getCanRunInReadDB()); + } + if(rrs.getRunOnSlave() != null){ + 
nodes[0].setRunOnSlave(rrs.getRunOnSlave()); + } + + return rrs; + } + + + + /** + * 修复DDL路由 + * + * @return RouteResultset + * @author aStoneGod + */ + public static RouteResultset routeToDDLNode(RouteResultset rrs, int sqlType, String stmt,SchemaConfig schema) throws SQLSyntaxErrorException { + stmt = getFixedSql(stmt); + String tablename = ""; + final String upStmt = stmt.toUpperCase(); + if(upStmt.startsWith("CREATE")){ + if (upStmt.contains("CREATE INDEX ") || upStmt.contains("CREATE UNIQUE INDEX ")){ + tablename = RouterUtil.getTableName(stmt, RouterUtil.getCreateIndexPos(upStmt, 0)); + }else { + tablename = RouterUtil.getTableName(stmt, RouterUtil.getCreateTablePos(upStmt, 0)); + } + }else if(upStmt.startsWith("DROP")){ + if (upStmt.contains("DROP INDEX ")){ + tablename = RouterUtil.getTableName(stmt, RouterUtil.getDropIndexPos(upStmt, 0)); + }else { + tablename = RouterUtil.getTableName(stmt, RouterUtil.getDropTablePos(upStmt, 0)); + } + }else if(upStmt.startsWith("ALTER")){ + tablename = RouterUtil.getTableName(stmt, RouterUtil.getAlterTablePos(upStmt, 0)); + }else if (upStmt.startsWith("TRUNCATE")){ + tablename = RouterUtil.getTableName(stmt, RouterUtil.getTruncateTablePos(upStmt, 0)); + } + tablename = tablename.toUpperCase(); + + if (schema.getTables().containsKey(tablename)){ + if(ServerParse.DDL==sqlType){ + List dataNodes = new ArrayList<>(); + Map tables = schema.getTables(); + TableConfig tc=tables.get(tablename); + if (tables != null && (tc != null)) { + dataNodes = tc.getDataNodes(); + } + boolean isSlotFunction= tc.getRule() != null && tc.getRule().getRuleAlgorithm() instanceof SlotFunction; + Iterator iterator1 = dataNodes.iterator(); + int nodeSize = dataNodes.size(); + RouteResultsetNode[] nodes = new RouteResultsetNode[nodeSize]; + if(isSlotFunction){ + stmt=changeCreateTable(schema,tablename,stmt); + } + for(int i=0;i 0) { + tableName = tableName.substring(ind2 + 1); } + return tableName; + } + + + /** + * 获取show语句table名字 + * + * @param 
stmt 执行语句 + * @param repPos 开始位置和位数 + * @return 表名 + * @author AStoneGod + */ + public static String getShowTableName(String stmt, int[] repPos) { + int startPos = repPos[0]; + int secInd = stmt.indexOf(' ', startPos + 1); + if (secInd < 0) { + secInd = stmt.length(); + } + + repPos[1] = secInd; + String tableName = stmt.substring(startPos, secInd).trim(); + int ind2 = tableName.indexOf('.'); if (ind2 > 0) { tableName = tableName.substring(ind2 + 1); @@ -151,240 +338,70 @@ public static String getTableName(String stmt, int[] repPos) { return tableName; } - /** - * 获取语句中前关键字位置和占位个数表名位置 - * - * @param upStmt - * 执行语句 - * @param start - * 开始位置 - * @return int[]关键字位置和占位个数 - * @author mycat - */ - public static int[] getCreateTablePos(String upStmt, int start) { + /** + * 获取语句中前关键字位置和占位个数表名位置 + * + * @param upStmt 执行语句 + * @param start 开始位置 + * @return int[] 关键字位置和占位个数 + * + * @author mycat + * + * @modification 修改支持语句中包含“IF NOT EXISTS”的情况 + * @date 2016/12/8 + * @modifiedBy Hash Zhang + */ + public static int[] getCreateTablePos(String upStmt, int start) { String token1 = "CREATE "; String token2 = " TABLE "; + String token3 = " EXISTS "; int createInd = upStmt.indexOf(token1, start); - int tabInd = upStmt.indexOf(token2, start); + int tabInd1 = upStmt.indexOf(token2, start); + int tabInd2 = upStmt.indexOf(token3, tabInd1); // 既包含CREATE又包含TABLE,且CREATE关键字在TABLE关键字之前 - if (createInd >= 0 && tabInd > 0 && tabInd > createInd) { - return new int[] { tabInd, token2.length() }; + if (createInd >= 0 && tabInd2 > 0 && tabInd2 > createInd) { + return new int[] { tabInd2, token3.length() }; + } else if(createInd >= 0 && tabInd1 > 0 && tabInd1 > createInd) { + return new int[] { tabInd1, token2.length() }; } else { return new int[] { -1, token2.length() };// 不满足条件时,只关注第一个返回值为-1,第二个任意 } } - /** - * 获取语句中前关键字位置和占位个数表名位置 - * - * @param upStmt - * 执行语句 - * @param start - * 开始位置 - * @return int[]关键字位置和占位个数 - * @author mycat - */ - public static int[] getSpecPos(String upStmt, int 
start) { - String token1 = " FROM "; - String token2 = " IN "; - int tabInd1 = upStmt.indexOf(token1, start); - int tabInd2 = upStmt.indexOf(token2, start); - if (tabInd1 > 0) { - if (tabInd2 < 0) { - return new int[] { tabInd1, token1.length() }; - } - return (tabInd1 < tabInd2) ? new int[] { tabInd1, token1.length() } - : new int[] { tabInd2, token2.length() }; + /** + * 获取语句中前关键字位置和占位个数表名位置 + * + * @param upStmt + * 执行语句 + * @param start + * 开始位置 + * @return int[]关键字位置和占位个数 + * @author aStoneGod + */ + public static int[] getCreateIndexPos(String upStmt, int start) { + String token1 = "CREATE "; + String token2 = " INDEX "; + String token3 = " ON "; + int createInd = upStmt.indexOf(token1, start); + int idxInd = upStmt.indexOf(token2, start); + int onInd = upStmt.indexOf(token3, start); + // 既包含CREATE又包含INDEX,且CREATE关键字在INDEX关键字之前, 且包含ON... + if (createInd >= 0 && idxInd > 0 && idxInd > createInd && onInd > 0 && onInd > idxInd) { + return new int[] {onInd , token3.length() }; } else { - return new int[] { tabInd2, token2.length() }; - } - } - - /** - * 获取开始位置后的 LIKE、WHERE 位置 如果不含 LIKE、WHERE 则返回执行语句的长度 - * - * @param upStmt - * 执行sql - * @param start - * 开发位置 - * @return int - * @author mycat - */ - public static int getSpecEndPos(String upStmt, int start) { - int tabInd = upStmt.indexOf(" LIKE ", start); - if (tabInd < 0) { - tabInd = upStmt.indexOf(" WHERE ", start); - } - if (tabInd < 0) { - return upStmt.length(); + return new int[] { -1, token2.length() };// 不满足条件时,只关注第一个返回值为-1,第二个任意 } - return tabInd; } - public static boolean processWithMycatSeq(SchemaConfig schema, int sqlType, - String origSQL, MySQLFrontConnection sc) { - // check if origSQL is with global sequence - // @micmiu it is just a simple judgement - if (origSQL.indexOf(" MYCATSEQ_") != -1) { - processSQL(sc,schema,origSQL,sqlType); - return true; - } - return false; - } - - public static void processSQL(MySQLFrontConnection sc,SchemaConfig schema,String sql,int sqlType){ - 
MycatServer.getInstance().getSequnceProcessor().addNewSql(new SessionSQLPair(sc.getSession2(), schema, sql, sqlType)); - } - - public static boolean processInsert(SchemaConfig schema, int sqlType, - String origSQL, MySQLFrontConnection sc) throws SQLNonTransientException { - String tableName = StringUtil.getTableName(origSQL).toUpperCase(); - TableConfig tableConfig = schema.getTables().get(tableName); - boolean processedInsert=false; - if (null != tableConfig && tableConfig.isAutoIncrement()) { - String primaryKey = tableConfig.getPrimaryKey(); - processedInsert=processInsert(sc,schema,sqlType,origSQL,tableName,primaryKey); - } - return processedInsert; - } - - private static boolean isPKInFields(String origSQL,String primaryKey,int firstLeftBracketIndex,int firstRightBracketIndex){ - if(primaryKey==null) - { - throw new RuntimeException("please make sure the primaryKey's config is not null in schemal.xml") ; - } - boolean isPrimaryKeyInFields=false; - String upperSQL=origSQL.substring(firstLeftBracketIndex,firstRightBracketIndex+1).toUpperCase(); - for(int pkOffset=0,primaryKeyLength=primaryKey.length(),pkStart=0;;){ - pkStart=upperSQL.indexOf(primaryKey, pkOffset); - if(pkStart>=0 && pkStart 0 &&fromIndex>0&&selectIndex>firstRightBracketIndex&&valuesIndex<0) { - String msg = "multi insert not provided" ; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - - if(valuesIndex + "VALUES".length() <= firstLeftBracketIndex) { - throw new SQLSyntaxErrorException("insert must provide ColumnList"); - } - - boolean processedInsert=!isPKInFields(origSQL,primaryKey,firstLeftBracketIndex,firstRightBracketIndex); - if(processedInsert){ - processInsert(sc,schema,sqlType,origSQL,tableName,primaryKey,firstLeftBracketIndex+1,origSQL.indexOf('(',firstRightBracketIndex)+1); - } - return processedInsert; - } - - private static void processInsert(MySQLFrontConnection sc,SchemaConfig schema,int sqlType,String origSQL,String tableName,String primaryKey,int 
afterFirstLeftBracketIndex,int afterLastLeftBracketIndex){ - int primaryKeyLength=primaryKey.length(); - int insertSegOffset=afterFirstLeftBracketIndex; - String mycatSeqPrefix="next value for MYCATSEQ_"; - int mycatSeqPrefixLength=mycatSeqPrefix.length(); - int tableNameLength=tableName.length(); - - char[] newSQLBuf=new char[origSQL.length()+primaryKeyLength+mycatSeqPrefixLength+tableNameLength+2]; - origSQL.getChars(0, afterFirstLeftBracketIndex, newSQLBuf, 0); - primaryKey.getChars(0,primaryKeyLength,newSQLBuf,insertSegOffset); - insertSegOffset+=primaryKeyLength; - newSQLBuf[insertSegOffset]=','; - insertSegOffset++; - origSQL.getChars(afterFirstLeftBracketIndex,afterLastLeftBracketIndex,newSQLBuf,insertSegOffset); - insertSegOffset+=afterLastLeftBracketIndex-afterFirstLeftBracketIndex; - mycatSeqPrefix.getChars(0, mycatSeqPrefixLength, newSQLBuf, insertSegOffset); - insertSegOffset+=mycatSeqPrefixLength; - tableName.getChars(0,tableNameLength,newSQLBuf,insertSegOffset); - insertSegOffset+=tableNameLength; - newSQLBuf[insertSegOffset]=','; - insertSegOffset++; - origSQL.getChars(afterLastLeftBracketIndex, origSQL.length(), newSQLBuf, insertSegOffset); - processSQL(sc,schema,new String(newSQLBuf),sqlType); - } - - - /** - * 获取show语句table名字 - * - * @param stmt - * 执行语句 - * @param repPos - * 开始位置和位数 - * @return 表名 - * @author AStoneGod - */ - public static String getShowTableName(String stmt, int[] repPos) { - int startPos = repPos[0]; - int secInd = stmt.indexOf(' ', startPos + 1); - if (secInd < 0) { - secInd = stmt.length(); - } - - repPos[1] = secInd; - String tableName = stmt.substring(startPos, secInd).trim(); - - int ind2 = tableName.indexOf('.'); - if (ind2 > 0) { - tableName = tableName.substring(ind2 + 1); - } - return tableName; - } - - /** - * 处理SQL - * - * @param stmt - * 执行语句 - * @return 处理后SQL - * @author AStoneGod - */ - - public static String getFixedSql(String stmt){ - if (stmt.endsWith(";")) - stmt = stmt.substring(0,stmt.length()-2); - return 
stmt = stmt.trim().replace("`",""); - } - - /** - * 获取ALTER语句中前关键字位置和占位个数表名位置 - * - * @param upStmt - * 执行语句 - * @param start - * 开始位置 - * @return int[]关键字位置和占位个数 - * @author aStoneGod - */ - public static int[] getAlterTablePos(String upStmt, int start) { + /** + * 获取ALTER语句中前关键字位置和占位个数表名位置 + * + * @param upStmt 执行语句 + * @param start 开始位置 + * @return int[] 关键字位置和占位个数 + * @author aStoneGod + */ + public static int[] getAlterTablePos(String upStmt, int start) { String token1 = "ALTER "; String token2 = " TABLE "; int createInd = upStmt.indexOf(token1, start); @@ -397,17 +414,15 @@ public static int[] getAlterTablePos(String upStmt, int start) { } } - /** - * 获取DROP语句中前关键字位置和占位个数表名位置 - * - * @param upStmt - * 执行语句 - * @param start - * 开始位置 - * @return int[]关键字位置和占位个数 - * @author aStoneGod - */ - public static int[] getDropTablePos(String upStmt, int start) { + /** + * 获取DROP语句中前关键字位置和占位个数表名位置 + * + * @param upStmt 执行语句 + * @param start 开始位置 + * @return int[] 关键字位置和占位个数 + * @author aStoneGod + */ + public static int[] getDropTablePos(String upStmt, int start) { //增加 if exists判断 if(upStmt.contains("EXISTS")){ String token1 = "IF "; @@ -434,17 +449,41 @@ public static int[] getDropTablePos(String upStmt, int start) { } - /** - * 获取TRUNCATE语句中前关键字位置和占位个数表名位置 - * - * @param upStmt - * 执行语句 - * @param start - * 开始位置 - * @return int[]关键字位置和占位个数 - * @author aStoneGod - */ - public static int[] getTruncateTablePos(String upStmt, int start) { + /** + * 获取DROP语句中前关键字位置和占位个数表名位置 + * + * @param upStmt + * 执行语句 + * @param start + * 开始位置 + * @return int[]关键字位置和占位个数 + * @author aStoneGod + */ + + public static int[] getDropIndexPos(String upStmt, int start) { + String token1 = "DROP "; + String token2 = " INDEX "; + String token3 = " ON "; + int createInd = upStmt.indexOf(token1, start); + int idxInd = upStmt.indexOf(token2, start); + int onInd = upStmt.indexOf(token3, start); + // 既包含CREATE又包含INDEX,且CREATE关键字在INDEX关键字之前, 且包含ON... 
+ if (createInd >= 0 && idxInd > 0 && idxInd > createInd && onInd > 0 && onInd > idxInd) { + return new int[] {onInd , token3.length() }; + } else { + return new int[] { -1, token2.length() };// 不满足条件时,只关注第一个返回值为-1,第二个任意 + } + } + + /** + * 获取TRUNCATE语句中前关键字位置和占位个数表名位置 + * + * @param upStmt 执行语句 + * @param start 开始位置 + * @return int[] 关键字位置和占位个数 + * @author aStoneGod + */ + public static int[] getTruncateTablePos(String upStmt, int start) { String token1 = "TRUNCATE "; String token2 = " TABLE "; int createInd = upStmt.indexOf(token1, start); @@ -457,208 +496,442 @@ public static int[] getTruncateTablePos(String upStmt, int start) { } } - /** - * 修复DDL路由 - * - * @return RouteResultset - * @author aStoneGod - */ - public static RouteResultset routeToDDLNode(RouteResultset rrs, int sqlType, String stmt,SchemaConfig schema) throws SQLSyntaxErrorException { - //检查表是否在配置文件中 - stmt = getFixedSql(stmt); - String tablename = ""; - final String upStmt = stmt.toUpperCase(); - if(upStmt.startsWith("CREATE")){ - tablename = RouterUtil.getTableName(stmt, RouterUtil.getCreateTablePos(upStmt, 0)); - }else if(upStmt.startsWith("DROP")){ - tablename = RouterUtil.getTableName(stmt, RouterUtil.getDropTablePos(upStmt, 0)); - }else if(upStmt.startsWith("ALTER")){ - tablename = RouterUtil.getTableName(stmt, RouterUtil.getAlterTablePos(upStmt, 0)); - }else if (upStmt.startsWith("TRUNCATE")){ - tablename = RouterUtil.getTableName(stmt, RouterUtil.getTruncateTablePos(upStmt, 0)); + /** + * 获取语句中前关键字位置和占位个数表名位置 + * + * @param upStmt 执行语句 + * @param start 开始位置 + * @return int[] 关键字位置和占位个数 + * @author mycat + */ + public static int[] getSpecPos(String upStmt, int start) { + String token1 = " FROM "; + String token2 = " IN "; + int tabInd1 = upStmt.indexOf(token1, start); + int tabInd2 = upStmt.indexOf(token2, start); + if (tabInd1 > 0) { + if (tabInd2 < 0) { + return new int[] { tabInd1, token1.length() }; + } + return (tabInd1 < tabInd2) ? 
new int[] { tabInd1, token1.length() } + : new int[] { tabInd2, token2.length() }; + } else { + return new int[] { tabInd2, token2.length() }; } - tablename = tablename.toUpperCase(); - - if (schema.getTables().containsKey(tablename)){ - if(ServerParse.DDL==sqlType){ - List dataNodes = new ArrayList<>(); - Map tables = schema.getTables(); - TableConfig tc; - if (tables != null && (tc = tables.get(tablename)) != null) { - dataNodes = tc.getDataNodes(); - } - Iterator iterator1 = dataNodes.iterator(); - int nodeSize = dataNodes.size(); - RouteResultsetNode[] nodes = new RouteResultsetNode[nodeSize]; + } - for(int i=0;i dataNodes, String stmt) { - RouteResultsetNode[] nodes = new RouteResultsetNode[dataNodes.size()]; - int i = 0; - RouteResultsetNode node; - for (String dataNode : dataNodes) { - node = new RouteResultsetNode(dataNode, rrs.getSqlType(), stmt); - if (rrs.getCanRunInReadDB() != null) { - node.setCanRunInReadDB(rrs.getCanRunInReadDB()); - } - nodes[i++] = node; - } - rrs.setCacheAble(cache); - rrs.setNodes(nodes); - return rrs; - } + public static void processSQL(ServerConnection sc,SchemaConfig schema,String sql,int sqlType){ +// int sequenceHandlerType = MycatServer.getInstance().getConfig().getSystem().getSequnceHandlerType(); + final SessionSQLPair sessionSQLPair = new SessionSQLPair(sc.getSession2(), schema, sql, sqlType); +// modify by yanjunli 序列获取修改为多线程方式。使用分段锁方式,一个序列一把锁。 begin +// MycatServer.getInstance().getSequnceProcessor().addNewSql(sessionSQLPair); + MycatServer.getInstance().getSequenceExecutor().execute(new Runnable() { + @Override + public void run() { + MycatServer.getInstance().getSequnceProcessor().executeSeq(sessionSQLPair); + } + }); +// modify 序列获取修改为多线程方式。使用分段锁方式,一个序列一把锁。 end +// } + } - public static RouteResultset routeToMultiNode(boolean cache,RouteResultset rrs, Collection dataNodes, String stmt,boolean isGlobalTable) { - rrs=routeToMultiNode(cache,rrs,dataNodes,stmt); - rrs.setGlobalTable(isGlobalTable); - return rrs; - } + 
public static boolean processInsert(SchemaConfig schema, int sqlType, + String origSQL, ServerConnection sc) throws SQLNonTransientException { + String tableName = StringUtil.getTableName(origSQL).toUpperCase(); + TableConfig tableConfig = schema.getTables().get(tableName); + boolean processedInsert=false; + //判断是有自增字段 + if (null != tableConfig && tableConfig.isAutoIncrement()) { + String primaryKey = tableConfig.getPrimaryKey(); + processedInsert=processInsert(sc,schema,sqlType,origSQL,tableName,primaryKey); + } + return processedInsert; + } - public static void routeForTableMeta(RouteResultset rrs, - SchemaConfig schema, String tableName, String sql) { - String dataNode = null; - if (isNoSharding(schema,tableName)) {//不分库的直接从schema中获取dataNode - dataNode = schema.getDataNode(); - } else { - dataNode = getMetaReadDataNode(schema, tableName); - } + private static boolean isPKInFields(String origSQL,String primaryKey,int firstLeftBracketIndex,int firstRightBracketIndex){ - RouteResultsetNode[] nodes = new RouteResultsetNode[1]; - nodes[0] = new RouteResultsetNode(dataNode, rrs.getSqlType(), sql); - if (rrs.getCanRunInReadDB() != null) { - nodes[0].setCanRunInReadDB(rrs.getCanRunInReadDB()); - } - rrs.setNodes(nodes); - } + if (primaryKey == null) { + throw new RuntimeException("please make sure the primaryKey's config is not null in schemal.xml"); + } - /** - * 根据标名随机获取一个节点 - * - * @param schema - * 数据库名 - * @param table - * 表名 - * @return 数据节点 - * @author mycat - */ - private static String getMetaReadDataNode(SchemaConfig schema, - String table) { - // Table名字被转化为大写的,存储在schema - table = table.toUpperCase(); - String dataNode = null; - Map tables = schema.getTables(); - TableConfig tc; - if (tables != null && (tc = tables.get(table)) != null) { - dataNode = tc.getRandomDataNode(); - } - return dataNode; + boolean isPrimaryKeyInFields = false; + String upperSQL = origSQL.substring(firstLeftBracketIndex, firstRightBracketIndex + 1).toUpperCase(); + for (int pkOffset = 
0, primaryKeyLength = primaryKey.length(), pkStart = 0;;) { + pkStart = upperSQL.indexOf(primaryKey, pkOffset); + if (pkStart >= 0 && pkStart < firstRightBracketIndex) { + char pkSide = upperSQL.charAt(pkStart - 1); + if (pkSide <= ' ' || pkSide == '`' || pkSide == ',' || pkSide == '(') { + pkSide = upperSQL.charAt(pkStart + primaryKey.length()); + isPrimaryKeyInFields = pkSide <= ' ' || pkSide == '`' || pkSide == ',' || pkSide == ')'; + } + if (isPrimaryKeyInFields) { + break; + } + pkOffset = pkStart + primaryKeyLength; + } else { + break; + } + } + return isPrimaryKeyInFields; + } + + public static boolean processInsert(ServerConnection sc,SchemaConfig schema, + int sqlType,String origSQL,String tableName,String primaryKey) throws SQLNonTransientException { + + int firstLeftBracketIndex = origSQL.indexOf("("); + int firstRightBracketIndex = origSQL.indexOf(")"); + String upperSql = origSQL.toUpperCase(); + int valuesIndex = upperSql.indexOf("VALUES"); + int selectIndex = upperSql.indexOf("SELECT"); + int fromIndex = upperSql.indexOf("FROM"); + //屏蔽insert into table1 select * from table2语句 + if(firstLeftBracketIndex < 0) { + String msg = "invalid sql:" + origSQL; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + //屏蔽批量插入 + if(selectIndex > 0 &&fromIndex>0&&selectIndex>firstRightBracketIndex&&valuesIndex<0) { + String msg = "multi insert not provided" ; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + //插入语句必须提供列结构,因为MyCat默认对于表结构无感知 + if(valuesIndex + "VALUES".length() <= firstLeftBracketIndex) { + throw new SQLSyntaxErrorException("insert must provide ColumnList"); + } + //如果主键不在插入语句的fields中,则需要进一步处理 + boolean processedInsert=!isPKInFields(origSQL,primaryKey,firstLeftBracketIndex,firstRightBracketIndex); + if(processedInsert){ + handleBatchInsert(sc, schema, sqlType,origSQL, valuesIndex,tableName,primaryKey); + } + return processedInsert; + } + + public static List handleBatchInsert(String origSQL, int valuesIndex) { + List 
handledSQLs = new LinkedList<>(); + String prefix = origSQL.substring(0, valuesIndex + "VALUES".length()); + String values = origSQL.substring(valuesIndex + "VALUES".length()); + int flag = 0; + StringBuilder currentValue = new StringBuilder(); + currentValue.append(prefix); + for (int i = 0; i < values.length(); i++) { + char j = values.charAt(i); + if (j == '(' && flag == 0) { + flag = 1; + currentValue.append(j); + } else if (j == '\"' && flag == 1) { + flag = 2; + currentValue.append(j); + } else if (j == '\'' && flag == 1) { + flag = 3; + currentValue.append(j); + } else if (j == '\\' && flag == 2) { + flag = 4; + currentValue.append(j); + } else if (j == '\\' && flag == 3) { + flag = 5; + currentValue.append(j); + } else if (flag == 4) { + flag = 2; + currentValue.append(j); + } else if (flag == 5) { + flag = 3; + currentValue.append(j); + } else if (j == '\"' && flag == 2) { + flag = 1; + currentValue.append(j); + } else if (j == '\'' && flag == 3) { + flag = 1; + currentValue.append(j); + } else if (j == ')' && flag == 1) { + flag = 0; + currentValue.append(j); + handledSQLs.add(currentValue.toString()); + currentValue = new StringBuilder(); + currentValue.append(prefix); + } else if (j == ',' && flag == 0) { + continue; + } else { + currentValue.append(j); + } + } + return handledSQLs; + } + + /** + * 对于主键不在插入语句的fields中的SQL,需要改写。比如hotnews主键为id,插入语句为: + * insert into hotnews(title) values('aaa'); + * 需要改写成: + * insert into hotnews(id, title) values(next value for MYCATSEQ_hotnews,'aaa'); + */ + public static void handleBatchInsert(ServerConnection sc, SchemaConfig schema, + int sqlType,String origSQL, int valuesIndex,String tableName, String primaryKey) { + + final String pk = "\\("+primaryKey+","; + final String mycatSeqPrefix = "(next value for MYCATSEQ_"+tableName.toUpperCase()+","; + + /*"VALUES".length() ==6 */ + String prefix = origSQL.substring(0, valuesIndex + 6); + String values = origSQL.substring(valuesIndex + 6); + + prefix = 
prefix.replaceFirst("\\(", pk); + values = values.replaceFirst("\\(", mycatSeqPrefix); + values =Pattern.compile(",\\s*\\(").matcher(values).replaceAll(","+mycatSeqPrefix); + processSQL(sc, schema,prefix+values, sqlType); } - /** - * 根据 ER分片规则获取路由集合 - * - * @param stmt - * 执行的语句 - * @param rrs - * 数据路由集合 - * @param tc - * 表实体 - * @param joinKeyVal - * 连接属性 - * @return RouteResultset(数据路由集合) - * @throws SQLNonTransientException - * @author mycat - */ - - public static RouteResultset routeByERParentKey(MySQLFrontConnection sc,SchemaConfig schema, - int sqlType,String stmt, - RouteResultset rrs, TableConfig tc, String joinKeyVal) - throws SQLNonTransientException { - // only has one parent level and ER parent key is parent - // table's partition key - if (tc.isSecondLevel() - && tc.getParentTC().getPartitionColumn() - .equals(tc.getParentKey())) { // using - // parent - // rule to - // find - // datanode - Set parentColVal = new HashSet(1); - ColumnRoutePair pair = new ColumnRoutePair(joinKeyVal); - parentColVal.add(pair); - Set dataNodeSet = ruleCalculate(tc.getParentTC(), - parentColVal); - if (dataNodeSet.isEmpty() || dataNodeSet.size() > 1) { - throw new SQLNonTransientException( - "parent key can't find valid datanode ,expect 1 but found: " - + dataNodeSet.size()); - } - String dn = dataNodeSet.iterator().next(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("found partion node (using parent partion rule directly) for child table to insert " - + dn + " sql :" + stmt); + public static RouteResultset routeToMultiNode(boolean cache,RouteResultset rrs, Collection dataNodes, String stmt) { + RouteResultsetNode[] nodes = new RouteResultsetNode[dataNodes.size()]; + int i = 0; + RouteResultsetNode node; + for (String dataNode : dataNodes) { + node = new RouteResultsetNode(dataNode, rrs.getSqlType(), stmt); + node.setSource(rrs); + if(rrs.getDataNodeSlotMap().containsKey(dataNode)){ + node.setSlot(rrs.getDataNodeSlotMap().get(dataNode)); + } + if 
(rrs.getCanRunInReadDB() != null) { + node.setCanRunInReadDB(rrs.getCanRunInReadDB()); + } + if(rrs.getRunOnSlave() != null){ + nodes[0].setRunOnSlave(rrs.getRunOnSlave()); + } + nodes[i++] = node; + } + rrs.setCacheAble(cache); + rrs.setNodes(nodes); + return rrs; + } + + public static RouteResultset routeToMultiNode(boolean cache, RouteResultset rrs, Collection dataNodes, + String stmt, boolean isGlobalTable) { + + rrs = routeToMultiNode(cache, rrs, dataNodes, stmt); + rrs.setGlobalTable(isGlobalTable); + return rrs; + } + + public static void routeForTableMeta(RouteResultset rrs, + SchemaConfig schema, String tableName, String sql) { + String dataNode = null; + if (isNoSharding(schema,tableName)) {//不分库的直接从schema中获取dataNode + dataNode = schema.getDataNode(); + } else { + dataNode = getMetaReadDataNode(schema, tableName); + } + + RouteResultsetNode[] nodes = new RouteResultsetNode[1]; + nodes[0] = new RouteResultsetNode(dataNode, rrs.getSqlType(), sql); + nodes[0].setSource(rrs); + if(rrs.getDataNodeSlotMap().containsKey(dataNode)){ + nodes[0].setSlot(rrs.getDataNodeSlotMap().get(dataNode)); + } + if (rrs.getCanRunInReadDB() != null) { + nodes[0].setCanRunInReadDB(rrs.getCanRunInReadDB()); + } + if(rrs.getRunOnSlave() != null){ + nodes[0].setRunOnSlave(rrs.getRunOnSlave()); + } + rrs.setNodes(nodes); + } + + /** + * 根据表名随机获取一个节点 + * + * @param schema 数据库名 + * @param table 表名 + * @return 数据节点 + * @author mycat + */ + private static String getMetaReadDataNode(SchemaConfig schema, + String table) { + // Table名字被转化为大写的,存储在schema + table = table.toUpperCase(); + String dataNode = null; + Map tables = schema.getTables(); + TableConfig tc; + if (tables != null && (tc = tables.get(table)) != null) { + dataNode = getAliveRandomDataNode(tc); + } + return dataNode; + } + + /** + * 解决getRandomDataNode方法获取错误节点的问题. 
+ * @param tc + * @return + */ + private static String getAliveRandomDataNode(TableConfig tc) { + List randomDns = tc.getDataNodes(); + + MycatConfig mycatConfig = MycatServer.getInstance().getConfig(); + if (mycatConfig != null) { + for (String randomDn : randomDns) { + PhysicalDBNode physicalDBNode = mycatConfig.getDataNodes().get(randomDn); + if (physicalDBNode != null) { + if (physicalDBNode.getDbPool().getSource().isAlive()) { + for (PhysicalDBPool pool : MycatServer.getInstance().getConfig().getDataHosts().values()) { + PhysicalDatasource source = pool.getSource(); + if (source.getHostConfig().containDataNode(randomDn) && pool.getSource().isAlive()) { + return randomDn; + } + } + } + } + } + } + + // all fail return default + return tc.getRandomDataNode(); + } + + @Deprecated + private static String getRandomDataNode(TableConfig tc) { + //写节点不可用,意味着读节点也不可用。 + //直接使用下一个 dataHost + String randomDn = tc.getRandomDataNode(); + MycatConfig mycatConfig = MycatServer.getInstance().getConfig(); + if (mycatConfig != null) { + PhysicalDBNode physicalDBNode = mycatConfig.getDataNodes().get(randomDn); + if (physicalDBNode != null) { + if (physicalDBNode.getDbPool().getSource().isAlive()) { + for (PhysicalDBPool pool : MycatServer.getInstance() + .getConfig() + .getDataHosts() + .values()) { + if (pool.getSource().getHostConfig().containDataNode(randomDn)) { + continue; + } + + if (pool.getSource().isAlive()) { + return pool.getSource().getHostConfig().getRandomDataNode(); + } + } + } } - return RouterUtil.routeToSingleNode(rrs, dn, stmt); } - return null; + + //all fail return default + return randomDn; } - /** - * @return dataNodeIndex -> [partitionKeysValueTuple+] - */ - public static Set ruleByJoinValueCalculate(RouteResultset rrs, TableConfig tc, - Set colRoutePairSet) throws SQLNonTransientException { + /** + * 根据 ER分片规则获取路由集合 + * + * @param stmt 执行的语句 + * @param rrs 数据路由集合 + * @param tc 表实体 + * @param joinKeyVal 连接属性 + * @return RouteResultset(数据路由集合) * + * @throws 
SQLNonTransientException,IllegalShardingColumnValueException + * @author mycat + */ + + public static RouteResultset routeByERParentKey(ServerConnection sc,SchemaConfig schema, + int sqlType,String stmt, + RouteResultset rrs, TableConfig tc, String joinKeyVal) + throws SQLNonTransientException { + + // only has one parent level and ER parent key is parent + // table's partition key + if (tc.isSecondLevel() + //判断是否为二级子表(父表不再有父表) + && tc.getParentTC().getPartitionColumn() + .equals(tc.getParentKey())) { // using + // parent + // rule to + // find + // datanode + Set parentColVal = new HashSet(1); + ColumnRoutePair pair = new ColumnRoutePair(joinKeyVal); + parentColVal.add(pair); + Set dataNodeSet = ruleCalculate(tc.getParentTC(), parentColVal,rrs.getDataNodeSlotMap()); + if (dataNodeSet.isEmpty() || dataNodeSet.size() > 1) { + throw new SQLNonTransientException( + "parent key can't find valid datanode ,expect 1 but found: " + + dataNodeSet.size()); + } + String dn = dataNodeSet.iterator().next(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("found partion node (using parent partion rule directly) for child table to insert " + + dn + " sql :" + stmt); + } + return RouterUtil.routeToSingleNode(rrs, dn, stmt); + } + return null; + } + + /** + * @return dataNodeIndex -> [partitionKeysValueTuple+] + */ + public static Set ruleByJoinValueCalculate(RouteResultset rrs, TableConfig tc, + Set colRoutePairSet) throws SQLNonTransientException { - String joinValue = ""; + String joinValue = ""; - if(colRoutePairSet.size() > 1) { - LOGGER.warn("joinKey can't have multi Value"); - } else { - Iterator it = colRoutePairSet.iterator(); - ColumnRoutePair joinCol = (ColumnRoutePair)it.next(); - joinValue = joinCol.colValue; - } + if(colRoutePairSet.size() > 1) { + LOGGER.warn("joinKey can't have multi Value"); + } else { + Iterator it = colRoutePairSet.iterator(); + ColumnRoutePair joinCol = it.next(); + joinValue = joinCol.colValue; + } - Set retNodeSet = new LinkedHashSet(); - - 
Set nodeSet = new LinkedHashSet(); - if (tc.isSecondLevel() - && tc.getParentTC().getPartitionColumn() - .equals(tc.getParentKey())) { // using - // parent - // rule to - // find - // datanode - - nodeSet = ruleCalculate(tc.getParentTC(),colRoutePairSet); - if (nodeSet.isEmpty()) { - throw new SQLNonTransientException( - "parent key can't find valid datanode ,expect 1 but found: " - + nodeSet.size()); - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("found partion node (using parent partion rule directly) for child table to insert " - + nodeSet + " sql :" + rrs.getStatement()); - } - retNodeSet.addAll(nodeSet); + Set retNodeSet = new LinkedHashSet(); + + Set nodeSet; + if (tc.isSecondLevel() + && tc.getParentTC().getPartitionColumn() + .equals(tc.getParentKey())) { // using + // parent + // rule to + // find + // datanode + + nodeSet = ruleCalculate(tc.getParentTC(),colRoutePairSet,rrs.getDataNodeSlotMap()); + if (nodeSet.isEmpty()) { + throw new SQLNonTransientException( + "parent key can't find valid datanode ,expect 1 but found: " + + nodeSet.size()); + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("found partion node (using parent partion rule directly) for child table to insert " + + nodeSet + " sql :" + rrs.getStatement()); + } + retNodeSet.addAll(nodeSet); // for(ColumnRoutePair pair : colRoutePairSet) { // nodeSet = ruleCalculate(tc.getParentTC(),colRoutePairSet); @@ -674,591 +947,813 @@ public static Set ruleByJoinValueCalculate(RouteResultset rrs, TableConf // } // retNodeSet.addAll(nodeSet); // } - return retNodeSet; - } else { - retNodeSet.addAll(tc.getParentTC().getDataNodes()); - } + return retNodeSet; + } else { + retNodeSet.addAll(tc.getParentTC().getDataNodes()); + } - return retNodeSet; - } + return retNodeSet; + } - /** - * @return dataNodeIndex -> [partitionKeysValueTuple+] - */ - public static Set ruleCalculate(TableConfig tc, - Set colRoutePairSet) { - Set routeNodeSet = new LinkedHashSet(); - String col = tc.getRule().getColumn(); - 
RuleConfig rule = tc.getRule(); - AbstractPartitionAlgorithm algorithm = rule.getRuleAlgorithm(); - for (ColumnRoutePair colPair : colRoutePairSet) { - if (colPair.colValue != null) { - Integer nodeIndx = algorithm.calculate(colPair.colValue); - if (nodeIndx == null) { - throw new IllegalArgumentException( - "can't find datanode for sharding column:" + col - + " val:" + colPair.colValue); - } else { - String dataNode = tc.getDataNodes().get(nodeIndx); - routeNodeSet.add(dataNode); - colPair.setNodeId(nodeIndx); - } - } else if (colPair.rangeValue != null) { - Integer[] nodeRange = algorithm.calculateRange( - String.valueOf(colPair.rangeValue.beginValue), - String.valueOf(colPair.rangeValue.endValue)); - if (nodeRange != null) { - /** - * 不能确认 colPair的 nodeid是否会有其它影响 - */ - if (nodeRange.length == 0) { - routeNodeSet.addAll(tc.getDataNodes()); - } else { - ArrayList dataNodes = tc.getDataNodes(); - String dataNode = null; - for (Integer nodeId : nodeRange) { - dataNode = dataNodes.get(nodeId); - routeNodeSet.add(dataNode); - } - } - } - } + /** + * @return dataNodeIndex -> [partitionKeysValueTuple+] + */ + public static Set ruleCalculate(TableConfig tc, + Set colRoutePairSet,Map dataNodeSlotMap) { + Set routeNodeSet = new LinkedHashSet(); + String col = tc.getRule().getColumn(); + RuleConfig rule = tc.getRule(); + AbstractPartitionAlgorithm algorithm = rule.getRuleAlgorithm(); + for (ColumnRoutePair colPair : colRoutePairSet) { + if (colPair.colValue != null) { + Integer nodeIndx = algorithm.calculate(colPair.colValue); + if (nodeIndx == null) { + throw new IllegalArgumentException( + "can't find datanode for sharding column:" + col + + " val:" + colPair.colValue); + } else { + String dataNode = tc.getDataNodes().get(nodeIndx); + routeNodeSet.add(dataNode); + if(algorithm instanceof SlotFunction) { + dataNodeSlotMap.put(dataNode,((SlotFunction) algorithm).slotValue()); + } + colPair.setNodeId(nodeIndx); + } + } else if (colPair.rangeValue != null) { + Integer[] 
nodeRange = algorithm.calculateRange( + String.valueOf(colPair.rangeValue.beginValue), + String.valueOf(colPair.rangeValue.endValue)); + if (nodeRange != null) { + /** + * 不能确认 colPair的 nodeid是否会有其它影响 + */ + if (nodeRange.length == 0) { + routeNodeSet.addAll(tc.getDataNodes()); + } else { + ArrayList dataNodes = tc.getDataNodes(); + String dataNode = null; + for (Integer nodeId : nodeRange) { + dataNode = dataNodes.get(nodeId); + if(algorithm instanceof SlotFunction) { + dataNodeSlotMap.put(dataNode,((SlotFunction) algorithm).slotValue()); + } + routeNodeSet.add(dataNode); + } + } + } + } - } - return routeNodeSet; - } + } + return routeNodeSet; + } - /** - * 多表路由 - * @param schema - * @param ctx - * @param tables - * @param rrs - * @param isSelect - * @return - * @throws SQLNonTransientException - */ - public static RouteResultset tryRouteForTables(SchemaConfig schema, DruidShardingParseInfo ctx, RouteCalculateUnit routeUnit, RouteResultset rrs, - boolean isSelect, LayerCachePool cachePool) throws SQLNonTransientException { - List tables = ctx.getTables(); - if(schema.isNoSharding()||(tables.size() >= 1&&isNoSharding(schema,tables.get(0)))) { - return routeToSingleNode(rrs, schema.getDataNode(), ctx.getSql()); - } + /** + * 多表路由 + */ + public static RouteResultset tryRouteForTables(SchemaConfig schema, DruidShardingParseInfo ctx, + RouteCalculateUnit routeUnit, RouteResultset rrs, boolean isSelect, LayerCachePool cachePool) + throws SQLNonTransientException { - //只有一个表的 - if(tables.size() == 1) { - return RouterUtil.tryRouteForOneTable(schema, ctx, routeUnit, tables.get(0), rrs, isSelect, cachePool); - } + List tables = ctx.getTables(); - Set retNodesSet = new HashSet(); - //每个表对应的路由映射 - Map> tablesRouteMap = new HashMap>(); - - //分库解析信息不为空 - Map>> tablesAndConditions = routeUnit.getTablesAndConditions(); - if(tablesAndConditions != null && tablesAndConditions.size() > 0) { - //为分库表找路由 - RouterUtil.findRouteWithcConditionsForTables(schema, rrs, 
tablesAndConditions, tablesRouteMap, ctx.getSql(), cachePool, isSelect); - if(rrs.isFinishedRoute()) { - return rrs; - } - } + if(schema.isNoSharding()||(tables.size() >= 1&&isNoSharding(schema,tables.get(0)))) { + return routeToSingleNode(rrs, schema.getDataNode(), ctx.getSql()); + } - //为全局表和单库表找路由 - for(String tableName : tables) { - TableConfig tableConfig = schema.getTables().get(tableName.toUpperCase()); - if(tableConfig == null) { - String msg = "can't find table define in schema "+ tableName + " schema:" + schema.getName(); - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - if(tableConfig.isGlobalTable()) {//全局表 - if(tablesRouteMap.get(tableName) == null) { - tablesRouteMap.put(tableName, new HashSet()); - } - tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); - } else if(tablesRouteMap.get(tableName) == null) { //余下的表都是单库表 - tablesRouteMap.put(tableName, new HashSet()); - tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); - } - } + //只有一个表的 + if(tables.size() == 1) { + return RouterUtil.tryRouteForOneTable(schema, ctx, routeUnit, tables.get(0), rrs, isSelect, cachePool); + } - boolean isFirstAdd = true; - for(Map.Entry> entry : tablesRouteMap.entrySet()) { - if(entry.getValue() == null || entry.getValue().size() == 0) { - throw new SQLNonTransientException("parent key can't find any valid datanode "); - } else { - if(isFirstAdd) { - retNodesSet.addAll(entry.getValue()); - isFirstAdd = false; - } else { - retNodesSet.retainAll(entry.getValue()); - if(retNodesSet.size() == 0) {//两个表的路由无交集 - String errMsg = "invalid route in sql, multi tables found but datanode has no intersection " - + " sql:" + ctx.getSql(); - LOGGER.warn(errMsg); - throw new SQLNonTransientException(errMsg); - } - } - } - } + Set retNodesSet = new HashSet(); + //每个表对应的路由映射 + Map> tablesRouteMap = new HashMap>(); + + //分库解析信息不为空 + Map>> tablesAndConditions = routeUnit.getTablesAndConditions(); + if(tablesAndConditions != null && 
tablesAndConditions.size() > 0) { + //为分库表找路由 + RouterUtil.findRouteWithcConditionsForTables(schema, rrs, tablesAndConditions, tablesRouteMap, ctx.getSql(), cachePool, isSelect); + if(rrs.isFinishedRoute()) { + return rrs; + } + } - if(retNodesSet != null && retNodesSet.size() > 0) { - if(retNodesSet.size() > 1 && isAllGlobalTable(ctx, schema)) { - // mulit routes ,not cache route result - if (isSelect) { - rrs.setCacheAble(false); - routeToSingleNode(rrs, retNodesSet.iterator().next(), ctx.getSql()); - } - else {//delete 删除全局表的记录 - routeToMultiNode(isSelect, rrs, retNodesSet, ctx.getSql(),true); - } + //为全局表和单库表找路由 + for(String tableName : tables) { + + TableConfig tableConfig = schema.getTables().get(tableName.toUpperCase()); + + if(tableConfig == null) { + //add 如果表读取不到则先将表名从别名中读取转化后再读取 + String alias = ctx.getTableAliasMap().get(tableName); + if(!StringUtil.isEmpty(alias)){ + tableConfig = schema.getTables().get(alias.toUpperCase()); + } + + if(tableConfig == null){ + String msg = "can't find table define in schema "+ tableName + " schema:" + schema.getName(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + + } + if(tableConfig.isGlobalTable()) {//全局表 + if(tablesRouteMap.get(tableName) == null) { + tablesRouteMap.put(tableName, new HashSet()); + } + tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); + } else if(tablesRouteMap.get(tableName) == null) { //余下的表都是单库表 + tablesRouteMap.put(tableName, new HashSet()); + tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); + } + } - } else { - routeToMultiNode(isSelect, rrs, retNodesSet, ctx.getSql()); - } + boolean isFirstAdd = true; + for(Map.Entry> entry : tablesRouteMap.entrySet()) { + if(entry.getValue() == null || entry.getValue().size() == 0) { + throw new SQLNonTransientException("parent key can't find any valid datanode "); + } else { + if(isFirstAdd) { + retNodesSet.addAll(entry.getValue()); + isFirstAdd = false; + } else { + 
retNodesSet.retainAll(entry.getValue()); + if(retNodesSet.size() == 0) {//两个表的路由无交集 + String errMsg = "invalid route in sql, multi tables found but datanode has no intersection " + + " sql:" + ctx.getSql(); + LOGGER.warn(errMsg); + throw new SQLNonTransientException(errMsg); + } + } + } + } - } - return rrs; + if(retNodesSet != null && retNodesSet.size() > 0) { + String tableName = tables.get(0); + TableConfig tableConfig = schema.getTables().get(tableName.toUpperCase()); + if(tableConfig.isDistTable()){ + routeToDistTableNode(tableName,schema, rrs, ctx.getSql(), tablesAndConditions, cachePool, isSelect); + return rrs; + } - } + if(retNodesSet.size() > 1 && isAllGlobalTable(ctx, schema)) { + // mulit routes ,not cache route result + if (isSelect) { + rrs.setCacheAble(false); + routeToSingleNode(rrs, retNodesSet.iterator().next(), ctx.getSql()); + } + else {//delete 删除全局表的记录 + routeToMultiNode(isSelect, rrs, retNodesSet, ctx.getSql(),true); + } - /** - * - * 单表路由 - * @param schema - * @param ctx - * @param tableName - * @param rrs - * @param isSelect - * @return - * @throws SQLNonTransientException - */ - public static RouteResultset tryRouteForOneTable(SchemaConfig schema, DruidShardingParseInfo ctx, RouteCalculateUnit routeUnit, String tableName, RouteResultset rrs, - boolean isSelect, LayerCachePool cachePool) throws SQLNonTransientException { - if(isNoSharding(schema,tableName)) - { - return routeToSingleNode(rrs, schema.getDataNode(), ctx.getSql()); - } + } else { + routeToMultiNode(isSelect, rrs, retNodesSet, ctx.getSql()); + } - TableConfig tc = schema.getTables().get(tableName); - if(tc == null) { - String msg = "can't find table define in schema " - + tableName + " schema:" + schema.getName(); - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - if(tc.isGlobalTable()) {//全局表 - if(isSelect) { - // global select ,not cache route result - rrs.setCacheAble(false); - return routeToSingleNode(rrs, tc.getRandomDataNode(),ctx.getSql()); - } else 
{//insert into 全局表的记录 - return routeToMultiNode(false, rrs, tc.getDataNodes(), ctx.getSql(),true); - } - } else {//单表或者分库表 - if (!checkRuleRequired(schema, ctx, routeUnit, tc)) { - throw new IllegalArgumentException("route rule for table " - + tc.getName() + " is required: " + ctx.getSql()); + } + return rrs; - } - if(tc.getPartitionColumn() == null && !tc.isSecondLevel()) {//单表且不是childTable -// return RouterUtil.routeToSingleNode(rrs, tc.getDataNodes().get(0),ctx.getSql()); - return routeToMultiNode(rrs.isCacheAble(), rrs, tc.getDataNodes(), ctx.getSql()); - } else { - //每个表对应的路由映射 - Map> tablesRouteMap = new HashMap>(); - if(routeUnit.getTablesAndConditions() != null && routeUnit.getTablesAndConditions().size() > 0) { - RouterUtil.findRouteWithcConditionsForTables(schema, rrs, routeUnit.getTablesAndConditions(), tablesRouteMap, ctx.getSql(),cachePool,isSelect); - if(rrs.isFinishedRoute()) { - return rrs; - } - } + } - if(tablesRouteMap.get(tableName) == null) { - return routeToMultiNode(rrs.isCacheAble(), rrs, tc.getDataNodes(), ctx.getSql()); - } else { -// boolean isCache = rrs.isCacheAble(); -// if(tablesRouteMap.get(tableName).size() > 1) { -// -// } - return routeToMultiNode(rrs.isCacheAble(), rrs, tablesRouteMap.get(tableName), ctx.getSql()); - } - } - } - } - /** - * 处理分库表路由 - * @param schema - * @param tablesAndConditions - * @param tablesRouteMap - * @throws SQLNonTransientException - */ - public static void findRouteWithcConditionsForTables(SchemaConfig schema, RouteResultset rrs, - Map>> tablesAndConditions, - Map> tablesRouteMap, String sql, LayerCachePool cachePool, boolean isSelect) - throws SQLNonTransientException { - //为分库表找路由 - for(Map.Entry>> entry : tablesAndConditions.entrySet()) { - String tableName = entry.getKey().toUpperCase(); - TableConfig tableConfig = schema.getTables().get(tableName); - if(tableConfig == null) { - String msg = "can't find table define in schema " - + tableName + " schema:" + schema.getName(); - LOGGER.warn(msg); - 
throw new SQLNonTransientException(msg); - } - //全局表或者不分库的表略过(全局表后面再计算) - if(tableConfig.isGlobalTable() || schema.getTables().get(tableName).getDataNodes().size() == 1) { - continue; - } else {//非全局表:分库表、childTable、其他 - Map> columnsMap = entry.getValue(); - String joinKey = tableConfig.getJoinKey(); - String partionCol = tableConfig.getPartitionColumn(); - String primaryKey = tableConfig.getPrimaryKey(); - boolean isFoundPartitionValue = partionCol != null && entry.getValue().get(partionCol) != null; - boolean isLoadData=false; - if (LOGGER.isDebugEnabled()) { - if(sql.startsWith(LoadData.loadDataHint)||rrs.isLoadData()) - { //由于load data一次会计算很多路由数据,如果输出此日志会极大降低load data的性能 - isLoadData=true; - } - } - if(entry.getValue().get(primaryKey) != null && entry.getValue().size() == 1&&!isLoadData) - {//主键查找 - // try by primary key if found in cache - Set primaryKeyPairs = entry.getValue().get(primaryKey); - if (primaryKeyPairs != null) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("try to find cache by primary key "); - } - String tableKey = schema.getName() + '_' + tableName; - boolean allFound = true; - for (ColumnRoutePair pair : primaryKeyPairs) {//可能id in(1,2,3)多主键 - String cacheKey = pair.colValue; - String dataNode = (String) cachePool.get(tableKey, cacheKey); - if (dataNode == null) { - allFound = false; - continue; - } else { - if(tablesRouteMap.get(tableName) == null) { - tablesRouteMap.put(tableName, new HashSet()); - } - tablesRouteMap.get(tableName).add(dataNode); - continue; - } - } - if (!allFound) { - // need cache primary key ->datanode relation - if (isSelect && tableConfig.getPrimaryKey() != null) { - rrs.setPrimaryKey(tableKey + '.' 
+ tableConfig.getPrimaryKey()); - } - } else {//主键缓存中找到了就执行循环的下一轮 - continue; - } - } - } - if (isFoundPartitionValue) {//分库表 - Set partitionValue = columnsMap.get(partionCol); - if(partitionValue == null || partitionValue.size() == 0) { - if(tablesRouteMap.get(tableName) == null) { - tablesRouteMap.put(tableName, new HashSet()); - } - tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); - } else { - for(ColumnRoutePair pair : partitionValue) { - if(pair.colValue != null) { - Integer nodeIndex = tableConfig.getRule().getRuleAlgorithm().calculate(pair.colValue); - if(nodeIndex == null) { - String msg = "can't find any valid datanode :" + tableConfig.getName() - + " -> " + tableConfig.getPartitionColumn() + " -> " + pair.colValue; - LOGGER.warn(msg); - throw new SQLNonTransientException(msg); - } - String node = tableConfig.getDataNodes().get(nodeIndex); - if(node != null) { - if(tablesRouteMap.get(tableName) == null) { - tablesRouteMap.put(tableName, new HashSet()); - } - tablesRouteMap.get(tableName).add(node); - } - } - if(pair.rangeValue != null) { - Integer[] nodeIndexs = tableConfig.getRule().getRuleAlgorithm() - .calculateRange(pair.rangeValue.beginValue.toString(), pair.rangeValue.endValue.toString()); - for(Integer idx : nodeIndexs) { - String node = tableConfig.getDataNodes().get(idx); - if(node != null) { - if(tablesRouteMap.get(tableName) == null) { - tablesRouteMap.put(tableName, new HashSet()); - } - tablesRouteMap.get(tableName).add(node); - - } - } - } - } - } - } else if(joinKey != null && columnsMap.get(joinKey) != null && columnsMap.get(joinKey).size() != 0) {//childTable (如果是select 语句的父子表join)之前要找到root table,将childTable移除,只留下root table - Set joinKeyValue = columnsMap.get(joinKey); + /** + * + * 单表路由 + */ + public static RouteResultset tryRouteForOneTable(SchemaConfig schema, DruidShardingParseInfo ctx, + RouteCalculateUnit routeUnit, String tableName, RouteResultset rrs, boolean isSelect, + LayerCachePool cachePool) throws 
SQLNonTransientException { - ColumnRoutePair joinCol = null; - - Set dataNodeSet = ruleByJoinValueCalculate(rrs, tableConfig, joinKeyValue); + if (isNoSharding(schema, tableName)) { + return routeToSingleNode(rrs, schema.getDataNode(), ctx.getSql()); + } - if (dataNodeSet.isEmpty()) { - throw new SQLNonTransientException( - "parent key can't find any valid datanode "); - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("found partion nodes (using parent partion rule directly) for child table to update " - + Arrays.toString(dataNodeSet.toArray()) + " sql :" + sql); - } - if (dataNodeSet.size() > 1) { - routeToMultiNode(rrs.isCacheAble(), rrs, dataNodeSet, sql); - rrs.setFinishedRoute(true); - return; - } else { - rrs.setCacheAble(true); - routeToSingleNode(rrs, dataNodeSet.iterator().next(), sql); - return; - } + TableConfig tc = schema.getTables().get(tableName); + if(tc == null) { + String msg = "can't find table define in schema " + tableName + " schema:" + schema.getName(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } - } else { - //没找到拆分字段,该表的所有节点都路由 - if(tablesRouteMap.get(tableName) == null) { - tablesRouteMap.put(tableName, new HashSet()); - } - tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); - } - } - } - } + if(tc.isDistTable()){ + return routeToDistTableNode(tableName,schema,rrs,ctx.getSql(), routeUnit.getTablesAndConditions(), cachePool,isSelect); + } - public static boolean isAllGlobalTable(DruidShardingParseInfo ctx, SchemaConfig schema) { - boolean isAllGlobal = false; - for(String table : ctx.getTables()) { - TableConfig tableConfig = schema.getTables().get(table); - if(tableConfig!=null && tableConfig.isGlobalTable()) { - isAllGlobal = true; - } else { - return false; - } - } - return isAllGlobal; - } + if(tc.isGlobalTable()) {//全局表 + if(isSelect) { + // global select ,not cache route result + rrs.setCacheAble(false); + return routeToSingleNode(rrs, getAliveRandomDataNode(tc)/*getRandomDataNode(tc)*/, 
ctx.getSql()); + } else {//insert into 全局表的记录 + return routeToMultiNode(false, rrs, tc.getDataNodes(), ctx.getSql(),true); + } + } else {//单表或者分库表 + if (!checkRuleRequired(schema, ctx, routeUnit, tc)) { + throw new IllegalArgumentException("route rule for table " + + tc.getName() + " is required: " + ctx.getSql()); - /** - * - * @param schema - * @param ctx - * @param tc - * @return true表示校验通过,false表示检验不通过 - */ - public static boolean checkRuleRequired(SchemaConfig schema, DruidShardingParseInfo ctx, RouteCalculateUnit routeUnit, TableConfig tc) { - if(!tc.isRuleRequired()) { - return true; - } - boolean hasRequiredValue = false; - String tableName = tc.getName(); - if(routeUnit.getTablesAndConditions().get(tableName) == null || routeUnit.getTablesAndConditions().get(tableName).size() == 0) { - hasRequiredValue = false; - } else { - for(Map.Entry> condition : routeUnit.getTablesAndConditions().get(tableName).entrySet()) { - - String colName = condition.getKey(); - //条件字段是拆分字段 - if(colName.equals(tc.getPartitionColumn())) { - hasRequiredValue = true; - break; - } - } - } - return hasRequiredValue; - } + } + if(tc.getPartitionColumn() == null && !tc.isSecondLevel()) {//单表且不是childTable +// return RouterUtil.routeToSingleNode(rrs, tc.getDataNodes().get(0),ctx.getSql()); + return routeToMultiNode(rrs.isCacheAble(), rrs, tc.getDataNodes(), ctx.getSql()); + } else { + //每个表对应的路由映射 + Map> tablesRouteMap = new HashMap>(); + if(routeUnit.getTablesAndConditions() != null && routeUnit.getTablesAndConditions().size() > 0) { + RouterUtil.findRouteWithcConditionsForTables(schema, rrs, routeUnit.getTablesAndConditions(), tablesRouteMap, ctx.getSql(), cachePool, isSelect); + if(rrs.isFinishedRoute()) { + return rrs; + } + } + + if(tablesRouteMap.get(tableName) == null) { + return routeToMultiNode(rrs.isCacheAble(), rrs, tc.getDataNodes(), ctx.getSql()); + } else { + return routeToMultiNode(rrs.isCacheAble(), rrs, tablesRouteMap.get(tableName), ctx.getSql()); + } + } + } + } + 
private static RouteResultset routeToDistTableNode(String tableName, SchemaConfig schema, RouteResultset rrs, + String orgSql, Map>> tablesAndConditions, + LayerCachePool cachePool, boolean isSelect) throws SQLNonTransientException { - /** - * 增加判断支持未配置分片的表走默认的dataNode - * @param schemaConfig - * @param tableName - * @return - */ - public static boolean isNoSharding(SchemaConfig schemaConfig,String tableName) - { - if(schemaConfig.isNoSharding()) - { - return true; - } - if(schemaConfig.getDataNode()!=null&&!schemaConfig.getTables().containsKey(tableName)) - { - return true; - } + TableConfig tableConfig = schema.getTables().get(tableName); + if(tableConfig == null) { + String msg = "can't find table define in schema " + tableName + " schema:" + schema.getName(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + if(tableConfig.isGlobalTable()){ + String msg = "can't suport district table " + tableName + " schema:" + schema.getName() + " for global table "; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + String partionCol = tableConfig.getPartitionColumn(); +// String primaryKey = tableConfig.getPrimaryKey(); + boolean isLoadData=false; - return false; - } + Set tablesRouteSet = new HashSet(); - /** - * 判断条件是否永真 - * @param expr - * @return - */ - public static boolean isConditionAlwaysTrue(SQLExpr expr) { - Object o = WallVisitorUtils.getValue(expr); - if(Boolean.TRUE.equals(o)) { - return true; + List dataNodes = tableConfig.getDataNodes(); + if(dataNodes.size()>1){ + String msg = "can't suport district table " + tableName + " schema:" + schema.getName() + " for mutiple dataNode " + dataNodes; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); } - return false; - } + String dataNode = dataNodes.get(0); - /** - * 判断条件是否永假的 - * @param expr - * @return - */ - public static boolean isConditionAlwaysFalse(SQLExpr expr) { - Object o = WallVisitorUtils.getValue(expr); - if(Boolean.FALSE.equals(o)) { - return true; + 
//主键查找缓存暂时不实现 + if(tablesAndConditions.isEmpty()){ + List subTables = tableConfig.getDistTables(); + tablesRouteSet.addAll(subTables); } - return false; - } + for(Map.Entry>> entry : tablesAndConditions.entrySet()) { + boolean isFoundPartitionValue = partionCol != null && entry.getValue().get(partionCol) != null; + Map> columnsMap = entry.getValue(); - public static boolean processERChildTable(final SchemaConfig schema, final String origSQL, - final MySQLFrontConnection sc) throws SQLNonTransientException { - String tableName = StringUtil.getTableName(origSQL).toUpperCase(); - final TableConfig tc = schema.getTables().get(tableName); + Set partitionValue = columnsMap.get(partionCol); + if(partitionValue == null || partitionValue.size() == 0) { + tablesRouteSet.addAll(tableConfig.getDistTables()); + } else { + for(ColumnRoutePair pair : partitionValue) { + AbstractPartitionAlgorithm algorithm = tableConfig.getRule().getRuleAlgorithm(); + if(pair.colValue != null) { + Integer tableIndex = algorithm.calculate(pair.colValue); + if(tableIndex == null) { + String msg = "can't find any valid datanode :" + tableConfig.getName() + + " -> " + tableConfig.getPartitionColumn() + " -> " + pair.colValue; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + String subTable = tableConfig.getDistTables().get(tableIndex); + if(subTable != null) { + tablesRouteSet.add(subTable); + if(algorithm instanceof SlotFunction){ + rrs.getDataNodeSlotMap().put(subTable,((SlotFunction) algorithm).slotValue()); + } + } + } + if(pair.rangeValue != null) { + Integer[] tableIndexs = algorithm + .calculateRange(pair.rangeValue.beginValue.toString(), pair.rangeValue.endValue.toString()); + for(Integer idx : tableIndexs) { + String subTable = tableConfig.getDistTables().get(idx); + if(subTable != null) { + tablesRouteSet.add(subTable); + if(algorithm instanceof SlotFunction){ + rrs.getDataNodeSlotMap().put(subTable,((SlotFunction) algorithm).slotValue()); + } + } + } + } + } + } + } - 
if (null != tc && tc.isChildTable()) { - final RouteResultset rrs = new RouteResultset(origSQL, ServerParse.INSERT); - String joinKey = tc.getJoinKey(); - MySqlInsertStatement insertStmt = (MySqlInsertStatement) (new MySqlStatementParser(origSQL)).parseInsert(); - int joinKeyIndex = getJoinKeyIndex(insertStmt.getColumns(), joinKey); + Object[] subTables = tablesRouteSet.toArray(); + RouteResultsetNode[] nodes = new RouteResultsetNode[subTables.length]; + Map dataNodeSlotMap= rrs.getDataNodeSlotMap(); + for(int i=0;i>> tablesAndConditions, + Map> tablesRouteMap, String sql, LayerCachePool cachePool, boolean isSelect) + throws SQLNonTransientException { + + //为分库表找路由 + for(Map.Entry>> entry : tablesAndConditions.entrySet()) { + String tableName = entry.getKey().toUpperCase(); + TableConfig tableConfig = schema.getTables().get(tableName); + if(tableConfig == null) { + String msg = "can't find table define in schema " + + tableName + " schema:" + schema.getName(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + if(tableConfig.getDistTables()!=null && tableConfig.getDistTables().size()>0){ + routeToDistTableNode(tableName,schema,rrs,sql, tablesAndConditions, cachePool,isSelect); + } + //全局表或者不分库的表略过(全局表后面再计算) + if(tableConfig.isGlobalTable() || schema.getTables().get(tableName).getDataNodes().size() == 1) { + continue; + } else {//非全局表:分库表、childTable、其他 + Map> columnsMap = entry.getValue(); + String joinKey = tableConfig.getJoinKey(); + String partionCol = tableConfig.getPartitionColumn(); + String primaryKey = tableConfig.getPrimaryKey(); + boolean isFoundPartitionValue = partionCol != null && entry.getValue().get(partionCol) != null; + boolean isLoadData=false; + if (LOGGER.isDebugEnabled() + && sql.startsWith(LoadData.loadDataHint)||rrs.isLoadData()) { + //由于load data一次会计算很多路由数据,如果输出此日志会极大降低load data的性能 + isLoadData=true; + } + if(entry.getValue().get(primaryKey) != null && entry.getValue().size() == 1&&!isLoadData) + {//主键查找 + // try by primary 
key if found in cache + Set primaryKeyPairs = entry.getValue().get(primaryKey); + if (primaryKeyPairs != null) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("try to find cache by primary key "); + } + String tableKey = schema.getName() + '_' + tableName; + boolean allFound = true; + for (ColumnRoutePair pair : primaryKeyPairs) {//可能id in(1,2,3)多主键 + String cacheKey = pair.colValue; + String dataNode = (String) cachePool.get(tableKey, cacheKey); + if (dataNode == null) { + allFound = false; + continue; + } else { + if(tablesRouteMap.get(tableName) == null) { + tablesRouteMap.put(tableName, new HashSet()); + } + tablesRouteMap.get(tableName).add(dataNode); + continue; + } + } + if (!allFound) { + // need cache primary key ->datanode relation + if (isSelect && tableConfig.getPrimaryKey() != null) { + rrs.setPrimaryKey(tableKey + '.' + tableConfig.getPrimaryKey()); + } + } else {//主键缓存中找到了就执行循环的下一轮 + continue; + } + } + } + if (isFoundPartitionValue) {//分库表 + Set partitionValue = columnsMap.get(partionCol); + if(partitionValue == null || partitionValue.size() == 0) { + if(tablesRouteMap.get(tableName) == null) { + tablesRouteMap.put(tableName, new HashSet()); + } + tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); + } else { + for(ColumnRoutePair pair : partitionValue) { + AbstractPartitionAlgorithm algorithm = tableConfig.getRule().getRuleAlgorithm(); + if(pair.colValue != null) { + Integer nodeIndex = algorithm.calculate(pair.colValue); + if(nodeIndex == null) { + String msg = "can't find any valid datanode :" + tableConfig.getName() + + " -> " + tableConfig.getPartitionColumn() + " -> " + pair.colValue; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + + ArrayList dataNodes = tableConfig.getDataNodes(); + String node; + if (nodeIndex >=0 && nodeIndex < dataNodes.size()) { + node = dataNodes.get(nodeIndex); + + } else { + node = null; + String msg = "Can't find a valid data node for specified node index :" + + 
tableConfig.getName() + " -> " + tableConfig.getPartitionColumn() + + " -> " + pair.colValue + " -> " + "Index : " + nodeIndex; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + if(node != null) { + if(tablesRouteMap.get(tableName) == null) { + tablesRouteMap.put(tableName, new HashSet()); + } + if(algorithm instanceof SlotFunction){ + rrs.getDataNodeSlotMap().put(node,((SlotFunction) algorithm).slotValue()); + } + tablesRouteMap.get(tableName).add(node); + } + } + if(pair.rangeValue != null) { + Integer[] nodeIndexs = algorithm + .calculateRange(pair.rangeValue.beginValue.toString(), pair.rangeValue.endValue.toString()); + ArrayList dataNodes = tableConfig.getDataNodes(); + String node; + for(Integer idx : nodeIndexs) { + if (idx >= 0 && idx < dataNodes.size()) { + node = dataNodes.get(idx); + } else { + String msg = "Can't find valid data node(s) for some of specified node indexes :" + + tableConfig.getName() + " -> " + tableConfig.getPartitionColumn(); + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + if(node != null) { + if(tablesRouteMap.get(tableName) == null) { + tablesRouteMap.put(tableName, new HashSet()); + } + if(algorithm instanceof SlotFunction){ + rrs.getDataNodeSlotMap().put(node,((SlotFunction) algorithm).slotValue()); + } + tablesRouteMap.get(tableName).add(node); + + } + } + } + } + } + } else if(joinKey != null && columnsMap.get(joinKey) != null && columnsMap.get(joinKey).size() != 0) {//childTable (如果是select 语句的父子表join)之前要找到root table,将childTable移除,只留下root table + Set joinKeyValue = columnsMap.get(joinKey); + + Set dataNodeSet = ruleByJoinValueCalculate(rrs, tableConfig, joinKeyValue); + + if (dataNodeSet.isEmpty()) { + throw new SQLNonTransientException( + "parent key can't find any valid datanode "); + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("found partion nodes (using parent partion rule directly) for child table to update " + + Arrays.toString(dataNodeSet.toArray()) + " sql :" + sql); + } + if 
(dataNodeSet.size() > 1) { + routeToMultiNode(rrs.isCacheAble(), rrs, dataNodeSet, sql); + rrs.setFinishedRoute(true); + return; + } else { + rrs.setCacheAble(true); + routeToSingleNode(rrs, dataNodeSet.iterator().next(), sql); + return; + } + + } else { + //没找到拆分字段,该表的所有节点都路由 + if(tablesRouteMap.get(tableName) == null) { + tablesRouteMap.put(tableName, new HashSet()); + } + boolean isSlotFunction= tableConfig.getRule() != null && tableConfig.getRule().getRuleAlgorithm() instanceof SlotFunction; + if(isSlotFunction){ + for (String dn : tableConfig.getDataNodes()) { + rrs.getDataNodeSlotMap().put(dn,-1); + } + } + tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes()); + } + } + } + } - String sql = insertStmt.toString(); + public static boolean isAllGlobalTable(DruidShardingParseInfo ctx, SchemaConfig schema) { + boolean isAllGlobal = false; + for(String table : ctx.getTables()) { + TableConfig tableConfig = schema.getTables().get(table); + if(tableConfig!=null && tableConfig.isGlobalTable()) { + isAllGlobal = true; + } else { + return false; + } + } + return isAllGlobal; + } - // try to route by ER parent partion key - //RouteResultset theRrs = RouterUtil.routeByERParentKey(sc,schema,ServerParse.INSERT,sql, rrs, tc, joinKeyVal); + /** + * + * @param schema + * @param ctx + * @param tc + * @return true表示校验通过,false表示检验不通过 + */ + public static boolean checkRuleRequired(SchemaConfig schema, DruidShardingParseInfo ctx, RouteCalculateUnit routeUnit, TableConfig tc) { + if(!tc.isRuleRequired()) { + return true; + } + boolean hasRequiredValue = false; + String tableName = tc.getName(); + if(routeUnit.getTablesAndConditions().get(tableName) == null || routeUnit.getTablesAndConditions().get(tableName).size() == 0) { + hasRequiredValue = false; + } else { + for(Map.Entry> condition : routeUnit.getTablesAndConditions().get(tableName).entrySet()) { + + String colName = condition.getKey(); + //条件字段是拆分字段 + if(colName.equals(tc.getPartitionColumn())) { + 
hasRequiredValue = true; + break; + } + } + } + return hasRequiredValue; + } - if (null != null) { - boolean processedInsert=false; - if ( sc!=null && tc.isAutoIncrement()) { - String primaryKey = tc.getPrimaryKey(); - processedInsert=processInsert(sc,schema,ServerParse.INSERT,sql,tc.getName(),primaryKey); - } - if(processedInsert==false){ - rrs.setFinishedRoute(true); - sc.getSession2().execute(rrs, ServerParse.INSERT); - } - return true; - } - // route by sql query root parent's datanode - final String findRootTBSql = tc.getLocateRTableKeySql().toLowerCase() + joinKeyVal; - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("find root parent's node sql " + findRootTBSql); - } + /** + * 增加判断支持未配置分片的表走默认的dataNode + * @param schemaConfig + * @param tableName + * @return + */ + public static boolean isNoSharding(SchemaConfig schemaConfig, String tableName) { + // Table名字被转化为大写的,存储在schema + tableName = tableName.toUpperCase(); + if (schemaConfig.isNoSharding()) { + return true; + } - ListenableFuture listenableFuture = MycatServer.getInstance(). 
- getListeningExecutorService().submit(new Callable() { - @Override - public String call() throws Exception { - FetchStoreNodeOfChildTableHandler fetchHandler = new FetchStoreNodeOfChildTableHandler(); - return fetchHandler.execute(schema.getName(), findRootTBSql, tc.getRootParent().getDataNodes()); - } - }); - - - Futures.addCallback(listenableFuture, new FutureCallback() { - @Override - public void onSuccess(String result) { - if (Strings.isNullOrEmpty(result)) { - StringBuilder s = new StringBuilder(); - LOGGER.warn(s.append(sc.getSession2()).append(origSQL).toString() + - " err:" + "can't find (root) parent sharding node for sql:" + origSQL); - sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, "can't find (root) parent sharding node for sql:" + origSQL); - return; - } + if (schemaConfig.getDataNode() != null && !schemaConfig.getTables().containsKey(tableName)) { + return true; + } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("found partion node for child table to insert " + result + " sql :" + origSQL); - } - boolean processedInsert=false; - if ( sc!=null && tc.isAutoIncrement()) { - try { - String primaryKey = tc.getPrimaryKey(); - processedInsert=processInsert(sc,schema,ServerParse.INSERT,origSQL,tc.getName(),primaryKey); - } catch (SQLNonTransientException e) { - LOGGER.warn("sequence processInsert error,",e); - sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR , "sequence processInsert error," + e.getMessage()); - } - } - if(processedInsert==false){ - RouteResultset executeRrs = RouterUtil.routeToSingleNode(rrs, result, origSQL); - sc.getSession2().execute(executeRrs, ServerParse.INSERT); - } - } + return false; + } - @Override - public void onFailure(Throwable t) { - StringBuilder s = new StringBuilder(); - LOGGER.warn(s.append(sc.getSession2()).append(origSQL).toString() + - " err:" + t.getMessage()); - sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, t.getMessage() + " " + s.toString()); - } - }, MycatServer.getInstance(). 
- getListeningExecutorService()); - return true; - } - return false; - } + /** + * 系统表判断,某些sql语句会查询系统表或者跟系统表关联 + * @author lian + * @date 2016年12月2日 + * @param tableName + * @return + */ + public static boolean isSystemSchema(String tableName) { + // 以information_schema, mysql开头的是系统表 + if (tableName.startsWith("INFORMATION_SCHEMA.") + || tableName.startsWith("MYSQL.") + || tableName.startsWith("PERFORMANCE_SCHEMA.")) { + return true; + } - /** - * 寻找joinKey的索引 - * - * @param columns - * @param joinKey - * @return -1表示没找到,>=0表示找到了 - */ - private static int getJoinKeyIndex(List columns, String joinKey) { - for (int i = 0; i < columns.size(); i++) { - String col = StringUtil.removeBackquote(columns.get(i).toString()).toUpperCase(); - if (col.equals(joinKey)) { - return i; - } - } - return -1; - } + return false; + } - /** - * 是否为批量插入:insert into ...values (),()...或 insert into ...select..... - * - * @param insertStmt - * @return - */ - private static boolean isMultiInsert(MySqlInsertStatement insertStmt) { - return (insertStmt.getValuesList() != null && insertStmt.getValuesList().size() > 1) || insertStmt.getQuery() != null; - } + /** + * 判断条件是否永真 + * @param expr + * @return + */ + public static boolean isConditionAlwaysTrue(SQLExpr expr) { + Object o = WallVisitorUtils.getValue(expr); + if(Boolean.TRUE.equals(o)) { + return true; + } + return false; + } -} + /** + * 判断条件是否永假的 + * @param expr + * @return + */ + public static boolean isConditionAlwaysFalse(SQLExpr expr) { + Object o = WallVisitorUtils.getValue(expr); + if(Boolean.FALSE.equals(o)) { + return true; + } + return false; + } + /** + * 该方法,返回是否是ER子表 + * @param schema + * @param origSQL + * @param sc + * @return + * @throws SQLNonTransientException + * + * 备注说明: + * edit by ding.w at 2017.4.28, 主要处理 CLIENT_MULTI_STATEMENTS(insert into ; insert into)的情况 + * 目前仅支持mysql,并COM_QUERY请求包中的所有insert语句要么全部是er表,要么全部不是 + * + * + */ + public static boolean processERChildTable(final SchemaConfig schema, final String 
origSQL, + final ServerConnection sc) throws SQLNonTransientException { + + MySqlStatementParser parser = new MySqlStatementParser(origSQL); + List statements = parser.parseStatementList(); + + if(statements == null || statements.isEmpty() ) { + throw new SQLNonTransientException(String.format("无效的SQL语句:%s", origSQL)); + } + + + boolean erFlag = false; //是否是er表 + for(SQLStatement stmt : statements ) { + MySqlInsertStatement insertStmt = (MySqlInsertStatement) stmt; + String tableName = insertStmt.getTableName().getSimpleName().toUpperCase(); + final TableConfig tc = schema.getTables().get(tableName); + + if (null != tc && tc.isChildTable()) { + erFlag = true; + + String sql = insertStmt.toString(); + + final RouteResultset rrs = new RouteResultset(sql, ServerParse.INSERT); + String joinKey = tc.getJoinKey(); + //因为是Insert语句,用MySqlInsertStatement进行parse +// MySqlInsertStatement insertStmt = (MySqlInsertStatement) (new MySqlStatementParser(origSQL)).parseInsert(); + //判断条件完整性,取得解析后语句列中的joinkey列的index + int joinKeyIndex = getJoinKeyIndex(insertStmt.getColumns(), joinKey); + if (joinKeyIndex == -1) { + String inf = "joinKey not provided :" + tc.getJoinKey() + "," + insertStmt; + LOGGER.warn(inf); + throw new SQLNonTransientException(inf); + } + //子表不支持批量插入 + if (isMultiInsert(insertStmt)) { + String msg = "ChildTable multi insert not provided"; + LOGGER.warn(msg); + throw new SQLNonTransientException(msg); + } + //取得joinkey的值 + String joinKeyVal = insertStmt.getValues().getValues().get(joinKeyIndex).toString(); + //解决bug #938,当关联字段的值为char类型时,去掉前后"'" + String realVal = joinKeyVal; + if (joinKeyVal.startsWith("'") && joinKeyVal.endsWith("'") && joinKeyVal.length() > 2) { + realVal = joinKeyVal.substring(1, joinKeyVal.length() - 1); + } + + + + // try to route by ER parent partion key + //如果是二级子表(父表不再有父表),并且分片字段正好是joinkey字段,调用routeByERParentKey + RouteResultset theRrs = RouterUtil.routeByERParentKey(sc, schema, ServerParse.INSERT, sql, rrs, tc, realVal); + if (theRrs != 
null) { + boolean processedInsert=false; + //判断是否需要全局序列号 + if ( sc!=null && tc.isAutoIncrement()) { + String primaryKey = tc.getPrimaryKey(); + processedInsert=processInsert(sc,schema,ServerParse.INSERT,sql,tc.getName(),primaryKey); + } + if(processedInsert==false){ + rrs.setFinishedRoute(true); + sc.getSession2().execute(rrs, ServerParse.INSERT); + } + // return true; + //继续处理下一条 + continue; + } + + // route by sql query root parent's datanode + //如果不是二级子表或者分片字段不是joinKey字段结果为空,则启动异步线程去后台分片查询出datanode + //只要查询出上一级表的parentkey字段的对应值在哪个分片即可 + final String findRootTBSql = tc.getLocateRTableKeySql().toLowerCase() + joinKeyVal; + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("find root parent's node sql " + findRootTBSql); + } + + ListenableFuture listenableFuture = MycatServer.getInstance(). + getListeningExecutorService().submit(new Callable() { + @Override + public String call() throws Exception { + FetchStoreNodeOfChildTableHandler fetchHandler = new FetchStoreNodeOfChildTableHandler(); +// return fetchHandler.execute(schema.getName(), findRootTBSql, tc.getRootParent().getDataNodes()); + return fetchHandler.execute(schema.getName(), findRootTBSql, tc.getRootParent().getDataNodes(), sc); + } + }); + + + Futures.addCallback(listenableFuture, new FutureCallback() { + @Override + public void onSuccess(String result) { + //结果为空,证明上一级表中不存在那条记录,失败 + if (Strings.isNullOrEmpty(result)) { + StringBuilder s = new StringBuilder(); + LOGGER.warn(s.append(sc.getSession2()).append(origSQL).toString() + + " err:" + "can't find (root) parent sharding node for sql:" + origSQL); + if(!sc.isAutocommit()) { // 处于事务下失败, 必须回滚 + sc.setTxInterrupt("can't find (root) parent sharding node for sql:" + origSQL); + } + sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, "can't find (root) parent sharding node for sql:" + origSQL); + return; + } + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("found partion node for child table to insert " + result + " sql :" + origSQL); + } + 
//找到分片,进行插入(和其他的一样,需要判断是否需要全局自增ID) + boolean processedInsert=false; + if ( sc!=null && tc.isAutoIncrement()) { + try { + String primaryKey = tc.getPrimaryKey(); + processedInsert=processInsert(sc,schema,ServerParse.INSERT,origSQL,tc.getName(),primaryKey); + } catch (SQLNonTransientException e) { + LOGGER.warn("sequence processInsert error,",e); + sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR , "sequence processInsert error," + e.getMessage()); + } + } + if(processedInsert==false){ + RouteResultset executeRrs = RouterUtil.routeToSingleNode(rrs, result, origSQL); + sc.getSession2().execute(executeRrs, ServerParse.INSERT); + } + + } + + @Override + public void onFailure(Throwable t) { + StringBuilder s = new StringBuilder(); + LOGGER.warn(s.append(sc.getSession2()).append(origSQL).toString() + + " err:" + t.getMessage()); + sc.writeErrMessage(ErrorCode.ER_PARSE_ERROR, t.getMessage() + " " + s.toString()); + } + }, MycatServer.getInstance(). + getListeningExecutorService()); + + } else if(erFlag) { + throw new SQLNonTransientException(String.format("%s包含不是ER分片的表", origSQL)); + } + } + + + return erFlag; + } + /** + * 寻找joinKey的索引 + * + * @param columns + * @param joinKey + * @return -1表示没找到,>=0表示找到了 + */ + private static int getJoinKeyIndex(List columns, String joinKey) { + for (int i = 0; i < columns.size(); i++) { + String col = StringUtil.removeBackquote(columns.get(i).toString()).toUpperCase(); + if (col.equals(joinKey)) { + return i; + } + } + return -1; + } + /** + * 是否为批量插入:insert into ...values (),()...或 insert into ...select..... 
+ * + * @param insertStmt + * @return + */ + private static boolean isMultiInsert(MySqlInsertStatement insertStmt) { + return (insertStmt.getValuesList() != null && insertStmt.getValuesList().size() > 1) + || insertStmt.getQuery() != null; + } +} diff --git a/src/main/java/io/mycat/route/util/StringUtil.java b/src/main/java/io/mycat/route/util/StringUtil.java deleted file mode 100644 index 7f367937d..000000000 --- a/src/main/java/io/mycat/route/util/StringUtil.java +++ /dev/null @@ -1,448 +0,0 @@ - -package io.mycat.route.util; - -import java.io.UnsupportedEncodingException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.regex.Pattern; -import java.util.zip.CRC32; - -import org.apache.commons.lang.ArrayUtils; -import org.apache.commons.lang.StringUtils; - -/** - * - * @author yan.yan@huawei.com - * - */ -public class StringUtil extends StringUtils { - - /** 判断字符串是否为16进制数字 */ - public static final Pattern HAX_PATTERN = Pattern.compile("^[0-9a-fA-F]+$"); - - /** 判断字符串是否为数字 */ - public static final Pattern NUMBER_PATTERN = Pattern.compile("^[0-9]+$"); - - /** 默认中文编码字符 */ - public static final String DEFAULT_CHINESE_CHARSET = "GBK"; - - private static final String LINE_END = System.getProperty("line.separator"); - - /** - * 扩展并左对齐字符串,用指定字符串填充右边 - * 新增对中文字符串的支持,注意方法名称alignLefts - * - *

-     * StringUtil.alignLeft(null, *, *)      = null
-     * StringUtil.alignLeft("", 3, "z")      = "zzz"
-     * StringUtil.alignLeft("bat", 3, "yz")  = "bat"
-     * StringUtil.alignLeft("bat", 5, "yz")  = "batyz"
-     * StringUtil.alignLeft("bat", 8, "yz")  = "batyzyzy"
-     * StringUtil.alignLeft("bat", 1, "yz")  = "bat"
-     * StringUtil.alignLeft("bat", -1, "yz") = "bat"
-     * StringUtil.alignLeft("bat", 5, null)  = "bat  "
-     * StringUtil.alignLeft("bat", 5, "")    = "bat  "
-     * StringUtil.alignLeft("中文", 5, "")    = "中文 "
-     * 
- * - * @param str 要对齐的字符串 - * @param size 扩展字符串到指定宽度 - * @param padStr 填充字符串 - * - * @return 扩展后的字符串,如果字符串为null,则返回null - */ - public static String alignLefts(String str, int size, String padStr) { - if (str == null) { - return null; - } - - String padStringFinal = (isEmpty(padStr)) ? EMPTY : padStr; - int padLen = padStringFinal.length(); - int strLen = str.getBytes().length; - int pads = size - strLen; - - if (pads <= 0) { - return str; - } - - if (pads == padLen) { - return str.concat(padStringFinal); - } else if (pads < padLen) { - return str.concat(padStringFinal.substring(0, pads)); - } else { - char[] padding = new char[pads]; - char[] padChars = padStringFinal.toCharArray(); - - for (int i = 0; i < pads; i++) { - padding[i] = padChars[i % padLen]; - } - - return str.concat(new String(padding)); - } - } - - /** - * 扩展并右对齐字符串,用指定字符串填充左边 - * 新增对中文字符串的支持,注意方法名称alignRights - * - *
-     * StringUtil.alignRight(null, *, *)      = null
-     * StringUtil.alignRight("", 3, "z")      = "zzz"
-     * StringUtil.alignRight("bat", 3, "yz")  = "bat"
-     * StringUtil.alignRight("bat", 5, "yz")  = "yzbat"
-     * StringUtil.alignRight("bat", 8, "yz")  = "yzyzybat"
-     * StringUtil.alignRight("bat", 1, "yz")  = "bat"
-     * StringUtil.alignRight("bat", -1, "yz") = "bat"
-     * StringUtil.alignRight("bat", 5, null)  = "  bat"
-     * StringUtil.alignRight("bat", 5, "")    = "  bat"
-     * StringUtil.alignRight("中文", 5, "")    = " 中文"
-     * 
- * - * @param str 要对齐的字符串 - * @param size 扩展字符串到指定宽度 - * @param padStr 填充字符串 - * - * @return 扩展后的字符串,如果字符串为null,则返回null - */ - public static String alignRights(String str, int size, String padStr) { - if (str == null) { - return null; - } - - String padStringFinal = (isEmpty(padStr)) ? EMPTY : padStr; - int padLen = padStringFinal.length(); - int strLen = str.getBytes().length; - int pads = size - strLen; - if (pads <= 0) { - return str; - } - - if (pads == padLen) { - return padStringFinal.concat(str); - } else if (pads < padLen) { - return padStringFinal.substring(0, pads).concat(str); - } else { - char[] padding = new char[pads]; - char[] padChars = padStringFinal.toCharArray(); - - for (int i = 0; i < pads; i++) { - padding[i] = padChars[i % padLen]; - } - - return new String(padding).concat(str); - } - } - - /** - * 取指定字符串的子串,新增对中文字符串的支持 - * 注意方法名称substrings - * - * @param str 字符串 - * @param start 起始索引,如果为负数,表示从尾部计算 - * @param end 结束索引(不含),如果为负数,表示从尾部计算 - * - * @return 子串,如果原始串null,则返回null - */ - public static String substrings(String str, int start, int end) { - if (str == null) { - return null; - } - - int length = end - start; - byte[] dest = new byte[length]; - System.arraycopy(str.getBytes(), start, dest, 0, length); - - return new String(dest); - } - - /** - * 根据长度length把字符串切成两段,保存数据
- * 确保中文不要被切成两段 - * - * @param message - * @param length - * @return - */ - public static String[] cutString(String message, int length) { - String normal = StringUtil.substrings(message, 0, length); - if (isContainChinese(message, length)) { - normal = StringUtil.substrings(message, 0, length - 1); - } - - return new String[] { normal, StringUtil.substringAfter(message, normal) }; - } - - /** - * 字符串是否包含中文 - * - * @param message - * @param length - * @return - */ - public static boolean isContainChinese(String message) { - char[] chars = message.toCharArray(); - byte[] bytes; - try { - bytes = message.getBytes("GBK"); - } catch (UnsupportedEncodingException e) { - bytes = message.getBytes(); - } - return (chars.length != bytes.length); - } - - /** - * 字符串起始长度length的字符串是否包含中文 - * - * @param message - * @param length - * @return - */ - public static boolean isContainChinese(String message, int length) { - char[] chars = StringUtil.substrings(message, 0, length).toCharArray(); - char[] charsPlus = StringUtil.substrings(message, 0, length + 1).toCharArray(); - return ArrayUtils.isSameLength(chars, charsPlus); - } - - /** - * 在字符串中查找指定字符集合中的字符,并返回匹配的起始索引 如果字符串为null,则返回-1 - * 如果字符集合null或空,也返回-1 - *
-     * StringUtil.indexOfAny(null, *,0)                = -1
-     * StringUtil.indexOfAny("", *,0)                  = -1
-     * StringUtil.indexOfAny(*, null,0)                = -1
-     * StringUtil.indexOfAny(*, [],0)                  = -1
-     * StringUtil.indexOfAny("zzabyycdxx",['z','a'],0) = 0
-     * StringUtil.indexOfAny("zzabyycdxx",['b','y'],0) = 3
-     * StringUtil.indexOfAny("aba", ['z'],0)           = -1
-     * 
- * - * @param str 要扫描的字符串 - * @param searchChars 要搜索的字符集合 - * @param startPos 起始搜索的索引值,如果小于0,则看作0 - * - * @return 第一个匹配的索引值 如果字符串null或未找到,则返回-1 - */ - public static int indexOfAny(String str, char[] searchChars, int startPos) { - if ((str == null) || (str.length() == 0) || (searchChars == null) - || (searchChars.length == 0)) { - return -1; - } - - for (int i = startPos; i < str.length(); i++) { - char ch = str.charAt(i); - - for (int j = 0; j < searchChars.length; j++) { - if (searchChars[j] == ch) { - return i; - } - } - } - - return -1; - } - - /** - * 过滤要输出到json的字符串,进行转义输出 - * @param input - * @return - */ - public static String filterJsonString(String input) { - if (input == null) { - return EMPTY; - } - int length = input.length(); - StringBuilder result = new StringBuilder(length); - for (int i = 0; i < length; i++) { - char c = input.charAt(i); - switch (c) { - case '\'': { - result.append("\\'"); - break; - } - case '\"': { - result.append("\\\""); - break; - } - default: { - result.append(c); - } - } - } - return result.toString(); - } - - /** - * 过滤要输出到xml的字符串,将<,>,&,"进行转义输出 - * @param string - * @return - */ - public static String filterXMLString(String input) { - if (input == null) { - return EMPTY; - } - int length = input.length(); - StringBuilder result = new StringBuilder(length); - for (int i = 0; i < length; i++) { - char c = input.charAt(i); - switch (c) { - case '<': { - result.append("<"); - break; - } - case '>': { - result.append(">"); - break; - } - case '\"': { - result.append("&uot;"); - break; - } - case '&': { - result.append("&"); - break; - } - default: { - result.append(c); - } - } - } - return result.toString(); - } - - /** - * 根据url获取系统名称 - * 如果url里面包括系统名就返回,否则直接返回域名 - * 如http://bops.alipay.com 返回bops - * http://www.alipay.com 返回 alipay.com - * @param url - * @return - * @throws MalformedURLException - */ - public static String getSystemNameByURL(String url) throws MalformedURLException { - URL netURL = new URL(url); - String 
domain = netURL.getHost(); - if (domain.startsWith("www.")) { - domain = domain.substring(5); - } - int offset = domain.indexOf("alipay"); - if (offset > 0) { - return domain.substring(0, offset - 1); - } else { - return domain; - } - } - - /** - * 获取指定字符串按GBK编码转换成的byte长度 - * 由于String.getByte方法依赖于操作系统编码,处理中文字符串时建议用此方法 - * - * @param data - * @return - */ - public static byte[] getGBKByte(String data) { - if (data == null) { - return new byte[0]; - } - try { - return data.getBytes("GBK"); - } catch (UnsupportedEncodingException e) { - return data.getBytes(); - } - } - - /** - * 获取指定字符串按GBK编码转换成byte的长�? - * 由于String.getByte方法依赖于操作系统编码,处理中文字符串时建议用此方法 - * - * @param data - * @return - */ - public static int getGBKByteLength(String data) { - return getGBKByte(data).length; - } - - /** - * 生成�?定长度的序列�? - * - * @param length - * @param padding - * @return - */ - public static String genSerialNo(int length, String padding) { - String nanoTime = System.nanoTime() + ""; - if (nanoTime.length() >= length) { - nanoTime = nanoTime.substring(0, length); - } else { - nanoTime = nanoTime + repeat(padding, length - nanoTime.length()); - } - return nanoTime; - } - - /** - * 将数字格式化到固定长�? - * - * @param input - * @param fixLength - * @return - */ - public static String formatNumberToFixedLength(String input, int fixLength) { - if (input.length() <= fixLength) { - // 未到指定长度,左�?0 - return StringUtils.leftPad(input, fixLength, '0'); - } else { - // 超过长度,砍掉左边超长部�? - return input.substring(input.length() - fixLength); - } - } - - /** - * 判断字符串是否为16进制数字 - * @param input - * @return - */ - public static boolean isHexString(String input) { - if (input == null) { - return false; - } - if (NUMBER_PATTERN.matcher(input).matches()) { - // 过滤掉纯数字 - return false; - } - return HAX_PATTERN.matcher(input).matches(); - } - - /** - * 获取换行符(\n),供vm中使用,屏蔽了操作系统的差异 - * - * @return - */ - public static String getNewLine() { - return LINE_END; - } - - /** - * 将byte[]按指定编码转换为字符串,供velocity中使�? 
- * @param bytes - * @param charsetName - * @return - */ - public static String getNewString(byte[] bytes, String charsetName) { - try { - return new String(bytes, charsetName); - } catch (UnsupportedEncodingException e) { - return EMPTY; - } - } - - public static String crc32(String str) { - CRC32 crc32 = new CRC32(); - crc32.update(str.getBytes()); - return crc32.getValue() + ""; - } - - public static void main(String[] args) { - System.out.println(substring(crc32("123123123123123"), -3, -1)); - - } - -} diff --git a/src/main/java/io/mycat/route/util/VelocityUtil.java b/src/main/java/io/mycat/route/util/VelocityUtil.java deleted file mode 100644 index ca94f47b2..000000000 --- a/src/main/java/io/mycat/route/util/VelocityUtil.java +++ /dev/null @@ -1,119 +0,0 @@ -package io.mycat.route.util; - -import org.apache.velocity.VelocityContext; -import org.apache.velocity.app.Velocity; -import org.apache.velocity.exception.ParseErrorException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.StringWriter; -import java.io.Writer; - -/** - * - * @author yan.yan@huawei.com - * - */ -public class VelocityUtil { - private static final Logger LOGGER = LoggerFactory - .getLogger(VelocityUtil.class); - - private static DateUtil dateUtil = new DateUtil(); - private static StringUtil stringUtil = new StringUtil(); - - private static VelocityContext getContext() { - VelocityContext context = new VelocityContext(); - context.put("dateUtil", dateUtil); - context.put("stringUtil", stringUtil); - return context; - } - - /** - * - * - * @param tc - * @param colsVal - * @return - */ - public static String evalDBRule(String columnName, Object value, - String dbRule) { - - String dbIndex = ""; - VelocityContext context = getContext(); - Writer writer = new StringWriter(); - try { - context.put(columnName, value); - Velocity.evaluate(context, writer, StringUtil.EMPTY, dbRule); - dbIndex = StringUtil.trim(writer.toString()); - if 
(StringUtil.isEmpty(dbIndex)) { - return "0"; - } - return dbIndex; - } catch (ParseErrorException e) { - throw e; - } catch (Exception e) { - if (LOGGER.isDebugEnabled()) { - // LOGGER.debug(tc.getName() + "eval " + dbRule + " error.."); - - } - } - - return "0"; - - } - - public static void main(String[] args) throws IOException { - String rule = "#set($Integer=0)##\r\n" - + "#set($db=$ID%10)##\r\n" - + "#set($tb=($ID%100)/10)##\r\n" - + "#set($prefix='0'+$Integer.toString($db)+'_'+$Integer.toString($tb))##\r\n" - + "$!prefix\r\n"; - - String rule2 = "#set($Integer=0)##\r\n" - + "#set($monthstr=$stringUtil.substring($ID,4,6))##\r\n" - + "#set($month=$Integer.parseInt($monthstr))##\r\n" - + "#set($daystr=$stringUtil.substring($ID,6,8))##\r\n" - + "#set($day=$Integer.parseInt($daystr))##\r\n" - + "#if($month == 1)##\r\n" + "#set($n=0)##\r\n" - + "#elseif($month ==2)##\r\n" + "#set($n=31)##\r\n" - + "#elseif($month ==3)##\r\n" + "#set($n=31+28)##\r\n" - + "#elseif($month ==4)##\r\n" + "#set($n=31+28+31)##\r\n" - + "#elseif($month ==5)##\r\n" + "#set($n=31+28+31+30)##\r\n" - + "#elseif($month ==6)##\r\n" + "#set($n=31+28+31+30+31)##\r\n" - + "#elseif($month ==7)##\r\n" - + "#set($n=31+28+31+30+31+30)##\r\n" - + "#elseif($month ==8)##\r\n" - + "#set($n=31+28+31+30+31+30+31)##\r\n" - + "#elseif($month ==9)##\r\n" - + "#set($n=31+28+31+30+31+30+31+31)##\r\n" - + "#elseif($month ==10)##\r\n" - + "#set($n=31+28+31+30+31+30+31+31+30)##\r\n" - + "#elseif($month ==11)##\r\n" - + "#set($n=31+28+31+30+31+30+31+31+30+31)##\r\n" + "#else\r\n" - + "#set($n=31+28+31+30+31+30+31+31+30+31+30)##\r\n" - + "#end\r\n" + "#set($prefix=$n+$day+(-1))##\r\n" + "$!prefix"; - String rule3 = "#set($Integer=0)##\r\n" - + "#set($monthday=$stringUtil.substring($ID,2,8))##\r\n" - + "#set($prefix=$monthday.hashCode()%100)##\r\n" - + "$!prefix"; - String ret = evalDBRule("ID", "201508202330011", rule3); - System.out.println(ret); - String tpl = " 
#set($db_flag=$!stringUtil.crc32($F_CERTIFICATE_CODE))\r\n" - + "$!stringUtil.substring($db_flag,-3,-1)"; - Writer writer = new StringWriter(); - try { - VelocityContext context = getContext(); - context.put("F_CERTIFICATE_CODE", "123123123123123"); - Velocity.evaluate(context, writer, "", tpl); - System.out.println(writer.toString()); - } catch (ParseErrorException e) { - throw e; - } catch (Exception e) { - e.printStackTrace(); - } finally { - writer.close(); - } - - } -} diff --git a/src/main/java/io/mycat/server/GenalMySQLConnection.java b/src/main/java/io/mycat/server/GenalMySQLConnection.java deleted file mode 100644 index 5a3e4232a..000000000 --- a/src/main/java/io/mycat/server/GenalMySQLConnection.java +++ /dev/null @@ -1,239 +0,0 @@ -package io.mycat.server; - -import io.mycat.net.BufferArray; -import io.mycat.net.Connection; -import io.mycat.net.NetSystem; -import io.mycat.server.packet.AuthPacket; -import io.mycat.server.packet.ErrorPacket; -import io.mycat.server.packet.HandshakePacket; -import io.mycat.server.packet.util.CharsetUtil; -import io.mycat.server.packet.util.SecurityUtil; - -import java.io.UnsupportedEncodingException; -import java.nio.channels.SocketChannel; -import java.security.NoSuchAlgorithmException; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * MySQL Front connection - * - * @author wuzhih - * - */ - -public class GenalMySQLConnection extends Connection { - public static final int maxPacketSize = 16 * 1024 * 1024; - protected final AtomicBoolean isQuit = new AtomicBoolean(false); - protected byte[] seed; - protected String user; - protected volatile String schema; - protected volatile int txIsolation; - protected volatile boolean autocommit; - protected volatile boolean txInterrupted; - protected volatile String txInterrputMsg = ""; - protected long lastInsertId; - protected volatile String oldSchema; - protected long clientFlags; - protected String password; - protected boolean isAccepted; - protected boolean 
isAuthenticated; - protected volatile String charset; - protected volatile int charsetIndex; - protected HandshakePacket handshake; - protected boolean isSupportCompress = false; - - public GenalMySQLConnection(SocketChannel channel) { - super(channel); - this.txInterrupted = false; - this.autocommit = true; - } - - public int getTxIsolation() { - return txIsolation; - } - - public void setTxIsolation(int txIsolation) { - this.txIsolation = txIsolation; - } - - public boolean isAutocommit() { - return autocommit; - } - - public boolean isAuthenticated() { - return isAuthenticated; - } - - public boolean isSupportCompress() { - return isSupportCompress; - } - - public void setSupportCompress(boolean isSupportCompress) { - this.isSupportCompress = isSupportCompress; - } - - public void setAutocommit(boolean autocommit) { - this.autocommit = autocommit; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public long getLastInsertId() { - return lastInsertId; - } - - public void setLastInsertId(long lastInsertId) { - this.lastInsertId = lastInsertId; - } - - public void setAuthenticated(boolean isAuthenticated) { - this.isAuthenticated = isAuthenticated; - } - - public void writeErrMessage(int errno, String msg) { - ErrorPacket err = new ErrorPacket(); - err.packetId = 1; - err.errno = errno; - err.message = encodeString(msg, charset); - err.write(this); - } - - public String getUser() { - return user; - } - - public int getCharsetIndex() { - return charsetIndex; - } - - public void setUser(String user) { - this.user = user; - } - - public String getSchema() { - return schema; - } - - public final static byte[] encodeString(String src, String charset) { - if (src == null) { - return null; - } - if (charset == null) { - return src.getBytes(); - } - try { - return src.getBytes(charset); - } catch (UnsupportedEncodingException e) { - return src.getBytes(); - } - } - - public void 
setSchema(String newSchema) { - String curSchema = schema; - if (curSchema == null) { - this.schema = newSchema; - this.oldSchema = newSchema; - } else { - this.oldSchema = curSchema; - this.schema = newSchema; - } - } - - public byte[] getSeed() { - return seed; - } - - public boolean setCharsetIndex(int ci) { - String charset = CharsetUtil.getCharset(ci); - if (charset != null) { - return setCharset(charset); - } else { - return false; - } - } - - public String getCharset() { - return charset; - } - - public boolean setCharset(String charset) { - - //修复PHP字符集设置错误, 如: set names 'utf8' - if ( charset != null ) { - charset = charset.replace("'", ""); - } - - int ci = CharsetUtil.getIndex(charset); - if (ci > 0) { - this.charset = charset.equalsIgnoreCase("utf8mb4") ? "utf8" - : charset; - this.charsetIndex = ci; - return true; - } else { - return false; - } - } - - /** - * 设置是否需要中断当前事务 - */ - public void setTxInterrupt(String txInterrputMsg) { - if (!autocommit && !txInterrupted) { - txInterrupted = true; - this.txInterrputMsg = txInterrputMsg; - } - } - - public boolean isTxInterrupted() { - return txInterrupted; - } - - public HandshakePacket getHandshake() { - return handshake; - } - - public void setHandshake(HandshakePacket handshake) { - this.handshake = handshake; - } - - private static byte[] passwd(String pass, HandshakePacket hs) - throws NoSuchAlgorithmException { - if (pass == null || pass.length() == 0) { - return null; - } - byte[] passwd = pass.getBytes(); - int sl1 = hs.seed.length; - int sl2 = hs.restOfScrambleBuff.length; - byte[] seed = new byte[sl1 + sl2]; - System.arraycopy(hs.seed, 0, seed, 0, sl1); - System.arraycopy(hs.restOfScrambleBuff, 0, seed, sl1, sl2); - return SecurityUtil.scramble411(passwd, seed); - } - - public void authenticate() { - AuthPacket packet = new AuthPacket(); - packet.packetId = 1; - packet.clientFlags = clientFlags; - packet.maxPacketSize = maxPacketSize; - packet.charsetIndex = this.charsetIndex; - packet.user = user; 
- try { - packet.password = passwd(password, handshake); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e.getMessage()); - } - packet.database = schema; - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - packet.write(bufferArray); - // write to connection - this.write(bufferArray); - - } -} diff --git a/src/main/java/io/mycat/server/MySQLFrontConnection.java b/src/main/java/io/mycat/server/MySQLFrontConnection.java deleted file mode 100644 index bacbbba42..000000000 --- a/src/main/java/io/mycat/server/MySQLFrontConnection.java +++ /dev/null @@ -1,546 +0,0 @@ -package io.mycat.server; - -import io.mycat.MycatServer; -import io.mycat.net.NetSystem; -import io.mycat.route.RouteResultset; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.packet.HandshakePacket; -import io.mycat.server.packet.MySQLMessage; -import io.mycat.server.packet.OkPacket; -import io.mycat.server.parser.ServerParse; -import io.mycat.server.sqlhandler.BeginHandler; -import io.mycat.server.sqlhandler.ExplainHandler; -import io.mycat.server.sqlhandler.KillHandler; -import io.mycat.server.sqlhandler.SavepointHandler; -import io.mycat.server.sqlhandler.SelectHandler; -import io.mycat.server.sqlhandler.ServerLoadDataInfileHandler; -import io.mycat.server.sqlhandler.ServerPrepareHandler; -import io.mycat.server.sqlhandler.SetHandler; -import io.mycat.server.sqlhandler.ShowHandler; -import io.mycat.server.sqlhandler.StartHandler; -import io.mycat.server.sqlhandler.UseHandler; -import io.mycat.util.RandomUtil; - -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.net.InetSocketAddress; -import java.nio.channels.SocketChannel; -import java.util.Set; - -/** - * MySQL Front connection - * - * @author wuzhih - * - */ - -public class MySQLFrontConnection extends GenalMySQLConnection { - protected FrontendPrivileges privileges; - - protected FrontendPrepareHandler prepareHandler; - protected 
LoadDataInfileHandler loadDataInfileHandler; - private final NonBlockingSession session; - private boolean readOnlyUser = false; - - protected int getServerCapabilities() { - int flag = 0; - flag |= Capabilities.CLIENT_LONG_PASSWORD; - flag |= Capabilities.CLIENT_FOUND_ROWS; - flag |= Capabilities.CLIENT_LONG_FLAG; - flag |= Capabilities.CLIENT_CONNECT_WITH_DB; - // flag |= Capabilities.CLIENT_NO_SCHEMA; - boolean usingCompress = MycatServer.getInstance().getConfig() - .getSystem().getUseCompression() == 1; - if (usingCompress) { - flag |= Capabilities.CLIENT_COMPRESS; - } - flag |= Capabilities.CLIENT_ODBC; - flag |= Capabilities.CLIENT_LOCAL_FILES; - flag |= Capabilities.CLIENT_IGNORE_SPACE; - flag |= Capabilities.CLIENT_PROTOCOL_41; - flag |= Capabilities.CLIENT_INTERACTIVE; - // flag |= Capabilities.CLIENT_SSL; - flag |= Capabilities.CLIENT_IGNORE_SIGPIPE; - flag |= Capabilities.CLIENT_TRANSACTIONS; - // flag |= ServerDefs.CLIENT_RESERVED; - flag |= Capabilities.CLIENT_SECURE_CONNECTION; - return flag; - } - - public MySQLFrontConnection(SocketChannel channel) throws IOException { - super(channel); - - session = new NonBlockingSession(this); - InetSocketAddress remoteAddr = null; - InetSocketAddress localAddr = (InetSocketAddress) channel - .getLocalAddress(); - remoteAddr = (InetSocketAddress) ((SocketChannel) channel) - .getRemoteAddress(); - this.host = remoteAddr.getHostString(); - this.port = localAddr.getPort(); - this.localPort = remoteAddr.getPort(); - loadDataInfileHandler = new ServerLoadDataInfileHandler(this); - prepareHandler = new ServerPrepareHandler(this); - } - - public void sendAuthPackge() throws IOException { - // 生成认证数据 - byte[] rand1 = RandomUtil.randomBytes(8); - byte[] rand2 = RandomUtil.randomBytes(12); - - // 保存认证数据 - byte[] seed = new byte[rand1.length + rand2.length]; - System.arraycopy(rand1, 0, seed, 0, rand1.length); - System.arraycopy(rand2, 0, seed, rand1.length, rand2.length); - this.seed = seed; - - // 发送握手数据包 - 
HandshakePacket hs = new HandshakePacket(); - hs.packetId = 0; - hs.protocolVersion = Versions.PROTOCOL_VERSION; - hs.serverVersion = Versions.SERVER_VERSION; - hs.threadId = id; - hs.seed = rand1; - hs.serverCapabilities = getServerCapabilities(); - hs.serverCharsetIndex = (byte) (charsetIndex & 0xff); - hs.serverStatus = 2; - hs.restOfScrambleBuff = rand2; - hs.write(this); - - // asynread response - this.asynRead(); - } - - /** - * 设置是否需要中断当前事务 - */ - public void setTxInterrupt(String txInterrputMsg) { - if (!autocommit && !txInterrupted) { - txInterrupted = true; - this.txInterrputMsg = txInterrputMsg; - } - } - - public FrontendPrivileges getPrivileges() { - return privileges; - } - - public void setPrivileges(FrontendPrivileges privileges) { - this.privileges = privileges; - } - - public boolean isTxInterrupted() { - return txInterrupted; - } - - public NonBlockingSession getSession2() { - return this.session; - } - - public void initDB(byte[] data) { - MySQLMessage mm = new MySQLMessage(data); - mm.position(5); - String db = mm.readString(); - - // 检查schema的有效性 - if (db == null || !privileges.schemaExists(db)) { - writeErrMessage(ErrorCode.ER_BAD_DB_ERROR, "Unknown database '" - + db + "'"); - return; - } - if (!privileges.userExists(user, host)) { - writeErrMessage(ErrorCode.ER_ACCESS_DENIED_ERROR, - "Access denied for user '" + user + "'"); - return; - } - readOnlyUser = privileges.isReadOnly(user); - Set schemas = privileges.getUserSchemas(user); - if (schemas == null || schemas.size() == 0 || schemas.contains(db)) { - this.schema = db; - write(OkPacket.OK); - } else { - String s = "Access denied for user '" + user + "' to database '" - + db + "'"; - writeErrMessage(ErrorCode.ER_DBACCESS_DENIED_ERROR, s); - } - - } - - public void loadDataInfileStart(String sql) { - if (loadDataInfileHandler != null) { - try { - loadDataInfileHandler.start(sql); - } catch (Exception e) { - LOGGER.error("load data error", e); - writeErrMessage(ErrorCode.ERR_HANDLE_DATA, 
e.getMessage()); - } - - } else { - writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, - "load data infile sql is not unsupported!"); - } - - } - - public void loadDataInfileData(byte[] data) { - if (loadDataInfileHandler != null) { - try { - loadDataInfileHandler.handle(data); - } catch (Exception e) { - LOGGER.error("load data error", e); - writeErrMessage(ErrorCode.ERR_HANDLE_DATA, e.getMessage()); - } - } else { - writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, - "load data infile data is not unsupported!"); - } - - } - - public void loadDataInfileEnd(byte packID) { - if (loadDataInfileHandler != null) { - try { - loadDataInfileHandler.end(packID); - } catch (Exception e) { - LOGGER.error("load data error", e); - writeErrMessage(ErrorCode.ERR_HANDLE_DATA, e.getMessage()); - } - } else { - writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, - "load data infile end is not unsupported!"); - } - - } - - public void query(byte[] data) { - - if (this.isClosed()) { - LOGGER.warn("ignore execute ,server connection is closed " + this); - return; - } - // 状态检查 - if (txInterrupted) { - writeErrMessage(ErrorCode.ER_YES, - "Transaction error, need to rollback." 
+ txInterrputMsg); - return; - } - - // 取得语句 - MySQLMessage mm = new MySQLMessage(data); - mm.position(5); - String sql = null; - try { - sql = mm.readString(charset); - } catch (UnsupportedEncodingException e) { - writeErrMessage(ErrorCode.ER_UNKNOWN_CHARACTER_SET, - "Unknown charset '" + charset + "'"); - return; - } - - query(sql); - - } - - public void query(String sql) { - if (sql == null || sql.length() == 0) { - writeErrMessage(ErrorCode.ER_NOT_ALLOWED_COMMAND, "Empty SQL"); - return; - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug(new StringBuilder().append(this).append(" ") - .append(sql).toString()); - } - - // sql = StringUtil.replace(sql, "`", ""); - - // remove last ';' - if (sql.endsWith(";")) { - sql = sql.substring(0, sql.length() - 1); - } - - // 执行查询 - int rs = ServerParse.parse(sql); - int sqlType = rs & 0xff; - - // 检查当前使用的DB - String db = this.schema; - if (db == null - && sqlType!=ServerParse.USE - && sqlType!=ServerParse.HELP - && sqlType!=ServerParse.SET - && sqlType!=ServerParse.SHOW - && sqlType!=ServerParse.KILL - && sqlType!=ServerParse.KILL_QUERY - && sqlType!=ServerParse.MYSQL_COMMENT - && sqlType!=ServerParse.MYSQL_CMD_COMMENT) { - writeErrMessage(ErrorCode.ERR_BAD_LOGICDB, "No MyCAT Database selected"); - return; - } - - switch (sqlType) { - case ServerParse.EXPLAIN: - ExplainHandler.handle(sql, this, rs >>> 8); - break; - case ServerParse.SET: - SetHandler.handle(sql, this, rs >>> 8); - break; - case ServerParse.SHOW: - ShowHandler.handle(sql, this, rs >>> 8); - break; - case ServerParse.SELECT: - SelectHandler.handle(sql, this, rs >>> 8); - break; - case ServerParse.START: - StartHandler.handle(sql, this, rs >>> 8); - break; - case ServerParse.BEGIN: - BeginHandler.handle(sql, this); - break; - case ServerParse.SAVEPOINT: - SavepointHandler.handle(sql, this); - break; - case ServerParse.KILL: - KillHandler.handle(sql, rs >>> 8, this); - break; - case ServerParse.KILL_QUERY: - LOGGER.warn(new StringBuilder().append("Unsupported 
command:") - .append(sql).toString()); - writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, - "Unsupported command"); - break; - case ServerParse.USE: - UseHandler.handle(sql, this, rs >>> 8); - break; - case ServerParse.COMMIT: - commit(); - break; - case ServerParse.ROLLBACK: - rollback(); - break; - case ServerParse.HELP: - LOGGER.warn(new StringBuilder().append("Unsupported command:") - .append(sql).toString()); - writeErrMessage(ErrorCode.ER_SYNTAX_ERROR, "Unsupported command"); - break; - case ServerParse.MYSQL_CMD_COMMENT: - write(OkPacket.OK); - break; - case ServerParse.MYSQL_COMMENT: - write(OkPacket.OK); - break; - case ServerParse.LOAD_DATA_INFILE_SQL: - loadDataInfileStart(sql); - break; - default: - if (this.isReadOnlyUser()) { - LOGGER.warn(new StringBuilder().append("User readonly:") - .append(sql).toString()); - writeErrMessage(ErrorCode.ER_USER_READ_ONLY, "User readonly"); - break; - } - execute(sql, rs & 0xff); - } - } - - public boolean isReadOnlyUser() { - return readOnlyUser; - } - - public void execute(String sql, int type) { - SchemaConfig schema = MycatServer.getInstance().getConfig() - .getSchemas().get(this.schema); - if (schema == null) { - writeErrMessage(ErrorCode.ERR_BAD_LOGICDB, - "Unknown MyCAT Database '" + schema + "'"); - return; - } - routeEndExecuteSQL(sql, type, schema); - - } - - public void routeEndExecuteSQL(String sql, int type, SchemaConfig schema) { - // 路由计算 - RouteResultset rrs = null; - try { - rrs = MycatServer - .getInstance() - .getRouterservice() - .route(MycatServer.getInstance().getConfig().getSystem(), - schema, type, sql, this.charset, this); - - } catch (Exception e) { - StringBuilder s = new StringBuilder(); - LOGGER.warn( - s.append(this).append(sql).toString() + " err:" - + e.toString(), e); - String msg = e.getMessage(); - writeErrMessage(ErrorCode.ER_PARSE_ERROR, msg == null ? 
e - .getClass().getSimpleName() : msg); - return; - } - if (rrs != null) { - // session执行 - session.execute(rrs, type); - } - } - - public void stmtPrepare(byte[] data) { - if (prepareHandler != null) { - // 取得语句 - MySQLMessage mm = new MySQLMessage(data); - mm.position(5); - String sql = null; - try { - sql = mm.readString(charset); - } catch (UnsupportedEncodingException e) { - writeErrMessage(ErrorCode.ER_UNKNOWN_CHARACTER_SET, - "Unknown charset '" + charset + "'"); - return; - } - if (sql == null || sql.length() == 0) { - writeErrMessage(ErrorCode.ER_NOT_ALLOWED_COMMAND, "Empty SQL"); - return; - } - - // 执行预处理 - prepareHandler.prepare(sql); - } else { - writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, - "Prepare unsupported!"); - } - } - - public void stmtExecute(byte[] data) { - if (prepareHandler != null) { - prepareHandler.execute(data); - } else { - writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, - "Prepare unsupported!"); - } - } - - public void stmtClose(byte[] data) { - if (prepareHandler != null) { - prepareHandler.close(data); - } else { - writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, - "Prepare unsupported!"); - } - } - - public RouteResultset routeSQL(String sql, int type) { - - // 检查当前使用的DB - String db = this.schema; - if (db == null) { - writeErrMessage(ErrorCode.ERR_BAD_LOGICDB, - "No MyCAT Database selected"); - return null; - } - SchemaConfig schema = MycatServer.getInstance().getConfig() - .getSchemas().get(db); - if (schema == null) { - writeErrMessage(ErrorCode.ERR_BAD_LOGICDB, - "Unknown MyCAT Database '" + db + "'"); - return null; - } - - // 路由计算 - RouteResultset rrs = null; - try { - rrs = MycatServer - .getInstance() - .getRouterservice() - .route(MycatServer.getInstance().getConfig().getSystem(), - schema, type, sql, this.charset, this); - - } catch (Exception e) { - StringBuilder s = new StringBuilder(); - LOGGER.warn( - s.append(this).append(sql).toString() + " err:" - + e.toString(), e); - String msg = e.getMessage(); - 
writeErrMessage(ErrorCode.ER_PARSE_ERROR, msg == null ? e - .getClass().getSimpleName() : msg); - return null; - } - return rrs; - } - - /** - * 提交事务 - */ - public void commit() { - if (txInterrupted) { - writeErrMessage(ErrorCode.ER_YES, - "Transaction error, need to rollback."); - } else { - session.commit(); - } - } - - /** - * 回滚事务 - */ - public void rollback() { - // 状态检查 - if (txInterrupted) { - txInterrupted = false; - } - - // 执行回滚 - session.rollback(); - } - - /** - * 撤销执行中的语句 - * - * @param sponsor - * 发起者为null表示是自己 - */ - public void cancel(final MySQLFrontConnection sponsor) { - NetSystem.getInstance().getExecutor().execute(new Runnable() { - @Override - public void run() { - session.cancel(sponsor); - } - }); - } - - @Override - public void close(String reason) { - super.close(reason); - session.terminate(); - if (getLoadDataInfileHandler() != null) { - getLoadDataInfileHandler().clear(); - } - if(getPrepareHandler() != null) { - getPrepareHandler().clear(); - } - } - - public LoadDataInfileHandler getLoadDataInfileHandler() { - return loadDataInfileHandler; - } - - public FrontendPrepareHandler getPrepareHandler() { - return prepareHandler; - } - - public void ping() { - write(OkPacket.OK); - } - - public void heartbeat(byte[] data) { - write(OkPacket.OK); - } - - public void kill(byte[] data) { - writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Unknown command"); - } - - public void unknown(byte[] data) { - writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Unknown command"); - } - -} diff --git a/src/main/java/io/mycat/server/MySQLFrontConnectionFactory.java b/src/main/java/io/mycat/server/MySQLFrontConnectionFactory.java deleted file mode 100644 index db96528db..000000000 --- a/src/main/java/io/mycat/server/MySQLFrontConnectionFactory.java +++ /dev/null @@ -1,41 +0,0 @@ -package io.mycat.server; - -import io.mycat.MycatServer; -import io.mycat.net.Connection; -import io.mycat.net.ConnectionFactory; -import io.mycat.net.NIOHandler; -import 
io.mycat.server.config.node.SystemConfig; - -import java.io.IOException; -import java.nio.channels.SocketChannel; - -public class MySQLFrontConnectionFactory extends ConnectionFactory { - - private final NIOHandler nioHandler; - - public MySQLFrontConnectionFactory( - NIOHandler nioHandler) { - super(); - this.nioHandler = nioHandler; - } - - @Override - protected Connection makeConnection(SocketChannel channel) - throws IOException { - MySQLFrontConnection con = new MySQLFrontConnection(channel); - SystemConfig sys = MycatServer.getInstance().getConfig().getSystem(); - con.setPrivileges(MycatPrivileges.instance()); - con.setCharset(sys.getCharset()); - // con.setLoadDataInfileHandler(new ServerLoadDataInfileHandler(c)); - // c.setPrepareHandler(new ServerPrepareHandler(c)); - con.setTxIsolation(sys.getTxIsolation()); - return con; - } - - @Override - protected NIOHandler getNIOHandler() { - - return nioHandler; - } - -} diff --git a/src/main/java/io/mycat/server/MySQLFrontConnectionHandler.java b/src/main/java/io/mycat/server/MySQLFrontConnectionHandler.java deleted file mode 100644 index 25ac5479b..000000000 --- a/src/main/java/io/mycat/server/MySQLFrontConnectionHandler.java +++ /dev/null @@ -1,254 +0,0 @@ -package io.mycat.server; - -import io.mycat.MycatServer; -import io.mycat.net.Connection; -import io.mycat.net.NIOHandler; -import io.mycat.server.packet.AuthPacket; -import io.mycat.server.packet.MySQLMessage; -import io.mycat.server.packet.MySQLPacket; -import io.mycat.server.packet.QuitPacket; -import io.mycat.server.packet.util.SecurityUtil; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.security.NoSuchAlgorithmException; -import java.util.Set; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class MySQLFrontConnectionHandler implements - NIOHandler { - private static final byte[] AUTH_OK = new byte[] { 7, 0, 0, 2, 0, 0, 0, 2, - 0, 0, 0 }; - protected static final Logger LOGGER = LoggerFactory - 
.getLogger(MySQLFrontConnectionHandler.class); - - @Override - public void onConnected(MySQLFrontConnection con) throws IOException { - con.sendAuthPackge(); - } - - @Override - public void handle(MySQLFrontConnection con, ByteBuffer data, - final int start, final int readedLength) { - switch (con.getState()) { - case connecting: { - doConnecting(con, data, start, readedLength); - return; - } - case connected: { - try { - doHandleBusinessMsg(con, data, start, readedLength); - } catch (Exception e) { - LOGGER.warn("caught err ", e); - } - return; - } - - default: - LOGGER.warn("not handled connecton state err " + con.getState() - + " for con " + con); - break; - - } - } - - private void doConnecting(MySQLFrontConnection source, ByteBuffer buf, - final int start, final int readedLength) { - byte[] data = new byte[readedLength]; - buf.get(data, start, readedLength); - // check quit packet - if (data.length == QuitPacket.QUIT.length - && data[4] == MySQLPacket.COM_QUIT) { - source.close("quit packet"); - return; - } - - AuthPacket auth = new AuthPacket(); - auth.read(data); - - // check user - if (!checkUser(source, auth.user, source.getHost())) { - failure(source, ErrorCode.ER_ACCESS_DENIED_ERROR, - "Access denied for user '" + auth.user + "'"); - return; - } - - // check password - if (!checkPassword(source, auth.password, auth.user)) { - failure(source, ErrorCode.ER_ACCESS_DENIED_ERROR, - "Access denied for user '" + auth.user + "'"); - return; - } - - // check schema - switch (checkSchema(source, auth.database, auth.user)) { - case ErrorCode.ER_BAD_DB_ERROR: - failure(source, ErrorCode.ER_BAD_DB_ERROR, "Unknown database '" - + auth.database + "'"); - break; - case ErrorCode.ER_DBACCESS_DENIED_ERROR: - String s = "Access denied for user '" + auth.user - + "' to database '" + auth.database + "'"; - failure(source, ErrorCode.ER_DBACCESS_DENIED_ERROR, s); - break; - default: - success(source, auth); - } - } - - protected boolean checkUser(MySQLFrontConnection source, 
String user, - String host) { - return source.getPrivileges().userExists(user, host); - } - - protected boolean checkPassword(MySQLFrontConnection source, - byte[] password, String user) { - String pass = source.getPrivileges().getPassword(user); - - // check null - if (pass == null || pass.length() == 0) { - if (password == null || password.length == 0) { - return true; - } else { - return false; - } - } - if (password == null || password.length == 0) { - return false; - } - - // encrypt - byte[] encryptPass = null; - try { - encryptPass = SecurityUtil.scramble411(pass.getBytes(), - source.getSeed()); - } catch (NoSuchAlgorithmException e) { - LOGGER.warn(source.toString(), e); - return false; - } - if (encryptPass != null && (encryptPass.length == password.length)) { - int i = encryptPass.length; - while (i-- != 0) { - if (encryptPass[i] != password[i]) { - return false; - } - } - } else { - return false; - } - - return true; - } - - protected int checkSchema(MySQLFrontConnection source, String schema, - String user) { - if (schema == null) { - return 0; - } - FrontendPrivileges privileges = source.getPrivileges(); - if (!privileges.schemaExists(schema)) { - return ErrorCode.ER_BAD_DB_ERROR; - } - Set schemas = privileges.getUserSchemas(user); - if (schemas == null || schemas.size() == 0 || schemas.contains(schema)) { - return 0; - } else { - return ErrorCode.ER_DBACCESS_DENIED_ERROR; - } - } - - protected void success(MySQLFrontConnection source, AuthPacket auth) { - source.setAuthenticated(true); - source.setUser(auth.user); - source.setSchema(auth.database); - source.setCharsetIndex(auth.charsetIndex); - if (LOGGER.isInfoEnabled()) { - StringBuilder s = new StringBuilder(); - s.append(source).append('\'').append(auth.user) - .append("' login success"); - byte[] extra = auth.extra; - if (extra != null && extra.length > 0) { - s.append(",extra:").append(new String(extra)); - } - LOGGER.info(s.toString()); - } - - source.write(AUTH_OK); - boolean clientCompress = 
Capabilities.CLIENT_COMPRESS == (Capabilities.CLIENT_COMPRESS & auth.clientFlags); - boolean usingCompress = MycatServer.getInstance().getConfig() - .getSystem().getUseCompression() == 1; - if (clientCompress && usingCompress) { - source.setSupportCompress(true); - } - source.setState(Connection.State.connected); - } - - protected void failure(MySQLFrontConnection source, int errno, String info) { - LOGGER.error(source.toString() + info); - source.writeErrMessage(errno, info); - } - - public void onClosed(MySQLFrontConnection con,String reason) { - - } - - public void doHandleBusinessMsg(final MySQLFrontConnection source, - final ByteBuffer buf, final int start, final int readedLength) { - byte[] data = new byte[readedLength]; - buf.get(data, start, readedLength); - if (source.getLoadDataInfileHandler() != null - && source.getLoadDataInfileHandler().isStartLoadData()) { - MySQLMessage mm = new MySQLMessage(data); - int packetLength = mm.readUB3(); - if (packetLength + 4 == data.length) { - source.loadDataInfileData(data); - } - return; - } - switch (data[4]) { - case MySQLPacket.COM_INIT_DB: - source.initDB(data); - break; - case MySQLPacket.COM_QUERY: - source.query(data); - break; - case MySQLPacket.COM_PING: - source.ping(); - break; - case MySQLPacket.COM_QUIT: - source.close("quit cmd"); - break; - case MySQLPacket.COM_PROCESS_KILL: - source.kill(data); - break; - case MySQLPacket.COM_STMT_PREPARE: - source.stmtPrepare(data); - break; - case MySQLPacket.COM_STMT_EXECUTE: - - source.stmtExecute(data); - break; - case MySQLPacket.COM_STMT_CLOSE: - - source.stmtClose(data); - break; - case MySQLPacket.COM_HEARTBEAT: - - source.heartbeat(data); - break; - default: - source.writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, - "Unknown command"); - - } - - } - - @Override - public void onConnectFailed(MySQLFrontConnection con, Throwable e) { - - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/MycatPrivileges.java 
b/src/main/java/io/mycat/server/MycatPrivileges.java deleted file mode 100644 index addefd387..000000000 --- a/src/main/java/io/mycat/server/MycatPrivileges.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server; - -import io.mycat.MycatServer; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.config.node.UserConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Map; -import java.util.Set; - -/** - * @author mycat - */ -public class MycatPrivileges implements FrontendPrivileges { - /** - * 无需每次建立连接都new实例。 - */ - private static MycatPrivileges instance = new MycatPrivileges(); - - private static final Logger ALARM = LoggerFactory - .getLogger("alarm"); - - public static MycatPrivileges instance() { - return instance; - } - - private MycatPrivileges() { - super(); - } - - @Override - public boolean schemaExists(String schema) { - MycatConfig conf = MycatServer.getInstance().getConfig(); - return conf.getSchemas().containsKey(schema); - } - - @Override - public boolean userExists(String user, String host) { - MycatConfig conf = MycatServer.getInstance().getConfig(); - Map> quarantineHosts = conf.getQuarantine().getHosts(); - if (quarantineHosts.containsKey(host)) { - boolean rs = quarantineHosts.get(host).contains(user); - if (!rs) { - ALARM.error(new StringBuilder().append(Alarms.QUARANTINE_ATTACK).append("[host=").append(host) - .append(",user=").append(user).append(']').toString()); - } - return rs; - } else { - if (user != null && user.equals(conf.getSystem().getClusterHeartbeatUser())) { - return true; - } else { - return conf.getUsers().containsKey(user); - } - } - } - - @Override - public String getPassword(String user) { - MycatConfig conf = MycatServer.getInstance().getConfig(); - if (user != null && user.equals(conf.getSystem().getClusterHeartbeatUser())) { - return conf.getSystem().getClusterHeartbeatPass(); - } else { - UserConfig uc = conf.getUsers().get(user); - if (uc != null) { - return uc.getPassword(); - } else { - return null; - } - } - } - - @Override - public Set getUserSchemas(String user) { - MycatConfig conf = MycatServer.getInstance().getConfig(); - UserConfig uc 
= conf.getUsers().get(user); - if (uc != null) { - return uc.getSchemas(); - } else { - return null; - } - } - - @Override - public boolean isReadOnly(String user) { - MycatConfig conf = MycatServer.getInstance().getConfig(); - UserConfig uc = conf.getUsers().get(user); - if (uc != null) { - return uc.isReadOnly(); - } else { - return false; - } - } -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/NonBlockingSession.java b/src/main/java/io/mycat/server/NonBlockingSession.java index 9820358ec..e89c700c6 100644 --- a/src/main/java/io/mycat/server/NonBlockingSession.java +++ b/src/main/java/io/mycat/server/NonBlockingSession.java @@ -23,90 +23,277 @@ */ package io.mycat.server; -import io.mycat.MycatServer; -import io.mycat.backend.BackendConnection; -import io.mycat.backend.PhysicalDBNode; -import io.mycat.route.RouteResultset; -import io.mycat.route.RouteResultsetNode; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.executors.*; -import io.mycat.server.packet.OkPacket; -import io.mycat.server.sqlcmd.SQLCmdConstant; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.BackendConnection; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.mysql.nio.handler.CommitNodeHandler; +import io.mycat.backend.mysql.nio.handler.KillConnectionHandler; +import io.mycat.backend.mysql.nio.handler.LockTablesHandler; +import io.mycat.backend.mysql.nio.handler.MiddlerResultHandler; +import io.mycat.backend.mysql.nio.handler.MultiNodeCoordinator; 
+import io.mycat.backend.mysql.nio.handler.MultiNodeQueryHandler; +import io.mycat.backend.mysql.nio.handler.RollbackNodeHandler; +import io.mycat.backend.mysql.nio.handler.RollbackReleaseHandler; +import io.mycat.backend.mysql.nio.handler.SingleNodeHandler; +import io.mycat.backend.mysql.nio.handler.UnLockTablesHandler; +import io.mycat.config.ErrorCode; +import io.mycat.config.MycatConfig; +import io.mycat.net.FrontendConnection; +import io.mycat.net.mysql.OkPacket; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteResultsetNode; +import io.mycat.server.parser.ServerParse; +import io.mycat.server.sqlcmd.SQLCmdConstant; + /** * @author mycat * @author mycat */ -public class NonBlockingSession{ - public static final Logger LOGGER = LoggerFactory - .getLogger(NonBlockingSession.class); - - private final MySQLFrontConnection source; - private final ConcurrentHashMap target; - // life-cycle: each sql execution - private volatile SingleNodeHandler singleNodeHandler; - private volatile MultiNodeQueryHandler multiNodeHandler; - private volatile RollbackNodeHandler rollbackHandler; - private final MultiNodeCoordinator multiNodeCoordinator; - private final CommitNodeHandler commitHandler; - private volatile String xaTXID; - - private boolean prepared; - - public NonBlockingSession(MySQLFrontConnection source) { - this.source = source; - this.target = new ConcurrentHashMap( - 2, 0.75f); - multiNodeCoordinator = new MultiNodeCoordinator(this); - commitHandler = new CommitNodeHandler(this); - } - - - public MySQLFrontConnection getSource() { - return source; - } - - - public int getTargetCount() { - return target.size(); - } - - public Set getTargetKeys() { - return target.keySet(); - } - - public BackendConnection getTarget(RouteResultsetNode key) { - return target.get(key); - } - - public Map getTargetMap() { - return this.target; - } - - public BackendConnection removeTarget(RouteResultsetNode key) { - return target.remove(key); - } +public class 
NonBlockingSession implements Session { + + public static final Logger LOGGER = LoggerFactory.getLogger(NonBlockingSession.class); + + private final ServerConnection source; + //huangyiming add 避免出现jdk版本冲突 + private final ConcurrentMap target; + // life-cycle: each sql execution + private volatile SingleNodeHandler singleNodeHandler; + private volatile MultiNodeQueryHandler multiNodeHandler; + private volatile RollbackNodeHandler rollbackHandler; + private final MultiNodeCoordinator multiNodeCoordinator; + private final CommitNodeHandler commitHandler; + private volatile String xaTXID; + + //huangyiming + private volatile boolean canClose = true; + + private volatile MiddlerResultHandler middlerResultHandler; + private boolean prepared; + + public NonBlockingSession(ServerConnection source) { + this.source = source; + this.target = new ConcurrentHashMap(2, 0.75f); + multiNodeCoordinator = new MultiNodeCoordinator(this); + commitHandler = new CommitNodeHandler(this); + } + + @Override + public ServerConnection getSource() { + return source; + } + + @Override + public int getTargetCount() { + return target.size(); + } + + public Set getTargetKeys() { + return target.keySet(); + } + + public BackendConnection getTarget(RouteResultsetNode key) { + return target.get(key); + } + + public Map getTargetMap() { + return this.target; + } + + public BackendConnection removeTarget(RouteResultsetNode key) { + return target.remove(key); + } + + @Override + public void execute(RouteResultset rrs, int type) { + + // clear prev execute resources + clearHandlesResources(); + if (LOGGER.isDebugEnabled()) { + StringBuilder s = new StringBuilder(); + LOGGER.debug(s.append(source).append(rrs).toString() + " rrs "); + } + + // 检查路由结果是否为空 + RouteResultsetNode[] nodes = rrs.getNodes(); + if (nodes == null || nodes.length == 0 || nodes[0].getName() == null || nodes[0].getName().equals("")) { + source.writeErrMessage(ErrorCode.ER_NO_DB_ERROR, + "No dataNode found ,please check tables defined 
in schema:" + source.getSchema()); + return; + } + boolean autocommit = source.isAutocommit(); + final int initCount = target.size(); + if (nodes.length == 1) { + singleNodeHandler = new SingleNodeHandler(rrs, this); + if (this.isPrepared()) { + singleNodeHandler.setPrepared(true); + } + + try { + if(initCount > 1){ + checkDistriTransaxAndExecute(rrs,1,autocommit); + }else{ + singleNodeHandler.execute(); + } + } catch (Exception e) { + LOGGER.warn(new StringBuilder().append(source).append(rrs).toString(), e); + source.writeErrMessage(ErrorCode.ERR_HANDLE_DATA, e.toString()); + } + + } else { + + multiNodeHandler = new MultiNodeQueryHandler(type, rrs, autocommit, this); + if (this.isPrepared()) { + multiNodeHandler.setPrepared(true); + } + try { + if(((type == ServerParse.DELETE || type == ServerParse.INSERT || type == ServerParse.UPDATE) && !rrs.isGlobalTable() && nodes.length > 1)||initCount > 1) { + checkDistriTransaxAndExecute(rrs,2,autocommit); + } else { + multiNodeHandler.execute(); + } + } catch (Exception e) { + LOGGER.warn(new StringBuilder().append(source).append(rrs).toString(), e); + source.writeErrMessage(ErrorCode.ERR_HANDLE_DATA, e.toString()); + } + } + + if (this.isPrepared()) { + this.setPrepared(false); + } + } + + private void checkDistriTransaxAndExecute(RouteResultset rrs, int type,boolean autocommit) throws Exception { + switch(MycatServer.getInstance().getConfig().getSystem().getHandleDistributedTransactions()) { + case 1: + source.writeErrMessage(ErrorCode.ER_NOT_ALLOWED_COMMAND, "Distributed transaction is disabled!"); + if(!autocommit){ + source.setTxInterrupt("Distributed transaction is disabled!"); + } + break; + case 2: + LOGGER.warn("Distributed transaction detected! 
RRS:" + rrs); + if(type == 1){ + singleNodeHandler.execute(); + } + else{ + multiNodeHandler.execute(); + } + break; + default: + if(type == 1){ + singleNodeHandler.execute(); + } + else{ + multiNodeHandler.execute(); + } + } + } + + private void checkDistriTransaxAndExecute() { + if(!isALLGlobal()){ + switch(MycatServer.getInstance().getConfig().getSystem().getHandleDistributedTransactions()) { + case 1: + source.writeErrMessage(ErrorCode.ER_NOT_ALLOWED_COMMAND, "Distributed transaction is disabled!Please rollback!"); + source.setTxInterrupt("Distributed transaction is disabled!"); + break; + case 2: + multiNodeCoordinator.executeBatchNodeCmd(SQLCmdConstant.COMMIT_CMD); + LOGGER.warn("Distributed transaction detected! Targets:" + target); + break; + default: + multiNodeCoordinator.executeBatchNodeCmd(SQLCmdConstant.COMMIT_CMD); + + } + } else { + multiNodeCoordinator.executeBatchNodeCmd(SQLCmdConstant.COMMIT_CMD); + } + } + + public void commit() { + final int initCount = target.size(); + if (initCount <= 0) { + ByteBuffer buffer = source.allocate(); + buffer = source.writeToBuffer(OkPacket.OK, buffer); + source.write(buffer); + /* 1. 如果开启了 xa 事务 */ + if(getXaTXID()!=null){ + setXATXEnabled(false); + } + /* 2. 
preAcStates 为true,事务结束后,需要设置为true。preAcStates 为ac上一个状态 */ + if(source.isPreAcStates()&&!source.isAutocommit()){ + source.setAutocommit(true); + } + return; + } else if (initCount == 1) { + //huangyiming add 避免出现jdk版本冲突 + BackendConnection con = target.values().iterator().next(); + commitHandler.commit(con); + } else { + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("multi node commit to send ,total " + initCount); + } + checkDistriTransaxAndExecute(); + } + + } + + private boolean isALLGlobal(){ + for(RouteResultsetNode routeResultsetNode:target.keySet()){ + if(routeResultsetNode.getSource()==null){ + return false; + } + else if(!routeResultsetNode.getSource().isGlobalTable()){ + return false; + } + } + return true; + } + + public void rollback() { + final int initCount = target.size(); + if (initCount <= 0) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("no session bound connections found ,no need send rollback cmd "); + } + ByteBuffer buffer = source.allocate(); + buffer = source.writeToBuffer(OkPacket.OK, buffer); + source.write(buffer); + /* 1. 如果开启了 xa 事务 */ + if(getXaTXID()!=null){ + setXATXEnabled(false); + } + /* 2. 
preAcStates 为true,事务结束后,需要设置为true。preAcStates 为ac上一个状态 */ + if(source.isPreAcStates()&&!source.isAutocommit()){ + source.setAutocommit(true); + } + return; + } - - public void execute(RouteResultset rrs, int type) { - // clear prev execute resources - clearHandlesResources(); - if (LOGGER.isDebugEnabled()) { - StringBuilder s = new StringBuilder(); - LOGGER.debug(s.append(source).append(rrs).toString() + " rrs "); - } + rollbackHandler = new RollbackNodeHandler(this); + rollbackHandler.rollback(); + } + /** + * 执行lock tables语句方法 + * @author songdabin + * @date 2016-7-9 + * @param rrs + */ + public void lockTable(RouteResultset rrs) { // 检查路由结果是否为空 RouteResultsetNode[] nodes = rrs.getNodes(); if (nodes == null || nodes.length == 0 || nodes[0].getName() == null @@ -116,284 +303,301 @@ public void execute(RouteResultset rrs, int type) { + source.getSchema()); return; } - - if (nodes.length == 1) { - singleNodeHandler = new SingleNodeHandler(rrs, this); - if(this.isPrepared()) { - singleNodeHandler.setPrepared(true); - } - try { - singleNodeHandler.execute(); - } catch (Exception e) { - LOGGER.warn("{} {}", source, rrs, e); - source.writeErrMessage(ErrorCode.ERR_HANDLE_DATA, e.toString()); - } - } else { - boolean autocommit = source.isAutocommit(); -// SystemConfig sysConfig = MycatServer.getInstance().getConfig() -// .getSystem(); - multiNodeHandler = new MultiNodeQueryHandler(type, rrs, autocommit, - this); - if(this.isPrepared()) { - multiNodeHandler.setPrepared(true); - } - try { - multiNodeHandler.execute(); - } catch (Exception e) { - LOGGER.warn("{} {}", source, rrs, e); - source.writeErrMessage(ErrorCode.ERR_HANDLE_DATA, e.toString()); - } - } - - if(this.isPrepared()) { - this.setPrepared(false); - } - - } - - public void commit() { - final int initCount = target.size(); - if (initCount <= 0) { - source.write(OkPacket.OK); - return; - } else if (initCount == 1) { - BackendConnection con = target.elements().nextElement(); - commitHandler.commit(con); - - } 
else { - - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("multi node commit to send ,total " + initCount); - } - multiNodeCoordinator.executeBatchNodeCmd(SQLCmdConstant.COMMIT_CMD); - } - - } - - public void rollback() { - final int initCount = target.size(); - if (initCount <= 0) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("no session bound connections found ,no need send rollback cmd "); - } - source.write(OkPacket.OK); - return; - } - rollbackHandler = new RollbackNodeHandler(this); - rollbackHandler.rollback(); - } - - - public void cancel(MySQLFrontConnection sponsor) { - - } - - /** - * {@link ServerConnection#isClosed()} must be true before invoking this - */ - public void terminate() { - for (BackendConnection node : target.values()) { - node.close("client closed "); - } - target.clear(); - clearHandlesResources(); - } - - public void closeAndClearResources(String reason) { - for (BackendConnection node : target.values()) { - node.close(reason); - } - target.clear(); - clearHandlesResources(); - } - - public void releaseConnectionIfSafe(BackendConnection conn, boolean debug, - boolean needRollback) { - RouteResultsetNode node = (RouteResultsetNode) conn.getAttachment(); - - if (node != null) { - if (this.source.isAutocommit() || conn.isFromSlaveDB() - || !conn.isModifiedSQLExecuted()) { - releaseConnection((RouteResultsetNode) conn.getAttachment(), - LOGGER.isDebugEnabled(), needRollback); - } + LockTablesHandler handler = new LockTablesHandler(this, rrs); + source.setLocked(true); + try { + handler.execute(); + } catch (Exception e) { + LOGGER.warn(new StringBuilder().append(source).append(rrs).toString(), e); + source.writeErrMessage(ErrorCode.ERR_HANDLE_DATA, e.toString()); } } - public void releaseConnection(RouteResultsetNode rrn, boolean debug, - final boolean needRollback) { - - BackendConnection c = target.remove(rrn); - if (c != null) { - if (debug) { - LOGGER.debug("release connection " + c); - } - if (c.getAttachment() != null) { - 
c.setAttachment(null); - } - if (!c.isClosedOrQuit()) { - if (c.isAutocommit()) { - c.release(); - } else - // if (needRollback) - { - c.setResponseHandler(new RollbackReleaseHandler()); - c.rollback(); - } - // else { - // c.release(); - // } - } - } - } - - public void releaseConnections(final boolean needRollback) { - boolean debug = LOGGER.isDebugEnabled(); - for (RouteResultsetNode rrn : target.keySet()) { - releaseConnection(rrn, debug, needRollback); - } - } - - public void releaseConnection(BackendConnection con) { - Iterator> itor = target - .entrySet().iterator(); - while (itor.hasNext()) { - BackendConnection theCon = itor.next().getValue(); - if (theCon == con) { - itor.remove(); - con.release(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("realse connection " + con); - } - break; - } - } - - } - /** - * @return previous bound connection + * 执行unlock tables语句方法 + * @author songdabin + * @date 2016-7-9 + * @param rrs */ - public BackendConnection bindConnection(RouteResultsetNode key, - BackendConnection conn) { - // System.out.println("bind connection "+conn+ - // " to key "+key.getName()+" on sesion "+this); - return target.put(key, conn); + public void unLockTable(String sql) { + UnLockTablesHandler handler = new UnLockTablesHandler(this, this.source.isAutocommit(), sql); + handler.execute(); } - - public boolean tryExistsCon(final BackendConnection conn, - RouteResultsetNode node) { - - if (conn == null) { - return false; - } - if (!conn.isFromSlaveDB() - || node.canRunnINReadDB(getSource().isAutocommit())) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("found connections in session to use " + conn - + " for " + node); - } - conn.setAttachment(node); - return true; - } else { - // slavedb connection and can't use anymore ,release it - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("release slave connection,can't be used in trasaction " - + conn + " for " + node); - } - releaseConnection(node, LOGGER.isDebugEnabled(), false); - } - return false; 
- } - - protected void kill() { - boolean hooked = false; - AtomicInteger count = null; - Map killees = null; - for (RouteResultsetNode node : target.keySet()) { - BackendConnection c = target.get(node); - if (c != null) { - if (!hooked) { - hooked = true; - killees = new HashMap(); - count = new AtomicInteger(0); - } - killees.put(node, c); - count.incrementAndGet(); - } - } - if (hooked) { - for (Entry en : killees - .entrySet()) { - KillConnectionHandler kill = new KillConnectionHandler( - en.getValue(), this); - MycatConfig conf = MycatServer.getInstance().getConfig(); - PhysicalDBNode dn = conf.getDataNodes().get( - en.getKey().getName()); - try { - dn.getConnectionFromSameSource(null, true, en.getValue(), - kill, en.getKey()); - } catch (Exception e) { - LOGGER.error( - "get killer connection failed for " + en.getKey(), - e); - kill.connectionError(e, null); - } - } - } - } - - private void clearHandlesResources() { - SingleNodeHandler singleHander = singleNodeHandler; - if (singleHander != null) { - singleHander.clearResources(); - singleNodeHandler = null; - } - MultiNodeQueryHandler multiHandler = multiNodeHandler; - if (multiHandler != null) { - multiHandler.clearResources(); - multiNodeHandler = null; - } - } - - public void clearResources(final boolean needRollback) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("clear session resources " + this); - } - this.releaseConnections(needRollback); - clearHandlesResources(); - } - - public boolean closed() { - return source.isClosed(); - } - - private String genXATXID() { - return MycatServer.getInstance().genXATXID(); - } - - public void setXATXEnabled(boolean xaTXEnabled) { - - LOGGER.info("XA Transaction enabled ,con " + this.getSource()); - if (xaTXEnabled && this.xaTXID == null) { - xaTXID = genXATXID(); - - } - + + @Override + public void cancel(FrontendConnection sponsor) { + + } + + /** + * {@link ServerConnection#isClosed()} must be true before invoking this + */ + public void terminate() { + for 
(BackendConnection node : target.values()) { + node.close("client closed "); + } + target.clear(); + clearHandlesResources(); + } + + public void closeAndClearResources(String reason) { + for (BackendConnection node : target.values()) { + node.close(reason); + } + target.clear(); + clearHandlesResources(); + } + + public void releaseConnectionIfSafe(BackendConnection conn, boolean debug, + boolean needRollback) { + RouteResultsetNode node = (RouteResultsetNode) conn.getAttachment(); + + if (node != null) { + /* 分表 在 + * 1. 没有开启事务 + * 2. 读取走的从节点 + * 3. 没有执行过更新sql + * 也需要释放连接 + */ +// if (node.isDisctTable()) { +// return; +// } + if ((this.source.isAutocommit() || conn.isFromSlaveDB() + || !conn.isModifiedSQLExecuted()) && !this.source.isLocked()) { + releaseConnection((RouteResultsetNode) conn.getAttachment(), LOGGER.isDebugEnabled(), needRollback); + } + } + } + + public void releaseConnection(RouteResultsetNode rrn, boolean debug, + final boolean needRollback) { + + BackendConnection c = target.remove(rrn); + if (c != null) { + if (debug) { + LOGGER.debug("release connection " + c); + } + if (c.getAttachment() != null) { + c.setAttachment(null); + } + if (!c.isClosedOrQuit()) { + if (c.isAutocommit()) { + c.release(); + } else + //if (needRollback) + { + c.setResponseHandler(new RollbackReleaseHandler()); + c.rollback(); + } + //else { + // c.release(); + //} + } + } + } + + public void releaseConnections(final boolean needRollback) { + boolean debug = LOGGER.isDebugEnabled(); + + for (RouteResultsetNode rrn : target.keySet()) { + releaseConnection(rrn, debug, needRollback); + } + } + + public void releaseConnection(BackendConnection con) { + Iterator> itor = target + .entrySet().iterator(); + while (itor.hasNext()) { + BackendConnection theCon = itor.next().getValue(); + if (theCon == con) { + itor.remove(); + con.release(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("realse connection " + con); + } + break; + } + } + + } + + /** + * @return previous bound 
connection + */ + public BackendConnection bindConnection(RouteResultsetNode key, + BackendConnection conn) { + // System.out.println("bind connection "+conn+ + // " to key "+key.getName()+" on sesion "+this); + return target.put(key, conn); + } + + public boolean tryExistsCon(final BackendConnection conn, RouteResultsetNode node) { + if (conn == null) { + return false; + } + + boolean canReUse = false; + // conn 是 slave db 的,并且 路由结果显示,本次sql可以重用该 conn + if (conn.isFromSlaveDB() && (node.canRunnINReadDB(getSource().isAutocommit()) + && (node.getRunOnSlave() == null || node.getRunOnSlave()))) { + canReUse = true; + } + + // conn 是 master db 的,并且路由结果显示,本次sql可以重用该conn + if (!conn.isFromSlaveDB() && (node.getRunOnSlave() == null || !node.getRunOnSlave())) { + canReUse = true; + } + + if (canReUse) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("found connections in session to use " + conn + + " for " + node); + } + conn.setAttachment(node); + return true; + } else { + // slavedb connection and can't use anymore ,release it + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("release slave connection,can't be used in trasaction " + + conn + " for " + node); + } + releaseConnection(node, LOGGER.isDebugEnabled(), false); + } + return false; + } + +// public boolean tryExistsCon(final BackendConnection conn, +// RouteResultsetNode node) { +// +// if (conn == null) { +// return false; +// } +// if (!conn.isFromSlaveDB() +// || node.canRunnINReadDB(getSource().isAutocommit())) { +// if (LOGGER.isDebugEnabled()) { +// LOGGER.debug("found connections in session to use " + conn +// + " for " + node); +// } +// conn.setAttachment(node); +// return true; +// } else { +// // slavedb connection and can't use anymore ,release it +// if (LOGGER.isDebugEnabled()) { +// LOGGER.debug("release slave connection,can't be used in trasaction " +// + conn + " for " + node); +// } +// releaseConnection(node, LOGGER.isDebugEnabled(), false); +// } +// return false; +// } + + protected void kill() 
{ + boolean hooked = false; + AtomicInteger count = null; + Map killees = null; + for (RouteResultsetNode node : target.keySet()) { + BackendConnection c = target.get(node); + if (c != null) { + if (!hooked) { + hooked = true; + killees = new HashMap(); + count = new AtomicInteger(0); + } + killees.put(node, c); + count.incrementAndGet(); + } + } + if (hooked) { + for (Entry en : killees + .entrySet()) { + KillConnectionHandler kill = new KillConnectionHandler( + en.getValue(), this); + MycatConfig conf = MycatServer.getInstance().getConfig(); + PhysicalDBNode dn = conf.getDataNodes().get( + en.getKey().getName()); + try { + dn.getConnectionFromSameSource(null, true, en.getValue(), + kill, en.getKey()); + } catch (Exception e) { + LOGGER.error( + "get killer connection failed for " + en.getKey(), + e); + kill.connectionError(e, null); + } + } + } + } + + private void clearHandlesResources() { + SingleNodeHandler singleHander = singleNodeHandler; + if (singleHander != null) { + singleHander.clearResources(); + singleNodeHandler = null; + } + MultiNodeQueryHandler multiHandler = multiNodeHandler; + if (multiHandler != null) { + multiHandler.clearResources(); + multiNodeHandler = null; + } + } + + public void clearResources(final boolean needRollback) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("clear session resources " + this); + } + this.releaseConnections(needRollback); + clearHandlesResources(); + } + + public boolean closed() { + return source.isClosed(); + } + + private String genXATXID() { + return MycatServer.getInstance().getXATXIDGLOBAL(); + } + + public void setXATXEnabled(boolean xaTXEnabled) { + + if (xaTXEnabled) { + LOGGER.info("XA Transaction enabled ,con " + this.getSource()); + if(this.xaTXID == null){ + xaTXID = genXATXID(); + } + }else{ + LOGGER.info("XA Transaction disabled ,con " + this.getSource()); + this.xaTXID = null; + } + } + + public String getXaTXID() { + return xaTXID; + } + + public boolean isPrepared() { + return prepared; + } + 
+ public void setPrepared(boolean prepared) { + this.prepared = prepared; + } + + + public boolean isCanClose() { + return canClose; } - public String getXaTXID() { - return xaTXID; + public void setCanClose(boolean canClose) { + this.canClose = canClose; } - - public boolean isPrepared() { - return prepared; + public MiddlerResultHandler getMiddlerResultHandler() { + return middlerResultHandler; } - - public void setPrepared(boolean prepared) { - this.prepared = prepared; + public void setMiddlerResultHandler(MiddlerResultHandler middlerResultHandler) { + this.middlerResultHandler = middlerResultHandler; } -} \ No newline at end of file + +} diff --git a/src/main/java/io/mycat/server/ServerConnection.java b/src/main/java/io/mycat/server/ServerConnection.java new file mode 100644 index 000000000..84cca51d7 --- /dev/null +++ b/src/main/java/io/mycat/server/ServerConnection.java @@ -0,0 +1,413 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.server; + +import java.io.IOException; +import java.nio.channels.NetworkChannel; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.config.ErrorCode; +import io.mycat.config.model.SchemaConfig; +import io.mycat.net.FrontendConnection; +import io.mycat.route.RouteResultset; +import io.mycat.server.handler.MysqlInformationSchemaHandler; +import io.mycat.server.handler.MysqlProcHandler; +import io.mycat.server.parser.ServerParse; +import io.mycat.server.response.Heartbeat; +import io.mycat.server.response.InformationSchemaProfiling; +import io.mycat.server.response.Ping; +import io.mycat.server.util.SchemaUtil; +import io.mycat.util.SplitUtil; +import io.mycat.util.TimeUtil; + +/** + * @author mycat + */ +public class ServerConnection extends FrontendConnection { + private static final Logger LOGGER = LoggerFactory + .getLogger(ServerConnection.class); + private static final long AUTH_TIMEOUT = 15 * 1000L; + + private volatile int txIsolation; + private volatile boolean autocommit; + private volatile boolean preAcStates; //上一个ac状态,默认为true + private volatile boolean txInterrupted; + private volatile String txInterrputMsg = ""; + private long lastInsertId; + private NonBlockingSession session; + /** + * 标志是否执行了lock tables语句,并处于lock状态 + */ + private volatile boolean isLocked = false; + + public ServerConnection(NetworkChannel channel) + throws IOException { + super(channel); + this.txInterrupted = false; + this.autocommit = true; + this.preAcStates = true; + } + + @Override + public boolean isIdleTimeout() { + if (isAuthenticated) { + return super.isIdleTimeout(); + } else { + return TimeUtil.currentTimeMillis() > Math.max(lastWriteTime, + lastReadTime) + AUTH_TIMEOUT; + } + } + + public int 
getTxIsolation() { + return txIsolation; + } + + public void setTxIsolation(int txIsolation) { + this.txIsolation = txIsolation; + } + + public boolean isAutocommit() { + return autocommit; + } + + public void setAutocommit(boolean autocommit) { + this.autocommit = autocommit; + } + + public long getLastInsertId() { + return lastInsertId; + } + + public void setLastInsertId(long lastInsertId) { + this.lastInsertId = lastInsertId; + } + + /** + * 设置是否需要中断当前事务 + */ + public void setTxInterrupt(String txInterrputMsg) { + if (!autocommit && !txInterrupted) { + txInterrupted = true; + this.txInterrputMsg = txInterrputMsg; + } + } + + public boolean isTxInterrupted() + { + return txInterrupted; + } + public NonBlockingSession getSession2() { + return session; + } + + public void setSession2(NonBlockingSession session2) { + this.session = session2; + } + + public boolean isLocked() { + return isLocked; + } + + public void setLocked(boolean isLocked) { + this.isLocked = isLocked; + } + + @Override + public void ping() { + Ping.response(this); + } + + @Override + public void heartbeat(byte[] data) { + Heartbeat.response(this, data); + } + + public void execute(String sql, int type) { + //连接状态检查 + if (this.isClosed()) { + LOGGER.warn("ignore execute ,server connection is closed " + this); + return; + } + // 事务状态检查 + if (txInterrupted) { + writeErrMessage(ErrorCode.ER_YES, + "Transaction error, need to rollback." 
+ txInterrputMsg); + return; + } + + // 检查当前使用的DB + String db = this.schema; + boolean isDefault = true; + if (db == null) { + db = SchemaUtil.detectDefaultDb(sql, type); + if (db == null) { + writeErrMessage(ErrorCode.ERR_BAD_LOGICDB, "No MyCAT Database selected"); + return; + } + isDefault = false; + } + + // 兼容PhpAdmin's, 支持对MySQL元数据的模拟返回 + //// TODO: 2016/5/20 支持更多information_schema特性 + if (ServerParse.SELECT == type + && db.equalsIgnoreCase("information_schema") ) { + MysqlInformationSchemaHandler.handle(sql, this); + return; + } + + if (ServerParse.SELECT == type + && sql.contains("mysql") + && sql.contains("proc")) { + + SchemaUtil.SchemaInfo schemaInfo = SchemaUtil.parseSchema(sql); + if (schemaInfo != null + && "mysql".equalsIgnoreCase(schemaInfo.schema) + && "proc".equalsIgnoreCase(schemaInfo.table)) { + + // 兼容MySQLWorkbench + MysqlProcHandler.handle(sql, this); + return; + } + } + + SchemaConfig schema = MycatServer.getInstance().getConfig().getSchemas().get(db); + if (schema == null) { + writeErrMessage(ErrorCode.ERR_BAD_LOGICDB, + "Unknown MyCAT Database '" + db + "'"); + return; + } + + //fix navicat SELECT STATE AS `State`, ROUND(SUM(DURATION),7) AS `Duration`, CONCAT(ROUND(SUM(DURATION)/*100,3), '%') AS `Percentage` FROM INFORMATION_SCHEMA.PROFILING WHERE QUERY_ID= GROUP BY STATE ORDER BY SEQ + if(ServerParse.SELECT == type &&sql.contains(" INFORMATION_SCHEMA.PROFILING ")&&sql.contains("CONCAT(ROUND(SUM(DURATION)/")) + { + InformationSchemaProfiling.response(this); + return; + } + + /* 当已经设置默认schema时,可以通过在sql中指定其它schema的方式执行 + * 相关sql,已经在mysql客户端中验证。 + * 所以在此处增加关于sql中指定Schema方式的支持。 + */ + if (isDefault && schema.isCheckSQLSchema() && isNormalSql(type)) { + SchemaUtil.SchemaInfo schemaInfo = SchemaUtil.parseSchema(sql); + if (schemaInfo != null && schemaInfo.schema != null && !schemaInfo.schema.equals(db)) { + SchemaConfig schemaConfig = MycatServer.getInstance().getConfig().getSchemas().get(schemaInfo.schema); + if (schemaConfig != null) + schema = 
schemaConfig; + } + } + + routeEndExecuteSQL(sql, type, schema); + + } + + private boolean isNormalSql(int type) { + return ServerParse.SELECT==type||ServerParse.INSERT==type||ServerParse.UPDATE==type||ServerParse.DELETE==type||ServerParse.DDL==type; + } + + public RouteResultset routeSQL(String sql, int type) { + + // 检查当前使用的DB + String db = this.schema; + if (db == null) { + writeErrMessage(ErrorCode.ERR_BAD_LOGICDB, + "No MyCAT Database selected"); + return null; + } + SchemaConfig schema = MycatServer.getInstance().getConfig() + .getSchemas().get(db); + if (schema == null) { + writeErrMessage(ErrorCode.ERR_BAD_LOGICDB, + "Unknown MyCAT Database '" + db + "'"); + return null; + } + + // 路由计算 + RouteResultset rrs = null; + try { + rrs = MycatServer + .getInstance() + .getRouterservice() + .route(MycatServer.getInstance().getConfig().getSystem(), + schema, type, sql, this.charset, this); + + } catch (Exception e) { + StringBuilder s = new StringBuilder(); + LOGGER.warn(s.append(this).append(sql).toString() + " err:" + e.toString(),e); + String msg = e.getMessage(); + writeErrMessage(ErrorCode.ER_PARSE_ERROR, msg == null ? e.getClass().getSimpleName() : msg); + return null; + } + return rrs; + } + + + + + public void routeEndExecuteSQL(String sql, final int type, final SchemaConfig schema) { + // 路由计算 + RouteResultset rrs = null; + try { + rrs = MycatServer + .getInstance() + .getRouterservice() + .route(MycatServer.getInstance().getConfig().getSystem(), + schema, type, sql, this.charset, this); + + } catch (Exception e) { + StringBuilder s = new StringBuilder(); + LOGGER.warn(s.append(this).append(sql).toString() + " err:" + e.toString(),e); + String msg = e.getMessage(); + writeErrMessage(ErrorCode.ER_PARSE_ERROR, msg == null ? 
e.getClass().getSimpleName() : msg); + return; + } + if (rrs != null) { + // session执行 + session.execute(rrs, rrs.isSelectForUpdate()?ServerParse.UPDATE:type); + } + + } + + /** + * 提交事务 + */ + public void commit() { + if (txInterrupted) { + writeErrMessage(ErrorCode.ER_YES, + "Transaction error, need to rollback."); + } else { + session.commit(); + } + } + + /** + * 回滚事务 + */ + public void rollback() { + // 状态检查 + if (txInterrupted) { + txInterrupted = false; + } + + // 执行回滚 + session.rollback(); + } + /** + * 执行lock tables语句方法 + * @param sql + */ + public void lockTable(String sql) { + // 事务中不允许执行lock table语句 + if (!autocommit) { + writeErrMessage(ErrorCode.ER_YES, "can't lock table in transaction!"); + return; + } + // 已经执行了lock table且未执行unlock table之前的连接不能再次执行lock table命令 + if (isLocked) { + writeErrMessage(ErrorCode.ER_YES, "can't lock multi-table"); + return; + } + RouteResultset rrs = routeSQL(sql, ServerParse.LOCK); + if (rrs != null) { + session.lockTable(rrs); + } + } + + /** + * 执行unlock tables语句方法 + * @param sql + */ + public void unLockTable(String sql) { + sql = sql.replaceAll("\n", " ").replaceAll("\t", " "); + String[] words = SplitUtil.split(sql, ' ', true); + if (words.length==2 && ("table".equalsIgnoreCase(words[1]) || "tables".equalsIgnoreCase(words[1]))) { + isLocked = false; + session.unLockTable(sql); + } else { + writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Unknown command"); + } + + } + + /** + * 撤销执行中的语句 + * + * @param sponsor + * 发起者为null表示是自己 + */ + public void cancel(final FrontendConnection sponsor) { + processor.getExecutor().execute(new Runnable() { + @Override + public void run() { + session.cancel(sponsor); + } + }); + } + + @Override + public void close(String reason) { + super.close(reason); + session.terminate(); + if(getLoadDataInfileHandler()!=null) + { + getLoadDataInfileHandler().clear(); + } + } + + /** + * add huangyiming 检测字符串中某字符串出现次数 + * @param srcText + * @param findText + * @return + */ + public static int 
appearNumber(String srcText, String findText) { + int count = 0; + Pattern p = Pattern.compile(findText); + Matcher m = p.matcher(srcText); + while (m.find()) { + count++; + } + return count; + } + @Override + public String toString() { + return "ServerConnection [id=" + id + ", schema=" + schema + ", host=" + + host + ", user=" + user + ",txIsolation=" + txIsolation + + ", autocommit=" + autocommit + ", schema=" + schema + "]"; + } + + public boolean isPreAcStates() { + return preAcStates; + } + + public void setPreAcStates(boolean preAcStates) { + this.preAcStates = preAcStates; + } + +} diff --git a/src/main/java/io/mycat/server/ServerConnectionFactory.java b/src/main/java/io/mycat/server/ServerConnectionFactory.java new file mode 100644 index 000000000..afb684a3c --- /dev/null +++ b/src/main/java/io/mycat/server/ServerConnectionFactory.java @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.server; + +import java.io.IOException; +import java.nio.channels.NetworkChannel; + +import io.mycat.MycatServer; +import io.mycat.config.MycatPrivileges; +import io.mycat.config.model.SystemConfig; +import io.mycat.net.FrontendConnection; +import io.mycat.net.factory.FrontendConnectionFactory; +import io.mycat.server.handler.ServerLoadDataInfileHandler; +import io.mycat.server.handler.ServerPrepareHandler; + +/** + * @author mycat + */ +public class ServerConnectionFactory extends FrontendConnectionFactory { + + @Override + protected FrontendConnection getConnection(NetworkChannel channel) throws IOException { + SystemConfig sys = MycatServer.getInstance().getConfig().getSystem(); + ServerConnection c = new ServerConnection(channel); + MycatServer.getInstance().getConfig().setSocketParams(c, true); + c.setPrivileges(MycatPrivileges.instance()); + c.setQueryHandler(new ServerQueryHandler(c)); + c.setLoadDataInfileHandler(new ServerLoadDataInfileHandler(c)); + c.setPrepareHandler(new ServerPrepareHandler(c)); + c.setTxIsolation(sys.getTxIsolation()); + c.setSession2(new NonBlockingSession(c)); + return c; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/ServerQueryHandler.java b/src/main/java/io/mycat/server/ServerQueryHandler.java new file mode 100644 index 000000000..e9625edaf --- /dev/null +++ b/src/main/java/io/mycat/server/ServerQueryHandler.java @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.server;
+
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+
+import io.mycat.config.ErrorCode;
+import io.mycat.net.handler.FrontendQueryHandler;
+import io.mycat.net.mysql.OkPacket;
+import io.mycat.server.handler.*;
+import io.mycat.server.parser.ServerParse;
+
+/**
+ * @author mycat
+ */
+public class ServerQueryHandler implements FrontendQueryHandler {
+	private static final Logger LOGGER = LoggerFactory
+			.getLogger(ServerQueryHandler.class);
+
+	private final ServerConnection source;
+	protected Boolean readOnly;
+
+	public void setReadOnly(Boolean readOnly) {
+		this.readOnly = readOnly;
+	}
+
+	public ServerQueryHandler(ServerConnection source) {
+		this.source = source;
+	}
+
+	@Override
+	public void query(String sql) {
+
+		ServerConnection c = this.source;
+		if (LOGGER.isDebugEnabled()) {
+			LOGGER.debug(new StringBuilder().append(c).append(sql).toString());
+		}
+		// parse the statement: low byte is the statement type, high bits carry a parser offset
+		int rs = ServerParse.parse(sql);
+		int sqlType = rs & 0xff;
+
+		switch (sqlType) {
+		//explain sql
+		case ServerParse.EXPLAIN:
+			ExplainHandler.handle(sql, c, rs >>> 8);
+			break;
+		//explain2 datanode=? sql=?
+		case ServerParse.EXPLAIN2:
+			Explain2Handler.handle(sql, c, rs >>> 8);
+			break;
+		case ServerParse.SET:
+			SetHandler.handle(sql, c, rs >>> 8);
+			break;
+		case ServerParse.SHOW:
+			ShowHandler.handle(sql, c, rs >>> 8);
+			break;
+		case ServerParse.SELECT:
+			SelectHandler.handle(sql, c, rs >>> 8);
+			break;
+		case ServerParse.START:
+			StartHandler.handle(sql, c, rs >>> 8);
+			break;
+		case ServerParse.BEGIN:
+			BeginHandler.handle(sql, c);
+			break;
+		// Oracle-style SAVEPOINT transaction rollback points are not supported
+		case ServerParse.SAVEPOINT:
+			SavepointHandler.handle(sql, c);
+			break;
+		case ServerParse.KILL:
+			KillHandler.handle(sql, rs >>> 8, c);
+			break;
+		// KILL QUERY is not supported
+		case ServerParse.KILL_QUERY:
+			LOGGER.warn(new StringBuilder().append("Unsupported command:").append(sql).toString());
+			c.writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR,"Unsupported command");
+			break;
+		case ServerParse.USE:
+			UseHandler.handle(sql, c, rs >>> 8);
+			break;
+		case ServerParse.COMMIT:
+			c.commit();
+			break;
+		case ServerParse.ROLLBACK:
+			c.rollback();
+			break;
+		case ServerParse.HELP:
+			LOGGER.warn(new StringBuilder().append("Unsupported command:").append(sql).toString());
+			c.writeErrMessage(ErrorCode.ER_SYNTAX_ERROR, "Unsupported command");
+			break;
+		case ServerParse.MYSQL_CMD_COMMENT:
+			c.write(c.writeToBuffer(OkPacket.OK, c.allocate()));
+			break;
+		case ServerParse.MYSQL_COMMENT:
+			c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); // same handling as MYSQL_CMD_COMMENT above
+			break;
+		case ServerParse.LOAD_DATA_INFILE_SQL:
+			c.loadDataInfileStart(sql);
+			break;
+		case ServerParse.MIGRATE:
+			MigrateHandler.handle(sql,c);
+			break;
+		case ServerParse.LOCK:
+			c.lockTable(sql);
+			break;
+		case ServerParse.UNLOCK:
+			c.unLockTable(sql);
+			break;
+		default:
+			if(readOnly){
+				LOGGER.warn(new StringBuilder().append("User readonly:").append(sql).toString());
+				c.writeErrMessage(ErrorCode.ER_USER_READ_ONLY, "User readonly");
+				break;
+			}
+			c.execute(sql, rs & 0xff);
+		}
+	}
+
+}
diff --git a/src/main/java/io/mycat/server/Session.java
b/src/main/java/io/mycat/server/Session.java new file mode 100644 index 000000000..a0d71a588 --- /dev/null +++ b/src/main/java/io/mycat/server/Session.java @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.server; + +import io.mycat.net.FrontendConnection; +import io.mycat.route.RouteResultset; + +/** + * @author mycat + */ +public interface Session { + + /** + * 取得源端连接 + */ + FrontendConnection getSource(); + + /** + * 取得当前目标端数量 + */ + int getTargetCount(); + + /** + * 开启一个会话执行 + */ + void execute(RouteResultset rrs, int type); + + /** + * 提交一个会话执行 + */ + void commit(); + + /** + * 回滚一个会话执行 + */ + void rollback(); + + /** + * 取消一个正在执行中的会话 + * + * @param sponsor + * 如果发起者为null,则表示由自己发起。 + */ + void cancel(FrontendConnection sponsor); + + /** + * 终止会话,必须在关闭源端连接后执行该方法。 + */ + void terminate(); + + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/config/cluster/ClusterSync.java b/src/main/java/io/mycat/server/config/cluster/ClusterSync.java deleted file mode 100644 index 360f85684..000000000 --- a/src/main/java/io/mycat/server/config/cluster/ClusterSync.java +++ /dev/null @@ -1,7 +0,0 @@ -package io.mycat.server.config.cluster; - -public interface ClusterSync { - - public void init(); - public boolean switchDataSource(String dataHost, int curIndex); -} diff --git a/src/main/java/io/mycat/server/config/cluster/DatabaseClusterSync.java b/src/main/java/io/mycat/server/config/cluster/DatabaseClusterSync.java deleted file mode 100644 index d53aafee9..000000000 --- a/src/main/java/io/mycat/server/config/cluster/DatabaseClusterSync.java +++ /dev/null @@ -1,17 +0,0 @@ -package io.mycat.server.config.cluster; - -public class DatabaseClusterSync implements ClusterSync{ - - @Override - public boolean switchDataSource(String dataHost, int curIndex) { - // TODO Auto-generated method stub - return false; - } - - @Override - public void init() { - // TODO Auto-generated method stub - - } - -} diff --git a/src/main/java/io/mycat/server/config/cluster/LocalClusterSync.java b/src/main/java/io/mycat/server/config/cluster/LocalClusterSync.java deleted file mode 100644 index 8a8e485db..000000000 --- 
a/src/main/java/io/mycat/server/config/cluster/LocalClusterSync.java +++ /dev/null @@ -1,59 +0,0 @@ -package io.mycat.server.config.cluster; - -import io.mycat.server.config.node.SystemConfig; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.util.Properties; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class LocalClusterSync implements ClusterSync{ - - private final static Logger LOGGER = LoggerFactory.getLogger(LocalClusterSync.class); - - @Override - public boolean switchDataSource(String dataHost, int curIndex) { - File file = new File(SystemConfig.getHomePath(), "conf" + File.separator + "dnindex.properties"); - FileOutputStream fileOut = null; - try { - Properties dnIndexProperties = new Properties(); - dnIndexProperties.load(new FileInputStream(file)); - String oldIndex = dnIndexProperties.getProperty(dataHost); - String newIndex = String.valueOf(curIndex); - if (newIndex.equals(oldIndex)) { - return true; - } - dnIndexProperties.setProperty(dataHost, newIndex); - LOGGER.info("save DataHost index " + dataHost + " cur index " + curIndex); - - File parent = file.getParentFile(); - if (parent != null && !parent.exists()) { - parent.mkdirs(); - } - - fileOut = new FileOutputStream(file); - dnIndexProperties.store(fileOut, "update"); - } catch (Exception e) { - LOGGER.warn("saveDataNodeIndex err:", e); - } finally { - if (fileOut != null) { - try { - fileOut.close(); - } catch (IOException e) { - } - } - } - return true; - } - - @Override - public void init() { - - - } - -} diff --git a/src/main/java/io/mycat/server/config/cluster/ZookeeperClusterSync.java b/src/main/java/io/mycat/server/config/cluster/ZookeeperClusterSync.java deleted file mode 100644 index 9695445c0..000000000 --- a/src/main/java/io/mycat/server/config/cluster/ZookeeperClusterSync.java +++ /dev/null @@ -1,17 +0,0 @@ -package io.mycat.server.config.cluster; - -public class 
ZookeeperClusterSync implements ClusterSync{ - - @Override - public boolean switchDataSource(String dataHost, int curIndex) { - // TODO Auto-generated method stub - return false; - } - - @Override - public void init() { - // TODO Auto-generated method stub - - } - -} diff --git a/src/main/java/io/mycat/server/config/loader/ConfigFactory.java b/src/main/java/io/mycat/server/config/loader/ConfigFactory.java deleted file mode 100644 index b0c38441d..000000000 --- a/src/main/java/io/mycat/server/config/loader/ConfigFactory.java +++ /dev/null @@ -1,92 +0,0 @@ -package io.mycat.server.config.loader; - -import io.mycat.server.config.ConfigException; -import io.mycat.server.config.ConfigUtil; -import io.mycat.server.config.cluster.ClusterSync; -import io.mycat.server.config.cluster.DatabaseClusterSync; -import io.mycat.server.config.cluster.LocalClusterSync; -import io.mycat.server.config.cluster.ZookeeperClusterSync; -import io.mycat.server.config.loader.zkloader.ZookeeperLoader; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.lang.reflect.InvocationTargetException; -import java.util.Map; - -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -public class ConfigFactory { - private final static String ZOOKEEPER = "zookeeper"; - private final static String DATABASE = "jdbc"; - private final static String LOCAL = "local"; - private static String registryAddress = null ; - - - public static ConfigLoader instanceLoader(){ - ConfigFactory.load(); - if(registryAddress==null){ - return ConfigFactory.instanceLocalLoader(); - }else if(registryAddress.startsWith(ZOOKEEPER)){ - return ConfigFactory.instanceZkLoader(); - }else if(registryAddress.startsWith(DATABASE)){ - return ConfigFactory.instanceDBLoader(); - }else if(registryAddress.startsWith(LOCAL)){ - return ConfigFactory.instanceLocalLoader(); - }else { - throw new ConfigException("regist center: "+ registryAddress +" is not 
supported,only zk,database "); - } - } - public static ClusterSync instanceCluster(){ - ConfigFactory.load(); - if(registryAddress==null){ - return new LocalClusterSync(); - }else if(registryAddress.startsWith(ZOOKEEPER)){ - return new ZookeeperClusterSync(); - }else if(registryAddress.startsWith(DATABASE)){ - return new DatabaseClusterSync(); - }else if(registryAddress.startsWith(LOCAL)){ - return new LocalClusterSync(); - }else { - throw new ConfigException("regist center: "+ registryAddress +" is not supported,only zk,database "); - } - } - private static void load() { - try { - Element root = LocalLoader.getRoot(); - registryAddress = loadSystem(root); - } catch (ConfigException e) { - throw e; - } catch (Exception e) { - throw new ConfigException(e); - } - } - private static String loadSystem(Element root) throws IllegalAccessException, InvocationTargetException { - NodeList serverList = root.getElementsByTagName("server-config"); - Element systemEle = (Element) serverList.item(0); - NodeList list = systemEle.getElementsByTagName("system"); - for (int i = 0, n = list.getLength(); i < n; i++) { - Node node = list.item(i); - if (node instanceof Element) { - Map props = ConfigUtil.loadElements((Element) node); - if(props.containsKey("registryAddress")){ - return (String) props.get("registryAddress"); - } - } - } - return null; - } - private static ConfigLoader instanceDBLoader() { - return null; - } - private static ConfigLoader instanceZkLoader(){ - ZookeeperLoader zookeeperLoader = new ZookeeperLoader(); - zookeeperLoader.initConfig(); - return null; - }; - private static ConfigLoader instanceLocalLoader(){ - return new LocalLoader(); - }; -} diff --git a/src/main/java/io/mycat/server/config/loader/ConfigInitializer.java b/src/main/java/io/mycat/server/config/loader/ConfigInitializer.java deleted file mode 100644 index 14b95a538..000000000 --- a/src/main/java/io/mycat/server/config/loader/ConfigInitializer.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Copyright 
(c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server.config.loader; - -import io.mycat.backend.MySQLDataSource; -import io.mycat.backend.PhysicalDBNode; -import io.mycat.backend.PhysicalDBPool; -import io.mycat.backend.PhysicalDatasource; -import io.mycat.backend.jdbc.JDBCDatasource; -import io.mycat.backend.postgresql.PostgreSQLDataSource; -import io.mycat.server.config.ConfigException; -import io.mycat.server.config.cluster.MycatClusterConfig; -import io.mycat.server.config.node.CharsetConfig; -import io.mycat.server.config.node.DBHostConfig; -import io.mycat.server.config.node.DataHostConfig; -import io.mycat.server.config.node.DataNodeConfig; -import io.mycat.server.config.node.HostIndexConfig; -import io.mycat.server.config.node.QuarantineConfig; -import io.mycat.server.config.node.RuleConfig; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SequenceConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.config.node.UserConfig; -import io.mycat.server.packet.util.CharsetUtil; - -import java.util.HashMap; -import java.util.Map; -import java.util.Set; - -/** - * @author mycat - */ -public class ConfigInitializer { - private volatile SystemConfig system; - private volatile MycatClusterConfig cluster; - private volatile QuarantineConfig quarantine; - private volatile Map users; - private volatile Map schemas; - private volatile Map dataNodes; - private volatile Map dataHosts; - private volatile Map tableRules; - private volatile SequenceConfig sequenceConfig; - private volatile CharsetConfig charsetConfig; - - public ConfigInitializer(boolean loadDataHost) { - ConfigLoader configLoader = ConfigFactory.instanceLoader(); - - this.system = configLoader.getSystemConfig(); - this.users = configLoader.getUserConfigs(); - if (loadDataHost) { - this.dataHosts = initDataHosts(configLoader); - this.dataNodes = initDataNodes(configLoader); - } - this.initCharsetConfig(configLoader); // 需要放在 initDataHosts 后面 - this.tableRules = 
configLoader.getTableRuleConfigs(); - this.schemas = configLoader.getSchemaConfigs(); - this.quarantine = configLoader.getQuarantineConfigs(); - this.cluster = configLoader.getClusterConfigs(); - this.sequenceConfig = configLoader.getSequenceConfig(); - this.charsetConfig = new CharsetConfig(); - - this.checkConfig(); - } - - private void checkConfig() throws ConfigException { - if (users == null || users.isEmpty()) - return; - for (UserConfig uc : users.values()) { - if (uc == null) { - continue; - } - Set authSchemas = uc.getSchemas(); - if (authSchemas == null) { - continue; - } - for (String schema : authSchemas) { - if (!schemas.containsKey(schema)) { - String errMsg = "schema " + schema + " refered by user " - + uc.getName() + " is not exist!"; - throw new ConfigException(errMsg); - } - } - } - - for (SchemaConfig sc : schemas.values()) { - if (null == sc) { - continue; - } - } - } - - public SystemConfig getSystem() { - return system; - } - - public MycatClusterConfig getCluster() { - return cluster; - } - - public QuarantineConfig getQuarantine() { - return quarantine; - } - - public Map getUsers() { - return users; - } - - public Map getSchemas() { - return schemas; - } - - public Map getDataNodes() { - return dataNodes; - } - - public Map getDataHosts() { - return this.dataHosts; - } - - public CharsetConfig getCharsetConfig() { - return this.charsetConfig; - } - - public SequenceConfig getSequenceConfig() { - return this.sequenceConfig; - } - - /* - * private MycatCluster initCobarCluster(ConfigLoader configLoader) { return - * new MycatCluster(configLoader.getClusterConfigs()); } - */ - public Map getTableRules() { - return tableRules; - } - - public void setTableRules(Map tableRules) { - this.tableRules = tableRules; - } - - private Map initDataHosts(ConfigLoader configLoader) { - Map nodeConfs = configLoader - .getDataHostConfigs(); - Map nodes = new HashMap( - nodeConfs.size()); - for (DataHostConfig conf : nodeConfs.values()) { - PhysicalDBPool pool 
= getPhysicalDBPool(conf, configLoader); - nodes.put(pool.getHostName(), pool); - } - return nodes; - } - - private PhysicalDatasource[] createDataSource(DataHostConfig conf, - String hostName, String dbType, String dbDriver, - DBHostConfig[] nodes, boolean isRead) { - PhysicalDatasource[] dataSources = new PhysicalDatasource[nodes.length]; - if (dbType.equals("mysql") && dbDriver.equals("native")) { - for (int i = 0; i < nodes.length; i++) { - nodes[i].setIdleTimeout(system.getIdleTimeout()); - MySQLDataSource ds = new MySQLDataSource(nodes[i], conf, isRead); - dataSources[i] = ds; - } - - } else if (dbDriver.equals("jdbc")) { - for (int i = 0; i < nodes.length; i++) { - nodes[i].setIdleTimeout(system.getIdleTimeout()); - JDBCDatasource ds = new JDBCDatasource(nodes[i], conf, isRead); - dataSources[i] = ds; - } - } else if ("PostgreSQL".equalsIgnoreCase(dbType) - && "native".equals(dbDriver)) { - for (int i = 0; i < nodes.length; i++) { - nodes[i].setIdleTimeout(system.getIdleTimeout()); - PostgreSQLDataSource ds = new PostgreSQLDataSource(nodes[i], - conf, isRead); - dataSources[i] = ds; - } - } else { - throw new ConfigException("not supported yet !" 
+ hostName); - } - return dataSources; - } - - private PhysicalDBPool getPhysicalDBPool(DataHostConfig conf, - ConfigLoader configLoader) { - String name = conf.getName(); - String dbType = conf.getDbType(); - String dbDriver = conf.getDbDriver(); - PhysicalDatasource[] writeSources = createDataSource(conf, name, - dbType, dbDriver, conf.getWriteHosts(), false); - Map readHostsMap = conf.getReadHosts(); - Map readSourcesMap = new HashMap( - readHostsMap.size()); - for (Map.Entry entry : readHostsMap.entrySet()) { - PhysicalDatasource[] readSources = createDataSource(conf, name, - dbType, dbDriver, entry.getValue(), true); - readSourcesMap.put(entry.getKey(), readSources); - } - PhysicalDBPool pool = new PhysicalDBPool(conf.getName(), conf, - writeSources, readSourcesMap, conf.getBalance(), - conf.getWriteType()); - return pool; - } - - private Map initDataNodes(ConfigLoader configLoader) { - Map nodeConfs = configLoader - .getDataNodeConfigs(); - Map nodes = new HashMap( - nodeConfs.size()); - for (DataNodeConfig conf : nodeConfs.values()) { - PhysicalDBPool pool = this.dataHosts.get(conf.getDataHost()); - if (pool == null) { - throw new ConfigException("dataHost not exists " - + conf.getDataHost()); - - } - PhysicalDBNode dataNode = new PhysicalDBNode(conf.getName(), - conf.getDatabase(), pool); - nodes.put(dataNode.getName(), dataNode); - } - return nodes; - } - - private void initCharsetConfig(ConfigLoader configLoader) { - this.charsetConfig = configLoader.getCharsetConfigs(); - CharsetUtil.load(this.dataHosts, charsetConfig.getProps()); -// CharsetUtil.asynLoad(this.dataHosts, charsetConfig.getProps()); - } - - public HostIndexConfig getHostIndexs() { - ConfigLoader configLoader = ConfigFactory.instanceLoader(); - return configLoader.getHostIndexConfig(); - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/config/loader/ConfigLoader.java b/src/main/java/io/mycat/server/config/loader/ConfigLoader.java deleted file mode 100644 index 
c702ec79e..000000000 --- a/src/main/java/io/mycat/server/config/loader/ConfigLoader.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server.config.loader; - -import io.mycat.server.config.cluster.MycatClusterConfig; -import io.mycat.server.config.node.CharsetConfig; -import io.mycat.server.config.node.DataHostConfig; -import io.mycat.server.config.node.DataNodeConfig; -import io.mycat.server.config.node.HostIndexConfig; -import io.mycat.server.config.node.QuarantineConfig; -import io.mycat.server.config.node.RuleConfig; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SequenceConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.config.node.UserConfig; - -import java.util.Map; - -/** - * @author mycat - */ -public interface ConfigLoader { - SchemaConfig getSchemaConfig(String schema); - Map getSchemaConfigs(); - Map getDataNodeConfigs(); - Map getDataHostConfigs(); - Map getTableRuleConfigs(); - SystemConfig getSystemConfig(); - UserConfig getUserConfig(String user); - Map getUserConfigs(); - QuarantineConfig getQuarantineConfigs(); - MycatClusterConfig getClusterConfigs(); - CharsetConfig getCharsetConfigs(); - HostIndexConfig getHostIndexConfig(); - SequenceConfig getSequenceConfig(); - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/config/loader/ConfigReLoader.java b/src/main/java/io/mycat/server/config/loader/ConfigReLoader.java deleted file mode 100644 index 5f0ce39e6..000000000 --- a/src/main/java/io/mycat/server/config/loader/ConfigReLoader.java +++ /dev/null @@ -1,18 +0,0 @@ -package io.mycat.server.config.loader; - - -public interface ConfigReLoader { - void reloadSchemaConfig(String schema); - void reloadSchemaConfigs(); - void reloadDataNodeConfigs(); - void reloadDataHostConfigs(); - void reloadTableRuleConfigs(); - void reloadSystemConfig(); - void reloadUserConfig(String user); - void reloadUserConfigs(); - void reloadQuarantineConfigs(); - void reloadClusterConfigs(); - void reloadCharsetConfigs(); - void reloadHostIndexConfig(); - -} diff --git 
a/src/main/java/io/mycat/server/config/loader/DatabaseLoader.java b/src/main/java/io/mycat/server/config/loader/DatabaseLoader.java deleted file mode 100644 index 010e4e379..000000000 --- a/src/main/java/io/mycat/server/config/loader/DatabaseLoader.java +++ /dev/null @@ -1,97 +0,0 @@ -package io.mycat.server.config.loader; - -import io.mycat.server.config.cluster.MycatClusterConfig; -import io.mycat.server.config.node.CharsetConfig; -import io.mycat.server.config.node.DataHostConfig; -import io.mycat.server.config.node.DataNodeConfig; -import io.mycat.server.config.node.HostIndexConfig; -import io.mycat.server.config.node.QuarantineConfig; -import io.mycat.server.config.node.RuleConfig; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SequenceConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.config.node.UserConfig; - -import java.util.Map; - -public class DatabaseLoader implements ConfigLoader { - - @Override - public SchemaConfig getSchemaConfig(String schema) { - // TODO Auto-generated method stub - return null; - } - - @Override - public Map getSchemaConfigs() { - // TODO Auto-generated method stub - return null; - } - - @Override - public Map getDataNodeConfigs() { - // TODO Auto-generated method stub - return null; - } - - @Override - public Map getDataHostConfigs() { - // TODO Auto-generated method stub - return null; - } - - @Override - public Map getTableRuleConfigs() { - // TODO Auto-generated method stub - return null; - } - - @Override - public SystemConfig getSystemConfig() { - // TODO Auto-generated method stub - return null; - } - - @Override - public UserConfig getUserConfig(String user) { - // TODO Auto-generated method stub - return null; - } - - @Override - public Map getUserConfigs() { - // TODO Auto-generated method stub - return null; - } - - @Override - public QuarantineConfig getQuarantineConfigs() { - // TODO Auto-generated method stub - return null; - } - - @Override - 
public MycatClusterConfig getClusterConfigs() { - // TODO Auto-generated method stub - return null; - } - - @Override - public CharsetConfig getCharsetConfigs() { - // TODO Auto-generated method stub - return null; - } - - @Override - public HostIndexConfig getHostIndexConfig() { - // TODO Auto-generated method stub - return null; - } - - @Override - public SequenceConfig getSequenceConfig() { - // TODO Auto-generated method stub - return null; - } - -} diff --git a/src/main/java/io/mycat/server/config/loader/LocalLoader.java b/src/main/java/io/mycat/server/config/loader/LocalLoader.java deleted file mode 100644 index f621daa60..000000000 --- a/src/main/java/io/mycat/server/config/loader/LocalLoader.java +++ /dev/null @@ -1,967 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server.config.loader; - -import io.mycat.backend.PhysicalDBPool; -import io.mycat.route.function.AbstractPartitionAlgorithm; -import io.mycat.server.config.ConfigException; -import io.mycat.server.config.ConfigUtil; -import io.mycat.server.config.ParameterMapping; -import io.mycat.server.config.cluster.MycatClusterConfig; -import io.mycat.server.config.cluster.MycatNodeConfig; -import io.mycat.server.config.node.CharsetConfig; -import io.mycat.server.config.node.DBHostConfig; -import io.mycat.server.config.node.DataHostConfig; -import io.mycat.server.config.node.DataNodeConfig; -import io.mycat.server.config.node.HostIndexConfig; -import io.mycat.server.config.node.JdbcDriver; -import io.mycat.server.config.node.QuarantineConfig; -import io.mycat.server.config.node.RuleConfig; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SequenceConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.config.node.TableConfig; -import io.mycat.server.config.node.TableConfigMap; -import io.mycat.server.config.node.UserConfig; -import io.mycat.util.SplitUtil; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.lang.reflect.InvocationTargetException; -import java.net.URI; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; - -/** - * @author mycat - */ -public class LocalLoader implements ConfigLoader { - private static final Logger logger = LoggerFactory.getLogger("LocalLoader"); - private Map dataHosts; - private Map dataNodes; - 
private Map schemas; - private Map tableRules; - private SystemConfig system; - private Map users; - private QuarantineConfig quarantine; - private MycatClusterConfig cluster; - private CharsetConfig charsetConfig; - private HostIndexConfig hostIndexConfig; - private SequenceConfig sequenceConfig; - - // 为了避免原代码中频繁调用 loadRoot 去频繁读取 /mycat.dtd 和 /mycat.xml,所以将 Document 作为属性进行缓存 - private static Document document = null; - - public LocalLoader(){ - this.system = new SystemConfig(); - this.users = new HashMap(); - this.dataHosts = new HashMap(); - this.dataNodes = new HashMap(); - this.schemas = new HashMap(); - this.tableRules = new HashMap(); - this.quarantine = new QuarantineConfig(); - this.charsetConfig = new CharsetConfig(); - this.hostIndexConfig = new HostIndexConfig(); - this.sequenceConfig = new SequenceConfig(); - } - - private static Element loadRoot() { - if(document == null){ - try(InputStream dtd = ConfigFactory.class.getResourceAsStream("/mycat.dtd"); - InputStream xml = ConfigFactory.class.getResourceAsStream("/mycat.xml");){ - document = ConfigUtil.getDocument(dtd, xml); - return document.getDocumentElement(); - } catch (Exception e) { - logger.error(" loadRoot error: " + e.getMessage()); - throw new ConfigException(e); - } - } - - return document.getDocumentElement(); - } - - @Override - public UserConfig getUserConfig(String user) { - Element root = loadRoot(); - loadUsers(root); - return this.users.get(user); - } - - @Override - public Map getUserConfigs() { - Element root = loadRoot(); - loadUsers(root); - return users; - } - - @Override - public SystemConfig getSystemConfig() { - Element root = loadRoot(); - loadSystem(root); - return system; - } - @Override - public Map getSchemaConfigs() { - Element root = loadRoot(); - loadSchemas(root); - return schemas; - } - @Override - public SchemaConfig getSchemaConfig(String schema) { - Element root = loadRoot(); - loadSchemas(root); - return schemas.get(schema); - } - @Override - public Map 
getDataNodeConfigs() { - Element root = loadRoot(); - loadDataNodes(root); - return dataNodes; - } - @Override - public Map getDataHostConfigs() { - Element root = loadRoot(); - loadDataHosts(root); - return dataHosts; - } - @Override - public Map getTableRuleConfigs() { - Element root = loadRoot(); - loadTableRules(root); - return tableRules; - } - - @Override - public QuarantineConfig getQuarantineConfigs() { - return quarantine; - } - @Override - public MycatClusterConfig getClusterConfigs() { - return cluster; - } - @Override - public CharsetConfig getCharsetConfigs() { - Element root = loadRoot(); - loadCharsetConfig(root); - return this.charsetConfig; - } - @Override - public HostIndexConfig getHostIndexConfig() { - Element root = loadRoot(); - loadHostIndexConfig(root); - return this.hostIndexConfig; - } - @Override - public SequenceConfig getSequenceConfig() { - Element root = loadRoot(); - loadSequenceConfig(root); - return this.sequenceConfig; - } - - public static Map loadJdbcDriverConfig() { - Element root = loadRoot(); - return loadJdbcDriverConfig(root); - } - - private void loadUsers(Element root) { - NodeList list = root.getElementsByTagName("user"); - for (int i = 0, n = list.getLength(); i < n; i++) { - Node node = list.item(i); - if (node instanceof Element) { - Element e = (Element) node; - String name = e.getAttribute("name"); - UserConfig user = new UserConfig(); - user.setName(name); - Map props = ConfigUtil.loadElements(e); - user.setPassword((String) props.get("password")); - String readOnly = (String) props.get("readOnly"); - if (null != readOnly) { - user.setReadOnly(Boolean.valueOf(readOnly)); - } - String schemas = (String) props.get("schemas"); - if (schemas != null) { - String[] strArray = SplitUtil.split(schemas, ',', true); - user.setSchemas(new HashSet(Arrays.asList(strArray))); - } - if (users.containsKey(name)) { - throw new ConfigException("user " + name + " duplicated!"); - } - users.put(name, user); - } - } - } - - private 
void loadSystem(Element root) { - NodeList list = root.getElementsByTagName("system"); - try { - for (int i = 0, n = list.getLength(); i < n; i++) { - Node node = list.item(i); - if (node instanceof Element) { - Map props = ConfigUtil.loadElements((Element) node); - ParameterMapping.mapping(system, props); - } - } - } catch (Exception e) { - e.printStackTrace(); - throw new ConfigException("loadSystem error: " + e.getMessage()); - } - - } - private void loadSchemas(Element root) { - NodeList list = root.getElementsByTagName("schema"); - for (int i = 0, n = list.getLength(); i < n; i++) { - Element schemaElement = (Element) list.item(i); - String name = schemaElement.getAttribute("name"); - String dataNode = schemaElement.getAttribute("dataNode"); - String checkSQLSchemaStr = schemaElement - .getAttribute("checkSQLschema"); - String sqlMaxLimitStr = schemaElement.getAttribute("sqlMaxLimit"); - int sqlMaxLimit = -1; - if (sqlMaxLimitStr != null && !sqlMaxLimitStr.isEmpty()) { - sqlMaxLimit = Integer.valueOf(sqlMaxLimitStr); - - } - // check dataNode already exists or not - String defaultDbType = null; - if (dataNode != null && !dataNode.isEmpty()) { - List dataNodeLst = new ArrayList(1); - dataNodeLst.add(dataNode); - checkDataNodeExists(dataNodeLst); - String dataHost = dataNodes.get(dataNode).getDataHost(); - defaultDbType = dataHosts.get(dataHost).getDbType(); - } else { - dataNode = null; - } - Map tables = loadTables(schemaElement); - if (schemas.containsKey(name)) { - throw new ConfigException("schema " + name + " duplicated!"); - } - - // 设置了table的不需要设置dataNode属性,没有设置table的必须设置dataNode属性 - if (dataNode == null && tables.size() == 0) { - throw new ConfigException( - "schema " - + name - + " didn't config tables,so you must set dataNode property!"); - } - - SchemaConfig schemaConfig = new SchemaConfig(name, dataNode, - tables, sqlMaxLimit, - "true".equalsIgnoreCase(checkSQLSchemaStr)); - if (defaultDbType != null) { - 
schemaConfig.setDefaultDataNodeDbType(defaultDbType); - if (!"mysql".equalsIgnoreCase(defaultDbType)) { - schemaConfig.setNeedSupportMultiDBType(true); - } - } - - // 判断是否有不是mysql的数据库类型,方便解析判断是否启用多数据库分页语法解析 - - for (String tableName : tables.keySet()) { - TableConfig tableConfig = tables.get(tableName); - if (isHasMultiDbType(tableConfig)) { - schemaConfig.setNeedSupportMultiDBType(true); - break; - } - } - Map dataNodeDbTypeMap = new HashMap<>(); - for (String dataNodeName : dataNodes.keySet()) { - DataNodeConfig dataNodeConfig = dataNodes.get(dataNodeName); - String dataHost = dataNodeConfig.getDataHost(); - DataHostConfig dataHostConfig = dataHosts.get(dataHost); - if (dataHostConfig != null) { - String dbType = dataHostConfig.getDbType(); - dataNodeDbTypeMap.put(dataNodeName, dbType); - } - } - schemaConfig.setDataNodeDbTypeMap(dataNodeDbTypeMap); - schemas.put(name, schemaConfig); - } - } - - private Map loadTables(Element node) { - // Map tables = new HashMap(); - - // 支持表名中包含引号[`] BEN GONG - Map tables = new TableConfigMap(); - - NodeList nodeList = node.getElementsByTagName("table"); - for (int i = 0; i < nodeList.getLength(); i++) { - Element tableElement = (Element) nodeList.item(i); - String tableNameElement = tableElement.getAttribute("name").toUpperCase(); - String[] tableNames = tableNameElement.split(","); - - String primaryKey = tableElement.hasAttribute("primaryKey") ? tableElement - .getAttribute("primaryKey").toUpperCase() : null; - - boolean autoIncrement = false; - if (tableElement.hasAttribute("autoIncrement")) { - autoIncrement = Boolean.parseBoolean(tableElement - .getAttribute("autoIncrement")); - } - boolean needAddLimit = true; - if (tableElement.hasAttribute("needAddLimit")) { - needAddLimit = Boolean.parseBoolean(tableElement - .getAttribute("needAddLimit")); - } - String tableTypeStr = tableElement.hasAttribute("type") ? 
tableElement - .getAttribute("type") : null; - int tableType = TableConfig.TYPE_GLOBAL_DEFAULT; - if ("global".equalsIgnoreCase(tableTypeStr)) { - tableType = TableConfig.TYPE_GLOBAL_TABLE; - } - String dataNode = tableElement.getAttribute("dataNode"); - RuleConfig tableRule = null; - if (tableElement.hasAttribute("rule")) { - String ruleName = tableElement.getAttribute("rule"); - tableRule = tableRules.get(ruleName); - if (tableRule == null) { - throw new ConfigException("rule " + ruleName + " is not found!"); - } - } - boolean ruleRequired = false; - if (tableElement.hasAttribute("ruleRequired")) { - ruleRequired = Boolean.parseBoolean(tableElement - .getAttribute("ruleRequired")); - } - - if (tableNames == null) { - throw new ConfigException("table name is not found!"); - } - String distPrex = "distribute("; - boolean distTableDns = dataNode.startsWith(distPrex); - if (distTableDns) { - dataNode = dataNode.substring(distPrex.length(), - dataNode.length() - 1); - } - for (int j = 0; j < tableNames.length; j++) { - String tableName = tableNames[j]; - TableConfig table = new TableConfig(tableName, primaryKey, - autoIncrement, needAddLimit, tableType, dataNode, - getDbType(dataNode), - (tableRule != null) ? 
tableRule : null, - ruleRequired, null, false, null, null); - checkDataNodeExists(table.getDataNodes()); - if (distTableDns) { - distributeDataNodes(table.getDataNodes()); - } - if (tables.containsKey(table.getName())) { - throw new ConfigException("table " + tableName - + " duplicated!"); - } - tables.put(table.getName(), table); - } - - if (tableNames.length == 1) { - TableConfig table = tables.get(tableNames[0]); - - // process child tables - processChildTables(tables, table, dataNode, tableElement); - } - } - - return tables; - } - - /** - * distribute datanodes in multi hosts,means ,dn1 (host1),dn100 - * (host2),dn300(host3),dn2(host1),dn101(host2),dn301(host3)...etc - * - * @param dataNodes - */ - private void distributeDataNodes(ArrayList theDataNodes) { - Map> newDataNodeMap = new HashMap>( - dataHosts.size()); - for (String dn : theDataNodes) { - DataNodeConfig dnConf = dataNodes.get(dn); - String host = dnConf.getDataHost(); - ArrayList hostDns = newDataNodeMap.get(host); - hostDns = (hostDns == null) ? 
new ArrayList() : hostDns; - hostDns.add(dn); - newDataNodeMap.put(host, hostDns); - } - ArrayList result = new ArrayList(theDataNodes.size()); - boolean hasData = true; - while (hasData) { - hasData = false; - for (ArrayList dns : newDataNodeMap.values()) { - if (!dns.isEmpty()) { - result.add(dns.remove(0)); - hasData = true; - } - } - } - theDataNodes.clear(); - theDataNodes.addAll(result); - } - - private Set getDbType(String dataNode) { - Set dbTypes = new HashSet<>(); - String[] dataNodeArr = SplitUtil.split(dataNode, ',', '$', '-'); - for (String node : dataNodeArr) { - DataNodeConfig datanode = dataNodes.get(node); - DataHostConfig datahost = dataHosts.get(datanode.getDataHost()); - dbTypes.add(datahost.getDbType()); - } - - return dbTypes; - } - - private Set getDataNodeDbTypeMap(String dataNode) { - Set dbTypes = new HashSet<>(); - String[] dataNodeArr = SplitUtil.split(dataNode, ',', '$', '-'); - for (String node : dataNodeArr) { - DataNodeConfig datanode = dataNodes.get(node); - DataHostConfig datahost = dataHosts.get(datanode.getDataHost()); - dbTypes.add(datahost.getDbType()); - } - - return dbTypes; - } - - private boolean isHasMultiDbType(TableConfig table) { - Set dbTypes = table.getDbTypes(); - for (String dbType : dbTypes) { - if (!"mysql".equalsIgnoreCase(dbType)) { - return true; - } - } - return false; - } - - private void processChildTables(Map tables, - TableConfig parentTable, String dataNodes, Element tableNode) { - // parse child tables - NodeList childNodeList = tableNode.getChildNodes(); - for (int j = 0; j < childNodeList.getLength(); j++) { - Node theNode = childNodeList.item(j); - if (!theNode.getNodeName().equals("childTable")) { - continue; - } - Element childTbElement = (Element) theNode; - - String cdTbName = childTbElement.getAttribute("name").toUpperCase(); - String primaryKey = childTbElement.hasAttribute("primaryKey") ? 
childTbElement - .getAttribute("primaryKey").toUpperCase() : null; - - boolean autoIncrement = false; - if (childTbElement.hasAttribute("autoIncrement")) { - autoIncrement = Boolean.parseBoolean(childTbElement - .getAttribute("autoIncrement")); - } - boolean needAddLimit = true; - if (childTbElement.hasAttribute("needAddLimit")) { - needAddLimit = Boolean.parseBoolean(childTbElement - .getAttribute("needAddLimit")); - } - String joinKey = childTbElement.getAttribute("joinKey") - .toUpperCase(); - String parentKey = childTbElement.getAttribute("parentKey") - .toUpperCase(); - TableConfig table = new TableConfig(cdTbName, primaryKey, - autoIncrement, needAddLimit, - TableConfig.TYPE_GLOBAL_DEFAULT, dataNodes, - getDbType(dataNodes), null, false, parentTable, true, - joinKey, parentKey); - if (tables.containsKey(table.getName())) { - throw new ConfigException("table " + table.getName() - + " duplicated!"); - } - tables.put(table.getName(), table); - processChildTables(tables, table, dataNodes, childTbElement); - } - } - - private void checkDataNodeExists(Collection nodes) { - if (nodes == null || nodes.size() < 1) { - return; - } - for (String node : nodes) { - if (!dataNodes.containsKey(node)) { - throw new ConfigException("dataNode '" + node - + "' is not found!"); - } - } - } - - private void loadDataNodes(Element root) { - NodeList list = root.getElementsByTagName("dataNode"); - for (int i = 0, n = list.getLength(); i < n; i++) { - Element element = (Element) list.item(i); - String dnNamePre = element.getAttribute("name"); - - String databaseStr = element.getAttribute("database"); - String host = element.getAttribute("dataHost"); - if (empty(dnNamePre) || empty(databaseStr) || empty(host)) { - throw new ConfigException("dataNode " + dnNamePre - + " define error ,attribute can't be empty"); - } - String[] dnNames = io.mycat.util.SplitUtil.split(dnNamePre, - ',', '$', '-'); - String[] databases = io.mycat.util.SplitUtil.split( - databaseStr, ',', '$', '-'); - 
String[] hostStrings = io.mycat.util.SplitUtil.split(host, - ',', '$', '-'); - - if (dnNames.length > 1 - && dnNames.length != databases.length * hostStrings.length) { - throw new ConfigException( - "dataNode " - + dnNamePre - + " define error ,dnNames.length must be=databases.length*hostStrings.length"); - } - if (dnNames.length > 1) { - List mhdList = mergerHostDatabase(hostStrings, - databases); - for (int k = 0; k < dnNames.length; k++) { - String[] hd = mhdList.get(k); - String dnName = dnNames[k]; - String databaseName = hd[1]; - String hostName = hd[0]; - createDataNode(dnName, databaseName, hostName); - - } - - } else { - createDataNode(dnNamePre, databaseStr, host); - } - - } - } - - private List mergerHostDatabase(String[] hostStrings, - String[] databases) { - List mhdList = new ArrayList<>(); - for (int i = 0; i < hostStrings.length; i++) { - String hostString = hostStrings[i]; - for (int i1 = 0; i1 < databases.length; i1++) { - String database = databases[i1]; - String[] hd = new String[2]; - hd[0] = hostString; - hd[1] = database; - mhdList.add(hd); - } - } - return mhdList; - } - - private void createDataNode(String dnName, String database, String host) { - DataNodeConfig conf = new DataNodeConfig(dnName, database, host); - if (dataNodes.containsKey(conf.getName())) { - throw new ConfigException("dataNode " + conf.getName() - + " duplicated!"); - } - if (!dataHosts.containsKey(host)) { - throw new ConfigException("dataNode " + dnName - + " reference dataHost:" + host + " not exists!"); - } - dataNodes.put(conf.getName(), conf); - } - - private boolean empty(String dnName) { - return dnName == null || dnName.length() == 0; - } - - private DBHostConfig createDBHostConf(String dataHost, Element node, - String dbType, String dbDriver, int maxCon, int minCon, String filters, long logTime) { - String nodeHost = node.getAttribute("host"); - String nodeUrl = node.getAttribute("url"); - String user = node.getAttribute("user"); - String password = 
node.getAttribute("password"); - String weightStr = node.getAttribute("weight"); - int weight = "".equals(weightStr) ? PhysicalDBPool.WEIGHT : Integer.valueOf(weightStr) ; - - String ip = null; - int port = 0; - if (empty(nodeHost) || empty(nodeUrl) || empty(user)) { - throw new ConfigException( - "dataHost " - + dataHost - + " define error,some attributes of this element is empty: " - + nodeHost); - } - if ("native".equalsIgnoreCase(dbDriver)) { - int colonIndex = nodeUrl.indexOf(':'); - ip = nodeUrl.substring(0, colonIndex).trim(); - port = Integer.parseInt(nodeUrl.substring(colonIndex + 1).trim()); - } else { - URI url; - try { - url = new URI(nodeUrl.substring(5)); - } catch (Exception e) { - throw new ConfigException("invalid jdbc url " + nodeUrl - + " of " + dataHost); - } - ip = url.getHost(); - port = url.getPort(); - } - - DBHostConfig conf = new DBHostConfig(nodeHost, ip, port, nodeUrl, user, password); - conf.setDbType(dbType); - conf.setMaxCon(maxCon); - conf.setMinCon(minCon); - conf.setFilters(filters); - conf.setLogTime(logTime); - conf.setWeight(weight); //新增权重 - return conf; - } - - private void loadDataHosts(Element root) { - NodeList list = root.getElementsByTagName("dataHost"); - for (int i = 0, n = list.getLength(); i < n; ++i) { - Element element = (Element) list.item(i); - String name = element.getAttribute("name"); - if (dataHosts.containsKey(name)) { - throw new ConfigException("dataHost name " + name - + "duplicated!"); - } - int maxCon = Integer.valueOf(element.getAttribute("maxCon")); - int minCon = Integer.valueOf(element.getAttribute("minCon")); - int balance = Integer.valueOf(element.getAttribute("balance")); - String switchTypeStr = element.getAttribute("switchType"); - int switchType = switchTypeStr.equals("") ? -1 : Integer - .valueOf(switchTypeStr); - String slaveThresholdStr = element.getAttribute("slaveThreshold"); - int slaveThreshold = slaveThresholdStr.equals("") ? 
-1 : Integer - .valueOf(slaveThresholdStr); - - //如果 tempReadHostAvailable 设置大于 0 则表示写主机如果挂掉, 临时的读服务依然可用 - String tempReadHostAvailableStr = element.getAttribute("tempReadHostAvailable"); - boolean tempReadHostAvailable = tempReadHostAvailableStr.equals("") ? false : Integer.valueOf(tempReadHostAvailableStr) > 0; - - String writeTypStr = element.getAttribute("writeType"); - int writeType = "".equals(writeTypStr) ? PhysicalDBPool.WRITE_ONLYONE_NODE - : Integer.valueOf(writeTypStr); - - String dbDriver = element.getAttribute("dbDriver"); - String dbType = element.getAttribute("dbType"); - String filters = element.getAttribute("filters"); - String logTimeStr = element.getAttribute("logTime"); - long logTime = "".equals(logTimeStr) ? PhysicalDBPool.LONG_TIME : Long.valueOf(logTimeStr) ; - String heartbeatSQL = element.getElementsByTagName("heartbeat") - .item(0).getTextContent(); - NodeList connectionInitSqlList = element - .getElementsByTagName("connectionInitSql"); - String initConSQL = null; - if (connectionInitSqlList.getLength() > 0) { - initConSQL = connectionInitSqlList.item(0).getTextContent(); - } - NodeList writeNodes = element.getElementsByTagName("writeHost"); - DBHostConfig[] writeDbConfs = new DBHostConfig[writeNodes - .getLength()]; - Map readHostsMap = new HashMap( - 2); - for (int w = 0; w < writeDbConfs.length; w++) { - Element writeNode = (Element) writeNodes.item(w); - writeDbConfs[w] = createDBHostConf(name, writeNode, dbType, - dbDriver, maxCon, minCon,filters,logTime); - NodeList readNodes = writeNode.getElementsByTagName("readHost"); - if (readNodes.getLength() != 0) { - DBHostConfig[] readDbConfs = new DBHostConfig[readNodes - .getLength()]; - for (int r = 0; r < readDbConfs.length; r++) { - Element readNode = (Element) readNodes.item(r); - readDbConfs[r] = createDBHostConf(name, readNode, - dbType, dbDriver, maxCon, minCon,filters,logTime); - } - readHostsMap.put(w, readDbConfs); - } - } - - DataHostConfig hostConf = new DataHostConfig(name, 
dbType, - dbDriver, writeDbConfs, readHostsMap, switchType, - slaveThreshold, tempReadHostAvailable); - hostConf.setMaxCon(maxCon); - hostConf.setMinCon(minCon); - hostConf.setBalance(balance); - hostConf.setWriteType(writeType); - hostConf.setHeartbeatSQL(heartbeatSQL); - hostConf.setConnectionInitSql(initConSQL); - hostConf.setFilters(filters); - hostConf.setLogTime(logTime); - dataHosts.put(hostConf.getName(), hostConf); - - } - } - private void loadTableRules(Element root) { - NodeList list = root.getElementsByTagName("tableRule"); - try { - for (int i = 0, n = list.getLength(); i < n; ++i) { - Node node = list.item(i); - if (node instanceof Element) { - Element e = (Element) node; - String name = e.getAttribute("name"); - String column = e.getAttribute("column"); - String functionName = e.getAttribute("functionName"); - - if (tableRules.containsKey(name)) { - throw new ConfigException("table rule " + name + " duplicated!"); - } - RuleConfig ruleConfig = new RuleConfig(name, column, functionName); - Map props = ConfigUtil.loadElements((Element) node); - ruleConfig.setProps(props); - - AbstractPartitionAlgorithm function = createFunction(name, functionName); - if (function == null) { - throw new ConfigException("can't find function of name :" + functionName); - } - ParameterMapping.mapping(function, ConfigUtil.loadElements(e)); - NodeList configNodes = e.getElementsByTagName("config"); - int length = configNodes.getLength(); - if (length > 1) { - throw new ConfigException("tableRule only one config can defined :" + name); - } - if(length!=0){ - Element configEle = (Element) configNodes.item(0); - LinkedHashMap configs = ConfigUtil.loadLinkElements((Element) configEle); - function.setConfig(configs); - } - - function.init(); - ruleConfig.setRuleAlgorithm(function); - tableRules.put(name, ruleConfig); - } - } - } catch (Exception e) { - throw new ConfigException("load tableRule error: " ,e); - } - - } - - private AbstractPartitionAlgorithm createFunction(String 
name, String clazz) - throws ClassNotFoundException, InstantiationException, - IllegalAccessException, InvocationTargetException { - Class clz = Class.forName(clazz); - if (!AbstractPartitionAlgorithm.class.isAssignableFrom(clz)) { - throw new IllegalArgumentException("rule function must implements " - + AbstractPartitionAlgorithm.class.getName() + ", name=" + name); - } - return (AbstractPartitionAlgorithm) clz.newInstance(); - } - - private static Map loadNode(Element root, int port) { - Map nodes = new HashMap(); - NodeList list = root.getElementsByTagName("node"); - Set hostSet = new HashSet(); - for (int i = 0, n = list.getLength(); i < n; i++) { - Node node = list.item(i); - if (node instanceof Element) { - Element element = (Element) node; - String name = element.getAttribute("name").trim(); - if (nodes.containsKey(name)) { - throw new ConfigException("node name duplicated :" + name); - } - - Map props = ConfigUtil.loadElements(element); - String host = (String) props.get("host"); - if (null == host || "".equals(host)) { - throw new ConfigException("host empty in node: " + name); - } - if (hostSet.contains(host)) { - throw new ConfigException("node host duplicated :" + host); - } - - String wei = (String) props.get("weight"); - if (null == wei || "".equals(wei)) { - throw new ConfigException("weight should not be null in host:" + host); - } - int weight = Integer.valueOf(wei); - if (weight <= 0) { - throw new ConfigException("weight should be > 0 in host:" + host + " weight:" + weight); - } - - MycatNodeConfig conf = new MycatNodeConfig(name, host, port, weight); - nodes.put(name, conf); - hostSet.add(host); - } - } - return nodes; - } - - private static Map> loadGroup(Element root, Map nodes) { - Map> groups = new HashMap>(); - NodeList list = root.getElementsByTagName("group"); - for (int i = 0, n = list.getLength(); i < n; i++) { - Node node = list.item(i); - if (node instanceof Element) { - Element e = (Element) node; - String groupName = 
e.getAttribute("name").trim(); - if (groups.containsKey(groupName)) { - throw new ConfigException("group duplicated : " + groupName); - } - - Map props = ConfigUtil.loadElements(e); - String value = (String) props.get("nodeList"); - if (null == value || "".equals(value)) { - throw new ConfigException("group should contain 'nodeList'"); - } - - String[] sList = SplitUtil.split(value, ',', true); - - if (null == sList || sList.length == 0) { - throw new ConfigException("group should contain 'nodeList'"); - } - - for (String s : sList) { - if (!nodes.containsKey(s)) { - throw new ConfigException("[ node :" + s + "] in [ group:" + groupName + "] doesn't exist!"); - } - } - List nodeList = Arrays.asList(sList); - groups.put(groupName, nodeList); - } - } - if (!groups.containsKey("default")) { - List nodeList = new ArrayList(nodes.keySet()); - groups.put("default", nodeList); - } - return groups; - } - - private void loadCharsetConfig(Element root){ - NodeList list = root.getElementsByTagName("charset-config"); - try { - for (int i = 0, n = list.getLength(); i < n; i++) { - Node node = list.item(i); - if (node instanceof Element) { - Map props = ConfigUtil.loadElements((Element) node); - this.charsetConfig.setProps(props); - } - } - } catch (Exception e) { - e.printStackTrace(); - throw new ConfigException("loadCharsetConfig error: " + e.getMessage()); - } - } - private void loadHostIndexConfig(Element root) { - /*NodeList list = root.getElementsByTagName("dnindex-config"); - try { - for (int i = 0, n = list.getLength(); i < n; i++) { - Node node = list.item(i); - if (node instanceof Element) { - Map props = ConfigUtil.loadElements((Element) node); - this.hostIndexConfig.setProps(props); - } - } - } catch (Exception e) { - e.printStackTrace(); - throw new ConfigException("loadHostIndexConfig error: " + e.getMessage()); - }*/ - try { - File file = new File(SystemConfig.getHomePath(), "conf" + File.separator + "dnindex.properties"); - Properties dnIndexProperties = new 
Properties(); - dnIndexProperties.load(new FileInputStream(file)); - this.hostIndexConfig.setProps(dnIndexProperties); - } catch (Exception e) { - e.printStackTrace(); - throw new ConfigException("loadHostIndexConfig error: " + e.getMessage()); - } - - - } - private void loadSequenceConfig(Element root) { - NodeList list = root.getElementsByTagName("sequence"); - try { - Node node = list.item(0); - if (node instanceof Element) { - String type = ((Element) node).getAttribute("type"); - String vclass = ((Element) node).getAttribute("class"); - - Map props = ConfigUtil.loadElements((Element) node); - this.sequenceConfig.setType(type); - this.sequenceConfig.setVclass(vclass); - this.sequenceConfig.setProps(props); - - } - } catch (Exception e) { - e.printStackTrace(); - throw new ConfigException("loadSequenceConfig error: " + e.getMessage()); - } - } - - private static Map loadJdbcDriverConfig(Element root) { - NodeList list = root.getElementsByTagName("driver"); - try { - Map jdbcDriverConfig = new HashMap<>(); - for(int i=0; i - * @return - */ - public static Element getRoot() { - if(document == null) - return loadRoot(); - return document.getDocumentElement(); - } - - /** - * 重新加载 mycat.xml - */ - public static void reLoad(){ - document = null; - loadRoot(); - } -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/config/loader/ReloadUtil.java b/src/main/java/io/mycat/server/config/loader/ReloadUtil.java deleted file mode 100644 index 4cc8156a0..000000000 --- a/src/main/java/io/mycat/server/config/loader/ReloadUtil.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. 
you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.config.loader; - - -import io.mycat.MycatServer; -import io.mycat.backend.PhysicalDBNode; -import io.mycat.backend.PhysicalDBPool; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.cluster.MycatClusterConfig; -import io.mycat.server.config.node.CharsetConfig; -import io.mycat.server.config.node.HostIndexConfig; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.config.node.QuarantineConfig; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SequenceConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.config.node.UserConfig; -import io.mycat.server.response.ReloadCallBack; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.Callable; -import java.util.concurrent.locks.ReentrantLock; - -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; - -/** - * @author mycat - */ -public final class ReloadUtil { - public static void execute(MySQLFrontConnection c, 
final boolean loadAll) { - final ReentrantLock lock = MycatServer.getInstance().getConfig().getLock(); - lock.lock(); - try { - ListenableFuture listenableFuture = MycatServer.getInstance().getListeningExecutorService().submit(new Callable() - { - @Override - public Boolean call() throws Exception - { - - return loadAll?reload_all():reload(); - } - }); - Futures.addCallback(listenableFuture, new ReloadCallBack(c), MycatServer.getInstance().getListeningExecutorService()); - } finally { - lock.unlock(); - } - } - - private static boolean reload_all() { - reload(); - - //加载数据源 - boolean reloadStatus = true; - MycatConfig conf = MycatServer.getInstance().getConfig(); - reloadStatus = conf.reloadDatasource(); - if(reloadStatus==false){ - return false; - } - - return true; - } - - private static boolean reload() { - // 载入新的配置 - ConfigInitializer loader = new ConfigInitializer(false); - Map users = loader.getUsers(); - Map schemas = loader.getSchemas(); - Map dataNodes = loader.getDataNodes(); - Map dataHosts = loader.getDataHosts(); - MycatClusterConfig cluster = loader.getCluster(); - QuarantineConfig quarantine = loader.getQuarantine(); - CharsetConfig charsetConfig = loader.getCharsetConfig(); - SequenceConfig sequenceConfig = loader.getSequenceConfig(); - HostIndexConfig hostIndexConfig = loader.getHostIndexs(); - - // 应用新配置 - MycatServer instance = MycatServer.getInstance(); - MycatConfig conf = instance.getConfig(); - conf.reloadCharsetConfigs(); - - // 应用重载 - conf.reload(users, schemas, dataNodes, dataHosts, cluster, quarantine, - charsetConfig,sequenceConfig,hostIndexConfig,false); - - //清理缓存 - instance.getCacheService().clearCache(); - return true; - } - - public static boolean rollback() { - MycatConfig conf = MycatServer.getInstance().getConfig(); - Map users = conf.getBackupUsers(); - Map schemas = conf.getBackupSchemas(); - Map dataNodes = conf.getBackupDataNodes(); - Map dataHosts = conf.getBackupDataHosts(); - MycatClusterConfig cluster = 
conf.getBackupCluster(); - QuarantineConfig quarantine = conf.getBackupQuarantine(); - CharsetConfig charsetConfig = conf.getBackupCharsetConfig(); - SequenceConfig sequenceConfig = conf.getBackupSequenceConfig(); - HostIndexConfig hostIndexConfig = conf.getBackupHostIndexs(); - - // 检查可回滚状态 - if (!conf.canRollback()) { - return false; - } - // 应用回滚 - conf.rollback(users, schemas, dataNodes, dataHosts, cluster, quarantine,charsetConfig,sequenceConfig,hostIndexConfig); - - conf.rebackDatasource(); - - //清理缓存 - MycatServer.getInstance().getCacheService().clearCache(); - return true; - } - - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/config/loader/SystemLoader.java b/src/main/java/io/mycat/server/config/loader/SystemLoader.java deleted file mode 100644 index aa2faa5dd..000000000 --- a/src/main/java/io/mycat/server/config/loader/SystemLoader.java +++ /dev/null @@ -1,12 +0,0 @@ -package io.mycat.server.config.loader; - -import io.mycat.server.config.node.SystemConfig; - -/** - * Instances of this interface maintain the configuration of system. - * The Configuration is described by an instance of SystemConfig. - * Created by v1.lion on 2015/9/27. - */ -public interface SystemLoader { - SystemConfig getSystemConfig(); -} diff --git a/src/main/java/io/mycat/server/config/loader/zkloader/AbstractZKLoaders.java b/src/main/java/io/mycat/server/config/loader/zkloader/AbstractZKLoaders.java deleted file mode 100644 index 00a9dce00..000000000 --- a/src/main/java/io/mycat/server/config/loader/zkloader/AbstractZKLoaders.java +++ /dev/null @@ -1,93 +0,0 @@ -package io.mycat.server.config.loader.zkloader; - -import io.mycat.server.config.ConfigException; -import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.utils.ZKPaths; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.charset.StandardCharsets; -import java.util.List; - -/** - * provide a abstract constructor to construct zookeeper path. 
- * Created by v1.lion on 2015/10/8. - */ -public abstract class AbstractZKLoaders { - private static final Logger LOGGER = LoggerFactory.getLogger(AbstractZKLoaders.class); - - //parent config path in zookeeper - //example /mycat-cluster-/server-config/ - // /CLUSTER_ID/CONFIG_DIRECTORY_NAME - protected final String BASE_CONFIG_PATH; - - public AbstractZKLoaders() { - super(); - BASE_CONFIG_PATH = null; - } - - public AbstractZKLoaders(String clusterID, String configDirectoryName) { - BASE_CONFIG_PATH = ZKPaths.makePath("/", clusterID, configDirectoryName); - LOGGER.trace("base config path is {}", BASE_CONFIG_PATH); - } - - /** - * return a string transformed from data in specified path. - * - * @param zkConnection - * @param path path in zookeeper - * @return data string - */ - public String fetchDataToString(final CuratorFramework zkConnection, String path, String... restPath) { - return new String(fetchData(zkConnection, path, restPath), StandardCharsets.UTF_8); - } - - /** - * return data based on specified path. - * - * @param zkConnection - * @param path - * @param restPath - * @return data in zookeeper - */ - public byte[] fetchData(final CuratorFramework zkConnection, String path, String... restPath) { - String dataPath = ZKPaths.makePath(BASE_CONFIG_PATH, path, restPath); - try { - byte[] rawByte = zkConnection.getData().forPath(dataPath); - LOGGER.trace("get raw data from zookeeper: {} , path : {}", - new String(rawByte, StandardCharsets.UTF_8), dataPath); - return rawByte; - } catch (Exception e) { - LOGGER.error("get config data from zookeeper error : {}, path : {}", - e.getMessage(), dataPath); - throw new ConfigException(e); - } - } - - - /** - * return a children name list under BASE_CONFIG_PATH - * - * @param zkConnection - * @param restPath rest path concat to BASE_CONFIG_PATH - * @return name list - */ - public List fetchChildren(final CuratorFramework zkConnection, String... 
restPath) { - try { - String childPath = ZKPaths.makePath(BASE_CONFIG_PATH, null, restPath); - return zkConnection - .getChildren() - .forPath(childPath); - } catch (Exception e) { - LOGGER.error("fetch child node name from zookeeper error : {} , path {} ", e.getMessage(), BASE_CONFIG_PATH); - throw new ConfigException(e); - } - } - - /** - * fetch config form zookeeper and then transform them to bean. - * - * @param zkConnection a zookeeper connection - */ - abstract public void fetchConfig(final CuratorFramework zkConnection); -} diff --git a/src/main/java/io/mycat/server/config/loader/zkloader/ZkConfig.java b/src/main/java/io/mycat/server/config/loader/zkloader/ZkConfig.java deleted file mode 100644 index 54c738288..000000000 --- a/src/main/java/io/mycat/server/config/loader/zkloader/ZkConfig.java +++ /dev/null @@ -1,48 +0,0 @@ -package io.mycat.server.config.loader.zkloader; - -/** - * zookeeper config bean - * Created by v1.lion on 2015/10/6. - */ -public class ZkConfig { - private String zkURL; - private String clusterID; - private String myID; - - public ZkConfig() { - super(); - } - - public String getZkURL() { - return zkURL; - } - - public void setZkURL(String zkURL) { - this.zkURL = zkURL; - } - - public String getClusterID() { - return clusterID; - } - - public void setClusterID(String clusterID) { - this.clusterID = clusterID; - } - - public String getMyID() { - return myID; - } - - public void setMyID(String myID) { - this.myID = myID; - } - - @Override - public String toString() { - return "ZkConfig{" + - "zkURL='" + zkURL + '\'' + - ", clusterID='" + clusterID + '\'' + - ", myID='" + myID + '\'' + - '}'; - } -} diff --git a/src/main/java/io/mycat/server/config/loader/zkloader/ZkDataHostConfigLoader.java b/src/main/java/io/mycat/server/config/loader/zkloader/ZkDataHostConfigLoader.java deleted file mode 100644 index 5b0cb3389..000000000 --- a/src/main/java/io/mycat/server/config/loader/zkloader/ZkDataHostConfigLoader.java +++ /dev/null @@ -1,126 +0,0 
@@ -package io.mycat.server.config.loader.zkloader; - -import com.alibaba.fastjson.JSON; - -import org.apache.curator.framework.CuratorFramework; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Function; -import java.util.stream.Collectors; - -import io.mycat.server.config.node.DBHostConfig; -import io.mycat.server.config.node.DataHostConfig; - -/** - * Created by v1.lion on 2015/10/18. - */ -public class ZkDataHostConfigLoader extends AbstractZKLoaders { - private static final Logger LOGGER = LoggerFactory.getLogger(ZkDataHostConfigLoader.class); - - private static final String DATAHOST_CONFIG_DIRECTORY = "datahost-config"; - - //hold datahost name mapping to DataHostConfig - private Map dataHostConfigs; - - private CuratorFramework zkConnection; - - public ZkDataHostConfigLoader(final String clusterID) { - super(clusterID, DATAHOST_CONFIG_DIRECTORY); - } - - @Override - public void fetchConfig(CuratorFramework zkConnection) { - this.zkConnection = zkConnection; - - /// mycat-cluster-1/ datahost -config/ ${dataHostName} - dataHostConfigs = super.fetchChildren(this.zkConnection, "") - .stream() - .map(dataHostName -> createHostConfig(dataHostName)) - .collect(Collectors.toMap(DataHostConfig::getName, Function.identity())); - - this.zkConnection = null; - } - - private DataHostConfig createHostConfig(final String dataHostName) { - //parse dataHost - DataHostConfig dataHostConfig = JSON.parseObject( - super.fetchData(this.zkConnection, dataHostName), DataHostConfig.class); - //for put read host - dataHostConfig.setReadHosts(new HashMap<>()); - - //create write host and read host - AtomicInteger writeCount = new AtomicInteger(); - List writeHostList = new ArrayList<>(); - - /// mycat-cluster-1/ datahost -config/ ${dataHostName} / ${writeHostName} - 
super.fetchChildren(this.zkConnection, dataHostName) - .stream() - .forEach(writeHostName -> { - int writeIndex = writeCount.getAndIncrement(); - writeHostList.add(generateWriteHostConfig(dataHostConfig, dataHostName, writeHostName)); - buildReadHostConfig(dataHostConfig, dataHostName, writeHostName, writeIndex); - }); - - - //Convert list to array - DBHostConfig[] writeArray = new DBHostConfig[writeHostList.size()]; - writeHostList.toArray(writeArray); - - dataHostConfig.setWriteHosts(writeArray); - return dataHostConfig; - } - - private void buildReadHostConfig(DataHostConfig dataHostConfig, String dataHost, - String writeHostName, int writeIndex) { - List readHostList = new ArrayList<>(); - - //parse read host - super.fetchChildren(this.zkConnection, dataHost, writeHostName) - .stream() - .forEach(readHostName -> { - DBHostConfig readHostConfig = JSON.parseObject( - super.fetchData(this.zkConnection, dataHost, writeHostName, readHostName) - , DBHostConfig.class); - readHostConfig.setDbType(dataHostConfig.getDbType()); - readHostConfig.setMaxCon(dataHostConfig.getMaxCon()); - readHostConfig.setMinCon(dataHostConfig.getMinCon()); - - readHostList.add(readHostConfig); - LOGGER.trace("generate read host config : {}", readHostConfig); - }); - - if (readHostList.size() > 0) { - //Convert list to array - DBHostConfig[] readArray = new DBHostConfig[readHostList.size()]; - readHostList.toArray(readArray); - - //set to dataHostConfig - dataHostConfig.getReadHosts().put(writeIndex, readArray); - } - } - - private DBHostConfig generateWriteHostConfig(DataHostConfig dataHostConfig, String dataHost, - String writeHostName) { - //parse write host - DBHostConfig writeHostConfig = JSON.parseObject( - super.fetchData(this.zkConnection, dataHost, writeHostName), DBHostConfig.class); - - writeHostConfig.setDbType(dataHostConfig.getDbType()); - writeHostConfig.setDbType(dataHostConfig.getDbType()); - writeHostConfig.setMaxCon(dataHostConfig.getMaxCon()); - 
writeHostConfig.setMinCon(dataHostConfig.getMinCon()); - - LOGGER.trace("generate write host config : {}", writeHostConfig); - return writeHostConfig; - } - - public Map getDataHostConfigs() { - return dataHostConfigs; - } -} diff --git a/src/main/java/io/mycat/server/config/loader/zkloader/ZkDataNodeConfigLoader.java b/src/main/java/io/mycat/server/config/loader/zkloader/ZkDataNodeConfigLoader.java deleted file mode 100644 index 487b99f75..000000000 --- a/src/main/java/io/mycat/server/config/loader/zkloader/ZkDataNodeConfigLoader.java +++ /dev/null @@ -1,74 +0,0 @@ -package io.mycat.server.config.loader.zkloader; - -import com.alibaba.fastjson.JSON; - -import org.apache.curator.framework.CuratorFramework; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.HashMap; -import java.util.Map; -import java.util.function.Function; - -import io.mycat.server.config.ConfigException; -import io.mycat.server.config.node.DataHostConfig; -import io.mycat.server.config.node.DataNodeConfig; - -import static java.util.stream.Collectors.toMap; - -/** - * Created by v1.lion on 2015/10/8. 
- */ -public class ZkDataNodeConfigLoader extends AbstractZKLoaders { - //directory name of data node config in zookeeper - private static final Logger LOGGER = LoggerFactory.getLogger(ZkDataNodeConfigLoader.class); - private static final String DATANODE_CONFIG_DIRECTORY = "datanode-config"; - private final ZkDataHostConfigLoader dataHostConfigLoader; - - //hold dataNode name mapping to DataNodeConfig - private Map dataNodeConfigs; - - public ZkDataNodeConfigLoader(final String clusterID, ZkDataHostConfigLoader dataHostConfigLoader) { - super(clusterID, DATANODE_CONFIG_DIRECTORY); - this.dataHostConfigLoader = dataHostConfigLoader; - } - - @Override - public void fetchConfig(CuratorFramework zkConnection) { - //invoke composed - this.dataHostConfigLoader.fetchConfig(zkConnection); - - //data node config path in zookeeper - //example: /mycat-cluster-1/datanode-config - this.dataNodeConfigs = new HashMap<>(); - - JSON.parseArray(super.fetchDataToString(zkConnection, "") - , DataNodeConfig.class) - .stream() - .forEach(dataNodeConfig -> { - if (dataNodeConfigs.containsKey(dataNodeConfig.getName())) { - throw new ConfigException("dataNode " + dataNodeConfig.getName() + - " duplicated!"); - } - - if (!dataHostConfigLoader.getDataHostConfigs() - .containsKey(dataNodeConfig.getDataHost())) { - throw new ConfigException("dataNode " + dataNodeConfig.getName() + - " reference dataHost:" + dataNodeConfig.getDataHost() + - " not exists!"); - } - - dataNodeConfigs.put(dataNodeConfig.getName(),dataNodeConfig); - }); - LOGGER.trace("done fetch data node config."); - } - - public Map getDataNodeConfigs() { - return this.dataNodeConfigs; - } - - public Map getDataHostConfigs() { - return this.dataHostConfigLoader.getDataHostConfigs(); - } - -} diff --git a/src/main/java/io/mycat/server/config/loader/zkloader/ZkRuleConfigLoader.java b/src/main/java/io/mycat/server/config/loader/zkloader/ZkRuleConfigLoader.java deleted file mode 100644 index c8ce64892..000000000 --- 
a/src/main/java/io/mycat/server/config/loader/zkloader/ZkRuleConfigLoader.java +++ /dev/null @@ -1,106 +0,0 @@ -package io.mycat.server.config.loader.zkloader; - -import com.alibaba.fastjson.JSON; -import com.alibaba.fastjson.JSONObject; -import io.mycat.route.function.AbstractPartitionAlgorithm; -import io.mycat.server.config.ConfigException; -import io.mycat.server.config.node.RuleConfig; -import org.apache.commons.beanutils.BeanUtils; -import org.apache.curator.framework.CuratorFramework; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.lang.reflect.InvocationTargetException; -import java.util.Map; -import java.util.function.Function; - -import static java.util.stream.Collectors.toMap; - -/** - * Created by v1.lion on 2015/10/8. - */ -public class ZkRuleConfigLoader extends AbstractZKLoaders { - private static final Logger LOGGER = LoggerFactory.getLogger(ZkRuleConfigLoader.class); - - //directory name of rule node config in zookeeper - private static final String RULE_CONFIG_DIRECTORY = "rule-config"; - private static final String RULE_NAME_KEY = "name"; - private static final String FUNCTION_NAME_KEY = "functionName"; - private static final String COLUMN_NAME_KEY = "column"; - - //hold rule name mapping to RuleConfig - private Map ruleConfigMap; - - - public ZkRuleConfigLoader(final String clusterID) { - super(clusterID, RULE_CONFIG_DIRECTORY); - } - - @Override - public void fetchConfig(CuratorFramework zkConnection) { - //rule config path in zookeeper - //example: /mycat-cluster-1/rule-config/sharding-by-enum - this.ruleConfigMap = super - .fetchChildren(zkConnection) - .stream() - .map(nodeName -> { - //fetch data - String rawRuleStr = super.fetchDataToString(zkConnection, nodeName); - - //parse - JSONObject ruleJson = JSON.parseObject(rawRuleStr); - - //create RuleConfig - RuleConfig ruleConfig = new RuleConfig( - ruleJson.getString(RULE_NAME_KEY), - ruleJson.getString(COLUMN_NAME_KEY), - ruleJson.getString(FUNCTION_NAME_KEY)); - - 
AbstractPartitionAlgorithm ruleFunction = instanceFunction(ruleJson); - - ruleConfig.setRuleAlgorithm(ruleFunction); - ruleConfig.setProps(ruleJson); - return ruleConfig; - }) - .collect(toMap(RuleConfig::getName, Function.identity())); - LOGGER.trace("done fetch rule config : {}", ruleConfigMap); - } - - private AbstractPartitionAlgorithm instanceFunction(JSONObject ruleJson) { - String functionName = ruleJson.getString(FUNCTION_NAME_KEY); - String ruleName = ruleJson.getString(RULE_NAME_KEY); - - //for bean copy - ruleJson.remove(COLUMN_NAME_KEY); - ruleJson.remove(FUNCTION_NAME_KEY); - ruleJson.remove(RULE_NAME_KEY); - - AbstractPartitionAlgorithm algorithm; - try { - Class clz = Class.forName(functionName); - if (!AbstractPartitionAlgorithm.class.isAssignableFrom(clz)) { - throw new IllegalArgumentException("rule function must implements " - + AbstractPartitionAlgorithm.class.getName() + ", name=" + ruleName); - } - algorithm = (AbstractPartitionAlgorithm) clz.newInstance(); - } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) { - LOGGER.warn("instance function class error: {}", e.getMessage(), e); - throw new ConfigException(e); - } - - try { - BeanUtils.populate(algorithm, ruleJson); - } catch (IllegalAccessException | InvocationTargetException e) { - throw new ConfigException("copy property to " + functionName + " error: ", e); - } - - //init - algorithm.init(); - LOGGER.trace("instanced function class : {}", functionName); - return algorithm; - } - - public Map getRuleConfigs() { - return this.ruleConfigMap; - } -} diff --git a/src/main/java/io/mycat/server/config/loader/zkloader/ZkSchemaConfigLoader.java b/src/main/java/io/mycat/server/config/loader/zkloader/ZkSchemaConfigLoader.java deleted file mode 100644 index 2e71e5c7a..000000000 --- a/src/main/java/io/mycat/server/config/loader/zkloader/ZkSchemaConfigLoader.java +++ /dev/null @@ -1,126 +0,0 @@ -package io.mycat.server.config.loader.zkloader; - -import 
com.google.common.collect.ObjectArrays; - -import com.alibaba.fastjson.JSON; - -import org.apache.curator.framework.CuratorFramework; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.TableConfig; - -/** - * - * Created by v1.lion on 2015/10/18. - */ -public class ZkSchemaConfigLoader extends AbstractZKLoaders { - private static final Logger LOGGER = LoggerFactory.getLogger(ZkSchemaConfigLoader.class); - - //directory name of data node config in zookeeper - private static final String SCHEMA_CONFIG_DIRECTORY = "schema-config"; - - ZkDataNodeConfigLoader dataNodeConfigLoader; - ZkRuleConfigLoader ruleConfigLoadr; - - //hold a zookeeper connection,it is be closed after initiation - CuratorFramework zkConnection; - - //hold schema name mapping to DataNodeConfig - private Map schemaConfigs; - - public ZkSchemaConfigLoader(final String clusterID, - ZkDataNodeConfigLoader dataNodeConfigLoader, - ZkRuleConfigLoader ruleConfigLoader) { - super(clusterID, SCHEMA_CONFIG_DIRECTORY); - this.ruleConfigLoadr = ruleConfigLoader; - this.dataNodeConfigLoader = dataNodeConfigLoader; - } - - @Override - public void fetchConfig(CuratorFramework zkConnection) { - this.ruleConfigLoadr.fetchConfig(zkConnection); - this.dataNodeConfigLoader.fetchConfig(zkConnection); - this.zkConnection = zkConnection; - - //data node config path in zookeeper - //example: /mycat-cluster-1/schema-config / ${schemaName} - this.schemaConfigs = super.fetchChildren(zkConnection) - .stream() - .map(this::createSchema) - .collect(Collectors.toMap(SchemaConfig::getName, Function.identity())); - } - - private SchemaConfig createSchema(final String schemaName) { - //parse schema - //mycat-cluster-1/ schema-config/ ${schema name} - SchemaConfig schemaConfig = JSON.parseObject( - 
super.fetchData(this.zkConnection, schemaName), SchemaConfig.class); - - //parse TableConfig - //mycat-cluster-1/ schema-config/ ${schema name} /${table name} - Map tables = super.fetchChildren(this.zkConnection, schemaName) - .stream() - .flatMap(tableName -> generateTable(schemaName, tableName)) - .collect(Collectors.toMap(TableConfig::getName, Function.identity())); - - schemaConfig.setTables(tables); - return schemaConfig; - } - - /** - * create parent tableConfig. - */ - private Stream generateTable(final String schemaName, final String tableName) { - TableConfig parentTableConfig = createTableConfig(schemaName, tableName); - - return Stream.concat( - super.fetchChildren(zkConnection, schemaName, tableName) - .stream() - .flatMap(childTableName -> generateChildTable(parentTableConfig, - schemaName, tableName, childTableName)), - Stream.of(parentTableConfig)); - } - - /** - * create child tableConfig. - */ - private Stream generateChildTable(TableConfig parentTableConfig, - final String schemaName, - final String... childTableName) { - //recursion parse child TableConfig - //mycat-cluster-1/ schema-config/ ${schema name} /${table name} /${child table name} - //deep first. - TableConfig childTableConfig = createTableConfig(schemaName, childTableName); - childTableConfig.setParentTC(parentTableConfig); - - return Stream.concat( - super.fetchChildren(zkConnection, - ObjectArrays.concat(new String[]{schemaName}, childTableName, String.class)) - .stream() - .flatMap( - grandChildTableName -> generateChildTable(childTableConfig, - schemaName, - ObjectArrays.concat(childTableName, new String[]{grandChildTableName}, String.class)) - ) - , Stream.of(childTableConfig)); - } - - private TableConfig createTableConfig(String schemaName, String... 
tableName) { - TableConfig tableConfig = JSON.parseObject( - super.fetchData(this.zkConnection, schemaName, tableName), TableConfig.class); - tableConfig.setRule(this.ruleConfigLoadr.getRuleConfigs().get(tableConfig.getRuleName())); - tableConfig.checkConfig(); - return tableConfig; - } - - public Map getSchemaConfigs() { - return schemaConfigs; - } -} diff --git a/src/main/java/io/mycat/server/config/loader/zkloader/ZkSequenceConfigLoader.java b/src/main/java/io/mycat/server/config/loader/zkloader/ZkSequenceConfigLoader.java deleted file mode 100644 index 19a3f7f68..000000000 --- a/src/main/java/io/mycat/server/config/loader/zkloader/ZkSequenceConfigLoader.java +++ /dev/null @@ -1,25 +0,0 @@ -package io.mycat.server.config.loader.zkloader; - -import org.apache.curator.framework.CuratorFramework; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Created by v1.lion on 2015/10/11. - */ -public class ZkSequenceConfigLoader extends AbstractZKLoaders { - private static final Logger LOGGER = LoggerFactory.getLogger(ZkSequenceConfigLoader.class); - - //directory name of rule node config in zookeeper - private static final String SEQUENCE_CONFIG_DIRECTORY = "sequence-config"; - - public ZkSequenceConfigLoader(final String clusterID) { - super(clusterID, SEQUENCE_CONFIG_DIRECTORY); - } - - @Override - public void fetchConfig(final CuratorFramework zkConnection) { - - } - -} diff --git a/src/main/java/io/mycat/server/config/loader/zkloader/ZkSystemConfigLoader.java b/src/main/java/io/mycat/server/config/loader/zkloader/ZkSystemConfigLoader.java deleted file mode 100644 index ad885aa82..000000000 --- a/src/main/java/io/mycat/server/config/loader/zkloader/ZkSystemConfigLoader.java +++ /dev/null @@ -1,43 +0,0 @@ -package io.mycat.server.config.loader.zkloader; - -import com.alibaba.fastjson.JSON; -import io.mycat.server.config.loader.SystemLoader; -import io.mycat.server.config.node.SystemConfig; -import org.apache.curator.framework.CuratorFramework; -import 
org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - *

- * load system configuration from Zookeeper. - *

- * Created by v1.lion on 2015/9/27. - */ -public class ZkSystemConfigLoader extends AbstractZKLoaders implements SystemLoader { - //directory name of server config in zookeeper - protected static final String SERVER_CONFIG_DIRECTORY = "server-config"; - private static final Logger LOGGER = LoggerFactory.getLogger(ZkSystemConfigLoader.class); - //directory name of system config in zookeeper - private static final String SYSTEM_DIRECTORY = "system"; - - private SystemConfig systemConfig; - - public ZkSystemConfigLoader(final String clusterID) { - super(clusterID, SERVER_CONFIG_DIRECTORY); - } - - @Override - public void fetchConfig(CuratorFramework zkConnection) { - - //system config path in zookeeper - //example: /mycat-cluster-1 /server-config/system - this.systemConfig = JSON.parseObject(super.fetchData(zkConnection, SYSTEM_DIRECTORY) - , SystemConfig.class); - - LOGGER.trace("done system config from zookeeper : {}", systemConfig); - } - - public SystemConfig getSystemConfig() { - return this.systemConfig; - } -} diff --git a/src/main/java/io/mycat/server/config/loader/zkloader/ZkUserConfigLoader.java b/src/main/java/io/mycat/server/config/loader/zkloader/ZkUserConfigLoader.java deleted file mode 100644 index 98db58701..000000000 --- a/src/main/java/io/mycat/server/config/loader/zkloader/ZkUserConfigLoader.java +++ /dev/null @@ -1,55 +0,0 @@ -package io.mycat.server.config.loader.zkloader; - -import com.alibaba.fastjson.JSON; -import io.mycat.server.config.ConfigException; -import io.mycat.server.config.node.UserConfig; -import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.utils.ZKPaths; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; - -import static java.util.stream.Collectors.toMap; - -/** - *

- * load user configuration from Zookeeper. - *

- * Created by v1.lion on 2015/9/27. - */ -public class ZkUserConfigLoader extends AbstractZKLoaders { - private static final Logger LOGGER = LoggerFactory.getLogger(ZkUserConfigLoader.class); - - //directory name of user config in zookeeper - private static final String USERS_DIRECTORY = "user"; - - //hold user name mapping to UserConfig - private Map userConfigs; - - public ZkUserConfigLoader(final String clusterID) { - super(clusterID, ZkSystemConfigLoader.SERVER_CONFIG_DIRECTORY); - } - - - @Override - public void fetchConfig(CuratorFramework zkConnection) { - //user config path in zookeeper - //example: /mycat-cluster-1/server-config/user - - this.userConfigs = super - .fetchChildren(zkConnection, USERS_DIRECTORY) - .stream() - .map(username -> (UserConfig) JSON.parseObject( - super.fetchData(zkConnection, USERS_DIRECTORY, username), UserConfig.class)) - .collect(toMap(UserConfig::getName, Function.identity())); - - LOGGER.trace("done fetch user config : {}", this.userConfigs); - } - - public Map getUserConfig() { - return userConfigs; - } -} diff --git a/src/main/java/io/mycat/server/config/loader/zkloader/ZookeeperLoader.java b/src/main/java/io/mycat/server/config/loader/zkloader/ZookeeperLoader.java deleted file mode 100644 index 39c8cf86d..000000000 --- a/src/main/java/io/mycat/server/config/loader/zkloader/ZookeeperLoader.java +++ /dev/null @@ -1,148 +0,0 @@ -package io.mycat.server.config.loader.zkloader; - -import org.apache.curator.framework.CuratorFramework; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.yaml.snakeyaml.Yaml; - -import java.io.InputStream; -import java.util.Arrays; -import java.util.Map; - -import io.mycat.locator.ZookeeperServiceLocator; -import io.mycat.server.config.ConfigException; -import io.mycat.server.config.cluster.MycatClusterConfig; -import io.mycat.server.config.loader.ConfigLoader; -import io.mycat.server.config.node.CharsetConfig; -import io.mycat.server.config.node.DataHostConfig; -import 
io.mycat.server.config.node.DataNodeConfig; -import io.mycat.server.config.node.HostIndexConfig; -import io.mycat.server.config.node.QuarantineConfig; -import io.mycat.server.config.node.RuleConfig; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SequenceConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.config.node.UserConfig; - -public class ZookeeperLoader implements ConfigLoader { - private static final Logger LOGGER = LoggerFactory.getLogger(ZookeeperLoader.class); - private static final String ZK_CONFIG_FILE_NAME = "/zk.yaml"; - - private SystemConfig systemConfig; - private Map userConfigs; - private Map dataNodeConfigs; - private Map dataHostConfigs; - private Map ruleConfigs; - private Map schemaConfigs; - - public ZookeeperLoader() { - super(); - } - - public void initConfig() { - final ZkConfig zkConfig = loadZkConfig(); - final CuratorFramework zkConnection = ZookeeperServiceLocator.createConnection(zkConfig.getZkURL()); - final String myClusterId = zkConfig.getClusterID(); - - //system config - ZkSystemConfigLoader zkSystemLoader = new ZkSystemConfigLoader(myClusterId); - //user config - ZkUserConfigLoader zkUserConfigLoader = new ZkUserConfigLoader(myClusterId); - - //data host and data node config - ZkDataNodeConfigLoader dataNodeConfigLoader = new ZkDataNodeConfigLoader(myClusterId - , new ZkDataHostConfigLoader(myClusterId)); - - //rule config - ZkRuleConfigLoader ruleConfigLoader = new ZkRuleConfigLoader(myClusterId); - - // composed ZkDataHostConfigLoader,ZkDataNodeConfigLoader and ZkRuleConfigLoader - ZkSchemaConfigLoader zkSchemaConfigLoader = new ZkSchemaConfigLoader(myClusterId, - dataNodeConfigLoader, ruleConfigLoader); - - Arrays.asList(zkSystemLoader, zkUserConfigLoader, zkSchemaConfigLoader) - .stream() - .forEach(loader -> loader.fetchConfig(zkConnection)); - - this.systemConfig = zkSystemLoader.getSystemConfig(); - this.userConfigs = zkUserConfigLoader.getUserConfig(); 
- this.dataHostConfigs = dataNodeConfigLoader.getDataHostConfigs(); - this.dataNodeConfigs = dataNodeConfigLoader.getDataNodeConfigs(); - this.schemaConfigs = zkSchemaConfigLoader.getSchemaConfigs(); - } - - private ZkConfig loadZkConfig() { - LOGGER.trace("load file with name :" + ZK_CONFIG_FILE_NAME); - - InputStream configIS = getClass().getResourceAsStream(ZK_CONFIG_FILE_NAME); - if (configIS == null) { - throw new ConfigException("can't find zk properties file : " + ZK_CONFIG_FILE_NAME); - } - return new Yaml().loadAs(configIS, ZkConfig.class); - } - - @Override - public SchemaConfig getSchemaConfig(String schema) { - return null; - } - - @Override - public Map getSchemaConfigs() { - return null; - } - - @Override - public Map getDataNodeConfigs() { - return this.dataNodeConfigs; - } - - @Override - public Map getDataHostConfigs() { - return this.dataHostConfigs; - } - - @Override - public Map getTableRuleConfigs() { - return this.ruleConfigs; - } - - @Override - public SystemConfig getSystemConfig() { - return this.systemConfig; - } - - @Override - public UserConfig getUserConfig(String user) { - return this.userConfigs.get(user); - } - - @Override - public Map getUserConfigs() { - return this.userConfigs; - } - - @Override - public QuarantineConfig getQuarantineConfigs() { - return null; - } - - @Override - public MycatClusterConfig getClusterConfigs() { - return null; - } - - @Override - public CharsetConfig getCharsetConfigs() { - return null; - } - - @Override - public HostIndexConfig getHostIndexConfig() { - return null; - } - - @Override - public SequenceConfig getSequenceConfig() { - return null; - } -} diff --git a/src/main/java/io/mycat/server/config/node/CharsetConfig.java b/src/main/java/io/mycat/server/config/node/CharsetConfig.java deleted file mode 100644 index bd4013bc9..000000000 --- a/src/main/java/io/mycat/server/config/node/CharsetConfig.java +++ /dev/null @@ -1,18 +0,0 @@ -package io.mycat.server.config.node; - -import java.util.HashMap; 
-import java.util.Map; - -public class CharsetConfig { - private Map props = new HashMap(); - - public Map getProps() { - return props; - } - - public void setProps(Map props) { - this.props = props; - } - - -} diff --git a/src/main/java/io/mycat/server/config/node/DBHostConfig.java b/src/main/java/io/mycat/server/config/node/DBHostConfig.java deleted file mode 100644 index cc05ae0ff..000000000 --- a/src/main/java/io/mycat/server/config/node/DBHostConfig.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server.config.node; - - -public class DBHostConfig { - private String hostName; - private String ip; - private int port; - private String url; - private String user; - private String password; - private long idleTimeout = SystemConfig.DEFAULT_IDLE_TIMEOUT; // 连接池中连接空闲超时时间 - private int maxCon; - private int minCon; - private String dbType; - private String filters = "mergeStat"; - private long logTime = 300000; - private int weight; - - public DBHostConfig() { - super(); - } - - public DBHostConfig(String hostName, String ip, int port, String url, - String user, String password) { - super(); - this.hostName = hostName; - this.ip = ip; - this.port = port; - this.url = url; - this.user = user; - this.password = password; - } - - public String getDbType() { - return dbType; - } - - public void setDbType(String dbType) { - this.dbType = dbType; - } - - public long getIdleTimeout() { - return idleTimeout; - } - - public void setIdleTimeout(long idleTimeout) { - this.idleTimeout = idleTimeout; - } - - public int getMaxCon() { - return maxCon; - } - - public void setMaxCon(int maxCon) { - this.maxCon = maxCon; - } - - public int getMinCon() { - return minCon; - } - - public void setMinCon(int minCon) { - this.minCon = minCon; - } - - public String getHostName() { - return hostName; - } - - public void setHostName(String hostName) { - this.hostName = hostName; - } - - public String getIp() { - return ip; - } - - public void setIp(String ip) { - this.ip = ip; - } - - public int getPort() { - return port; - } - - public void setPort(int port) { - this.port = port; - } - - public String getUrl() { - return url; - } - - public void setUrl(String url) { - this.url = url; - } - - public String getUser() { - return user; - } - - public void setUser(String user) { - this.user = user; - } - - public String getFilters() { - return filters; - } - - public void setFilters(String filters) { - this.filters = filters; - } - - public String getPassword() { - 
return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public long getLogTime() { - return logTime; - } - - public void setLogTime(long logTime) { - this.logTime = logTime; - } - - public int getWeight() { - return weight; - } - - public void setWeight(int weight) { - this.weight = weight; - } - - @Override - public String toString() { - return "DBHostConfig [hostName=" + hostName + ", url=" + url + "]"; - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/config/node/DataHostConfig.java b/src/main/java/io/mycat/server/config/node/DataHostConfig.java deleted file mode 100644 index 697a76fe5..000000000 --- a/src/main/java/io/mycat/server/config/node/DataHostConfig.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server.config.node; - -import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import io.mycat.backend.PhysicalDBPool; - -/** - * Datahost is a group of DB servers which is synchronized with each other - * - * @author wuzhih - */ -public class DataHostConfig { - public static final int NOT_SWITCH_DS = -1; - public static final int DEFAULT_SWITCH_DS = 1; - public static final int SYN_STATUS_SWITCH_DS = 2; - private static final Pattern pattern = Pattern.compile("\\s*show\\s+slave\\s+status\\s*", Pattern.CASE_INSENSITIVE); - private String dbType; - private String dbDriver; - private int switchType; - private DBHostConfig[] writeHosts; - private Map readHosts; - private String name; - private int maxCon = SystemConfig.DEFAULT_POOL_SIZE; - private int minCon = 10; - private int balance = PhysicalDBPool.BALANCE_NONE; - private int writeType = PhysicalDBPool.WRITE_ONLYONE_NODE; - private String heartbeatSQL; - private boolean isShowSlaveSql = false; - private String connectionInitSql; - private int slaveThreshold = -1; - private String filters = "mergeStat"; - private long logTime = 300000; - private boolean tempReadHostAvailable = false; //如果写服务挂掉, 临时读服务是否继续可用 - - public DataHostConfig() { - super(); - } - - public DataHostConfig(String name, String dbType, String dbDriver, - DBHostConfig[] writeHosts, Map readHosts, int switchType, int slaveThreshold, boolean tempReadHostAvailable) { - super(); - this.name = name; - this.dbType = dbType; - this.dbDriver = dbDriver; - this.writeHosts = writeHosts; - this.readHosts = readHosts; - this.switchType = switchType; - this.slaveThreshold = slaveThreshold; - this.tempReadHostAvailable = tempReadHostAvailable; - } - - public boolean isTempReadHostAvailable() { - return this.tempReadHostAvailable; - } - - public int getSlaveThreshold() { - return slaveThreshold; - } - - public void setSlaveThreshold(int slaveThreshold) { - this.slaveThreshold = slaveThreshold; - } - - 
public int getSwitchType() { - return switchType; - } - - public String getConnectionInitSql() { - return connectionInitSql; - } - - public void setConnectionInitSql(String connectionInitSql) { - this.connectionInitSql = connectionInitSql; - } - - public int getWriteType() { - return writeType; - } - - public void setWriteType(int writeType) { - this.writeType = writeType; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public int getMaxCon() { - return maxCon; - } - - public void setMaxCon(int maxCon) { - this.maxCon = maxCon; - } - - public int getMinCon() { - return minCon; - } - - public void setMinCon(int minCon) { - this.minCon = minCon; - } - - public int getBalance() { - return balance; - } - - public void setBalance(int balance) { - this.balance = balance; - } - - public String getDbType() { - return dbType; - } - - public String getDbDriver() { - return dbDriver; - } - - public DBHostConfig[] getWriteHosts() { - return writeHosts; - } - - public Map getReadHosts() { - return readHosts; - } - - public String getHeartbeatSQL() { - return heartbeatSQL; - } - - public void setHeartbeatSQL(String heartbeatSQL) { - this.heartbeatSQL = heartbeatSQL; - Matcher matcher = pattern.matcher(heartbeatSQL); - if (matcher.find()) { - isShowSlaveSql = true; - } - } - - public boolean isShowSlaveSql() { - return isShowSlaveSql; - } - - public String getFilters() { - return filters; - } - - public void setFilters(String filters) { - this.filters = filters; - } - - public long getLogTime() { - return logTime; - } - - public void setLogTime(long logTime) { - this.logTime = logTime; - } - - public void setDbType(String dbType) { - this.dbType = dbType; - } - - public void setDbDriver(String dbDriver) { - this.dbDriver = dbDriver; - } - - public void setSwitchType(int switchType) { - this.switchType = switchType; - } - - public void setWriteHosts(DBHostConfig[] writeHosts) { - this.writeHosts = writeHosts; 
- } - - public void setReadHosts(Map readHosts) { - this.readHosts = readHosts; - } - - public void setIsShowSlaveSql(boolean isShowSlaveSql) { - this.isShowSlaveSql = isShowSlaveSql; - } -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/config/node/HostIndexConfig.java b/src/main/java/io/mycat/server/config/node/HostIndexConfig.java deleted file mode 100644 index a3c0f96be..000000000 --- a/src/main/java/io/mycat/server/config/node/HostIndexConfig.java +++ /dev/null @@ -1,17 +0,0 @@ -package io.mycat.server.config.node; - -import java.util.Properties; - -public class HostIndexConfig { - private Properties props = new Properties(); - - public Properties getProps() { - return props; - } - - public void setProps(Properties props) { - this.props = props; - } - - -} diff --git a/src/main/java/io/mycat/server/config/node/JdbcDriver.java b/src/main/java/io/mycat/server/config/node/JdbcDriver.java deleted file mode 100644 index 5042c0f6a..000000000 --- a/src/main/java/io/mycat/server/config/node/JdbcDriver.java +++ /dev/null @@ -1,30 +0,0 @@ -package io.mycat.server.config.node; - -/** - * - * @author Administrator - */ -public class JdbcDriver { - private String dbType; // 是哪种数据库的驱动,驱动对应的数据库的种类名称 - private String className; // 驱动类名 - // 后续可能还要增加其他字段 - - public JdbcDriver(){} - - public JdbcDriver(String dbType, String className){ - this.dbType = dbType; - this.className = className; - } - public String getDbType() { - return dbType; - } - public void setDbType(String dbType) { - this.dbType = dbType; - } - public String getClassName() { - return className; - } - public void setClassName(String className) { - this.className = className; - } -} diff --git a/src/main/java/io/mycat/server/config/node/MycatConfig.java b/src/main/java/io/mycat/server/config/node/MycatConfig.java deleted file mode 100644 index c726f5c2f..000000000 --- a/src/main/java/io/mycat/server/config/node/MycatConfig.java +++ /dev/null @@ -1,456 +0,0 @@ -/* - * Copyright (c) 2013, 
OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server.config.node; - -import io.mycat.MycatServer; -import io.mycat.backend.PhysicalDBNode; -import io.mycat.backend.PhysicalDBPool; -import io.mycat.server.config.cluster.MycatClusterConfig; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.loader.ConfigReLoader; -import io.mycat.util.TimeUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Map; -import java.util.concurrent.locks.ReentrantLock; - -/** - * @author mycat - */ -public class MycatConfig implements ConfigReLoader{ - private static final Logger LOGGER = LoggerFactory.getLogger("MycatConfig"); - - private static final int RELOAD = 1; - private static final int ROLLBACK = 2; - private static final int RELOAD_ALL = 3; - - private volatile SystemConfig system; - private volatile MycatClusterConfig cluster; - private volatile MycatClusterConfig _cluster; - private volatile QuarantineConfig quarantine; - private volatile QuarantineConfig _quarantine; - private volatile Map users; - private volatile Map _users; - private volatile Map schemas; - private volatile Map _schemas; - private volatile Map dataNodes; - private volatile Map _dataNodes; - private volatile Map dataHosts; - private volatile Map _dataHosts; - private volatile HostIndexConfig hostIndexConfig; - private volatile HostIndexConfig _hostIndexConfig; - private volatile CharsetConfig charsetConfig; - private volatile CharsetConfig _charsetConfig; - private volatile SequenceConfig sequenceConfig; - private volatile SequenceConfig _sequenceConfig; - - - private long reloadTime; - private long rollbackTime; - private int status; - private final ReentrantLock lock; - - public MycatConfig() { - ConfigInitializer confInit = new ConfigInitializer(true); - this.system = confInit.getSystem(); - this.users = confInit.getUsers(); - this.schemas = confInit.getSchemas(); - this.dataHosts = confInit.getDataHosts(); - this.charsetConfig = 
confInit.getCharsetConfig(); - this.sequenceConfig = confInit.getSequenceConfig(); - this.hostIndexConfig = confInit.getHostIndexs(); - - this.dataNodes = confInit.getDataNodes(); - for (PhysicalDBPool dbPool : dataHosts.values()) { - dbPool.setSchemas(getDataNodeSchemasOfDataHost(dbPool.getHostName())); - } - this.quarantine = confInit.getQuarantine(); - this.cluster = confInit.getCluster(); - - this.reloadTime = TimeUtil.currentTimeMillis(); - this.rollbackTime = -1L; - this.status = RELOAD; - this.lock = new ReentrantLock(); - } - - public SystemConfig getSystem() { - return system; - } - public Map getUsers() { - return users; - } - public Map getBackupUsers() { - return _users; - } - public Map getSchemas() { - return schemas; - } - public Map getBackupSchemas() { - return _schemas; - } - public Map getDataNodes() { - return dataNodes; - } - public String getHostIndex(String hostName, String index) { - if(this.hostIndexConfig.getProps().isEmpty() - || !this.hostIndexConfig.getProps().containsKey(hostName)){ - return index; - } - return String.valueOf(hostIndexConfig.getProps().get(hostName)); - } - public void setHostIndex(String hostName, int index) { - this.hostIndexConfig.getProps().put(hostName, index); - } - - public String[] getDataNodeSchemasOfDataHost(String dataHost) { - ArrayList schemas = new ArrayList(30); - for (PhysicalDBNode dn : dataNodes.values()) { - if (dn.getDbPool().getHostName().equals(dataHost)) { - schemas.add(dn.getDatabase()); - } - } - return schemas.toArray(new String[schemas.size()]); - } - - public Map getBackupDataNodes() { - return _dataNodes; - } - - public Map getDataHosts() { - return dataHosts; - } - - public Map getBackupDataHosts() { - return _dataHosts; - } - - public MycatClusterConfig getCluster() { - return cluster; - } - - public MycatClusterConfig getBackupCluster() { - return _cluster; - } - - public QuarantineConfig getQuarantine() { - return quarantine; - } - - public QuarantineConfig getBackupQuarantine() { - 
return _quarantine; - } - - public ReentrantLock getLock() { - return lock; - } - - public long getReloadTime() { - return reloadTime; - } - - public long getRollbackTime() { - return rollbackTime; - } - public CharsetConfig getBackupCharsetConfig() { - return _charsetConfig; - } - - public SequenceConfig getBackupSequenceConfig() { - return _sequenceConfig; - } - - public HostIndexConfig getBackupHostIndexs() { - return _hostIndexConfig; - } - - public CharsetConfig getCharsetConfig() { - return charsetConfig; - } - - public void setCharsetConfig(CharsetConfig charsetConfig) { - this.charsetConfig = charsetConfig; - } - - public SequenceConfig getSequenceConfig() { - return sequenceConfig; - } - - public void setSequenceConfig(SequenceConfig sequenceConfig) { - this.sequenceConfig = sequenceConfig; - } - - public boolean canRollback() { - if (_users == null || _schemas == null || _dataNodes == null - || _dataHosts == null || _cluster == null - || _quarantine == null || status == ROLLBACK) { - return false; - } else { - return true; - } - } - - public void reload(Map users, - Map schemas, - Map dataNodes, - Map dataHosts, - MycatClusterConfig cluster, - QuarantineConfig quarantine, - CharsetConfig charsetConfig, - SequenceConfig sequenceConfig, - HostIndexConfig hostIndexConfig,boolean reloadAll) { - apply(users, schemas, dataNodes, dataHosts, cluster, quarantine,charsetConfig,sequenceConfig,hostIndexConfig,reloadAll); - this.reloadTime = TimeUtil.currentTimeMillis(); - this.status = reloadAll?RELOAD_ALL:RELOAD; - } - - public void rollback(Map users, - Map schemas, - Map dataNodes, - Map dataHosts, - MycatClusterConfig cluster, - QuarantineConfig quarantine, - CharsetConfig charsetConfig, - SequenceConfig sequenceConfig, - HostIndexConfig hostIndexConfig) { - apply(users, schemas, dataNodes, dataHosts, cluster, quarantine,charsetConfig,sequenceConfig,hostIndexConfig,status==RELOAD_ALL); - this.rollbackTime = TimeUtil.currentTimeMillis(); - this.status = ROLLBACK; 
- } - - private void apply(Map users, - Map schemas, - Map dataNodes, - Map dataHosts, - MycatClusterConfig cluster, - QuarantineConfig quarantine, - CharsetConfig charsetConfig, - SequenceConfig sequenceConfig, - HostIndexConfig hostIndexConfig, - boolean isLoadAll) { - final ReentrantLock lock = this.lock; - lock.lock(); - try { - if(isLoadAll) - { - // stop datasource heartbeat - Map oldDataHosts = this.dataHosts; - if (oldDataHosts != null) - { - for (PhysicalDBPool n : oldDataHosts.values()) - { - if (n != null) - { - n.stopHeartbeat(); - } - } - } - this._dataNodes = this.dataNodes; - this._dataHosts = this.dataHosts; - } - this._users = this.users; - this._schemas = this.schemas; - - this._cluster = this.cluster; - this._quarantine = this.quarantine; - this._charsetConfig = this.charsetConfig; - this._sequenceConfig = this.sequenceConfig; - this._hostIndexConfig = this.hostIndexConfig; - - if(isLoadAll) - { - // start datasoruce heartbeat - if (dataNodes != null) - { - for (PhysicalDBPool n : dataHosts.values()) - { - if (n != null) - { - n.startHeartbeat(); - } - } - } - this.dataNodes = dataNodes; - this.dataHosts = dataHosts; - } - this.users = users; - this.schemas = schemas; - this.cluster = cluster; - this.quarantine = quarantine; - this.charsetConfig = charsetConfig; - this.sequenceConfig = sequenceConfig; - this.hostIndexConfig = hostIndexConfig; - - } finally { - lock.unlock(); - } - } - - public boolean initDatasource(){ - LOGGER.info("Initialize dataHost ..."); - for (PhysicalDBPool node : dataHosts.values()) { - String index = this.getHostIndex(node.getHostName(),"0"); - if (!"0".equals(index)) { - LOGGER.info("init datahost: " + node.getHostName() + " to use datasource index:" + index); - } - node.init(Integer.valueOf(index)); - node.startHeartbeat(); - } - return true; - } - public boolean reloadDatasource(){ - Map cNodes = this.getDataHosts(); - boolean reloadStatus = true; - for (PhysicalDBPool dn : dataHosts.values()) { - 
dn.setSchemas(MycatServer.getInstance().getConfig().getDataNodeSchemasOfDataHost(dn.getHostName())); - //init datahost - String index = this.getHostIndex(dn.getHostName(), "0"); - if (!"0".equals(index)) { - LOGGER.info("init datahost: " + dn.getHostName() + " to use datasource index:" + index); - } - dn.init(Integer.valueOf(index)); - //dn.init(0); - if (!dn.isInitSuccess()) { - reloadStatus = false; - // 如果重载不成功,则清理已初始化的资源。 - LOGGER.warn("reload failed ,clear previously created datasources "); - dn.clearDataSources("reload config"); - dn.stopHeartbeat(); - break; - } - } - - // 处理旧的资源 - for (PhysicalDBPool dn : cNodes.values()) { - dn.clearDataSources("reload config clear old datasources"); - dn.stopHeartbeat(); - } - - return reloadStatus; - } - public boolean rebackDatasource(){ - // 如果回滚已经存在的pool - boolean rollbackStatus = true; - Map cNodes = this.getDataHosts(); - for (PhysicalDBPool dn : dataHosts.values()) { - dn.init(dn.getActivedIndex()); - if (!dn.isInitSuccess()) { - rollbackStatus = false; - break; - } - } - // 如果回滚不成功,则清理已初始化的资源。 - if (!rollbackStatus) { - for (PhysicalDBPool dn : dataHosts.values()) { - dn.clearDataSources("rollbackup config"); - dn.stopHeartbeat(); - } - return false; - } - - // 处理旧的资源 - for (PhysicalDBPool dn : cNodes.values()) { - dn.clearDataSources("clear old config "); - dn.stopHeartbeat(); - } - return rollbackStatus; - } - - @Override - public void reloadSchemaConfig(String schema) { - // TODO Auto-generated method stub - - } - - @Override - public void reloadSchemaConfigs() { - // TODO Auto-generated method stub - - } - - @Override - public void reloadDataNodeConfigs() { - // TODO Auto-generated method stub - - } - - @Override - public void reloadDataHostConfigs() { - // TODO Auto-generated method stub - - } - - @Override - public void reloadTableRuleConfigs() { - // TODO Auto-generated method stub - - } - - @Override - public void reloadSystemConfig() { - // TODO Auto-generated method stub - - } - - @Override - public void 
reloadUserConfig(String user) { - // TODO Auto-generated method stub - - } - - @Override - public void reloadUserConfigs() { - // TODO Auto-generated method stub - - } - - @Override - public void reloadQuarantineConfigs() { - // TODO Auto-generated method stub - - } - - @Override - public void reloadClusterConfigs() { - // TODO Auto-generated method stub - - } - - @Override - public void reloadCharsetConfigs() { - // TODO Auto-generated method stub - - } - - @Override - public void reloadHostIndexConfig() { - // TODO Auto-generated method stub - - } - - - - -} diff --git a/src/main/java/io/mycat/server/config/node/SequenceConfig.java b/src/main/java/io/mycat/server/config/node/SequenceConfig.java deleted file mode 100644 index e8af050c9..000000000 --- a/src/main/java/io/mycat/server/config/node/SequenceConfig.java +++ /dev/null @@ -1,31 +0,0 @@ -package io.mycat.server.config.node; - -import java.util.HashMap; -import java.util.Map; - -public class SequenceConfig { - private String type; - private String vclass; - private Map props = new HashMap(); - - public Map getProps() { - return props; - } - public void setProps(Map props) { - this.props = props; - } - public String getType() { - return type; - } - public void setType(String type) { - this.type = type; - } - public String getVclass() { - return vclass; - } - public void setVclass(String vclass) { - this.vclass = vclass; - } - - -} diff --git a/src/main/java/io/mycat/server/config/node/TableConfig.java b/src/main/java/io/mycat/server/config/node/TableConfig.java deleted file mode 100644 index 5546698fa..000000000 --- a/src/main/java/io/mycat/server/config/node/TableConfig.java +++ /dev/null @@ -1,311 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. 
you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.config.node; - -import com.google.common.base.Strings; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Random; -import java.util.Set; - -import io.mycat.util.SplitUtil; - -/** - * @author mycat - */ -public class TableConfig { - public static final int TYPE_GLOBAL_TABLE = 1; - public static final int TYPE_GLOBAL_DEFAULT = 0; - private String name; - private String primaryKey; - private boolean autoIncrement; - private boolean needAddLimit; - private Set dbTypes; - private int tableType; - private ArrayList dataNodes; - private RuleConfig rule; - private String ruleName; - private String partitionColumn; - private boolean ruleRequired; - private TableConfig parentTC; - private boolean childTable; - private String joinKey; - private String parentKey; - private String locateRTableKeySql; - // only has one level of parent - private boolean secondLevel; - private boolean partionKeyIsPrimaryKey; - private Random rand = new Random(); - - public void setName(String name) { - if (name == null) { - throw new IllegalArgumentException("table name is null"); - } - this.name = 
name.toUpperCase(); - } - - public void setPrimaryKey(String primaryKey) { - this.primaryKey = primaryKey; - } - - public void setAutoIncrement(boolean autoIncrement) { - this.autoIncrement = autoIncrement; - } - - public void setNeedAddLimit(boolean needAddLimit) { - this.needAddLimit = needAddLimit; - } - - public void setDbTypes(Set dbTypes) { - this.dbTypes = dbTypes; - } - - public void setTableType(int tableType) { - this.tableType = TableConfig.TYPE_GLOBAL_DEFAULT == tableType ? - TableConfig.TYPE_GLOBAL_DEFAULT : TableConfig.TYPE_GLOBAL_TABLE; - } - - public void setDataNode(String dataNode) { - if (Strings.isNullOrEmpty(dataNode)) { - throw new IllegalArgumentException("dataNode name is null"); - } - - String theDataNodes[] = SplitUtil.split(dataNode, ',', '$', '-'); - - if (theDataNodes == null || theDataNodes.length <= 0) { - throw new IllegalArgumentException("invalid table dataNodes: " + dataNode); - } - - this.dataNodes = new ArrayList<>(Arrays.asList(theDataNodes)); - } - - public void setRule(RuleConfig rule) { - this.partitionColumn = (rule == null) ? null : rule.getColumn(); - this.partionKeyIsPrimaryKey = (this.partitionColumn == null) ? 
- this.primaryKey == null : this.partitionColumn.equals(this.primaryKey); - this.rule = rule; - } - - - - public void setRuleRequired(boolean ruleRequired) { - this.ruleRequired = ruleRequired; - } - - public void setParentTC(TableConfig parentTC) { - this.parentTC = parentTC; - if (this.parentTC != null) { - this.locateRTableKeySql = genLocateRootParentSQL(); - this.secondLevel = (parentTC.parentTC == null); - } - } - - public void setChildTable(boolean childTable) { - this.childTable = childTable; - } - - public void setJoinKey(String joinKey) { - this.joinKey = joinKey; - } - - public void setParentKey(String parentKey) { - this.parentKey = parentKey; - } - - public TableConfig(){ - super(); - } - - public TableConfig(String name, String primaryKey, boolean autoIncrement, boolean needAddLimit, - int tableType, String dataNode, Set dbType, RuleConfig rule, - boolean ruleRequired, TableConfig parentTC, boolean isChildTable, String joinKey, - String parentKey) { - super(); - this.setName(name); - this.setDataNode(dataNode); - this.setRule(rule); - this.setParentTC(parentTC); - this.primaryKey = primaryKey; - this.autoIncrement = autoIncrement; - this.needAddLimit = needAddLimit; - this.tableType = tableType; - this.dbTypes = dbType; - this.ruleRequired = ruleRequired; - this.childTable = isChildTable; - this.joinKey = joinKey; - this.parentKey = parentKey; - this.parentTC = parentTC; - this.checkConfig(); - } - - public String getPrimaryKey() { - return primaryKey; - } - - public Set getDbTypes() - { - return dbTypes; - } - - public boolean isAutoIncrement() { - return autoIncrement; - } - - public boolean isNeedAddLimit() { - return needAddLimit; - } - - public boolean isSecondLevel() { - return secondLevel; - } - - public String getLocateRTableKeySql() { - return locateRTableKeySql; - } - - public boolean isGlobalTable() { - return this.tableType == TableConfig.TYPE_GLOBAL_TABLE; - } - - public String genLocateRootParentSQL() { - TableConfig tb = this; - 
StringBuilder tableSb = new StringBuilder(); - StringBuilder condition = new StringBuilder(); - TableConfig prevTC = null; - int level = 0; - String latestCond = null; - while (tb.parentTC != null) { - tableSb.append(tb.parentTC.name).append(','); - String relation = null; - if (level == 0) { - latestCond = " " + tb.parentTC.getName() + '.' + tb.parentKey - + "="; - } else { - relation = tb.parentTC.getName() + '.' + tb.parentKey + '=' - + tb.name + '.' + tb.joinKey; - condition.append(relation).append(" AND "); - } - level++; - prevTC = tb; - tb = tb.parentTC; - } - String sql = "SELECT " - + prevTC.parentTC.name - + '.' - + prevTC.parentKey - + " FROM " - + tableSb.substring(0, tableSb.length() - 1) - + " WHERE " - + ((level < 2) ? latestCond : condition.toString() + latestCond); - // System.out.println(this.name+" sql " + sql); - return sql; - - } - - public String getPartitionColumn() { - return partitionColumn; - } - - public int getTableType() { - return tableType; - } - - /** - * get root parent - * - * @return - */ - public TableConfig getRootParent() { - if (parentTC == null) { - return null; - } - TableConfig preParent = parentTC; - TableConfig parent = preParent.getParentTC(); - - while (parent != null) { - preParent = parent; - parent = parent.getParentTC(); - } - return preParent; - } - - public TableConfig getParentTC() { - return parentTC; - } - - public boolean isChildTable() { - return childTable; - } - - public String getJoinKey() { - return joinKey; - } - - public String getParentKey() { - return parentKey; - } - - /** - * @return upper-case - */ - public String getName() { - return name; - } - - public ArrayList getDataNodes() { - return dataNodes; - } - - public String getRandomDataNode() { - int index = Math.abs(rand.nextInt()) % dataNodes.size(); - return dataNodes.get(index); - } - - public boolean isRuleRequired() { - return ruleRequired; - } - - public RuleConfig getRule() { - return rule; - } - - public boolean primaryKeyIsPartionKey() { 
- return partionKeyIsPrimaryKey; - } - - public void checkConfig(){ - if (this.ruleRequired && this.rule == null) { - throw new IllegalArgumentException("ruleRequired but rule is null"); - } - } - - public String getRuleName() { - return ruleName; - } - - public void setRuleName(String ruleName) { - this.ruleName = ruleName; - } -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/executors/MultiNodeCoordinator.java b/src/main/java/io/mycat/server/executors/MultiNodeCoordinator.java deleted file mode 100644 index 117a89f86..000000000 --- a/src/main/java/io/mycat/server/executors/MultiNodeCoordinator.java +++ /dev/null @@ -1,136 +0,0 @@ -package io.mycat.server.executors; - -import io.mycat.backend.BackendConnection; -import io.mycat.route.RouteResultsetNode; -import io.mycat.server.NonBlockingSession; -import io.mycat.server.sqlcmd.SQLCtrlCommand; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -public class MultiNodeCoordinator implements ResponseHandler { - public static final Logger LOGGER = LoggerFactory - .getLogger(MultiNodeCoordinator.class); - private final AtomicInteger runningCount = new AtomicInteger(0); - private final AtomicInteger faileCount = new AtomicInteger(0); - private volatile int nodeCount; - private final NonBlockingSession session; - private SQLCtrlCommand cmdHandler; - private final AtomicBoolean failed = new AtomicBoolean(false); - - public MultiNodeCoordinator(NonBlockingSession session) { - this.session = session; - } - - public void executeBatchNodeCmd(SQLCtrlCommand cmdHandler) { - this.cmdHandler = cmdHandler; - final int initCount = session.getTargetCount(); - runningCount.set(initCount); - nodeCount = initCount; - failed.set(false); - faileCount.set(0); - // 执行 - int started = 0; - for (RouteResultsetNode rrn : session.getTargetKeys()) { - if (rrn == null) { - 
LOGGER.error("null is contained in RoutResultsetNodes, source = " - + session.getSource()); - continue; - } - final BackendConnection conn = session.getTarget(rrn); - if (conn != null) { - conn.setResponseHandler(this); - cmdHandler.sendCommand(session, conn); - ++started; - } - } - - if (started < nodeCount) { - runningCount.set(started); - LOGGER.warn("some connection failed to execut " - + (nodeCount - started)); - /** - * assumption: only caused by front-end connection close.
- * Otherwise, packet must be returned to front-end - */ - failed.set(true); - } - } - - private boolean finished() { - int val = runningCount.decrementAndGet(); - return (val == 0); - } - - @Override - public void connectionError(Throwable e, BackendConnection conn) { - } - - @Override - public void connectionAcquired(BackendConnection conn) { - - } - - @Override - public void errorResponse(byte[] err, BackendConnection conn) { - faileCount.incrementAndGet(); - - if (this.cmdHandler.releaseConOnErr()) { - session.releaseConnection(conn); - } else { - - session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), - false); - } - if (this.finished()) { - cmdHandler.errorResponse(session, err, this.nodeCount, - this.faileCount.get()); - if (cmdHandler.isAutoClearSessionCons()) { - session.clearResources(session.getSource().isTxInterrupted()); - } - } - - } - - @Override - public void okResponse(byte[] ok, BackendConnection conn) { - if (this.cmdHandler.relaseConOnOK()) { - session.releaseConnection(conn); - } else { - session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), - false); - } - if (this.finished()) { - cmdHandler.okResponse(session, ok); - if (cmdHandler.isAutoClearSessionCons()) { - session.clearResources(false); - } - - } - - } - - @Override - public void fieldEofResponse(byte[] header, List fields, - byte[] eof, BackendConnection conn) { - - } - - @Override - public void rowResponse(byte[] row, BackendConnection conn) { - - } - - @Override - public void rowEofResponse(byte[] eof, BackendConnection conn) { - } - - @Override - public void connectionClose(BackendConnection conn, String reason) { - - } - -} diff --git a/src/main/java/io/mycat/server/executors/MultiNodeQueryHandler.java b/src/main/java/io/mycat/server/executors/MultiNodeQueryHandler.java deleted file mode 100644 index c7a4a4eae..000000000 --- a/src/main/java/io/mycat/server/executors/MultiNodeQueryHandler.java +++ /dev/null @@ -1,547 +0,0 @@ -/* - * Copyright (c) 2013, 
OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server.executors; - -import io.mycat.MycatServer; -import io.mycat.backend.BackendConnection; -import io.mycat.backend.PhysicalDBNode; -import io.mycat.backend.nio.MySQLBackendConnection; -import io.mycat.cache.LayerCachePool; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.route.RouteResultset; -import io.mycat.route.RouteResultsetNode; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.NonBlockingSession; -import io.mycat.server.packet.BinaryRowDataPacket; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.OkPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.LoadDataUtil; -import io.mycat.server.parser.ServerParse; -import io.mycat.sqlengine.mpp.ColMeta; -import io.mycat.sqlengine.mpp.DataMergeService; -import io.mycat.sqlengine.mpp.MergeCol; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.*; -import java.util.concurrent.locks.ReentrantLock; - -/** - * @author mycat - */ -public class MultiNodeQueryHandler extends MultiNodeHandler implements - LoadDataResponseHandler { - public static final Logger LOGGER = LoggerFactory - .getLogger(MultiNodeQueryHandler.class); - - private final RouteResultset rrs; - private final NonBlockingSession session; - // private final CommitNodeHandler icHandler; - private final DataMergeService dataMergeSvr; - private final boolean autocommit; - private String priamaryKeyTable = null; - private int primaryKeyIndex = -1; - private int fieldCount = 0; - private final ReentrantLock lock; - private long affectedRows; - private long insertId; - private volatile boolean fieldsReturned; - private int okCount; - private final boolean isCallProcedure; - private boolean prepared; - private List fieldPackets = new ArrayList(); - - public 
MultiNodeQueryHandler(int sqlType, RouteResultset rrs, - boolean autocommit, NonBlockingSession session) { - super(session); - if (rrs.getNodes() == null) { - throw new IllegalArgumentException("routeNode is null!"); - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("execute mutinode query " + rrs.getStatement()); - } - this.rrs = rrs; - if (ServerParse.SELECT == sqlType && rrs.needMerge()) { - dataMergeSvr = new DataMergeService(this, rrs); - } else { - dataMergeSvr = null; - } - isCallProcedure = rrs.isCallStatement(); - this.autocommit = session.getSource().isAutocommit(); - this.session = session; - this.lock = new ReentrantLock(); - // this.icHandler = new CommitNodeHandler(session); - if (dataMergeSvr != null) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("has data merge logic "); - } - } - } - - protected void reset(int initCount) { - super.reset(initCount); - this.okCount = initCount; - } - - public NonBlockingSession getSession() { - return session; - } - - public void execute() throws Exception { - final ReentrantLock lock = this.lock; - lock.lock(); - try { - this.reset(rrs.getNodes().length); - this.fieldsReturned = false; - this.affectedRows = 0L; - this.insertId = 0L; - } finally { - lock.unlock(); - } - MycatConfig conf = MycatServer.getInstance().getConfig(); - - for (final RouteResultsetNode node : rrs.getNodes()) { - final BackendConnection conn = session.getTarget(node); - if (session.tryExistsCon(conn, node)) { - _execute(conn, node); - } else { - // create new connection - PhysicalDBNode dn = conf.getDataNodes().get(node.getName()); - dn.getConnection(dn.getDatabase(), autocommit, node, this, node); - } - - } - } - - private void _execute(BackendConnection conn, RouteResultsetNode node) { - if (clearIfSessionClosed(session)) { - return; - } - conn.setResponseHandler(this); - try { - conn.execute(node, session.getSource(), autocommit); - } catch (IOException e) { - connectionError(e, conn); - } - } - - @Override - public void 
connectionAcquired(final BackendConnection conn) { - final RouteResultsetNode node = (RouteResultsetNode) conn - .getAttachment(); - session.bindConnection(node, conn); - _execute(conn, node); - } - - private boolean decrementOkCountBy(int finished) { - lock.lock(); - try { - return --okCount == 0; - } finally { - lock.unlock(); - } - } - - @Override - public void okResponse(byte[] data, BackendConnection conn) { - boolean executeResponse = conn.syncAndExcute(); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("received ok response ,executeResponse:" - + executeResponse + " from " + conn); - } - if (executeResponse) { - if (clearIfSessionClosed(session)) { - return; - } else if (canClose(conn, false)) { - return; - } - MySQLFrontConnection source = session.getSource(); - OkPacket ok = new OkPacket(); - ok.read(data); - lock.lock(); - try { - // 判断是否是全局表,如果是,执行行数不做累加,以最后一次执行的为准。 - if (!rrs.isGlobalTable()) { - affectedRows += ok.affectedRows; - } else { - affectedRows = ok.affectedRows; - } - if (ok.insertId > 0) { - insertId = (insertId == 0) ? ok.insertId : Math.min( - insertId, ok.insertId); - } - } finally { - lock.unlock(); - } - // 对于存储过程,其比较特殊,查询结果返回EndRow报文以后,还会再返回一个OK报文,才算结束 - boolean isEndPacket = isCallProcedure ? decrementOkCountBy(1) - : decrementCountBy(1); - if (isEndPacket) { - if (this.autocommit) {// clear all connections - session.releaseConnections(false); - } - if (this.isFail() || session.closed()) { - tryErrorFinished(true); - return; - } - - lock.lock(); - try { - if (rrs.isLoadData()) { - byte lastPackId = source.getLoadDataInfileHandler() - .getLastPackId(); - ok.packetId = ++lastPackId;// OK_PACKET - ok.message = ("Records: " + affectedRows + " Deleted: 0 Skipped: 0 Warnings: 0") - .getBytes();// 此处信息只是为了控制台给人看的 - source.getLoadDataInfileHandler().clear(); - } else { - ok.packetId = ++packetId;// OK_PACKET - } - - ok.affectedRows = affectedRows; - ok.serverStatus = source.isAutocommit() ? 
2 : 1; - if (insertId > 0) { - ok.insertId = insertId; - source.setLastInsertId(insertId); - } - ok.write(source); - } catch (Exception e) { - handleDataProcessException(e); - } finally { - lock.unlock(); - } - } - } - } - - @Override - public void rowEofResponse(final byte[] eof, BackendConnection conn) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("on row end reseponse " + conn); - } - if (errorRepsponsed.get()) { - conn.close(this.error); - return; - } - - final MySQLFrontConnection source = session.getSource(); - if (!isCallProcedure) { - if (clearIfSessionClosed(session)) { - return; - } else if (canClose(conn, false)) { - return; - } - } - - if (decrementCountBy(1)) { - if (!this.isCallProcedure) { - if (this.autocommit) {// clear all connections - session.releaseConnections(false); - } - - if (this.isFail() || session.closed()) { - tryErrorFinished(true); - return; - } - } - if (dataMergeSvr != null) { - try { - dataMergeSvr.outputMergeResult(session, eof); - } catch (Exception e) { - handleDataProcessException(e); - } - - } else { - try { - lock.lock(); - eof[3] = ++packetId; - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("last packet id:" + packetId); - } - source.write(eof); - } finally { - lock.unlock(); - - } - } - } - } - - public void outputMergeResult(final MySQLFrontConnection source, - final byte[] eof) { - try { - lock.lock(); - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - final DataMergeService dataMergeService = this.dataMergeSvr; - final RouteResultset rrs = dataMergeService.getRrs(); - - // 处理limit语句 - int start = rrs.getLimitStart(); - int end = start + rrs.getLimitSize(); - - /* - * modify by coder_czp@126.com#2015/11/2 优化为通过索引获取,避免无效循环 - * Collection results = dataMergeSvr.getResults(eof); - * Iterator itor = results.iterator(); if - * (LOGGER.isDebugEnabled()) { - * LOGGER.debug("output merge result ,total data " + results.size() - * + " start :" + start + " end :" + end + " package id 
start:" + - * packetId); } int i = 0; while (itor.hasNext()) { RowDataPacket - * row = itor.next(); if (i < start) { i++; continue; } else if (i - * == end) { break; } i++; row.packetId = ++packetId; buffer = - * row.write(buffer, source, true); } - */ - // 对于不需要排序的语句,返回的数据只有rrs.getLimitSize() - List results = dataMergeSvr.getResults(eof); - if (start < 0) - start = 0; - if(rrs.getLimitSize()<0 || end > results.size()) - { - end=results.size(); - } -// if (rrs.getOrderByCols() == null) { -// end = results.size(); -// start = 0; -// } - for (int i = start; i < end; i++) { - RowDataPacket row = results.get(i); - if(prepared) { - BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket(); - binRowDataPk.read(fieldPackets, row); - binRowDataPk.packetId = ++packetId; - binRowDataPk.write(bufferArray); - } else { - row.packetId = ++packetId; - row.write(bufferArray); - } - } - - eof[3] = ++packetId; - bufferArray.write(eof); - source.write(bufferArray); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("last packet id:" + packetId); - } - - } catch (Exception e) { - handleDataProcessException(e); - } finally { - lock.unlock(); - dataMergeSvr.clear(); - } - } - - @Override - public void fieldEofResponse(byte[] header, List fields, - byte[] eof, BackendConnection conn) { - MySQLFrontConnection source = null; - if (fieldsReturned) { - return; - } - lock.lock(); - try { - if (fieldsReturned) { - return; - } - fieldsReturned = true; - - boolean needMerg = (dataMergeSvr != null) - && dataMergeSvr.getRrs().needMerge(); - Set shouldRemoveAvgField = new HashSet<>(); - Set shouldRenameAvgField = new HashSet<>(); - if (needMerg) { - Map mergeColsMap = dataMergeSvr.getRrs() - .getMergeCols(); - if (mergeColsMap != null) { - for (Map.Entry entry : mergeColsMap - .entrySet()) { - String key = entry.getKey(); - int mergeType = entry.getValue(); - if (MergeCol.MERGE_AVG == mergeType - && mergeColsMap.containsKey(key + "SUM")) { - shouldRemoveAvgField.add((key + "COUNT") - 
.toUpperCase()); - shouldRenameAvgField.add((key + "SUM") - .toUpperCase()); - } - } - } - - } - - source = session.getSource(); - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - fieldCount = fields.size(); - if (shouldRemoveAvgField.size() > 0) { - ResultSetHeaderPacket packet = new ResultSetHeaderPacket(); - packet.packetId = ++packetId; - packet.fieldCount = fieldCount - shouldRemoveAvgField.size(); - packet.write(bufferArray); - } else { - header[3] = ++packetId; - bufferArray.write(header); - } - - String primaryKey = null; - if (rrs.hasPrimaryKeyToCache()) { - String[] items = rrs.getPrimaryKeyItems(); - priamaryKeyTable = items[0]; - primaryKey = items[1]; - } - - Map columToIndx = new HashMap( - fieldCount); - - for (int i = 0, len = fieldCount; i < len; ++i) { - boolean shouldSkip = false; - byte[] field = fields.get(i); - if (needMerg) { - FieldPacket fieldPkg = new FieldPacket(); - fieldPkg.read(field); - fieldPackets.add(fieldPkg); - String fieldName = new String(fieldPkg.name).toUpperCase(); - if (columToIndx != null - && !columToIndx.containsKey(fieldName)) { - if (shouldRemoveAvgField.contains(fieldName)) { - shouldSkip = true; - } - if (shouldRenameAvgField.contains(fieldName)) { - String newFieldName = fieldName.substring(0, - fieldName.length() - 3); - fieldPkg.name = newFieldName.getBytes(); - fieldPkg.packetId = ++packetId; - shouldSkip = true; - fieldPkg.write(bufferArray); - } - - columToIndx.put(fieldName, - new ColMeta(i, fieldPkg.type)); - } - } else if (primaryKey != null && primaryKeyIndex == -1) { - // find primary key index - FieldPacket fieldPkg = new FieldPacket(); - fieldPkg.read(field); - fieldPackets.add(fieldPkg); - String fieldName = new String(fieldPkg.name); - if (primaryKey.equalsIgnoreCase(fieldName)) { - primaryKeyIndex = i; - fieldCount = fields.size(); - } - } - if (!shouldSkip) { - field[3] = ++packetId; - bufferArray.write(field); - } - } - eof[3] = ++packetId; - 
bufferArray.write(eof); - source.write(bufferArray); - if (dataMergeSvr != null) { - dataMergeSvr.onRowMetaData(columToIndx, fieldCount); - - } - } catch (Exception e) { - handleDataProcessException(e); - } finally { - lock.unlock(); - } - } - - public void handleDataProcessException(Exception e) { - if (!errorRepsponsed.get()) { - this.error = e.toString(); - LOGGER.warn("caught exception ", e); - setFail(e.toString()); - this.tryErrorFinished(true); - } - } - - @Override - public void rowResponse(final byte[] row, final BackendConnection conn) { - if (errorRepsponsed.get()) { - conn.close(error); - return; - } - lock.lock(); - try { - if (dataMergeSvr != null) { - final String dnName = ((RouteResultsetNode) conn - .getAttachment()).getName(); - dataMergeSvr.onNewRecord(dnName, row); - - } else { - if (primaryKeyIndex != -1) {// cache - // primaryKey-> - // dataNode - RowDataPacket rowDataPkg = new RowDataPacket(fieldCount); - rowDataPkg.read(row); - String primaryKey = new String( - rowDataPkg.fieldValues.get(primaryKeyIndex)); - LayerCachePool pool = MycatServer.getInstance() - .getRouterservice().getTableId2DataNodeCache(); - String dataNode = ((RouteResultsetNode) conn - .getAttachment()).getName(); - pool.putIfAbsent(priamaryKeyTable, primaryKey, dataNode); - } - row[3] = ++packetId; - session.getSource().write(row); - } - - } catch (Exception e) { - - handleDataProcessException(e); - } finally { - lock.unlock(); - } - } - - @Override - public void clearResources() { - if (dataMergeSvr != null) { - dataMergeSvr.clear(); - } - } - - @Override - public void requestDataResponse(byte[] data, BackendConnection conn) { - LoadDataUtil.requestFileDataResponse(data, - (MySQLBackendConnection) conn); - } - - public boolean isPrepared() { - return prepared; - } - - public void setPrepared(boolean prepared) { - this.prepared = prepared; - } - -} diff --git a/src/main/java/io/mycat/server/executors/SingleNodeHandler.java 
b/src/main/java/io/mycat/server/executors/SingleNodeHandler.java deleted file mode 100644 index 609de1a95..000000000 --- a/src/main/java/io/mycat/server/executors/SingleNodeHandler.java +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server.executors; - -import com.google.common.base.Strings; -import io.mycat.MycatServer; -import io.mycat.backend.BackendConnection; -import io.mycat.backend.PhysicalDBNode; -import io.mycat.backend.nio.MySQLBackendConnection; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.route.RouteResultset; -import io.mycat.route.RouteResultsetNode; -import io.mycat.server.ErrorCode; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.NonBlockingSession; -import io.mycat.server.packet.BinaryRowDataPacket; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.packet.ErrorPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.OkPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.LoadDataUtil; -import io.mycat.server.parser.ServerParse; -import io.mycat.server.parser.ServerParseShow; -import io.mycat.server.response.ShowTables; -import io.mycat.util.StringUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; - -/** - * @author mycat - */ -public class SingleNodeHandler implements ResponseHandler, Terminatable, - LoadDataResponseHandler { - public static final Logger LOGGER = LoggerFactory - .getLogger(SingleNodeHandler.class); - private final RouteResultsetNode node; - private final RouteResultset rrs; - private final NonBlockingSession session; - // only one thread access at one time no need lock - private volatile byte packetId; - private volatile boolean isRunning; - private Runnable terminateCallBack; - private boolean prepared; - private int fieldCount; - private List fieldPackets = new ArrayList(); - private volatile boolean isDefaultNodeShowTable; - private Set shardingTablesSet; - - public SingleNodeHandler(RouteResultset rrs, NonBlockingSession session) { - this.rrs = 
rrs; - this.node = rrs.getNodes()[0]; - if (node == null) { - throw new IllegalArgumentException("routeNode is null!"); - } - if (session == null) { - throw new IllegalArgumentException("session is null!"); - } - this.session = session; - MySQLFrontConnection source = session.getSource(); - String schema=source.getSchema(); - if(schema!=null&&ServerParse.SHOW==rrs.getSqlType()) - { - SchemaConfig schemaConfig= MycatServer.getInstance().getConfig().getSchemas().get(schema); - int type= ServerParseShow.tableCheck(rrs.getStatement(),0) ; - isDefaultNodeShowTable=(ServerParseShow.TABLES==type &&!Strings.isNullOrEmpty(schemaConfig.getDataNode())); - - if(isDefaultNodeShowTable) - { - shardingTablesSet = ShowTables.getTableSet(source, rrs.getStatement()); - } - } - } - - @Override - public void terminate(Runnable callback) { - boolean zeroReached = false; - - if (isRunning) { - terminateCallBack = callback; - } else { - zeroReached = true; - } - - if (zeroReached) { - callback.run(); - } - } - - private void endRunning() { - Runnable callback = null; - if (isRunning) { - isRunning = false; - callback = terminateCallBack; - terminateCallBack = null; - } - - if (callback != null) { - callback.run(); - } - } - - public void execute() throws Exception { - MySQLFrontConnection sc = session.getSource(); - this.isRunning = true; - this.packetId = 0; - final BackendConnection conn = session.getTarget(node); - if (session.tryExistsCon(conn, node)) { - _execute(conn); - } else { - // create new connection - - MycatConfig conf = MycatServer.getInstance().getConfig(); - PhysicalDBNode dn = conf.getDataNodes().get(node.getName()); - dn.getConnection(dn.getDatabase(), sc.isAutocommit(), node, this, - node); - } - - } - - @Override - public void connectionAcquired(final BackendConnection conn) { - session.bindConnection(node, conn); - _execute(conn); - - } - - private void _execute(BackendConnection conn) { - if (session.closed()) { - endRunning(); - session.clearResources(true); - 
return; - } - conn.setResponseHandler(this); - try { - conn.execute(node, session.getSource(), session.getSource() - .isAutocommit()); - } catch (Exception e1) { - executeException(conn, e1); - return; - } - } - - private void executeException(BackendConnection c, Exception e) { - ErrorPacket err = new ErrorPacket(); - err.packetId = ++packetId; - err.errno = ErrorCode.ERR_FOUND_EXCEPION; - err.message = StringUtil.encode(e.toString(), session.getSource() - .getCharset()); - - this.backConnectionErr(err, c); - } - - @Override - public void connectionError(Throwable e, BackendConnection conn) { - - endRunning(); - ErrorPacket err = new ErrorPacket(); - err.packetId = ++packetId; - err.errno = ErrorCode.ER_NEW_ABORTING_CONNECTION; - err.message = StringUtil.encode(e.getMessage(), session.getSource() - .getCharset()); - MySQLFrontConnection source = session.getSource(); - err.write(source); - } - - @Override - public void errorResponse(byte[] data, BackendConnection conn) { - ErrorPacket err = new ErrorPacket(); - err.read(data); - err.packetId = ++packetId; - backConnectionErr(err, conn); - - } - - private void backConnectionErr(ErrorPacket errPkg, BackendConnection conn) { - endRunning(); - String errmgs = " errno:" + errPkg.errno + " " - + new String(errPkg.message); - LOGGER.warn("execute sql err :" + errmgs + " con:" + conn); - session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false); - MySQLFrontConnection source = session.getSource(); - source.setTxInterrupt(errmgs); - errPkg.write(source); - } - - @Override - public void okResponse(byte[] data, BackendConnection conn) { - boolean executeResponse = conn.syncAndExcute(); - if (executeResponse) { - session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), - false); - endRunning(); - MySQLFrontConnection source = session.getSource(); - OkPacket ok = new OkPacket(); - ok.read(data); - if (rrs.isLoadData()) { - byte lastPackId = source.getLoadDataInfileHandler() - .getLastPackId(); - ok.packetId = 
++lastPackId;// OK_PACKET - source.getLoadDataInfileHandler().clear(); - } else { - ok.packetId = ++packetId;// OK_PACKET - } - ok.serverStatus = source.isAutocommit() ? 2 : 1; - source.setLastInsertId(ok.insertId); - ok.write(source); - - } - } - - @Override - public void rowEofResponse(byte[] eof, BackendConnection conn) { - MySQLFrontConnection source = session.getSource(); - - // 判断是调用存储过程的话不能在这里释放链接 - if (!rrs.isCallStatement()) { - session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), - false); - endRunning(); - } - - eof[3] = ++packetId; - source.write(eof); - } - - @Override - public void fieldEofResponse(byte[] header, List fields, - byte[] eof, BackendConnection conn) { - header[3] = ++packetId; - MySQLFrontConnection source = session.getSource(); - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - bufferArray.write(header); - for (int i = 0, len = fields.size(); i < len; ++i) { - byte[] field = fields.get(i); - field[3] = ++packetId; - // 保存field信息 - FieldPacket fieldPk = new FieldPacket(); - fieldPk.read(field); - fieldPackets.add(fieldPk); - bufferArray.write(field); - } - fieldCount = fieldPackets.size(); - eof[3] = ++packetId; - bufferArray.write(eof); - - if(isDefaultNodeShowTable) - { - for (String name : shardingTablesSet) { - RowDataPacket row = new RowDataPacket(1); - row.add(StringUtil.encode(name.toLowerCase(), source.getCharset())); - row.packetId = ++packetId; - row.write(bufferArray); - } - } - source.write(bufferArray); - } - - @Override - public void rowResponse(byte[] row, BackendConnection conn) { - if(isDefaultNodeShowTable) - { - RowDataPacket rowDataPacket =new RowDataPacket(1); - rowDataPacket.read(row); - String table= StringUtil.decode(rowDataPacket.fieldValues.get(0),conn.getCharset()); - if(shardingTablesSet.contains(table.toUpperCase())) return; - } - row[3] = ++packetId; - if(prepared) { - RowDataPacket rowDataPk = new RowDataPacket(fieldCount); - rowDataPk.read(row); - 
BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket(); - binRowDataPk.read(fieldPackets, rowDataPk); - binRowDataPk.packetId = rowDataPk.packetId; - binRowDataPk.write(session.getSource()); - } else { - session.getSource().write(row); - } - } - - @Override - public void connectionClose(BackendConnection conn, String reason) { - ErrorPacket err = new ErrorPacket(); - err.packetId = ++packetId; - err.errno = ErrorCode.ER_ERROR_ON_CLOSE; - err.message = StringUtil.encode(reason, session.getSource() - .getCharset()); - this.backConnectionErr(err, conn); - - } - - public void clearResources() { - - } - - @Override - public void requestDataResponse(byte[] data, BackendConnection conn) { - LoadDataUtil.requestFileDataResponse(data, - (MySQLBackendConnection) conn); - } - - @Override - public String toString() { - return "SingleNodeHandler [node=" + node + ", packetId=" + packetId - + "]"; - } - - public boolean isPrepared() { - return prepared; - } - - public void setPrepared(boolean prepared) { - this.prepared = prepared; - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/handler/BeginHandler.java b/src/main/java/io/mycat/server/handler/BeginHandler.java new file mode 100644 index 000000000..8487a56f3 --- /dev/null +++ b/src/main/java/io/mycat/server/handler/BeginHandler.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.server.handler; + +import io.mycat.server.ServerConnection; + +/** + * @author mycat + */ +public final class BeginHandler { + private static final byte[] AC_OFF = new byte[] { 7, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0 }; + public static void handle(String stmt, ServerConnection c) { + if (c.isAutocommit()) + { + c.write(c.writeToBuffer(AC_OFF, c.allocate())); + }else + { + c.getSession2().commit() ; + } + c.setAutocommit(false); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/handler/Explain2Handler.java b/src/main/java/io/mycat/server/handler/Explain2Handler.java new file mode 100644 index 000000000..3dcd23b64 --- /dev/null +++ b/src/main/java/io/mycat/server/handler/Explain2Handler.java @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.server.handler; + +import java.nio.ByteBuffer; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.backend.mysql.nio.handler.SingleNodeHandler; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteResultsetNode; +import io.mycat.server.ServerConnection; +import io.mycat.server.parser.ServerParse; +import io.mycat.util.StringUtil; + +/** + * @author rainbow + */ +public class Explain2Handler { + + private static final Logger logger = LoggerFactory.getLogger(Explain2Handler.class); + private static final RouteResultsetNode[] EMPTY_ARRAY = new RouteResultsetNode[1]; + private static final int FIELD_COUNT = 2; + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + static { + fields[0] = PacketUtil.getField("SQL", + Fields.FIELD_TYPE_VAR_STRING); + fields[1] = PacketUtil.getField("MSG", + Fields.FIELD_TYPE_VAR_STRING); + } + + public static void handle(String stmt, ServerConnection c, int offset) { + + try { + stmt = stmt.substring(offset); + if(!stmt.toLowerCase().contains("datanode=") || !stmt.toLowerCase().contains("sql=")){ + showerror(stmt, c, "explain2 datanode=? 
sql=?"); + return ; + } + String dataNode = stmt.substring(stmt.indexOf("=") + 1 ,stmt.indexOf("sql=")).trim(); + String sql = "explain " + stmt.substring(stmt.indexOf("sql=") + 4 ,stmt.length()).trim(); + + if(dataNode == null || dataNode.isEmpty() || sql == null || sql.isEmpty()){ + showerror(stmt, c, "dataNode or sql is null or empty"); + return; + } + + RouteResultsetNode node = new RouteResultsetNode(dataNode, ServerParse.SELECT, sql); + RouteResultset rrs = new RouteResultset(sql, ServerParse.SELECT); + node.setSource(rrs); + EMPTY_ARRAY[0] = node; + rrs.setNodes(EMPTY_ARRAY); + SingleNodeHandler singleNodeHandler = new SingleNodeHandler(rrs, c.getSession2()); + singleNodeHandler.execute(); + } catch (Exception e) { + logger.error(e.getMessage(), e.getCause()); + showerror(stmt, c, e.getMessage()); + } + } + + private static void showerror(String stmt, ServerConnection c, String msg){ + ByteBuffer buffer = c.allocate(); + // write header + ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + byte packetId = header.packetId; + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + field.packetId = ++packetId; + buffer = field.write(buffer, c,true); + } + + // write eof + EOFPacket eof = new EOFPacket(); + eof.packetId = ++packetId; + buffer = eof.write(buffer, c,true); + + + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(stmt, c.getCharset())); + row.add(StringUtil.encode(msg, c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // post write + c.write(buffer); + } +} diff --git a/src/main/java/io/mycat/server/sqlhandler/ExplainHandler.java b/src/main/java/io/mycat/server/handler/ExplainHandler.java similarity index 71% rename from src/main/java/io/mycat/server/sqlhandler/ExplainHandler.java rename to 
src/main/java/io/mycat/server/handler/ExplainHandler.java index f475e77ba..1d6613a77 100644 --- a/src/main/java/io/mycat/server/sqlhandler/ExplainHandler.java +++ b/src/main/java/io/mycat/server/handler/ExplainHandler.java @@ -21,41 +21,44 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.sqlhandler; +package io.mycat.server.handler; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.regex.Pattern; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.alibaba.druid.sql.ast.SQLExpr; import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement; import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; import com.alibaba.druid.sql.parser.SQLStatementParser; + import io.mycat.MycatServer; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.ErrorCode; +import io.mycat.config.Fields; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; import io.mycat.route.RouteResultset; import io.mycat.route.RouteResultsetNode; -import io.mycat.server.ErrorCode; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.TableConfig; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; +import io.mycat.server.util.SchemaUtil; import io.mycat.util.StringUtil; -import org.slf4j.Logger; -import 
org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.regex.Pattern; /** * @author mycat */ public class ExplainHandler { - private final static Pattern pattern = Pattern.compile("(?:(\\s*next\\s+value\\s+for\\s*MYCATSEQ_(\\w+))(,|\\)|\\s)*)+", Pattern.CASE_INSENSITIVE); + private static final Logger logger = LoggerFactory.getLogger(ExplainHandler.class); + private final static Pattern pattern = Pattern.compile("(?:(\\s*next\\s+value\\s+for\\s*MYCATSEQ_(\\w+))(,|\\)|\\s)*)+", Pattern.CASE_INSENSITIVE); private static final RouteResultsetNode[] EMPTY_ARRAY = new RouteResultsetNode[0]; private static final int FIELD_COUNT = 2; private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; @@ -65,63 +68,69 @@ public class ExplainHandler { fields[1] = PacketUtil.getField("SQL", Fields.FIELD_TYPE_VAR_STRING); } - public static void handle(String stmt, MySQLFrontConnection c, int offset) { - stmt = stmt.substring(offset); + public static void handle(String stmt, ServerConnection c, int offset) { + stmt = stmt.substring(offset).trim(); + RouteResultset rrs = getRouteResultset(c, stmt); - if (rrs == null) + if (rrs == null) { return; + } - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + ByteBuffer buffer = c.allocate(); // write header ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); byte packetId = header.packetId; - header.write(bufferArray); + buffer = header.write(buffer, c,true); // write fields for (FieldPacket field : fields) { field.packetId = ++packetId; - field.write(bufferArray); + buffer = field.write(buffer, c,true); } // write eof EOFPacket eof = new EOFPacket(); eof.packetId = ++packetId; - eof.write(bufferArray); + buffer = eof.write(buffer, c,true); // write rows - RouteResultsetNode[] rrsn = (rrs != null) ? 
rrs.getNodes() - : EMPTY_ARRAY; + RouteResultsetNode[] rrsn = rrs.getNodes(); for (RouteResultsetNode node : rrsn) { RowDataPacket row = getRow(node, c.getCharset()); row.packetId = ++packetId; - row.write(bufferArray); + buffer = row.write(buffer, c,true); } // write last eof EOFPacket lastEof = new EOFPacket(); lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + buffer = lastEof.write(buffer, c,true); // post write - c.write(bufferArray); + c.write(buffer); } private static RowDataPacket getRow(RouteResultsetNode node, String charset) { RowDataPacket row = new RowDataPacket(FIELD_COUNT); row.add(StringUtil.encode(node.getName(), charset)); - row.add(StringUtil.encode(node.getStatement(), charset)); + row.add(StringUtil.encode(node.getStatement().replaceAll("[\\t\\n\\r]", " "), charset)); return row; } - private static RouteResultset getRouteResultset(MySQLFrontConnection c, + private static RouteResultset getRouteResultset(ServerConnection c, String stmt) { String db = c.getSchema(); + int sqlType = ServerParse.parse(stmt) & 0xff; if (db == null) { - c.writeErrMessage(ErrorCode.ER_NO_DB_ERROR, "No database selected"); - return null; + db = SchemaUtil.detectDefaultDb(stmt, sqlType); + + if(db==null) + { + c.writeErrMessage(ErrorCode.ER_NO_DB_ERROR, "No database selected"); + return null; + } } SchemaConfig schema = MycatServer.getInstance().getConfig() .getSchemas().get(db); @@ -131,34 +140,37 @@ private static RouteResultset getRouteResultset(MySQLFrontConnection c, return null; } try { - int sqlType = ServerParse.parse(stmt) & 0xff; + if(ServerParse.INSERT==sqlType&&isMycatSeq(stmt, schema)) { c.writeErrMessage(ErrorCode.ER_PARSE_ERROR, "insert sql using mycat seq,you must provide primaryKey value for explain"); return null; } - return MycatServer - .getInstance() - .getRouterservice() - .route(MycatServer.getInstance().getConfig().getSystem(), - schema, sqlType, stmt, c.getCharset(), c); + SystemConfig system = 
MycatServer.getInstance().getConfig().getSystem(); + return MycatServer.getInstance().getRouterservice() + .route(system,schema, sqlType, stmt, c.getCharset(), c); } catch (Exception e) { StringBuilder s = new StringBuilder(); - logger.warn(s.append(c).append(stmt).toString() + " error:" + e); + logger.warn(s.append(c).append(stmt).toString()+" error:"+ e); String msg = e.getMessage(); c.writeErrMessage(ErrorCode.ER_PARSE_ERROR, msg == null ? e .getClass().getSimpleName() : msg); return null; } } + private static boolean isMycatSeq(String stmt, SchemaConfig schema) { - if(pattern.matcher(stmt).find()) return true; + if(pattern.matcher(stmt).find()) { + return true; + } SQLStatementParser parser =new MySqlStatementParser(stmt); MySqlInsertStatement statement = (MySqlInsertStatement) parser.parseStatement(); String tableName= statement.getTableName().getSimpleName(); TableConfig tableConfig= schema.getTables().get(tableName.toUpperCase()); - if(tableConfig==null) return false; + if(tableConfig==null) { + return false; + } if(tableConfig.isAutoIncrement()) { boolean isHasIdInSql=false; @@ -173,11 +185,13 @@ private static boolean isMycatSeq(String stmt, SchemaConfig schema) break; } } - if(!isHasIdInSql) return true; + if(!isHasIdInSql) { + return true; + } } return false; } -} \ No newline at end of file +} diff --git a/src/main/java/io/mycat/server/handler/KillHandler.java b/src/main/java/io/mycat/server/handler/KillHandler.java new file mode 100644 index 000000000..ea9ab98b8 --- /dev/null +++ b/src/main/java/io/mycat/server/handler/KillHandler.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.server.handler; + +import io.mycat.MycatServer; +import io.mycat.config.ErrorCode; +import io.mycat.net.FrontendConnection; +import io.mycat.net.NIOProcessor; +import io.mycat.net.mysql.OkPacket; +import io.mycat.server.ServerConnection; +import io.mycat.util.StringUtil; + +/** + * @author mycat + */ +public class KillHandler { + + public static void handle(String stmt, int offset, ServerConnection c) { + String id = stmt.substring(offset).trim(); + if (StringUtil.isEmpty(id)) { + c.writeErrMessage(ErrorCode.ER_NO_SUCH_THREAD, "NULL connection id"); + } else { + // get value + long value = 0; + try { + value = Long.parseLong(id); + } catch (NumberFormatException e) { + c.writeErrMessage(ErrorCode.ER_NO_SUCH_THREAD, "Invalid connection id:" + id); + return; + } + + // kill myself + if (value == c.getId()) { + getOkPacket().write(c); + c.write(c.allocate()); + return; + } + + // get connection and close it + FrontendConnection fc = null; + NIOProcessor[] processors = MycatServer.getInstance().getProcessors(); + for (NIOProcessor p : processors) { + if ((fc = p.getFrontends().get(value)) != null) { + break; + } 
+ } + if (fc != null) { + fc.close("killed"); + getOkPacket().write(c); + } else { + c.writeErrMessage(ErrorCode.ER_NO_SUCH_THREAD, "Unknown connection id:" + id); + } + } + } + + private static OkPacket getOkPacket() { + OkPacket packet = new OkPacket(); + packet.packetId = 1; + packet.affectedRows = 0; + packet.serverStatus = 2; + return packet; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/handler/MigrateHandler.java b/src/main/java/io/mycat/server/handler/MigrateHandler.java new file mode 100644 index 000000000..8ca4a0e36 --- /dev/null +++ b/src/main/java/io/mycat/server/handler/MigrateHandler.java @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.server.handler; + +import com.alibaba.fastjson.JSON; +import com.alibaba.fastjson.JSONArray; +import com.alibaba.fastjson.JSONObject; +import com.google.common.base.CharMatcher; +import com.google.common.base.Joiner; +import com.google.common.base.Splitter; +import com.google.common.base.Strings; +import com.google.common.collect.Lists; +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.ErrorCode; +import io.mycat.config.Fields; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.migrate.MigrateTask; +import io.mycat.migrate.MigrateUtils; +import io.mycat.migrate.TaskNode; +import io.mycat.net.mysql.*; +import io.mycat.route.RouteResultsetNode; +import io.mycat.route.function.AbstractPartitionAlgorithm; +import io.mycat.route.function.PartitionByCRC32PreSlot; +import io.mycat.route.function.PartitionByCRC32PreSlot.Range; +import io.mycat.server.ServerConnection; +import io.mycat.util.ObjectUtil; +import io.mycat.util.StringUtil; +import io.mycat.util.ZKUtils; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.api.transaction.CuratorTransactionFinal; +import org.apache.curator.framework.recipes.locks.InterProcessMutex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; +import java.util.*; +import java.util.concurrent.TimeUnit; + +/** todo remove watch + * @author nange + */ +public final class MigrateHandler { + private static final Logger LOGGER = LoggerFactory.getLogger("MigrateHandler"); + + //可以优化成多个锁 + private static InterProcessMutex slaveIDsLock = new InterProcessMutex(ZKUtils.getConnection(), ZKUtils.getZKBasePath()+"lock/slaveIDs.lock");; + private static final int FIELD_COUNT = 1; + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + static { + fields[0] = 
PacketUtil.getField("TASK_ID", + Fields.FIELD_TYPE_VAR_STRING); + + } + private static String getUUID(){ + String s = UUID.randomUUID().toString(); + //去掉“-”符号 + return s.substring(0,8)+s.substring(9,13)+s.substring(14,18)+s.substring(19,23)+s.substring(24); + } + public static void handle(String stmt, ServerConnection c) { + Map map = parse(stmt); + + String table = map.get("table"); + String add = map.get("add"); + if (table == null) { + writeErrMessage(c, "table cannot be null"); + return; + } + + if (add == null) { + writeErrMessage(c, "add cannot be null"); + return; + } + String taskID= getUUID(); + try + { + SchemaConfig schemaConfig = MycatServer.getInstance().getConfig().getSchemas().get(c.getSchema()); + TableConfig tableConfig = schemaConfig.getTables().get(table.toUpperCase()); + AbstractPartitionAlgorithm algorithm = tableConfig.getRule().getRuleAlgorithm(); + if (!(algorithm instanceof PartitionByCRC32PreSlot)) { + writeErrMessage(c, "table: " + table + " rule is not be PartitionByCRC32PreSlot"); + return; + } + + Map> integerListMap = ((PartitionByCRC32PreSlot) algorithm).getRangeMap(); + integerListMap = (Map>) ObjectUtil.copyObject(integerListMap); + + ArrayList oldDataNodes = tableConfig.getDataNodes(); + List newDataNodes = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(add); + Map> tasks= MigrateUtils + .balanceExpand(table, integerListMap, oldDataNodes, newDataNodes,PartitionByCRC32PreSlot.DEFAULT_SLOTS_NUM); + + CuratorTransactionFinal transactionFinal=null; + String taskBase = ZKUtils.getZKBasePath() + "migrate/" + c.getSchema(); + String taskPath = taskBase + "/" + taskID; + CuratorFramework client= ZKUtils.getConnection(); + + //校验 之前同一个表的迁移任务未完成,则jzhi禁止继续 + if( client.checkExists().forPath(taskBase) !=null ) { + List childTaskList = client.getChildren().forPath(taskBase); + for (String child : childTaskList) { + TaskNode taskNode = JSON + .parseObject(ZKUtils.getConnection().getData().forPath(taskBase + "/" + child), 
TaskNode.class); + if (taskNode.getSchema().equalsIgnoreCase(c.getSchema()) && table.equalsIgnoreCase(taskNode.getTable()) + && taskNode.getStatus() < 5) { + writeErrMessage(c, "table: " + table + " previous migrate task is still running,on the same time one table only one task"); + return; + } + } + } + client.create().creatingParentsIfNeeded().forPath(taskPath); + TaskNode taskNode=new TaskNode(); + taskNode.setSchema(c.getSchema()); + taskNode.setSql(stmt); + taskNode.setTable(table); + taskNode.setAdd(add); + taskNode.setStatus(0); + + Map fromNodeSlaveIdMap=new HashMap<>(); + + List allTaskList=new ArrayList<>(); + for (Map.Entry> entry : tasks.entrySet()) { + String key=entry.getKey(); + List value=entry.getValue(); + for (MigrateTask migrateTask : value) { + migrateTask.setSchema(c.getSchema()); + + //分配slaveid只需要一个dataHost分配一个即可,后续任务执行模拟从节点只需要一个dataHost一个 + String dataHost=getDataHostNameFromNode(migrateTask.getFrom()); + if(fromNodeSlaveIdMap.containsKey(dataHost)) { + migrateTask.setSlaveId( fromNodeSlaveIdMap.get(dataHost)); + } else { + migrateTask.setSlaveId( getSlaveIdFromZKForDataNode(migrateTask.getFrom())); + fromNodeSlaveIdMap.put(dataHost,migrateTask.getSlaveId()); + } + + } + allTaskList.addAll(value); + + } + + + transactionFinal= client.inTransaction() .setData().forPath(taskPath,JSON.toJSONBytes(taskNode)).and() ; + + + + //合并成dataHost级别任务 + Map > dataHostMigrateMap=mergerTaskForDataHost(allTaskList); + for (Map.Entry> entry : dataHostMigrateMap.entrySet()) { + String key=entry.getKey(); + List value=entry.getValue(); + String path= taskPath + "/" + key; + transactionFinal= transactionFinal.create().forPath(path, JSON.toJSONBytes(value)).and() ; + } + + + transactionFinal.commit(); + } catch (Exception e) { + LOGGER.error("migrate error", e); + writeErrMessage(c, "migrate error:" + e); + return; + } + + writePackToClient(c, taskID); + LOGGER.info("task start",new Date()); + } + + private static void writePackToClient(ServerConnection c, 
String taskID) { + ByteBuffer buffer = c.allocate(); + + // write header + ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + byte packetId = header.packetId; + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + field.packetId = ++packetId; + buffer = field.write(buffer, c,true); + } + + // write eof + EOFPacket eof = new EOFPacket(); + eof.packetId = ++packetId; + buffer = eof.write(buffer, c,true); + + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(taskID, c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // post write + c.write(buffer); + } + + + private static String getDataHostNameFromNode(String dataNode){ + return MycatServer.getInstance().getConfig().getDataNodes().get(dataNode).getDbPool().getHostName(); + } + + private static Map > mergerTaskForDataHost ( List migrateTaskList) + { + Map > taskMap=new HashMap<>(); + for (MigrateTask migrateTask : migrateTaskList) { + String dataHost=getDataHostNameFromNode(migrateTask.getFrom()); + if(taskMap.containsKey(dataHost)) { + taskMap.get(dataHost).add(migrateTask); + } else + { + taskMap.put(dataHost, Lists.newArrayList(migrateTask)) ; + } + } + + + return taskMap; + } + + private static int getSlaveIdFromZKForDataNode(String dataNode) + { + PhysicalDBNode dbNode= MycatServer.getInstance().getConfig().getDataNodes().get(dataNode); + String slaveIDs= dbNode.getDbPool().getSlaveIDs(); + if(Strings.isNullOrEmpty(slaveIDs)) + throw new RuntimeException("dataHost:"+dbNode.getDbPool().getHostName()+" do not config the salveIDs field"); + + List allSlaveIDList= parseSlaveIDs(slaveIDs); + + String taskPath = ZKUtils.getZKBasePath() + "slaveIDs/" +dbNode.getDbPool().getHostName(); + try { + slaveIDsLock.acquire(30, TimeUnit.SECONDS); + Set 
zkSlaveIdsSet=new HashSet<>(); + if(ZKUtils.getConnection().checkExists().forPath(taskPath)!=null ) { + List zkHasSlaveIDs = ZKUtils.getConnection().getChildren().forPath(taskPath); + for (String zkHasSlaveID : zkHasSlaveIDs) { + zkSlaveIdsSet.add(Integer.parseInt(zkHasSlaveID)); + } + } + for (Integer integer : allSlaveIDList) { + if(!zkSlaveIdsSet.contains(integer)) { + ZKUtils.getConnection().create().creatingParentsIfNeeded().forPath(taskPath+"/"+integer); + return integer; + } + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + try { + slaveIDsLock.release(); + } catch (Exception e) { + LOGGER.error("error:",e); + } + } + + throw new RuntimeException("cannot get the slaveID for dataHost :"+dbNode.getDbPool().getHostName()); + } + + private static List parseSlaveIDs(String slaveIDs) + { + List allSlaveList=new ArrayList<>(); + List stringList= Splitter.on(",").omitEmptyStrings().trimResults().splitToList(slaveIDs); + for (String id : stringList) { + if(id.contains("-")) { + List idRangeList= Splitter.on("-").omitEmptyStrings().trimResults().splitToList(id) ; + if(idRangeList.size()!=2) + throw new RuntimeException(id+"slaveIds range must be 2 size"); + for(int i=Integer.parseInt(idRangeList.get(0));i<=Integer.parseInt(idRangeList.get(1));i++) + { + allSlaveList.add(i); + } + + } else + { + allSlaveList.add(Integer.parseInt(id)); + } + } + return allSlaveList; + } + + + + private static OkPacket getOkPacket() { + OkPacket packet = new OkPacket(); + packet.packetId = 1; + packet.affectedRows = 0; + packet.serverStatus = 2; + return packet; + } + + public static void writeErrMessage(ServerConnection c, String msg) { + c.writeErrMessage(ErrorCode.ER_UNKNOWN_ERROR, msg); + } + + public static void main(String[] args) { + String sql = "migrate -table=test -add=dn2,dn3,dn4 " + " \n -additional=\"a=b\""; + Map map = parse(sql); + System.out.println(); + for (int i = 0; i < 100; i++) { + System.out.println(i % 5); + } + + TaskNode taskNode=new 
TaskNode(); + taskNode.setSql(sql); + + + System.out.println(new String(JSON.toJSONBytes(taskNode))); + } + + private static Map parse(String sql) { + Map map = new HashMap<>(); + List rtn = Splitter.on(CharMatcher.whitespace()).omitEmptyStrings().splitToList(sql); + for (String s : rtn) { + if (s.contains("=")) { + int dindex = s.indexOf("="); + if (s.startsWith("-")) { + String key = s.substring(1, dindex).trim(); + String value = s.substring(dindex + 1).trim(); + map.put(key, value); + } else if (s.startsWith("--")) { + String key = s.substring(2, dindex).trim(); + String value = s.substring(dindex + 1).trim(); + map.put(key, value); + } + } + } + return map; + } +} diff --git a/src/main/java/io/mycat/server/handler/MysqlInformationSchemaHandler.java b/src/main/java/io/mycat/server/handler/MysqlInformationSchemaHandler.java new file mode 100644 index 000000000..bb324c0e8 --- /dev/null +++ b/src/main/java/io/mycat/server/handler/MysqlInformationSchemaHandler.java @@ -0,0 +1,86 @@ +package io.mycat.server.handler; + +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.OkPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.server.ServerConnection; +import io.mycat.server.util.SchemaUtil; + + +/** + * 对 PhpAdmin's 控制台操作进行支持 + * + * 如:SELECT * FROM information_schema.CHARACTER_SETS 等相关语句进行模拟返回 + * + * @author zhuam + * + */ +public class MysqlInformationSchemaHandler { + + /** + * 写入数据包 + * @param field_count + * @param fields + * @param c + */ + private static void doWrite(int field_count, FieldPacket[] fields, ServerConnection c) { + + ByteBuffer buffer = c.allocate(); + + // write header + ResultSetHeaderPacket header = PacketUtil.getHeader(field_count); + byte packetId = header.packetId; + buffer = header.write(buffer, c, true); + + // write fields + for (FieldPacket field : fields) { + 
field.packetId = ++packetId; + buffer = field.write(buffer, c, true); + } + + // write eof + EOFPacket eof = new EOFPacket(); + eof.packetId = ++packetId; + buffer = eof.write(buffer, c, true); + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c, true); + + // post write + c.write(buffer); + + } + + public static void handle(String sql, ServerConnection c) { + + SchemaUtil.SchemaInfo schemaInfo = SchemaUtil.parseSchema(sql); + if ( schemaInfo != null ) { + + if ( schemaInfo.table.toUpperCase().equals("CHARACTER_SETS") ) { + + //模拟列头 + int field_count = 4; + FieldPacket[] fields = new FieldPacket[field_count]; + fields[0] = PacketUtil.getField("CHARACTER_SET_NAME", Fields.FIELD_TYPE_VAR_STRING); + fields[1] = PacketUtil.getField("DEFAULT_COLLATE_NAME", Fields.FIELD_TYPE_VAR_STRING); + fields[2] = PacketUtil.getField("DESCRIPTION", Fields.FIELD_TYPE_VAR_STRING); + fields[3] = PacketUtil.getField("MAXLEN", Fields.FIELD_TYPE_LONG); + + doWrite(field_count, fields, c); + + } else { + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + } + + } else { + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + } + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/handler/MysqlProcHandler.java b/src/main/java/io/mycat/server/handler/MysqlProcHandler.java new file mode 100644 index 000000000..66e8ea126 --- /dev/null +++ b/src/main/java/io/mycat/server/handler/MysqlProcHandler.java @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.server.handler; + + +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.server.ServerConnection; + +public class MysqlProcHandler +{ + private static final int FIELD_COUNT = 2; + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + + static + { + fields[0] = PacketUtil.getField("name", + Fields.FIELD_TYPE_VAR_STRING); + fields[1] = PacketUtil.getField("type", Fields.FIELD_TYPE_VAR_STRING); + } + + public static void handle(String stmt, ServerConnection c) + { + + ByteBuffer buffer = c.allocate(); + + // write header + ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + byte packetId = header.packetId; + buffer = header.write(buffer, c, true); + + // write fields + for (FieldPacket field : fields) + { + field.packetId = ++packetId; + buffer = field.write(buffer, c, true); + } + + // write eof + EOFPacket eof = new EOFPacket(); + eof.packetId = ++packetId; + buffer = eof.write(buffer, c, true); + + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c, true); + + 
// post write + c.write(buffer); + + } + + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/sqlhandler/SavepointHandler.java b/src/main/java/io/mycat/server/handler/SavepointHandler.java similarity index 81% rename from src/main/java/io/mycat/server/sqlhandler/SavepointHandler.java rename to src/main/java/io/mycat/server/handler/SavepointHandler.java index eb3528ce7..cd60c6b60 100644 --- a/src/main/java/io/mycat/server/sqlhandler/SavepointHandler.java +++ b/src/main/java/io/mycat/server/handler/SavepointHandler.java @@ -21,19 +21,18 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.sqlhandler; +package io.mycat.server.handler; -import io.mycat.server.ErrorCode; -import io.mycat.server.MySQLFrontConnection; +import io.mycat.config.ErrorCode; +import io.mycat.server.ServerConnection; /** * @author mycat */ public final class SavepointHandler { - public static void handle(String stmt, MySQLFrontConnection c) { - c.writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, - "Unsupported statement"); - } + public static void handle(String stmt, ServerConnection c) { + c.writeErrMessage(ErrorCode.ER_UNKNOWN_COM_ERROR, "Unsupported statement"); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/sqlhandler/SelectHandler.java b/src/main/java/io/mycat/server/handler/SelectHandler.java similarity index 78% rename from src/main/java/io/mycat/server/sqlhandler/SelectHandler.java rename to src/main/java/io/mycat/server/handler/SelectHandler.java index 0cfc7f36e..1b1ddf01f 100644 --- a/src/main/java/io/mycat/server/sqlhandler/SelectHandler.java +++ b/src/main/java/io/mycat/server/handler/SelectHandler.java @@ -21,28 +21,20 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.sqlhandler; +package io.mycat.server.handler; -import io.mycat.server.MySQLFrontConnection; +import io.mycat.route.parser.util.ParseUtil; +import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; import io.mycat.server.parser.ServerParseSelect; -import io.mycat.server.response.SelectDatabase; -import io.mycat.server.response.SelectIdentity; -import io.mycat.server.response.SelectLastInsertId; -import io.mycat.server.response.SelectUser; -import io.mycat.server.response.SelectVariables; -import io.mycat.server.response.SelectVersion; -import io.mycat.server.response.SelectVersionComment; -import io.mycat.server.response.SessionIncrement; -import io.mycat.server.response.SessionIsolation; -import io.mycat.util.ParseUtil; +import io.mycat.server.response.*; /** * @author mycat */ public final class SelectHandler { - public static void handle(String stmt, MySQLFrontConnection c, int offs) { + public static void handle(String stmt, ServerConnection c, int offs) { int offset = offs; switch (ServerParseSelect.parse(stmt, offs)) { case ServerParseSelect.VERSION_COMMENT: @@ -65,7 +57,7 @@ public static void handle(String stmt, MySQLFrontConnection c, int offs) { break; case ServerParseSelect.LAST_INSERT_ID: // offset = ParseUtil.move(stmt, 0, "select".length()); - loop: for (int l = stmt.length(); offset < l; ++offset) { + loop:for (int l=stmt.length(); offset < l; ++offset) { switch (stmt.charAt(offset)) { case ' ': continue; @@ -84,7 +76,7 @@ public static void handle(String stmt, MySQLFrontConnection c, int offs) { break; case ServerParseSelect.IDENTITY: // offset = ParseUtil.move(stmt, 0, "select".length()); - loop: for (int l = stmt.length(); offset < l; ++offset) { + loop:for (int l=stmt.length(); offset < l; ++offset) { switch (stmt.charAt(offset)) { case ' ': continue; @@ -104,11 +96,14 @@ public static void handle(String stmt, MySQLFrontConnection c, int offs) { SelectIdentity.response(c, stmt, offset, 
orgName); break; case ServerParseSelect.SELECT_VAR_ALL: - SelectVariables.execute(c, stmt); + SelectVariables.execute(c,stmt); break; + case ServerParseSelect.SESSION_TX_READ_ONLY: + SelectTxReadOnly.response(c); + break; default: c.execute(stmt, ServerParse.SELECT); } } -} \ No newline at end of file +} diff --git a/src/main/java/io/mycat/server/sqlhandler/ServerLoadDataInfileHandler.java b/src/main/java/io/mycat/server/handler/ServerLoadDataInfileHandler.java similarity index 85% rename from src/main/java/io/mycat/server/sqlhandler/ServerLoadDataInfileHandler.java rename to src/main/java/io/mycat/server/handler/ServerLoadDataInfileHandler.java index 596e98dd4..63a7e3f5c 100644 --- a/src/main/java/io/mycat/server/sqlhandler/ServerLoadDataInfileHandler.java +++ b/src/main/java/io/mycat/server/handler/ServerLoadDataInfileHandler.java @@ -21,38 +21,44 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.sqlhandler; +package io.mycat.server.handler; + +import com.alibaba.druid.sql.ast.SQLExpr; +import com.alibaba.druid.sql.ast.expr.SQLCharExpr; +import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.druid.sql.ast.expr.SQLLiteralExpr; +import com.alibaba.druid.sql.ast.expr.SQLTextLiteralExpr; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlLoadDataInFileStatement; +import com.alibaba.druid.sql.parser.SQLStatementParser; +import com.google.common.collect.Lists; +import com.google.common.io.Files; +import com.univocity.parsers.csv.CsvParser; +import com.univocity.parsers.csv.CsvParserSettings; import io.mycat.MycatServer; import io.mycat.cache.LayerCachePool; +import io.mycat.config.ErrorCode; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.net.handler.LoadDataInfileHandler; +import io.mycat.net.mysql.BinaryPacket; +import io.mycat.net.mysql.RequestFilePacket; import io.mycat.route.RouteResultset; import 
io.mycat.route.RouteResultsetNode; +import io.mycat.route.function.SlotFunction; import io.mycat.route.parser.druid.DruidShardingParseInfo; import io.mycat.route.parser.druid.MycatStatementParser; import io.mycat.route.parser.druid.RouteCalculateUnit; import io.mycat.route.util.RouterUtil; -import io.mycat.server.ErrorCode; -import io.mycat.server.LoadDataInfileHandler; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.config.node.TableConfig; -import io.mycat.server.packet.BinaryPacket; -import io.mycat.server.packet.RequestFilePacket; +import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; import io.mycat.sqlengine.mpp.LoadData; import io.mycat.util.ObjectUtil; +import io.mycat.util.StringUtil; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.StringReader; -import java.io.UnsupportedEncodingException; +import java.io.*; +import java.nio.ByteBuffer; import java.nio.charset.Charset; import java.sql.SQLNonTransientException; import java.util.ArrayList; @@ -63,17 +69,6 @@ import java.util.SortedSet; import java.util.TreeSet; -import com.alibaba.druid.sql.ast.SQLExpr; -import com.alibaba.druid.sql.ast.expr.SQLCharExpr; -import com.alibaba.druid.sql.ast.expr.SQLLiteralExpr; -import com.alibaba.druid.sql.ast.expr.SQLTextLiteralExpr; -import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlLoadDataInFileStatement; -import com.alibaba.druid.sql.parser.SQLStatementParser; -import com.google.common.collect.Lists; -import com.google.common.io.Files; -import com.univocity.parsers.csv.CsvParser; -import com.univocity.parsers.csv.CsvParserSettings; - /** * mysql命令行客户端也需要启用local 
file权限,加参数--local-infile=1 * jdbc则正常,不用设置 @@ -81,7 +76,7 @@ */ public final class ServerLoadDataInfileHandler implements LoadDataInfileHandler { - private MySQLFrontConnection serverConnection; + private ServerConnection serverConnection; private String sql; private String fileName; private byte packID = 0; @@ -102,6 +97,8 @@ public final class ServerLoadDataInfileHandler implements LoadDataInfileHandler private SchemaConfig schema; private boolean isStartLoadData = false; + private boolean shoudAddSlot = false; + public int getPackID() { return packID; @@ -112,7 +109,7 @@ public void setPackID(byte packID) this.packID = packID; } - public ServerLoadDataInfileHandler(MySQLFrontConnection serverConnection) + public ServerLoadDataInfileHandler(ServerConnection serverConnection) { this.serverConnection = serverConnection; @@ -149,9 +146,8 @@ private void parseLoadDataPram() loadData.setEnclose(enclose); SQLTextLiteralExpr escapseExpr = (SQLTextLiteralExpr)statement.getColumnsEscaped() ; - String escapse=escapseExpr==null?"\\":escapseExpr.getText(); + String escapse=escapseExpr==null?"\\":escapseExpr.getText(); loadData.setEscape(escapse); - String charset = statement.getCharset() != null ? 
statement.getCharset() : serverConnection.getCharset(); loadData.setCharset(charset); loadData.setFileName(fileName); @@ -180,6 +176,9 @@ public void start(String sql) tableId2DataNodeCache = (LayerCachePool) MycatServer.getInstance().getCacheService().getCachePool("TableID2DataNodeCache"); tableName = statement.getTableName().getSimpleName().toUpperCase(); tableConfig = schema.getTables().get(tableName); + if( tableConfig.getRule().getRuleAlgorithm() instanceof SlotFunction){ + shoudAddSlot=true; + } tempPath = SystemConfig.getHomePath() + File.separator + "temp" + File.separator + serverConnection.getId() + File.separator; tempFile = tempPath + "clientTemp.txt"; tempByteBuffer = new ByteArrayOutputStream(); @@ -187,35 +186,33 @@ public void start(String sql) List columns = statement.getColumns(); if(tableConfig!=null) { - String pColumn = tableConfig.getPartitionColumn(); - if (pColumn != null && columns != null && columns.size() > 0) - { - - for (int i = 0, columnsSize = columns.size(); i < columnsSize; i++) - { - SQLExpr column = columns.get(i); - if (pColumn.equalsIgnoreCase(column.toString())) - { + String pColumn = getPartitionColumn(); + if (pColumn != null && columns != null && columns.size() > 0) { + for (int i = 0, columnsSize = columns.size(); i < columnsSize; i++) { + String column = StringUtil.removeBackquote(columns.get(i).toString()); + if (pColumn.equalsIgnoreCase(column)) { partitionColumnIndex = i; - break; - } - + if("_slot".equalsIgnoreCase(column)){ + shoudAddSlot=false; + } } } } - + if(shoudAddSlot){ + columns.add(new SQLIdentifierExpr("_slot")); + } parseLoadDataPram(); if (statement.isLocal()) { isStartLoadData = true; //向客户端请求发送文件 - + ByteBuffer buffer = serverConnection.allocate(); RequestFilePacket filePacket = new RequestFilePacket(); filePacket.fileName = fileName.getBytes(); filePacket.packetId = 1; - filePacket.write(serverConnection); + filePacket.write(buffer, serverConnection, true); } else { if (!new File(fileName).exists()) @@ 
-300,8 +297,9 @@ private synchronized void saveByteOrToFile(byte[] data, boolean isForce) { try { - if (channel != null) + if (channel != null) { channel.close(); + } } catch (IOException ignored) { @@ -323,6 +321,7 @@ private RouteResultset tryDirectRoute(String sql, String[] lineList) { //走默认节点 RouteResultsetNode rrNode = new RouteResultsetNode(schema.getDataNode(), ServerParse.INSERT, sql); + rrNode.setSource(rrs); rrs.setNodes(new RouteResultsetNode[]{rrNode}); return rrs; } @@ -335,6 +334,10 @@ else if (tableConfig != null&&tableConfig.isGlobalTable()) String dataNode = dataNodes.get(i); RouteResultsetNode rrNode = new RouteResultsetNode(dataNode, ServerParse.INSERT, sql); rrsNodes[i]=rrNode; + if(rrs.getDataNodeSlotMap().containsKey(dataNode)){ + rrsNodes[i].setSlot(rrs.getDataNodeSlotMap().get(dataNode)); + } + rrsNodes[i].setSource(rrs); } rrs.setNodes(rrsNodes); @@ -353,7 +356,7 @@ else if (tableConfig != null) { String value = lineList[partitionColumnIndex]; RouteCalculateUnit routeCalculateUnit = new RouteCalculateUnit(); - routeCalculateUnit.addShardingExpr(tableName, tableConfig.getPartitionColumn(), parseFieldString(value,loadData.getEnclose())); + routeCalculateUnit.addShardingExpr(tableName, getPartitionColumn(), parseFieldString(value,loadData.getEnclose())); ctx.addRouteCalculateUnit(routeCalculateUnit); try { @@ -422,6 +425,9 @@ private void parseOneLine(List columns, String tableName, String[] line } String jLine = joinField(line, data); + if(shoudAddSlot){ + jLine=jLine+loadData.getFieldTerminatedBy()+routeResultsetNode.getSlot(); + } if (data.getData() == null) { data.setData(Lists.newArrayList(jLine)); @@ -432,12 +438,11 @@ private void parseOneLine(List columns, String tableName, String[] line } - if (toFile) + if (toFile + //避免当导入数据跨多分片时内存溢出的情况 + && data.getData().size()>10000) { - if(data.getData().size()>100000) - { saveDataToFile(data,name); - } } } @@ -468,7 +473,10 @@ private void saveDataToFile(LoadData data,String dnName) File dnFile 
= new File(data.getFileName()); try { - Files.append(joinLine(data.getData(),data), dnFile, Charset.forName(loadData.getCharset())); + if (!dnFile.exists()) { + Files.createParentDirs(dnFile); + } + Files.append(joinLine(data.getData(),data), dnFile, Charset.forName(loadData.getCharset())); } catch (IOException e) { @@ -535,13 +543,15 @@ private RouteResultset buildResultSet(Map routeMap) for (String dn : routeMap.keySet()) { RouteResultsetNode rrNode = new RouteResultsetNode(dn, ServerParse.LOAD_DATA_INFILE_SQL, srcStatement); + rrNode.setSource(rrs); rrNode.setTotalNodeSize(size); rrNode.setStatement(srcStatement); LoadData newLoadData = new LoadData(); ObjectUtil.copyProperties(loadData, newLoadData); newLoadData.setLocal(true); LoadData loadData1 = routeMap.get(dn); - if (isHasStoreToFile) + // if (isHasStoreToFile) + if (loadData1.getFileName()!=null)//此处判断是否有保存分库load的临时文件dn1.txt/dn2.txt,不是判断是否有clientTemp.txt { newLoadData.setFileName(loadData1.getFileName()); } else @@ -628,6 +638,8 @@ public void end(byte packID) // List lines = Splitter.on(loadData.getLineTerminatedBy()).omitEmptyStrings().splitToList(content); CsvParserSettings settings = new CsvParserSettings(); + settings.setMaxColumns(65535); + settings.setMaxCharsPerColumn(65535); settings.getFormat().setLineSeparator(loadData.getLineTerminatedBy()); settings.getFormat().setDelimiter(loadData.getFieldTerminatedBy().charAt(0)); if(loadData.getEnclose()!=null) @@ -636,9 +648,15 @@ public void end(byte packID) } if(loadData.getEscape()!=null) { - settings.getFormat().setQuoteEscape(loadData.getEscape().charAt(0)); + settings.getFormat().setQuoteEscape(loadData.getEscape().charAt(0)); } settings.getFormat().setNormalizedNewline(loadData.getLineTerminatedBy().charAt(0)); + /* + * fix bug #1074 : LOAD DATA local INFILE导入的所有Boolean类型全部变成了false + * 不可见字符将在CsvParser被当成whitespace过滤掉, 使用settings.trimValues(false)来避免被过滤掉 + * TODO : 设置trimValues(false)之后, 会引起字段值前后的空白字符无法被过滤! 
+ */ + settings.trimValues(false); CsvParser parser = new CsvParser(settings); try { @@ -675,6 +693,8 @@ private void parseFileByLine(String file, String encode, String split) { List columns = statement.getColumns(); CsvParserSettings settings = new CsvParserSettings(); + settings.setMaxColumns(65535); + settings.setMaxCharsPerColumn(65535); settings.getFormat().setLineSeparator(loadData.getLineTerminatedBy()); settings.getFormat().setDelimiter(loadData.getFieldTerminatedBy().charAt(0)); if(loadData.getEnclose()!=null) @@ -686,6 +706,12 @@ private void parseFileByLine(String file, String encode, String split) settings.getFormat().setQuoteEscape(loadData.getEscape().charAt(0)); } settings.getFormat().setNormalizedNewline(loadData.getLineTerminatedBy().charAt(0)); + /* + * fix #1074 : LOAD DATA local INFILE导入的所有Boolean类型全部变成了false + * 不可见字符将在CsvParser被当成whitespace过滤掉, 使用settings.trimValues(false)来避免被过滤掉 + * TODO : 设置trimValues(false)之后, 会引起字段值前后的空白字符无法被过滤! + */ + settings.trimValues(false); CsvParser parser = new CsvParser(settings); InputStreamReader reader = null; FileInputStream fileInputStream = null; @@ -778,6 +804,18 @@ public boolean isStartLoadData() return isStartLoadData; } + private String getPartitionColumn() { + String pColumn; + if (tableConfig.isSecondLevel() + && tableConfig.getParentTC().getPartitionColumn() + .equals(tableConfig.getParentKey())) { + pColumn = tableConfig.getJoinKey(); + }else { + pColumn = tableConfig.getPartitionColumn(); + } + return pColumn; + } + /** * 删除目录及其所有子目录和文件 * @@ -814,4 +852,4 @@ private static void deleteFile(String dirPath) } -} \ No newline at end of file +} diff --git a/src/main/java/io/mycat/server/sqlhandler/ServerPrepareHandler.java b/src/main/java/io/mycat/server/handler/ServerPrepareHandler.java similarity index 52% rename from src/main/java/io/mycat/server/sqlhandler/ServerPrepareHandler.java rename to src/main/java/io/mycat/server/handler/ServerPrepareHandler.java index fcda4be28..71d3b0bf9 100644 --- 
a/src/main/java/io/mycat/server/sqlhandler/ServerPrepareHandler.java +++ b/src/main/java/io/mycat/server/handler/ServerPrepareHandler.java @@ -21,36 +21,56 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.sqlhandler; - -import io.mycat.server.ErrorCode; -import io.mycat.server.Fields; -import io.mycat.server.FrontendPrepareHandler; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.ExecutePacket; -import io.mycat.server.packet.util.BindValue; -import io.mycat.server.packet.util.ByteUtil; -import io.mycat.server.packet.util.PreparedStatement; -import io.mycat.server.response.PreparedStmtResponse; +package io.mycat.server.handler; +import java.io.ByteArrayOutputStream; +import java.io.IOException; import java.io.UnsupportedEncodingException; import java.util.HashMap; import java.util.Map; -import org.apache.log4j.Logger; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import com.google.common.escape.Escaper; +import com.google.common.escape.Escapers; +import com.google.common.escape.Escapers.Builder; + +import io.mycat.backend.mysql.BindValue; +import io.mycat.backend.mysql.ByteUtil; +import io.mycat.backend.mysql.PreparedStatement; +import io.mycat.config.ErrorCode; +import io.mycat.config.Fields; +import io.mycat.net.handler.FrontendPrepareHandler; +import io.mycat.net.mysql.ExecutePacket; +import io.mycat.net.mysql.LongDataPacket; +import io.mycat.net.mysql.OkPacket; +import io.mycat.net.mysql.ResetPacket; +import io.mycat.server.ServerConnection; +import io.mycat.server.response.PreparedStmtResponse; +import io.mycat.util.HexFormatUtil; /** - * @author mycat + * @author mycat, CrazyPig, zhuam */ public class ServerPrepareHandler implements FrontendPrepareHandler { - - private static final Logger LOGGER = Logger.getLogger(ServerPrepareHandler.class); - private MySQLFrontConnection source; + + private static final Logger LOGGER = LoggerFactory.getLogger(ServerPrepareHandler.class); + + private static 
Escaper varcharEscaper = null; + + static { + Builder escapeBuilder = Escapers.builder(); + escapeBuilder.addEscape('\'', "\\'"); + escapeBuilder.addEscape('$', "\\$"); + varcharEscaper = escapeBuilder.build(); + } + + private ServerConnection source; private volatile long pstmtId; private Map pstmtForSql; private Map pstmtForId; - public ServerPrepareHandler(MySQLFrontConnection source) { + public ServerPrepareHandler(ServerConnection source) { this.source = source; this.pstmtId = 0L; this.pstmtForSql = new HashMap(); @@ -59,6 +79,7 @@ public ServerPrepareHandler(MySQLFrontConnection source) { @Override public void prepare(String sql) { + LOGGER.debug("use server prepare, sql: " + sql); PreparedStatement pstmt = null; if ((pstmt = pstmtForSql.get(sql)) == null) { @@ -71,7 +92,43 @@ public void prepare(String sql) { } PreparedStmtResponse.response(pstmt, source); } + + @Override + public void sendLongData(byte[] data) { + LongDataPacket packet = new LongDataPacket(); + packet.read(data); + long pstmtId = packet.getPstmtId(); + PreparedStatement pstmt = pstmtForId.get(pstmtId); + if(pstmt != null) { + if(LOGGER.isDebugEnabled()) { + LOGGER.debug("send long data to prepare sql : " + pstmtForId.get(pstmtId)); + } + long paramId = packet.getParamId(); + try { + pstmt.appendLongData(paramId, packet.getLongData()); + } catch (IOException e) { + source.writeErrMessage(ErrorCode.ERR_FOUND_EXCEPION, e.getMessage()); + } + } + } + @Override + public void reset(byte[] data) { + ResetPacket packet = new ResetPacket(); + packet.read(data); + long pstmtId = packet.getPstmtId(); + PreparedStatement pstmt = pstmtForId.get(pstmtId); + if(pstmt != null) { + if(LOGGER.isDebugEnabled()) { + LOGGER.debug("reset prepare sql : " + pstmtForId.get(pstmtId)); + } + pstmt.resetLongData(); + source.write(OkPacket.OK); + } else { + source.writeErrMessage(ErrorCode.ERR_FOUND_EXCEPION, "can not reset prepare statement : " + pstmtForId.get(pstmtId)); + } + } + @Override public void execute(byte[] 
data) { long pstmtId = ByteUtil.readUB4(data, 5); @@ -91,15 +148,20 @@ public void execute(byte[] data) { String sql = prepareStmtBindValue(pstmt, bindValues); // 执行sql source.getSession2().setPrepared(true); - LOGGER.debug("execute prepare sql: " + sql); - source.query(sql); + if(LOGGER.isDebugEnabled()) { + LOGGER.debug("execute prepare sql: " + sql); + } + source.query( sql ); } } - + + @Override public void close(byte[] data) { long pstmtId = ByteUtil.readUB4(data, 5); // 获取prepare stmt id - LOGGER.debug("close prepare stmt, stmtId = " + pstmtId); + if(LOGGER.isDebugEnabled()) { + LOGGER.debug("close prepare stmt, stmtId = " + pstmtId); + } PreparedStatement pstmt = pstmtForId.remove(pstmtId); if(pstmt != null) { pstmtForSql.remove(pstmt.getStatement()); @@ -139,52 +201,78 @@ private int getParamCount(String sql) { */ private String prepareStmtBindValue(PreparedStatement pstmt, BindValue[] bindValues) { String sql = pstmt.getStatement(); - int paramNumber = pstmt.getParametersNumber(); int[] paramTypes = pstmt.getParametersType(); - for(int i = 0; i < paramNumber; i++) { - int paramType = paramTypes[i]; - BindValue bindValue = bindValues[i]; + + StringBuilder sb = new StringBuilder(); + int idx = 0; + for(int i = 0, len = sql.length(); i < len; i++) { + char c = sql.charAt(i); + if(c != '?') { + sb.append(c); + continue; + } + // 处理占位符? 
+ int paramType = paramTypes[idx]; + BindValue bindValue = bindValues[idx]; + idx++; + // 处理字段为空的情况 if(bindValue.isNull) { - sql = sql.replaceFirst("\\?", "NULL"); + sb.append("NULL"); continue; } - switch(paramType) { - case io.mycat.server.Fields.FIELD_TYPE_TINY: - sql = sql.replaceFirst("\\?", String.valueOf(bindValue.byteBinding)); + // 非空情况, 根据字段类型获取值 + switch(paramType & 0xff) { + case Fields.FIELD_TYPE_TINY: + sb.append(String.valueOf(bindValue.byteBinding)); break; case Fields.FIELD_TYPE_SHORT: - sql = sql.replaceFirst("\\?", String.valueOf(bindValue.shortBinding)); + sb.append(String.valueOf(bindValue.shortBinding)); break; case Fields.FIELD_TYPE_LONG: - sql = sql.replaceFirst("\\?", String.valueOf(bindValue.intBinding)); + sb.append(String.valueOf(bindValue.intBinding)); break; case Fields.FIELD_TYPE_LONGLONG: - sql = sql.replaceFirst("\\?", String.valueOf(bindValue.longBinding)); + sb.append(String.valueOf(bindValue.longBinding)); break; case Fields.FIELD_TYPE_FLOAT: - sql = sql.replaceFirst("\\?", String.valueOf(bindValue.floatBinding)); + sb.append(String.valueOf(bindValue.floatBinding)); break; case Fields.FIELD_TYPE_DOUBLE: - sql = sql.replaceFirst("\\?", String.valueOf(bindValue.doubleBinding)); + sb.append(String.valueOf(bindValue.doubleBinding)); break; case Fields.FIELD_TYPE_VAR_STRING: case Fields.FIELD_TYPE_STRING: case Fields.FIELD_TYPE_VARCHAR: + bindValue.value = varcharEscaper.asFunction().apply(String.valueOf(bindValue.value)); + sb.append("'" + bindValue.value + "'"); + break; + case Fields.FIELD_TYPE_TINY_BLOB: case Fields.FIELD_TYPE_BLOB: - sql = sql.replaceFirst("\\?", "'" + bindValue.value + "'"); + case Fields.FIELD_TYPE_MEDIUM_BLOB: + case Fields.FIELD_TYPE_LONG_BLOB: + if(bindValue.value instanceof ByteArrayOutputStream) { + byte[] bytes = ((ByteArrayOutputStream) bindValue.value).toByteArray(); + sb.append("X'" + HexFormatUtil.bytesToHexString(bytes) + "'"); + } else { + // 正常情况下不会走到else, 除非long data的存储方式(ByteArrayOutputStream)被修改 
+ LOGGER.warn("bind value is not a instance of ByteArrayOutputStream, maybe someone change the implement of long data storage!"); + sb.append("'" + bindValue.value + "'"); + } break; case Fields.FIELD_TYPE_TIME: case Fields.FIELD_TYPE_DATE: case Fields.FIELD_TYPE_DATETIME: case Fields.FIELD_TYPE_TIMESTAMP: - sql = sql.replaceFirst("\\?", "'" + bindValue.value + "'"); + sb.append("'" + bindValue.value + "'"); break; default: - sql = sql.replaceFirst("\\?", bindValue.value.toString()); + bindValue.value = varcharEscaper.asFunction().apply(String.valueOf(bindValue.value)); + sb.append(bindValue.value.toString()); break; } } - return sql; - } + + return sb.toString(); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/handler/SetHandler.java b/src/main/java/io/mycat/server/handler/SetHandler.java new file mode 100644 index 000000000..79533c7f1 --- /dev/null +++ b/src/main/java/io/mycat/server/handler/SetHandler.java @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.server.handler; + +import static io.mycat.server.parser.ServerParseSet.AUTOCOMMIT_OFF; +import static io.mycat.server.parser.ServerParseSet.AUTOCOMMIT_ON; +import static io.mycat.server.parser.ServerParseSet.CHARACTER_SET_CLIENT; +import static io.mycat.server.parser.ServerParseSet.CHARACTER_SET_CONNECTION; +import static io.mycat.server.parser.ServerParseSet.CHARACTER_SET_RESULTS; +import static io.mycat.server.parser.ServerParseSet.NAMES; +import static io.mycat.server.parser.ServerParseSet.TX_READ_COMMITTED; +import static io.mycat.server.parser.ServerParseSet.TX_READ_UNCOMMITTED; +import static io.mycat.server.parser.ServerParseSet.TX_REPEATED_READ; +import static io.mycat.server.parser.ServerParseSet.TX_SERIALIZABLE; +import static io.mycat.server.parser.ServerParseSet.XA_FLAG_OFF; +import static io.mycat.server.parser.ServerParseSet.XA_FLAG_ON; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.config.ErrorCode; +import io.mycat.config.Isolations; +import io.mycat.net.mysql.OkPacket; +import io.mycat.server.ServerConnection; +import io.mycat.server.parser.ServerParseSet; +import io.mycat.server.response.CharacterSet; +import io.mycat.util.SetIgnoreUtil; + +/** + * SET 语句处理 + * + * @author mycat + * @author zhuam + */ +public final class SetHandler { + + private static final Logger logger = LoggerFactory.getLogger(SetHandler.class); + + private static final byte[] AC_OFF = new byte[] { 7, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 }; + + public static void handle(String stmt, ServerConnection c, int offset) { + // System.out.println("SetHandler: "+stmt); + int rs = ServerParseSet.parse(stmt, offset); + switch (rs & 0xff) { + case AUTOCOMMIT_ON: + if (c.isAutocommit()) { + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + } else { + c.setPreAcStates(true); + c.commit(); + 
c.setAutocommit(true); + } + break; + case AUTOCOMMIT_OFF: { + if (c.isAutocommit()) { + c.setAutocommit(false); + c.setPreAcStates(false); + } + c.write(c.writeToBuffer(AC_OFF, c.allocate())); + break; + } + case XA_FLAG_ON: { + if (c.isAutocommit()) { + c.writeErrMessage(ErrorCode.ERR_WRONG_USED, + "set xa cmd on can't used in autocommit connection "); + return; + } + c.getSession2().setXATXEnabled(true); + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + break; + } + case XA_FLAG_OFF: { + c.writeErrMessage(ErrorCode.ERR_WRONG_USED, + "set xa cmd off not for external use "); + return; + } + case TX_READ_UNCOMMITTED: { + c.setTxIsolation(Isolations.READ_UNCOMMITTED); + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + break; + } + case TX_READ_COMMITTED: { + c.setTxIsolation(Isolations.READ_COMMITTED); + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + break; + } + case TX_REPEATED_READ: { + c.setTxIsolation(Isolations.REPEATED_READ); + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + break; + } + case TX_SERIALIZABLE: { + c.setTxIsolation(Isolations.SERIALIZABLE); + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + break; + } + case NAMES: + String charset = stmt.substring(rs >>> 8).trim(); + int index= charset.indexOf(",") ; + if(index>-1) { + //支持rails框架自动生成的SET NAMES utf8, @@SESSION.sql_auto_is_null = 0, @@SESSION.wait_timeout = 2147483, @@SESSION.sql_mode = 'STRICT_ALL_TABLES' + charset=charset.substring(0,index) ; + } + if(charset.startsWith("'")&&charset.endsWith("'")) + { + charset=charset.substring(1,charset.length()-1) ; + } + if (c.setCharset(charset)) { + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + } else { + + /** + * TODO:修复 phpAyAdmin's 的发包问题 + * 如: SET NAMES 'utf8' COLLATE 'utf8_general_ci' 错误 + */ + int beginIndex = stmt.toLowerCase().indexOf("names"); + int endIndex = stmt.toLowerCase().indexOf("collate"); + if ( beginIndex > -1 && endIndex > -1 ) { + charset = stmt.substring(beginIndex + "names".length(), 
endIndex); + //重试一次 + if (c.setCharset( charset.trim() )) { + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + } else { + c.writeErrMessage(ErrorCode.ER_UNKNOWN_CHARACTER_SET, "Unknown charset '" + charset + "'"); + } + + } else { + c.writeErrMessage(ErrorCode.ER_UNKNOWN_CHARACTER_SET, "Unknown charset '" + charset + "'"); + } + } + break; + case CHARACTER_SET_CLIENT: + case CHARACTER_SET_CONNECTION: + case CHARACTER_SET_RESULTS: + CharacterSet.response(stmt, c, rs); + break; + default: + boolean ignore = SetIgnoreUtil.isIgnoreStmt(stmt); + if ( !ignore ) { + StringBuilder s = new StringBuilder(); + logger.warn(s.append(c).append(stmt).append(" is not recoginized and ignored").toString()); + } + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + } + } + +} diff --git a/src/main/java/io/mycat/server/handler/ShowCache.java b/src/main/java/io/mycat/server/handler/ShowCache.java new file mode 100644 index 000000000..51b9c0ad4 --- /dev/null +++ b/src/main/java/io/mycat/server/handler/ShowCache.java @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.server.handler; + +import java.nio.ByteBuffer; +import java.util.Map; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.cache.CachePool; +import io.mycat.cache.CacheService; +import io.mycat.cache.CacheStatic; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.Fields; +import io.mycat.manager.ManagerConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + +public class ShowCache { + + private static final int FIELD_COUNT = 8; + private static final ResultSetHeaderPacket header = PacketUtil + .getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + + fields[i] = PacketUtil.getField("CACHE", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("MAX", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("CUR", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("ACCESS", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("HIT", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("PUT", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + 
fields[i] = PacketUtil.getField("LAST_ACCESS", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("LAST_PUT", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + } + + public static void execute(ManagerConnection c) { + + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + CacheService cacheService = MycatServer.getInstance().getCacheService(); + for (Map.Entry entry : cacheService + .getAllCachePools().entrySet()) { + String cacheName=entry.getKey(); + CachePool cachePool = entry.getValue(); + if (cachePool instanceof LayerCachePool) { + for (Map.Entry staticsEntry : ((LayerCachePool) cachePool) + .getAllCacheStatic().entrySet()) { + RowDataPacket row = getRow(cacheName+'.'+staticsEntry.getKey(), + staticsEntry.getValue(), c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + } else { + RowDataPacket row = getRow(cacheName, + cachePool.getCacheStatic(), c.getCharset()); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + } + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // write buffer + c.write(buffer); + } + + private static RowDataPacket getRow(String poolName, + CacheStatic cacheStatic, String charset) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(poolName, charset)); + // max size + row.add(LongUtil.toBytes(cacheStatic.getMaxSize())); + row.add(LongUtil.toBytes(cacheStatic.getItemSize())); + row.add(LongUtil.toBytes(cacheStatic.getAccessTimes())); + row.add(LongUtil.toBytes(cacheStatic.getHitTimes())); + 
row.add(LongUtil.toBytes(cacheStatic.getPutTimes())); + row.add(LongUtil.toBytes(cacheStatic.getLastAccesTime())); + row.add(LongUtil.toBytes(cacheStatic.getLastPutTime())); + return row; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/sqlhandler/ShowHandler.java b/src/main/java/io/mycat/server/handler/ShowHandler.java similarity index 83% rename from src/main/java/io/mycat/server/sqlhandler/ShowHandler.java rename to src/main/java/io/mycat/server/handler/ShowHandler.java index 8a732eb60..10793326c 100644 --- a/src/main/java/io/mycat/server/sqlhandler/ShowHandler.java +++ b/src/main/java/io/mycat/server/handler/ShowHandler.java @@ -21,15 +21,12 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.sqlhandler; +package io.mycat.server.handler; -import io.mycat.server.MySQLFrontConnection; +import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; import io.mycat.server.parser.ServerParseShow; -import io.mycat.server.response.ShowDatabases; -import io.mycat.server.response.ShowMyCATCluster; -import io.mycat.server.response.ShowMyCatStatus; -import io.mycat.server.response.ShowTables; +import io.mycat.server.response.*; import io.mycat.util.StringUtil; /** @@ -37,7 +34,7 @@ */ public final class ShowHandler { - public static void handle(String stmt, MySQLFrontConnection c, int offset) { + public static void handle(String stmt, ServerConnection c, int offset) { // 排除 “ ` ” 符号 stmt = StringUtil.replaceChars(stmt, "`", null); @@ -50,6 +47,9 @@ public static void handle(String stmt, MySQLFrontConnection c, int offset) { case ServerParseShow.TABLES: ShowTables.response(c, stmt,type); break; + case ServerParseShow.FULLTABLES: + ShowFullTables.response(c, stmt,type); + break; case ServerParseShow.MYCAT_STATUS: ShowMyCatStatus.response(c); break; diff --git a/src/main/java/io/mycat/server/sqlhandler/StartHandler.java b/src/main/java/io/mycat/server/handler/StartHandler.java similarity index 63% 
rename from src/main/java/io/mycat/server/sqlhandler/StartHandler.java rename to src/main/java/io/mycat/server/handler/StartHandler.java index ef0d3647c..b599b4e60 100644 --- a/src/main/java/io/mycat/server/sqlhandler/StartHandler.java +++ b/src/main/java/io/mycat/server/handler/StartHandler.java @@ -21,9 +21,10 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.sqlhandler; +package io.mycat.server.handler; -import io.mycat.server.MySQLFrontConnection; +import io.mycat.config.ErrorCode; +import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParse; import io.mycat.server.parser.ServerParseStart; @@ -31,22 +32,23 @@ * @author mycat */ public final class StartHandler { - private static final byte[] AC_OFF = new byte[] { 7, 0, 0, 1, 0, 0, 0, 0, - 0, 0, 0 }; - - public static void handle(String stmt, MySQLFrontConnection c, int offset) { - switch (ServerParseStart.parse(stmt, offset)) { - case ServerParseStart.TRANSACTION: - if (c.isAutocommit()) { - c.setAutocommit(false); - c.write(AC_OFF); - } else { - c.getSession2().commit(); - } - break; - default: - c.execute(stmt, ServerParse.START); - } - } + private static final byte[] AC_OFF = new byte[] { 7, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0 }; + public static void handle(String stmt, ServerConnection c, int offset) { + switch (ServerParseStart.parse(stmt, offset)) { + case ServerParseStart.TRANSACTION: + if (c.isAutocommit()) + { + c.write(c.writeToBuffer(AC_OFF, c.allocate())); + }else + { + c.getSession2().commit() ; + } + c.setAutocommit(false); + break; + default: + c.execute(stmt, ServerParse.START); + } + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/sqlhandler/UseHandler.java b/src/main/java/io/mycat/server/handler/UseHandler.java similarity index 82% rename from src/main/java/io/mycat/server/sqlhandler/UseHandler.java rename to src/main/java/io/mycat/server/handler/UseHandler.java index af310db87..95cc3ce25 100644 --- 
a/src/main/java/io/mycat/server/sqlhandler/UseHandler.java +++ b/src/main/java/io/mycat/server/handler/UseHandler.java @@ -21,26 +21,29 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.sqlhandler; - -import io.mycat.server.ErrorCode; -import io.mycat.server.FrontendPrivileges; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.OkPacket; -import io.mycat.util.StringUtil; +package io.mycat.server.handler; +import java.nio.ByteBuffer; import java.util.Set; +import io.mycat.config.ErrorCode; +import io.mycat.net.handler.FrontendPrivileges; +import io.mycat.net.mysql.OkPacket; +import io.mycat.server.ServerConnection; +import io.mycat.util.StringUtil; + /** * @author mycat */ public final class UseHandler { - public static void handle(String sql, MySQLFrontConnection c, int offset) { + public static void handle(String sql, ServerConnection c, int offset) { String schema = sql.substring(offset).trim(); int length = schema.length(); if (length > 0) { - if(schema.endsWith(";")) schema=schema.substring(0,schema.length()-1); + if(schema.endsWith(";")) { + schema = schema.substring(0, schema.length() - 1); + } schema = StringUtil.replaceChars(schema, "`", null); length=schema.length(); if (schema.charAt(0) == '\'' && schema.charAt(length - 1) == '\'') { @@ -61,7 +64,8 @@ public static void handle(String sql, MySQLFrontConnection c, int offset) { Set schemas = privileges.getUserSchemas(user); if (schemas == null || schemas.size() == 0 || schemas.contains(schema)) { c.setSchema(schema); - c.write(OkPacket.OK); + ByteBuffer buffer = c.allocate(); + c.write(c.writeToBuffer(OkPacket.OK, buffer)); } else { String msg = "Access denied for user '" + c.getUser() + "' to database '" + schema + "'"; c.writeErrMessage(ErrorCode.ER_DBACCESS_DENIED_ERROR, msg); diff --git a/src/main/java/io/mycat/server/interceptor/impl/DefaultSqlInterceptor.java b/src/main/java/io/mycat/server/interceptor/impl/DefaultSqlInterceptor.java index 
917de3c2e..96c4f5ce0 100644 --- a/src/main/java/io/mycat/server/interceptor/impl/DefaultSqlInterceptor.java +++ b/src/main/java/io/mycat/server/interceptor/impl/DefaultSqlInterceptor.java @@ -1,15 +1,10 @@ package io.mycat.server.interceptor.impl; +import io.mycat.MycatServer; +import io.mycat.config.model.SystemConfig; import io.mycat.server.interceptor.SQLInterceptor; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - public class DefaultSqlInterceptor implements SQLInterceptor { - private static final Pattern p = Pattern.compile("\\'", Pattern.LITERAL); - - private static final String TARGET_STRING = "''"; - private static final char ESCAPE_CHAR = '\\'; private static final int TARGET_STRING_LENGTH = 2; @@ -17,7 +12,10 @@ public class DefaultSqlInterceptor implements SQLInterceptor { /** * mysql driver对'转义与\',解析前改为foundationdb parser支持的'' add by sky * - * @param stmt + * @param sql + * @update by jason@dayima.com replace regex with general string walking + * avoid sql being destroyed in case of some mismatch + * maybe some performance enchanced * @return */ public static String processEscape(String sql) { @@ -25,15 +23,19 @@ public static String processEscape(String sql) { if ((sql == null) || ((firstIndex = sql.indexOf(ESCAPE_CHAR)) == -1)) { return sql; } else { - int lastIndex = sql.lastIndexOf(ESCAPE_CHAR, sql.length() - 2);// 不用考虑结尾字符为转义符 - Matcher matcher = p.matcher(sql.substring(firstIndex, lastIndex - + TARGET_STRING_LENGTH)); - String replacedStr = (lastIndex == firstIndex) ? 
matcher - .replaceFirst(TARGET_STRING) : matcher - .replaceAll(TARGET_STRING); + int lastIndex = sql.lastIndexOf(ESCAPE_CHAR, sql.length() - 2) + TARGET_STRING_LENGTH; StringBuilder sb = new StringBuilder(sql); - sb.replace(firstIndex, lastIndex + TARGET_STRING_LENGTH, - replacedStr); + for (int i = firstIndex; i < lastIndex; i ++) { + if (sb.charAt(i) == '\\') { + if (i + 1 < lastIndex + && sb.charAt(i + 1) == '\'') { + //replace + sb.setCharAt(i, '\''); + } + //roll over + i ++; + } + } return sb.toString(); } } @@ -44,10 +46,18 @@ public static String processEscape(String sql) { */ @Override public String interceptSQL(String sql, int sqlType) { - String result = processEscape(sql); + if("fdbparser".equals(MycatServer.getInstance().getConfig().getSystem().getDefaultSqlParser())) { + sql = processEscape(sql); + } + // 全局表一致性 sql 改写拦截 - result = GlobalTableUtil.interceptSQL(result, sqlType); - return result; + SystemConfig system = MycatServer.getInstance().getConfig().getSystem(); + if(system != null && system.getUseGlobleTableCheck() == 1) // 全局表一致性检测是否开启 + sql = GlobalTableUtil.interceptSQL(sql, sqlType); + + // other interceptors put in here .... 
+ + return sql; } } diff --git a/src/main/java/io/mycat/server/interceptor/impl/GlobalTableUtil.java b/src/main/java/io/mycat/server/interceptor/impl/GlobalTableUtil.java index abd09032a..f4610c27e 100644 --- a/src/main/java/io/mycat/server/interceptor/impl/GlobalTableUtil.java +++ b/src/main/java/io/mycat/server/interceptor/impl/GlobalTableUtil.java @@ -1,493 +1,720 @@ -package io.mycat.server.interceptor.impl; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReentrantLock; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.apache.commons.lang.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.alibaba.druid.sql.ast.SQLExpr; -import com.alibaba.druid.sql.ast.SQLName; -import com.alibaba.druid.sql.ast.SQLOrderBy; -import com.alibaba.druid.sql.ast.SQLOrderingSpecification; -import com.alibaba.druid.sql.ast.SQLStatement; -import com.alibaba.druid.sql.ast.statement.SQLSelectOrderByItem; -import com.alibaba.druid.sql.ast.statement.SQLTableSource; -import com.alibaba.druid.sql.ast.statement.SQLUpdateSetItem; -import com.alibaba.druid.sql.ast.statement.SQLInsertStatement.ValuesClause; -import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement; -import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlUpdateStatement; -import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock.Limit; -import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; -import com.alibaba.fastjson.JSON; - -import io.mycat.MycatServer; -import io.mycat.backend.MySQLDataSource; -import io.mycat.backend.PhysicalDBNode; -import io.mycat.backend.PhysicalDBPool; -import io.mycat.backend.PhysicalDatasource; -import 
io.mycat.backend.heartbeat.MySQLConsistencyChecker; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.TableConfig; -import io.mycat.server.parser.ServerParse; -import io.mycat.sqlengine.SQLQueryResult; -import io.mycat.util.StringUtil; - -/** - * @author digdeep@126.com - * 全局表一致性检查 和 拦截 - */ -public class GlobalTableUtil{ - private static final Logger LOGGER = LoggerFactory.getLogger(GlobalTableUtil.class); - private static Map globalTableMap = new ConcurrentHashMap<>(); - /** 全局表 保存修改时间戳 的字段名,用于全局表一致性检查 */ - public static final String GLOBAL_TABLE_MYCAT_COLUMN = "_mycat_op_time"; - public static final String COUNT_COLUMN = "record_count"; - public static final String MAX_COLUMN = "max_timestamp"; - public static final String INNER_COLUMN = "inner_col_exist"; - private static String operationTimestamp = String.valueOf(new Date().getTime()); - private static volatile int isInnerColumnCheckFinished = 0; - private static final ReentrantLock lock = new ReentrantLock(false); - private static List>> innerColumnNotExist = new ArrayList<>(); - - public static Map getGlobalTableMap() { - return globalTableMap; - } - - static { - getGlobalTable(); // 初始化 globalTableMap - } - - public static String interceptSQL(String sql, int sqlType){ - return GlobalTableUtil.consistencyInterceptor(sql, sqlType); - } - - public static String consistencyInterceptor(String sql, int sqlType){ - // 统一使用mycat-server所在机器的时间,防止不同mysqld时间不同步 - operationTimestamp = String.valueOf(new Date().getTime()); - - LOGGER.debug("before intercept: " + sql); - - if(sqlType == ServerParse.INSERT){ - sql = convertInsertSQL(sql); - } - if(sqlType == ServerParse.UPDATE){ - sql = convertUpdateSQL(sql); - } - - LOGGER.debug("after intercept: " + sql); - /* - 目前 mycat-server不支持 replace 语句,报错如下: - ERROR 1064 (HY000): ReplaceStatement can't be supported, - use insert into ...on duplicate key update... 
instead - - if(sqlType == ServerParse.REPLACE){ - return convertReplaceSQL(sql); - } - */ - return sql; - } - - /** - * Syntax: - INSERT [LOW_PRIORITY | DELAYED | HIGH_PRIORITY] [IGNORE] - [INTO] tbl_name - [PARTITION (partition_name,...)] - [(col_name,...)] - {VALUES | VALUE} ({expr | DEFAULT},...),(...),... - [ ON DUPLICATE KEY UPDATE - col_name=expr - [, col_name=expr] ... ] - - Or: - - INSERT [LOW_PRIORITY | DELAYED | HIGH_PRIORITY] [IGNORE] - [INTO] tbl_name - [PARTITION (partition_name,...)] - SET col_name={expr | DEFAULT}, ... - [ ON DUPLICATE KEY UPDATE - col_name=expr - [, col_name=expr] ... ] - - Or: - - INSERT [LOW_PRIORITY | HIGH_PRIORITY] [IGNORE] - [INTO] tbl_name - [PARTITION (partition_name,...)] - [(col_name,...)] - SELECT ... - [ ON DUPLICATE KEY UPDATE - col_name=expr - [, col_name=expr] ... ] - mysql> insert user value (33333333,'ddd'); - mysql> insert into user value (333333,'ddd'); - mysql> insert user values (3333,'ddd'); - * insert into user(id,name) valueS(1111,'dig'), - * (1111, 'dig'), (1111,'dig') ,(1111,'dig'); - * @param sql - * @return - */ - private static String convertInsertSQL(String sql){ - try{ - MySqlStatementParser parser = new MySqlStatementParser(sql); - SQLStatement statement = parser.parseStatement(); - MySqlInsertStatement insert = (MySqlInsertStatement)statement; - String tableName = StringUtil.removeBackquote(insert.getTableName().getSimpleName()); - if(!isGlobalTable(tableName)) - return sql; - if(!isInnerColExist(tableName)) - return sql; - - List columns = insert.getColumns(); - if(columns == null || columns.size() <= 0) - return sql; - - if(insert.getQuery() != null) // insert into tab select - return sql; - - StringBuilder sb = new StringBuilder(200) // 指定初始容量可以提高性能 - .append("insert into ") - .append(tableName).append("("); - int idx = -1; - for(int i = 0; i < columns.size(); i++) { - if(i < columns.size() - 1) - sb.append(columns.get(i).toString()).append(","); - else - sb.append(columns.get(i).toString()); - 
String column = StringUtil.removeBackquote(insert.getColumns().get(i).toString()); - if(column.equalsIgnoreCase(GLOBAL_TABLE_MYCAT_COLUMN)) - idx = i; - } - if(idx <= -1) - sb.append(",").append(GLOBAL_TABLE_MYCAT_COLUMN); - sb.append(")"); - - sb.append(" values"); - List vcl = insert.getValuesList(); - if(vcl != null && vcl.size() > 1){ // 批量insert - for(int j=0; j valuse = insert.getValues().getValues(); - appendValues(valuse, sb, idx); - } - - List dku = insert.getDuplicateKeyUpdate(); - if(dku != null && dku.size() > 0){ - sb.append(" on duplicate key update "); - for(int i=0; i 0){ - for(SQLQueryResult> map : innerColumnNotExist){ - if(map != null && tableName.equalsIgnoreCase(map.getTableName())){ - StringBuilder warnStr = new StringBuilder(map.getDataNode()) - .append(".").append(tableName).append(" inner column: ") - .append(GLOBAL_TABLE_MYCAT_COLUMN) - .append(" is not exist."); - LOGGER.warn(warnStr.toString()); - return false; // tableName 全局表没有内部列 - } - } - } - return true; // tableName 有内部列 - } - - private static StringBuilder appendValues(List valuse, StringBuilder sb, int idx){ - sb.append("("); - for(int i = 0; i < valuse.size(); i++) { - if(i < valuse.size() - 1){ - if(i != idx) - sb.append(valuse.get(i).toString()).append(","); - else - sb.append(operationTimestamp).append(","); - }else{ - if(i != idx) - sb.append(valuse.get(i).toString()); - else - sb.append(operationTimestamp); - } - } - if(idx <= -1) - sb.append(",").append(operationTimestamp); - return sb.append(")"); - } - - /** - * UPDATE [LOW_PRIORITY] [IGNORE] table_reference - SET col_name1={expr1|DEFAULT} [, col_name2={expr2|DEFAULT}] ... - [WHERE where_condition] - [ORDER BY ...] - [LIMIT row_count] - - Multiple-table syntax: - - UPDATE [LOW_PRIORITY] [IGNORE] table_references - SET col_name1={expr1|DEFAULT} [, col_name2={expr2|DEFAULT}] ... 
- [WHERE where_condition] - - update user, tuser set user.name='dddd',tuser.pwd='aaa' - where user.id=2 and tuser.id=0; - * @param sql update tuser set pwd='aaa', name='digdee' where id=0; - * @return - */ - public static String convertUpdateSQL(String sql){ - try{ - MySqlStatementParser parser = new MySqlStatementParser(sql); - SQLStatement stmt = parser.parseStatement(); - MySqlUpdateStatement update = (MySqlUpdateStatement)stmt; - SQLTableSource ts = update.getTableSource(); - if(ts != null && ts.toString().contains(",")){ - System.out.println(ts.toString()); - LOGGER.warn("Do not support Multiple-table udpate syntax..."); - return sql; - } - - String tableName = StringUtil.removeBackquote(update.getTableName().getSimpleName()); - if(!isGlobalTable(tableName)) - return sql; - if(!isInnerColExist(tableName)) - return sql; // 没有内部列 - - StringBuilder sb = new StringBuilder(150); - - String where = null; - if(update.getWhere() != null) - where = update.getWhere().toString(); - - SQLOrderBy orderBy = update.getOrderBy(); - Limit limit = update.getLimit(); - - sb.append("update ").append(tableName).append(" set "); - List items = update.getItems(); - boolean flag = false; - for(int i=0; i 0){ - sb.append(" order by "); - for(int i=0; i schemaMap = config.getSchemas(); - SchemaConfig schemaMconfig = null; - for(String key : schemaMap.keySet()){ - if(schemaMap.get(key) != null){ - schemaMconfig = schemaMap.get(key); - Map tableMap = schemaMconfig.getTables(); - if(tableMap != null){ - for(String k : tableMap.keySet()){ - TableConfig table = tableMap.get(k); - if(table != null && table.isGlobalTable()){ - globalTableMap.put(table.getName().toUpperCase(), table); - } - } - } - } - } - } - - public static void consistencyCheck() { - MycatConfig config = MycatServer.getInstance().getConfig(); - for(String key : globalTableMap.keySet()){ - TableConfig table = globalTableMap.get(key); - //
dataNodeList = table.getDataNodes(); - - // 记录本次已经执行的datanode - // 多个 datanode 对应到同一个 PhysicalDatasource 只执行一次 - Map executedMap = new HashMap<>(); - for(String nodeName : dataNodeList){ - Map map = config.getDataNodes(); - for(String k2 : map.keySet()){ - // - PhysicalDBNode dBnode = map.get(k2); - if(nodeName.equals(dBnode.getName())){ // dn1,dn2,dn3 - PhysicalDBPool pool = dBnode.getDbPool(); - Collection allDS = pool.genAllDataSources(); - for(PhysicalDatasource pds : allDS){ - if(pds instanceof MySQLDataSource){ - MySQLDataSource mds = (MySQLDataSource)pds; - if(executedMap.get(pds.getName()) == null){ - MySQLConsistencyChecker checker = - new MySQLConsistencyChecker(mds, table.getName()); - - checker.checkInnerColumnExist(); - while(isInnerColumnCheckFinished <= 0){ - LOGGER.debug("isInnerColumnCheckFinished:" + isInnerColumnCheckFinished); - try { - TimeUnit.SECONDS.sleep(1); - } catch (InterruptedException e) { - LOGGER.warn(e.getMessage()); - } - } - LOGGER.debug("isInnerColumnCheckFinished:" + isInnerColumnCheckFinished); - - checker = new MySQLConsistencyChecker(mds, table.getName()); - checker.checkRecordCout(); - try { - TimeUnit.SECONDS.sleep(1); - } catch (InterruptedException e) { - LOGGER.warn(e.getMessage()); - } - - checker = new MySQLConsistencyChecker(mds, table.getName()); - checker.checkMaxTimeStamp(); - - executedMap.put(pds.getName(), nodeName); - } - } - } - } - } - } - } - } - - public static List>> - finished(List>> list){ - lock.lock(); - try{ - //[{"dataNode":"db3","result":{"count(*)":"1"},"success":true,"tableName":"COMPANY"}] - LOGGER.debug("list:::::::::::" + JSON.toJSONString(list)); - for(SQLQueryResult> map : list){ - Map row = map.getResult(); - if(row != null){ - if(row.containsKey(GlobalTableUtil.MAX_COLUMN)){ - LOGGER.info(map.getDataNode() + "." + map.getTableName() - + "." 
+ GlobalTableUtil.MAX_COLUMN - + ": "+ map.getResult().get(GlobalTableUtil.MAX_COLUMN)); - } - if(row.containsKey(GlobalTableUtil.COUNT_COLUMN)){ - LOGGER.info(map.getDataNode() + "." + map.getTableName() - + "." + GlobalTableUtil.COUNT_COLUMN - + ": "+ map.getResult().get(GlobalTableUtil.COUNT_COLUMN)); - } - if(row.containsKey(GlobalTableUtil.INNER_COLUMN)){ - int count = 0; - try{ - if(StringUtils.isNotBlank(row.get(GlobalTableUtil.INNER_COLUMN))) - count = Integer.parseInt(row.get(GlobalTableUtil.INNER_COLUMN).trim()); - }catch(NumberFormatException e){ - LOGGER.warn(row.get(GlobalTableUtil.INNER_COLUMN) + ", " + e.getMessage()); - }finally{ - if(count <= 0){ - LOGGER.warn(map.getDataNode() + "." + map.getTableName() - + " inner column: " - + GlobalTableUtil.GLOBAL_TABLE_MYCAT_COLUMN - + " is not exist."); - if(StringUtils.isNotBlank(map.getTableName())){ - for(SQLQueryResult> sqr : innerColumnNotExist){ - String name = map.getTableName(); - String node = map.getDataNode(); - if(name != null && !name.equalsIgnoreCase(sqr.getTableName()) - || node != null && !node.equalsIgnoreCase(sqr.getDataNode())){ - innerColumnNotExist.add(map); - } - } - } - } - isInnerColumnCheckFinished = 1; - } - } - } - } - }finally{ - lock.unlock(); - } - return list; - } - - private static boolean isGlobalTable(String tableName){ - if(globalTableMap != null && globalTableMap.size() > 0){ - return globalTableMap.get(tableName.toUpperCase()) != null; - } - return false; - } -} +package io.mycat.server.interceptor.impl; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantLock; + +import org.apache.commons.lang.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.alibaba.druid.sql.ast.SQLExpr; +import 
com.alibaba.druid.sql.ast.SQLName; +import com.alibaba.druid.sql.ast.SQLOrderBy; +import com.alibaba.druid.sql.ast.SQLOrderingSpecification; +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.expr.SQLCharExpr; +import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.druid.sql.ast.expr.SQLInSubQueryExpr; +import com.alibaba.druid.sql.ast.statement.SQLAlterTableStatement; +import com.alibaba.druid.sql.ast.statement.SQLCharacterDataType; +import com.alibaba.druid.sql.ast.statement.SQLColumnDefinition; +import com.alibaba.druid.sql.ast.statement.SQLConstraint; +import com.alibaba.druid.sql.ast.statement.SQLCreateTableStatement; +import com.alibaba.druid.sql.ast.statement.SQLExprTableSource; +import com.alibaba.druid.sql.ast.statement.SQLInsertStatement.ValuesClause; +import com.alibaba.druid.sql.ast.statement.SQLSelectOrderByItem; +import com.alibaba.druid.sql.ast.statement.SQLTableElement; +import com.alibaba.druid.sql.ast.statement.SQLTableSource; +import com.alibaba.druid.sql.ast.statement.SQLUpdateSetItem; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock.Limit; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlUpdateStatement; +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import com.alibaba.fastjson.JSON; + +import io.mycat.MycatServer; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.heartbeat.MySQLConsistencyChecker; +import io.mycat.backend.mysql.nio.MySQLDataSource; +import io.mycat.config.MycatConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.server.parser.ServerParse; +import io.mycat.sqlengine.SQLQueryResult; +import io.mycat.util.StringUtil; + +/** + * @author 
digdeep@126.com + * 全局表一致性检查 和 拦截 + */ +public class GlobalTableUtil{ + private static final Logger LOGGER = LoggerFactory.getLogger(GlobalTableUtil.class); + private static Map globalTableMap = new ConcurrentHashMap<>(); + /** 全局表 保存修改时间戳 的字段名,用于全局表一致性检查 */ + public static final String GLOBAL_TABLE_MYCAT_COLUMN = "_mycat_op_time"; + public static final String COUNT_COLUMN = "record_count"; + public static final String MAX_COLUMN = "max_timestamp"; + public static final String INNER_COLUMN = "inner_col_exist"; + private static String operationTimestamp = String.valueOf(new Date().getTime()); + private static volatile int isInnerColumnCheckFinished = 0; + private static volatile int isColumnCountCheckFinished = 0; + private static final ReentrantLock lock = new ReentrantLock(false); + private static List>> innerColumnNotExist = new ArrayList<>(); + private static Map tableColumsMap = new ConcurrentHashMap<>(); + + public static Map getGlobalTableMap() { + return globalTableMap; + } + + static { + getGlobalTable(); // 初始化 globalTableMap + } + + public static String interceptSQL(String sql, int sqlType){ + return GlobalTableUtil.consistencyInterceptor(sql, sqlType); + } + + public static String consistencyInterceptor(String sql, int sqlType){ + // 统一使用mycat-server所在机器的时间,防止不同mysqld时间不同步 + operationTimestamp = String.valueOf(new Date().getTime()); + + LOGGER.debug("before intercept: " + sql); + + if(sqlType == ServerParse.INSERT){ + sql = convertInsertSQL(sql); + } + if(sqlType == ServerParse.UPDATE){ + sql = convertUpdateSQL(sql); + } + if(sqlType == ServerParse.DDL){ + LOGGER.info(" DDL to modify global table."); + sql = handleDDLSQL(sql); + } + + LOGGER.debug("after intercept: " + sql); + /* + 目前 mycat-server不支持 replace 语句,报错如下: + ERROR 1064 (HY000): ReplaceStatement can't be supported, + use insert into ...on duplicate key update... 
instead + + if(sqlType == ServerParse.REPLACE){ + return convertReplaceSQL(sql); + } + */ + return sql; + } + + /* + * Name: 'ALTER TABLE' + Description: + Syntax: + ALTER [IGNORE] TABLE tbl_name + [alter_specification [, alter_specification] ...] + [partition_options] + 如果 DDL 修改了表结构,需要重新获得表的列list + */ + private static String handleDDLSQL(String sql){ + MySqlStatementParser parser = new MySqlStatementParser(sql); + SQLStatement statement = parser.parseStatement(); + // druid高版本去掉了 MySqlAlterTableStatement,在其父类 SQLAlterTableStatement 直接支持 mysql alter table 语句 +// MySqlAlterTableStatement alter = (MySqlAlterTableStatement)statement; + SQLExprTableSource source = getDDLTableSource(statement); + if (source == null) + return sql; + String tableName = StringUtil.removeBackquote(source.toString()); + if(StringUtils.isNotBlank(tableName)) + tableName = tableName.trim(); + else + return sql; + + if(!isGlobalTable(tableName)) + return sql; + + //增加对全局表create语句的解析,如果是建表语句创建的是全局表,且表中不含"_mycat_op_time"列 + //则为其增加"_mycat_op_time"列,方便导入数据。 + sql = addColumnIfCreate(sql, statement); + + final String tn = tableName; + MycatServer.getInstance().getListeningExecutorService().execute(new Runnable() { + public void run() { + try { + TimeUnit.SECONDS.sleep(3); // DDL发出之后,等待3秒让DDL分发完成 + } catch (InterruptedException e) { + } + reGetColumnsForTable(tn); // DDL 语句可能会增删 列,所以需要重新获取 全局表的 列list + } + }); + + MycatServer.getInstance().getListeningExecutorService().execute(new Runnable() { + public void run() { + try { + TimeUnit.MINUTES.sleep(10); // DDL发出之后,等待10分钟再次执行,全局表一般很小,DDL耗时不会超过10分钟 + } catch (InterruptedException e) { + } + reGetColumnsForTable(tn); // DDL 语句可能会增删 列,所以需要重新获取 全局表的 列list + } + }); + return sql; + } + + static String addColumnIfCreate(String sql, SQLStatement statement) { + if (isCreate(statement) && sql.trim().toUpperCase().startsWith("CREATE TABLE ") && !hasGlobalColumn(statement)) { + SQLColumnDefinition column = new SQLColumnDefinition(); + column.setDataType(new 
SQLCharacterDataType("bigint")); + column.setName(new SQLIdentifierExpr(GLOBAL_TABLE_MYCAT_COLUMN)); + column.setComment(new SQLCharExpr("全局表保存修改时间戳的字段名")); + ((SQLCreateTableStatement)statement).getTableElementList().add(column); + } + return statement.toString(); + } + + private static boolean hasGlobalColumn(SQLStatement statement){ + for (SQLTableElement tableElement : ((SQLCreateTableStatement)statement).getTableElementList()) { + SQLName sqlName = null; + if (tableElement instanceof SQLColumnDefinition) { + sqlName = ((SQLColumnDefinition)tableElement).getName(); + } + if (sqlName != null) { + String simpleName = sqlName.getSimpleName(); + simpleName = StringUtil.removeBackquote(simpleName); + if (tableElement instanceof SQLColumnDefinition && GLOBAL_TABLE_MYCAT_COLUMN.equalsIgnoreCase(simpleName)) { + return true; + } + } + } + return false; + } + + private static SQLExprTableSource getDDLTableSource(SQLStatement statement) { + SQLExprTableSource source = null; + if (statement instanceof SQLAlterTableStatement) { + source = ((SQLAlterTableStatement)statement).getTableSource(); + + } else if (isCreate(statement)) { + source = ((SQLCreateTableStatement)statement).getTableSource(); + } + return source; + } + + private static boolean isCreate(SQLStatement statement) { + return statement instanceof SQLCreateTableStatement; + } + + /** + * Syntax: + INSERT [LOW_PRIORITY | DELAYED | HIGH_PRIORITY] [IGNORE] + [INTO] tbl_name + [PARTITION (partition_name,...)] + [(col_name,...)] + {VALUES | VALUE} ({expr | DEFAULT},...),(...),... + [ ON DUPLICATE KEY UPDATE + col_name=expr + [, col_name=expr] ... ] + + Or: + + INSERT [LOW_PRIORITY | DELAYED | HIGH_PRIORITY] [IGNORE] + [INTO] tbl_name + [PARTITION (partition_name,...)] + SET col_name={expr | DEFAULT}, ... + [ ON DUPLICATE KEY UPDATE + col_name=expr + [, col_name=expr] ... ] + + Or: + + INSERT [LOW_PRIORITY | HIGH_PRIORITY] [IGNORE] + [INTO] tbl_name + [PARTITION (partition_name,...)] + [(col_name,...)] + SELECT ... 
+ [ ON DUPLICATE KEY UPDATE + col_name=expr + [, col_name=expr] ... ] + mysql> insert user value (33333333,'ddd'); + mysql> insert into user value (333333,'ddd'); + mysql> insert user values (3333,'ddd'); + * insert into user(id,name) valueS(1111,'dig'), + * (1111, 'dig'), (1111,'dig') ,(1111,'dig'); + * @param sql + * @return + */ + private static String convertInsertSQL(String sql){ + try{ + MySqlStatementParser parser = new MySqlStatementParser(sql); + SQLStatement statement = parser.parseStatement(); + MySqlInsertStatement insert = (MySqlInsertStatement)statement; + String tableName = StringUtil.removeBackquote(insert.getTableName().getSimpleName()); + if(!isGlobalTable(tableName)) + return sql; + if(!isInnerColExist(tableName)) + return sql; + + if(insert.getQuery() != null) // insert into tab select + return sql; + + StringBuilder sb = new StringBuilder(200) // 指定初始容量可以提高性能 + .append("insert into ").append(tableName); + + List columns = insert.getColumns(); + + int idx = -1; + int colSize = -1; + + if(columns == null || columns.size() <= 0){ // insert 没有带列名:insert into t values(xxx,xxx) + String columnsList = tableColumsMap.get(tableName.toUpperCase()); + if(StringUtils.isNotBlank(columnsList)){ //"id,name,_mycat_op_time" + //newSQL = "insert into t(id,name,_mycat_op_time)"; + // 构建一个虚拟newSQL来寻找 内部列的索引位置 + String newSQL = "insert into " + tableName + "(" + columnsList + ")"; + MySqlStatementParser newParser = new MySqlStatementParser(newSQL); + SQLStatement newStatement = newParser.parseStatement(); + MySqlInsertStatement newInsert = (MySqlInsertStatement)newStatement; + List newColumns = newInsert.getColumns(); + for(int i = 0; i < newColumns.size(); i++) { + String column = StringUtil.removeBackquote(newInsert.getColumns().get(i).toString()); + if(column.equalsIgnoreCase(GLOBAL_TABLE_MYCAT_COLUMN)) + idx = i; // 找到 内部列的索引位置 + } + colSize = newColumns.size(); + sb.append("(").append(columnsList).append(")"); + }else{ // tableName 是全局表,但是 tableColumsMap 
没有其对应的列list,这种情况不应该存在 + LOGGER.warn("you'd better do not use 'insert into t values(a,b)' Syntax (without column list) on global table, " + + "If you do. Then you must make sure inner column '_mycat_op_time' is last column of global table: " + + tableName + " in all database. Good luck. ^_^"); + // 我们假定 内部列位于表中所有列的最后,后面我们在values 子句的最后 给他附加上时间戳 + } + }else{ // insert 语句带有 列名 + sb.append("("); + for(int i = 0; i < columns.size(); i++) { + if(i < columns.size() - 1) + sb.append(columns.get(i).toString()).append(","); + else + sb.append(columns.get(i).toString()); + String column = StringUtil.removeBackquote(insert.getColumns().get(i).toString()); + if(column.equalsIgnoreCase(GLOBAL_TABLE_MYCAT_COLUMN)) + idx = i; + } + if(idx <= -1) + sb.append(",").append(GLOBAL_TABLE_MYCAT_COLUMN); + sb.append(")"); + colSize = columns.size(); + } + + sb.append(" values"); + List vcl = insert.getValuesList(); + if(vcl != null && vcl.size() > 1){ // 批量insert + for(int j=0; j valuse = insert.getValues().getValues(); + appendValues(valuse, sb, idx, colSize); + } + + List dku = insert.getDuplicateKeyUpdate(); + if(dku != null && dku.size() > 0){ + sb.append(" on duplicate key update "); + for(int i=0; i columns = insert.getColumns(); +// System.out.println(columns.size()); + + String sql = "alter table t add colomn name varchar(30)"; + System.out.println(handleDDLSQL(sql)); + } + + private static boolean isInnerColExist(String tableName){ + if(innerColumnNotExist.size() > 0){ + for(SQLQueryResult> map : innerColumnNotExist){ + if(map != null && tableName.equalsIgnoreCase(map.getTableName())){ + StringBuilder warnStr = new StringBuilder(map.getDataNode()) + .append(".").append(tableName).append(" inner column: ") + .append(GLOBAL_TABLE_MYCAT_COLUMN) + .append(" is not exist."); + LOGGER.warn(warnStr.toString()); + return false; // tableName 全局表没有内部列 + } + } + } + return true; // tableName 有内部列 + } + + private static StringBuilder appendValues(List valuse, StringBuilder sb, int idx, int 
colSize){ + int size = valuse.size(); + if(size < colSize) + size = colSize; + + sb.append("("); + for(int i = 0; i < size; i++) { + if(i < size - 1){ + if(i != idx) + sb.append(valuse.get(i).toString()).append(","); + else + sb.append(operationTimestamp).append(","); + }else{ + if(i != idx){ + sb.append(valuse.get(i).toString()); + }else{ + sb.append(operationTimestamp); + } + } + } + if(idx <= -1) + sb.append(",").append(operationTimestamp); + return sb.append(")"); + } + + /** + * UPDATE [LOW_PRIORITY] [IGNORE] table_reference + SET col_name1={expr1|DEFAULT} [, col_name2={expr2|DEFAULT}] ... + [WHERE where_condition] + [ORDER BY ...] + [LIMIT row_count] + + Multiple-table syntax: + + UPDATE [LOW_PRIORITY] [IGNORE] table_references + SET col_name1={expr1|DEFAULT} [, col_name2={expr2|DEFAULT}] ... + [WHERE where_condition] + + update user, tuser set user.name='dddd',tuser.pwd='aaa' + where user.id=2 and tuser.id=0; + * @param sql update tuser set pwd='aaa', name='digdee' where id=0; + * @return + */ + public static String convertUpdateSQL(String sql){ + try{ + MySqlStatementParser parser = new MySqlStatementParser(sql); + SQLStatement stmt = parser.parseStatement(); + MySqlUpdateStatement update = (MySqlUpdateStatement)stmt; + SQLTableSource ts = update.getTableSource(); + if(ts != null && ts.toString().contains(",")){ + System.out.println(ts.toString()); + LOGGER.warn("Do not support Multiple-table udpate syntax..."); + return sql; + } + + String tableName = StringUtil.removeBackquote(update.getTableName().getSimpleName()); + if(!isGlobalTable(tableName)) + return sql; + if(!isInnerColExist(tableName)) + return sql; // 没有内部列 + + StringBuilder sb = new StringBuilder(150); + + SQLExpr se = update.getWhere(); + // where中有子查询: update company set name='com' where id in (select id from xxx where ...) 
+ if(se instanceof SQLInSubQueryExpr){ + // return sql; + int idx = sql.toUpperCase().indexOf(" SET ") + 5; + sb.append(sql.substring(0, idx)).append(GLOBAL_TABLE_MYCAT_COLUMN) + .append("=").append(operationTimestamp) + .append(",").append(sql.substring(idx)); + return sb.toString(); + } + String where = null; + if(update.getWhere() != null) + where = update.getWhere().toString(); + + SQLOrderBy orderBy = update.getOrderBy(); + Limit limit = update.getLimit(); + + sb.append("update ").append(tableName).append(" set "); + List items = update.getItems(); + boolean flag = false; + for(int i=0; i 0){ + sb.append(" order by "); + for(int i=0; i schemaMap = config.getSchemas(); + SchemaConfig schemaMconfig = null; + for(String key : schemaMap.keySet()){ + if(schemaMap.get(key) != null){ + schemaMconfig = schemaMap.get(key); + Map tableMap = schemaMconfig.getTables(); + if(tableMap != null){ + for(String k : tableMap.keySet()){ + TableConfig table = tableMap.get(k); + if(table != null && table.isGlobalTable()){ + globalTableMap.put(table.getName().toUpperCase(), table); + } + } + } + } + } + } + + /** + * 重新获得table 的列list + * @param tableName + */ + private static void reGetColumnsForTable(String tableName){ + MycatConfig config = MycatServer.getInstance().getConfig(); + if(globalTableMap != null + && globalTableMap.get(tableName.toUpperCase()) != null){ + + TableConfig tableConfig = globalTableMap.get(tableName.toUpperCase()); + if(tableConfig == null || isInnerColumnCheckFinished != 1) // consistencyCheck 在运行中 + return; + + String nodeName = tableConfig.getDataNodes().get(0); + + Map map = config.getDataNodes(); + for(String k2 : map.keySet()){ + PhysicalDBNode dBnode = map.get(k2); + if(nodeName.equals(dBnode.getName())){ + PhysicalDBPool pool = dBnode.getDbPool(); + List dsList = (List)pool.genAllDataSources(); + for(PhysicalDatasource ds : dsList){ + if(ds instanceof MySQLDataSource){ + MySQLDataSource mds = (MySQLDataSource)dsList.get(0); + MySQLConsistencyChecker 
checker = + new MySQLConsistencyChecker(mds, tableConfig.getName()); + checker.checkInnerColumnExist(); + return; // 运行一次就行了,不需要像consistencyCheck那样每个db都运行一次 + } + } + } + } + } + } + + public static void consistencyCheck() { + MycatConfig config = MycatServer.getInstance().getConfig(); + for(String key : globalTableMap.keySet()){ + TableConfig table = globalTableMap.get(key); + //
dataNodeList = table.getDataNodes(); + + // 记录本次已经执行的datanode + // 多个 datanode 对应到同一个 PhysicalDatasource 只执行一次 + Map executedMap = new HashMap<>(); + for(String nodeName : dataNodeList){ + Map map = config.getDataNodes(); + for(String k2 : map.keySet()){ + // + PhysicalDBNode dBnode = map.get(k2); + if(nodeName.equals(dBnode.getName())){ // dn1,dn2,dn3 + PhysicalDBPool pool = dBnode.getDbPool(); + Collection allDS = pool.genAllDataSources(); + for(PhysicalDatasource pds : allDS){ + if(pds instanceof MySQLDataSource){ + MySQLDataSource mds = (MySQLDataSource)pds; + if(executedMap.get(pds.getName()) == null){ + MySQLConsistencyChecker checker = + new MySQLConsistencyChecker(mds, table.getName()); + + isInnerColumnCheckFinished = 0; + checker.checkInnerColumnExist(); + while(isInnerColumnCheckFinished <= 0){ + LOGGER.debug("isInnerColumnCheckFinished:" + isInnerColumnCheckFinished); + try { + TimeUnit.SECONDS.sleep(1); + } catch (InterruptedException e) { + LOGGER.warn(e.getMessage()); + } + } + LOGGER.debug("isInnerColumnCheckFinished:" + isInnerColumnCheckFinished); + + // 一种 check 完成之后,再进行另一种 check + checker = new MySQLConsistencyChecker(mds, table.getName()); + isColumnCountCheckFinished = 0; + checker.checkRecordCout(); + while(isColumnCountCheckFinished <= 0){ + LOGGER.debug("isColumnCountCheckFinished:" + isColumnCountCheckFinished); + try { + TimeUnit.SECONDS.sleep(1); + } catch (InterruptedException e) { + LOGGER.warn(e.getMessage()); + } + } + LOGGER.debug("isColumnCountCheckFinished:" + isColumnCountCheckFinished); + + + checker = new MySQLConsistencyChecker(mds, table.getName()); + checker.checkMaxTimeStamp(); + + executedMap.put(pds.getName(), nodeName); + } + } + } + } + } + } + } + } + + /** + * 每次处理 一种 check 的结果,不会交叉同时处理 多种不同 check 的结果 + * @param list + * @return + */ + public static List>> + finished(List>> list){ + lock.lock(); + try{ + //[{"dataNode":"db3","result":{"count(*)":"1"},"success":true,"tableName":"COMPANY"}] + 
LOGGER.debug("list:::::::::::" + JSON.toJSONString(list)); + for(SQLQueryResult> map : list){ + Map row = map.getResult(); + if(row != null){ + if(row.containsKey(GlobalTableUtil.MAX_COLUMN)){ + LOGGER.info(map.getDataNode() + "." + map.getTableName() + + "." + GlobalTableUtil.MAX_COLUMN + + ": "+ map.getResult().get(GlobalTableUtil.MAX_COLUMN)); + } + if(row.containsKey(GlobalTableUtil.COUNT_COLUMN)){ + LOGGER.info(map.getDataNode() + "." + map.getTableName() + + "." + GlobalTableUtil.COUNT_COLUMN + + ": "+ map.getResult().get(GlobalTableUtil.COUNT_COLUMN)); + } + if(row.containsKey(GlobalTableUtil.INNER_COLUMN)){ + String columnsList = null; + try{ + if(StringUtils.isNotBlank(row.get(GlobalTableUtil.INNER_COLUMN))) + columnsList = row.get(GlobalTableUtil.INNER_COLUMN); // id,name,_mycat_op_time + LOGGER.debug("columnsList: " + columnsList); + }catch(Exception e){ + LOGGER.warn(row.get(GlobalTableUtil.INNER_COLUMN) + ", " + e.getMessage()); + }finally{ + if(columnsList == null + || columnsList.indexOf(GlobalTableUtil.GLOBAL_TABLE_MYCAT_COLUMN) == -1){ + LOGGER.warn(map.getDataNode() + "." 
+ map.getTableName() + + " inner column: " + + GlobalTableUtil.GLOBAL_TABLE_MYCAT_COLUMN + + " is not exist."); + if(StringUtils.isNotBlank(map.getTableName())){ + for(SQLQueryResult> sqr : innerColumnNotExist){ + String name = map.getTableName(); + String node = map.getDataNode(); + if(name != null && !name.equalsIgnoreCase(sqr.getTableName()) + || node != null && !node.equalsIgnoreCase(sqr.getDataNode())){ + innerColumnNotExist.add(map); + } + } + } + }else{ + LOGGER.debug("columnsList: " + columnsList); + // COMPANY -> "id,name,_mycat_op_time",获得了全局表的所有列,并且知道了全局表是否有内部列 + // 所有列,在 insert into t values(xx,yy) 语法中需要用到 + tableColumsMap.put(map.getTableName().toUpperCase(), columnsList); + } +// isInnerColumnCheckFinished = 1; + } + } + } + } + }finally{ + isInnerColumnCheckFinished = 1; + isColumnCountCheckFinished = 1; + lock.unlock(); + } + return list; + } + + private static boolean isGlobalTable(String tableName){ + if(globalTableMap != null && globalTableMap.size() > 0){ + return globalTableMap.get(tableName.toUpperCase()) != null; + } + return false; + } + + public static Map getTableColumsMap() { + return tableColumsMap; + } + + +} diff --git a/src/main/java/io/mycat/server/interceptor/impl/StatSqlInterceptor.java b/src/main/java/io/mycat/server/interceptor/impl/StatSqlInterceptor.java new file mode 100644 index 000000000..0a16cd6f4 --- /dev/null +++ b/src/main/java/io/mycat/server/interceptor/impl/StatSqlInterceptor.java @@ -0,0 +1,15 @@ +package io.mycat.server.interceptor.impl; + +import io.mycat.server.interceptor.SQLInterceptor; + +public class StatSqlInterceptor implements SQLInterceptor { + + @Override + public String interceptSQL(String sql, int sqlType) { + // TODO Auto-generated method stub + final int atype = sqlType; + final String sqls = DefaultSqlInterceptor.processEscape(sql); + return sql; + } + +} diff --git a/src/main/java/io/mycat/server/interceptor/impl/StatisticsSqlInterceptor.java 
b/src/main/java/io/mycat/server/interceptor/impl/StatisticsSqlInterceptor.java index c357444cb..02e572147 100644 --- a/src/main/java/io/mycat/server/interceptor/impl/StatisticsSqlInterceptor.java +++ b/src/main/java/io/mycat/server/interceptor/impl/StatisticsSqlInterceptor.java @@ -1,14 +1,5 @@ package io.mycat.server.interceptor.impl; -import io.mycat.MycatServer; -import io.mycat.net.NetSystem; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.interceptor.SQLInterceptor; -import io.mycat.server.parser.ServerParse; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.text.DateFormat; @@ -17,116 +8,134 @@ import java.util.HashMap; import java.util.Map; -public class StatisticsSqlInterceptor implements SQLInterceptor { - - private final class StatisticsSqlRunner implements Runnable { - - private int sqltype = 0; - private String sqls = ""; - - public StatisticsSqlRunner(int sqltype, String sqls) { - this.sqltype = sqltype; - this.sqls = sqls; - } - - public void run() { - try { - SystemConfig sysconfig = MycatServer.getInstance().getConfig() - .getSystem(); - String sqlInterceptorType = sysconfig.getSqlInterceptorType(); - String sqlInterceptorFile = sysconfig.getSqlInterceptorFile(); - - String[] sqlInterceptorTypes = sqlInterceptorType.split(","); - for (String type : sqlInterceptorTypes) { - if (StatisticsSqlInterceptor.parseType(type.toUpperCase()) == sqltype) { - switch (sqltype) { - case ServerParse.SELECT: - StatisticsSqlInterceptor.appendFile( - sqlInterceptorFile, "SELECT:" + sqls + ""); - break; - case ServerParse.UPDATE: - StatisticsSqlInterceptor.appendFile( - sqlInterceptorFile, "UPDATE:" + sqls); - break; - case ServerParse.INSERT: - StatisticsSqlInterceptor.appendFile( - sqlInterceptorFile, "INSERT:" + sqls); - break; - case ServerParse.DELETE: - StatisticsSqlInterceptor.appendFile( - sqlInterceptorFile, "DELETE:" + sqls); - break; 
- default: - break; - } - } - } - - } catch (Exception e) { - LOGGER.error("interceptSQL error:" + e.getMessage()); - } - } - } +import org.slf4j.Logger; import org.slf4j.LoggerFactory; - private static final Logger LOGGER = LoggerFactory - .getLogger(StatisticsSqlInterceptor.class); - - private static Map typeMap = new HashMap(); - static { - typeMap.put("SELECT", 7); - typeMap.put("UPDATE", 11); - typeMap.put("INSERT", 4); - typeMap.put("DELETE", 3); - } - - public static int parseType(String type) { - return typeMap.get(type); - } - - /** - * 方法追加文件:使用FileWriter - */ - private static synchronized void appendFile(String fileName, String content) { - - Calendar calendar = Calendar.getInstance(); - DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd"); - String dayFile = dateFormat.format(calendar.getTime()); - - try { - String newFileName = fileName; - // 打开一个写文件器,构造函数中的第二个参数true表示以追加形式写文件 - String[] title = newFileName.split("\\."); - if (title.length == 2) { - newFileName = title[0] + dayFile + "." 
+ title[1]; - } - File file = new File(newFileName); - if (!file.exists()) { - file.createNewFile(); - } - FileWriter writer = new FileWriter(file, true); - String newContent = content.replaceAll("[\\t\\n\\r]", "") - + System.getProperty("line.separator"); - writer.write(newContent); - - writer.close(); - } catch (IOException e) { - LOGGER.error("appendFile error:" + e); - } - } - - /** - * interceptSQL , type :insert,delete,update,select exectime:xxx ms log - * content : select:select 1 from table,exectime:100ms,shared:1 etc - */ - @Override - public String interceptSQL(String sql, int sqlType) { - LOGGER.debug("sql interceptSQL:"); +import io.mycat.MycatServer; +import io.mycat.config.model.SystemConfig; +import io.mycat.server.interceptor.SQLInterceptor; +import io.mycat.server.parser.ServerParse; - final int sqltype = sqlType; - final String sqls = DefaultSqlInterceptor.processEscape(sql); - NetSystem.getInstance().getExecutor() - .execute(new StatisticsSqlRunner(sqltype, sqls)); - return sql; - } +import java.io.File; +public class StatisticsSqlInterceptor implements SQLInterceptor { + +private final class StatisticsSqlRunner implements Runnable { + + private int sqltype = 0; + private String sqls = ""; + + public StatisticsSqlRunner(int sqltype, String sqls) { + this.sqltype = sqltype; + this.sqls = sqls; + } + + public void run() { + try { + SystemConfig sysconfig = MycatServer.getInstance().getConfig().getSystem(); + String sqlInterceptorType = sysconfig.getSqlInterceptorType(); + String sqlInterceptorFile = sysconfig.getSqlInterceptorFile(); + + String[] sqlInterceptorTypes = sqlInterceptorType.split(","); + for (String type : sqlInterceptorTypes) { + if (StatisticsSqlInterceptor.parseType(type.toUpperCase()) == sqltype) { + switch (sqltype) { + case ServerParse.SELECT: + StatisticsSqlInterceptor.appendFile(sqlInterceptorFile, "SELECT:" + + sqls + ""); + break; + case ServerParse.UPDATE: + StatisticsSqlInterceptor.appendFile(sqlInterceptorFile, "UPDATE:" + 
+ sqls); + break; + case ServerParse.INSERT: + StatisticsSqlInterceptor.appendFile(sqlInterceptorFile, "INSERT:" + + sqls); + break; + case ServerParse.DELETE: + StatisticsSqlInterceptor.appendFile(sqlInterceptorFile, "DELETE:" + + sqls); + break; + default: + break; + } + } + } + + } catch (Exception e) { + LOGGER.error("interceptSQL error:" + e.getMessage(),e); + } + } + } + + private static final Logger LOGGER = LoggerFactory.getLogger(StatisticsSqlInterceptor.class); + + private static Map typeMap = new HashMap(); + static { + typeMap.put("SELECT", 7); + typeMap.put("UPDATE", 11); + typeMap.put("INSERT", 4); + typeMap.put("DELETE", 3); + } + + public static int parseType(String type) { + return typeMap.get(type); + } + + /** + * 方法追加文件:使用FileWriter + */ + private static synchronized void appendFile(String fileName, String content) { + + Calendar calendar = Calendar.getInstance(); + DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd"); + String dayFile = dateFormat.format(calendar.getTime()); + FileWriter writer = null; + try { + String newFileName = fileName; + //打开一个写文件器,构造函数中的第二个参数true表示以追加形式写文件 + String[] title = newFileName.split("\\."); + if (title.length == 2) { + newFileName = title[0] + dayFile + "." 
+ title[1]; + } + File file = new File(newFileName); + if (!file.exists()) { + file.createNewFile(); + } + writer = new FileWriter(file, true); + String newContent = content.replaceAll("[\\t\\n\\r]", "") + + System.getProperty("line.separator"); + writer.write(newContent); + + writer.flush(); + } catch (IOException e) { + LOGGER.error("appendFile error:" + e.getMessage(),e); + } finally { + if(writer != null ){ + try { + writer.close(); + } catch (IOException e) { + LOGGER.error("close file error:" + e.getMessage(),e); + } + } + } + } + + /** + * interceptSQL , + * type :insert,delete,update,select + * exectime:xxx ms + * log content : select:select 1 from table,exectime:100ms,shared:1 + * etc + */ + @Override + public String interceptSQL(String sql, int sqlType) { + LOGGER.debug("sql interceptSQL:"); + + final int sqltype = sqlType; + final String sqls = DefaultSqlInterceptor.processEscape(sql); + MycatServer.getInstance().getBusinessExecutor() + .execute(new StatisticsSqlRunner(sqltype, sqls)); + return sql; + } + } diff --git a/src/main/java/io/mycat/server/packet/AuthPacket.java b/src/main/java/io/mycat/server/packet/AuthPacket.java deleted file mode 100644 index 85dc3b23d..000000000 --- a/src/main/java/io/mycat/server/packet/AuthPacket.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.packet; - -import io.mycat.net.BufferArray; -import io.mycat.server.Capabilities; -import io.mycat.server.packet.util.BufferUtil; -import io.mycat.server.packet.util.StreamUtil; - -import java.io.IOException; -import java.io.OutputStream; -import java.nio.ByteBuffer; - -/** - * From client to server during initial handshake. - * - *
- * Bytes                        Name
- * -----                        ----
- * 4                            client_flags
- * 4                            max_packet_size
- * 1                            charset_number
- * 23                           (filler) always 0x00...
- * n (Null-Terminated String)   user
- * n (Length Coded Binary)      scramble_buff (1 + x bytes)
- * n (Null-Terminated String)   databasename (optional)
- * 
- * @see http://forge.mysql.com/wiki/MySQL_Internals_ClientServer_Protocol#Client_Authentication_Packet
- * 
- * - * @author mycat - */ -public class AuthPacket extends MySQLPacket { - private static final byte[] FILLER = new byte[23]; - - public long clientFlags; - public long maxPacketSize; - public int charsetIndex; - public byte[] extra;// from FILLER(23) - public String user; - public byte[] password; - public String database; - - public void read(byte[] data) { - MySQLMessage mm = new MySQLMessage(data); - packetLength = mm.readUB3(); - packetId = mm.read(); - clientFlags = mm.readUB4(); - maxPacketSize = mm.readUB4(); - charsetIndex = (mm.read() & 0xff); - // read extra - int current = mm.position(); - int len = (int) mm.readLength(); - if (len > 0 && len < FILLER.length) { - byte[] ab = new byte[len]; - System.arraycopy(mm.bytes(), mm.position(), ab, 0, len); - this.extra = ab; - } - mm.position(current + FILLER.length); - user = mm.readStringWithNull(); - password = mm.readBytesWithLength(); - if (((clientFlags & Capabilities.CLIENT_CONNECT_WITH_DB) != 0) - && mm.hasRemaining()) { - database = mm.readStringWithNull(); - } - } - - public void write(OutputStream out) throws IOException { - StreamUtil.writeUB3(out, calcPacketSize()); - StreamUtil.write(out, packetId); - StreamUtil.writeUB4(out, clientFlags); - StreamUtil.writeUB4(out, maxPacketSize); - StreamUtil.write(out, (byte) charsetIndex); - out.write(FILLER); - if (user == null) { - StreamUtil.write(out, (byte) 0); - } else { - StreamUtil.writeWithNull(out, user.getBytes()); - } - if (password == null) { - StreamUtil.write(out, (byte) 0); - } else { - StreamUtil.writeWithLength(out, password); - } - if (database == null) { - StreamUtil.write(out, (byte) 0); - } else { - StreamUtil.writeWithNull(out, database.getBytes()); - } - } - - public void write(BufferArray bufferArray) { - int size = calcPacketSize(); - ByteBuffer buffer = bufferArray.checkWriteBuffer(packetHeaderSize - + size); - BufferUtil.writeUB3(buffer, calcPacketSize()); - buffer.put(packetId); - BufferUtil.writeUB4(buffer, clientFlags); - 
BufferUtil.writeUB4(buffer, maxPacketSize); - buffer.put((byte) charsetIndex); - buffer = bufferArray.write(FILLER); - if (user == null) { - buffer = bufferArray.checkWriteBuffer(1); - buffer.put((byte) 0); - } else { - byte[] userData = user.getBytes(); - buffer = bufferArray.checkWriteBuffer(userData.length + 1); - BufferUtil.writeWithNull(buffer, userData); - } - if (password == null) { - buffer = bufferArray.checkWriteBuffer(1); - buffer.put((byte) 0); - } else { - buffer = bufferArray.checkWriteBuffer(BufferUtil - .getLength(password)); - BufferUtil.writeWithLength(buffer, password); - } - if (database == null) { - buffer = bufferArray.checkWriteBuffer(1); - buffer.put((byte) 0); - } else { - byte[] databaseData = database.getBytes(); - buffer = bufferArray.checkWriteBuffer(databaseData.length + 1); - BufferUtil.writeWithNull(buffer, databaseData); - } - - } - - @Override - public int calcPacketSize() { - int size = 32;// 4+4+1+23; - size += (user == null) ? 1 : user.length() + 1; - size += (password == null) ? 1 : BufferUtil.getLength(password); - size += (database == null) ? 
1 : database.length() + 1; - return size; - } - - @Override - protected String getPacketInfo() { - return "MySQL Authentication Packet"; - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/packet/BinaryRowDataPacket.java b/src/main/java/io/mycat/server/packet/BinaryRowDataPacket.java deleted file mode 100644 index bd613e97f..000000000 --- a/src/main/java/io/mycat/server/packet/BinaryRowDataPacket.java +++ /dev/null @@ -1,336 +0,0 @@ -package io.mycat.server.packet; - -import io.mycat.net.BufferArray; -import io.mycat.net.Connection; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.packet.util.BufferUtil; -import io.mycat.util.ByteUtil; -import io.mycat.util.DateUtil; - -import java.nio.ByteBuffer; -import java.text.ParseException; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; - -/** - * ProtocolBinary::ResultsetRow: - * row of a binary resultset (COM_STMT_EXECUTE) - - * Payload - * 1 packet header [00] - * string[$len] NULL-bitmap, length: (column_count + 7 + 2) / 8 - * string[$len] values - * - * A Binary Protocol Resultset Row is made up of the NULL bitmap - * containing as many bits as we have columns in the resultset + 2 - * and the values for columns that are not NULL in the Binary Protocol Value format. 
- * - * @see http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html#packet-ProtocolBinary::ResultsetRow - * @see http://dev.mysql.com/doc/internals/en/binary-protocol-value.html - * @author CrazyPig - * - */ -public class BinaryRowDataPacket extends MySQLPacket { - - public int fieldCount; - public List fieldValues; - public byte packetHeader = (byte) 0; - public byte[] nullBitMap; - - public List fieldPackets; - - public BinaryRowDataPacket() {} - - /** - * 从RowDataPacket转换成BinaryRowDataPacket - * @param fieldPackets 字段包集合 - * @param rowDataPk 文本协议行数据包 - */ - public void read(List fieldPackets, RowDataPacket rowDataPk) { - this.fieldPackets = fieldPackets; - this.fieldCount = rowDataPk.fieldCount; - this.fieldValues = new ArrayList(fieldCount); - nullBitMap = new byte[(fieldCount + 7 + 2) / 8]; - List _fieldValues = rowDataPk.fieldValues; - for (int i = 0; i < fieldCount; i++) { - byte[] fv = _fieldValues.get(i); - FieldPacket fieldPk = fieldPackets.get(i); - if (fv == null) { // 字段值为null,根据协议规定存储nullBitMap - int bitMapPos = (i + 2) / 8; - int bitPos = (i + 2) % 8; - nullBitMap[bitMapPos] |= (byte) (1 << bitPos); - this.fieldValues.add(fv); - } else { - // 从RowDataPacket的fieldValue的数据转化成BinaryRowDataPacket的fieldValue数据 - int fieldType = fieldPk.type; - switch (fieldType) { - case Fields.FIELD_TYPE_STRING: - case Fields.FIELD_TYPE_VARCHAR: - case Fields.FIELD_TYPE_VAR_STRING: - case Fields.FIELD_TYPE_ENUM: - case Fields.FIELD_TYPE_SET: - case Fields.FIELD_TYPE_LONG_BLOB: - case Fields.FIELD_TYPE_MEDIUM_BLOB: - case Fields.FIELD_TYPE_BLOB: - case Fields.FIELD_TYPE_TINY_BLOB: - case Fields.FIELD_TYPE_GEOMETRY: - case Fields.FIELD_TYPE_BIT: - case Fields.FIELD_TYPE_DECIMAL: - case Fields.FIELD_TYPE_NEW_DECIMAL: - // Fields - // value (lenenc_str) -- string - - // Example - // 03 66 6f 6f -- string = "foo" - this.fieldValues.add(_fieldValues.get(i)); - break; - case Fields.FIELD_TYPE_LONGLONG: - // Fields - // value (8) -- integer - - // Example - // 
01 00 00 00 00 00 00 00 -- int64 = 1 - long longVar = ByteUtil.getLong(_fieldValues.get(i)); - this.fieldValues.add(ByteUtil.getBytes(longVar)); - break; - case Fields.FIELD_TYPE_LONG: - case Fields.FIELD_TYPE_INT24: - // Fields - // value (4) -- integer - - // Example - // 01 00 00 00 -- int32 = 1 - int intVar = ByteUtil.getInt(_fieldValues.get(i)); - this.fieldValues.add(ByteUtil.getBytes(intVar)); - break; - case Fields.FIELD_TYPE_SHORT: - case Fields.FIELD_TYPE_YEAR: - // Fields - // value (2) -- integer - - // Example - // 01 00 -- int16 = 1 - short shortVar = ByteUtil.getShort(_fieldValues.get(i)); - this.fieldValues.add(ByteUtil.getBytes(shortVar)); - break; - case Fields.FIELD_TYPE_TINY: - // Fields - // value (1) -- integer - - // Example - // 01 -- int8 = 1 - int tinyVar = ByteUtil.getInt(_fieldValues.get(i)); - byte[] bytes = new byte[1]; - bytes[0] = new Integer(tinyVar).byteValue(); - this.fieldValues.add(bytes); - break; - case Fields.FIELD_TYPE_DOUBLE: - // Fields - // value (string.fix_len) -- (len=8) double - - // Example - // 66 66 66 66 66 66 24 40 -- double = 10.2 - double doubleVar = ByteUtil.getDouble(_fieldValues.get(i)); - this.fieldValues.add(ByteUtil.getBytes(doubleVar)); - break; - case Fields.FIELD_TYPE_FLOAT: - // Fields - // value (string.fix_len) -- (len=4) float - - // Example - // 33 33 23 41 -- float = 10.2 - float floatVar = ByteUtil.getFloat(_fieldValues.get(i)); - this.fieldValues.add(ByteUtil.getBytes(floatVar)); - break; - case Fields.FIELD_TYPE_DATE: - try { - Date dateVar = DateUtil.parseDate( - ByteUtil.getDate(_fieldValues.get(i)), - DateUtil.DATE_PATTERN_ONLY_DATE); - this.fieldValues.add(ByteUtil.getBytes(dateVar, false)); - } catch (ParseException e) { - e.printStackTrace(); - } - break; - case Fields.FIELD_TYPE_DATETIME: - case Fields.FIELD_TYPE_TIMESTAMP: - String dateStr = ByteUtil.getDate(_fieldValues.get(i)); - Date dateTimeVar = null; - try { - if (dateStr.indexOf(".") > 0) { - dateTimeVar = 
DateUtil.parseDate(dateStr, - DateUtil.DATE_PATTERN_FULL); - this.fieldValues.add(ByteUtil.getBytes(dateTimeVar, - false)); - } else { - dateTimeVar = DateUtil.parseDate(dateStr, - DateUtil.DEFAULT_DATE_PATTERN); - this.fieldValues.add(ByteUtil.getBytes(dateTimeVar, - false)); - } - } catch (ParseException e) { - e.printStackTrace(); - } - break; - case Fields.FIELD_TYPE_TIME: - String timeStr = ByteUtil.getTime(_fieldValues.get(i)); - Date timeVar = null; - try { - if (timeStr.indexOf(".") > 0) { - timeVar = DateUtil.parseDate(timeStr, - DateUtil.TIME_PATTERN_FULL); - this.fieldValues.add(ByteUtil.getBytes(timeVar, - true)); - } else { - timeVar = DateUtil.parseDate(timeStr, - DateUtil.DEFAULT_TIME_PATTERN); - this.fieldValues.add(ByteUtil.getBytes(timeVar, - true)); - } - } catch (ParseException e) { - e.printStackTrace(); - } - break; - } - } - } - } - - @Override - public void write(BufferArray bufferArray) { - int size = calcPacketSize(); - ByteBuffer bb = bufferArray.checkWriteBuffer(packetHeaderSize - + size); - BufferUtil.writeUB3(bb, calcPacketSize()); - bb.put(packetId); - bb.put(packetHeader); // packet header [00] - bb.put(nullBitMap); // NULL-Bitmap - for(int i = 0; i < fieldCount; i++) { // values - byte[] fv = fieldValues.get(i); - if(fv != null) { - bb = bufferArray.checkWriteBuffer(BufferUtil.getLength(fv.length)); - FieldPacket fieldPk = this.fieldPackets.get(i); - int fieldType = fieldPk.type; - switch(fieldType) { - case Fields.FIELD_TYPE_STRING: - case Fields.FIELD_TYPE_VARCHAR: - case Fields.FIELD_TYPE_VAR_STRING: - case Fields.FIELD_TYPE_ENUM: - case Fields.FIELD_TYPE_SET: - case Fields.FIELD_TYPE_LONG_BLOB: - case Fields.FIELD_TYPE_MEDIUM_BLOB: - case Fields.FIELD_TYPE_BLOB: - case Fields.FIELD_TYPE_TINY_BLOB: - case Fields.FIELD_TYPE_GEOMETRY: - case Fields.FIELD_TYPE_BIT: - case Fields.FIELD_TYPE_DECIMAL: - case Fields.FIELD_TYPE_NEW_DECIMAL: - // 长度编码的字符串需要一个字节来存储长度(0表示空字符串) - BufferUtil.writeLength(bb, fv.length); - break; - default: - 
break; - } - if(fv.length > 0) { - bufferArray.write(fv); - } - } - } - } - - public void write(Connection conn) { - int size = calcPacketSize(); - int totalSize = size + packetHeaderSize; - if(totalSize <= NetSystem.getInstance().getBufferPool().getChunkSize()) { - ByteBuffer bb = NetSystem.getInstance().getBufferPool() - .allocate(); - BufferUtil.writeUB3(bb, calcPacketSize()); - bb.put(packetId); - bb.put(packetHeader); // packet header [00] - bb.put(nullBitMap); // NULL-Bitmap - for(int i = 0; i < fieldCount; i++) { // values - byte[] fv = fieldValues.get(i); - if(fv != null) { - FieldPacket fieldPk = this.fieldPackets.get(i); - int fieldType = fieldPk.type; - switch(fieldType) { - case Fields.FIELD_TYPE_STRING: - case Fields.FIELD_TYPE_VARCHAR: - case Fields.FIELD_TYPE_VAR_STRING: - case Fields.FIELD_TYPE_ENUM: - case Fields.FIELD_TYPE_SET: - case Fields.FIELD_TYPE_LONG_BLOB: - case Fields.FIELD_TYPE_MEDIUM_BLOB: - case Fields.FIELD_TYPE_BLOB: - case Fields.FIELD_TYPE_TINY_BLOB: - case Fields.FIELD_TYPE_GEOMETRY: - case Fields.FIELD_TYPE_BIT: - case Fields.FIELD_TYPE_DECIMAL: - case Fields.FIELD_TYPE_NEW_DECIMAL: - // 长度编码的字符串需要一个字节来存储长度(0表示空字符串) - BufferUtil.writeLength(bb, fv.length); - break; - default: - break; - } - if(fv.length > 0) { - bb.put(fv); - } - } - } - conn.write(bb); - } else { - BufferArray bufArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - write(bufArray); - conn.write(bufArray); - } - } - - @Override - public int calcPacketSize() { - int size = 0; - size = size + 1 + nullBitMap.length; - for(int i = 0, n = fieldValues.size(); i < n; i++) { - byte[] value = fieldValues.get(i); - if(value != null) { - FieldPacket fieldPk = this.fieldPackets.get(i); - int fieldType = fieldPk.type; - switch(fieldType) { - case Fields.FIELD_TYPE_STRING: - case Fields.FIELD_TYPE_VARCHAR: - case Fields.FIELD_TYPE_VAR_STRING: - case Fields.FIELD_TYPE_ENUM: - case Fields.FIELD_TYPE_SET: - case Fields.FIELD_TYPE_LONG_BLOB: - case 
Fields.FIELD_TYPE_MEDIUM_BLOB: - case Fields.FIELD_TYPE_BLOB: - case Fields.FIELD_TYPE_TINY_BLOB: - case Fields.FIELD_TYPE_GEOMETRY: - case Fields.FIELD_TYPE_BIT: - case Fields.FIELD_TYPE_DECIMAL: - case Fields.FIELD_TYPE_NEW_DECIMAL: - // 长度编码的字符串需要一个字节来存储长度 - if(value.length != 0) { - size = size + 1 + value.length; - } else { - size = size + 1; // 处理空字符串,只计算长度1个字节 - } - break; - default: - size = size + value.length; - break; - } - } - } - return size; - } - - @Override - protected String getPacketInfo() { - return "MySQL Binary RowData Packet"; - } - -} diff --git a/src/main/java/io/mycat/server/packet/MySQLPacket.java b/src/main/java/io/mycat/server/packet/MySQLPacket.java deleted file mode 100644 index faa3daa64..000000000 --- a/src/main/java/io/mycat/server/packet/MySQLPacket.java +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server.packet; - -import io.mycat.net.BufferArray; - -/** - * @author mycat - */ -public abstract class MySQLPacket { - public static int packetHeaderSize = 4; - /** - * none, this is an internal thread state - */ - public static final byte COM_SLEEP = 0; - - /** - * mysql_close - */ - public static final byte COM_QUIT = 1; - - /** - * mysql_select_db - */ - public static final byte COM_INIT_DB = 2; - - /** - * mysql_real_query - */ - public static final byte COM_QUERY = 3; - - /** - * mysql_list_fields - */ - public static final byte COM_FIELD_LIST = 4; - - /** - * mysql_create_db (deprecated) - */ - public static final byte COM_CREATE_DB = 5; - - /** - * mysql_drop_db (deprecated) - */ - public static final byte COM_DROP_DB = 6; - - /** - * mysql_refresh - */ - public static final byte COM_REFRESH = 7; - - /** - * mysql_shutdown - */ - public static final byte COM_SHUTDOWN = 8; - - /** - * mysql_stat - */ - public static final byte COM_STATISTICS = 9; - - /** - * mysql_list_processes - */ - public static final byte COM_PROCESS_INFO = 10; - - /** - * none, this is an internal thread state - */ - public static final byte COM_CONNECT = 11; - - /** - * mysql_kill - */ - public static final byte COM_PROCESS_KILL = 12; - - /** - * mysql_dump_debug_info - */ - public static final byte COM_DEBUG = 13; - - /** - * mysql_ping - */ - public static final byte COM_PING = 14; - - /** - * none, this is an internal thread state - */ - public static final byte COM_TIME = 15; - - /** - * none, this is an internal thread state - */ - public static final byte COM_DELAYED_INSERT = 16; - - /** - * mysql_change_user - */ - public static final byte COM_CHANGE_USER = 17; - - /** - * used by slave server mysqlbinlog - */ - public static final byte COM_BINLOG_DUMP = 18; - - /** - * used by slave server to get master table - */ - public static final byte COM_TABLE_DUMP = 19; - - /** - * used by slave to log connection to master - */ - public static final byte 
COM_CONNECT_OUT = 20; - - /** - * used by slave to register to master - */ - public static final byte COM_REGISTER_SLAVE = 21; - - /** - * mysql_stmt_prepare - */ - public static final byte COM_STMT_PREPARE = 22; - - /** - * mysql_stmt_execute - */ - public static final byte COM_STMT_EXECUTE = 23; - - /** - * mysql_stmt_send_long_data - */ - public static final byte COM_STMT_SEND_LONG_DATA = 24; - - /** - * mysql_stmt_close - */ - public static final byte COM_STMT_CLOSE = 25; - - /** - * mysql_stmt_reset - */ - public static final byte COM_STMT_RESET = 26; - - /** - * mysql_set_server_option - */ - public static final byte COM_SET_OPTION = 27; - - /** - * mysql_stmt_fetch - */ - public static final byte COM_STMT_FETCH = 28; - - /** - * Mycat heartbeat - */ - public static final byte COM_HEARTBEAT = 64; - - public int packetLength; - public byte packetId; - - /** - * 把数据包写到BufferArray中 - */ - public void write(BufferArray bufferArray) { - throw new UnsupportedOperationException(); - } - - /** - * 计算数据包大小,不包含包头长度。 - */ - public abstract int calcPacketSize(); - - /** - * 取得数据包信息 - */ - protected abstract String getPacketInfo(); - - @Override - public String toString() { - return new StringBuilder().append(getPacketInfo()).append("{length=") - .append(packetLength).append(",id=").append(packetId) - .append('}').toString(); - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/packet/ResultStatus.java b/src/main/java/io/mycat/server/packet/ResultStatus.java deleted file mode 100644 index 37388d344..000000000 --- a/src/main/java/io/mycat/server/packet/ResultStatus.java +++ /dev/null @@ -1,45 +0,0 @@ -package io.mycat.server.packet; - -import java.util.List; - -/** - * mysql sql result status. 
for example ,query header result query rowset resut - * ,ok result, - * - * @author wuzhih - * - */ -public class ResultStatus { - public static final int RESULT_STATUS_INIT = 0; - public static final int RESULT_STATUS_HEADER = 1; - public static final int RESULT_STATUS_FIELD_EOF = 2; - - private int resultStatus; - private byte[] header; - private List fields; - - public int getResultStatus() { - return resultStatus; - } - - public void setResultStatus(int resultStatus) { - this.resultStatus = resultStatus; - } - - public byte[] getHeader() { - return header; - } - - public void setHeader(byte[] header) { - this.header = header; - } - - public List getFields() { - return fields; - } - - public void setFields(List fields) { - this.fields = fields; - } - -} diff --git a/src/main/java/io/mycat/server/packet/util/CharsetUtil.java b/src/main/java/io/mycat/server/packet/util/CharsetUtil.java deleted file mode 100644 index 106678967..000000000 --- a/src/main/java/io/mycat/server/packet/util/CharsetUtil.java +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.packet.util; - -import io.mycat.MycatServer; -import io.mycat.backend.PhysicalDBPool; -import io.mycat.backend.PhysicalDatasource; -import io.mycat.server.config.node.DBHostConfig; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.Callable; - -import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang.SystemUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.alibaba.fastjson.JSON; -import com.google.common.util.concurrent.ListenableFuture; - -/** - * 该类被彻底重构,fix 掉了原来的 collationIndex 和 charset 之间对应关系的兼容性问题, - * 比如 utf8mb4 对应的 collationIndex 有 45, 46 两个值,如果我们只配置一个 45或者46的话, - * 那么当mysqld(my.cnf配置文件)中的配置了:collation_server=utf8mb4_bin时,而我们却仅仅 - * 值配置45的话,那么就会报错:'java.lang.RuntimeException: Unknown charsetIndex:46' - * 如果没有配置 collation_server=utf8mb4_bin,那么collation_server就是使用的默认值,而我们却仅仅 - * 仅仅配置46,那么也会报错。所以应该要同时配置45,46两个值才是正确的。 - * 重构方法是,在 MycatServer.startup()方法在中,在 config.initDatasource(); 之前,加入 - * CharsetUtil.initCharsetAndCollation(config.getDataHosts()); - * 该方法,直接从mysqld的information_schema.collations表中获取 collationIndex 和 charset 之间对应关系, - * 因为是从mysqld服务器获取的,所以肯定不会出现以前的兼容性问题(不同版本的mysqld,collationIndex 和 charset 对应关系不一样)。 - * @author mycat - */ -public class CharsetUtil { - public static final Logger logger = LoggerFactory.getLogger(CharsetUtil.class); - - /** collationIndex 和 charsetName 的映射 */ - private static final Map INDEX_TO_CHARSET = new HashMap<>(); - - /** 
charsetName 到 默认collationIndex 的映射 */ - private static final Map CHARSET_TO_INDEX = new HashMap<>(); - - /** collationName 到 CharsetCollation 对象的映射 */ - private static final Map COLLATION_TO_CHARSETCOLLATION = new HashMap<>(); - - /** - * 异步 初始化 charset 和 collation(根据 mycat.xml文件中的 dataHosts 去mysqld读取 charset 和 collation 的映射关系) - * 使用异步时,应该改用 ConcurrentHashMap - * @param charsetConfigMap mycat.xml文件中 charset-config 元素指定的 collationIndex --> charsetName - */ - public static void asynLoad(Map dataHosts, Map charsetConfigMap){ - MycatServer.getInstance().getListeningExecutorService().execute(new Runnable() { - public void run() { - CharsetUtil.load(dataHosts, charsetConfigMap); - } - }); - } - - /** - * 同步 初始化 charset 和 collation(根据 mycat.xml文件中的 dataHosts 去mysqld读取 charset 和 collation 的映射关系) - * @param charsetConfigMap mycat.xml文件中 charset-config 元素指定的 collationIndex 和 charsetName 映射 - */ - public static void load(Map dataHosts, Map charsetConfigMap){ - try { - if(dataHosts != null && dataHosts.size() > 0) - CharsetUtil.initCharsetAndCollation(dataHosts); // 去mysqld读取 charset 和 collation 的映射关系 - else - logger.debug("param dataHosts is null"); - - // 加载配置文件中的 指定的 collationIndex --> charsetName - for (String index : charsetConfigMap.keySet()){ - int collationIndex = Integer.parseInt(index); - String charsetName = INDEX_TO_CHARSET.get(collationIndex); - if(StringUtils.isNotBlank(charsetName)){ - INDEX_TO_CHARSET.put(collationIndex, charsetName); - CHARSET_TO_INDEX.put(charsetName, collationIndex); - } - logger.debug("load charset and collation from mycat.xml."); - } - } catch (Exception e) { - logger.error(e.getMessage()); - } - } - - /** - *
-     * 根据 dataHosts 去mysqld读取 charset 和 collation 的映射关系:
-     * mysql> SELECT ID,CHARACTER_SET_NAME,COLLATION_NAME,IS_DEFAULT FROM INFORMATION_SCHEMA.COLLATIONS;
-	 * +-----+--------------------+--------------------------+------------+
-	 * | ID  | CHARACTER_SET_NAME | COLLATION_NAME           | IS_DEFAULT |
-	 * +-----+--------------------+--------------------------+------------+
-	 * |   1 | big5               | big5_chinese_ci          | Yes        |
-	 * |  84 | big5               | big5_bin                 |            |
-	 * |   3 | dec8               | dec8_swedish_ci          | Yes        |
-	 * |  69 | dec8               | dec8_bin                 |            |
-	 *
- */ - private static void initCharsetAndCollation(Map dataHosts){ - if(COLLATION_TO_CHARSETCOLLATION.size() > 0){ // 已经初始化 - logger.debug(" charset and collation has already init ..."); - return; - } - - // 先利用mycat.xml配置文件 中的 heartbeat(该配置一般是存在的)的连接信息来获得CharsetCollation,避免后面的遍历; - // 如果没有成功,则遍历mycat.xml配置文件 中的所有dataHost元素,来获得CharsetCollation; - DBHostConfig dBHostconfig = getConfigByDataHostName(dataHosts, "jdbchost"); - if(dBHostconfig != null){ - if(getCharsetCollationFromMysql(dBHostconfig)){ - logger.debug(" init charset and collation success..."); - return; - } - } - - // 遍历 配置文件 mycat.xml 中的 dataHost 元素,直到可以成功连上mysqld,并且获取 charset 和 collation 信息 - for(String key : dataHosts.keySet()){ - PhysicalDBPool pool = dataHosts.get(key); - if(pool != null && pool.getSource() != null){ - PhysicalDatasource ds = pool.getSource(); - if(ds != null && ds.getConfig() != null - && "mysql".equalsIgnoreCase(ds.getConfig().getDbType())){ - DBHostConfig config = ds.getConfig(); - while(!getCharsetCollationFromMysql(config)){ - getCharsetCollationFromMysql(config); - } - logger.debug(" init charset and collation success..."); - return; // 结束外层 for 循环 - } - } - } - logger.error(" init charset and collation from mysqld failed, please check datahost in mycat.xml."+ - SystemUtils.LINE_SEPARATOR + - " if your backend database is not mysqld, please ignore this message."); - } - - public static DBHostConfig getConfigByDataHostName(Map dataHosts, String hostName){ - PhysicalDBPool pool = dataHosts.get(hostName); - if(pool != null && pool.getSource() != null){ - PhysicalDatasource ds = pool.getSource(); - return ds.getConfig(); - } - return null; - } - - public static final String getCharset(int index) { - return INDEX_TO_CHARSET.get(index); - } - - /** - * 因为 每一个 charset 对应多个 collationIndex, 所以这里返回的是默认的那个 collationIndex; - * 如果想获得确定的值 index,而非默认的index, 那么需要使用 getIndexByCollationName - * 或者 getIndexByCharsetNameAndCollationName - * @param charset - * @return - */ - public static final 
int getIndex(String charset) { - if (StringUtils.isBlank(charset)) { - return 0; - } else { - Integer i = CHARSET_TO_INDEX.get(charset.toLowerCase()); - if(i == null && "Cp1252".equalsIgnoreCase(charset) ) - charset = "latin1"; // 参见:http://www.cp1252.com/ The windows 1252 codepage, also called Latin 1 - - i = CHARSET_TO_INDEX.get(charset.toLowerCase()); - return (i == null) ? 0 : i; - } - } - - /** - * 根据 collationName 和 charset 返回 collationIndex - * @param charset - * @param collationName - * @return - */ - public static final int getIndexByCharsetNameAndCollationName(String charset, String collationName) { - if (StringUtils.isBlank(collationName)) { - return 0; - } else { - CharsetCollation cc = COLLATION_TO_CHARSETCOLLATION.get(collationName.toLowerCase()); - if(cc != null && charset != null && charset.equalsIgnoreCase(cc.getCharsetName())) - return cc.getCollationIndex(); - else - return 0; - } - } - - /** - * 根据 collationName 返回 collationIndex, 二者是一一对应的关系 - * @param collationName - * @return - */ - public static final int getIndexByCollationName(String collationName) { - if (StringUtils.isBlank(collationName)) { - return 0; - } else { - CharsetCollation cc = COLLATION_TO_CHARSETCOLLATION.get(collationName.toLowerCase()); - if(cc != null) - return cc.getCollationIndex(); - else - return 0; - } - } - - private static boolean getCharsetCollationFromMysql(DBHostConfig config){ - String sql = "SELECT ID,CHARACTER_SET_NAME,COLLATION_NAME,IS_DEFAULT FROM INFORMATION_SCHEMA.COLLATIONS"; - try(Connection conn = getConnection(config)){ - if(conn == null) return false; - - try(Statement statement = conn.createStatement()){ - ResultSet rs = statement.executeQuery(sql); - while(rs != null && rs.next()){ - int collationIndex = new Long(rs.getLong(1)).intValue(); - String charsetName = rs.getString(2); - String collationName = rs.getString(3); - boolean isDefaultCollation = (rs.getString(4) != null - && "Yes".equalsIgnoreCase(rs.getString(4))) ? 
true : false; - - INDEX_TO_CHARSET.put(collationIndex, charsetName); - if(isDefaultCollation){ // 每一个 charsetName 对应多个collationIndex,此处选择默认的collationIndex - CHARSET_TO_INDEX.put(charsetName, collationIndex); - } - - CharsetCollation cc = new CharsetCollation(charsetName, collationIndex, collationName, isDefaultCollation); - COLLATION_TO_CHARSETCOLLATION.put(collationName, cc); - } - if(COLLATION_TO_CHARSETCOLLATION.size() > 0) - return true; - return false; - } catch (SQLException e) { - logger.warn(e.getMessage()); - } - - } catch (SQLException e) { - logger.warn(e.getMessage()); - } - return false; - } - - - /** - * 利用参数 DBHostConfig cfg 获得物理数据库的连接(java.sql.Connection) - * 在mysqld刚启动马上启动mycat-server,该函数执行很慢。 - * 但是又不能又不能使用mysql协议来获得所要的数据,因为mysql协议中mysqld在第一次发来的handshake - * 就指定了 "serverCharsetIndex":46,在登录之前,我们无法修改connection的字符,必须使用 serverCharsetIndex - * 指定的字符编码完成 handshake 和登录: - * {"packetId":0,"packetLength":78,"protocolVersion":10,"restOfScrambleBuff":"OihYY2tvakVadV5Y", - * "seed":"YiJ+eWVsb2c=","serverCapabilities":63487, - * "serverCharsetIndex":46,"serverStatus":2,"serverVersion":"NS42LjI3LWxvZw==","threadId":65} - * 所以我们无法使用 mysql协议从mysqld获得字符信息,而JDBC协议中可以在url中指定字符集。所以只能用JDBC来获得字符信息。 - * @param cfg - * @return - * @throws SQLException - */ - public static Connection getConnection(DBHostConfig cfg){ - if(cfg == null) return null; - - String url = new StringBuffer("jdbc:mysql://").append(cfg.getUrl()) - .append("/mysql").append("?characterEncoding=UTF-8").toString(); - Connection connection = null; - long millisecondsEnd2 = System.currentTimeMillis(); - try { - Class.forName("com.mysql.jdbc.Driver"); - connection = DriverManager.getConnection(url, cfg.getUser(), cfg.getPassword()); - } catch (ClassNotFoundException | SQLException e) { - if(e instanceof ClassNotFoundException) - logger.error(e.getMessage()); - else - logger.warn(e.getMessage() + " " + JSON.toJSONString(cfg)); - } - long millisecondsEnd = System.currentTimeMillis(); - logger.debug(" 
function getConnection cost milliseconds: " + (millisecondsEnd - millisecondsEnd2)); - return connection; - } -} - -/** - * 该类用来表示 mysqld 数据库中 字符集、字符集支持的collation、字符集的collation的index、 字符集的默认collation 的对应关系: - * 一个字符集一般对应(支持)多个collation,其中一个是默认的 collation,每一个 collation对应一个唯一的index, - * collationName 和 collationIndex 一一对应, 每一个collationIndex对应到一个字符集,不同的collationIndex可以对应到相同的字符集, - * 所以字符集 到 collationIndex 的对应不是唯一的,一个字符集对应多个 index(有一个默认的 collation的index), - * 而 collationIndex 到 字符集 的对应是确定的,唯一的; - * mysqld 用 collation 的 index 来描述排序规则。 - * @author Administrator - * - */ -class CharsetCollation { - // mysqld支持的字符编码名称,注意这里不是java中的unicode编码的名字, - // 二者之间的区别和联系可以参考驱动jar包中的com.mysql.jdbc.CharsetMapping源码 - private String charsetName; - private int collationIndex; // collation的索引顺序 - private String collationName; // collation 名称 - private boolean isDefaultCollation = false; // 该collation是否是字符集的默认collation - - public CharsetCollation(String charsetName, int collationIndex, - String collationName, boolean isDefaultCollation){ - this.charsetName = charsetName; - this.collationIndex = collationIndex; - this.collationName = collationName; - this.isDefaultCollation = isDefaultCollation; - } - - public String getCharsetName() { - return charsetName; - } - public void setCharsetName(String charsetName) { - this.charsetName = charsetName; - } - public int getCollationIndex() { - return collationIndex; - } - public void setCollationIndex(int collationIndex) { - this.collationIndex = collationIndex; - } - public String getCollationName() { - return collationName; - } - public void setCollationName(String collationName) { - this.collationName = collationName; - } - public boolean isDefaultCollation() { - return isDefaultCollation; - } - public void setDefaultCollation(boolean isDefaultCollation) { - this.isDefaultCollation = isDefaultCollation; - } -} - diff --git a/src/main/java/io/mycat/server/packet/util/LoadDataUtil.java b/src/main/java/io/mycat/server/packet/util/LoadDataUtil.java 
deleted file mode 100644 index b4e142135..000000000 --- a/src/main/java/io/mycat/server/packet/util/LoadDataUtil.java +++ /dev/null @@ -1,106 +0,0 @@ -package io.mycat.server.packet.util; - -import io.mycat.MycatServer; -import io.mycat.backend.nio.MySQLBackendConnection; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.route.RouteResultsetNode; -import io.mycat.server.packet.BinaryPacket; -import io.mycat.sqlengine.mpp.LoadData; - -import java.io.BufferedInputStream; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.List; - -/** - * Created by nange on 2015/3/31. - */ -public class LoadDataUtil { - public static void requestFileDataResponse(byte[] data, - MySQLBackendConnection conn) { - - byte packId = data[3]; - MySQLBackendConnection backendAIOConnection = (MySQLBackendConnection) conn; - RouteResultsetNode rrn = (RouteResultsetNode) conn.getAttachment(); - LoadData loadData = rrn.getLoadData(); - List loadDataData = loadData.getData(); - try { - if (loadDataData != null && loadDataData.size() > 0) { - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - for (int i = 0, loadDataDataSize = loadDataData.size(); i < loadDataDataSize; i++) { - String line = loadDataData.get(i); - - String s = (i == loadDataDataSize - 1) ? 
line : line - + loadData.getLineTerminatedBy(); - byte[] bytes = s.getBytes(loadData.getCharset()); - bos.write(bytes); - - } - - packId = writeToBackConnection(packId, - new ByteArrayInputStream(bos.toByteArray()), - backendAIOConnection); - - } else { - // 从文件读取 - packId = writeToBackConnection(packId, new BufferedInputStream( - new FileInputStream(loadData.getFileName())), - backendAIOConnection); - - } - } catch (IOException e) { - - throw new RuntimeException(e); - } finally { - // 结束必须发空包 - byte[] empty = new byte[] { 0, 0, 0, 3 }; - empty[3] = ++packId; - backendAIOConnection.write(empty); - } - - } - - public static byte writeToBackConnection(byte packID, - InputStream inputStream, MySQLBackendConnection backendAIOConnection) - throws IOException { - try { - int packSize = MycatServer.getInstance().getConfig().getSystem() - .getProcessorBufferChunk() - 5; - // int packSize = backendAIOConnection.getMaxPacketSize() / 32; - // int packSize=65530; - byte[] buffer = new byte[packSize]; - int len = -1; - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - while ((len = inputStream.read(buffer)) != -1) { - byte[] temp = null; - if (len == packSize) { - temp = buffer; - } else { - temp = new byte[len]; - System.arraycopy(buffer, 0, temp, 0, len); - } - BinaryPacket packet = new BinaryPacket(); - packet.packetId = ++packID; - packet.data = temp; - packet.write(bufferArray); - if (bufferArray.getBlockCount() == 5) { - backendAIOConnection.write(bufferArray); - bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - } - - } - //write last - backendAIOConnection.write(bufferArray); - } finally { - inputStream.close(); - } - - return packID; - } -} diff --git a/src/main/java/io/mycat/server/parser/ServerParse.java b/src/main/java/io/mycat/server/parser/ServerParse.java index e06fd304d..394e1a237 100644 --- a/src/main/java/io/mycat/server/parser/ServerParse.java +++ 
b/src/main/java/io/mycat/server/parser/ServerParse.java @@ -23,11 +23,11 @@ */ package io.mycat.server.parser; -import io.mycat.util.ParseUtil; - import java.util.regex.Matcher; import java.util.regex.Pattern; +import io.mycat.route.parser.util.ParseUtil; + /** * @author mycat */ @@ -49,19 +49,28 @@ public final class ServerParse { public static final int SAVEPOINT = 13; public static final int USE = 14; public static final int EXPLAIN = 15; + public static final int EXPLAIN2 = 151; public static final int KILL_QUERY = 16; public static final int HELP = 17; public static final int MYSQL_CMD_COMMENT = 18; public static final int MYSQL_COMMENT = 19; public static final int CALL = 20; public static final int DESCRIBE = 21; + public static final int LOCK = 22; + public static final int UNLOCK = 23; public static final int LOAD_DATA_INFILE_SQL = 99; public static final int DDL = 100; + + + public static final int MIGRATE = 203; private static final Pattern pattern = Pattern.compile("(load)+\\s+(data)+\\s+\\w*\\s*(infile)+",Pattern.CASE_INSENSITIVE); + private static final Pattern callPattern = Pattern.compile("\\w*\\;\\s*\\s*(call)+\\s+\\w*\\s*",Pattern.CASE_INSENSITIVE); - public static int parse(String stmt) { - int lenth = stmt.length(); - for (int i = 0; i < lenth; ++i) { + public static int parse(String stmt) { + int length = stmt.length(); + //FIX BUG FOR SQL SUCH AS /XXXX/SQL + int rt = -1; + for (int i = 0; i < length; ++i) { switch (stmt.charAt(i)) { case ' ': case '\t': @@ -71,65 +80,123 @@ public static int parse(String stmt) { case '/': // such as /*!40101 SET character_set_client = @saved_cs_client // */; - if (i == 0 && stmt.charAt(1) == '*' && stmt.charAt(2) == '!' - && stmt.charAt(lenth - 2) == '*' - && stmt.charAt(lenth - 1) == '/') { + if (i == 0 && stmt.charAt(1) == '*' && stmt.charAt(2) == '!' 
&& stmt.charAt(length - 2) == '*' + && stmt.charAt(length - 1) == '/') { return MYSQL_CMD_COMMENT; } case '#': i = ParseUtil.comment(stmt, i); - if (i + 1 == lenth) { + if (i + 1 == length) { return MYSQL_COMMENT; } continue; case 'A': case 'a': - return aCheck(stmt,i); + rt = aCheck(stmt, i); + if (rt != OTHER) { + return rt; + } + continue; case 'B': case 'b': - return beginCheck(stmt, i); + rt = beginCheck(stmt, i); + if (rt != OTHER) { + return rt; + } + continue; case 'C': case 'c': - return commitOrCallCheckOrCreate(stmt, i); + rt = commitOrCallCheckOrCreate(stmt, i); + if (rt != OTHER) { + return rt; + } + continue; case 'D': case 'd': - return deleteOrdCheck(stmt, i); + rt = deleteOrdCheck(stmt, i); + if (rt != OTHER) { + return rt; + } + continue; case 'E': case 'e': - return explainCheck(stmt, i); + rt = explainCheck(stmt, i); + if (rt != OTHER) { + return rt; + } + continue; case 'I': case 'i': - return insertCheck(stmt, i); + rt = insertCheck(stmt, i); + if (rt != OTHER) { + return rt; + } + continue; + case 'M': + case 'm': + rt = migrateCheck(stmt, i); + if (rt != OTHER) { + return rt; + } + continue; case 'R': case 'r': - return rCheck(stmt, i); + rt = rCheck(stmt, i); + if (rt != OTHER) { + return rt; + } + continue; case 'S': case 's': - return sCheck(stmt, i); + rt = sCheck(stmt, i); + if (rt != OTHER) { + return rt; + } + continue; case 'T': case 't': - return tCheck(stmt, i); + rt = tCheck(stmt, i); + if (rt != OTHER) { + return rt; + } + continue; case 'U': case 'u': - return uCheck(stmt, i); + rt = uCheck(stmt, i); + if (rt != OTHER) { + return rt; + } + continue; case 'K': case 'k': - return killCheck(stmt, i); + rt = killCheck(stmt, i); + if (rt != OTHER) { + return rt; + } + continue; case 'H': case 'h': - return helpCheck(stmt, i); - case 'L': - case 'l': - return loadDataCheck(stmt, i); + rt = helpCheck(stmt, i); + if (rt != OTHER) { + return rt; + } + continue; + case 'L': + case 'l': + rt = lCheck(stmt, i); + if (rt != OTHER) { + return 
rt; + } + continue; default: - return OTHER; + continue; } } return OTHER; } - static int loadDataCheck(String stmt, int offset) { + static int lCheck(String stmt, int offset) { if (stmt.length() > offset + 3) { char c1 = stmt.charAt(++offset); char c2 = stmt.charAt(++offset); @@ -138,13 +205,133 @@ static int loadDataCheck(String stmt, int offset) { && (c3 == 'D' || c3 == 'd')) { Matcher matcher = pattern.matcher(stmt); return matcher.find() ? LOAD_DATA_INFILE_SQL : OTHER; + } else if ((c1 == 'O' || c1 == 'o') && (c2 == 'C' || c2 == 'c') + && (c3 == 'K' || c3 == 'k')){ + return LOCK; } } return OTHER; } + private static int migrateCheck(String stmt, int offset) { + if (stmt.length() > offset + 7) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + + if ((c1 == 'i' || c1 == 'I') + && (c2 == 'g' || c2 == 'G') + && (c3 == 'r' || c3 == 'R') + && (c4 == 'a' || c4 == 'A') + && (c5 == 't' || c5 == 'T') + && (c6 == 'e' || c6 == 'E')) + { + return MIGRATE; + } + } + return OTHER; + } + //truncate + private static int tCheck(String stmt, int offset) { + if (stmt.length() > offset + 7) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + char c7 = stmt.charAt(++offset); + char c8 = stmt.charAt(++offset); + + if ((c1 == 'R' || c1 == 'r') + && (c2 == 'U' || c2 == 'u') + && (c3 == 'N' || c3 == 'n') + && (c4 == 'C' || c4 == 'c') + && (c5 == 'A' || c5 == 'a') + && (c6 == 'T' || c6 == 't') + && (c7 == 'E' || c7 == 'e') + && (c8 == ' ' || c8 == '\t' || c8 == '\r' || c8 == '\n')) { + return DDL; + } + } + return OTHER; + } + //alter table/view/... 
+ private static int aCheck(String stmt, int offset) { + if (stmt.length() > offset + 4) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + if ((c1 == 'L' || c1 == 'l') + && (c2 == 'T' || c2 == 't') + && (c3 == 'E' || c3 == 'e') + && (c4 == 'R' || c4 == 'r') + && (c5 == ' ' || c5 == '\t' || c5 == '\r' || c5 == '\n')) { + return DDL; + } + } + return OTHER; + } + //create table/view/... + private static int createCheck(String stmt, int offset) { + if (stmt.length() > offset + 5) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + char c6 = stmt.charAt(++offset); + if ((c1 == 'R' || c1 == 'r') + && (c2 == 'E' || c2 == 'e') + && (c3 == 'A' || c3 == 'a') + && (c4 == 'T' || c4 == 't') + && (c5 == 'E' || c5 == 'e') + && (c6 == ' ' || c6 == '\t' || c6 == '\r' || c6 == '\n')) { + return DDL; + } + } + return OTHER; + } + //drop + private static int dropCheck(String stmt, int offset) { + if (stmt.length() > offset + 3) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + if ((c1 == 'R' || c1 == 'r') + && (c2 == 'O' || c2 == 'o') + && (c3 == 'P' || c3 == 'p') + && (c4 == ' ' || c4 == '\t' || c4 == '\r' || c4 == '\n')) { + return DDL; + } + } + return OTHER; + } + // delete or drop + static int deleteOrdCheck(String stmt, int offset){ + int sqlType = OTHER; + switch (stmt.charAt((offset + 1))) { + case 'E': + case 'e': + sqlType = dCheck(stmt, offset); + break; + case 'R': + case 'r': + sqlType = dropCheck(stmt, offset); + break; + default: + sqlType = OTHER; + } + return sqlType; + } // HELP' ' static int helpCheck(String stmt, int offset) { if (stmt.length() > offset + "ELP ".length()) { @@ -161,6 +348,7 @@ static int helpCheck(String 
stmt, int offset) { // EXPLAIN' ' static int explainCheck(String stmt, int offset) { + if (stmt.length() > offset + "XPLAIN ".length()) { char c1 = stmt.charAt(++offset); char c2 = stmt.charAt(++offset); @@ -176,6 +364,9 @@ static int explainCheck(String stmt, int offset) { return (offset << 8) | EXPLAIN; } } + if(stmt != null && stmt.toLowerCase().startsWith("explain2")){ + return (offset << 8) | EXPLAIN2; + } return OTHER; } @@ -255,49 +446,7 @@ static int beginCheck(String stmt, int offset) { } return OTHER; } - //truncate - private static int tCheck(String stmt, int offset) { - if (stmt.length() > offset + 7) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - char c7 = stmt.charAt(++offset); - char c8 = stmt.charAt(++offset); - if ((c1 == 'R' || c1 == 'r') - && (c2 == 'U' || c2 == 'u') - && (c3 == 'N' || c3 == 'n') - && (c4 == 'C' || c4 == 'c') - && (c5 == 'A' || c5 == 'a') - && (c6 == 't' || c6 == 't') - && (c7 == 'E' || c7 == 'e') - && (c8 == ' ' || c8 == '\t' || c8 == '\r' || c8 == '\n')) { - return DDL; - } - } - return OTHER; - } - //alter table/view/... - private static int aCheck(String stmt, int offset) { - if (stmt.length() > offset + 4) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - if ((c1 == 'L' || c1 == 'l') - && (c2 == 'T' || c2 == 't') - && (c3 == 'E' || c3 == 'e') - && (c4 == 'R' || c4 == 'r') - && (c5 == ' ' || c5 == '\t' || c5 == '\r' || c5 == '\n')) { - return DDL; - } - } - return OTHER; - } // COMMIT static int commitCheck(String stmt, int offset) { if (stmt.length() > offset + 5) { @@ -356,59 +505,6 @@ static int commitOrCallCheckOrCreate(String stmt, int offset) { return sqlType; } - //create table/view/... 
- private static int createCheck(String stmt, int offset) { - if (stmt.length() > offset + 5) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - char c5 = stmt.charAt(++offset); - char c6 = stmt.charAt(++offset); - if ((c1 == 'R' || c1 == 'r') - && (c2 == 'E' || c2 == 'e') - && (c3 == 'A' || c3 == 'a') - && (c4 == 'T' || c4 == 't') - && (c5 == 'E' || c5 == 'e') - && (c6 == ' ' || c6 == '\t' || c6 == '\r' || c6 == '\n')) { - return DDL; - } - } - return OTHER; - } - //drop - private static int dropCheck(String stmt, int offset) { - if (stmt.length() > offset + 3) { - char c1 = stmt.charAt(++offset); - char c2 = stmt.charAt(++offset); - char c3 = stmt.charAt(++offset); - char c4 = stmt.charAt(++offset); - if ((c1 == 'R' || c1 == 'r') - && (c2 == 'O' || c2 == 'o') - && (c3 == 'P' || c3 == 'p') - && (c4 == ' ' || c4 == '\t' || c4 == '\r' || c4 == '\n')) { - return DDL; - } - } - return OTHER; - } - // delete or drop - static int deleteOrdCheck(String stmt, int offset){ - int sqlType = OTHER; - switch (stmt.charAt((offset + 1))) { - case 'E': - case 'e': - sqlType = dCheck(stmt, offset); - break; - case 'R': - case 'r': - sqlType = dropCheck(stmt, offset); - break; - default: - sqlType = OTHER; - } - return sqlType; - } // DESCRIBE or desc or DELETE' ' static int dCheck(String stmt, int offset) { if (stmt.length() > offset + 4) { @@ -596,6 +692,18 @@ static int seCheck(String stmt, int offset) { case 'T': case 't': if (stmt.length() > ++offset) { +//支持一下语句 +// /*!mycat: sql=SELECT * FROM test where id=99 */set @pin=1; +// call p_test(@pin,@pout); +// select @pout; + if(stmt.startsWith("/*!mycat:")||stmt.startsWith("/*#mycat:")||stmt.startsWith("/*mycat:")) + { + Matcher matcher = callPattern.matcher(stmt); + if (matcher.find()) { + return CALL; + } + } + char c = stmt.charAt(offset); if (c == ' ' || c == '\r' || c == '\n' || c == '\t' || c == '/' || c == '#') { @@ -690,6 +798,23 
@@ static int uCheck(String stmt, int offset) { } } break; + case 'N': + case 'n': + if (stmt.length() > offset + 5) { + char c1 = stmt.charAt(++offset); + char c2 = stmt.charAt(++offset); + char c3 = stmt.charAt(++offset); + char c4 = stmt.charAt(++offset); + char c5 = stmt.charAt(++offset); + if ((c1 == 'L' || c1 == 'l') + && (c2 == 'O' || c2 == 'o') + && (c3 == 'C' || c3 == 'c') + && (c4 == 'K' || c4 == 'k') + && (c5 == ' ' || c5 == '\t' || c5 == '\r' || c5 == '\n')) { + return UNLOCK; + } + } + break; default: return OTHER; } @@ -697,4 +822,4 @@ static int uCheck(String stmt, int offset) { return OTHER; } -} \ No newline at end of file +} diff --git a/src/main/java/io/mycat/server/parser/ServerParseSelect.java b/src/main/java/io/mycat/server/parser/ServerParseSelect.java index 6d7b7724a..fb8276a01 100644 --- a/src/main/java/io/mycat/server/parser/ServerParseSelect.java +++ b/src/main/java/io/mycat/server/parser/ServerParseSelect.java @@ -23,8 +23,8 @@ */ package io.mycat.server.parser; -import io.mycat.util.CharTypes; -import io.mycat.util.ParseUtil; +import io.mycat.route.parser.util.CharTypes; +import io.mycat.route.parser.util.ParseUtil; /** * @author mycat @@ -40,8 +40,11 @@ public final class ServerParseSelect { public static final int VERSION = 6; public static final int SESSION_INCREMENT = 7; public static final int SESSION_ISOLATION = 8; + public static final int SELECT_VAR_ALL = 9; + public static final int SESSION_TX_READ_ONLY = 10; + private static final char[] _VERSION_COMMENT = "VERSION_COMMENT" .toCharArray(); private static final char[] _IDENTITY = "IDENTITY".toCharArray(); @@ -96,13 +99,18 @@ private static int sessionVarCheck(String stmt, int offset) { if (s.startsWith("session.auto_increment_increment")) { if(s.contains("@@")) { - return SELECT_VAR_ALL; + return SELECT_VAR_ALL; } return SESSION_INCREMENT; } else if (s .startsWith("session.tx_isolation")) { return SESSION_ISOLATION; - } else { + } + else if (s + 
.startsWith("session.tx_read_only")) { + return SESSION_TX_READ_ONLY; + } + else { return OTHER; } } @@ -175,8 +183,9 @@ private static int versionParenthesisCheck(String stmt, int offset) { */ private static int skipAlias(String stmt, int offset) { offset = ParseUtil.move(stmt, offset, 0); - if (offset >= stmt.length()) + if (offset >= stmt.length()) { return offset; + } switch (stmt.charAt(offset)) { case '\'': return skipString(stmt, offset); @@ -187,8 +196,8 @@ private static int skipAlias(String stmt, int offset) { default: if (CharTypes.isIdentifierChar(stmt.charAt(offset))) { for (; offset < stmt.length() - && CharTypes.isIdentifierChar(stmt.charAt(offset)); ++offset) - ; + && CharTypes.isIdentifierChar(stmt.charAt(offset)); ++offset) { + } return offset; } } @@ -205,10 +214,9 @@ private static int skipAlias(String stmt, int offset) { */ private static int skipIdentifierEscape(String stmt, int offset) { for (++offset; offset < stmt.length(); ++offset) { - if (stmt.charAt(offset) == '`') { - if (++offset >= stmt.length() || stmt.charAt(offset) != '`') { + if (stmt.charAt(offset) == '`' + && (++offset >= stmt.length() || stmt.charAt(offset) != '`')) { return offset; - } } } return -1; @@ -331,8 +339,8 @@ public static int skipAs(String stmt, int offset) { * LAST_INSERT_ID */ public static int indexAfterLastInsertIdFunc(String stmt, int offset) { - if (stmt.length() >= offset + "LAST_INSERT_ID()".length()) { - if (ParseUtil.compare(stmt, offset, _LAST_INSERT_ID)) { + if (stmt.length() >= offset + "LAST_INSERT_ID()".length() + && ParseUtil.compare(stmt, offset, _LAST_INSERT_ID)) { offset = ParseUtil.move(stmt, offset + _LAST_INSERT_ID.length, 0); if (offset + 1 < stmt.length() && stmt.charAt(offset) == '(') { @@ -341,7 +349,6 @@ public static int indexAfterLastInsertIdFunc(String stmt, int offset) { return ++offset; } } - } } return -1; } @@ -431,8 +438,8 @@ static int identityCheck(String stmt, int offset) { } static int select2Check(String stmt, int offset) { 
- if (stmt.length() > ++offset && stmt.charAt(offset) == '@') { - if (stmt.length() > ++offset) { + if (stmt.length() > ++offset && stmt.charAt(offset) == '@' + && stmt.length() > ++offset) { switch (stmt.charAt(offset)) { case 'V': case 'v': @@ -446,7 +453,6 @@ static int select2Check(String stmt, int offset) { default: return OTHER; } - } } return OTHER; } @@ -495,13 +501,12 @@ static int userCheck(String stmt, int offset) { */ static int currentUserCheck(String stmt, int offset) { int length = offset + _CURRENT_USER.length; - if (stmt.length() >= length) { - if (ParseUtil.compare(stmt, offset, _CURRENT_USER)) { + if (stmt.length() >= length + && ParseUtil.compare(stmt, offset, _CURRENT_USER)) { if (stmt.length() > length && stmt.charAt(length) != ' ') { return OTHER; } return USER; - } } return OTHER; } @@ -522,4 +527,4 @@ static int versionCommentCheck(String stmt, int offset) { return OTHER; } -} \ No newline at end of file +} diff --git a/src/main/java/io/mycat/server/parser/ServerParseSet.java b/src/main/java/io/mycat/server/parser/ServerParseSet.java index 0612e65ec..65e93063a 100644 --- a/src/main/java/io/mycat/server/parser/ServerParseSet.java +++ b/src/main/java/io/mycat/server/parser/ServerParseSet.java @@ -23,7 +23,7 @@ */ package io.mycat.server.parser; -import io.mycat.util.ParseUtil; +import io.mycat.route.parser.util.ParseUtil; /** * @author mycat @@ -86,18 +86,27 @@ public static int parse(String stmt, int offset) { private static int xaFlag(String stmt, int offset) { if (stmt.length() > offset + 1) { char c1 = stmt.charAt(++offset); - char c2=stmt.charAt(++offset); - if ((c1 == 'A' || c1 == 'a') - &&( c2== ' '||c2=='=')) { - int value = autocommitValue(stmt, offset); - if (value == AUTOCOMMIT_ON) { - return XA_FLAG_ON; - } else if (value == AUTOCOMMIT_OFF) { - return XA_FLAG_OFF; - } else { - return OTHER; + if ((c1 == 'A' || c1 == 'a')) { + while (stmt.length() >= ++ offset) { + switch (stmt.charAt(offset)) { + case ' ': + case '\r': + case 
'\n': + case '\t': + continue; + case '=': + int value = autocommitValue(stmt, offset); + if (value == AUTOCOMMIT_ON) { + return XA_FLAG_ON; + } else if (value == AUTOCOMMIT_OFF) { + return XA_FLAG_OFF; + } else { + return OTHER; + } + default: + return OTHER; + } } - } } return OTHER; @@ -422,8 +431,8 @@ private static int session(String stmt, int offset) { && (c3 == 'S' || c3 == 's') && (c4 == 'I' || c4 == 'i') && (c5 == 'O' || c5 == 'o') && (c6 == 'N' || c6 == 'n') && stmt.charAt(++offset) == ' ') { - for (;;) { - if (stmt.length() > ++offset) { +// for (;;) { + while (stmt.length() > ++offset) { switch (stmt.charAt(offset)) { case ' ': case '\r': @@ -437,8 +446,8 @@ private static int session(String stmt, int offset) { return OTHER; } } - return OTHER; - } +// return OTHER; +// } } } return OTHER; @@ -463,8 +472,8 @@ private static int transaction(String stmt, int offset) { && (c7 == 'T' || c7 == 't') && (c8 == 'I' || c8 == 'i') && (c9 == 'O' || c9 == 'o') && (c10 == 'N' || c10 == 'n') && stmt.charAt(++offset) == ' ') { - for (;;) { - if (stmt.length() > ++offset) { +// for (;;) { + while (stmt.length() > ++offset) { switch (stmt.charAt(offset)) { case ' ': case '\r': @@ -478,8 +487,8 @@ private static int transaction(String stmt, int offset) { return OTHER; } } - return OTHER; - } +// return OTHER; +// } } } return OTHER; @@ -501,8 +510,8 @@ private static int isolation(String stmt, int offset) { && (c5 == 'T' || c5 == 't') && (c6 == 'I' || c6 == 'i') && (c7 == 'O' || c7 == 'o') && (c8 == 'N' || c8 == 'n') && stmt.charAt(++offset) == ' ') { - for (;;) { - if (stmt.length() > ++offset) { +// for (;;) { + while (stmt.length() > ++offset) { switch (stmt.charAt(offset)) { case ' ': case '\r': @@ -516,8 +525,8 @@ private static int isolation(String stmt, int offset) { return OTHER; } } - return OTHER; - } +// return OTHER; +// } } } return OTHER; @@ -533,8 +542,8 @@ private static int level(String stmt, int offset) { if ((c1 == 'E' || c1 == 'e') && (c2 == 'V' || c2 
== 'v') && (c3 == 'E' || c3 == 'e') && (c4 == 'L' || c4 == 'l') && stmt.charAt(++offset) == ' ') { - for (;;) { - if (stmt.length() > ++offset) { +// for (;;) { + while (stmt.length() > ++offset) { switch (stmt.charAt(offset)) { case ' ': case '\r': @@ -551,8 +560,8 @@ private static int level(String stmt, int offset) { return OTHER; } } - return OTHER; - } +// return OTHER; +// } } } return OTHER; @@ -627,8 +636,8 @@ private static int aCheck(String stmt, int offset) { if (stmt.length() > offset + 2) { char c1 = stmt.charAt(++offset); if ((c1 == 'D' || c1 == 'd') && stmt.charAt(++offset) == ' ') { - for (;;) { - if (stmt.length() > ++offset) { +// for (;;) { + while (stmt.length() > ++offset) { switch (stmt.charAt(offset)) { case ' ': case '\r': @@ -645,8 +654,8 @@ private static int aCheck(String stmt, int offset) { return OTHER; } } - return OTHER; - } +// return OTHER; +// } } } return OTHER; @@ -724,8 +733,8 @@ private static int pCheck(String stmt, int offset) { && (c3 == 'T' || c3 == 't') && (c4 == 'A' || c4 == 'a') && (c5 == 'B' || c5 == 'b') && (c6 == 'L' || c6 == 'l') && (c7 == 'E' || c7 == 'e') && stmt.charAt(++offset) == ' ') { - for (;;) { - if (stmt.length() > ++offset) { +// for (;;) { + while (stmt.length() > ++offset) { switch (stmt.charAt(offset)) { case ' ': case '\r': @@ -739,8 +748,8 @@ private static int pCheck(String stmt, int offset) { return OTHER; } } - return OTHER; - } +// return OTHER; +// } } } return OTHER; diff --git a/src/main/java/io/mycat/server/parser/ServerParseShow.java b/src/main/java/io/mycat/server/parser/ServerParseShow.java index bf230d595..160eb2e6b 100644 --- a/src/main/java/io/mycat/server/parser/ServerParseShow.java +++ b/src/main/java/io/mycat/server/parser/ServerParseShow.java @@ -23,11 +23,11 @@ */ package io.mycat.server.parser; -import io.mycat.util.ParseUtil; - import java.util.regex.Matcher; import java.util.regex.Pattern; +import io.mycat.route.parser.util.ParseUtil; + /** * @author mycat */ @@ -39,13 +39,17 @@ 
public final class ServerParseShow { public static final int MYCAT_STATUS = 3; public static final int MYCAT_CLUSTER = 4; public static final int TABLES = 5; + public static final int FULLTABLES =65; public static int parse(String stmt, int offset) { int i = offset; for (; i < stmt.length(); i++) { switch (stmt.charAt(i)) { case ' ': - continue; + continue; + case 'F': + case 'f': + return fullTableCheck(stmt,i) ; case '/': case '#': i = ParseUtil.comment(stmt, i); @@ -159,9 +163,19 @@ static int dataCheck(String stmt, int offset) { return OTHER; } + private static Pattern fullpattern = Pattern.compile("^\\s*(SHOW)\\s+(FULL)+\\s+(TABLES)\\s+\\s*([\\!\\'\\=a-zA-Z_0-9\\s]*)", Pattern.CASE_INSENSITIVE); + public static int fullTableCheck(String stmt,int offset ) + { + if(fullpattern.matcher(stmt).matches()) + { + return FULLTABLES; + } + return OTHER; + } + // SHOW TABLE - public static int tableCheck(String stmt, int offset) { +public static int tableCheck(String stmt, int offset) { // strict match String pat1 = "^\\s*(SHOW)\\s+(TABLES)\\s*"; diff --git a/src/main/java/io/mycat/server/parser/ServerParseStart.java b/src/main/java/io/mycat/server/parser/ServerParseStart.java index 91b901f77..500a0cead 100644 --- a/src/main/java/io/mycat/server/parser/ServerParseStart.java +++ b/src/main/java/io/mycat/server/parser/ServerParseStart.java @@ -23,7 +23,7 @@ */ package io.mycat.server.parser; -import io.mycat.util.ParseUtil; +import io.mycat.route.parser.util.ParseUtil; /** * @author mycat @@ -54,7 +54,7 @@ public static int parse(String stmt, int offset) { } // START TRANSACTION - public static int transactionCheck(String stmt, int offset) { + static int transactionCheck(String stmt, int offset) { if (stmt.length() > offset + "ransaction".length()) { char c1 = stmt.charAt(++offset); char c2 = stmt.charAt(++offset); diff --git a/src/main/java/io/mycat/server/response/CharacterSet.java b/src/main/java/io/mycat/server/response/CharacterSet.java index e9c2937a0..914e27d00 
100644 --- a/src/main/java/io/mycat/server/response/CharacterSet.java +++ b/src/main/java/io/mycat/server/response/CharacterSet.java @@ -23,15 +23,18 @@ */ package io.mycat.server.response; -import io.mycat.server.ErrorCode; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.OkPacket; +import static io.mycat.server.parser.ServerParseSet.CHARACTER_SET_CLIENT; +import static io.mycat.server.parser.ServerParseSet.CHARACTER_SET_CONNECTION; +import static io.mycat.server.parser.ServerParseSet.CHARACTER_SET_RESULTS; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.config.ErrorCode; +import io.mycat.net.mysql.OkPacket; +import io.mycat.server.ServerConnection; import io.mycat.server.parser.ServerParseSet; +import io.mycat.util.SetIgnoreUtil; import io.mycat.util.SplitUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static io.mycat.server.parser.ServerParseSet.*; /** * 字符集属性设置 @@ -41,129 +44,123 @@ */ public class CharacterSet { - private static final Logger logger = LoggerFactory - .getLogger(CharacterSet.class); - - public static void response(String stmt, MySQLFrontConnection c, int rs) { - if (-1 == stmt.indexOf(',')) { - /* 单个属性 */ - oneSetResponse(stmt, c, rs); - } else { - /* 多个属性 ,但是只关注CHARACTER_SET_RESULTS,CHARACTER_SET_CONNECTION */ - multiSetResponse(stmt, c, rs); - } - } - - private static void oneSetResponse(String stmt, MySQLFrontConnection c, - int rs) { - if ((rs & 0xff) == CHARACTER_SET_CLIENT) { - /* 忽略client属性设置 */ - c.write(OkPacket.OK); - } else { - String charset = stmt.substring(rs >>> 8).trim(); - if (charset.endsWith(";")) { - /* 结尾为 ; 标识符 */ - charset = charset.substring(0, charset.length() - 1); - } - - if (charset.startsWith("'") || charset.startsWith("`")) { - /* 与mysql保持一致,引号里的字符集不做trim操作 */ - charset = charset.substring(1, charset.length() - 1); - } - - // 设置字符集 - setCharset(charset, c); - } - } - - private static void multiSetResponse(String stmt, 
MySQLFrontConnection c, - int rs) { - String charResult = "null"; - String charConnection = "null"; - String[] sqlList = SplitUtil.split(stmt, ',', false); - - // check first - switch (rs & 0xff) { - case CHARACTER_SET_RESULTS: - charResult = sqlList[0].substring(rs >>> 8).trim(); - break; - case CHARACTER_SET_CONNECTION: - charConnection = sqlList[0].substring(rs >>> 8).trim(); - break; - } - - // check remaining - for (int i = 1; i < sqlList.length; i++) { - String sql = new StringBuilder("set ").append(sqlList[i]) - .toString(); - if ((i + 1 == sqlList.length) && sql.endsWith(";")) { - /* 去掉末尾的 ‘;’ */ - sql = sql.substring(0, sql.length() - 1); - } - int rs2 = ServerParseSet.parse(sql, "set".length()); - switch (rs2 & 0xff) { - case CHARACTER_SET_RESULTS: - charResult = sql.substring(rs2 >>> 8).trim(); - break; - case CHARACTER_SET_CONNECTION: - charConnection = sql.substring(rs2 >>> 8).trim(); - break; - case CHARACTER_SET_CLIENT: - break; - default: - StringBuilder s = new StringBuilder(); - logger.warn(s.append(c).append(sql).append(" is not executed") - .toString()); - } - } - - if (charResult.startsWith("'") || charResult.startsWith("`")) { - charResult = charResult.substring(1, charResult.length() - 1); - } - if (charConnection.startsWith("'") || charConnection.startsWith("`")) { - charConnection = charConnection.substring(1, - charConnection.length() - 1); - } - - // 如果其中一个为null,则以另一个为准。 - if ("null".equalsIgnoreCase(charResult)) { - setCharset(charConnection, c); - return; - } - if ("null".equalsIgnoreCase(charConnection)) { - setCharset(charResult, c); - return; - } - if (charConnection.equalsIgnoreCase(charResult)) { - setCharset(charConnection, c); - } else { - StringBuilder sb = new StringBuilder(); - sb.append("charset is not consistent:[connection=").append( - charConnection); - sb.append(",results=").append(charResult).append(']'); - c.writeErrMessage(ErrorCode.ER_UNKNOWN_CHARACTER_SET, sb.toString()); - } - } - - private static void 
setCharset(String charset, MySQLFrontConnection c) { - if ("null".equalsIgnoreCase(charset)) { - /* 忽略字符集为null的属性设置 */ - c.write(OkPacket.OK); - } else if (c.setCharset(charset)) { - c.write(OkPacket.OK); - } else { - try { - if (c.setCharsetIndex(Integer.parseInt(charset))) { - c.write(OkPacket.OK); - } else { - c.writeErrMessage(ErrorCode.ER_UNKNOWN_CHARACTER_SET, - "Unknown charset :" + charset); - } - } catch (RuntimeException e) { - c.writeErrMessage(ErrorCode.ER_UNKNOWN_CHARACTER_SET, - "Unknown charset :" + charset); - } - } - } + private static final Logger logger = LoggerFactory.getLogger(CharacterSet.class); + + public static void response(String stmt, ServerConnection c, int rs) { + if (-1 == stmt.indexOf(',')) { + /* 单个属性 */ + oneSetResponse(stmt, c, rs); + } else { + /* 多个属性 ,但是只关注CHARACTER_SET_RESULTS,CHARACTER_SET_CONNECTION */ + multiSetResponse(stmt, c, rs); + } + } + + private static void oneSetResponse(String stmt, ServerConnection c, int rs) { + if ((rs & 0xff) == CHARACTER_SET_CLIENT) { + /* 忽略client属性设置 */ + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + } else { + String charset = stmt.substring(rs >>> 8).trim(); + if (charset.endsWith(";")) { + /* 结尾为 ; 标识符 */ + charset = charset.substring(0, charset.length() - 1); + } + + if (charset.startsWith("'") || charset.startsWith("`")) { + /* 与mysql保持一致,引号里的字符集不做trim操作 */ + charset = charset.substring(1, charset.length() - 1); + } + + // 设置字符集 + setCharset(charset, c); + } + } + + private static void multiSetResponse(String stmt, ServerConnection c, int rs) { + String charResult = "null"; + String charConnection = "null"; + String[] sqlList = SplitUtil.split(stmt, ',', false); + + // check first + switch (rs & 0xff) { + case CHARACTER_SET_RESULTS: + charResult = sqlList[0].substring(rs >>> 8).trim(); + break; + case CHARACTER_SET_CONNECTION: + charConnection = sqlList[0].substring(rs >>> 8).trim(); + break; + } + + // check remaining + for (int i = 1; i < sqlList.length; i++) { + String sql 
= new StringBuilder("set ").append(sqlList[i]).toString(); + if ((i + 1 == sqlList.length) && sql.endsWith(";")) { + /* 去掉末尾的 ‘;’ */ + sql = sql.substring(0, sql.length() - 1); + } + int rs2 = ServerParseSet.parse(sql, "set".length()); + switch (rs2 & 0xff) { + case CHARACTER_SET_RESULTS: + charResult = sql.substring(rs2 >>> 8).trim(); + break; + case CHARACTER_SET_CONNECTION: + charConnection = sql.substring(rs2 >>> 8).trim(); + break; + case CHARACTER_SET_CLIENT: + break; + default: + boolean ignore = SetIgnoreUtil.isIgnoreStmt( sql ); + if ( !ignore ) { + StringBuilder s = new StringBuilder(); + logger.warn(s.append(c).append(sql).append(" is not executed").toString()); + } + } + } + + if (charResult.startsWith("'") || charResult.startsWith("`")) { + charResult = charResult.substring(1, charResult.length() - 1); + } + if (charConnection.startsWith("'") || charConnection.startsWith("`")) { + charConnection = charConnection.substring(1, charConnection.length() - 1); + } + + // 如果其中一个为null,则以另一个为准。 + if ("null".equalsIgnoreCase(charResult)) { + setCharset(charConnection, c); + return; + } + if ("null".equalsIgnoreCase(charConnection)) { + setCharset(charResult, c); + return; + } + if (charConnection.equalsIgnoreCase(charResult)) { + setCharset(charConnection, c); + } else { + StringBuilder sb = new StringBuilder(); + sb.append("charset is not consistent:[connection=").append(charConnection); + sb.append(",results=").append(charResult).append(']'); + c.writeErrMessage(ErrorCode.ER_UNKNOWN_CHARACTER_SET, sb.toString()); + } + } + + private static void setCharset(String charset, ServerConnection c) { + if ("null".equalsIgnoreCase(charset)) { + /* 忽略字符集为null的属性设置 */ + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + } else if (c.setCharset(charset)) { + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + } else { + try { + if (c.setCharsetIndex(Integer.parseInt(charset))) { + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); + } else { + 
c.writeErrMessage(ErrorCode.ER_UNKNOWN_CHARACTER_SET, "Unknown charset :" + charset); + } + } catch (RuntimeException e) { + c.writeErrMessage(ErrorCode.ER_UNKNOWN_CHARACTER_SET, "Unknown charset :" + charset); + } + } + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/Heartbeat.java b/src/main/java/io/mycat/server/response/Heartbeat.java index 147d1d9ef..04cd18781 100644 --- a/src/main/java/io/mycat/server/response/Heartbeat.java +++ b/src/main/java/io/mycat/server/response/Heartbeat.java @@ -23,54 +23,51 @@ */ package io.mycat.server.response; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import io.mycat.MycatServer; -import io.mycat.server.ErrorCode; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.ErrorPacket; -import io.mycat.server.packet.HeartbeatPacket; -import io.mycat.server.packet.OkPacket; +import io.mycat.config.ErrorCode; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.HeartbeatPacket; +import io.mycat.net.mysql.OkPacket; +import io.mycat.server.ServerConnection; import io.mycat.util.TimeUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * @author mycat */ public class Heartbeat { - public static final Logger HEARTBEAT = LoggerFactory - .getLogger("heartbeat"); + private static final Logger HEARTBEAT = LoggerFactory.getLogger("heartbeat"); - public static void response(MySQLFrontConnection c, byte[] data) { - HeartbeatPacket hp = new HeartbeatPacket(); - hp.read(data); - if (MycatServer.getInstance().isOnline()) { - OkPacket ok = new OkPacket(); - ok.packetId = 1; - ok.affectedRows = hp.id; - ok.serverStatus = 2; - ok.write(c); - if (HEARTBEAT.isInfoEnabled()) { - HEARTBEAT.info(responseMessage("OK", c, hp.id)); - } - } else { - ErrorPacket error = new ErrorPacket(); - error.packetId = 1; - error.errno = ErrorCode.ER_SERVER_SHUTDOWN; - error.message = String.valueOf(hp.id).getBytes(); - error.write(c); - if (HEARTBEAT.isInfoEnabled()) { - 
HEARTBEAT.info(responseMessage("ERROR", c, hp.id)); - } - } - } + public static void response(ServerConnection c, byte[] data) { + HeartbeatPacket hp = new HeartbeatPacket(); + hp.read(data); + if (MycatServer.getInstance().isOnline()) { + OkPacket ok = new OkPacket(); + ok.packetId = 1; + ok.affectedRows = hp.id; + ok.serverStatus = 2; + ok.write(c); + if (HEARTBEAT.isInfoEnabled()) { + HEARTBEAT.info(responseMessage("OK", c, hp.id)); + } + } else { + ErrorPacket error = new ErrorPacket(); + error.packetId = 1; + error.errno = ErrorCode.ER_SERVER_SHUTDOWN; + error.message = String.valueOf(hp.id).getBytes(); + error.write(c); + if (HEARTBEAT.isInfoEnabled()) { + HEARTBEAT.info(responseMessage("ERROR", c, hp.id)); + } + } + } - private static String responseMessage(String action, - MySQLFrontConnection c, long id) { - return new StringBuilder("RESPONSE:").append(action).append(", id=") - .append(id).append(", host=").append(c.getHost()) - .append(", port=").append(c.getPort()).append(", time=") - .append(TimeUtil.currentTimeMillis()).toString(); - } + private static String responseMessage(String action, ServerConnection c, long id) { + return new StringBuilder("RESPONSE:").append(action).append(", id=").append(id).append(", host=") + .append(c.getHost()).append(", port=").append(c.getPort()).append(", time=") + .append(TimeUtil.currentTimeMillis()).toString(); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/InformationSchemaProfiling.java b/src/main/java/io/mycat/server/response/InformationSchemaProfiling.java new file mode 100644 index 000000000..2eb86d464 --- /dev/null +++ b/src/main/java/io/mycat/server/response/InformationSchemaProfiling.java @@ -0,0 +1,74 @@ +package io.mycat.server.response; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import 
io.mycat.server.ServerConnection; + +import java.nio.ByteBuffer; + + +public class InformationSchemaProfiling +{ + + private static final int FIELD_COUNT = 3; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + + /** + * response method. + * @param c + */ + public static void response(ServerConnection c) { + + + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + fields[i] = PacketUtil.getField("State" , Fields.FIELD_TYPE_VAR_STRING); + fields[i].packetId = ++packetId; + fields[i+1] = PacketUtil.getField("Duration" , Fields.FIELD_TYPE_DECIMAL); + fields[i+1].packetId = ++packetId; + + fields[i+2] = PacketUtil.getField("Percentage" , Fields.FIELD_TYPE_DECIMAL); + fields[i+2].packetId = ++packetId; + eof.packetId = ++packetId; + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + packetId = eof.packetId; + + + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // post write + c.write(buffer); + + + } + + + + + + + +} diff --git a/src/main/java/io/mycat/server/response/Ping.java b/src/main/java/io/mycat/server/response/Ping.java index c07cc946c..50a129db9 100644 --- a/src/main/java/io/mycat/server/response/Ping.java +++ b/src/main/java/io/mycat/server/response/Ping.java @@ -24,10 +24,10 @@ package io.mycat.server.response; import io.mycat.MycatServer; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.ErrorPacket; -import io.mycat.server.packet.OkPacket; -import io.mycat.server.packet.util.PacketUtil; +import io.mycat.backend.mysql.PacketUtil; +import 
io.mycat.net.FrontendConnection; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.OkPacket; /** * 加入了offline状态推送,用于心跳语句。 @@ -38,9 +38,9 @@ public class Ping { private static final ErrorPacket error = PacketUtil.getShutdown(); - public static void response(MySQLFrontConnection c) { + public static void response(FrontendConnection c) { if (MycatServer.getInstance().isOnline()) { - c.write(OkPacket.OK); + c.write(c.writeToBuffer(OkPacket.OK, c.allocate())); } else { error.write(c); } diff --git a/src/main/java/io/mycat/server/response/PreparedStmtResponse.java b/src/main/java/io/mycat/server/response/PreparedStmtResponse.java index 2d73e3808..1f03b8ec2 100644 --- a/src/main/java/io/mycat/server/response/PreparedStmtResponse.java +++ b/src/main/java/io/mycat/server/response/PreparedStmtResponse.java @@ -23,20 +23,20 @@ */ package io.mycat.server.response; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.PreparedOkPacket; -import io.mycat.server.packet.util.PreparedStatement; +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PreparedStatement; +import io.mycat.net.FrontendConnection; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.PreparedOkPacket; /** * @author mycat */ public class PreparedStmtResponse { - public static void response(PreparedStatement pstmt, MySQLFrontConnection c) { + public static void response(PreparedStatement pstmt, FrontendConnection c) { byte packetId = 0; // write preparedOk packet @@ -45,21 +45,19 @@ public static void response(PreparedStatement pstmt, MySQLFrontConnection c) { preparedOk.statementId = pstmt.getId(); preparedOk.columnsNumber = pstmt.getColumnsNumber(); preparedOk.parametersNumber = pstmt.getParametersNumber(); - BufferArray bufferArray = 
NetSystem.getInstance().getBufferPool() - .allocateArray(); - preparedOk.write(bufferArray); - + ByteBuffer buffer = preparedOk.write(c.allocate(), c,true); + // write parameter field packet int parametersNumber = preparedOk.parametersNumber; if (parametersNumber > 0) { for (int i = 0; i < parametersNumber; i++) { FieldPacket field = new FieldPacket(); field.packetId = ++packetId; - field.write(bufferArray); + buffer = field.write(buffer, c,true); } EOFPacket eof = new EOFPacket(); eof.packetId = ++packetId; - eof.write(bufferArray); + buffer = eof.write(buffer, c,true); } // write column field packet @@ -68,15 +66,15 @@ public static void response(PreparedStatement pstmt, MySQLFrontConnection c) { for (int i = 0; i < columnsNumber; i++) { FieldPacket field = new FieldPacket(); field.packetId = ++packetId; - field.write(bufferArray); + buffer = field.write(buffer, c,true); } EOFPacket eof = new EOFPacket(); eof.packetId = ++packetId; - eof.write(bufferArray); + buffer = eof.write(buffer, c,true); } // send buffer - c.write(bufferArray); + c.write(buffer); } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ReloadCallBack.java b/src/main/java/io/mycat/server/response/ReloadCallBack.java deleted file mode 100644 index 49c53166c..000000000 --- a/src/main/java/io/mycat/server/response/ReloadCallBack.java +++ /dev/null @@ -1,42 +0,0 @@ -package io.mycat.server.response; - -import com.google.common.util.concurrent.FutureCallback; -import io.mycat.server.ErrorCode; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.loader.ReloadUtil; -import io.mycat.server.packet.OkPacket; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * 异步执行回调类,用于回写数据给用户等。 - */ -public class ReloadCallBack implements FutureCallback { - private static final Logger LOGGER = LoggerFactory.getLogger(ReloadUtil.class); - - private MySQLFrontConnection mc; - - public ReloadCallBack(MySQLFrontConnection c) { - this.mc = c; - } - - 
@Override - public void onSuccess(Boolean result) { - if (result) { - LOGGER.warn("send ok package to client " + String.valueOf(mc)); - OkPacket ok = new OkPacket(); - ok.packetId = 1; - ok.affectedRows = 1; - ok.serverStatus = 2; - ok.message = "Reload config success".getBytes(); - ok.write(mc); - } else { - mc.writeErrMessage(ErrorCode.ER_YES, "Reload config failure"); - } - } - - @Override - public void onFailure(Throwable t) { - mc.writeErrMessage(ErrorCode.ER_YES, "Reload config failure"); - } -} diff --git a/src/main/java/io/mycat/server/response/RollbackCallBack.java b/src/main/java/io/mycat/server/response/RollbackCallBack.java deleted file mode 100644 index 6e5fe0a8c..000000000 --- a/src/main/java/io/mycat/server/response/RollbackCallBack.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server.response; - - -import io.mycat.MycatServer; -import io.mycat.server.ErrorCode; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.loader.ReloadUtil; -import io.mycat.server.packet.OkPacket; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.locks.ReentrantLock; - -/** - * @author mycat - */ -public final class RollbackCallBack { - private static final Logger LOGGER = LoggerFactory.getLogger(RollbackCallBack.class); - - public static void execute(MySQLFrontConnection c) { - final ReentrantLock lock = MycatServer.getInstance().getConfig().getLock(); - lock.lock(); - try { - if (ReloadUtil.rollback()) { - StringBuilder s = new StringBuilder(); - s.append(c).append("Rollback config success by manager"); - LOGGER.warn(s.toString()); - OkPacket ok = new OkPacket(); - ok.packetId = 1; - ok.affectedRows = 1; - ok.serverStatus = 2; - ok.message = "Rollback config success".getBytes(); - ok.write(c); - } else { - c.writeErrMessage(ErrorCode.ER_YES, "Rollback config failure"); - } - } finally { - lock.unlock(); - } - } - - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/SelectConnnectID.java b/src/main/java/io/mycat/server/response/SelectConnnectID.java new file mode 100644 index 000000000..bee7bd74b --- /dev/null +++ b/src/main/java/io/mycat/server/response/SelectConnnectID.java @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.server.response; + +import java.nio.ByteBuffer; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; +import io.mycat.util.RandomUtil; +import io.mycat.util.StringUtil; + +/** + * @author mycat + */ +public class SelectConnnectID { + + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + private static final ErrorPacket error = PacketUtil.getShutdown(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + fields[i] = PacketUtil.getField("CONNECTION_ID()", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + } + + public static void response(ServerConnection c) { + if (MycatServer.getInstance().isOnline()) { + ByteBuffer buffer = c.allocate(); + buffer = header.write(buffer, c,true); + for 
(FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + buffer = eof.write(buffer, c,true); + byte packetId = eof.packetId; + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(getConnectID(c)); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + c.write(buffer); + } else { + error.write(c); + } + } + + private static byte[] getConnectID(ServerConnection c) { + StringBuilder sb = new StringBuilder(); + sb.append(new String(RandomUtil.randomBytes(10000))); + return StringUtil.encode(sb.toString(), c.getCharset()); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/SelectDatabase.java b/src/main/java/io/mycat/server/response/SelectDatabase.java index 0bd17cba2..7ac7f9029 100644 --- a/src/main/java/io/mycat/server/response/SelectDatabase.java +++ b/src/main/java/io/mycat/server/response/SelectDatabase.java @@ -23,53 +23,50 @@ */ package io.mycat.server.response; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; import io.mycat.util.StringUtil; /** * @author mycat */ public class SelectDatabase { - private static final int FIELD_COUNT = 1; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - 
private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - fields[i] = PacketUtil.getField("DATABASE()", - Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - eof.packetId = ++packetId; - } + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + fields[i] = PacketUtil.getField("DATABASE()", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + } - public static void response(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - header.write(bufferArray); - for (FieldPacket field : fields) { - field.write(bufferArray); - } - eof.write(bufferArray); - byte packetId = eof.packetId; - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(StringUtil.encode(c.getSchema(), c.getCharset())); - row.packetId = ++packetId; - row.write(bufferArray); - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - c.write(bufferArray); - } + public static void response(ServerConnection c) { + ByteBuffer buffer = c.allocate(); + buffer = header.write(buffer, c,true); + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + buffer = eof.write(buffer, c,true); + byte packetId = eof.packetId; + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(c.getSchema(), c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = 
lastEof.write(buffer, c,true); + c.write(buffer); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/SelectIdentity.java b/src/main/java/io/mycat/server/response/SelectIdentity.java index bff0f2d27..50f9fa5c4 100644 --- a/src/main/java/io/mycat/server/response/SelectIdentity.java +++ b/src/main/java/io/mycat/server/response/SelectIdentity.java @@ -23,68 +23,65 @@ */ package io.mycat.server.response; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.parser.util.ParseUtil; +import io.mycat.server.ServerConnection; import io.mycat.util.LongUtil; -import io.mycat.util.ParseUtil; /** * @author mycat */ public class SelectIdentity { - private static final int FIELD_COUNT = 1; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - static { - byte packetId = 0; - header.packetId = ++packetId; - } + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + static { + byte packetId = 0; + header.packetId = ++packetId; + } + + public static void response(ServerConnection c, String stmt, int aliasIndex, final String orgName) { + String alias = ParseUtil.parseAlias(stmt, aliasIndex); + if (alias == null) { + alias = orgName; + } - public static void response(MySQLFrontConnection c, String stmt, - int aliasIndex, 
final String orgName) { - String alias = ParseUtil.parseAlias(stmt, aliasIndex); - if (alias == null) { - alias = orgName; - } + ByteBuffer buffer = c.allocate(); - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + // write header + buffer = header.write(buffer, c,true); - // write header - header.write(bufferArray); - // write fields - byte packetId = header.packetId; - FieldPacket field = PacketUtil.getField(alias, orgName, - Fields.FIELD_TYPE_LONGLONG); - field.packetId = ++packetId; - field.write(bufferArray); + // write fields + byte packetId = header.packetId; + FieldPacket field = PacketUtil.getField(alias, orgName, Fields.FIELD_TYPE_LONGLONG); + field.packetId = ++packetId; + buffer = field.write(buffer, c,true); - // write eof - EOFPacket eof = new EOFPacket(); - eof.packetId = ++packetId; - eof.write(bufferArray); + // write eof + EOFPacket eof = new EOFPacket(); + eof.packetId = ++packetId; + buffer = eof.write(buffer, c,true); - // write rows - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(LongUtil.toBytes(c.getLastInsertId())); - row.packetId = ++packetId; - row.write(bufferArray); + // write rows + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(LongUtil.toBytes(c.getLastInsertId())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); - // write last eof - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); - // post write - c.write(bufferArray); - } + // post write + c.write(buffer); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/SelectLastInsertId.java b/src/main/java/io/mycat/server/response/SelectLastInsertId.java index ce415f6e0..4f9130e92 100644 --- a/src/main/java/io/mycat/server/response/SelectLastInsertId.java +++ 
b/src/main/java/io/mycat/server/response/SelectLastInsertId.java @@ -23,70 +23,66 @@ */ package io.mycat.server.response; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.parser.util.ParseUtil; +import io.mycat.server.ServerConnection; import io.mycat.util.LongUtil; -import io.mycat.util.ParseUtil; /** * @author mycat */ public class SelectLastInsertId { - private static final String ORG_NAME = "LAST_INSERT_ID()"; - private static final int FIELD_COUNT = 1; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - static { - byte packetId = 0; - header.packetId = ++packetId; - } + private static final String ORG_NAME = "LAST_INSERT_ID()"; + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + static { + byte packetId = 0; + header.packetId = ++packetId; + } - public static void response(MySQLFrontConnection c, String stmt, - int aliasIndex) { - String alias = ParseUtil.parseAlias(stmt, aliasIndex); - if (alias == null) { - alias = ORG_NAME; - } + public static void response(ServerConnection c, String stmt, int aliasIndex) { + String alias = ParseUtil.parseAlias(stmt, aliasIndex); + if (alias == null) { + alias = ORG_NAME; + } - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + ByteBuffer buffer = 
c.allocate(); - // write header - header.write(bufferArray); + // write header + buffer = header.write(buffer, c,true); - // write fields - byte packetId = header.packetId; - FieldPacket field = PacketUtil.getField(alias, ORG_NAME, - Fields.FIELD_TYPE_LONGLONG); - field.packetId = ++packetId; - field.write(bufferArray); + // write fields + byte packetId = header.packetId; + FieldPacket field = PacketUtil.getField(alias, ORG_NAME, Fields.FIELD_TYPE_LONGLONG); + field.packetId = ++packetId; + buffer = field.write(buffer, c,true); - // write eof - EOFPacket eof = new EOFPacket(); - eof.packetId = ++packetId; - eof.write(bufferArray); + // write eof + EOFPacket eof = new EOFPacket(); + eof.packetId = ++packetId; + buffer = eof.write(buffer, c,true); - // write rows - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(LongUtil.toBytes(c.getLastInsertId())); - row.packetId = ++packetId; - row.write(bufferArray); + // write rows + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(LongUtil.toBytes(c.getLastInsertId())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); - // write last eof - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); - // post write - c.write(bufferArray); - } + // post write + c.write(buffer); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/SelectSessionAutoIncrement.java b/src/main/java/io/mycat/server/response/SelectSessionAutoIncrement.java deleted file mode 100644 index ee931a9d2..000000000 --- a/src/main/java/io/mycat/server/response/SelectSessionAutoIncrement.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.response; - -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; -import io.mycat.util.LongUtil; - -/** - * @author mycat - */ -public final class SelectSessionAutoIncrement { - - private static final int FIELD_COUNT = 1; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - - fields[i] = PacketUtil.getField("SESSION.AUTOINCREMENT", - Fields.FIELD_TYPE_LONGLONG); - fields[i++].packetId = ++packetId; - - eof.packetId = ++packetId; - } 
- - public static void execute(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - // write header - header.write(bufferArray); - - // write fields - for (FieldPacket field : fields) { - field.write(bufferArray); - } - - // write eof - eof.write(bufferArray); - - // write rows - byte packetId = eof.packetId; - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.packetId = ++packetId; - row.add(LongUtil.toBytes(1)); - row.write(bufferArray); - - // write last eof - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - - // post write - c.write(bufferArray); - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/SelectTxReadOnly.java b/src/main/java/io/mycat/server/response/SelectTxReadOnly.java new file mode 100644 index 000000000..17545e70b --- /dev/null +++ b/src/main/java/io/mycat/server/response/SelectTxReadOnly.java @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.server.response; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + +import java.nio.ByteBuffer; + +/** + * @author mycat + */ +public class SelectTxReadOnly { + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + private static byte[] longbt= LongUtil.toBytes(0) ; + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + fields[i] = PacketUtil.getField("@@session.tx_read_only", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + + } + + public static void response(ServerConnection c) { + ByteBuffer buffer = c.allocate(); + buffer = header.write(buffer, c,true); + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + buffer = eof.write(buffer, c,true); + byte packetId = eof.packetId; + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(longbt); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + c.write(buffer); + } + +} diff --git a/src/main/java/io/mycat/server/response/SelectUser.java b/src/main/java/io/mycat/server/response/SelectUser.java index 9d6c3886d..93c511d92 100644 --- a/src/main/java/io/mycat/server/response/SelectUser.java +++ 
b/src/main/java/io/mycat/server/response/SelectUser.java @@ -23,17 +23,17 @@ */ package io.mycat.server.response; +import java.nio.ByteBuffer; + import io.mycat.MycatServer; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.ErrorPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; import io.mycat.util.StringUtil; /** @@ -41,48 +41,46 @@ */ public class SelectUser { - private static final int FIELD_COUNT = 1; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - private static final ErrorPacket error = PacketUtil.getShutdown(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - fields[i] = PacketUtil.getField("USER()", Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - eof.packetId = ++packetId; - } + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + private static final ErrorPacket error = PacketUtil.getShutdown(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + fields[i] = 
PacketUtil.getField("USER()", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + } - public static void response(MySQLFrontConnection c) { - if (MycatServer.getInstance().isOnline()) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - header.write(bufferArray); - for (FieldPacket field : fields) { - field.write(bufferArray); - } - eof.write(bufferArray); - byte packetId = eof.packetId; - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(getUser(c)); - row.packetId = ++packetId; - row.write(bufferArray); - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - c.write(bufferArray); - } else { - error.write(c); - } - } + public static void response(ServerConnection c) { + if (MycatServer.getInstance().isOnline()) { + ByteBuffer buffer = c.allocate(); + buffer = header.write(buffer, c,true); + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + buffer = eof.write(buffer, c,true); + byte packetId = eof.packetId; + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(getUser(c)); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + c.write(buffer); + } else { + error.write(c); + } + } - private static byte[] getUser(MySQLFrontConnection c) { - StringBuilder sb = new StringBuilder(); - sb.append(c.getUser()).append('@').append(c.getHost()); - return StringUtil.encode(sb.toString(), c.getCharset()); - } + private static byte[] getUser(ServerConnection c) { + StringBuilder sb = new StringBuilder(); + sb.append(c.getUser()).append('@').append(c.getHost()); + return StringUtil.encode(sb.toString(), c.getCharset()); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/SelectVariables.java 
b/src/main/java/io/mycat/server/response/SelectVariables.java index d0d30727b..864408d88 100644 --- a/src/main/java/io/mycat/server/response/SelectVariables.java +++ b/src/main/java/io/mycat/server/response/SelectVariables.java @@ -24,33 +24,38 @@ package io.mycat.server.response; import com.google.common.base.Splitter; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import io.mycat.backend.BackendConnection; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.NonBlockingSession; +import io.mycat.server.ServerConnection; +import io.mycat.util.LongUtil; +import io.mycat.util.StringUtil; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; /** * @author mycat */ public final class SelectVariables { - private static final Logger LOGGER = LoggerFactory - .getLogger(SelectVariables.class); + private static final Logger LOGGER = LoggerFactory.getLogger(SelectVariables.class); - public static void execute(MySQLFrontConnection c, String sql) { + public static void execute(ServerConnection c, String sql) { String subSql= sql.substring(sql.indexOf("SELECT")+6); List splitVar= Splitter.on(",").omitEmptyStrings().trimResults().splitToList(subSql) ; @@ -69,23 +74,22 @@ public static 
void execute(MySQLFrontConnection c, String sql) { fields[i++].packetId = ++packetId; } - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + ByteBuffer buffer = c.allocate(); // write header - header.write(bufferArray); + buffer = header.write(buffer, c,true); // write fields for (FieldPacket field : fields) { - field.write(bufferArray); + buffer = field.write(buffer, c,true); } EOFPacket eof = new EOFPacket(); eof.packetId = ++packetId; // write eof - eof.write(bufferArray); + buffer = eof.write(buffer, c,true); // write rows //byte packetId = eof.packetId; @@ -100,42 +104,44 @@ public static void execute(MySQLFrontConnection c, String sql) { } row.packetId = ++packetId; - row.write(bufferArray); + buffer = row.write(buffer, c,true); // write lastEof EOFPacket lastEof = new EOFPacket(); lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + buffer = lastEof.write(buffer, c,true); // write buffer - c.write(bufferArray); + c.write(buffer); } - private static List convert(List in) { List out=new ArrayList<>(); for (String s : in) { - int asIndex=s.toUpperCase().indexOf(" AS "); + int asIndex=s.toUpperCase().indexOf(" AS "); if(asIndex!=-1) { out.add(s.substring(asIndex+4)) ; } } - if(out.isEmpty()) - { - return in; - } else - { - return out; - } + if(out.isEmpty()) + { + return in; + } else + { + return out; + } } + + + private static final Map variables = new HashMap(); static { variables.put("@@character_set_client", "utf8"); @@ -158,7 +164,6 @@ private static List convert(List in) variables.put("@@wait_timeout", "172800"); variables.put("@@session.auto_increment_increment", "1"); - //for jdbc driver 5.1.37 variables.put("character_set_client", "utf8"); variables.put("character_set_connection", "utf8"); variables.put("character_set_results", "utf8"); diff --git a/src/main/java/io/mycat/server/response/SelectVersion.java b/src/main/java/io/mycat/server/response/SelectVersion.java index 7de6df845..136eeb765 100644 --- 
a/src/main/java/io/mycat/server/response/SelectVersion.java +++ b/src/main/java/io/mycat/server/response/SelectVersion.java @@ -23,54 +23,51 @@ */ package io.mycat.server.response; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.Versions; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.config.Versions; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; /** * @author mycat */ public class SelectVersion { - private static final int FIELD_COUNT = 1; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - fields[i] = PacketUtil.getField("VERSION()", - Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - eof.packetId = ++packetId; - } + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + fields[i] = PacketUtil.getField("VERSION()", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + } - public static 
void response(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - header.write(bufferArray); - for (FieldPacket field : fields) { - field.write(bufferArray); - } - eof.write(bufferArray); - byte packetId = eof.packetId; - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(Versions.SERVER_VERSION); - row.packetId = ++packetId; - row.write(bufferArray); - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - c.write(bufferArray); - } + public static void response(ServerConnection c) { + ByteBuffer buffer = c.allocate(); + buffer = header.write(buffer, c,true); + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + buffer = eof.write(buffer, c,true); + byte packetId = eof.packetId; + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(Versions.SERVER_VERSION); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + c.write(buffer); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/SelectVersionComment.java b/src/main/java/io/mycat/server/response/SelectVersionComment.java index a4d4f8e97..daf538368 100644 --- a/src/main/java/io/mycat/server/response/SelectVersionComment.java +++ b/src/main/java/io/mycat/server/response/SelectVersionComment.java @@ -23,70 +23,63 @@ */ package io.mycat.server.response; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; 
+import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; /** * @author mycat */ -public final class SelectVersionComment { - - private static final byte[] VERSION_COMMENT = "MyCat Server (monitor)" - .getBytes(); - private static final int FIELD_COUNT = 1; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - - fields[i] = PacketUtil.getField("@@VERSION_COMMENT", - Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - - eof.packetId = ++packetId; - } +public class SelectVersionComment { - public static void response(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + private static final byte[] VERSION_COMMENT = "MyCat Server (OpenCloundDB)".getBytes(); + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + fields[i] = PacketUtil.getField("@@VERSION_COMMENT", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + } - // write header - header.write(bufferArray); + public static void response(ServerConnection c) { + ByteBuffer buffer = c.allocate(); - // write fields - for (FieldPacket field : fields) { - field.write(bufferArray); + // write header + buffer = header.write(buffer, c,true); - } + // write fields + for (FieldPacket field 
: fields) { + buffer = field.write(buffer, c,true); + } - // write eof - eof.write(bufferArray); + // write eof + buffer = eof.write(buffer, c,true); - // write rows - byte packetId = eof.packetId; - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(VERSION_COMMENT); - row.packetId = ++packetId; - row.write(bufferArray); + // write rows + byte packetId = eof.packetId; + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(VERSION_COMMENT); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); - // write last eof - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); - // post write - c.write(bufferArray); - } + // post write + c.write(buffer); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/SessionIncrement.java b/src/main/java/io/mycat/server/response/SessionIncrement.java index de0c14a5d..86bd453bf 100644 --- a/src/main/java/io/mycat/server/response/SessionIncrement.java +++ b/src/main/java/io/mycat/server/response/SessionIncrement.java @@ -23,15 +23,15 @@ */ package io.mycat.server.response; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; import io.mycat.util.LongUtil; /** @@ 
-39,39 +39,35 @@ */ public class SessionIncrement { - private static final int FIELD_COUNT = 1; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - fields[i] = PacketUtil.getField("@@session.auto_increment_increment", - Fields.FIELD_TYPE_LONG); - fields[i++].packetId = ++packetId; - eof.packetId = ++packetId; - } - - public static void response(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + fields[i] = PacketUtil.getField("@@session.auto_increment_increment", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + } - header.write(bufferArray); - for (FieldPacket field : fields) { - field.write(bufferArray); - } - eof.write(bufferArray); - byte packetId = eof.packetId; - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(LongUtil.toBytes(1)); - row.packetId = ++packetId; - row.write(bufferArray); - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - c.write(bufferArray); - } + public static void response(ServerConnection c) { + ByteBuffer buffer = c.allocate(); + buffer = header.write(buffer, c,true); + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + buffer = eof.write(buffer, c,true); + byte packetId = eof.packetId; + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + 
row.add(LongUtil.toBytes(1)); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + c.write(buffer); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/SessionIsolation.java b/src/main/java/io/mycat/server/response/SessionIsolation.java index 9af2ab966..df22f4dd7 100644 --- a/src/main/java/io/mycat/server/response/SessionIsolation.java +++ b/src/main/java/io/mycat/server/response/SessionIsolation.java @@ -23,15 +23,15 @@ */ package io.mycat.server.response; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import java.nio.ByteBuffer; + +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; import io.mycat.util.StringUtil; /** @@ -52,24 +52,22 @@ public class SessionIsolation { eof.packetId = ++packetId; } - - public static void response(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - header.write(bufferArray); + public static void response(ServerConnection c) { + ByteBuffer buffer = c.allocate(); + buffer = header.write(buffer, c,true); for (FieldPacket field : fields) { - field.write(bufferArray); + buffer = field.write(buffer, c,true); } - eof.write(bufferArray); + buffer = eof.write(buffer, c,true); byte packetId = eof.packetId; RowDataPacket row = new RowDataPacket(FIELD_COUNT); 
row.add(StringUtil.encode("REPEATABLE-READ",c.getCharset())); row.packetId = ++packetId; - row.write(bufferArray); + buffer = row.write(buffer, c,true); EOFPacket lastEof = new EOFPacket(); lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - c.write(bufferArray); + buffer = lastEof.write(buffer, c,true); + c.write(buffer); } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowCobarCluster.java b/src/main/java/io/mycat/server/response/ShowCobarCluster.java new file mode 100644 index 000000000..c2889a416 --- /dev/null +++ b/src/main/java/io/mycat/server/response/ShowCobarCluster.java @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.server.response; + +import java.nio.ByteBuffer; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Alarms; +import io.mycat.config.Fields; +import io.mycat.config.MycatCluster; +import io.mycat.config.MycatConfig; +import io.mycat.config.MycatNode; +import io.mycat.config.model.MycatNodeConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; +import io.mycat.util.IntegerUtil; +import io.mycat.util.StringUtil; + +/** + * @author mycat + */ +public class ShowCobarCluster { + + private static final Logger alarm = LoggerFactory.getLogger("alarm"); + + private static final int FIELD_COUNT = 2; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + fields[i] = PacketUtil.getField("HOST", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + fields[i] = PacketUtil.getField("WEIGHT", Fields.FIELD_TYPE_LONG); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + } + + public static void response(ServerConnection c) { + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write field + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + byte packetId = eof.packetId; + for (RowDataPacket row : getRows(c)) { + row.packetId = ++packetId; + buffer = 
row.write(buffer, c,true); + } + + // last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // post write + c.write(buffer); + } + + private static List getRows(ServerConnection c) { + List rows = new LinkedList(); + MycatConfig config = MycatServer.getInstance().getConfig(); + MycatCluster cluster = config.getCluster(); + Map schemas = config.getSchemas(); + SchemaConfig schema = (c.getSchema() == null) ? null : schemas.get(c.getSchema()); + + // 如果没有指定schema或者schema为null,则使用全部集群。 + if (schema == null) { + Map nodes = cluster.getNodes(); + for (MycatNode n : nodes.values()) { + if (n != null && n.isOnline()) { + rows.add(getRow(n, c.getCharset())); + } + } + } else { + + Map nodes = cluster.getNodes(); + for (MycatNode n : nodes.values()) { + if (n != null && n.isOnline()) { + rows.add(getRow(n, c.getCharset())); + } + } + } + + if (rows.size() == 0) { + alarm.error(Alarms.CLUSTER_EMPTY + c.toString()); + } + + return rows; + } + + private static RowDataPacket getRow(MycatNode node, String charset) { + MycatNodeConfig conf = node.getConfig(); + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(conf.getHost(), charset)); + row.add(IntegerUtil.toBytes(conf.getWeight())); + return row; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowCobarStatus.java b/src/main/java/io/mycat/server/response/ShowCobarStatus.java new file mode 100644 index 000000000..1a6de86db --- /dev/null +++ b/src/main/java/io/mycat/server/response/ShowCobarStatus.java @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.server.response; + +import java.nio.ByteBuffer; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; + +/** + * 加入了offline状态推送,用于心跳语句。 + * + * @author mycat + * @author mycat + */ +public class ShowCobarStatus { + + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + private static final RowDataPacket status = new RowDataPacket(FIELD_COUNT); + private static final EOFPacket lastEof = new EOFPacket(); + private static final ErrorPacket error = PacketUtil.getShutdown(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + fields[i] = PacketUtil.getField("STATUS", 
Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + status.add("ON".getBytes()); + status.packetId = ++packetId; + lastEof.packetId = ++packetId; + } + + public static void response(ServerConnection c) { + if (MycatServer.getInstance().isOnline()) { + ByteBuffer buffer = c.allocate(); + buffer = header.write(buffer, c,true); + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + buffer = eof.write(buffer, c,true); + buffer = status.write(buffer, c,true); + buffer = lastEof.write(buffer, c,true); + c.write(buffer); + } else { + error.write(c); + } + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowCollation.java b/src/main/java/io/mycat/server/response/ShowCollation.java deleted file mode 100644 index e67b3885c..000000000 --- a/src/main/java/io/mycat/server/response/ShowCollation.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.response; - -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; -import io.mycat.util.IntegerUtil; -import io.mycat.util.LongUtil; - -/** - * @author mycat - * @author mycat - */ -public final class ShowCollation { - - private static final int FIELD_COUNT = 6; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - - fields[i] = PacketUtil.getField("COLLATION", - Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - - fields[i] = PacketUtil - .getField("CHARSET", Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - - fields[i] = PacketUtil.getField("ID", Fields.FIELD_TYPE_LONG); - fields[i++].packetId = ++packetId; - - fields[i] = PacketUtil - .getField("DEFAULT", Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - - fields[i] = PacketUtil.getField("COMPILED", - Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - - fields[i] = PacketUtil.getField("SORTLEN", Fields.FIELD_TYPE_LONG); - fields[i++].packetId = ++packetId; - - eof.packetId = ++packetId; - } - - public static void execute(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - - // write header - header.write(bufferArray); - // write fields - for 
(FieldPacket field : fields) { - field.write(bufferArray); - } - - // write eof - eof.write(bufferArray); - - // write rows - byte packetId = eof.packetId; - RowDataPacket row = getRow(c.getCharset()); - row.packetId = ++packetId; - row.write(bufferArray); - - // write lastEof - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - - // write buffer - c.write(bufferArray); - } - - private static RowDataPacket getRow(String charset) { - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add("utf8_general_ci".getBytes()); - row.add("utf8".getBytes()); - row.add(IntegerUtil.toBytes(33)); - row.add("Yes".getBytes()); - row.add("Yes".getBytes()); - row.add(LongUtil.toBytes(1)); - return row; - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowConnectionSQL.java b/src/main/java/io/mycat/server/response/ShowConnectionSQL.java deleted file mode 100644 index abcc62968..000000000 --- a/src/main/java/io/mycat/server/response/ShowConnectionSQL.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.response; - -import io.mycat.net.BufferArray; -import io.mycat.net.Connection; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; -import io.mycat.util.LongUtil; -import io.mycat.util.StringUtil; -import io.mycat.util.TimeUtil; - -/** - * @author mycat - */ -public final class ShowConnectionSQL { - - private static final int FIELD_COUNT = 6; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - - fields[i] = PacketUtil.getField("ID", Fields.FIELD_TYPE_LONG); - fields[i++].packetId = ++packetId; - - fields[i] = PacketUtil.getField("HOST", Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - - fields[i] = PacketUtil.getField("SCHEMA", Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - - fields[i] = PacketUtil.getField("START_TIME", - Fields.FIELD_TYPE_LONGLONG); - fields[i++].packetId = ++packetId; - - fields[i] = PacketUtil.getField("EXECUTE_TIME", - Fields.FIELD_TYPE_LONGLONG); - fields[i++].packetId = ++packetId; - - fields[i] = PacketUtil.getField("SQL", Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = 
++packetId; - - eof.packetId = ++packetId; - } - - public static void execute(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - // write header - header.write(bufferArray); - - // write fields - for (FieldPacket field : fields) { - field.write(bufferArray); - } - - // write eof - eof.write(bufferArray); - - // write rows - byte packetId = eof.packetId; - String charset = c.getCharset(); - for (Connection con : NetSystem.getInstance().getAllConnectios() - .values()) { - if (con instanceof MySQLFrontConnection) { - RowDataPacket row = getRow((MySQLFrontConnection) con, charset); - row.packetId = ++packetId; - row.write(bufferArray); - } - } - - // write last eof - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - - // write buffer - c.write(bufferArray); - } - - private static RowDataPacket getRow(MySQLFrontConnection c, String charset) { - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(LongUtil.toBytes(c.getId())); - row.add(StringUtil.encode(c.getHost(), charset)); - row.add(StringUtil.encode(c.getSchema(), charset)); - row.add(LongUtil.toBytes(c.getLastReadTime())); - long rt = c.getLastReadTime(); - long wt = c.getLastWriteTime(); - row.add(LongUtil.toBytes((wt > rt) ? (wt - rt) : (TimeUtil - .currentTimeMillis() - rt))); - row.add(null); - return row; - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowDatabase.java b/src/main/java/io/mycat/server/response/ShowDatabase.java deleted file mode 100644 index 9a5e10003..000000000 --- a/src/main/java/io/mycat/server/response/ShowDatabase.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. 
you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.response; - -import io.mycat.MycatServer; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; -import io.mycat.util.StringUtil; - -import java.util.Map; -import java.util.TreeSet; - -/** - * 查看schema信息 - * - * @author mycat - * @author mycat - */ -public final class ShowDatabase { - - private static final int FIELD_COUNT = 1; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - - fields[i] = PacketUtil.getField("DATABASE", - Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = 
++packetId; - - eof.packetId = ++packetId; - } - - public static void execute(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - - // write header - header.write(bufferArray); - - // write fields - for (FieldPacket field : fields) { - field.write(bufferArray); - } - - // write eof - eof.write(bufferArray); - - // write rows - byte packetId = eof.packetId; - Map schemas = MycatServer.getInstance() - .getConfig().getSchemas(); - for (String name : new TreeSet(schemas.keySet())) { - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(StringUtil.encode(name, c.getCharset())); - row.packetId = ++packetId; - row.write(bufferArray); - } - - // write lastEof - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - - // write buffer - c.write(bufferArray); - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowDatabases.java b/src/main/java/io/mycat/server/response/ShowDatabases.java index 29369b652..80bfa52ee 100644 --- a/src/main/java/io/mycat/server/response/ShowDatabases.java +++ b/src/main/java/io/mycat/server/response/ShowDatabases.java @@ -23,90 +23,85 @@ */ package io.mycat.server.response; -import io.mycat.MycatServer; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.config.node.UserConfig; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; -import io.mycat.util.StringUtil; - +import java.nio.ByteBuffer; import java.util.Map; import java.util.Set; import java.util.TreeSet; +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import 
io.mycat.config.Fields; +import io.mycat.config.MycatConfig; +import io.mycat.config.model.UserConfig; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; +import io.mycat.util.StringUtil; + /** * @author mycat */ public class ShowDatabases { - private static final int FIELD_COUNT = 1; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - fields[i] = PacketUtil.getField("DATABASE", - Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - eof.packetId = ++packetId; - } - - public static void response(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + fields[i] = PacketUtil.getField("DATABASE", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + } - // write header - header.write(bufferArray); + public static void response(ServerConnection c) { + ByteBuffer buffer = c.allocate(); - // write fields - for (FieldPacket field : fields) { - field.write(bufferArray); + // write header + buffer = header.write(buffer, c,true); - } + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } - // write eof - eof.write(bufferArray); + // write eof + buffer = eof.write(buffer, 
c,true); - // write rows - byte packetId = eof.packetId; - MycatConfig conf = MycatServer.getInstance().getConfig(); - Map users = conf.getUsers(); - UserConfig user = users == null ? null : users.get(c.getUser()); - if (user != null) { - TreeSet schemaSet = new TreeSet(); - Set schemaList = user.getSchemas(); - if (schemaList == null || schemaList.size() == 0) { - schemaSet.addAll(conf.getSchemas().keySet()); - } else { - for (String schema : schemaList) { - schemaSet.add(schema); - } - } - for (String name : schemaSet) { - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(StringUtil.encode(name, c.getCharset())); - row.packetId = ++packetId; - row.write(bufferArray); - } - } + // write rows + byte packetId = eof.packetId; + MycatConfig conf = MycatServer.getInstance().getConfig(); + Map users = conf.getUsers(); + UserConfig user = users == null ? null : users.get(c.getUser()); + if (user != null) { + TreeSet schemaSet = new TreeSet(); + Set schemaList = user.getSchemas(); + if (schemaList == null || schemaList.size() == 0) { + schemaSet.addAll(conf.getSchemas().keySet()); + } else { + for (String schema : schemaList) { + schemaSet.add(schema); + } + } + for (String name : schemaSet) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(name, c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + } - // write last eof - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); - // post write - c.write(bufferArray); - } + // post write + c.write(buffer); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowFullTables.java b/src/main/java/io/mycat/server/response/ShowFullTables.java new file mode 100644 index 000000000..b01686dd0 --- /dev/null +++ 
b/src/main/java/io/mycat/server/response/ShowFullTables.java @@ -0,0 +1,195 @@ +package io.mycat.server.response; + +import com.google.common.base.Strings; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.ErrorCode; +import io.mycat.config.Fields; +import io.mycat.config.MycatConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.UserConfig; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; +import io.mycat.server.parser.ServerParse; +import io.mycat.server.util.SchemaUtil; +import io.mycat.util.StringUtil; + +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * show tables impl + * @author yanglixue + * + */ +public class ShowFullTables +{ + + private static final int FIELD_COUNT = 2; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + private static final String SCHEMA_KEY = "schemaName"; + private static final String LIKE_KEY = "like"; + private static final Pattern pattern = Pattern.compile("^\\s*(SHOW)\\s++(FULL)*\\s*(TABLES)(\\s+(FROM)\\s+([a-zA-Z_0-9]+))?(\\s+(LIKE\\s+'(.*)'))?\\s*",Pattern.CASE_INSENSITIVE); + + /** + * response method. + * @param c + */ + public static void response(ServerConnection c,String stmt,int type) { + String showSchemal= SchemaUtil.parseShowTableSchema(stmt) ; + String cSchema =showSchemal==null? 
c.getSchema():showSchemal; + SchemaConfig schema = MycatServer.getInstance().getConfig().getSchemas().get(cSchema); + if(schema != null) { + //不分库的schema,show tables从后端 mysql中查 + String node = schema.getDataNode(); + if(!Strings.isNullOrEmpty(node)) { + c.execute(stmt, ServerParse.SHOW); + return; + } + } else { + c.writeErrMessage(ErrorCode.ER_NO_DB_ERROR,"No database selected"); + } + + //分库的schema,直接从SchemaConfig中获取所有表名 + Map parm = buildFields(c,stmt); + Set tableSet = getTableSet(c, parm); + + + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + fields[i] = PacketUtil.getField("Tables in " + parm.get(SCHEMA_KEY), Fields.FIELD_TYPE_VAR_STRING); + fields[i].packetId = ++packetId; + fields[i+1] = PacketUtil.getField("Table_type " , Fields.FIELD_TYPE_VAR_STRING); + fields[i+1].packetId = ++packetId; + eof.packetId = ++packetId; + ByteBuffer buffer = c.allocate(); + + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + + // write eof + buffer = eof.write(buffer, c,true); + + // write rows + packetId = eof.packetId; + + for (String name : tableSet) { + RowDataPacket row = new RowDataPacket(FIELD_COUNT); + row.add(StringUtil.encode(name.toLowerCase(), c.getCharset())); + row.add(StringUtil.encode("BASE TABLE", c.getCharset())); + row.packetId = ++packetId; + buffer = row.write(buffer, c,true); + } + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // post write + c.write(buffer); + + + } + + public static Set getTableSet(ServerConnection c, String stmt) + { + Map parm = buildFields(c,stmt); + return getTableSet(c, parm); + + } + + + private static Set getTableSet(ServerConnection c, Map parm) + { + TreeSet tableSet = new TreeSet(); + MycatConfig conf = MycatServer.getInstance().getConfig(); + + Map users = conf.getUsers(); + UserConfig user = users == null ? 
null : users.get(c.getUser()); + if (user != null) { + + + Map schemas = conf.getSchemas(); + for (String name:schemas.keySet()){ + if (null !=parm.get(SCHEMA_KEY) && parm.get(SCHEMA_KEY).toUpperCase().equals(name.toUpperCase()) ){ + + if(null==parm.get("LIKE_KEY")){ + tableSet.addAll(schemas.get(name).getTables().keySet()); + }else{ + String p = "^" + parm.get("LIKE_KEY").replaceAll("%", ".*"); + Pattern pattern = Pattern.compile(p,Pattern.CASE_INSENSITIVE); + Matcher ma ; + + for (String tname : schemas.get(name).getTables().keySet()){ + ma=pattern.matcher(tname); + if(ma.matches()){ + tableSet.add(tname); + } + } + + } + + } + }; + + + + } + return tableSet; + } + + /** + * build fields + * @param c + * @param stmt + */ + private static Map buildFields(ServerConnection c,String stmt) { + + Map map = new HashMap(); + + Matcher ma = pattern.matcher(stmt); + + if(ma.find()){ + String schemaName=ma.group(6); + if (null !=schemaName && (!"".equals(schemaName)) && (!"null".equals(schemaName))){ + map.put(SCHEMA_KEY, schemaName); + } + + String like = ma.group(9); + if (null !=like && (!"".equals(like)) && (!"null".equals(like))){ + map.put("LIKE_KEY", like); + } + } + + + if(null==map.get(SCHEMA_KEY)){ + map.put(SCHEMA_KEY, c.getSchema()); + } + + + + + return map; + + } + + +} diff --git a/src/main/java/io/mycat/server/response/ShowHelp.java b/src/main/java/io/mycat/server/response/ShowHelp.java deleted file mode 100644 index acca3b712..000000000 --- a/src/main/java/io/mycat/server/response/ShowHelp.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.response; - -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; -import io.mycat.util.StringUtil; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * 打印MycatServer所支持的语句 - * - * @author mycat - * @author mycat - */ -public final class ShowHelp { - - private static final int FIELD_COUNT = 2; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - - fields[i] = PacketUtil.getField("STATEMENT", - Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - - fields[i] = PacketUtil.getField("DESCRIPTION", - Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - - eof.packetId = 
++packetId; - } - - public static void execute(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - // write header - header.write(bufferArray); - - // write fields - for (FieldPacket field : fields) { - field.write(bufferArray); - } - - // write eof - eof.write(bufferArray); - - // write rows - byte packetId = eof.packetId; - for (String key : keys) { - RowDataPacket row = getRow(key, helps.get(key), c.getCharset()); - row.packetId = ++packetId; - row.write(bufferArray); - } - - // write last eof - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - - // post write - c.write(bufferArray); - } - - private static RowDataPacket getRow(String stmt, String desc, String charset) { - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(StringUtil.encode(stmt, charset)); - row.add(StringUtil.encode(desc, charset)); - return row; - } - - private static final Map helps = new HashMap(); - private static final List keys = new ArrayList(); - static { - // show - helps.put("show @@time.current", "Report current timestamp"); - helps.put("show @@time.startup", "Report startup timestamp"); - helps.put("show @@version", "Report Mycat Server version"); - helps.put("show @@server", "Report server status"); - helps.put("show @@threadpool", "Report threadPool status"); - helps.put("show @@database", "Report databases"); - helps.put("show @@datanode", "Report dataNodes"); - helps.put("show @@datanode where schema = ?", "Report dataNodes"); - helps.put("show @@datasource where dataNode = ?", "Report dataSources"); - helps.put("show @@datasource", "Report dataSources"); - helps.put("show @@processor", "Report processor status"); - helps.put("show @@command", "Report commands status"); - helps.put("show @@connection", "Report connection status"); - helps.put("show @@cache", "Report system cache usage"); - helps.put("show @@backend", "Report backend connection status"); 
- helps.put("show @@session", "Report front session details"); - helps.put("show @@connection.sql", "Report connection sql"); - helps.put("show @@sql.execute", "Report execute status"); - helps.put("show @@sql.detail where id = ?", - "Report execute detail status"); - helps.put("show @@sql where id = ?", "Report specify SQL"); - helps.put("show @@sql.slow", "Report slow SQL"); - helps.put("show @@parser", "Report parser status"); - helps.put("show @@router", "Report router status"); - helps.put("show @@heartbeat", "Report heartbeat status"); - helps.put("show @@slow where schema = ?", "Report schema slow sql"); - helps.put("show @@slow where datanode = ?", "Report datanode slow sql"); - - // switch - helps.put("switch @@datasource name:index", "Switch dataSource"); - - // kill - helps.put("kill @@connection id1,id2,...", - "Kill the specified connections"); - - // stop - helps.put("stop @@heartbeat name:time", "Pause dataNode heartbeat"); - - // reload - helps.put("reload @@config", "Reload basic config from file"); - helps.put("reload @@config_all", "Reload all config from file"); - helps.put("reload @@route", "Reload route config from file"); - helps.put("reload @@user", "Reload user config from file"); - - // rollback - helps.put("rollback @@config", "Rollback all config from memory"); - helps.put("rollback @@route", "Rollback route config from memory"); - helps.put("rollback @@user", "Rollback user config from memory"); - - // offline/online - helps.put("offline", "Change MyCat status to OFF"); - helps.put("online", "Change MyCat status to ON"); - - // clear - helps.put("clear @@slow where schema = ?", "Clear slow sql by schema"); - helps.put("clear @@slow where datanode = ?", - "Clear slow sql by datanode"); - - // list sort - keys.addAll(helps.keySet()); - Collections.sort(keys); - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowMyCATCluster.java b/src/main/java/io/mycat/server/response/ShowMyCATCluster.java index 
33da40eb3..9d53b24aa 100644 --- a/src/main/java/io/mycat/server/response/ShowMyCATCluster.java +++ b/src/main/java/io/mycat/server/response/ShowMyCATCluster.java @@ -23,38 +23,36 @@ */ package io.mycat.server.response; -import io.mycat.MycatServer; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Alarms; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.cluster.MycatClusterConfig; -import io.mycat.server.config.cluster.MycatNode; -import io.mycat.server.config.cluster.MycatNodeConfig; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; -import io.mycat.util.IntegerUtil; -import io.mycat.util.StringUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import java.nio.ByteBuffer; import java.util.LinkedList; import java.util.List; import java.util.Map; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Alarms; +import io.mycat.config.Fields; +import io.mycat.config.MycatCluster; +import io.mycat.config.MycatConfig; +import io.mycat.config.MycatNode; +import io.mycat.config.model.MycatNodeConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; +import io.mycat.util.IntegerUtil; +import io.mycat.util.StringUtil; + /** * @author mycat */ public class ShowMyCATCluster { - private static final Logger alarm = LoggerFactory - .getLogger("alarm"); + private static final Logger 
alarm = LoggerFactory.getLogger("alarm"); private static final int FIELD_COUNT = 2; private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); @@ -71,41 +69,40 @@ public class ShowMyCATCluster { eof.packetId = ++packetId; } - public static void response(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + public static void response(ServerConnection c) { + ByteBuffer buffer = c.allocate(); // write header - header.write(bufferArray); + buffer = header.write(buffer, c,true); // write field for (FieldPacket field : fields) { - field.write(bufferArray); + buffer = field.write(buffer, c,true); } // write eof - eof.write(bufferArray); + buffer = eof.write(buffer, c,true); // write rows byte packetId = eof.packetId; for (RowDataPacket row : getRows(c)) { row.packetId = ++packetId; - row.write(bufferArray); + buffer = row.write(buffer, c,true); } // last eof EOFPacket lastEof = new EOFPacket(); lastEof.packetId = ++packetId; - lastEof.write(bufferArray); + buffer = lastEof.write(buffer, c,true); // post write - c.write(bufferArray); + c.write(buffer); } - private static List getRows(MySQLFrontConnection c) { + private static List getRows(ServerConnection c) { List rows = new LinkedList(); MycatConfig config = MycatServer.getInstance().getConfig(); - MycatClusterConfig cluster = config.getCluster(); + MycatCluster cluster = config.getCluster(); Map schemas = config.getSchemas(); SchemaConfig schema = (c.getSchema() == null) ? 
null : schemas.get(c.getSchema()); diff --git a/src/main/java/io/mycat/server/response/ShowMyCatStatus.java b/src/main/java/io/mycat/server/response/ShowMyCatStatus.java index 7ca80b0cf..6388dd573 100644 --- a/src/main/java/io/mycat/server/response/ShowMyCatStatus.java +++ b/src/main/java/io/mycat/server/response/ShowMyCatStatus.java @@ -23,17 +23,17 @@ */ package io.mycat.server.response; +import java.nio.ByteBuffer; + import io.mycat.MycatServer; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.ErrorPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.Fields; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.ErrorPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; /** * 加入了offline状态推送,用于心跳语句。 @@ -43,42 +43,39 @@ */ public class ShowMyCatStatus { - private static final int FIELD_COUNT = 1; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - private static final RowDataPacket status = new RowDataPacket(FIELD_COUNT); - private static final EOFPacket lastEof = new EOFPacket(); - private static final ErrorPacket error = PacketUtil.getShutdown(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - fields[i] = PacketUtil.getField("STATUS", Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - eof.packetId = ++packetId; - 
status.add("ON".getBytes()); - status.packetId = ++packetId; - lastEof.packetId = ++packetId; - } - - public static void response(MySQLFrontConnection c) { + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + private static final RowDataPacket status = new RowDataPacket(FIELD_COUNT); + private static final EOFPacket lastEof = new EOFPacket(); + private static final ErrorPacket error = PacketUtil.getShutdown(); + static { + int i = 0; + byte packetId = 0; + header.packetId = ++packetId; + fields[i] = PacketUtil.getField("STATUS", Fields.FIELD_TYPE_VAR_STRING); + fields[i++].packetId = ++packetId; + eof.packetId = ++packetId; + status.add("ON".getBytes()); + status.packetId = ++packetId; + lastEof.packetId = ++packetId; + } - if (MycatServer.getInstance().isOnline()) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - header.write(bufferArray); - for (FieldPacket field : fields) { - field.write(bufferArray); - } - eof.write(bufferArray); - status.write(bufferArray); - lastEof.write(bufferArray); - c.write(bufferArray); - } else { - error.write(c); - } - } + public static void response(ServerConnection c) { + if (MycatServer.getInstance().isOnline()) { + ByteBuffer buffer = c.allocate(); + buffer = header.write(buffer, c,true); + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } + buffer = eof.write(buffer, c,true); + buffer = status.write(buffer, c,true); + buffer = lastEof.write(buffer, c,true); + c.write(buffer); + } else { + error.write(c); + } + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowTables.java b/src/main/java/io/mycat/server/response/ShowTables.java index 7013557fd..607d59393 100644 --- 
a/src/main/java/io/mycat/server/response/ShowTables.java +++ b/src/main/java/io/mycat/server/response/ShowTables.java @@ -1,23 +1,6 @@ package io.mycat.server.response; -import com.google.common.base.Strings; -import io.mycat.MycatServer; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.ErrorCode; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.UserConfig; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; -import io.mycat.server.parser.ServerParse; -import io.mycat.util.StringUtil; - +import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -25,100 +8,115 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import com.google.common.base.Strings; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.PacketUtil; +import io.mycat.config.ErrorCode; +import io.mycat.config.Fields; +import io.mycat.config.MycatConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.UserConfig; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; +import io.mycat.server.parser.ServerParse; +import io.mycat.server.util.SchemaUtil; +import io.mycat.util.StringUtil; + /** * show tables impl - * * @author yanglixue - * + * */ -public class ShowTables { - - private static final int FIELD_COUNT = 1; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; 
- private static final EOFPacket eof = new EOFPacket(); - - private static final String SCHEMA_KEY = "schemaName"; - private static final Pattern pattern = Pattern - .compile( - "^\\s*(SHOW)\\s+(TABLES)(\\s+(FROM)\\s+([a-zA-Z_0-9]+))?(\\s+(LIKE\\s+'(.*)'))?\\s*", - Pattern.CASE_INSENSITIVE); - +public class ShowTables { + + private static final int FIELD_COUNT = 1; + private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT); + private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; + private static final EOFPacket eof = new EOFPacket(); + + private static final String SCHEMA_KEY = "schemaName"; + private static final String LIKE_KEY = "like"; + private static final Pattern pattern = Pattern.compile("^\\s*(SHOW)\\s+(TABLES)(\\s+(FROM)\\s+([a-zA-Z_0-9]+))?(\\s+(LIKE\\s+'(.*)'))?\\s*",Pattern.CASE_INSENSITIVE); + /** * response method. - * * @param c */ - public static void response(MySQLFrontConnection c, String stmt, int type) { - SchemaConfig schema = MycatServer.getInstance().getConfig() - .getSchemas().get(c.getSchema()); - if (schema != null) { - //不分库的schema,show tables从后端 mysql中查 + public static void response(ServerConnection c,String stmt,int type) { + String showSchemal= SchemaUtil.parseShowTableSchema(stmt) ; + String cSchema =showSchemal==null? 
c.getSchema():showSchemal; + SchemaConfig schema = MycatServer.getInstance().getConfig().getSchemas().get(cSchema); + if(schema != null) { + //不分库的schema,show tables从后端 mysql中查 String node = schema.getDataNode(); if(!Strings.isNullOrEmpty(node)) { - c.execute(stmt, ServerParse.SHOW); + c.execute(stmt, ServerParse.SHOW); return; } - } else { - c.writeErrMessage(ErrorCode.ER_NO_DB_ERROR, "No database selected"); - } - - // 分库的schema,直接从SchemaConfig中获取所有表名 - Map parm = buildFields(c, stmt); - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); + } else { + c.writeErrMessage(ErrorCode.ER_NO_DB_ERROR,"No database selected"); + return; + } + //分库的schema,直接从SchemaConfig中获取所有表名 + Map parm = buildFields(c,stmt); + java.util.Set tableSet = getTableSet(c, parm); int i = 0; byte packetId = 0; header.packetId = ++packetId; - fields[i] = PacketUtil.getField("Tables in " + parm.get(SCHEMA_KEY), - Fields.FIELD_TYPE_VAR_STRING); + fields[i] = PacketUtil.getField("Tables in " + parm.get(SCHEMA_KEY), Fields.FIELD_TYPE_VAR_STRING); fields[i++].packetId = ++packetId; eof.packetId = ++packetId; - // write header - header.write(bufferArray); + ByteBuffer buffer = c.allocate(); - // write fields - for (FieldPacket field : fields) { - field.write(bufferArray); - } + // write header + buffer = header.write(buffer, c,true); + + // write fields + for (FieldPacket field : fields) { + buffer = field.write(buffer, c,true); + } - // write eof - eof.write(bufferArray); + // write eof + buffer = eof.write(buffer, c,true); - // write rows - packetId = eof.packetId; + // write rows + packetId = eof.packetId; - TreeSet tableSet = getTables(c, parm); for (String name : tableSet) { RowDataPacket row = new RowDataPacket(FIELD_COUNT); row.add(StringUtil.encode(name.toLowerCase(), c.getCharset())); row.packetId = ++packetId; - row.write(bufferArray); + buffer = row.write(buffer, c,true); } - // write last eof - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = 
++packetId; - lastEof.write(bufferArray); - - // post write - c.write(bufferArray); + // write last eof + EOFPacket lastEof = new EOFPacket(); + lastEof.packetId = ++packetId; + buffer = lastEof.write(buffer, c,true); + + // post write + c.write(buffer); + + + } - } - public static Set getTableSet(MySQLFrontConnection c, String stmt) + public static Set getTableSet(ServerConnection c, String stmt) { Map parm = buildFields(c,stmt); - return getTables(c, parm); + return getTableSet(c, parm); } - private static TreeSet getTables(MySQLFrontConnection c, Map parm) + + private static Set getTableSet(ServerConnection c, Map parm) { TreeSet tableSet = new TreeSet(); - MycatConfig conf = MycatServer.getInstance().getConfig(); Map users = conf.getUsers(); @@ -127,24 +125,19 @@ private static TreeSet getTables(MySQLFrontConnection c, Map schemas = conf.getSchemas(); - for (String name : schemas.keySet()) { - if (null != parm.get(SCHEMA_KEY) - && parm.get(SCHEMA_KEY).toUpperCase() - .equals(name.toUpperCase())) { + for (String name:schemas.keySet()){ + if (null !=parm.get(SCHEMA_KEY) && parm.get(SCHEMA_KEY).toUpperCase().equals(name.toUpperCase()) ){ - if (null == parm.get("LIKE_KEY")) { + if(null==parm.get("LIKE_KEY")){ tableSet.addAll(schemas.get(name).getTables().keySet()); - } else { - String p = "^" - + parm.get("LIKE_KEY").replaceAll("%", ".*"); - Pattern pattern = Pattern.compile(p, - Pattern.CASE_INSENSITIVE); - Matcher ma; - - for (String tname : schemas.get(name).getTables() - .keySet()) { - ma = pattern.matcher(tname); - if (ma.matches()) { + }else{ + String p = "^" + parm.get("LIKE_KEY").replaceAll("%", ".*"); + Pattern pattern = Pattern.compile(p,Pattern.CASE_INSENSITIVE); + Matcher ma ; + + for (String tname : schemas.get(name).getTables().keySet()){ + ma=pattern.matcher(tname); + if(ma.matches()){ tableSet.add(tname); } } @@ -152,7 +145,7 @@ private static TreeSet getTables(MySQLFrontConnection c, Map getTables(MySQLFrontConnection c, Map 
buildFields(MySQLFrontConnection c, - String stmt) { - - Map map = new HashMap(); + private static Map buildFields(ServerConnection c,String stmt) { + + Map map = new HashMap(); Matcher ma = pattern.matcher(stmt); - if (ma.find()) { - String schemaName = ma.group(5); - if (null != schemaName && (!"".equals(schemaName)) - && (!"null".equals(schemaName))) { - map.put(SCHEMA_KEY, schemaName); + if(ma.find()){ + String schemaName=ma.group(5); + if (null !=schemaName && (!"".equals(schemaName)) && (!"null".equals(schemaName))){ + map.put(SCHEMA_KEY, schemaName); + } + + String like = ma.group(8); + if (null !=like && (!"".equals(like)) && (!"null".equals(like))){ + map.put("LIKE_KEY", like); + } } - String like = ma.group(8); - if (null != like && (!"".equals(like)) && (!"null".equals(like))) { - map.put("LIKE_KEY", like); - } - } - if (null == map.get(SCHEMA_KEY)) { + if(null==map.get(SCHEMA_KEY)){ map.put(SCHEMA_KEY, c.getSchema()); } + + - - - return map; - + + return map; + } + } diff --git a/src/main/java/io/mycat/server/response/ShowVariables.java b/src/main/java/io/mycat/server/response/ShowVariables.java deleted file mode 100644 index 28fbb6589..000000000 --- a/src/main/java/io/mycat/server/response/ShowVariables.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.server.response; - -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; -import io.mycat.util.StringUtil; - -import java.util.HashMap; -import java.util.Map; - -/** - * @author mycat - */ -public final class ShowVariables { - - private static final int FIELD_COUNT = 2; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - - fields[i] = PacketUtil.getField("VARIABLE_NAME", - Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - - fields[i] = PacketUtil.getField("VALUE", Fields.FIELD_TYPE_VAR_STRING); - fields[i++].packetId = ++packetId; - - eof.packetId = ++packetId; - } - - public static void execute(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - - // write header - header.write(bufferArray); - - // write fields - for (FieldPacket field : fields) { - field.write(bufferArray); - } - - // write eof - 
eof.write(bufferArray); - - // write rows - byte packetId = eof.packetId; - for (Map.Entry e : variables.entrySet()) { - RowDataPacket row = getRow(e.getKey(), e.getValue(), c.getCharset()); - row.packetId = ++packetId; - row.write(bufferArray); - } - - // write lastEof - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - - // write buffer - c.write(bufferArray); - } - - private static RowDataPacket getRow(String name, String value, - String charset) { - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(StringUtil.encode(name, charset)); - row.add(StringUtil.encode(value, charset)); - return row; - } - - private static final Map variables = new HashMap(); - static { - variables.put("character_set_client", "utf8"); - variables.put("character_set_connection", "utf8"); - variables.put("character_set_results", "utf8"); - variables.put("character_set_server", "utf8"); - variables.put("init_connect", ""); - variables.put("interactive_timeout", "172800"); - variables.put("lower_case_table_names", "1"); - variables.put("max_allowed_packet", "16777216"); - variables.put("net_buffer_length", "8192"); - variables.put("net_write_timeout", "60"); - variables.put("query_cache_size", "0"); - variables.put("query_cache_type", "OFF"); - variables.put("sql_mode", "STRICT_TRANS_TABLES"); - variables.put("system_time_zone", "CST"); - variables.put("time_zone", "SYSTEM"); - variables.put("lower_case_table_names", "1"); - variables.put("tx_isolation", "REPEATABLE-READ"); - variables.put("wait_timeout", "172800"); - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/response/ShowVersion.java b/src/main/java/io/mycat/server/response/ShowVersion.java deleted file mode 100644 index aed2b6be2..000000000 --- a/src/main/java/io/mycat/server/response/ShowVersion.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. 
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server.response; - -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.Fields; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.Versions; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.PacketUtil; - -/** - * 查看CobarServer版本 - * - * @author mycat - */ -public final class ShowVersion { - - private static final int FIELD_COUNT = 1; - private static final ResultSetHeaderPacket header = PacketUtil - .getHeader(FIELD_COUNT); - private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT]; - private static final EOFPacket eof = new EOFPacket(); - static { - int i = 0; - byte packetId = 0; - header.packetId = ++packetId; - - fields[i] = PacketUtil.getField("VERSION", Fields.FIELD_TYPE_STRING); - fields[i++].packetId = ++packetId; - - eof.packetId = ++packetId; - } - - public static void execute(MySQLFrontConnection c) { - BufferArray bufferArray = NetSystem.getInstance().getBufferPool() - .allocateArray(); - - // write header - header.write(bufferArray); - - // write fields - for (FieldPacket field : fields) { - field.write(bufferArray); - } - - // write eof - eof.write(bufferArray); - - // write rows - byte packetId = eof.packetId; - RowDataPacket row = new RowDataPacket(FIELD_COUNT); - row.add(Versions.SERVER_VERSION); - row.packetId = ++packetId; - row.write(bufferArray); - - // write last eof - EOFPacket lastEof = new EOFPacket(); - lastEof.packetId = ++packetId; - lastEof.write(bufferArray); - - // write buffer - c.write(bufferArray); - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/sqlcmd/CommitCommand.java b/src/main/java/io/mycat/server/sqlcmd/CommitCommand.java index 7e606f231..3fdeaed81 100644 --- a/src/main/java/io/mycat/server/sqlcmd/CommitCommand.java +++ 
b/src/main/java/io/mycat/server/sqlcmd/CommitCommand.java @@ -1,8 +1,8 @@ package io.mycat.server.sqlcmd; import io.mycat.backend.BackendConnection; +import io.mycat.net.mysql.ErrorPacket; import io.mycat.server.NonBlockingSession; -import io.mycat.server.packet.ErrorPacket; public class CommitCommand implements SQLCtrlCommand { diff --git a/src/main/java/io/mycat/server/sqlhandler/KillHandler.java b/src/main/java/io/mycat/server/sqlhandler/KillHandler.java deleted file mode 100644 index d4948f53d..000000000 --- a/src/main/java/io/mycat/server/sqlhandler/KillHandler.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server.sqlhandler; - -import io.mycat.net.Connection; -import io.mycat.net.NetSystem; -import io.mycat.server.ErrorCode; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.OkPacket; -import io.mycat.util.StringUtil; - -import java.nio.ByteBuffer; - -/** - * @author mycat - */ -public class KillHandler { - - public static void handle(String stmt, int offset, MySQLFrontConnection c) { - String id = stmt.substring(offset).trim(); - if (StringUtil.isEmpty(id)) { - c.writeErrMessage(ErrorCode.ER_NO_SUCH_THREAD, "NULL connection id"); - } else { - // get value - long value = 0; - try { - value = Long.parseLong(id); - } catch (NumberFormatException e) { - c.writeErrMessage(ErrorCode.ER_NO_SUCH_THREAD, - "Invalid connection id:" + id); - return; - } - - // kill myself - if (value == c.getId()) { - getOkPacket().write(c); - c.write(ByteBuffer.allocate(10)); - return; - } - - // get connection and close it - MySQLFrontConnection fc = null; - - for (Connection conn : NetSystem.getInstance().getAllConnectios() - .values()) { - if (conn instanceof MySQLFrontConnection) { - MySQLFrontConnection theCon = (MySQLFrontConnection) conn; - if (theCon.getId() == value) { - fc = theCon; - break; - } - } - } - - if (fc != null) { - fc.close("killed"); - getOkPacket().write(c); - } else { - c.writeErrMessage(ErrorCode.ER_NO_SUCH_THREAD, - "Unknown connection id:" + id); - } - } - } - - private static OkPacket getOkPacket() { - OkPacket packet = new OkPacket(); - packet.packetId = 1; - packet.affectedRows = 0; - packet.serverStatus = 2; - return packet; - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/sqlhandler/SetHandler.java b/src/main/java/io/mycat/server/sqlhandler/SetHandler.java deleted file mode 100644 index 0e3e757f1..000000000 --- a/src/main/java/io/mycat/server/sqlhandler/SetHandler.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. 
All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.server.sqlhandler; - -import io.mycat.server.ErrorCode; -import io.mycat.server.Isolations; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.packet.OkPacket; -import io.mycat.server.parser.ServerParseSet; -import io.mycat.server.response.CharacterSet; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static io.mycat.server.parser.ServerParseSet.*; - -/** - * SET 语句处理 - * - * @author mycat - */ -public final class SetHandler { - - private static final Logger logger = LoggerFactory - .getLogger(SetHandler.class); - private static final byte[] AC_OFF = new byte[] { 7, 0, 0, 1, 0, 0, 0, 0, - 0, 0, 0 }; - - public static void handle(String stmt, MySQLFrontConnection c, int offset) { - // System.out.println("SetHandler: "+stmt); - int rs = ServerParseSet.parse(stmt, offset); - switch (rs & 0xff) { - case AUTOCOMMIT_ON: - if (c.isAutocommit()) { - c.write(OkPacket.OK); - } else { - c.commit(); - c.setAutocommit(true); - } - break; - case AUTOCOMMIT_OFF: { - if (c.isAutocommit()) { - c.setAutocommit(false); - } - c.write(AC_OFF); - break; - } - case XA_FLAG_ON: { - if (c.isAutocommit()) { - c.writeErrMessage(ErrorCode.ERR_WRONG_USED, - "set xa cmd on can't used in autocommit connection "); - return; - } - c.getSession2().setXATXEnabled(true); - c.write(OkPacket.OK); - break; - } - case XA_FLAG_OFF: { - c.writeErrMessage(ErrorCode.ERR_WRONG_USED, - "set xa cmd off not for external use "); - return; - } - case TX_READ_UNCOMMITTED: { - c.setTxIsolation(Isolations.READ_UNCOMMITTED); - c.write(OkPacket.OK); - break; - } - case TX_READ_COMMITTED: { - c.setTxIsolation(Isolations.READ_COMMITTED); - c.write(OkPacket.OK); - break; - } - case TX_REPEATED_READ: { - c.setTxIsolation(Isolations.REPEATED_READ); - c.write(OkPacket.OK); - break; - } - case TX_SERIALIZABLE: { - c.setTxIsolation(Isolations.SERIALIZABLE); - c.write(OkPacket.OK); - break; - } - case NAMES: - String charset = stmt.substring(rs >>> 8).trim(); - 
if (c.setCharset(charset)) { - c.write(OkPacket.OK); - } else { - c.writeErrMessage(ErrorCode.ER_UNKNOWN_CHARACTER_SET, - "Unknown charset '" + charset + "'"); - } - break; - case CHARACTER_SET_CLIENT: - case CHARACTER_SET_CONNECTION: - case CHARACTER_SET_RESULTS: - CharacterSet.response(stmt, c, rs); - break; - default: - StringBuilder s = new StringBuilder(); - logger.warn(s.append(c).append(stmt) - .append(" is not recoginized and ignored").toString()); - c.write(OkPacket.OK); - } - } - -} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/util/SchemaUtil.java b/src/main/java/io/mycat/server/util/SchemaUtil.java new file mode 100644 index 000000000..c6f1ae675 --- /dev/null +++ b/src/main/java/io/mycat/server/util/SchemaUtil.java @@ -0,0 +1,154 @@ +package io.mycat.server.util; + +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import com.alibaba.druid.sql.parser.SQLStatementParser; +import com.alibaba.druid.sql.visitor.SchemaStatVisitor; + +import io.mycat.MycatServer; +import io.mycat.config.model.SchemaConfig; +import io.mycat.route.parser.druid.MycatSchemaStatVisitor; +import io.mycat.server.parser.ServerParse; + +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Created by magicdoom on 2016/1/26. 
+ */ +public class SchemaUtil +{ + public static SchemaInfo parseSchema(String sql) + { + SQLStatementParser parser = new MySqlStatementParser(sql); + return parseTables(parser.parseStatement(),new MycatSchemaStatVisitor() ); + } + public static String detectDefaultDb(String sql, int type) + { + String db=null; + Map schemaConfigMap = MycatServer.getInstance().getConfig() + .getSchemas(); + if(ServerParse.SELECT==type) + { + SchemaUtil.SchemaInfo schemaInfo = SchemaUtil.parseSchema(sql); + if ((schemaInfo==null||schemaInfo.table==null)&&!schemaConfigMap.isEmpty()) + { + db = schemaConfigMap.entrySet().iterator().next().getKey(); + } + + if (schemaInfo != null && schemaInfo.schema != null ) { + + if ( schemaConfigMap.containsKey(schemaInfo.schema) ) { + db = schemaInfo.schema; + + /** + * 对 MySQL 自带的元数据库 information_schema 进行返回 + */ + } else if ( "information_schema".equalsIgnoreCase( schemaInfo.schema ) ) { + db = "information_schema"; + } + } + } + else + if(ServerParse.INSERT==type||ServerParse.UPDATE==type||ServerParse.DELETE==type||ServerParse.DDL==type) + { + SchemaUtil.SchemaInfo schemaInfo = SchemaUtil.parseSchema(sql); + if(schemaInfo!=null&&schemaInfo.schema!=null&&schemaConfigMap.containsKey(schemaInfo.schema) ) { + db = schemaInfo.schema; + } + } else + if((ServerParse.SHOW==type||ServerParse.USE==type||ServerParse.EXPLAIN==type||ServerParse.SET==type + ||ServerParse.HELP==type||ServerParse.DESCRIBE==type) + && !schemaConfigMap.isEmpty()) + { + //兼容mysql gui 不填默认database + db = schemaConfigMap.entrySet().iterator().next().getKey(); + } + return db; + } + + + public static String parseShowTableSchema(String sql) + { + Matcher ma = pattern.matcher(sql); + if(ma.matches()&&ma.groupCount()>=5) + { + return ma.group(5); + } + return null; + } + + private static SchemaInfo parseTables(SQLStatement stmt, SchemaStatVisitor schemaStatVisitor) + { + + stmt.accept(schemaStatVisitor); + String key = schemaStatVisitor.getCurrentTable(); + if (key != null && 
key.contains("`")) + { + key = key.replaceAll("`", ""); + } + + if (key != null) + { + SchemaInfo schemaInfo=new SchemaInfo(); + int pos = key.indexOf("."); + if (pos > 0) + { + schemaInfo.schema=key.substring(0,pos); + schemaInfo.table=key.substring(pos+1); + } else + { + schemaInfo.table=key; + } + return schemaInfo; + } + + return null; + } + + + public static class SchemaInfo + { + public String table; + public String schema; + + @Override + public String toString() + { + final StringBuffer sb = new StringBuffer("SchemaInfo{"); + sb.append("table='").append(table).append('\''); + sb.append(", schema='").append(schema).append('\''); + sb.append('}'); + return sb.toString(); + } + } + +private static Pattern pattern = Pattern.compile("^\\s*(SHOW)\\s+(FULL)*\\s*(TABLES)\\s+(FROM)\\s+([a-zA-Z_0-9]+)\\s*([a-zA-Z_0-9\\s]*)", Pattern.CASE_INSENSITIVE); + + public static void main(String[] args) + { + String sql = "SELECT name, type FROM `mysql`.`proc` as xxxx WHERE Db='base'"; + // System.out.println(parseSchema(sql)); + sql="insert into aaa.test(id) values(1)" ; + // System.out.println(parseSchema(sql)); + sql="update updatebase.test set xx=1 " ; + //System.out.println(parseSchema(sql)); + sql="CREATE TABLE IF not EXISTS `test` (\n" + " `id` bigint(20) NOT NULL AUTO_INCREMENT,\n" + + " `sid` bigint(20) DEFAULT NULL,\n" + " `name` varchar(45) DEFAULT NULL,\n" + + " `value` varchar(45) DEFAULT NULL,\n" + + " `_slot` int(11) DEFAULT NULL COMMENT '自动迁移算法slot,禁止修改',\n" + " PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB AUTO_INCREMENT=805781256930734081 DEFAULT CHARSET=utf8"; + System.out.println(parseSchema(sql)); + String pat3 = "show full tables from base like "; + Matcher ma = pattern.matcher(pat3); + if(ma.matches()) + { + System.out.println(ma.groupCount()); + System.out.println(ma.group(5)); + } + + + + } +} diff --git a/src/main/java/io/mycat/sqlengine/BatchSQLJob.java b/src/main/java/io/mycat/sqlengine/BatchSQLJob.java index 520a5ffe6..73b839f52 100644 --- 
a/src/main/java/io/mycat/sqlengine/BatchSQLJob.java +++ b/src/main/java/io/mycat/sqlengine/BatchSQLJob.java @@ -1,10 +1,10 @@ package io.mycat.sqlengine; -import io.mycat.net.NetSystem; - import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; +import io.mycat.MycatServer; + public class BatchSQLJob { private ConcurrentHashMap runningJobs = new ConcurrentHashMap(); @@ -33,7 +33,7 @@ public void setNoMoreJobInput(boolean noMoreJobInput) { private void runJob(SQLJob newJob) { // EngineCtx.LOGGER.info("run job " + newJob); runningJobs.put(newJob.getId(), newJob); - NetSystem.getInstance().getExecutor().execute(newJob); + MycatServer.getInstance().getBusinessExecutor().execute(newJob); } public boolean jobFinished(SQLJob sqlJob) { diff --git a/src/main/java/io/mycat/sqlengine/EngineCtx.java b/src/main/java/io/mycat/sqlengine/EngineCtx.java index 69116f718..f32fd64c1 100644 --- a/src/main/java/io/mycat/sqlengine/EngineCtx.java +++ b/src/main/java/io/mycat/sqlengine/EngineCtx.java @@ -1,23 +1,23 @@ package io.mycat.sqlengine; -import io.mycat.net.BufferArray; -import io.mycat.net.NetSystem; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.NonBlockingSession; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.ResultSetHeaderPacket; -import io.mycat.server.packet.RowDataPacket; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - +import java.nio.ByteBuffer; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.ReentrantLock; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.manager.handler.ConfFileHandler; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.EmptyPacket; +import io.mycat.net.mysql.ResultSetHeaderPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.NonBlockingSession; +import 
io.mycat.server.ServerConnection; + public class EngineCtx { - public static final Logger LOGGER = LoggerFactory - .getLogger(EngineCtx.class); + public static final Logger LOGGER = LoggerFactory.getLogger(ConfFileHandler.class); private final BatchSQLJob bachJob; private AtomicInteger jobId = new AtomicInteger(0); AtomicInteger packetId = new AtomicInteger(0); @@ -26,6 +26,7 @@ public class EngineCtx { private AllJobFinishedListener allJobFinishedListener; private AtomicBoolean headerWrited = new AtomicBoolean(); private final ReentrantLock writeLock = new ReentrantLock(); + private volatile boolean hasError = false; public EngineCtx(NonBlockingSession session) { this.bachJob = new BatchSQLJob(); @@ -78,37 +79,35 @@ public void writeHeader(List afields, List bfields) { writeLock.lock(); // write new header ResultSetHeaderPacket headerPkg = new ResultSetHeaderPacket(); - headerPkg.fieldCount = afields.size() + bfields.size() - 1; + headerPkg.fieldCount = afields.size() +bfields.size()-1; headerPkg.packetId = incPackageId(); LOGGER.debug("packge id " + headerPkg.packetId); - MySQLFrontConnection sc = session.getSource(); - BufferArray bufferArray = NetSystem.getInstance() - .getBufferPool().allocateArray(); - headerPkg.write(bufferArray); + ServerConnection sc = session.getSource(); + ByteBuffer buf = headerPkg.write(sc.allocate(), sc, true); // wirte a fields for (byte[] field : afields) { field[3] = incPackageId(); - bufferArray.write(field); + buf = sc.writeToBuffer(field, buf); } // write b field - for (int i = 1; i < bfields.size(); i++) { - byte[] bfield = bfields.get(i); - bfield[3] = incPackageId(); - bufferArray.write(bfield); + for (int i=1;i afields) { if (headerWrited.compareAndSet(false, true)) { try { @@ -118,48 +117,50 @@ public void writeHeader(List afields) { headerPkg.fieldCount = afields.size();// -1; headerPkg.packetId = incPackageId(); LOGGER.debug("packge id " + headerPkg.packetId); - MySQLFrontConnection sc = session.getSource(); - BufferArray 
bufferArray = NetSystem.getInstance() - .getBufferPool().allocateArray(); + ServerConnection sc = session.getSource(); + ByteBuffer buf = headerPkg.write(sc.allocate(), sc, true); // wirte a fields for (byte[] field : afields) { field[3] = incPackageId(); - bufferArray.write(field); + buf = sc.writeToBuffer(field, buf); } // write field eof EOFPacket eofPckg = new EOFPacket(); eofPckg.packetId = incPackageId(); - eofPckg.write(bufferArray); - sc.write(bufferArray); - // LOGGER.info("header outputed ,packgId:" + eofPckg.packetId); + buf = eofPckg.write(buf, sc, true); + sc.write(buf); + //LOGGER.info("header outputed ,packgId:" + eofPckg.packetId); } finally { writeLock.unlock(); } } } - + public void writeRow(RowDataPacket rowDataPkg) { - MySQLFrontConnection sc = session.getSource(); + ServerConnection sc = session.getSource(); try { writeLock.lock(); rowDataPkg.packetId = incPackageId(); // 输出完整的 记录到客户端 - rowDataPkg.write(sc); - // LOGGER.info("write row ,packgId:" + rowDataPkg.packetId); + ByteBuffer buf = rowDataPkg.write(sc.allocate(), sc, true); + sc.write(buf); + //LOGGER.info("write row ,packgId:" + rowDataPkg.packetId); } finally { writeLock.unlock(); } } public void writeEof() { - MySQLFrontConnection sc = session.getSource(); + ServerConnection sc = session.getSource(); EOFPacket eofPckg = new EOFPacket(); eofPckg.packetId = incPackageId(); - eofPckg.write(sc); + ByteBuffer buf = eofPckg.write(sc.allocate(), sc, false); + sc.write(buf); LOGGER.info("write eof ,packgId:" + eofPckg.packetId); } + public NonBlockingSession getSession() { return session; @@ -169,10 +170,24 @@ public void onJobFinished(SQLJob sqlJob) { boolean allFinished = bachJob.jobFinished(sqlJob); if (allFinished && finished.compareAndSet(false, true)) { - LOGGER.info("all job finished for front connection: " - + session.getSource()); - allJobFinishedListener.onAllJobFinished(this); + if(!hasError){ + LOGGER.info("all job finished for front connection: " + + session.getSource()); + 
allJobFinishedListener.onAllJobFinished(this); + }else{ + LOGGER.info("all job finished with error for front connection: " + + session.getSource()); + } } } + + public boolean isHasError() { + return hasError; + } + + public void setHasError(boolean hasError) { + this.hasError = hasError; + } + } diff --git a/src/main/java/io/mycat/sqlengine/MultiRowSQLQueryResultHandler.java b/src/main/java/io/mycat/sqlengine/MultiRowSQLQueryResultHandler.java new file mode 100644 index 000000000..a85845823 --- /dev/null +++ b/src/main/java/io/mycat/sqlengine/MultiRowSQLQueryResultHandler.java @@ -0,0 +1,48 @@ +package io.mycat.sqlengine; + +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * 当SQLJob的结果有多行时,利用该处理器进行处理 + * @author digdeep@126.com + */ +public class MultiRowSQLQueryResultHandler extends OneRawSQLQueryResultHandler{ + private static final Logger LOGGER = LoggerFactory + .getLogger(MultiRowSQLQueryResultHandler.class); + // 获得结果之后,利用该对象进行回调进行通知和处理结果 + private final SQLQueryResultListener>>> callback; + + private List> resultRows = new LinkedList<>(); // 保存结果行 + + public MultiRowSQLQueryResultHandler(String[] fetchCols, + SQLQueryResultListener>>> callback) { + super(fetchCols, null); + this.callback = callback; + } + + @Override + public boolean onRowData(String dataNode, byte[] rowData) { + super.onRowData(dataNode, rowData); + resultRows.add(getResult()); + + return false; + } + + @Override + public void finished(String dataNode, boolean failed, String errorMsg) { + SQLQueryResult>> queryResult = + new SQLQueryResult>>(this.resultRows, !failed); + queryResult.setErrMsg(errorMsg); + if(callback != null) + this.callback.onResult(queryResult); // callback 是构造函数传进来,在得到结果是进行回调 + else + LOGGER.warn(" callback is null "); + } + + +} diff --git a/src/main/java/io/mycat/sqlengine/OneRawSQLQueryResultHandler.java b/src/main/java/io/mycat/sqlengine/OneRawSQLQueryResultHandler.java 
index fee69f332..1f8d30928 100644 --- a/src/main/java/io/mycat/sqlengine/OneRawSQLQueryResultHandler.java +++ b/src/main/java/io/mycat/sqlengine/OneRawSQLQueryResultHandler.java @@ -1,19 +1,19 @@ package io.mycat.sqlengine; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.RowDataPacket; - import java.util.HashMap; import java.util.List; import java.util.Map; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.RowDataPacket; + public class OneRawSQLQueryResultHandler implements SQLJobHandler { private Map fetchColPosMap; private final SQLQueryResultListener>> callback; private final String[] fetchCols; private int fieldCount = 0; - private Map result ; + private Map result = new HashMap(); public OneRawSQLQueryResultHandler(String[] fetchCols, SQLQueryResultListener>> callBack) { @@ -21,6 +21,7 @@ public OneRawSQLQueryResultHandler(String[] fetchCols, this.callback = callBack; } + private String mark; public void onHeader(String dataNode, byte[] header, List fields) { fieldCount = fields.size(); fetchColPosMap = new HashMap(); @@ -42,32 +43,55 @@ public void onHeader(String dataNode, byte[] header, List fields) { public boolean onRowData(String dataNode, byte[] rowData) { RowDataPacket rowDataPkg = new RowDataPacket(fieldCount); rowDataPkg.read(rowData); - result = new HashMap(); - for (String fetchCol : fetchCols) { - Integer ind = fetchColPosMap.get(fetchCol); + String variableName = ""; + String variableValue = ""; + //fieldcount为2可能是select x也可能是show create table命令 + if(fieldCount==2 && (fetchColPosMap.get("Variable_name")!=null || fetchColPosMap.get("Value")!=null)){ + Integer ind = fetchColPosMap.get("Variable_name"); if (ind != null) { byte[] columnData = rowDataPkg.fieldValues.get(ind); String columnVal = columnData!=null?new String(columnData):null; - result.put(fetchCol, columnVal); - - } else { - LOGGER.warn("cant't find column in sql query result " - + fetchCol); + variableName = columnVal; + } + ind = 
fetchColPosMap.get("Value"); + if (ind != null) { + byte[] columnData = rowDataPkg.fieldValues.get(ind); + String columnVal = columnData!=null?new String(columnData):null; + variableValue = columnVal; + } + result.put(variableName, variableValue); + }else{ + for (String fetchCol : fetchCols) { + Integer ind = fetchColPosMap.get(fetchCol); + if (ind != null) { + byte[] columnData = rowDataPkg.fieldValues.get(ind); + String columnVal = columnData!=null?new String(columnData):null; + result.put(fetchCol, columnVal); + } else { + LOGGER.warn("cant't find column in sql query result " + fetchCol); + } } } - - // 返回false,表示还有数据要处理,数据处理没有结束; - // 如果返回true,连接会被SQLJob关闭:conn.close("not needed by user proc") - // 对应的各种资源:socketchannel,read buffer,write buffer等都会被回收,连接会被从连接池中删除 - return false; + return false; } @Override - public void finished(String dataNode, boolean failed) { - SQLQueryResult> queryResult= - new SQLQueryResult>(this.result, !failed, dataNode); - this.callback.onResult(queryResult); + public void finished(String dataNode, boolean failed, String errorMsg) { + SQLQueryResult> queryRestl=new SQLQueryResult>(this.result,!failed, dataNode,errorMsg); + this.callback.onResult(queryRestl); + + } + public String getMark() { + return mark; } + public void setMark(String mark) { + this.mark = mark; + } + + // 子类 MultiRowSQLQueryResultHandler 需要使用 + protected Map getResult() { + return result; + } } diff --git a/src/main/java/io/mycat/sqlengine/SQLContext.java b/src/main/java/io/mycat/sqlengine/SQLContext.java deleted file mode 100644 index 8ebf533cc..000000000 --- a/src/main/java/io/mycat/sqlengine/SQLContext.java +++ /dev/null @@ -1,19 +0,0 @@ -package io.mycat.sqlengine; - -import io.mycat.backend.BackendConnection; -import io.mycat.route.RouteResultsetNode; - -import java.util.concurrent.ConcurrentHashMap; - -/** - * sql context used for execute sql - * - * @author wuzhih - * - */ -public class SQLContext { - private ConcurrentHashMap target; - private String 
curentSQL; - private boolean autoCommit; - -} diff --git a/src/main/java/io/mycat/sqlengine/SQLJob.java b/src/main/java/io/mycat/sqlengine/SQLJob.java index 485de9aad..9002ab42e 100644 --- a/src/main/java/io/mycat/sqlengine/SQLJob.java +++ b/src/main/java/io/mycat/sqlengine/SQLJob.java @@ -1,18 +1,18 @@ package io.mycat.sqlengine; +import java.util.List; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import io.mycat.MycatServer; import io.mycat.backend.BackendConnection; -import io.mycat.backend.PhysicalDBNode; -import io.mycat.backend.PhysicalDatasource; +import io.mycat.backend.datasource.PhysicalDBNode; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.mysql.nio.handler.ResponseHandler; +import io.mycat.config.MycatConfig; +import io.mycat.net.mysql.ErrorPacket; import io.mycat.route.RouteResultsetNode; -import io.mycat.server.config.node.MycatConfig; -import io.mycat.server.executors.ResponseHandler; -import io.mycat.server.packet.ErrorPacket; import io.mycat.server.parser.ServerParse; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; /** * asyn execute in EngineCtx or standalone (EngineCtx=null) @@ -21,8 +21,9 @@ * */ public class SQLJob implements ResponseHandler, Runnable { - public static final Logger LOGGER = LoggerFactory - .getLogger(SQLJob.class); + + private static final Logger LOGGER = LoggerFactory.getLogger(SQLJob.class); + private final String sql; private final String dataNodeOrDatabase; private BackendConnection connection; @@ -68,8 +69,8 @@ public void run() { ds.getConnection(dataNodeOrDatabase, true, this, null); } } catch (Exception e) { - LOGGER.info("can't get connection for sql ,error:" + e); - doFinished(true); + LOGGER.info("can't get connection for sql ,error:" ,e); + doFinished(true,e.getMessage()); } } @@ -91,7 +92,7 @@ public void connectionAcquired(final BackendConnection conn) { conn.query(sql); connection = conn; } catch (Exception e) {// 
(UnsupportedEncodingException e) { - doFinished(true); + doFinished(true,e.getMessage()); } } @@ -100,10 +101,13 @@ public boolean isFinished() { return finished; } - private void doFinished(boolean failed) { + private void doFinished(boolean failed,String errorMsg) { finished = true; - jobHandler.finished(dataNodeOrDatabase, failed); + jobHandler.finished(dataNodeOrDatabase, failed,errorMsg ); if (ctx != null) { + if(failed){ + ctx.setHasError(true); + } ctx.onJobFinished(this); } } @@ -111,25 +115,37 @@ private void doFinished(boolean failed) { @Override public void connectionError(Throwable e, BackendConnection conn) { LOGGER.info("can't get connection for sql :" + sql); - doFinished(true); - + doFinished(true,e.getMessage()); } @Override public void errorResponse(byte[] err, BackendConnection conn) { ErrorPacket errPg = new ErrorPacket(); errPg.read(err); - LOGGER.info("error response " + new String(errPg.message) - + " from of sql :" + sql + " at con:" + conn); + + String errMsg = "error response errno:" + errPg.errno + ", " + new String(errPg.message) + + " from of sql :" + sql + " at con:" + conn; + + // @see https://dev.mysql.com/doc/refman/5.6/en/error-messages-server.html + // ER_SPECIFIC_ACCESS_DENIED_ERROR + if ( errPg.errno == 1227 ) { + LOGGER.warn( errMsg ); + + } else { + LOGGER.info( errMsg ); + } + + + + doFinished(true,errMsg); conn.release(); - doFinished(true); - } @Override public void okResponse(byte[] ok, BackendConnection conn) { - // not called for query sql - + conn.syncAndExcute(); + doFinished(false,null); + conn.release(); } @Override @@ -143,23 +159,26 @@ public void fieldEofResponse(byte[] header, List fields, public void rowResponse(byte[] row, BackendConnection conn) { boolean finsihed = jobHandler.onRowData(dataNodeOrDatabase, row); if (finsihed) { + doFinished(false,null); conn.close("not needed by user proc"); - doFinished(false); } } @Override public void rowEofResponse(byte[] eof, BackendConnection conn) { - //connection do 
synchronization - conn.syncAndExcute(); + doFinished(false,null); conn.release(); - doFinished(false); + } + + @Override + public void writeQueueAvailable() { + } @Override public void connectionClose(BackendConnection conn, String reason) { - doFinished(true); + doFinished(true,reason); } public int getId() { diff --git a/src/main/java/io/mycat/sqlengine/SQLJobHandler.java b/src/main/java/io/mycat/sqlengine/SQLJobHandler.java index 235cedeca..3159f54b4 100644 --- a/src/main/java/io/mycat/sqlengine/SQLJobHandler.java +++ b/src/main/java/io/mycat/sqlengine/SQLJobHandler.java @@ -1,17 +1,15 @@ package io.mycat.sqlengine; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.List; +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + public interface SQLJobHandler { - Logger LOGGER = LoggerFactory - .getLogger(SQLJobHandler.class); + public static final Logger LOGGER = LoggerFactory.getLogger(SQLJobHandler.class); public void onHeader(String dataNode, byte[] header, List fields); public boolean onRowData(String dataNode, byte[] rowData); - public void finished(String dataNode, boolean failed); + public void finished(String dataNode, boolean failed, String errorMsg); } diff --git a/src/main/java/io/mycat/sqlengine/SQLQueryResult.java b/src/main/java/io/mycat/sqlengine/SQLQueryResult.java index 21a96ee88..c5ed514d1 100644 --- a/src/main/java/io/mycat/sqlengine/SQLQueryResult.java +++ b/src/main/java/io/mycat/sqlengine/SQLQueryResult.java @@ -3,21 +3,34 @@ public class SQLQueryResult { private final T result; private final boolean success; + private final String dataNode; // dataNode or database name private String tableName; - + private String errMsg; + public SQLQueryResult(T result, boolean success) { super(); this.result = result; this.success = success; this.dataNode = null; } - public SQLQueryResult(T result, boolean success, String dataNode) { + + public SQLQueryResult(T result, boolean success, String dataNode,String errMsg) { 
super(); this.result = result; this.success = success; this.dataNode= dataNode; + this.errMsg=errMsg; + } + + public String getErrMsg() { + return errMsg; } + + public void setErrMsg(String errMsg) { + this.errMsg = errMsg; + } + public T getResult() { return result; } @@ -33,5 +46,5 @@ public String getTableName() { public void setTableName(String tableName) { this.tableName = tableName; } - + } diff --git a/src/main/java/io/mycat/sqlengine/mpp/AbstractDataNodeMerge.java b/src/main/java/io/mycat/sqlengine/mpp/AbstractDataNodeMerge.java new file mode 100644 index 000000000..97e45a901 --- /dev/null +++ b/src/main/java/io/mycat/sqlengine/mpp/AbstractDataNodeMerge.java @@ -0,0 +1,152 @@ +package io.mycat.sqlengine.mpp; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.nio.handler.MultiNodeQueryHandler; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.RouteResultset; +import io.mycat.server.NonBlockingSession; +import org.apache.log4j.Logger; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Created by zagnix on 2016/7/6. + */ +public abstract class AbstractDataNodeMerge implements Runnable{ + + + private static Logger LOGGER = Logger.getLogger(AbstractDataNodeMerge.class); + /** + *row 有多少col + */ + protected int fieldCount; + + /** + * 本次select的路由缓存集 + */ + protected final RouteResultset rrs; + /** + * 夸分片处理handler + */ + protected MultiNodeQueryHandler multiQueryHandler = null; + /** + * 分片结束包 + */ + public PackWraper END_FLAG_PACK = new PackWraper(); + + + /** + * 是否执行流式结果集输出 + */ + + protected boolean isStreamOutputResult = false; + + /** + * rowData缓存队列 + */ + protected BlockingQueue packs = new LinkedBlockingQueue(); + + /** + * 标志业务线程是否启动了? 
+ */ + protected final AtomicBoolean running = new AtomicBoolean(false); + + public AbstractDataNodeMerge(MultiNodeQueryHandler handler,RouteResultset rrs){ + this.rrs = rrs; + this.multiQueryHandler = handler; + } + + public boolean isStreamOutputResult() { + return isStreamOutputResult; + } + + public void setStreamOutputResult(boolean streamOutputResult) { + isStreamOutputResult = streamOutputResult; + } + + /** + * Add a row pack, and may be wake up a business thread to work if not running. + * @param pack row pack + * @return true wake up a business thread, otherwise false + * + * @author Uncle-pan + * @since 2016-03-23 + */ + protected final boolean addPack(final PackWraper pack){ + packs.add(pack); + if(running.get()){ + return false; + } + final MycatServer server = MycatServer.getInstance(); + server.getBusinessExecutor().execute(this); + return true; + } + + /** + * 处理新进来每个row数据,通过PackWraper进行封装, + * 投递到队列中进行后续处理即可。 + * process new record (mysql binary data),if data can output to client + * ,return true + * + * @param dataNode + * DN's name (data from this dataNode) + * @param rowData + * raw data + */ + public boolean onNewRecord(String dataNode, byte[] rowData) { + final PackWraper data = new PackWraper(); + data.dataNode = dataNode; + data.rowData = rowData; + addPack(data); + + return false; + } + + + /** + * 将Map对应的col字段集,返回row中对应的index数组 + * @param columns + * @param toIndexMap + * @return + */ + protected static int[] toColumnIndex(String[] columns, Map toIndexMap) { + int[] result = new int[columns.length]; + ColMeta curColMeta; + for (int i = 0; i < columns.length; i++) { + curColMeta = toIndexMap.get(columns[i].toUpperCase()); + if (curColMeta == null) { + throw new IllegalArgumentException( + "all columns in group by clause should be in the selected column list.!" 
+ + columns[i]); + } + result[i] = curColMeta.colIndex; + } + return result; + } + + @Override + public abstract void run(); + + public abstract void onRowMetaData(Map columToIndx, int fieldCount) throws IOException; + + public void outputMergeResult(NonBlockingSession session, byte[] eof) { + addPack(END_FLAG_PACK); + } + + public RouteResultset getRrs() { + return this.rrs; + } + + /** + * 做最后的结果集输出 + * @return (最多i*(offset+size)行数据) + */ + public abstract List getResults(byte[] eof); + public abstract void clear(); + +} diff --git a/src/main/java/io/mycat/sqlengine/mpp/ColMeta.java b/src/main/java/io/mycat/sqlengine/mpp/ColMeta.java index 57d723166..d4635882d 100644 --- a/src/main/java/io/mycat/sqlengine/mpp/ColMeta.java +++ b/src/main/java/io/mycat/sqlengine/mpp/ColMeta.java @@ -23,7 +23,9 @@ */ package io.mycat.sqlengine.mpp; -public class ColMeta { +import java.io.Serializable; + +public class ColMeta implements Serializable{ public static final int COL_TYPE_DECIMAL = 0; public static final int COL_TYPE_INT = 1; public static final int COL_TYPE_SHORT = 2; @@ -53,6 +55,8 @@ public class ColMeta { public static final int COL_TYPE_GEOMETRY = 0xff; public int colIndex; public final int colType; + + public int decimals; public int avgSumIndex; public int avgCountIndex; diff --git a/src/main/java/io/mycat/sqlengine/mpp/ColumnRoutePair.java b/src/main/java/io/mycat/sqlengine/mpp/ColumnRoutePair.java index c1d4fbebc..bd6e99814 100644 --- a/src/main/java/io/mycat/sqlengine/mpp/ColumnRoutePair.java +++ b/src/main/java/io/mycat/sqlengine/mpp/ColumnRoutePair.java @@ -34,6 +34,16 @@ public class ColumnRoutePair { public final RangeValue rangeValue; public Integer nodeId; + public int slot=-2; + + public int getSlot() { + return slot; + } + + public void setSlot(int slot) { + this.slot = slot; + } + public ColumnRoutePair(String colValue) { super(); this.colValue = colValue; @@ -68,18 +78,23 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if 
(this == obj) + if (this == obj) { return true; - if (obj == null) + } + if (obj == null) { return false; - if (getClass() != obj.getClass()) + } + if (getClass() != obj.getClass()) { return false; + } ColumnRoutePair other = (ColumnRoutePair) obj; if (colValue == null) { - if (other.colValue != null) + if (other.colValue != null) { return false; - } else if (!colValue.equals(other.colValue)) + } + } else if (!colValue.equals(other.colValue)) { return false; + } if (rangeValue == null) { if (other.rangeValue != null) { @@ -90,10 +105,12 @@ public boolean equals(Object obj) { } if (nodeId == null) { - if (other.nodeId != null) + if (other.nodeId != null) { return false; - } else if (!nodeId.equals(other.nodeId)) + } + } else if (!nodeId.equals(other.nodeId)) { return false; + } return true; } @@ -102,4 +119,4 @@ public String toString() { return "ColumnRoutePair [colValue=" + colValue + ", nodeId=" + nodeId + "]"; } -} \ No newline at end of file +} diff --git a/src/main/java/io/mycat/sqlengine/mpp/DataMergeService.java b/src/main/java/io/mycat/sqlengine/mpp/DataMergeService.java index 653a472ac..4fa6acdcb 100644 --- a/src/main/java/io/mycat/sqlengine/mpp/DataMergeService.java +++ b/src/main/java/io/mycat/sqlengine/mpp/DataMergeService.java @@ -1,310 +1,306 @@ -package io.mycat.sqlengine.mpp; - -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ - -import io.mycat.MycatServer; -import io.mycat.route.RouteResultset; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.NonBlockingSession; -import io.mycat.server.executors.MultiNodeQueryHandler; -import io.mycat.server.packet.EOFPacket; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.server.packet.util.BufferUtil; -import io.mycat.sqlengine.tmp.RowDataSorter; -import io.mycat.util.StringUtil; - -import java.nio.ByteBuffer; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Vector; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.log4j.Logger; - -/** - * Data merge service handle data Min,Max,AVG group 、order by 、limit - * - * @author wuzhih /modify by coder_czp/2015/11/2 - * - */ -public class DataMergeService implements Runnable { - - // 保存包和节点的关系 - static class PackWraper { - byte[] data; - String node; - - } - - private int fieldCount; - private RouteResultset rrs; - private RowDataSorter sorter; - private RowDataPacketGrouper grouper; - private int MAX_MUTIL_COUNT = 20000000; - private BlockingQueue packs; - private volatile boolean hasOrderBy = false; - private MultiNodeQueryHandler multiQueryHandler; - private AtomicInteger areadyAdd = new AtomicInteger(); - private 
ConcurrentHashMap canDiscard; - public static PackWraper END_FLAG_PACK = new PackWraper(); - private List result = new Vector(); - private static Logger LOGGER = Logger.getLogger(DataMergeService.class); - - public DataMergeService(MultiNodeQueryHandler handler, RouteResultset rrs) { - this.rrs = rrs; - this.multiQueryHandler = handler; - this.canDiscard = new ConcurrentHashMap(); - // 在网络很好的情况下,数据会快速填充到队列,当数据大于MAX_MUTIL_COUNT时,等待处理线程处理 - this.packs = new LinkedBlockingQueue(MAX_MUTIL_COUNT); - } - - public RouteResultset getRrs() { - return this.rrs; - } - - public void outputMergeResult(NonBlockingSession session, byte[] eof) { - packs.add(END_FLAG_PACK); - } - - /** - * return merged data - * - * @return (最多i*(offset+size)行数据) - */ - public List getResults(byte[] eof) { - List tmpResult = result; - if (this.grouper != null) { - tmpResult = grouper.getResult(); - grouper = null; - } - if (sorter != null) { - // 处理grouper处理后的数据 - if (tmpResult != null) { - Iterator itor = tmpResult.iterator(); - while (itor.hasNext()) { - sorter.addRow(itor.next()); - itor.remove(); - } - } - tmpResult = sorter.getSortedResult(); - sorter = null; - } - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("prepare mpp merge result for " + rrs.getStatement()); - } - - return tmpResult; - } - - public void onRowMetaData(Map columToIndx, int fieldCount) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("field metadata inf:" + columToIndx.entrySet()); - } - int[] groupColumnIndexs = null; - this.fieldCount = fieldCount; - if (rrs.getGroupByCols() != null) { - groupColumnIndexs = toColumnIndex(rrs.getGroupByCols(), columToIndx); - } - - if (rrs.getHavingCols() != null) { - ColMeta colMeta = columToIndx.get(rrs.getHavingCols().getLeft() - .toUpperCase()); - if (colMeta != null) { - rrs.getHavingCols().setColMeta(colMeta); - } - } - - if (rrs.isHasAggrColumn()) { - List mergCols = new LinkedList(); - Map mergeColsMap = rrs.getMergeCols(); - if (mergeColsMap != null) { - for (Map.Entry mergEntry 
: mergeColsMap - .entrySet()) { - String colName = mergEntry.getKey().toUpperCase(); - int type = mergEntry.getValue(); - if (MergeCol.MERGE_AVG == type) { - ColMeta sumColMeta = columToIndx.get(colName + "SUM"); - ColMeta countColMeta = columToIndx.get(colName - + "COUNT"); - if (sumColMeta != null && countColMeta != null) { - ColMeta colMeta = new ColMeta(sumColMeta.colIndex, - countColMeta.colIndex, - sumColMeta.getColType()); - mergCols.add(new MergeCol(colMeta, mergEntry - .getValue())); - } - } else { - ColMeta colMeta = columToIndx.get(colName); - mergCols.add(new MergeCol(colMeta, mergEntry.getValue())); - } - } - } - // add no alias merg column - for (Map.Entry fieldEntry : columToIndx.entrySet()) { - String colName = fieldEntry.getKey(); - int result = MergeCol.tryParseAggCol(colName); - if (result != MergeCol.MERGE_UNSUPPORT - && result != MergeCol.MERGE_NOMERGE) { - mergCols.add(new MergeCol(fieldEntry.getValue(), result)); - } - } - grouper = new RowDataPacketGrouper(groupColumnIndexs, - mergCols.toArray(new MergeCol[mergCols.size()]), - rrs.getHavingCols()); - } - if (rrs.getOrderByCols() != null) { - LinkedHashMap orders = rrs.getOrderByCols(); - OrderCol[] orderCols = new OrderCol[orders.size()]; - int i = 0; - for (Map.Entry entry : orders.entrySet()) { - String key = StringUtil.removeBackquote(entry.getKey() - .toUpperCase()); - ColMeta colMeta = columToIndx.get(key); - if (colMeta == null) { - throw new java.lang.IllegalArgumentException( - "all columns in order by clause should be in the selected column list!" 
- + entry.getKey()); - } - orderCols[i++] = new OrderCol(colMeta, entry.getValue()); - } - // sorter = new RowDataPacketSorter(orderCols); - RowDataSorter tmp = new RowDataSorter(orderCols); - tmp.setLimit(rrs.getLimitStart(), rrs.getLimitSize()); - hasOrderBy = true; - sorter = tmp; - } else { - hasOrderBy = false; - } - MycatServer.getInstance().getListeningExecutorService().execute(this); - } - - /** - * process new record (mysql binary data),if data can output to client - * ,return true - * - * @param dataNode - * DN's name (data from this dataNode) - * @param rowData - * raw data - */ - public boolean onNewRecord(String dataNode, byte[] rowData) { - try { - // 对于无需排序的SQL,取前getLimitSize条就足够 - //由于聚合函数等场景可能有误判的情况,暂时先注释 -// if (!hasOrderBy && areadyAdd.get() >= rrs.getLimitSize()&& rrs.getLimitSize()!=-1) { -// packs.add(END_FLAG_PACK); -// return true; -// } - // 对于需要排序的数据,由于mysql传递过来的数据是有序的, - // 如果某个节点的当前数据已经不会进入,后续的数据也不会入堆 - if (canDiscard.size() == rrs.getNodes().length) { - packs.add(END_FLAG_PACK); - LOGGER.info("other pack can discard,now send to client"); - return true; - } - if (canDiscard.get(dataNode) != null) { - return true; - } - PackWraper data = new PackWraper(); - data.node = dataNode; - data.data = rowData; - packs.put(data); - areadyAdd.getAndIncrement(); - } catch (Exception e) { - throw new RuntimeException(e); - } - return false; - } - - private static int[] toColumnIndex(String[] columns, - Map toIndexMap) { - int[] result = new int[columns.length]; - ColMeta curColMeta; - for (int i = 0; i < columns.length; i++) { - curColMeta = toIndexMap.get(columns[i].toUpperCase()); - if (curColMeta == null) { - throw new java.lang.IllegalArgumentException( - "all columns in group by clause should be in the selected column list.!" 
- + columns[i]); - } - result[i] = curColMeta.colIndex; - } - return result; - } - - /** - * release resources - */ - public void clear() { - hasOrderBy = false; - result.clear(); - grouper = null; - sorter = null; - } - - @Override - public void run() { - - EOFPacket eofp = new EOFPacket(); - ByteBuffer eof = ByteBuffer.allocate(9); - BufferUtil.writeUB3(eof, eofp.calcPacketSize()); - eof.put(eofp.packetId); - eof.put(eofp.fieldCount); - BufferUtil.writeUB2(eof, eofp.status); - BufferUtil.writeUB2(eof, eofp.warningCount); - MySQLFrontConnection source = multiQueryHandler.getSession() - .getSource(); - - while (!Thread.interrupted()) { - try { - PackWraper pack = packs.take(); - if (pack == END_FLAG_PACK) { - multiQueryHandler.outputMergeResult(source, eof.array()); - break; - } - RowDataPacket row = new RowDataPacket(fieldCount); - row.read(pack.data); - if (grouper != null) { - grouper.addRow(row); - } else if (sorter != null) { - if (!sorter.addRow(row)) { - canDiscard.put(pack.node, true); - } - } else { - result.add(row); - } - } catch (Exception e) { - LOGGER.error("Merge multi data error", e); - } - } - } - -} +package io.mycat.sqlengine.mpp; + +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.nio.handler.MultiNodeQueryHandler; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteResultsetNode; +import io.mycat.server.ServerConnection; +import io.mycat.sqlengine.mpp.tmp.RowDataSorter; +import io.mycat.util.StringUtil; + +import org.apache.log4j.Logger; + + + +import java.nio.ByteBuffer; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Data merge service handle data Min,Max,AVG group 、order by 、limit + * + * @author wuzhih /modify by coder_czp/2015/11/2 + * + * Fixbug: mycat sql timeout and hang problem. 
+ * @author Uncle-pan + * @since 2016-03-23 + * + */ +public class DataMergeService extends AbstractDataNodeMerge { + + private RowDataSorter sorter; + private RowDataPacketGrouper grouper; + private Map> result = new HashMap>(); + private static Logger LOGGER = Logger.getLogger(DataMergeService.class); + private ConcurrentHashMap canDiscard = new ConcurrentHashMap(); + public DataMergeService(MultiNodeQueryHandler handler, RouteResultset rrs) { + super(handler,rrs); + + for (RouteResultsetNode node : rrs.getNodes()) { + result.put(node.getName(), new LinkedList()); + } + } + + + /** + * @param columToIndx + * @param fieldCount + */ + public void onRowMetaData(Map columToIndx, int fieldCount) { + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("field metadata keys:" + columToIndx.keySet()); + LOGGER.debug("field metadata values:" + columToIndx.values()); + } + + + int[] groupColumnIndexs = null; + this.fieldCount = fieldCount; + + if (rrs.getGroupByCols() != null) { + + groupColumnIndexs = toColumnIndex(rrs.getGroupByCols(), columToIndx); + } + + if (rrs.getHavingCols() != null) { + ColMeta colMeta = columToIndx.get(rrs.getHavingCols().getLeft() + .toUpperCase()); + if (colMeta != null) { + rrs.getHavingCols().setColMeta(colMeta); + } + } + + if (rrs.isHasAggrColumn()) { + List mergCols = new LinkedList(); + Map mergeColsMap = rrs.getMergeCols(); + + + + if (mergeColsMap != null) { + for (Map.Entry mergEntry : mergeColsMap + .entrySet()) { + String colName = mergEntry.getKey().toUpperCase(); + int type = mergEntry.getValue(); + if (MergeCol.MERGE_AVG == type) { + + ColMeta sumColMeta = columToIndx.get(colName + "SUM"); + ColMeta countColMeta = columToIndx.get(colName + + "COUNT"); + if (sumColMeta != null && countColMeta != null) { + ColMeta colMeta = new ColMeta(sumColMeta.colIndex, + countColMeta.colIndex, + sumColMeta.getColType()); + colMeta.decimals = sumColMeta.decimals; // 保存精度 + mergCols.add(new MergeCol(colMeta, mergEntry + .getValue())); + } + } else { + 
+ ColMeta colMeta = columToIndx.get(colName); + mergCols.add(new MergeCol(colMeta, mergEntry.getValue())); + } + } + } + // add no alias merg column + for (Map.Entry fieldEntry : columToIndx.entrySet()) { + String colName = fieldEntry.getKey(); + int result = MergeCol.tryParseAggCol(colName); + if (result != MergeCol.MERGE_UNSUPPORT + && result != MergeCol.MERGE_NOMERGE) { + mergCols.add(new MergeCol(fieldEntry.getValue(), result)); + } + } + + + grouper = new RowDataPacketGrouper(groupColumnIndexs, + mergCols.toArray(new MergeCol[mergCols.size()]), + rrs.getHavingCols()); + } + + if (rrs.getOrderByCols() != null) { + LinkedHashMap orders = rrs.getOrderByCols(); + OrderCol[] orderCols = new OrderCol[orders.size()]; + int i = 0; + for (Map.Entry entry : orders.entrySet()) { + String key = StringUtil.removeBackquote(entry.getKey() + .toUpperCase()); + ColMeta colMeta = columToIndx.get(key); + if (colMeta == null) { + throw new IllegalArgumentException( + "all columns in order by clause should be in the selected column list!" + + entry.getKey()); + } + orderCols[i++] = new OrderCol(colMeta, entry.getValue()); + } + + RowDataSorter tmp = new RowDataSorter(orderCols); + tmp.setLimit(rrs.getLimitStart(), rrs.getLimitSize()); + sorter = tmp; + } + + if (MycatServer.getInstance(). + getConfig().getSystem(). + getUseStreamOutput() == 1 + && grouper == null + && sorter == null) { + setStreamOutputResult(true); + }else { + setStreamOutputResult(false); + } + } + + + /** + * release resources + */ + public void clear() { + result.clear(); + grouper = null; + sorter = null; + } + + @Override + public void run() { + // sort-or-group: no need for us to using multi-threads, because + //both sorter and group are synchronized!! 
+ // @author Uncle-pan + // @since 2016-03-23 + if(!running.compareAndSet(false, true)){ + return; + } + // eof handler has been placed to "if (pack == END_FLAG_PACK){}" in for-statement + // @author Uncle-pan + // @since 2016-03-23 + boolean nulpack = false; + try{ + // loop-on-packs + for (; ; ) { + final PackWraper pack = packs.poll(); + // async: handling row pack queue, this business thread should exit when no pack + // @author Uncle-pan + // @since 2016-03-23 + if(pack == null){ + nulpack = true; + break; + } + // eof: handling eof pack and exit + if (pack == END_FLAG_PACK) { + + + + final int warningCount = 0; + final EOFPacket eofp = new EOFPacket(); + final ByteBuffer eof = ByteBuffer.allocate(9); + BufferUtil.writeUB3(eof, eofp.calcPacketSize()); + eof.put(eofp.packetId); + eof.put(eofp.fieldCount); + BufferUtil.writeUB2(eof, warningCount); + BufferUtil.writeUB2(eof, eofp.status); + final ServerConnection source = multiQueryHandler.getSession().getSource(); + final byte[] array = eof.array(); + multiQueryHandler.outputMergeResult(source, array, getResults(array)); + break; + } + + + // merge: sort-or-group, or simple add + final RowDataPacket row = new RowDataPacket(fieldCount); + row.read(pack.rowData); + + if (grouper != null) { + grouper.addRow(row); + } else if (sorter != null) { + if (!sorter.addRow(row)) { + canDiscard.put(pack.dataNode,true); + } + } else { + result.get(pack.dataNode).add(row); + } + }// rof + }catch(final Exception e){ + multiQueryHandler.handleDataProcessException(e); + }finally{ + running.set(false); + } + // try to check packs, it's possible that adding a pack after polling a null pack + //and before this time pointer!! 
+ // @author Uncle-pan + // @since 2016-03-23 + if(nulpack && !packs.isEmpty()){ + this.run(); + } + } + + + + /** + * return merged data + * @return (最多i*(offset+size)行数据) + */ + public List getResults(byte[] eof) { + + List tmpResult = null; + + if (this.grouper != null) { + tmpResult = grouper.getResult(); + grouper = null; + } + + + if (sorter != null) { + + if (tmpResult != null) { + Iterator itor = tmpResult.iterator(); + while (itor.hasNext()) { + sorter.addRow(itor.next()); + itor.remove(); + } + } + tmpResult = sorter.getSortedResult(); + sorter = null; + } + + + + //no grouper and sorter + if(tmpResult == null){ + tmpResult = new LinkedList(); + for (RouteResultsetNode node : rrs.getNodes()) { + tmpResult.addAll(result.get(node.getName())); + } + } + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("prepare mpp merge result for " + rrs.getStatement()); + } + return tmpResult; + } +} + diff --git a/src/main/java/io/mycat/sqlengine/mpp/DataNodeMergeManager.java b/src/main/java/io/mycat/sqlengine/mpp/DataNodeMergeManager.java new file mode 100644 index 000000000..6bd2316e7 --- /dev/null +++ b/src/main/java/io/mycat/sqlengine/mpp/DataNodeMergeManager.java @@ -0,0 +1,507 @@ +package io.mycat.sqlengine.mpp; + +import io.mycat.MycatServer; +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.backend.mysql.nio.handler.MultiNodeQueryHandler; +import io.mycat.memory.MyCatMemory; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.memory.mm.MemoryManager; +import io.mycat.memory.unsafe.row.BufferHolder; +import io.mycat.memory.unsafe.row.StructType; +import io.mycat.memory.unsafe.row.UnsafeRow; +import io.mycat.memory.unsafe.row.UnsafeRowWriter; +import io.mycat.memory.unsafe.utils.MycatPropertyConf; +import io.mycat.memory.unsafe.utils.sort.PrefixComparator; +import io.mycat.memory.unsafe.utils.sort.PrefixComparators; +import 
io.mycat.memory.unsafe.utils.sort.RowPrefixComputer; +import io.mycat.memory.unsafe.utils.sort.UnsafeExternalRowSorter; +import io.mycat.net.mysql.EOFPacket; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.RouteResultset; +import io.mycat.server.ServerConnection; +import io.mycat.util.StringUtil; +import org.apache.log4j.Logger; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; + +import java.util.concurrent.atomic.AtomicBoolean; + + +/** + * Created by zagnix on 2016/6/21. + */ +public class DataNodeMergeManager extends AbstractDataNodeMerge { + + private static Logger LOGGER = Logger.getLogger(DataNodeMergeManager.class); + + /** + * key为datanode的分片节点名字 + * value为对应的排序器 + * 目前,没有使用! + */ + private ConcurrentHashMap unsafeRows = + new ConcurrentHashMap(); + /** + * 全局sorter,排序器 + */ + private UnsafeExternalRowSorter globalSorter = null; + /** + * UnsafeRowGrouper + */ + private UnsafeRowGrouper unsafeRowGrouper = null; + + /** + * 全局merge,排序器 + */ + private UnsafeExternalRowSorter globalMergeResult = null; + + /** + * sorter需要的上下文环境 + */ + private final MyCatMemory myCatMemory; + private final MemoryManager memoryManager; + private final MycatPropertyConf conf; + /** + * Limit N,M + */ + private final int limitStart; + private final int limitSize; + + private int[] mergeColsIndex; + private boolean hasEndFlag = false; + + + private AtomicBoolean isMiddleResultDone; + public DataNodeMergeManager(MultiNodeQueryHandler handler, RouteResultset rrs,AtomicBoolean isMiddleResultDone) { + super(handler,rrs); + this.isMiddleResultDone = isMiddleResultDone; + this.myCatMemory = MycatServer.getInstance().getMyCatMemory(); + this.memoryManager = myCatMemory.getResultMergeMemoryManager(); + this.conf = myCatMemory.getConf(); + this.limitStart = rrs.getLimitStart(); + this.limitSize = rrs.getLimitSize(); + } + + + public void onRowMetaData(Map columToIndx, int fieldCount) throws 
IOException { + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("field metadata keys:" + columToIndx != null ? columToIndx.keySet() : "null"); + LOGGER.debug("field metadata values:" + columToIndx != null ? columToIndx.values() : "null"); + } + + OrderCol[] orderCols = null; + StructType schema = null; + UnsafeExternalRowSorter.PrefixComputer prefixComputer = null; + PrefixComparator prefixComparator = null; + + + DataNodeMemoryManager dataNodeMemoryManager = null; + UnsafeExternalRowSorter sorter = null; + + int[] groupColumnIndexs = null; + this.fieldCount = fieldCount; + + if (rrs.getGroupByCols() != null) { + groupColumnIndexs = toColumnIndex(rrs.getGroupByCols(), columToIndx); + if (LOGGER.isDebugEnabled()) { + for (int i = 0; i mergCols = new LinkedList(); + Map mergeColsMap = rrs.getMergeCols(); + + if (mergeColsMap != null) { + + if (LOGGER.isDebugEnabled() && rrs.getMergeCols() != null) { + LOGGER.debug("isHasAggrColumn:" + rrs.getMergeCols().toString()); + } + for (Map.Entry mergEntry : mergeColsMap + .entrySet()) { + String colName = mergEntry.getKey().toUpperCase(); + int type = mergEntry.getValue(); + if (MergeCol.MERGE_AVG == type) { + ColMeta sumColMeta = columToIndx.get(colName + "SUM"); + ColMeta countColMeta = columToIndx.get(colName + + "COUNT"); + if (sumColMeta != null && countColMeta != null) { + ColMeta colMeta = new ColMeta(sumColMeta.colIndex, + countColMeta.colIndex, + sumColMeta.getColType()); + mergCols.add(new MergeCol(colMeta, mergEntry + .getValue())); + } + } else { + ColMeta colMeta = columToIndx.get(colName); + mergCols.add(new MergeCol(colMeta, mergEntry.getValue())); + } + } + } + + // add no alias merg column + for (Map.Entry fieldEntry : columToIndx.entrySet()) { + String colName = fieldEntry.getKey(); + int result = MergeCol.tryParseAggCol(colName); + if (result != MergeCol.MERGE_UNSUPPORT + && result != MergeCol.MERGE_NOMERGE) { + mergCols.add(new MergeCol(fieldEntry.getValue(), result)); + } + } + + /** + * Group操作 + */ + 
MergeCol[] mergColsArrays = mergCols.toArray(new MergeCol[mergCols.size()]); + unsafeRowGrouper = new UnsafeRowGrouper(columToIndx,rrs.getGroupByCols(), + mergColsArrays, + rrs.getHavingCols()); + + if(mergColsArrays!=null&&mergColsArrays.length>0){ + mergeColsIndex = new int[mergColsArrays.length]; + for(int i = 0;i orders = rrs.getOrderByCols(); + orderCols = new OrderCol[orders.size()]; + int i = 0; + for (Map.Entry entry : orders.entrySet()) { + String key = StringUtil.removeBackquote(entry.getKey() + .toUpperCase()); + ColMeta colMeta = columToIndx.get(key); + if (colMeta == null) { + throw new IllegalArgumentException( + "all columns in order by clause should be in the selected column list!" + + entry.getKey()); + } + orderCols[i++] = new OrderCol(colMeta, entry.getValue()); + } + + /** + * 构造全局排序器 + */ + schema = new StructType(columToIndx,fieldCount); + schema.setOrderCols(orderCols); + + prefixComputer = new RowPrefixComputer(schema); + +// if(orderCols.length>0 +// && orderCols[0].getOrderType() +// == OrderCol.COL_ORDER_TYPE_ASC){ +// prefixComparator = PrefixComparators.LONG; +// }else { +// prefixComparator = PrefixComparators.LONG_DESC; +// } + + prefixComparator = getPrefixComparator(orderCols); + + dataNodeMemoryManager = + new DataNodeMemoryManager(memoryManager,Thread.currentThread().getId()); + + /** + * 默认排序,只是将数据连续存储到内存中即可。 + */ + globalSorter = new UnsafeExternalRowSorter( + dataNodeMemoryManager, + myCatMemory, + schema, + prefixComparator, prefixComputer, + conf.getSizeAsBytes("mycat.buffer.pageSize","32k"), + false/**是否使用基数排序*/, + true/**排序*/); + } + + + if(conf.getBoolean("mycat.stream.output.result",false) + && globalSorter == null + && unsafeRowGrouper == null){ + setStreamOutputResult(true); + }else { + + /** + * 1.schema + */ + + schema = new StructType(columToIndx,fieldCount); + schema.setOrderCols(orderCols); + + /** + * 2 .PrefixComputer + */ + prefixComputer = new RowPrefixComputer(schema); + + /** + * 3 .PrefixComparator 
默认是ASC,可以选择DESC + */ + + prefixComparator = PrefixComparators.LONG; + + + dataNodeMemoryManager = new DataNodeMemoryManager(memoryManager, + Thread.currentThread().getId()); + + globalMergeResult = new UnsafeExternalRowSorter( + dataNodeMemoryManager, + myCatMemory, + schema, + prefixComparator, + prefixComputer, + conf.getSizeAsBytes("mycat.buffer.pageSize", "32k"), + false,/**是否使用基数排序*/ + false/**不排序*/); + } + } + + private PrefixComparator getPrefixComparator(OrderCol[] orderCols) { + PrefixComparator prefixComparator = null; + OrderCol firstOrderCol = orderCols[0]; + int orderType = firstOrderCol.getOrderType(); + int colType = firstOrderCol.colMeta.colType; + + switch (colType) { + case ColMeta.COL_TYPE_INT: + case ColMeta.COL_TYPE_LONG: + case ColMeta.COL_TYPE_INT24: + case ColMeta.COL_TYPE_SHORT: + case ColMeta.COL_TYPE_LONGLONG: + prefixComparator = (orderType == OrderCol.COL_ORDER_TYPE_ASC ? PrefixComparators.LONG : PrefixComparators.LONG_DESC); + break; + case ColMeta.COL_TYPE_FLOAT: + case ColMeta.COL_TYPE_DOUBLE: + case ColMeta.COL_TYPE_DECIMAL: + case ColMeta.COL_TYPE_NEWDECIMAL: + prefixComparator = (orderType == OrderCol.COL_ORDER_TYPE_ASC ? PrefixComparators.DOUBLE : PrefixComparators.DOUBLE_DESC); + break; + case ColMeta.COL_TYPE_DATE: + case ColMeta.COL_TYPE_TIMSTAMP: + case ColMeta.COL_TYPE_TIME: + case ColMeta.COL_TYPE_YEAR: + case ColMeta.COL_TYPE_DATETIME: + case ColMeta.COL_TYPE_NEWDATE: + case ColMeta.COL_TYPE_BIT: + case ColMeta.COL_TYPE_VAR_STRING: + case ColMeta.COL_TYPE_STRING: + case ColMeta.COL_TYPE_ENUM: + case ColMeta.COL_TYPE_SET: + prefixComparator = (orderType == OrderCol.COL_ORDER_TYPE_ASC ? PrefixComparators.BINARY : PrefixComparators.BINARY_DESC); + break; + default: + prefixComparator = (orderType == OrderCol.COL_ORDER_TYPE_ASC ? 
PrefixComparators.LONG : PrefixComparators.LONG_DESC); + break; + } + + return prefixComparator; + } + + @Override + public List getResults(byte[] eof) { + return null; + } + + private UnsafeRow unsafeRow = null; + private BufferHolder bufferHolder = null; + private UnsafeRowWriter unsafeRowWriter = null; + private int Index = 0; + + @Override + public void run() { + + if (!running.compareAndSet(false, true)) { + return; + } + + boolean nulpack = false; + + try { + for (; ; ) { + final PackWraper pack = packs.poll(); + + if (pack == null) { + nulpack = true; + break; + } + if (pack == END_FLAG_PACK) { + + hasEndFlag = true; + + if(packs.peek()!=null){ + packs.add(pack); + continue; + } + + /** + * 最后一个节点datenode发送了row eof packet说明了整个 + * 分片数据全部接收完成,进而将结果集全部发给你Mycat 客户端 + */ + final int warningCount = 0; + final EOFPacket eofp = new EOFPacket(); + final ByteBuffer eof = ByteBuffer.allocate(9); + BufferUtil.writeUB3(eof, eofp.calcPacketSize()); + eof.put(eofp.packetId); + eof.put(eofp.fieldCount); + BufferUtil.writeUB2(eof,warningCount); + BufferUtil.writeUB2(eof,eofp.status); + final ServerConnection source = multiQueryHandler.getSession().getSource(); + final byte[] array = eof.array(); + + + Iterator iters = null; + + + if (unsafeRowGrouper != null){ + /** + * group by里面需要排序情况 + */ + if (globalSorter != null){ + iters = unsafeRowGrouper.getResult(globalSorter); + }else { + iters = unsafeRowGrouper.getResult(globalMergeResult); + } + + }else if(globalSorter != null){ + + iters = globalSorter.sort(); + + }else if (!isStreamOutputResult){ + + iters = globalMergeResult.sort(); + + } + + if(iters != null){ + multiQueryHandler.outputMergeResult(source,array,iters,isMiddleResultDone); + } + break; + } + + unsafeRow = new UnsafeRow(fieldCount); + bufferHolder = new BufferHolder(unsafeRow,0); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder,fieldCount); + bufferHolder.reset(); + + /** + *构造一行row,将对应的col填充. 
+ */ + MySQLMessage mm = new MySQLMessage(pack.rowData); + mm.readUB3(); + mm.read(); + + int nullnum = 0; + for (int i = 0; i < fieldCount; i++) { + byte[] colValue = mm.readBytesWithLength(); + if (colValue != null) + unsafeRowWriter.write(i,colValue); + else + { + if(mergeColsIndex!=null&&mergeColsIndex.length>0){ + + if(Arrays.binarySearch(mergeColsIndex, i)<0){ + nullnum++; + } + } + unsafeRow.setNullAt(i); + } + } + + if(mergeColsIndex!=null&&mergeColsIndex.length>0){ + if(nullnum == (fieldCount - mergeColsIndex.length)){ + if(!hasEndFlag){ + packs.add(pack); + continue; + } + } + } + + unsafeRow.setTotalSize(bufferHolder.totalSize()); + + if(unsafeRowGrouper != null){ + unsafeRowGrouper.addRow(unsafeRow); + }else if (globalSorter != null){ + globalSorter.insertRow(unsafeRow); + }else { + globalMergeResult.insertRow(unsafeRow); + } + + unsafeRow = null; + bufferHolder = null; + unsafeRowWriter = null; + } + + } catch (final Exception e) { + e.printStackTrace(); + multiQueryHandler.handleDataProcessException(e); + } finally { + running.set(false); + if (nulpack && !packs.isEmpty()) { + this.run(); + } + } + } + + /** + * 释放DataNodeMergeManager所申请的资源 + */ + public void clear() { + + unsafeRows.clear(); + + synchronized (this) + { + if (unsafeRowGrouper != null) { + unsafeRowGrouper.free(); + unsafeRowGrouper = null; + } + } + + if(globalSorter != null){ + globalSorter.cleanupResources(); + globalSorter = null; + } + + if (globalMergeResult != null){ + globalMergeResult.cleanupResources(); + globalMergeResult = null; + } + } +} diff --git a/src/main/java/io/mycat/sqlengine/mpp/LoadData.java b/src/main/java/io/mycat/sqlengine/mpp/LoadData.java index 295fa78e4..5c3483a1d 100644 --- a/src/main/java/io/mycat/sqlengine/mpp/LoadData.java +++ b/src/main/java/io/mycat/sqlengine/mpp/LoadData.java @@ -1,11 +1,12 @@ package io.mycat.sqlengine.mpp; +import java.io.Serializable; import java.util.List; /** * Created by magicdoom on 2015/3/30. 
*/ -public class LoadData +public class LoadData implements Serializable { public static final String loadDataHint="/*loaddata*/"; private boolean isLocal; diff --git a/src/main/java/io/mycat/sqlengine/mpp/PackWraper.java b/src/main/java/io/mycat/sqlengine/mpp/PackWraper.java new file mode 100644 index 000000000..9e5cf4acc --- /dev/null +++ b/src/main/java/io/mycat/sqlengine/mpp/PackWraper.java @@ -0,0 +1,17 @@ +package io.mycat.sqlengine.mpp; + + +/** + * Created by zagnix on 2016/7/6. + */ + +/** + * 一行数据是从哪个节点来的。 + * 通过dataNode查找对应的sorter, + * 将数据放到对应的datanode的sorter, + * 进行排序. + */ +public final class PackWraper { + public byte[] rowData; + public String dataNode; +} diff --git a/src/main/java/io/mycat/sqlengine/mpp/RangRowDataPacketSorter.java b/src/main/java/io/mycat/sqlengine/mpp/RangRowDataPacketSorter.java index e639a119d..5a8e410ad 100644 --- a/src/main/java/io/mycat/sqlengine/mpp/RangRowDataPacketSorter.java +++ b/src/main/java/io/mycat/sqlengine/mpp/RangRowDataPacketSorter.java @@ -23,8 +23,8 @@ */ package io.mycat.sqlengine.mpp; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.sqlengine.tmp.RowDataSorter; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.sqlengine.mpp.tmp.RowDataSorter; public class RangRowDataPacketSorter extends RowDataSorter { diff --git a/src/main/java/io/mycat/sqlengine/mpp/RangeValue.java b/src/main/java/io/mycat/sqlengine/mpp/RangeValue.java index 5ab54eede..05eba5116 100644 --- a/src/main/java/io/mycat/sqlengine/mpp/RangeValue.java +++ b/src/main/java/io/mycat/sqlengine/mpp/RangeValue.java @@ -31,12 +31,15 @@ public int hashCode(){ @Override public boolean equals(Object obj) { - if (this == obj) + if (this == obj) { return true; - if (obj == null) + } + if (obj == null) { return false; - if (getClass() != obj.getClass()) + } + if (getClass() != obj.getClass()) { return false; + } RangeValue other = (RangeValue) obj; if( beginValue == null ){ diff --git 
a/src/main/java/io/mycat/sqlengine/mpp/RowDataPacketGrouper.java b/src/main/java/io/mycat/sqlengine/mpp/RowDataPacketGrouper.java index 2ed2bc43c..df55df5da 100644 --- a/src/main/java/io/mycat/sqlengine/mpp/RowDataPacketGrouper.java +++ b/src/main/java/io/mycat/sqlengine/mpp/RowDataPacketGrouper.java @@ -23,17 +23,22 @@ */ package io.mycat.sqlengine.mpp; - -import io.mycat.server.packet.RowDataPacket; -import io.mycat.util.ByteUtil; -import io.mycat.util.LongUtil; - +import java.math.BigDecimal; +import java.math.RoundingMode; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Set; + + +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.ByteUtil; +import io.mycat.util.CompareUtil; +import io.mycat.util.LongUtil; /** * implement group function select a,count(*),sum(*) from A group by a @@ -45,7 +50,9 @@ public class RowDataPacketGrouper { private List result = Collections.synchronizedList(new ArrayList()); private final MergeCol[] mergCols; + private int[] mergeColsIndex; private final int[] groupColumnIndexs; + private boolean ishanlderFirstRow = false; //结果集汇聚时,是否已处理第一条记录. private boolean isMergAvg=false; private HavingCols havingCols; @@ -54,6 +61,14 @@ public RowDataPacketGrouper(int[] groupColumnIndexs, MergeCol[] mergCols,HavingC this.groupColumnIndexs = groupColumnIndexs; this.mergCols = mergCols; this.havingCols = havingCols; + + if(mergCols!=null&&mergCols.length>0){ + mergeColsIndex = new int[mergCols.length]; + for(int i = 0;i getResult() { @@ -81,36 +96,43 @@ private void filterHaving(){ byte[] right = havingCols.getRight().getBytes( StandardCharsets.UTF_8); int index = havingCols.getColMeta().getColIndex(); + int colType = havingCols.getColMeta().getColType(); // Added by winbill. 20160312. 
while (it.hasNext()){ RowDataPacket rowDataPacket = it.next(); switch (havingCols.getOperator()) { case "=": - if (eq(rowDataPacket.fieldValues.get(index),right)) { + /* Add parameter of colType, Modified by winbill. 20160312. */ + if (eq(rowDataPacket.fieldValues.get(index),right,colType)) { it.remove(); } break; case ">": - if (gt(rowDataPacket.fieldValues.get(index),right)) { + /* Add parameter of colType, Modified by winbill. 20160312. */ + if (gt(rowDataPacket.fieldValues.get(index),right,colType)) { it.remove(); } break; case "<": - if (lt(rowDataPacket.fieldValues.get(index),right)) { + /* Add parameter of colType, Modified by winbill. 20160312. */ + if (lt(rowDataPacket.fieldValues.get(index),right,colType)) { it.remove(); } break; case ">=": - if (gt(rowDataPacket.fieldValues.get(index),right) && eq(rowDataPacket.fieldValues.get(index),right)) { + /* Add parameter of colType, Modified by winbill. 20160312. */ + if (gt(rowDataPacket.fieldValues.get(index),right,colType) && eq(rowDataPacket.fieldValues.get(index),right,colType)) { it.remove(); } break; case "<=": - if (lt(rowDataPacket.fieldValues.get(index),right) && eq(rowDataPacket.fieldValues.get(index),right)) { + /* Add parameter of colType, Modified by winbill. 20160312. */ + if (lt(rowDataPacket.fieldValues.get(index),right,colType) && eq(rowDataPacket.fieldValues.get(index),right,colType)) { it.remove(); } break; case "!=": - if (neq(rowDataPacket.fieldValues.get(index),right)) { + /* Add parameter of colType, Modified by winbill. 20160312. */ + if (neq(rowDataPacket.fieldValues.get(index),right,colType)) { it.remove(); } break; @@ -119,22 +141,66 @@ private void filterHaving(){ } - private boolean lt(byte[] l, byte[] r) { - return -1 != ByteUtil.compareNumberByte(l, r); + /* + * Using new compare function instead of compareNumberByte + * Modified by winbill. 20160312. 
+ */ + private boolean lt(byte[] l, byte[] r, final int colType) { +// return -1 != ByteUtil.compareNumberByte(l, r); + return -1 != RowDataPacketGrouper.compareObject(l, r, colType); } - private boolean gt(byte[] l, byte[] r) { - return 1 != ByteUtil.compareNumberByte(l, r); + private boolean gt(byte[] l, byte[] r, final int colType) { +// return 1 != ByteUtil.compareNumberByte(l, r, havingCol); + return 1 != RowDataPacketGrouper.compareObject(l, r, colType); } - private boolean eq(byte[] l, byte[] r) { - return 0 != ByteUtil.compareNumberByte(l, r); + private boolean eq(byte[] l, byte[] r, final int colType) { +// return 0 != ByteUtil.compareNumberByte(l, r, havingCol); + return 0 != RowDataPacketGrouper.compareObject(l, r, colType); } - private boolean neq(byte[] l, byte[] r) { - return 0 == ByteUtil.compareNumberByte(l, r); + private boolean neq(byte[] l, byte[] r, final int colType) { +// return 0 == ByteUtil.compareNumberByte(l, r, havingCol); + return 0 == RowDataPacketGrouper.compareObject(l, r, colType); } + /* + * Compare with the value of having column + * winbill. 20160312. 
+ */ + public static final int compareObject(byte[] left,byte[] right, final int colType) { + switch (colType) { + case ColMeta.COL_TYPE_SHORT: + case ColMeta.COL_TYPE_INT: + case ColMeta.COL_TYPE_INT24: + case ColMeta.COL_TYPE_LONG: + return CompareUtil.compareInt(ByteUtil.getInt(left), ByteUtil.getInt(right)); + case ColMeta.COL_TYPE_LONGLONG: + return CompareUtil.compareLong(ByteUtil.getLong(left), ByteUtil.getLong(right)); + case ColMeta.COL_TYPE_FLOAT: + case ColMeta.COL_TYPE_DOUBLE: + case ColMeta.COL_TYPE_DECIMAL: + case ColMeta.COL_TYPE_NEWDECIMAL: + return CompareUtil.compareDouble(ByteUtil.getDouble(left), ByteUtil.getDouble(right)); + case ColMeta.COL_TYPE_DATE: + case ColMeta.COL_TYPE_TIMSTAMP: + case ColMeta.COL_TYPE_TIME: + case ColMeta.COL_TYPE_YEAR: + case ColMeta.COL_TYPE_DATETIME: + case ColMeta.COL_TYPE_NEWDATE: + case ColMeta.COL_TYPE_BIT: + case ColMeta.COL_TYPE_VAR_STRING: + case ColMeta.COL_TYPE_STRING: + // ENUM和SET类型都是字符串,按字符串处理 + case ColMeta.COL_TYPE_ENUM: + case ColMeta.COL_TYPE_SET: + return ByteUtil.compareNumberByte(left, right); + // BLOB相关类型和GEOMETRY类型不支持排序,略掉 + } + return 0; + } + public void addRow(RowDataPacket rowDataPkg) { for (RowDataPacket row : result) { if (sameGropuColums(rowDataPkg, row)) { @@ -152,6 +218,23 @@ private void aggregateRow(RowDataPacket toRow, RowDataPacket newRow) { if (mergCols == null) { return; } + + /* + * 这里进行一次判断, 在跨分片聚合的情况下,如果有一个没有记录的分片,最先返回,可能返回有null 的情况. 
+ */ + if(!ishanlderFirstRow&&mergeColsIndex!=null&&mergeColsIndex.length>0){ + List values = toRow.fieldValues; + for(int i=0;i=0){ + continue; + } + if(values.get(i)==null){ + values.set(i, newRow.fieldValues.get(i)); + } + } + ishanlderFirstRow = true; + } + for (MergeCol merg : mergCols) { if(merg.mergeType!=MergeCol.MERGE_AVG) { @@ -165,18 +248,16 @@ private void aggregateRow(RowDataPacket toRow, RowDataPacket newRow) { } } } - - - - } private void mergAvg(RowDataPacket toRow) { if (mergCols == null) { return; } + + - + Set rmIndexSet = new HashSet(); for (MergeCol merg : mergCols) { if(merg.mergeType==MergeCol.MERGE_AVG) { @@ -187,12 +268,16 @@ private void mergAvg(RowDataPacket toRow) { if (result != null) { toRow.fieldValues.set(merg.colMeta.avgSumIndex, result); - toRow.fieldValues.remove(merg.colMeta.avgCountIndex) ; - toRow.fieldCount=toRow.fieldCount-1; +// toRow.fieldValues.remove(merg.colMeta.avgCountIndex) ; +// toRow.fieldCount=toRow.fieldCount-1; + rmIndexSet.add(merg.colMeta.avgCountIndex); } } } - + for(Integer index : rmIndexSet) { + toRow.fieldValues.remove(index); + toRow.fieldCount = toRow.fieldCount - 1; + } } @@ -209,14 +294,17 @@ private byte[] mertFields(byte[] bs, byte[] bs2, int colType, int mergeType) { } switch (mergeType) { case MergeCol.MERGE_SUM: - if (colType == ColMeta.COL_TYPE_NEWDECIMAL - || colType == ColMeta.COL_TYPE_DOUBLE - || colType == ColMeta.COL_TYPE_FLOAT - || colType == ColMeta.COL_TYPE_DECIMAL) { + if (colType == ColMeta.COL_TYPE_DOUBLE + || colType == ColMeta.COL_TYPE_FLOAT) { Double vale = ByteUtil.getDouble(bs) + ByteUtil.getDouble(bs2); return vale.toString().getBytes(); // return String.valueOf(vale).getBytes(); + } else if(colType == ColMeta.COL_TYPE_NEWDECIMAL + || colType == ColMeta.COL_TYPE_DECIMAL) { + BigDecimal d1 = new BigDecimal(new String(bs)); + d1 = d1.add(new BigDecimal(new String(bs2))); + return String.valueOf(d1).getBytes(); } // continue to count case case MergeCol.MERGE_COUNT: { @@ -247,10 
+335,19 @@ private byte[] mertFields(byte[] bs, byte[] bs2, int colType, int mergeType) { // return ByteUtil.compareNumberArray2(bs, bs2, 2); } case MergeCol.MERGE_AVG: { - double aDouble = ByteUtil.getDouble(bs); - long s2 = Long.parseLong(new String(bs2)); - Double vale = aDouble / s2; - return vale.toString().getBytes(); + if (colType == ColMeta.COL_TYPE_DOUBLE + || colType == ColMeta.COL_TYPE_FLOAT) { + double aDouble = ByteUtil.getDouble(bs); + long s2 = Long.parseLong(new String(bs2)); + Double vale = aDouble / s2; + return vale.toString().getBytes(); + } else if(colType == ColMeta.COL_TYPE_NEWDECIMAL + || colType == ColMeta.COL_TYPE_DECIMAL) { + BigDecimal sum = new BigDecimal(new String(bs)); + // mysql avg 处理精度为 sum结果的精度扩展4, 采用四舍五入 + BigDecimal avg = sum.divide(new BigDecimal(new String(bs2)), sum.scale() + 4, RoundingMode.HALF_UP); + return avg.toString().getBytes(); + } } default: return null; diff --git a/src/main/java/io/mycat/sqlengine/mpp/RowDataPacketSorter.java b/src/main/java/io/mycat/sqlengine/mpp/RowDataPacketSorter.java index 88c12e934..66cc84061 100644 --- a/src/main/java/io/mycat/sqlengine/mpp/RowDataPacketSorter.java +++ b/src/main/java/io/mycat/sqlengine/mpp/RowDataPacketSorter.java @@ -1,196 +1,197 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.sqlengine.mpp; - -import io.mycat.server.packet.RowDataPacket; -import io.mycat.util.ByteUtil; -import io.mycat.util.CompareUtil; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class RowDataPacketSorter { - - private List sorted = Collections.synchronizedList(new ArrayList()); - private static final Logger LOGGER = LoggerFactory.getLogger(RowDataPacketSorter.class); - private RowDataPacket[] array, resultTemp; - protected final OrderCol[] orderCols; - private int p1, pr, p2; - - public RowDataPacketSorter(OrderCol[] orderCols) { - super(); - this.orderCols = orderCols; - } - - public boolean addRow(RowDataPacket row) { - return this.sorted.add(row); - - } - - public List getSortedResult() { - try { - this.mergeSort(sorted.toArray(new RowDataPacket[sorted.size()])); - } catch (Exception e) { - LOGGER.error("getSortedResultError",e); - } - if (array != null) { - Collections.addAll(this.sorted, array); - } - - return sorted; - } - - private RowDataPacket[] mergeSort(RowDataPacket[] result) throws Exception { - this.sorted.clear(); - array = result; - if (result == null || result.length < 2 || this.orderCols == null || orderCols.length < 1) { - return result; - } - mergeR(0, result.length - 1); - - return array; - } - - private void mergeR(int startIndex, int endIndex) { - if (startIndex < endIndex) { - int mid = (startIndex + endIndex) / 2; - - 
mergeR(startIndex, mid); - - mergeR(mid + 1, endIndex); - - merge(startIndex, mid, endIndex); - } - } - - private void merge(int startIndex, int midIndex, int endIndex) { - resultTemp = new RowDataPacket[(endIndex - startIndex + 1)]; - - pr = 0; - p1 = startIndex; - p2 = midIndex + 1; - while (p1 <= midIndex || p2 <= endIndex) { - if (p1 == midIndex + 1) { - while (p2 <= endIndex) { - resultTemp[pr++] = array[p2++]; - - } - } else if (p2 == endIndex + 1) { - while (p1 <= midIndex) { - resultTemp[pr++] = array[p1++]; - } - - } else { - compare(0); - } - } - for (p1 = startIndex, p2 = 0; p1 <= endIndex; p1++, p2++) { - array[p1] = resultTemp[p2]; - - } - } - - /** - * 递归按照排序字段进行排序 - * - * @param byColumnIndex - */ - private void compare(int byColumnIndex) { - - if (byColumnIndex == this.orderCols.length) { - if (this.orderCols[byColumnIndex - 1].orderType == OrderCol.COL_ORDER_TYPE_ASC) { - - resultTemp[pr++] = array[p1++]; - } else { - resultTemp[pr++] = array[p2++]; - } - return; - } - - byte[] left = array[p1].fieldValues.get(this.orderCols[byColumnIndex].colMeta.colIndex); - byte[] right = array[p2].fieldValues.get(this.orderCols[byColumnIndex].colMeta.colIndex); - - if (compareObject(left, right, this.orderCols[byColumnIndex]) <= 0) { - if (compareObject(left, right, this.orderCols[byColumnIndex]) < 0) { - if (this.orderCols[byColumnIndex].orderType == OrderCol.COL_ORDER_TYPE_ASC) {// 升序 - resultTemp[pr++] = array[p1++]; - } else { - resultTemp[pr++] = array[p2++]; - } - } else {// 如果当前字段相等,则按照下一个字段排序 - compare(byColumnIndex + 1); - - } - - } else { - if (this.orderCols[byColumnIndex].orderType == OrderCol.COL_ORDER_TYPE_ASC) {// 升序 - resultTemp[pr++] = array[p2++]; - } else { - resultTemp[pr++] = array[p1++]; - } - - } - } - - public static final int compareObject(Object l, Object r, OrderCol orderCol) { - - int colType = orderCol.getColMeta().getColType(); - byte[] left = (byte[]) l; - byte[] right = (byte[]) r; - // System.out.println("------------" + 
colType); - switch (colType) { - case ColMeta.COL_TYPE_DECIMAL: - case ColMeta.COL_TYPE_INT: - case ColMeta.COL_TYPE_SHORT: - case ColMeta.COL_TYPE_LONG: - case ColMeta.COL_TYPE_FLOAT: - case ColMeta.COL_TYPE_DOUBLE: - case ColMeta.COL_TYPE_LONGLONG: - case ColMeta.COL_TYPE_INT24: - case ColMeta.COL_TYPE_NEWDECIMAL: - // 因为mysql的日期也是数字字符串方式表达,因此可以跟整数等一起对待 - case ColMeta.COL_TYPE_DATE: - case ColMeta.COL_TYPE_TIMSTAMP: - case ColMeta.COL_TYPE_TIME: - case ColMeta.COL_TYPE_YEAR: - case ColMeta.COL_TYPE_DATETIME: - case ColMeta.COL_TYPE_NEWDATE: - case ColMeta.COL_TYPE_BIT: - return ByteUtil.compareNumberByte(left, right); - case ColMeta.COL_TYPE_VAR_STRING: - case ColMeta.COL_TYPE_STRING: - // ENUM和SET类型都是字符串,按字符串处理 - case ColMeta.COL_TYPE_ENUM: - case ColMeta.COL_TYPE_SET: - return CompareUtil.compareString(ByteUtil.getString(left), ByteUtil.getString(right)); - - // BLOB相关类型和GEOMETRY类型不支持排序,略掉 - } - return 0; - } +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.sqlengine.mpp; + +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.ConcurrentLinkedQueue; + +import io.mycat.memory.unsafe.utils.BytesTools; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.util.ByteUtil; + +public class RowDataPacketSorter { + + private static final Logger LOGGER = LoggerFactory.getLogger(RowDataPacketSorter.class); + protected final OrderCol[] orderCols; + + private Collection sorted = new ConcurrentLinkedQueue(); + private RowDataPacket[] array, resultTemp; + private int p1, pr, p2; + + public RowDataPacketSorter(OrderCol[] orderCols) { + super(); + this.orderCols = orderCols; + } + + public boolean addRow(RowDataPacket row) { + return this.sorted.add(row); + + } + + public Collection getSortedResult() { + try { + this.mergeSort(sorted.toArray(new RowDataPacket[sorted.size()])); + } catch (Exception e) { + LOGGER.error("getSortedResultError",e); + } + if (array != null) { + Collections.addAll(this.sorted, array); + } + + return sorted; + } + + private RowDataPacket[] mergeSort(RowDataPacket[] result) throws Exception { + this.sorted.clear(); + array = result; + if (result == null || result.length < 2 || this.orderCols == null || orderCols.length < 1) { + return result; + } + mergeR(0, result.length - 1); + + return array; + } + + private void mergeR(int startIndex, int endIndex) { + if (startIndex < endIndex) { + int mid = (startIndex + endIndex) / 2; + + mergeR(startIndex, mid); + + mergeR(mid + 1, endIndex); + + merge(startIndex, mid, endIndex); + } + } + + private void merge(int startIndex, int midIndex, int endIndex) { + resultTemp = new RowDataPacket[(endIndex - startIndex + 1)]; + + pr = 0; + p1 = startIndex; + p2 = midIndex + 1; + while (p1 <= midIndex || p2 <= endIndex) { + 
if (p1 == midIndex + 1) { + while (p2 <= endIndex) { + resultTemp[pr++] = array[p2++]; + + } + } else if (p2 == endIndex + 1) { + while (p1 <= midIndex) { + resultTemp[pr++] = array[p1++]; + } + + } else { + compare(0); + } + } + for (p1 = startIndex, p2 = 0; p1 <= endIndex; p1++, p2++) { + array[p1] = resultTemp[p2]; + + } + } + + /** + * 递归按照排序字段进行排序 + * + * @param byColumnIndex + */ + private void compare(int byColumnIndex) { + + if (byColumnIndex == this.orderCols.length) { + if (this.orderCols[byColumnIndex - 1].orderType == OrderCol.COL_ORDER_TYPE_ASC) { + + resultTemp[pr++] = array[p1++]; + } else { + resultTemp[pr++] = array[p2++]; + } + return; + } + + byte[] left = array[p1].fieldValues.get(this.orderCols[byColumnIndex].colMeta.colIndex); + byte[] right = array[p2].fieldValues.get(this.orderCols[byColumnIndex].colMeta.colIndex); + + if (compareObject(left, right, this.orderCols[byColumnIndex]) <= 0) { + if (compareObject(left, right, this.orderCols[byColumnIndex]) < 0) { + if (this.orderCols[byColumnIndex].orderType == OrderCol.COL_ORDER_TYPE_ASC) {// 升序 + resultTemp[pr++] = array[p1++]; + } else { + resultTemp[pr++] = array[p2++]; + } + } else {// 如果当前字段相等,则按照下一个字段排序 + compare(byColumnIndex + 1); + + } + + } else { + if (this.orderCols[byColumnIndex].orderType == OrderCol.COL_ORDER_TYPE_ASC) {// 升序 + resultTemp[pr++] = array[p2++]; + } else { + resultTemp[pr++] = array[p1++]; + } + + } + } + + public static final int compareObject(Object l, Object r, OrderCol orderCol) { + return compareObject(( byte[])l, (byte[])r, orderCol); + } + + public static final int compareObject(byte[] left,byte[] right, OrderCol orderCol) { + int colType = orderCol.getColMeta().getColType(); + switch (colType) { + case ColMeta.COL_TYPE_DECIMAL: + case ColMeta.COL_TYPE_INT: + case ColMeta.COL_TYPE_SHORT: + case ColMeta.COL_TYPE_LONG: + case ColMeta.COL_TYPE_FLOAT: + case ColMeta.COL_TYPE_DOUBLE: + case ColMeta.COL_TYPE_LONGLONG: + case ColMeta.COL_TYPE_INT24: + case 
ColMeta.COL_TYPE_NEWDECIMAL: + // 因为mysql的日期也是数字字符串方式表达,因此可以跟整数等一起对待 + case ColMeta.COL_TYPE_DATE: + case ColMeta.COL_TYPE_TIMSTAMP: + case ColMeta.COL_TYPE_TIME: + case ColMeta.COL_TYPE_YEAR: + case ColMeta.COL_TYPE_DATETIME: + case ColMeta.COL_TYPE_NEWDATE: + case ColMeta.COL_TYPE_BIT: +// return BytesTools.compareTo(left,right); + return ByteUtil.compareNumberByte(left, right); + case ColMeta.COL_TYPE_VAR_STRING: + case ColMeta.COL_TYPE_STRING: + // ENUM和SET类型都是字符串,按字符串处理 + case ColMeta.COL_TYPE_ENUM: + case ColMeta.COL_TYPE_SET: + return BytesTools.compareTo(left,right); + // BLOB相关类型和GEOMETRY类型不支持排序,略掉 + } + return 0; + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/sqlengine/mpp/UnsafeRowGrouper.java b/src/main/java/io/mycat/sqlengine/mpp/UnsafeRowGrouper.java new file mode 100644 index 000000000..c91353246 --- /dev/null +++ b/src/main/java/io/mycat/sqlengine/mpp/UnsafeRowGrouper.java @@ -0,0 +1,944 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.sqlengine.mpp; + +import io.mycat.MycatServer; +import io.mycat.memory.MyCatMemory; +import io.mycat.memory.unsafe.KVIterator; +import io.mycat.memory.unsafe.map.UnsafeFixedWidthAggregationMap; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.memory.mm.MemoryManager; +import io.mycat.memory.unsafe.row.BufferHolder; +import io.mycat.memory.unsafe.row.StructType; +import io.mycat.memory.unsafe.row.UnsafeRow; +import io.mycat.memory.unsafe.row.UnsafeRowWriter; + +import io.mycat.memory.unsafe.utils.BytesTools; +import io.mycat.memory.unsafe.utils.MycatPropertyConf; +import io.mycat.memory.unsafe.utils.sort.UnsafeExternalRowSorter; +import io.mycat.util.ByteUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.nio.charset.StandardCharsets; +import java.text.NumberFormat; +import java.util.*; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Created by zagnix on 2016/6/26. 
+ * + * implement group function select a,count(*),sum(*) from A group by a + * + */ +public class UnsafeRowGrouper { + private static final Logger logger = LoggerFactory.getLogger(UnsafeRowGrouper.class); + + private UnsafeFixedWidthAggregationMap aggregationMap = null; + private final Map columToIndx; + private final MergeCol[] mergCols; + private String[] sortColumnsByIndex = null; + private final String[] columns; + private boolean isMergAvg=false; + private HavingCols havingCols; + private UnsafeRow groupKey = null; + private UnsafeRow valueKey = null; + private BufferHolder bufferHolder = null; + private UnsafeRowWriter unsafeRowWriter = null; + private final int groupKeyfieldCount; + private final int valuefieldCount; + private StructType groupKeySchema ; + private StructType aggBufferSchema; + private UnsafeRow emptyAggregationBuffer; + private final MyCatMemory myCatMemory; + private final MemoryManager memoryManager; + private final MycatPropertyConf conf; + + public UnsafeRowGrouper(Map columToIndx,String[] columns, MergeCol[] mergCols, HavingCols havingCols) { + super(); + assert columns!=null; + assert columToIndx!=null; + assert mergCols !=null; + this.columToIndx = columToIndx; + this.columns = columns; + this.mergCols = mergCols; + this.havingCols = havingCols; + this.sortColumnsByIndex = columns !=null ? toSortColumnsByIndex(columns,columToIndx):null; + this.groupKeyfieldCount = columns != null?columns.length:0; + this.valuefieldCount = columToIndx != null?columToIndx.size():0; + this.myCatMemory = MycatServer.getInstance().getMyCatMemory(); + this.memoryManager = myCatMemory.getResultMergeMemoryManager(); + this.conf = myCatMemory.getConf(); + + logger.debug("columToIndx :" + (columToIndx != null ? 
columToIndx.toString():"null")); + + initGroupKey(); + initEmptyValueKey(); + + DataNodeMemoryManager dataNodeMemoryManager = + new DataNodeMemoryManager(memoryManager,Thread.currentThread().getId()); + + aggregationMap = new UnsafeFixedWidthAggregationMap( + emptyAggregationBuffer, + aggBufferSchema, + groupKeySchema, + dataNodeMemoryManager, + 1024, + conf.getSizeAsBytes("mycat.buffer.pageSize", "32k"), + false); + } + + private String[] toSortColumnsByIndex(String[] columns, Map columToIndx) { + + Map map = new HashMap(); + + ColMeta curColMeta; + for (int i = 0; i < columns.length; i++) { + curColMeta = columToIndx.get(columns[i].toUpperCase()); + if (curColMeta == null) { + throw new IllegalArgumentException( + "all columns in group by clause should be in the selected column list.!" + + columns[i]); + } + map.put(columns[i],curColMeta.colIndex); + } + + + String[] sortColumnsByIndex = new String[map.size()]; + + List> entryList = new ArrayList< + Map.Entry>( + map.entrySet()); + + Collections.sort(entryList, new Comparator>() { + @Override + public int compare(Map.Entry o1, Map.Entry o2) { + return o1.getValue().compareTo(o2.getValue()); + } + }); + + Iterator> iter = entryList.iterator(); + Map.Entry tmpEntry = null; + + int index = 0; + + while (iter.hasNext()) { + tmpEntry = iter.next(); + sortColumnsByIndex[index++] = tmpEntry.getKey(); + } + + return sortColumnsByIndex; + } + + private void initGroupKey(){ + /** + * 构造groupKey + */ + Map groupcolMetaMap = new HashMap(this.groupKeyfieldCount); + + groupKey = new UnsafeRow(this.groupKeyfieldCount); + bufferHolder = new BufferHolder(groupKey,0); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder,this.groupKeyfieldCount); + bufferHolder.reset(); + + ColMeta curColMeta = null; + + for (int i = 0; i < this.groupKeyfieldCount; i++) { + curColMeta = this.columToIndx.get(sortColumnsByIndex[i].toUpperCase()); + groupcolMetaMap.put(sortColumnsByIndex[i],curColMeta); + + + switch (curColMeta.colType) { + case 
ColMeta.COL_TYPE_BIT: + groupKey.setByte(i, (byte) 0); + break; + case ColMeta.COL_TYPE_INT: + case ColMeta.COL_TYPE_INT24: + case ColMeta.COL_TYPE_LONG: + groupKey.setInt(i, 0); + break; + case ColMeta.COL_TYPE_SHORT: + groupKey.setShort(i, (short) 0); + break; + case ColMeta.COL_TYPE_FLOAT: + groupKey.setFloat(i, 0); + break; + case ColMeta.COL_TYPE_DOUBLE: + groupKey.setDouble(i, 0); + break; + case ColMeta.COL_TYPE_NEWDECIMAL: +// groupKey.setDouble(i, 0); + unsafeRowWriter.write(i, new BigDecimal(0L)); + break; + case ColMeta.COL_TYPE_LONGLONG: + groupKey.setLong(i, 0); + break; + default: + unsafeRowWriter.write(i, "init".getBytes()); + break; + } + + } + groupKey.setTotalSize(bufferHolder.totalSize()); + + groupKeySchema = new StructType(groupcolMetaMap,this.groupKeyfieldCount); + groupKeySchema.setOrderCols(null); + } + + private void initEmptyValueKey(){ + /** + * 构造valuerow + */ + emptyAggregationBuffer = new UnsafeRow(this.valuefieldCount); + bufferHolder = new BufferHolder(emptyAggregationBuffer,0); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder,this.valuefieldCount); + bufferHolder.reset(); + + ColMeta curColMeta = null; + for (Map.Entry fieldEntry : columToIndx.entrySet()) { + curColMeta = fieldEntry.getValue(); + + switch (curColMeta.colType) { + case ColMeta.COL_TYPE_BIT: + emptyAggregationBuffer.setByte(curColMeta.colIndex, (byte) 0); + break; + case ColMeta.COL_TYPE_INT: + case ColMeta.COL_TYPE_INT24: + case ColMeta.COL_TYPE_LONG: + emptyAggregationBuffer.setInt(curColMeta.colIndex, 0); + break; + case ColMeta.COL_TYPE_SHORT: + emptyAggregationBuffer.setShort(curColMeta.colIndex, (short) 0); + break; + case ColMeta.COL_TYPE_LONGLONG: + emptyAggregationBuffer.setLong(curColMeta.colIndex, 0); + break; + case ColMeta.COL_TYPE_FLOAT: + emptyAggregationBuffer.setFloat(curColMeta.colIndex, 0); + break; + case ColMeta.COL_TYPE_DOUBLE: + emptyAggregationBuffer.setDouble(curColMeta.colIndex, 0); + break; + case ColMeta.COL_TYPE_NEWDECIMAL: +// 
emptyAggregationBuffer.setDouble(curColMeta.colIndex, 0); + unsafeRowWriter.write(curColMeta.colIndex, new BigDecimal(0L)); + break; + default: + unsafeRowWriter.write(curColMeta.colIndex, "init".getBytes()); + break; + } + + } + + emptyAggregationBuffer.setTotalSize(bufferHolder.totalSize()); + aggBufferSchema = new StructType(columToIndx,this.valuefieldCount); + aggBufferSchema.setOrderCols(null); + } + + + public Iterator getResult(@Nonnull UnsafeExternalRowSorter sorter) throws IOException { + KVIterator iter = aggregationMap.iterator(); + /** + * 求平均值 + */ + if (isMergeAvg() && !isMergAvg){ + try { + while (iter.next()){ + mergAvg(iter.getValue()); + } + } catch (IOException e) { + logger.error(e.getMessage()); + } + isMergAvg = true; + processAvgFieldPrecision(); + } + /** + * group having + */ + if (havingCols !=null){ + filterHaving(sorter); + }else{ + + /** + * KVIterator ==>Iterator + */ + insertValue(sorter); + } + return sorter.sort(); + } + + /** + * 处理AVG列精度 + */ + private void processAvgFieldPrecision() { + for(String key : columToIndx.keySet()) { + if(isAvgField(key)) { // AVG列的小数点精度默认取SUM小数点精度, 计算和返回的小数点精度应该扩展4 + ColMeta colMeta = columToIndx.get(key); + colMeta.decimals += 4; + } + } + } + + /** + * 判断列是否为AVG列 + * @param columnName + * @return + */ + private boolean isAvgField(String columnName) { + Pattern pattern = Pattern.compile("AVG([1-9]\\d*|0)SUM"); + Matcher matcher = pattern.matcher(columnName); + return matcher.find(); + } + + + public UnsafeRow getAllBinaryRow(UnsafeRow row) throws UnsupportedEncodingException { + + UnsafeRow value = new UnsafeRow( this.valuefieldCount); + bufferHolder = new BufferHolder(value,0); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder, this.valuefieldCount); + bufferHolder.reset(); + ColMeta curColMeta = null; + + for (Map.Entry fieldEntry : columToIndx.entrySet()) { + curColMeta = fieldEntry.getValue(); + + if (!row.isNullAt(curColMeta.colIndex)) { + switch (curColMeta.colType) { + case 
ColMeta.COL_TYPE_BIT: + unsafeRowWriter.write(curColMeta.colIndex, row.getByte(curColMeta.colIndex)); + break; + case ColMeta.COL_TYPE_INT: + case ColMeta.COL_TYPE_LONG: + case ColMeta.COL_TYPE_INT24: + unsafeRowWriter.write(curColMeta.colIndex, + BytesTools.int2Bytes(row.getInt(curColMeta.colIndex))); + break; + case ColMeta.COL_TYPE_SHORT: + unsafeRowWriter.write(curColMeta.colIndex, + BytesTools.short2Bytes(row.getShort(curColMeta.colIndex))); + break; + case ColMeta.COL_TYPE_LONGLONG: + unsafeRowWriter.write(curColMeta.colIndex, + BytesTools.long2Bytes(row.getLong(curColMeta.colIndex))); + break; + case ColMeta.COL_TYPE_FLOAT: + unsafeRowWriter.write(curColMeta.colIndex, + BytesTools.float2Bytes(row.getFloat(curColMeta.colIndex))); + break; + case ColMeta.COL_TYPE_DOUBLE: + unsafeRowWriter.write(curColMeta.colIndex, + BytesTools.double2Bytes(row.getDouble(curColMeta.colIndex))); + break; + case ColMeta.COL_TYPE_NEWDECIMAL: + int scale = curColMeta.decimals; + BigDecimal decimalVal = row.getDecimal(curColMeta.colIndex, scale); + unsafeRowWriter.write(curColMeta.colIndex, decimalVal.toString().getBytes()); + break; + default: + unsafeRowWriter.write(curColMeta.colIndex, + row.getBinary(curColMeta.colIndex)); + break; + } + }else { + unsafeRowWriter.setNullAt(curColMeta.colIndex); + } + } + + value.setTotalSize(bufferHolder.totalSize()); + return value; + } + + private void insertValue(@Nonnull UnsafeExternalRowSorter sorter){ + KVIterator it = aggregationMap.iterator(); + try { + while (it.next()){ + UnsafeRow row = getAllBinaryRow(it.getValue()); + sorter.insertRow(row); + } + } catch (IOException e) { + logger.error("group insertValue err: " + e.getMessage()); + free(); + } + } + + private void filterHaving(@Nonnull UnsafeExternalRowSorter sorter){ + + if (havingCols.getColMeta() == null || aggregationMap == null) { + return; + } + KVIterator it = aggregationMap.iterator(); + byte[] right = havingCols.getRight().getBytes(StandardCharsets.UTF_8); + int index = 
havingCols.getColMeta().getColIndex(); + try { + while (it.next()){ + UnsafeRow row = getAllBinaryRow(it.getValue()); + switch (havingCols.getOperator()) { + case "=": + if (eq(row.getBinary(index),right)) { + sorter.insertRow(row); + } + break; + case ">": + if (gt(row.getBinary(index),right)) { + sorter.insertRow(row); + } + break; + case "<": + if (lt(row.getBinary(index),right)) { + sorter.insertRow(row); + } + break; + case ">=": + if (gt(row.getBinary(index),right) || eq(row.getBinary(index),right)) { + sorter.insertRow(row); + } + break; + case "<=": + if (lt(row.getBinary(index),right) || eq(row.getBinary(index),right)) { + sorter.insertRow(row); + } + break; + case "!=": + if (neq(row.getBinary(index),right)) { + sorter.insertRow(row); + } + break; + } + } + } catch (IOException e) { + logger.error(e.getMessage()); + } + + } + + private boolean lt(byte[] l, byte[] r) { + return -1 >= ByteUtil.compareNumberByte(l, r); + } + + private boolean gt(byte[] l, byte[] r) { + return 1 <= ByteUtil.compareNumberByte(l, r); + } + + private boolean eq(byte[] l, byte[] r) { + return 0 == ByteUtil.compareNumberByte(l, r); + } + + private boolean neq(byte[] l, byte[] r) { + return 0 != ByteUtil.compareNumberByte(l, r); + } + + /** + * 构造groupKey + */ + private UnsafeRow getGroupKey(UnsafeRow row) throws UnsupportedEncodingException { + + UnsafeRow key = null; + if(this.sortColumnsByIndex == null){ + /** + * 针对没有group by关键字 + * select count(*) from table; + */ + key = new UnsafeRow(this.groupKeyfieldCount+1); + bufferHolder = new BufferHolder(key,0); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder,this.groupKeyfieldCount+1); + bufferHolder.reset(); + unsafeRowWriter.write(0,"same".getBytes()); + key.setTotalSize(bufferHolder.totalSize()); + return key; + } + + + key = new UnsafeRow(this.groupKeyfieldCount); + bufferHolder = new BufferHolder(key,0); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder,this.groupKeyfieldCount); + bufferHolder.reset(); + + + ColMeta 
curColMeta = null; + for (int i = 0; i < this.groupKeyfieldCount;i++) { + curColMeta = this.columToIndx.get(sortColumnsByIndex[i].toUpperCase()); + if(!row.isNullAt(curColMeta.colIndex)){ + switch(curColMeta.colType){ + case ColMeta.COL_TYPE_BIT: + key.setByte(i,row.getByte(curColMeta.colIndex)); + case ColMeta.COL_TYPE_INT: + case ColMeta.COL_TYPE_LONG: + case ColMeta.COL_TYPE_INT24: + key.setInt(i, + BytesTools.getInt(row.getBinary(curColMeta.colIndex))); + break; + case ColMeta.COL_TYPE_SHORT: + key.setShort(i, + BytesTools.getShort(row.getBinary(curColMeta.colIndex))); + break; + case ColMeta.COL_TYPE_FLOAT: + key.setFloat(i, + BytesTools.getFloat(row.getBinary(curColMeta.colIndex))); + break; + case ColMeta.COL_TYPE_DOUBLE: + key.setDouble(i, + BytesTools.getDouble(row.getBinary(curColMeta.colIndex))); + break; + case ColMeta.COL_TYPE_NEWDECIMAL: +// key.setDouble(i, +// BytesTools.getDouble(row.getBinary(curColMeta.colIndex))); + unsafeRowWriter.write(i, + new BigDecimal(new String(row.getBinary(curColMeta.colIndex)))); + break; + case ColMeta.COL_TYPE_LONGLONG: + key.setLong(i, + BytesTools.getLong(row.getBinary(curColMeta.colIndex))); + break; + default: + unsafeRowWriter.write(i, + row.getBinary(curColMeta.colIndex)); + break; + } + }else { + key.setNullAt(i); + } + } + + key.setTotalSize(bufferHolder.totalSize()); + + return key; + } + + + /** + * 构造value + */ + private UnsafeRow getValue(UnsafeRow row) throws UnsupportedEncodingException { + + UnsafeRow value = new UnsafeRow(this.valuefieldCount); + bufferHolder = new BufferHolder(value,0); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder,this.valuefieldCount); + bufferHolder.reset(); + ColMeta curColMeta = null; + for (Map.Entry fieldEntry : columToIndx.entrySet()) { + curColMeta = fieldEntry.getValue(); + if(!row.isNullAt(curColMeta.colIndex)) { + switch (curColMeta.colType) { + case ColMeta.COL_TYPE_BIT: + value.setByte(curColMeta.colIndex, row.getByte(curColMeta.colIndex)); + break; + case 
ColMeta.COL_TYPE_INT: + case ColMeta.COL_TYPE_LONG: + case ColMeta.COL_TYPE_INT24: + value.setInt(curColMeta.colIndex, + BytesTools.getInt(row.getBinary(curColMeta.colIndex))); + + break; + case ColMeta.COL_TYPE_SHORT: + value.setShort(curColMeta.colIndex, + BytesTools.getShort(row.getBinary(curColMeta.colIndex))); + break; + case ColMeta.COL_TYPE_LONGLONG: + value.setLong(curColMeta.colIndex, + BytesTools.getLong(row.getBinary(curColMeta.colIndex))); + + + break; + case ColMeta.COL_TYPE_FLOAT: + value.setFloat(curColMeta.colIndex, + BytesTools.getFloat(row.getBinary(curColMeta.colIndex))); + + break; + case ColMeta.COL_TYPE_DOUBLE: + value.setDouble(curColMeta.colIndex, BytesTools.getDouble(row.getBinary(curColMeta.colIndex))); + + break; + case ColMeta.COL_TYPE_NEWDECIMAL: +// value.setDouble(curColMeta.colIndex, BytesTools.getDouble(row.getBinary(curColMeta.colIndex))); + unsafeRowWriter.write(curColMeta.colIndex, + new BigDecimal(new String(row.getBinary(curColMeta.colIndex)))); + break; + default: + unsafeRowWriter.write(curColMeta.colIndex, + row.getBinary(curColMeta.colIndex)); + break; + } + }else { + switch(curColMeta.colType) { + case ColMeta.COL_TYPE_NEWDECIMAL: + BigDecimal nullDecimal = null; + unsafeRowWriter.write(curColMeta.colIndex, nullDecimal); + break; + default: + value.setNullAt(curColMeta.colIndex); + break; + } + } + } + + + value.setTotalSize(bufferHolder.totalSize()); + return value; + } + + public void addRow(UnsafeRow rowDataPkg) throws UnsupportedEncodingException { + UnsafeRow key = getGroupKey(rowDataPkg); + UnsafeRow value = getValue(rowDataPkg); + + if(aggregationMap.find(key)){ + UnsafeRow rs = aggregationMap.getAggregationBuffer(key); + aggregateRow(key,rs,value); + }else { + aggregationMap.put(key,value); + } + + return; + } + + + private boolean isMergeAvg(){ + + if (mergCols == null) { + return false; + } + + for (MergeCol merg : mergCols) { + if(merg.mergeType == MergeCol.MERGE_AVG) { + return true; + } + } + return false; + } 
+ + private void aggregateRow(UnsafeRow key,UnsafeRow toRow, UnsafeRow newRow) throws UnsupportedEncodingException { + if (mergCols == null) { + return; + } + + for (MergeCol merg : mergCols) { + if(merg.mergeType != MergeCol.MERGE_AVG && merg.colMeta !=null) { + byte[] result = null; + byte[] left = null; + byte[] right = null; + int type = merg.colMeta.colType; + int index = merg.colMeta.colIndex; + switch(type){ + case ColMeta.COL_TYPE_INT: + case ColMeta.COL_TYPE_LONG: + case ColMeta.COL_TYPE_INT24: + left = BytesTools.int2Bytes(toRow.getInt(index)); + right = BytesTools.int2Bytes(newRow.getInt(index)); + break; + case ColMeta.COL_TYPE_SHORT: + left = BytesTools.short2Bytes(toRow.getShort(index)); + right =BytesTools.short2Bytes(newRow.getShort(index)); + break; + case ColMeta.COL_TYPE_LONGLONG: + left = BytesTools.long2Bytes(toRow.getLong(index)); + right = BytesTools.long2Bytes(newRow.getLong(index)); + break; + case ColMeta.COL_TYPE_FLOAT: + left = BytesTools.float2Bytes(toRow.getFloat(index)); + right = BytesTools.float2Bytes(newRow.getFloat(index)); + break; + case ColMeta.COL_TYPE_DOUBLE: + left = BytesTools.double2Bytes(toRow.getDouble(index)); + right = BytesTools.double2Bytes(newRow.getDouble(index)); + break; + case ColMeta.COL_TYPE_NEWDECIMAL: +// left = BytesTools.double2Bytes(toRow.getDouble(index)); +// right = BytesTools.double2Bytes(newRow.getDouble(index)); + int scale = merg.colMeta.decimals; + BigDecimal decimalLeft = toRow.getDecimal(index, scale); + BigDecimal decimalRight = newRow.getDecimal(index, scale); + left = decimalLeft == null ? null : decimalLeft.toString().getBytes(); + right = decimalRight == null ? 
null : decimalRight.toString().getBytes(); + break; + case ColMeta.COL_TYPE_DATE: + case ColMeta.COL_TYPE_TIMSTAMP: + case ColMeta.COL_TYPE_TIME: + case ColMeta.COL_TYPE_YEAR: + case ColMeta.COL_TYPE_DATETIME: + case ColMeta.COL_TYPE_NEWDATE: + case ColMeta.COL_TYPE_BIT: + case ColMeta.COL_TYPE_VAR_STRING: + case ColMeta.COL_TYPE_STRING: + case ColMeta.COL_TYPE_ENUM: + case ColMeta.COL_TYPE_SET: + left = toRow.getBinary(index); + right = newRow.getBinary(index); + break; + default: + break; + } + + result = mertFields(left,right,type,merg.mergeType); + + if (result != null) { + switch(type){ + case ColMeta.COL_TYPE_BIT: + toRow.setByte(index,result[0]); + case ColMeta.COL_TYPE_INT: + case ColMeta.COL_TYPE_LONG: + case ColMeta.COL_TYPE_INT24: + toRow.setInt(index,BytesTools.getInt(result)); + break; + case ColMeta.COL_TYPE_SHORT: + toRow.setShort(index,BytesTools.getShort(result)); + break; + case ColMeta.COL_TYPE_LONGLONG: + toRow.setLong(index,BytesTools.getLong(result)); + break; + case ColMeta.COL_TYPE_FLOAT: + toRow.setFloat(index,BytesTools.getFloat(result)); + break; + case ColMeta.COL_TYPE_DOUBLE: + toRow.setDouble(index,BytesTools.getDouble(result)); + break; + case ColMeta.COL_TYPE_NEWDECIMAL: +// toRow.setDouble(index,BytesTools.getDouble(result)); + toRow.updateDecimal(index, new BigDecimal(new String(result))); + break; + /** + *TODO UnsafeFixedWidthAggregationMap 中存放 + * UnsafeRow时,非数值类型的列不可更改其值, + * 为了统一处理聚合函数这块 + * 做max或者min聚合时候,目前解决方法 + * 先free原来 UnsafeFixedWidthAggregationMap对象。 + * 然后重新创建一个UnsafeFixedWidthAggregationMap对象 + * 然后存放最新的max或者min值作为下次比较。 + **/ + case ColMeta.COL_TYPE_DATE: + case ColMeta.COL_TYPE_TIMSTAMP: + case ColMeta.COL_TYPE_TIME: + case ColMeta.COL_TYPE_YEAR: + case ColMeta.COL_TYPE_DATETIME: + case ColMeta.COL_TYPE_NEWDATE: + case ColMeta.COL_TYPE_VAR_STRING: + case ColMeta.COL_TYPE_STRING: + case ColMeta.COL_TYPE_ENUM: + case ColMeta.COL_TYPE_SET: + aggregationMap.free(); + DataNodeMemoryManager dataNodeMemoryManager = + new 
DataNodeMemoryManager(memoryManager,Thread.currentThread().getId()); + aggregationMap = new UnsafeFixedWidthAggregationMap( + emptyAggregationBuffer, + aggBufferSchema, + groupKeySchema, + dataNodeMemoryManager, + 1024, + conf.getSizeAsBytes("mycat.buffer.pageSize", "32k"), + false); + UnsafeRow unsafeRow = new UnsafeRow(toRow.numFields()); + bufferHolder = new BufferHolder(unsafeRow, 0); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder, toRow.numFields()); + bufferHolder.reset(); + for (int i = 0; i < toRow.numFields(); i++) { + + if (!toRow.isNullAt(i) && i != index) { + unsafeRowWriter.write(i, toRow.getBinary(i)); + } else if (!toRow.isNullAt(i) && i == index) { + unsafeRowWriter.write(i,result); + } else if (toRow.isNullAt(i)){ + unsafeRow.setNullAt(i); + } + } + unsafeRow.setTotalSize(bufferHolder.totalSize()); + aggregationMap.put(key, unsafeRow); + break; + default: + break; + } + } + } + } + } + + private void mergAvg(UnsafeRow toRow) throws UnsupportedEncodingException { + + if (mergCols == null) { + return; + } + + for (MergeCol merg : mergCols) { + if(merg.mergeType==MergeCol.MERGE_AVG) { + byte[] result = null; + byte[] avgSum = null; + byte[] avgCount = null; + + int type = merg.colMeta.colType; + int avgSumIndex = merg.colMeta.avgSumIndex; + int avgCountIndex = merg.colMeta.avgCountIndex; + + switch(type){ + case ColMeta.COL_TYPE_BIT: + avgSum = BytesTools.toBytes(toRow.getByte(avgSumIndex)); + avgCount = BytesTools.toBytes(toRow.getLong(avgCountIndex)); + break; + case ColMeta.COL_TYPE_INT: + case ColMeta.COL_TYPE_LONG: + case ColMeta.COL_TYPE_INT24: + avgSum = BytesTools.int2Bytes(toRow.getInt(avgSumIndex)); + avgCount = BytesTools.long2Bytes(toRow.getLong(avgCountIndex)); + break; + case ColMeta.COL_TYPE_SHORT: + avgSum =BytesTools.short2Bytes(toRow.getShort(avgSumIndex)); + avgCount = BytesTools.long2Bytes(toRow.getLong(avgCountIndex)); + break; + + case ColMeta.COL_TYPE_LONGLONG: + avgSum = BytesTools.long2Bytes(toRow.getLong(avgSumIndex)); 
+ avgCount = BytesTools.long2Bytes(toRow.getLong(avgCountIndex)); + + break; + case ColMeta.COL_TYPE_FLOAT: + avgSum = BytesTools.float2Bytes(toRow.getFloat(avgSumIndex)); + avgCount = BytesTools.long2Bytes(toRow.getLong(avgCountIndex)); + + break; + case ColMeta.COL_TYPE_DOUBLE: + avgSum = BytesTools.double2Bytes(toRow.getDouble(avgSumIndex)); + avgCount = BytesTools.long2Bytes(toRow.getLong(avgCountIndex)); + break; + case ColMeta.COL_TYPE_NEWDECIMAL: +// avgSum = BytesTools.double2Bytes(toRow.getDouble(avgSumIndex)); +// avgCount = BytesTools.long2Bytes(toRow.getLong(avgCountIndex)); + int scale = merg.colMeta.decimals; + BigDecimal sumDecimal = toRow.getDecimal(avgSumIndex, scale); + avgSum = sumDecimal == null ? null : sumDecimal.toString().getBytes(); + avgCount = BytesTools.long2Bytes(toRow.getLong(avgCountIndex)); + break; + default: + break; + } + + result = mertFields(avgSum,avgCount,merg.colMeta.colType,merg.mergeType); + + if (result != null) { + switch(type){ + case ColMeta.COL_TYPE_BIT: + toRow.setByte(avgSumIndex,result[0]); + break; + case ColMeta.COL_TYPE_INT: + case ColMeta.COL_TYPE_LONG: + case ColMeta.COL_TYPE_INT24: + toRow.setInt(avgSumIndex,BytesTools.getInt(result)); + break; + case ColMeta.COL_TYPE_SHORT: + toRow.setShort(avgSumIndex,BytesTools.getShort(result)); + break; + case ColMeta.COL_TYPE_LONGLONG: + toRow.setLong(avgSumIndex,BytesTools.getLong(result)); + break; + case ColMeta.COL_TYPE_FLOAT: + toRow.setFloat(avgSumIndex,BytesTools.getFloat(result)); + break; + case ColMeta.COL_TYPE_DOUBLE: + toRow.setDouble(avgSumIndex,ByteUtil.getDouble(result)); + break; + case ColMeta.COL_TYPE_NEWDECIMAL: +// toRow.setDouble(avgSumIndex,ByteUtil.getDouble(result)); + toRow.updateDecimal(avgSumIndex, new BigDecimal(new String(result))); + break; + default: + break; + } + } + } + } + } + + private byte[] mertFields(byte[] bs, byte[] bs2, int colType, int mergeType) throws UnsupportedEncodingException { + + if(bs2==null || bs2.length==0) { + return 
bs; + }else if(bs==null || bs.length==0) { + return bs2; + } + + switch (mergeType) { + case MergeCol.MERGE_SUM: + if (colType == ColMeta.COL_TYPE_DOUBLE + || colType == ColMeta.COL_TYPE_FLOAT){ + double value = BytesTools.getDouble(bs) + + BytesTools.getDouble(bs2); + + return BytesTools.double2Bytes(value); + } else if(colType == ColMeta.COL_TYPE_NEWDECIMAL + || colType == ColMeta.COL_TYPE_DECIMAL) { + BigDecimal decimal = new BigDecimal(new String(bs)); + decimal = decimal.add(new BigDecimal(new String(bs2))); + return decimal.toString().getBytes(); + } + + + case MergeCol.MERGE_COUNT: { + long s1 = BytesTools.getLong(bs); + long s2 = BytesTools.getLong(bs2); + long total = s1 + s2; + return BytesTools.long2Bytes(total); + } + + case MergeCol.MERGE_MAX: { + int compare = ByteUtil.compareNumberByte(bs,bs2); + return (compare > 0) ? bs : bs2; + } + + case MergeCol.MERGE_MIN: { + int compare = ByteUtil.compareNumberByte(bs,bs2); + return (compare > 0) ? bs2 : bs; + + } + case MergeCol.MERGE_AVG: { + /** + * 元素总个数 + */ + long count = BytesTools.getLong(bs2); + if (colType == ColMeta.COL_TYPE_DOUBLE + || colType == ColMeta.COL_TYPE_FLOAT) { + /** + * 数值总和 + */ + double sum = BytesTools.getDouble(bs); + double value = sum / count; + return BytesTools.double2Bytes(value); + } else if(colType == ColMeta.COL_TYPE_NEWDECIMAL + || colType == ColMeta.COL_TYPE_DECIMAL){ + BigDecimal sum = new BigDecimal(new String(bs)); + // AVG计算时候小数点精度扩展4, 并且四舍五入 + BigDecimal avg = sum.divide(new BigDecimal(count), sum.scale() + 4, RoundingMode.HALF_UP); + return avg.toString().getBytes(); + } + } + default: + return null; + } + } + + public void free(){ + if(aggregationMap != null) + aggregationMap.free(); + } +} diff --git a/src/main/java/io/mycat/sqlengine/mpp/NodeRowDataPacket.java b/src/main/java/io/mycat/sqlengine/mpp/model/NodeRowDataPacket.java similarity index 97% rename from src/main/java/io/mycat/sqlengine/mpp/NodeRowDataPacket.java rename to 
src/main/java/io/mycat/sqlengine/mpp/model/NodeRowDataPacket.java index d9463955d..944227fe4 100644 --- a/src/main/java/io/mycat/sqlengine/mpp/NodeRowDataPacket.java +++ b/src/main/java/io/mycat/sqlengine/mpp/model/NodeRowDataPacket.java @@ -1,11 +1,11 @@ -package io.mycat.sqlengine.mpp; - -import io.mycat.route.RouteResultsetNode; -import io.mycat.server.packet.RowDataPacket; +package io.mycat.sqlengine.mpp.model; import java.util.ArrayList; import java.util.List; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.route.RouteResultsetNode; + public class NodeRowDataPacket { private RouteResultsetNode node; diff --git a/src/main/java/io/mycat/sqlengine/mpp/RangRowDataPacket.java b/src/main/java/io/mycat/sqlengine/mpp/model/RangRowDataPacket.java similarity index 96% rename from src/main/java/io/mycat/sqlengine/mpp/RangRowDataPacket.java rename to src/main/java/io/mycat/sqlengine/mpp/model/RangRowDataPacket.java index da3dddd45..d37eba6b8 100644 --- a/src/main/java/io/mycat/sqlengine/mpp/RangRowDataPacket.java +++ b/src/main/java/io/mycat/sqlengine/mpp/model/RangRowDataPacket.java @@ -1,10 +1,10 @@ -package io.mycat.sqlengine.mpp; - -import io.mycat.server.packet.RowDataPacket; +package io.mycat.sqlengine.mpp.model; import java.util.ArrayList; import java.util.List; +import io.mycat.net.mysql.RowDataPacket; + public class RangRowDataPacket { public static final int DATA_TYPE_ALL = 100; public static final int DATA_TYPE_TRIM = 200; diff --git a/src/main/java/io/mycat/sqlengine/tmp/HeapItf.java b/src/main/java/io/mycat/sqlengine/tmp/HeapItf.java deleted file mode 100644 index eb865896b..000000000 --- a/src/main/java/io/mycat/sqlengine/tmp/HeapItf.java +++ /dev/null @@ -1,58 +0,0 @@ -package io.mycat.sqlengine.tmp; - - -import io.mycat.server.packet.RowDataPacket; - -import java.util.List; - -/** - * @author coderczp-2014-12-17 - */ -public interface HeapItf { - - /** - * 构建堆 - */ - void buildHeap(); - - /** - * 获取堆根节点 - * - * @return - */ - RowDataPacket 
getRoot(); - - /** - * 向堆添加元素 - * - * @param row - */ - void add(RowDataPacket row); - - /** - * 获取堆数据 - * - * @return - */ - List getData(); - - /** - * 设置根节点元素 - * - * @param root - */ - void setRoot(RowDataPacket root); - - /** - * 向已满的堆添加元素 - * - * @param row - */ - boolean addIfRequired(RowDataPacket row); - - /** - * 堆排序 - */ - void heapSort(int size); - -} diff --git a/src/main/java/io/mycat/sqlengine/tmp/MaxHeap.java b/src/main/java/io/mycat/sqlengine/tmp/MaxHeap.java deleted file mode 100644 index 3aaedf52c..000000000 --- a/src/main/java/io/mycat/sqlengine/tmp/MaxHeap.java +++ /dev/null @@ -1,134 +0,0 @@ -package io.mycat.sqlengine.tmp; - -import io.mycat.server.packet.RowDataPacket; - -import java.util.ArrayList; -import java.util.List; -/** - * 最大堆排序,适用于顺序排序 - * - * @author coderczp-2014-12-8 - */ -public class MaxHeap implements HeapItf { - - private RowDataCmp cmp; - private List data; - - public MaxHeap(RowDataCmp cmp, int size) { - this.cmp = cmp; - this.data = new ArrayList<>(); - } - - @Override - public void buildHeap() { - int len = data.size(); - for (int i = len / 2 - 1; i >= 0; i--) { - heapifyRecursive(i, len); - } - } - - private void heapify(int i, int size) { - int max = 0; - int mid = size >> 1;// ==size/2 - while (i <= mid) { - max = i; - int left = i << 1; - int right = left + 1; - if (left < size && cmp.compare(data.get(left), data.get(i)) > 0) { - max = left; - } - if (right < size && cmp.compare(data.get(right), data.get(max)) > 0) { - max = right; - } - if (i == max) - break; - if (i != max) { - RowDataPacket tmp = data.get(i); - data.set(i, data.get(max)); - data.set(max, tmp); - i = max; - } - } - - } - - // 递归版本 - protected void heapifyRecursive(int i, int size) { - int l = left(i); - int r = right(i); - int max = i; - if (l < size && cmp.compare(data.get(l), data.get(i)) > 0) - max = l; - if (r < size && cmp.compare(data.get(r), data.get(max)) > 0) - max = r; - if (i == max) - return; - swap(i, max); - heapifyRecursive(max, 
size); - } - - - private int right(int i) { - return (i + 1) << 1; - } - - private int left(int i) { - return ((i + 1) << 1) - 1; - } - - private void swap(int i, int j) { - RowDataPacket tmp = data.get(i); - RowDataPacket elementAt = data.get(j); - data.set(i, elementAt); - data.set(j, tmp); - } - - @Override - public RowDataPacket getRoot() { - return data.get(0); - } - - @Override - public void setRoot(RowDataPacket root) { - data.set(0, root); - heapifyRecursive(0, data.size()); - } - - @Override - public List getData() { - return data; - } - - @Override - public void add(RowDataPacket row) { - data.add(row); - } - - @Override - public boolean addIfRequired(RowDataPacket row) { - // 淘汰堆里最小的数据 - RowDataPacket root = getRoot(); - if (cmp.compare(row, root) < 0) { - setRoot(row); - return true; - } - return false; - } - - @Override - public void heapSort(int size) { - final int total = data.size(); - // 容错处理 - if (size <= 0 || size > total) { - size = total; - } - final int min = size == total ? 
0 : (total - size - 1); - - // 末尾与头交换,交换后调整最大堆 - for (int i = total - 1; i > min; i--) { - swap(0, i); - heapifyRecursive(0, i); - } - } - -} diff --git a/src/main/java/io/mycat/sqlengine/tmp/MinHeap.java b/src/main/java/io/mycat/sqlengine/tmp/MinHeap.java deleted file mode 100644 index f8feab49f..000000000 --- a/src/main/java/io/mycat/sqlengine/tmp/MinHeap.java +++ /dev/null @@ -1,104 +0,0 @@ -package io.mycat.sqlengine.tmp; - -import io.mycat.server.packet.RowDataPacket; - -import java.util.ArrayList; -import java.util.List; - -/** - * 最小堆排序,适用于倒序排序 - * - * @author coderczp-2014-12-8 - */ -public class MinHeap implements HeapItf { - - private RowDataCmp cmp; - private List data; - - public MinHeap(RowDataCmp cmp, int size) { - this.cmp = cmp; - this.data = new ArrayList<>(); - } - - @Override - public void buildHeap() { - int len = data.size(); - for (int i = len / 2 - 1; i >= 0; i--) { - heapify(i, len); - } - } - - private void heapify(int i, int size) { - int l = left(i); - int r = right(i); - int smallest = i; - if (l < size && cmp.compare(data.get(l), data.get(i)) < 0) - smallest = l; - if (r < size && cmp.compare(data.get(r), data.get(smallest)) < 0) - smallest = r; - if (i == smallest) - return; - swap(i, smallest); - heapify(smallest, size); - } - - private int right(int i) { - return (i + 1) << 1; - } - - private int left(int i) { - return ((i + 1) << 1) - 1; - } - - private void swap(int i, int j) { - RowDataPacket tmp = data.get(i); - RowDataPacket elementAt = data.get(j); - data.set(i, elementAt); - data.set(j, tmp); - } - - public RowDataPacket getRoot() { - return data.get(0); - } - - public void setRoot(RowDataPacket root) { - data.set(0, root); - heapify(0, data.size()); - } - - public List getData() { - return data; - } - - public void add(RowDataPacket row) { - data.add(row); - } - - @Override - public boolean addIfRequired(RowDataPacket row) { - // 淘汰堆里最小的数据 - RowDataPacket root = getRoot(); - if (cmp.compare(row, root) > 0) { - setRoot(row); - 
return true; - } - return false; - } - - @Override - public void heapSort(int size) { - final int total = data.size(); - //容错处理 - if (size <= 0 || size > total) { - size = total; - } - final int min = size == total ? 0 : (total - size - 1); - - //末尾与头交换,交换后调整最大堆 - for (int i = total - 1; i > min; i--) { - swap(0, i); - heapify(0, i); - } - } - -} diff --git a/src/main/java/io/mycat/sqlengine/tmp/RowDataCmp.java b/src/main/java/io/mycat/sqlengine/tmp/RowDataCmp.java deleted file mode 100644 index 708a25a03..000000000 --- a/src/main/java/io/mycat/sqlengine/tmp/RowDataCmp.java +++ /dev/null @@ -1,43 +0,0 @@ -package io.mycat.sqlengine.tmp; - -import io.mycat.server.packet.RowDataPacket; -import io.mycat.sqlengine.mpp.OrderCol; -import io.mycat.sqlengine.mpp.RowDataPacketSorter; - -import java.util.Comparator; - -/** - * - * @author coderczp-2014-12-8 - */ -public class RowDataCmp implements Comparator { - - private OrderCol[] orderCols; - - public RowDataCmp(OrderCol[] orderCols) { - this.orderCols = orderCols; - } - - @Override - public int compare(RowDataPacket o1, RowDataPacket o2) { - OrderCol[] tmp = this.orderCols; - int cmp = 0; - int len = tmp.length; - //依次比较order by语句上的多个排序字段的值 - int type = OrderCol.COL_ORDER_TYPE_ASC; - for (int i = 0; i < len; i++) { - int colIndex = tmp[i].colMeta.colIndex; - byte[] left = o1.fieldValues.get(colIndex); - byte[] right = o2.fieldValues.get(colIndex); - if (tmp[i].orderType == type) { - cmp = RowDataPacketSorter.compareObject(left, right, tmp[i]); - } else { - cmp = RowDataPacketSorter.compareObject(right, left, tmp[i]); - } - if (cmp != 0) - return cmp; - } - return cmp; - } - -} diff --git a/src/main/java/io/mycat/sqlengine/tmp/RowDataSorter.java b/src/main/java/io/mycat/sqlengine/tmp/RowDataSorter.java deleted file mode 100644 index 26d7cd007..000000000 --- a/src/main/java/io/mycat/sqlengine/tmp/RowDataSorter.java +++ /dev/null @@ -1,80 +0,0 @@ -package io.mycat.sqlengine.tmp; - -import 
io.mycat.server.packet.RowDataPacket; -import io.mycat.sqlengine.mpp.OrderCol; -import io.mycat.sqlengine.mpp.RowDataPacketSorter; - -import java.util.List; - -/** - * - * @author coderczp-2014-12-8 - */ -public class RowDataSorter extends RowDataPacketSorter { - - // 记录总数(=offset+limit) - private volatile int total; - // 查询的记录数(=limit) - private volatile int size; - // 堆 - private volatile HeapItf heap; - // 多列比较器 - private volatile RowDataCmp cmp; - // 是否执行过buildHeap - private volatile boolean hasBuild; - - public RowDataSorter(OrderCol[] orderCols) { - super(orderCols); - this.cmp = new RowDataCmp(orderCols); - } - - public synchronized void setLimit(int start, int size) { - // 容错处理 - if (start < 0) { - start = 0; - } - if (size <= 0) { - this.total = this.size = Integer.MAX_VALUE; - } else { - this.total = start + size; - this.size = size; - } - // 统一采用顺序,order by 条件交给比较器去处理 - this.heap = new MaxHeap(cmp, total); - } - - @Override - public synchronized boolean addRow(RowDataPacket row) { - if (heap.getData().size() < total) { - heap.add(row); - return true; - } - // 堆已满,构建最大堆,并执行淘汰元素逻辑 - if (heap.getData().size() == total && hasBuild == false) { - heap.buildHeap(); - hasBuild = true; - } - return heap.addIfRequired(row); - } - - @Override - public List getSortedResult() { - final List data = heap.getData(); - int size = data.size(); - if (size < 2) { - return data; - } else { - // 构建最大堆并排序 - if (!hasBuild) { - heap.buildHeap(); - } - heap.heapSort(this.size); - return heap.getData(); - } - } - - public RowDataCmp getCmp() { - return cmp; - } - -} diff --git a/src/main/java/io/mycat/statistic/CommandCount.java b/src/main/java/io/mycat/statistic/CommandCount.java new file mode 100644 index 000000000..2a307635b --- /dev/null +++ b/src/main/java/io/mycat/statistic/CommandCount.java @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.statistic; + +/** + * @author mycat + */ +public class CommandCount { + + private long initDB; + private long query; + private long stmtPrepare; + private long stmtSendLongData; + private long stmtReset; + private long stmtExecute; + private long stmtClose; + private long ping; + private long kill; + private long quit; + private long heartbeat; + private long other; + public CommandCount(){ + + } + public void doInitDB() { + ++initDB; + } + + public long initDBCount() { + return initDB; + } + + public void doQuery() { + ++query; + } + + public long queryCount() { + return query; + } + + public void doStmtPrepare() { + ++stmtPrepare; + } + + public long stmtPrepareCount() { + return stmtPrepare; + } + + public void doStmtSendLongData() { + ++stmtSendLongData; + } + + public long stmtSendLongDataCount() { + return stmtSendLongData; + } + + public void doStmtReset() { + ++stmtReset; + } + + public long stmtResetCount() { + return stmtReset; + } + + public void doStmtExecute() { + ++stmtExecute; + } + + 
public long stmtExecuteCount() { + return stmtExecute; + } + + public void doStmtClose() { + ++stmtClose; + } + + public long stmtCloseCount() { + return stmtClose; + } + + public void doPing() { + ++ping; + } + + public long pingCount() { + return ping; + } + + public void doKill() { + ++kill; + } + + public long killCount() { + return kill; + } + + public void doQuit() { + ++quit; + } + + public long quitCount() { + return quit; + } + + public void doOther() { + ++other; + } + + public long heartbeat() { + return heartbeat; + } + + public void doHeartbeat() { + ++heartbeat; + } + + public long otherCount() { + return other; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/statistic/DataSourceSyncRecorder.java b/src/main/java/io/mycat/statistic/DataSourceSyncRecorder.java new file mode 100644 index 000000000..0a123f98d --- /dev/null +++ b/src/main/java/io/mycat/statistic/DataSourceSyncRecorder.java @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.statistic; + +import java.text.SimpleDateFormat; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import io.mycat.config.model.DataHostConfig; +import io.mycat.util.TimeUtil; + +/** + * 记录最近3个时段的平均响应时间,默认1,10,30分钟。 + * + * @author songwie + */ +public class DataSourceSyncRecorder { + + private Map records; + private final List asynRecords;//value,time + private static final Logger LOGGER = LoggerFactory.getLogger("DataSourceSyncRecorder"); + + + private static final long SWAP_TIME = 24 * 60 * 60 * 1000L; + + //日期处理 + private static final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + private int switchType = 2; + + public DataSourceSyncRecorder() { + this.records = new HashMap() ; + this.asynRecords = new LinkedList(); + } + + public String get() { + return records.toString(); + } + + public void set(Map resultResult,int switchType) { + try{ + long time = TimeUtil.currentTimeMillis(); + this.switchType = switchType; + + remove(time); + + if (resultResult!=null && !resultResult.isEmpty()) { + this.records = resultResult; + if(switchType==DataHostConfig.SYN_STATUS_SWITCH_DS){ //slave + String sencords = resultResult.get("Seconds_Behind_Master"); + long Seconds_Behind_Master = -1; + if(sencords!=null){ + Seconds_Behind_Master = Long.parseLong(sencords); + } + this.asynRecords.add(new Record(TimeUtil.currentTimeMillis(),Seconds_Behind_Master)); + } + if(switchType==DataHostConfig.CLUSTER_STATUS_SWITCH_DS){//cluster + double wsrep_local_recv_queue_avg = Double.valueOf(resultResult.get("wsrep_local_recv_queue_avg")); + this.asynRecords.add(new Record(TimeUtil.currentTimeMillis(),wsrep_local_recv_queue_avg)); + } + + return; + } + }catch(Exception e){ + LOGGER.error("record 
DataSourceSyncRecorder error " + e.getMessage()); + } + + } + + /** + * 删除超过统计时间段的数据 + */ + private void remove(long time) { + final List recordsAll = this.asynRecords; + while (recordsAll.size() > 0) { + Record record = recordsAll.get(0); + if (time >= record.time + SWAP_TIME) { + recordsAll.remove(0); + } else { + break; + } + } + } + + public int getSwitchType() { + return this.switchType; + } + public void setSwitchType(int switchType) { + this.switchType = switchType; + } + public Map getRecords() { + return this.records; + } + public List getAsynRecords() { + return this.asynRecords; + } + public static SimpleDateFormat getSdf() { + return sdf; + } + + /** + * @author mycat + */ + public static class Record { + private Object value; + private long time; + + Record(long time, Object value) { + this.time = time; + this.value = value; + } + public Object getValue() { + return this.value; + } + public void setValue(Object value) { + this.value = value; + } + public long getTime() { + return this.time; + } + public void setTime(long time) { + this.time = time; + } + + + } +} diff --git a/src/main/java/io/mycat/backend/HeartbeatRecorder.java b/src/main/java/io/mycat/statistic/HeartbeatRecorder.java similarity index 58% rename from src/main/java/io/mycat/backend/HeartbeatRecorder.java rename to src/main/java/io/mycat/statistic/HeartbeatRecorder.java index ba77d840a..e9a6ecf6d 100644 --- a/src/main/java/io/mycat/backend/HeartbeatRecorder.java +++ b/src/main/java/io/mycat/statistic/HeartbeatRecorder.java @@ -21,12 +21,16 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.backend; +package io.mycat.statistic; -import io.mycat.util.TimeUtil; - -import java.util.LinkedList; import java.util.List; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.util.TimeUtil; /** * 记录最近3个时段的平均响应时间,默认1,10,30分钟。 @@ -39,14 +43,19 @@ public class HeartbeatRecorder { private static final long AVG1_TIME = 60 * 1000L; private static final long AVG2_TIME = 10 * 60 * 1000L; private static final long AVG3_TIME = 30 * 60 * 1000L; + private static final long SWAP_TIME = 24 * 60 * 60 * 1000L; private long avg1; private long avg2; private long avg3; - private final List records; + private final Queue records; + private final Queue recordsAll; + + private static final Logger LOGGER = LoggerFactory.getLogger("DataSourceSyncRecorder"); public HeartbeatRecorder() { - this.records = new LinkedList(); + this.records = new ConcurrentLinkedQueue(); + this.recordsAll = new ConcurrentLinkedQueue(); } public String get() { @@ -54,33 +63,49 @@ public String get() { } public void set(long value) { - if (value < 0) { - return; - } - long time = TimeUtil.currentTimeMillis(); - remove(time); - int size = records.size(); - if (size == 0) { - records.add(new Record(value, time)); - avg1 = avg2 = avg3 = value; - return; - } - if (size >= MAX_RECORD_SIZE) { - records.remove(0); - } - records.add(new Record(value, time)); - calculate(time); + try{ + long time = TimeUtil.currentTimeMillis(); + if (value < 0) { + recordsAll.offer(new Record(0, time)); + return; + } + remove(time); + int size = records.size(); + if (size == 0) { + records.offer(new Record(value, time)); + avg1 = avg2 = avg3 = value; + return; + } + if (size >= MAX_RECORD_SIZE) { + records.poll(); + } + records.offer(new Record(value, time)); + recordsAll.offer(new Record(value, time)); + calculate(time); + }catch(Exception e){ + LOGGER.error("record HeartbeatRecorder error " ,e); + } } /** * 
删除超过统计时间段的数据 */ private void remove(long time) { - final List records = this.records; + final Queue records = this.records; while (records.size() > 0) { - Record record = records.get(0); + Record record = records.peek(); if (time >= record.time + AVG3_TIME) { - records.remove(0); + records.poll(); + } else { + break; + } + } + + final Queue recordsAll = this.recordsAll; + while (recordsAll.size() > 0) { + Record record = recordsAll.peek(); + if (time >= record.time + SWAP_TIME) { + recordsAll.poll(); } else { break; } @@ -113,17 +138,34 @@ private void calculate(long time) { avg3 = (v3 / c3); } - /** + public Queue getRecordsAll() { + return this.recordsAll; + } + + /** * @author mycat */ - private static class Record { - private long value; - private long time; + public static class Record { + private long value; + private long time; Record(long value, long time) { this.value = value; this.time = time; } + public long getValue() { + return this.value; + } + public void setValue(long value) { + this.value = value; + } + public long getTime() { + return this.time; + } + public void setTime(long time) { + this.time = time; + } + + } - -} \ No newline at end of file +} diff --git a/src/main/java/io/mycat/statistic/SQLRecord.java b/src/main/java/io/mycat/statistic/SQLRecord.java new file mode 100644 index 000000000..2d78ed2b5 --- /dev/null +++ b/src/main/java/io/mycat/statistic/SQLRecord.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.statistic; + +/** + * @author mycat + */ +public final class SQLRecord implements Comparable { + + public String host; + public String schema; + public String statement; + public long startTime; + public long executeTime; + public String dataNode; + public int dataNodeIndex; + + @Override + public int compareTo(SQLRecord o) { + //执行时间从大到小 + long para = o.executeTime - executeTime; + //开始时间从大到小 + return (int) (para == 0 ? (o.startTime - startTime) : para ); + } + + @Override + public boolean equals(Object arg0) { + return super.equals(arg0); + } + + @Override + public int hashCode() { + // TODO Auto-generated method stub + return super.hashCode(); + } + + + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/server/config/node/DataNodeConfig.java b/src/main/java/io/mycat/statistic/SQLRecorder.java similarity index 50% rename from src/main/java/io/mycat/server/config/node/DataNodeConfig.java rename to src/main/java/io/mycat/statistic/SQLRecorder.java index 3f8d0be99..14f6ffdd5 100644 --- a/src/main/java/io/mycat/server/config/node/DataNodeConfig.java +++ b/src/main/java/io/mycat/statistic/SQLRecorder.java @@ -21,60 +21,55 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.config.node; +package io.mycat.statistic; + +import java.util.*; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.locks.ReentrantLock; /** - * 用于描述一个数据节点的配置 + * SQL统计排序记录器 * * @author mycat */ -public final class DataNodeConfig { - - private String name; - private String database; - private String dataHost; - - public DataNodeConfig() { - super(); - } +public final class SQLRecorder { - public DataNodeConfig(String name, String database, String dataHost) { - super(); - this.name = name; - this.database = database; - this.dataHost = dataHost; - } - - public String getName() { - return name; - } + private final int count; + SortedSet records; - public String getDatabase() { - return database; + public SQLRecorder(int count) { + this.count = count; + this.records = new ConcurrentSkipListSet<>(); } - public String getDataHost() { - return dataHost; + public List getRecords() { + List keyList = new ArrayList(records); + return keyList; } - public void setName(String name) { - this.name = name; - } - public void setDatabase(String database) { - this.database = database; + public void add(SQLRecord record) { + records.add(record); } - public void setDataHost(String dataHost) { - this.dataHost = dataHost; + public void clear() { + records.clear(); } - @Override - public String toString() { - return "DataNodeConfig{" + - "name='" + name + '\'' + - ", database='" + database + '\'' + - ", dataHost='" + dataHost + '\'' + - '}'; + public void recycle(){ + if(records.size() > count){ + SortedSet records2 = new ConcurrentSkipListSet<>(); + List keyList = new ArrayList(records); + int i = 0; + for(SQLRecord key : keyList){ + if(i == count) { + break; + } + records2.add(key); + i++; + } + records = records2; + } } } \ No newline at end of file diff --git a/src/main/java/io/mycat/statistic/stat/Histogram.java b/src/main/java/io/mycat/statistic/stat/Histogram.java new 
file mode 100644 index 000000000..f50d2c8ed --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/Histogram.java @@ -0,0 +1,82 @@ +package io.mycat.statistic.stat; + +import java.util.concurrent.atomic.AtomicLongArray; + +public class Histogram { + + private final long[] ranges; + private final AtomicLongArray rangeCounters; + + public Histogram(long... ranges){ + this.ranges = ranges; + this.rangeCounters = new AtomicLongArray(ranges.length); + } + + public void reset() { + for (int i = 0; i < rangeCounters.length(); i++) { + rangeCounters.set(i, 0); + } + } + + public void record(long range) { + int index = rangeCounters.length(); + for (int i = 0; i < ranges.length; i++) { + if (range == ranges[i]) { + index = i; + break; + } + } + + rangeCounters.incrementAndGet(index); + } + + public long get(int index) { + return rangeCounters.get(index); + } + + public long[] toArray() { + long[] array = new long[rangeCounters.length()]; + for (int i = 0; i < rangeCounters.length(); i++) { + array[i] = rangeCounters.get(i); + } + return array; + } + + public long[] toArrayAndReset() { + long[] array = new long[rangeCounters.length()]; + for (int i = 0; i < rangeCounters.length(); i++) { + array[i] = rangeCounters.getAndSet(i, 0); + } + + return array; + } + + public long[] getRanges() { + return ranges; + } + + public long getValue(int index) { + return rangeCounters.get(index); + } + + public long getSum() { + long sum = 0; + for (int i = 0; i < rangeCounters.length(); ++i) { + sum += rangeCounters.get(i); + } + return sum; + } + + public String toString() { + StringBuilder buf = new StringBuilder(); + buf.append('['); + for (int i = 0; i < rangeCounters.length(); i++) { + if (i != 0) { + buf.append(", "); + } + buf.append(rangeCounters.get(i)); + } + buf.append(']'); + return buf.toString(); + } +} diff --git a/src/main/java/io/mycat/statistic/stat/HostStatAnalyzer.java b/src/main/java/io/mycat/statistic/stat/HostStatAnalyzer.java new file mode 100644 index 
000000000..462ee9f30 --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/HostStatAnalyzer.java @@ -0,0 +1,16 @@ +package io.mycat.statistic.stat; + +/** + * 前端SQL客户端主机 的访问统计 + * + * @author zhuam + * + */ +public class HostStatAnalyzer implements QueryResultListener { + + @Override + public void onQueryResult(QueryResult query) { + + } + +} diff --git a/src/main/java/io/mycat/statistic/stat/QueryConditionAnalyzer.java b/src/main/java/io/mycat/statistic/stat/QueryConditionAnalyzer.java new file mode 100644 index 000000000..a97b954aa --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/QueryConditionAnalyzer.java @@ -0,0 +1,227 @@ +package io.mycat.statistic.stat; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantLock; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import com.alibaba.druid.sql.dialect.mysql.visitor.MySqlSchemaStatVisitor; +import com.alibaba.druid.stat.TableStat.Condition; + +import io.mycat.server.parser.ServerParse; + +/** + * 特定 SQL 查询条件的统计分析 + * -------------------------------------------------- + * + * 例: + * SELECT * FROM v1user Where userName=? AND cityName =? + * SELECT * FROM v1user Where userName=? + * SELECT * FROM v1user Where userName=? 
AND age > 20 + * + * SELECT * FROM v1user Where userName = "张三" AND cityName = "北京"; + * SELECT * FROM v1user Where userName = "李四" + * SELECT * FROM v1user Where userName = "张三" AND age > 20 + * + * 现在我们希望知道DB 中 业务比较关注的 userName 有哪些,次数是多少, 怎么处理哩,如下 + * + * 设置: 表名&条件列 ( v1user&userName ) 即可,取消请设置 NULL + * + * @author zhuam + * + */ +public class QueryConditionAnalyzer implements QueryResultListener { + private final static long MAX_QUERY_MAP_SIZE = 100000; + private static final Logger LOGGER = LoggerFactory.getLogger(QueryConditionAnalyzer.class); + + private String tableName = null; + private String columnName = null; + + // column value -> count +// private final HashMap map = new HashMap(); + private final Map map = new ConcurrentHashMap<>(); + + private ReentrantLock lock = new ReentrantLock(); + + private SQLParser sqlParser = new SQLParser(); + + private final static QueryConditionAnalyzer instance = new QueryConditionAnalyzer(); + + private QueryConditionAnalyzer() {} + + public static QueryConditionAnalyzer getInstance() { + return instance; + } + + + @Override + public void onQueryResult(QueryResult queryResult) { + +// this.lock.lock(); +// try { + + int sqlType = queryResult.getSqlType(); + String sql = queryResult.getSql(); + + switch(sqlType) { + case ServerParse.SELECT: + List values = sqlParser.parseConditionValues(sql, this.tableName, this.columnName); + if ( values != null ) { + + if ( this.map.size() < MAX_QUERY_MAP_SIZE ) { + + for(Object value : values) { + AtomicLong count = this.map.get(value); + if (count == null) { + count = new AtomicLong(1L); + } else { + count.getAndIncrement(); + } + this.map.put(value, count); + } + + } else { + LOGGER.debug(" this map is too large size "); + } + } + } + +// } finally { +// this.lock.unlock(); +// } + } + + public boolean setCf(String cf) { + + boolean isOk = false; + + this.lock.lock(); + try { + + if ( !"NULL".equalsIgnoreCase(cf) ) { + + String[] table_column = cf.split("&"); + if ( table_column != 
null && table_column.length == 2 ) { + this.tableName = table_column[0]; + this.columnName = table_column[1]; + this.map.clear(); + + isOk = true; + } + + } else { + + this.tableName = null; + this.columnName = null; + this.map.clear(); + + isOk = true; + } + + } finally { + this.lock.unlock(); + } + + return isOk; + } + + public String getKey() { + return this.tableName + "." + this.columnName; + } + + public List> getValues() { + List> list = new ArrayList>(map.entrySet()); + return list; + } + + + // SQL 解析 + class SQLParser { + + /** + * 去掉库名、去掉`` + * @param tableName + * @return + */ + private String fixName(String tableName) { + if ( tableName != null ) { + tableName = tableName.replace("`", ""); + int dotIdx = tableName.indexOf("."); + if ( dotIdx > 0 ) { + tableName = tableName.substring(1 + dotIdx).trim(); + } + } + return tableName; + } + + /** + * 解析 SQL 获取指定表及条件列的值 + * + * @param sql + * @param tableName + * @param colnumName + * @return + */ + public List parseConditionValues(String sql, String tableName, String colnumName) { + + List values = null; + + if ( sql != null && tableName != null && columnName != null ) { + + values = new ArrayList(); + + MySqlStatementParser parser = new MySqlStatementParser(sql); + SQLStatement stmt = parser.parseStatement(); + + MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor(); + stmt.accept(visitor); + + String currentTable = visitor.getCurrentTable(); + if ( tableName.equalsIgnoreCase( currentTable ) ) { + + List conditions = visitor.getConditions(); + for(Condition condition: conditions) { + + String ccN = condition.getColumn().getName(); + ccN = fixName(ccN); + + if ( colnumName.equalsIgnoreCase( ccN ) ) { + List ccVL = condition.getValues(); + values.addAll( ccVL ); + } + } + } + } + return values; + } + } + + /* ----------------------------------------------------------------- + public static void main(String arg[]) { + + String sql = "SELECT `fnum`, `forg`, `fdst`, `airline`, `ftype` , `ports_of_call`, 
" + + "`scheduled_deptime`, `scheduled_arrtime`, `actual_deptime`, `actual_arrtime`, " + + "`flight_status_code` FROM dynamic " + + "WHERE `fnum` = 'CA123' AND `forg` = 'PEK' AND `fdst` = 'SHA' " + + "AND `scheduled_deptime` BETWEEN 1212121 AND 232323233 " + + "AND `fservice` = 'J' AND `fcategory` = 1 " + + "AND `share_execute_flag` = 1 ORDER BY scheduled_deptime"; + + QueryResult qr = new QueryResult("zhuam", ServerParse.SELECT, sql, 0); + + QueryConditionAnalyzer analyzer = QueryConditionAnalyzer.getInstance(); + analyzer.setTableColumnFilter("dynamic&fnum"); + analyzer.onQuery(qr); + + List> list = analyzer.getValues(); + System.out.println( list ); + } + */ +} \ No newline at end of file diff --git a/src/main/java/io/mycat/statistic/stat/QueryResult.java b/src/main/java/io/mycat/statistic/stat/QueryResult.java new file mode 100644 index 000000000..9ff9c629f --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/QueryResult.java @@ -0,0 +1,71 @@ +package io.mycat.statistic.stat; + +/** + * SQL 执行结果 + * + * @author zhuam + * + */ +public class QueryResult { + + private String user; //用户 + private int sqlType; //SQL类型 + private String sql; //SQL + private long sqlRows; //SQL 返回或影响的结果集长度 + private long netInBytes; //NET IN 字节数 + private long netOutBytes; //NET OUT 字节数 + private long startTime; //开始时间 + private long endTime; //结束时间 + private int resultSize; //结果集大小 + + public QueryResult(String user, int sqlType, String sql, long sqlRows, + long netInBytes, long netOutBytes, long startTime, long endTime + ,int resultSize) { + super(); + this.user = user; + this.sqlType = sqlType; + this.sql = sql; + this.sqlRows = sqlRows; + this.netInBytes = netInBytes; + this.netOutBytes = netOutBytes; + this.startTime = startTime; + this.endTime = endTime; + this.resultSize=resultSize; + } + + public String getUser() { + return user; + } + + public int getSqlType() { + return sqlType; + } + + public String getSql() { + return sql; + } + + public long getSqlRows() { + return 
sqlRows; + } + + public long getNetInBytes() { + return netInBytes; + } + + public long getNetOutBytes() { + return netOutBytes; + } + + public long getStartTime() { + return startTime; + } + + public long getEndTime() { + return endTime; + } + + public int getResultSize() { + return resultSize; + } +} diff --git a/src/main/java/io/mycat/statistic/stat/QueryResultDispatcher.java b/src/main/java/io/mycat/statistic/stat/QueryResultDispatcher.java new file mode 100644 index 000000000..ae29b3e06 --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/QueryResultDispatcher.java @@ -0,0 +1,70 @@ +package io.mycat.statistic.stat; + +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.MycatServer; + +/** + * SQL执行后的派发 QueryResult 事件 + * + * @author zhuam + * + */ +public class QueryResultDispatcher { + + private static final Logger LOGGER = LoggerFactory.getLogger(QueryResultDispatcher.class); + + private static List listeners = new CopyOnWriteArrayList(); + + // 初始化强制加载 + static { + listeners.add( UserStatAnalyzer.getInstance() ); + listeners.add( TableStatAnalyzer.getInstance() ); + listeners.add( QueryConditionAnalyzer.getInstance() ); + } + + public static void addListener(QueryResultListener listener) { + if (listener == null) { + throw new NullPointerException(); + } + listeners.add(listener); + } + + public static void removeListener(QueryResultListener listener) { + listeners.remove(listener); + } + + public static void removeAllListener() { + listeners.clear(); + } + + public static void dispatchQuery(final QueryResult queryResult) { + + + // 是否派发 QueryResult 事件 + int useSqlStat = MycatServer.getInstance().getConfig().getSystem().getUseSqlStat(); + if ( useSqlStat == 0 ) { + return; + } + + //TODO:异步分发,待进一步调优 + MycatServer.getInstance().getBusinessExecutor().execute(new Runnable() { + + public void run() { + + for(QueryResultListener listener: listeners) { + try 
{ + listener.onQueryResult( queryResult ); + } catch(Exception e) { + LOGGER.error("error:",e); + } + } + } + }); + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/statistic/stat/QueryResultListener.java b/src/main/java/io/mycat/statistic/stat/QueryResultListener.java new file mode 100644 index 000000000..313646f1f --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/QueryResultListener.java @@ -0,0 +1,7 @@ +package io.mycat.statistic.stat; + +public interface QueryResultListener { + + public void onQueryResult(QueryResult queryResult); + +} diff --git a/src/main/java/io/mycat/statistic/stat/SqlFrequency.java b/src/main/java/io/mycat/statistic/stat/SqlFrequency.java new file mode 100644 index 000000000..a10fb2a2b --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/SqlFrequency.java @@ -0,0 +1,88 @@ +package io.mycat.statistic.stat; + +import java.util.concurrent.atomic.AtomicLong; + +public class SqlFrequency implements Comparable{ + private String sql; + private AtomicLong count = new AtomicLong(0); + private long lastTime = 0; + private long executeTime = 0; + private long allExecuteTime = 0; + private long maxTime = 0; + private long avgTime = 0; + private long minTime = 0; + + public String getSql() { + return sql; + } + + public void setSql(String sql) { + this.sql = sql; + } + + public long getCount() { + return this.count.get(); + } + + public void incCount() { + this.count.getAndIncrement(); + } + + public long getLastTime() { + return lastTime; + } + + public void setLastTime(long lastTime) { + this.lastTime = lastTime; + } + + public long getExecuteTime() { + return executeTime; + } + + public long getMaxTime() { + return maxTime; + } + + public long getMinTime() { + return minTime; + } + + public long getAvgTime() { + return avgTime; + } + + public void setExecuteTime(long execTime) { + if (execTime > this.maxTime) { + this.maxTime = execTime; + } + if (this.minTime == 0) { + this.minTime = execTime; + } + if (execTime > 
0 + && execTime < this.minTime) { + this.minTime = execTime; + } + this.allExecuteTime+=execTime; + if (count.get() > 0) { + this.avgTime = this.allExecuteTime / this.count.get(); + } + this.executeTime = execTime; + } + + @Override + public int compareTo(SqlFrequency o) { + long para = o.count.get() - count.get(); + long para2 = o.lastTime - lastTime; + return para == 0L ? (int)(para2 == 0L ? o.allExecuteTime - allExecuteTime : para2) : (int)para ; + } + + @Override + public boolean equals(Object obj) { + if(obj instanceof SqlFrequency) { + return this.compareTo((SqlFrequency)obj) == 0; + } else { + return super.equals(obj); + } + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/statistic/stat/SqlResultSet.java b/src/main/java/io/mycat/statistic/stat/SqlResultSet.java new file mode 100644 index 000000000..2c2c10cbc --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/SqlResultSet.java @@ -0,0 +1,30 @@ +package io.mycat.statistic.stat; +/** + * 结果集记录模型 + */ +public class SqlResultSet { + private String sql; + private int resultSetSize = 0; + private int count; + + public String getSql() { + return sql; + } + public void setSql(String sql) { + this.sql = sql; + } + public int getResultSetSize() { + return resultSetSize; + } + public void setResultSetSize(int resultSetSize) { + this.resultSetSize = resultSetSize; + } + public int getCount() { + return count; + } + public void count() { + this.count++; + } + + +} diff --git a/src/main/java/io/mycat/statistic/stat/SqlResultSizeRecorder.java b/src/main/java/io/mycat/statistic/stat/SqlResultSizeRecorder.java new file mode 100644 index 000000000..c9f943175 --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/SqlResultSizeRecorder.java @@ -0,0 +1,65 @@ +package io.mycat.statistic.stat; + +import java.util.concurrent.ConcurrentHashMap; + +import com.alibaba.druid.sql.visitor.ParameterizedOutputVisitorUtils; + +/** + * 大结果集 SQL + * + */ +public class SqlResultSizeRecorder { + + private 
ConcurrentHashMap sqlResultSetMap = new ConcurrentHashMap(); + + + + public void addSql(String sql,int resultSetSize ){ + SqlResultSet sqlResultSet; + SqlParser sqlParserHigh = new SqlParser(); + sql=sqlParserHigh.mergeSql(sql); + if(this.sqlResultSetMap.containsKey(sql)){ + sqlResultSet =this.sqlResultSetMap.get(sql); + sqlResultSet.count(); + sqlResultSet.setSql(sql); + System.out.println(sql); + sqlResultSet.setResultSetSize(resultSetSize); + }else{ + sqlResultSet = new SqlResultSet(); + sqlResultSet.setResultSetSize(resultSetSize); + sqlResultSet.setSql(sql); + this.sqlResultSetMap.put(sql, sqlResultSet); + } + } + + + /** + * 获取 SQL 大结果集记录 + */ + public ConcurrentHashMap getSqlResultSet() { + + return sqlResultSetMap; + } + + + public void clearSqlResultSet() { + sqlResultSetMap.clear(); + } + + class SqlParser { + + public String fixSql(String sql) { + if ( sql != null) + return sql.replace("\n", " "); + return sql; + } + + public String mergeSql(String sql) { + + String newSql = ParameterizedOutputVisitorUtils.parameterize(sql, "mysql"); + return fixSql( newSql ); + } + + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/statistic/stat/TableStat.java b/src/main/java/io/mycat/statistic/stat/TableStat.java new file mode 100644 index 000000000..2ba86bfe8 --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/TableStat.java @@ -0,0 +1,152 @@ +package io.mycat.statistic.stat; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; + +import io.mycat.server.parser.ServerParse; + +/** + * SQL统计中,统计出来每个表的读,写的TPS,分辨出当前最热的表, + * 并且根据是否有关联JOIN来区分为几个不同的“区域”,是一个重要功能,意味着,某些表可以转移到其他的数据库里,做智能优化。 + * + * 首先是每个表的读写TPS 2个指标,有时段。然后是 哪那些表有JOIN查询 ,来区分 独立的区域 + * + * @author zhuam + * + */ +public class TableStat implements Comparable { + + //1、读写 + //2、主表 + //3、关联表 次数 + //4、读写 TPS + + public String table; + + private final AtomicLong rCount = new AtomicLong(0); + 
private final AtomicLong wCount = new AtomicLong(0); + + // 关联表 + private final ConcurrentHashMap relaTableMap = new ConcurrentHashMap(); + + /** + * 最后执行时间 + */ + private long lastExecuteTime; + + + public TableStat(String table) { + super(); + this.table = table; + } + + public void reset() { + this.rCount.set(0); + this.wCount.set(0); + this.relaTableMap.clear(); + this.lastExecuteTime = 0; + } + + public void update(int sqlType, String sql, long startTime, long endTime, List relaTables) { + + //记录 RW + switch(sqlType) { + case ServerParse.SELECT: + this.rCount.incrementAndGet(); + break; + case ServerParse.UPDATE: + case ServerParse.INSERT: + case ServerParse.DELETE: + case ServerParse.REPLACE: + this.wCount.incrementAndGet(); + break; + } + + // 记录 关联表执行情况 + for(String table: relaTables) { + RelaTable relaTable = this.relaTableMap.get( table ); + if ( relaTable == null ) { + relaTable = new RelaTable(table, 1); + } else { + relaTable.incCount(); + } + this.relaTableMap.put(table, relaTable); + } + + this.lastExecuteTime = endTime; + } + + public String getTable() { + return table; + } + + public long getRCount() { + return this.rCount.get(); + } + + public long getWCount() { + return this.wCount.get(); + } + + public int getCount() { + return (int)(getRCount()+getWCount()); + } + + public List getRelaTables() { + List tables = new ArrayList(); + tables.addAll( this.relaTableMap.values() ); + return tables; + } + + public long getLastExecuteTime() { + return lastExecuteTime; + } + + @Override + public int compareTo(TableStat o) { + long para = o.getCount() - getCount(); + long para2 = o.getLastExecuteTime() - getLastExecuteTime(); + return para == 0? (para2 == 0? 
o.getTable().hashCode() - getTable().hashCode() :(int) para2) : (int)para ; + } + + @Override + public boolean equals(Object obj) { + if(obj instanceof TableStat) { + return this.compareTo((TableStat)obj) == 0; + } else { + return super.equals(obj); + } + } + + /** + * 关联表 + * @author Ben + * + */ + public static class RelaTable { + + private String tableName; + private int count; + + public RelaTable(String tableName, int count) { + super(); + this.tableName = tableName; + this.count = count; + } + + public String getTableName() { + return this.tableName; + } + + public int getCount() { + return this.count; + } + + public void incCount() { + this.count++; + } + } + +} diff --git a/src/main/java/io/mycat/statistic/stat/TableStatAnalyzer.java b/src/main/java/io/mycat/statistic/stat/TableStatAnalyzer.java new file mode 100644 index 000000000..219dfa82c --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/TableStatAnalyzer.java @@ -0,0 +1,228 @@ +package io.mycat.statistic.stat; + +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.statement.SQLDeleteStatement; +import com.alibaba.druid.sql.ast.statement.SQLExprTableSource; +import com.alibaba.druid.sql.ast.statement.SQLInsertStatement; +import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; +import com.alibaba.druid.sql.ast.statement.SQLUpdateStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlReplaceStatement; +import com.alibaba.druid.sql.dialect.mysql.visitor.MySqlASTVisitorAdapter; +import com.alibaba.druid.sql.parser.SQLParserUtils; +import com.alibaba.druid.sql.parser.SQLStatementParser; +import com.alibaba.druid.sql.visitor.SQLASTVisitorAdapter; +import 
com.alibaba.druid.util.JdbcConstants; + +import io.mycat.server.parser.ServerParse; +import io.mycat.util.StringUtil; + +/** + * 按SQL表名进行计算 + * + * @author zhuam + * + */ +public class TableStatAnalyzer implements QueryResultListener { + + private static final Logger LOGGER = LoggerFactory.getLogger(TableStatAnalyzer.class); + + private Map tableStatMap = new ConcurrentHashMap<>(); + private ReentrantLock lock = new ReentrantLock(); + + //解析SQL 提取表名 + private SQLParser sqlParser = new SQLParser(); + + private final static TableStatAnalyzer instance = new TableStatAnalyzer(); + + private TableStatAnalyzer() {} + + public static TableStatAnalyzer getInstance() { + return instance; + } + + @Override + public void onQueryResult(QueryResult queryResult) { + + int sqlType = queryResult.getSqlType(); + String sql = queryResult.getSql(); + switch(sqlType) { + case ServerParse.SELECT: + case ServerParse.UPDATE: + case ServerParse.INSERT: + case ServerParse.DELETE: + case ServerParse.REPLACE: + + //关联表提取 + String masterTable = null; + List relaTables = new ArrayList(); + + List tables = sqlParser.parseTableNames(sql); + for(int i = 0; i < tables.size(); i++) { + String table = tables.get(i); + if ( i == 0 ) { + masterTable = table; + } else { + relaTables.add( table ); + } + } + + if ( masterTable != null ) { + TableStat tableStat = getTableStat( masterTable ); + tableStat.update(sqlType, sql, queryResult.getStartTime(), queryResult.getEndTime(), relaTables); + } + break; + } + } + + private TableStat getTableStat(String tableName) { + TableStat userStat = tableStatMap.get(tableName); + if (userStat == null) { + if(lock.tryLock()){ + try{ + userStat = new TableStat(tableName); + tableStatMap.put(tableName, userStat); + } finally { + lock.unlock(); + } + }else{ + while(userStat == null){ + userStat = tableStatMap.get(tableName); + } + } + } + return userStat; + } + + public Map getTableStatMap() { + Map map = new LinkedHashMap(tableStatMap.size()); + map.putAll(tableStatMap); 
+ return map; + } + + /** + * 获取 table 访问排序统计 + */ + public List getTableStats(boolean isClear) { + SortedSet tableStatSortedSet = new TreeSet<>(tableStatMap.values()); + List list = new ArrayList<>(tableStatSortedSet); + return list; + } + + public void ClearTable() { + tableStatMap.clear(); + } + + + /** + * 解析 table name + */ + private static class SQLParser { + + private SQLStatement parseStmt(String sql) { + SQLStatementParser statParser = SQLParserUtils.createSQLStatementParser(sql, "mysql"); + SQLStatement stmt = statParser.parseStatement(); + return stmt; + } + + /** + * 去掉库名、去掉`` + * @param tableName + * @return + */ + private String fixName(String tableName) { + if ( tableName != null ) { + tableName = tableName.replace("`", ""); + int dotIdx = tableName.indexOf("."); + if ( dotIdx > 0 ) { + tableName = tableName.substring(1 + dotIdx).trim(); + } + } + return tableName; + } + + /** + * 解析 SQL table name + */ + public List parseTableNames(String sql) { + final List tables = new ArrayList(); + try{ + + SQLStatement stmt = parseStmt(sql); + if (stmt instanceof MySqlReplaceStatement ) { + String table = ((MySqlReplaceStatement)stmt).getTableName().getSimpleName(); + tables.add( fixName( table ) ); + + } else if (stmt instanceof SQLInsertStatement ) { + String table = ((SQLInsertStatement)stmt).getTableName().getSimpleName(); + tables.add( fixName( table ) ); + + } else if (stmt instanceof SQLUpdateStatement ) { + String table = ((SQLUpdateStatement)stmt).getTableName().getSimpleName(); + tables.add( fixName( table ) ); + + } else if (stmt instanceof SQLDeleteStatement ) { + String table = ((SQLDeleteStatement)stmt).getTableName().getSimpleName(); + tables.add( fixName( table ) ); + + } else if (stmt instanceof SQLSelectStatement ) { + + //TODO: modify by owenludong + String dbType = ((SQLSelectStatement) stmt).getDbType(); + if( !StringUtil.isEmpty(dbType) && JdbcConstants.MYSQL.equals(dbType) ){ + stmt.accept(new MySqlASTVisitorAdapter() { + public boolean 
visit(SQLExprTableSource x){ + tables.add( fixName( x.toString() ) ); + return super.visit(x); + } + }); + + } else { + stmt.accept(new SQLASTVisitorAdapter() { + public boolean visit(SQLExprTableSource x){ + tables.add( fixName( x.toString() ) ); + return super.visit(x); + } + }); + } + } + } catch (Exception e) { + LOGGER.error("TableStatAnalyzer err:",e.toString()); + } + + return tables; + } + } + + +/* public static void main(String[] args) { + + List sqls = new ArrayList(); + + sqls.add( "SELECT id, name, age FROM v1select1 a LEFT OUTER JOIN v1select2 b ON a.id = b.id WHERE a.name = 12 "); + sqls.add( "insert into v1user_insert(id, name) values(1,3)"); + sqls.add( "delete from v1user_delete where id= 2"); + sqls.add( "update v1user_update set id=2 where id=3"); + sqls.add( "select ename,deptno,sal from v1user_subquery1 where deptno=(select deptno from v1user_subquery2 where loc='NEW YORK')"); + sqls.add( "replace into v1user_insert(id, name) values(1,3)"); + sqls.add( "select * from v1xx where id=3 group by zz"); + sqls.add( "select * from v1yy where xx=3 limit 0,3"); + sqls.add( "SELECT * FROM (SELECT * FROM posts ORDER BY dateline DESC) GROUP BY tid ORDER BY dateline DESC LIMIT 10"); + + for(String sql: sqls) { + List tables = TableStatAnalyzer.getInstance().sqlParser.parseTableNames(sql); + for(String t: tables) { + System.out.println( t ); + } + } + } + */ + +} diff --git a/src/main/java/io/mycat/statistic/stat/UserSqlHighStat.java b/src/main/java/io/mycat/statistic/stat/UserSqlHighStat.java new file mode 100644 index 000000000..fa6e94abd --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/UserSqlHighStat.java @@ -0,0 +1,102 @@ +package io.mycat.statistic.stat; + +import java.util.*; +import java.util.Map.Entry; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.locks.ReentrantLock; + +import 
com.alibaba.druid.sql.visitor.ParameterizedOutputVisitorUtils; +import io.mycat.statistic.SQLRecord; + +public class UserSqlHighStat { + + private static final int CAPACITY_SIZE = 1024; + + private Map sqlFrequencyMap = new ConcurrentHashMap<>(); + + private ReentrantLock lock = new ReentrantLock(); + + + private SqlParser sqlParser = new SqlParser(); + + public void addSql(String sql, long executeTime,long startTime, long endTime ){ + String newSql = this.sqlParser.mergeSql(sql); + SqlFrequency frequency = this.sqlFrequencyMap.get(newSql); + if ( frequency == null) { + //防止新建的时候的并发问题,只有新建的时候有锁 + if(lock.tryLock()){ + try{ + frequency = new SqlFrequency(); + frequency.setSql( newSql ); + } finally { + lock.unlock(); + } + } else{ + while(frequency == null){ + frequency = this.sqlFrequencyMap.get(newSql); + } + } + } + frequency.setLastTime( endTime ); + frequency.incCount(); + //TODO 目前setExecuteTime方法由于弃用锁,所以某些参数不准确,为了性能,放弃这些参数的准确性。下一步期待更多优化 + frequency.setExecuteTime(executeTime); + this.sqlFrequencyMap.put(newSql, frequency); + } + + + /** + * 获取 SQL 访问频率 + */ + public List getSqlFrequency(boolean isClear) { + List list = new ArrayList<>(this.sqlFrequencyMap.values()); + if(isClear){ + clearSqlFrequency(); + } + return list; + } + + + private void clearSqlFrequency() { + sqlFrequencyMap.clear(); + } + + public void recycle() { + if(sqlFrequencyMap.size() > CAPACITY_SIZE){ + Map sqlFrequencyMap2 = new ConcurrentHashMap<>(); + SortedSet sqlFrequencySortedSet = new TreeSet<>(this.sqlFrequencyMap.values()); + List keyList = new ArrayList(sqlFrequencySortedSet); + int i = 0; + for(SqlFrequency key : keyList){ + if(i == CAPACITY_SIZE) { + break; + } + sqlFrequencyMap2.put(key.getSql(),key); + i++; + } + sqlFrequencyMap = sqlFrequencyMap2; + } + } + + + + private static class SqlParser { + + public String fixSql(String sql) { + if ( sql != null) { + return sql.replace("\n", " "); + } + return sql; + } + + public String mergeSql(String sql) { + + String newSql = 
ParameterizedOutputVisitorUtils.parameterize(sql, "mysql"); + return fixSql( newSql ); + } + + } + +} diff --git a/src/main/java/io/mycat/statistic/stat/UserSqlLargeStat.java b/src/main/java/io/mycat/statistic/stat/UserSqlLargeStat.java new file mode 100644 index 000000000..b1f238700 --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/UserSqlLargeStat.java @@ -0,0 +1,112 @@ +package io.mycat.statistic.stat; + +import java.util.ArrayList; +import java.util.List; +import java.util.SortedSet; +import java.util.concurrent.ConcurrentSkipListSet; + +public class UserSqlLargeStat { + + private final int count; + private SortedSet sqls; + + public UserSqlLargeStat(int count) { + this.count = count; + this.sqls = new ConcurrentSkipListSet<>(); + } + + public List getSqls() { + List list = new ArrayList<>(sqls); + return list; + } + + public void add(String sql, long sqlRows, long executeTime, long startTime, long endTime) { + SqlLarge sqlLarge = new SqlLarge(sql, sqlRows, executeTime, startTime, endTime); + this.add( sqlLarge ); + } + + public void add(SqlLarge sql) { + sqls.add(sql); + } + + public void reset() { + this.clear(); + } + + public void clear() { + sqls.clear(); + } + + public void recycle() { + if(sqls.size() > count){ + SortedSet sqls2 = new ConcurrentSkipListSet<>(); + List keyList = new ArrayList(sqls); + int i = 0; + for(SqlLarge key : keyList){ + if(i == count) { + break; + } + sqls2.add(key); + i++; + } + sqls = sqls2; + } + } + + /** + * 记录 SQL 及返回行数 + */ + public static class SqlLarge implements Comparable { + + private String sql; + private long sqlRows; + private long executeTime; + private long startTime; + private long endTime; + + public SqlLarge(String sql, long sqlRows, long executeTime, long startTime, long endTime) { + super(); + this.sql = sql; + this.sqlRows = sqlRows; + this.executeTime = executeTime; + this.startTime = startTime; + this.endTime = endTime; + } + + public String getSql() { + return sql; + } + + public long getSqlRows() 
{ + return sqlRows; + } + + public long getStartTime() { + return startTime; + } + + public long getExecuteTime() { + return executeTime; + } + + public long getEndTime() { + return endTime; + } + + @Override + public int compareTo(SqlLarge o) { + long para = o.sqlRows - sqlRows; + return para == 0 ? (o.sql.hashCode() - sql.hashCode()) :(int) (para); + } + + @Override + public boolean equals(Object arg0) { + return super.equals(arg0); + } + + @Override + public int hashCode() { + return super.hashCode(); + } + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/statistic/stat/UserSqlLastStat.java b/src/main/java/io/mycat/statistic/stat/UserSqlLastStat.java new file mode 100644 index 000000000..925aa607f --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/UserSqlLastStat.java @@ -0,0 +1,105 @@ +package io.mycat.statistic.stat; + +import java.util.*; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.locks.ReentrantLock; + +/** + * 最后执行的 Sql + * + * @author zhuam + * + */ +public class UserSqlLastStat { + private static final int MAX_RECORDS = 1024; + private SortedSet sqls; + + public UserSqlLastStat(int count) { + this.sqls = new ConcurrentSkipListSet<>(); + } + + public List getSqls() { + List keyList = new ArrayList(sqls); + return keyList; + } + + public void add(String sql, long executeTime, long startTime, long endTime ) { + SqlLast sqlLast = new SqlLast(sql, executeTime, startTime, endTime); + sqls.add(sqlLast); + } + + public void reset() { + this.clear(); + } + + public void clear() { + sqls.clear(); + } + + public void recycle(){ + if(sqls.size() > MAX_RECORDS){ + SortedSet sqls2 = new ConcurrentSkipListSet<>(); + List keyList = new ArrayList(sqls); + int i = 0; + for(SqlLast key : keyList){ + if(i == MAX_RECORDS) { + break; + } + sqls2.add(key); + i++; + } + sqls = sqls2; + } + } + /** + * 记录SQL + */ + public static class SqlLast implements Comparable{ 
+ + private String sql; + private long executeTime; + private long startTime; + private long endTime; + + public SqlLast(String sql, long executeTime, long startTime, long endTime) { + super(); + this.sql = sql; + this.executeTime = executeTime; + this.startTime = startTime; + this.endTime = endTime; + } + + public String getSql() { + return sql; + } + + public long getStartTime() { + return startTime; + } + + public long getExecuteTime() { + return executeTime; + } + + public long getEndTime() { + return endTime; + } + + @Override + public int compareTo(SqlLast o) { + long st1 = o == null ? 0 : o.getStartTime(); + return (int) ( st1 - this.getStartTime()); + } + + @Override + public boolean equals(Object obj) { + if(obj instanceof SqlLast) { + return this.compareTo((SqlLast)obj) == 0; + } else { + return super.equals(obj); + } + } + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/statistic/stat/UserSqlRWStat.java b/src/main/java/io/mycat/statistic/stat/UserSqlRWStat.java new file mode 100644 index 000000000..011778f7e --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/UserSqlRWStat.java @@ -0,0 +1,182 @@ +package io.mycat.statistic.stat; + +import java.util.TimeZone; +import java.util.concurrent.atomic.AtomicLong; + +import io.mycat.server.parser.ServerParse; + +/** + * SQL R/W 执行状态 + * 因为这里的所有元素都近似为非必须原子更新的,即: + * 例如:rCount和netInBytes没有必要非得保持同步更新,在同一个事务内 + * 只要最后更新了即可,所以将其中的元素拆成一个一个原子类,没必要保证精确的保持原样不加任何锁 + * + * @author zhuam + * + */ +public class UserSqlRWStat { + + + /** + * R/W 次数 + */ + private AtomicLong rCount = new AtomicLong(0L); + private AtomicLong wCount = new AtomicLong(0L); + + /** + * 每秒QPS + */ + private int qps = 0; + + /** + * Net In/Out 字节数 + */ + private AtomicLong netInBytes = new AtomicLong(0L); + private AtomicLong netOutBytes = new AtomicLong(0L); + + /** + * 最大的并发 + */ + private int concurrentMax = 1; + + /** + * 执行耗时 + * + * 10毫秒内、 10 - 200毫秒内、 1秒内、 超过 1秒 + */ + private final Histogram timeHistogram = new 
Histogram( new long[] { 10, 200, 1000, 2000 } ); + + /** + * 执行所在时段 + * + * 22-06 夜间、 06-13 上午、 13-18下午、 18-22 晚间 + */ + private final Histogram executeHistogram = new Histogram(new long[] { 6, 13, 18, 22 }); + + /** + * 最后执行时间 + * 不用很精确,所以不加任何锁 + */ + private long lastExecuteTime; + + + private int time_zone_offset = 0; + private int one_hour = 3600 * 1000; + + public UserSqlRWStat() { + this.time_zone_offset = TimeZone.getDefault().getRawOffset(); + } + + public void reset() { + this.rCount = new AtomicLong(0L); + this.wCount = new AtomicLong(0L); + this.concurrentMax = 1; + this.lastExecuteTime = 0; + this.netInBytes = new AtomicLong(0L); + this.netOutBytes = new AtomicLong(0L); + + this.timeHistogram.reset(); + this.executeHistogram.reset(); + } + + public void add(int sqlType, String sql, long executeTime, long netInBytes, long netOutBytes, long startTime, long endTime) { + + + switch(sqlType) { + case ServerParse.SELECT: + case ServerParse.SHOW: + this.rCount.incrementAndGet(); + break; + case ServerParse.UPDATE: + case ServerParse.INSERT: + case ServerParse.DELETE: + case ServerParse.REPLACE: + this.wCount.incrementAndGet(); + break; + } + + //SQL执行所在的耗时区间 + if ( executeTime <= 10 ) { + this.timeHistogram.record(10); + + } else if ( executeTime > 10 && executeTime <= 200 ) { + this.timeHistogram.record(200); + + } else if ( executeTime > 200 && executeTime <= 1000 ) { + this.timeHistogram.record(1000); + + } else if ( executeTime > 1000) { + this.timeHistogram.record(2000); + } + + //SQL执行所在的时间区间 + long hour0 = endTime / ( 24L * (long)one_hour ) * ( 24L * (long)one_hour )- (long)time_zone_offset; + long hour06 = hour0 + 6L * (long)one_hour - 1L; + long hour13 = hour0 + 13L * (long)one_hour - 1L; + long hour18 = hour0 + 18L * (long)one_hour - 1L; + long hour22 = hour0 + 22L * (long)one_hour - 1L; + + if ( endTime <= hour06 || endTime > hour22 ) { + this.executeHistogram.record(6); + + } else if ( endTime > hour06 && endTime <= hour13 ) { + 
this.executeHistogram.record(13); + + } else if ( endTime > hour13 && endTime <= hour18 ) { + this.executeHistogram.record(18); + + } else if ( endTime > hour18 && endTime <= hour22 ) { + this.executeHistogram.record(22); + } + + this.lastExecuteTime = endTime; + + this.netInBytes.addAndGet(netInBytes); + this.netOutBytes.addAndGet(netOutBytes); + } + + public long getLastExecuteTime() { + return lastExecuteTime; + } + + public long getNetInBytes() { + return netInBytes.get(); + } + + public long getNetOutBytes() { + return netOutBytes.get(); + } + + public int getConcurrentMax() { + return concurrentMax; + } + + public void setConcurrentMax(int concurrentMax) { + this.concurrentMax = concurrentMax; + } + + public int getQps() { + return qps; + } + + public void setQps(int qps) { + this.qps = qps; + } + + public long getRCount() { + return this.rCount.get(); + } + + public long getWCount() { + return this.wCount.get(); + } + + public Histogram getTimeHistogram() { + return this.timeHistogram; + } + + public Histogram getExecuteHistogram() { + return this.executeHistogram; + } + +} diff --git a/src/main/java/io/mycat/statistic/stat/UserStat.java b/src/main/java/io/mycat/statistic/stat/UserStat.java new file mode 100644 index 000000000..12c2830e5 --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/UserStat.java @@ -0,0 +1,199 @@ +package io.mycat.statistic.stat; + +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import io.mycat.MycatServer; +import io.mycat.server.parser.ServerParse; +import io.mycat.statistic.SQLRecord; +import io.mycat.statistic.SQLRecorder; + +/** + * 用户状态 + * + * @author Ben + */ +public class UserStat { + + private long SQL_SLOW_TIME = 100; + + private String user; + + /** + * 最大的并发 + */ + private final AtomicInteger runningCount = new AtomicInteger(); + private final AtomicInteger concurrentMax = new AtomicInteger(); + + /** + * SQL 大集合插入、返回记录 + */ + private UserSqlLargeStat 
sqlLargeStat = null; + + /** + * SQL 执行记录 + */ + private UserSqlLastStat sqlLastStat = null; + + /** + * CURD 执行分布 + */ + private UserSqlRWStat sqlRwStat = null; + + /** + * 用户高频SQL分析 + */ + private UserSqlHighStat sqlHighStat = null; + + /** + * 慢查询记录器 TOP 10 + */ + private SQLRecorder sqlRecorder; + + /** + * 大结果集记录 + */ + private SqlResultSizeRecorder sqlResultSizeRecorder = null; + + /** + * 读写锁 + */ +// private ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + + public UserStat(String user) { + super(); + + int size = MycatServer.getInstance().getConfig().getSystem().getSqlRecordCount(); + + this.user = user; + this.sqlRwStat = new UserSqlRWStat(); + this.sqlLastStat = new UserSqlLastStat(50); + this.sqlLargeStat = new UserSqlLargeStat(10); + this.sqlHighStat = new UserSqlHighStat(); + this.sqlRecorder = new SQLRecorder( size ); + this.sqlResultSizeRecorder = new SqlResultSizeRecorder(); + } + + public String getUser() { + return user; + } + + public SQLRecorder getSqlRecorder() { + return sqlRecorder; + } + + public UserSqlRWStat getRWStat() { + return sqlRwStat; + } + + public UserSqlLastStat getSqlLastStat() { + return this.sqlLastStat; + } + + public UserSqlLargeStat getSqlLargeRowStat() { + return this.sqlLargeStat; + } + + public UserSqlHighStat getSqlHigh(){ + return this.sqlHighStat; + } + + public SqlResultSizeRecorder getSqlResultSizeRecorder() { + return this.sqlResultSizeRecorder; + } + + + public void setSlowTime(long time) { + this.SQL_SLOW_TIME = time; + this.sqlRecorder.clear(); + } + + public void clearSql() { + this.sqlLastStat.reset(); + } + + public void clearSqlslow() { + this.sqlRecorder.clear(); + } + + public void clearRwStat() { + this.sqlRwStat.reset(); + } + + public void reset() { + this.sqlRecorder.clear(); + this.sqlResultSizeRecorder.clearSqlResultSet(); + this.sqlRwStat.reset(); + this.sqlLastStat.reset(); + + this.runningCount.set(0); + this.concurrentMax.set(0); + } + + /** + * 更新状态 + * + * @param sqlType + * 
@param sql + * @param startTime + */ + public void update(int sqlType, String sql, long sqlRows, + long netInBytes, long netOutBytes, long startTime, long endTime ,int rseultSetSize) { + + //before 计算最大并发数 + //----------------------------------------------------- + int invoking = runningCount.incrementAndGet(); + for (;;) { + int max = concurrentMax.get(); + if (invoking > max) { + if (concurrentMax.compareAndSet(max, invoking)) { + break; + } + } else { + break; + } + } + //----------------------------------------------------- + +// this.lock.writeLock().lock(); +// try { + + //慢查询记录 + long executeTime = endTime - startTime; + if ( executeTime >= SQL_SLOW_TIME ){ + SQLRecord record = new SQLRecord(); + record.executeTime = executeTime; + record.statement = sql; + record.startTime = startTime; + this.sqlRecorder.add(record); + } + + //执行状态记录 + this.sqlRwStat.setConcurrentMax( concurrentMax.get() ); + this.sqlRwStat.add(sqlType, sql, executeTime, netInBytes, netOutBytes, startTime, endTime); + + //记录最新执行的SQL + this.sqlLastStat.add(sql, executeTime, startTime, endTime ); + + //记录高频SQL + this.sqlHighStat.addSql(sql, executeTime, startTime, endTime); + + //记录SQL Select 返回超过 10000 行的 大结果集 + if ( sqlType == ServerParse.SELECT && sqlRows > 10000 ) { + this.sqlLargeStat.add(sql, sqlRows, executeTime, startTime, endTime); + } + + //记录超过阈值的大结果集sql + if(rseultSetSize>=MycatServer.getInstance().getConfig().getSystem().getMaxResultSet()){ + this.sqlResultSizeRecorder.addSql(sql, rseultSetSize); + } + +// } finally { +// this.lock.writeLock().unlock(); +// } + + //after + //----------------------------------------------------- + runningCount.decrementAndGet(); + } +} \ No newline at end of file diff --git a/src/main/java/io/mycat/statistic/stat/UserStatAnalyzer.java b/src/main/java/io/mycat/statistic/stat/UserStatAnalyzer.java new file mode 100644 index 000000000..0d4d3aa24 --- /dev/null +++ b/src/main/java/io/mycat/statistic/stat/UserStatAnalyzer.java @@ -0,0 +1,59 @@ +package 
io.mycat.statistic.stat; + +import java.util.LinkedHashMap; +import java.util.Map; + +import io.mycat.server.parser.ServerParse; + +/** + * 按访问用户 计算SQL的运行状态 + * + * @author Ben + * + */ +public class UserStatAnalyzer implements QueryResultListener { + + private LinkedHashMap userStatMap = new LinkedHashMap(); + + private final static UserStatAnalyzer instance = new UserStatAnalyzer(); + + private UserStatAnalyzer() { + } + + public static UserStatAnalyzer getInstance() { + return instance; + } + + @Override + public void onQueryResult(QueryResult query) { + switch( query.getSqlType() ) { + case ServerParse.SELECT: + case ServerParse.UPDATE: + case ServerParse.INSERT: + case ServerParse.DELETE: + case ServerParse.REPLACE: + String user = query.getUser(); + int sqlType = query.getSqlType(); + String sql = query.getSql(); + long sqlRows = query.getSqlRows(); + long netInBytes = query.getNetInBytes(); + long netOutBytes = query.getNetOutBytes(); + long startTime = query.getStartTime(); + long endTime = query.getEndTime(); + int resultSetSize=query.getResultSize(); + UserStat userStat = userStatMap.get(user); + if (userStat == null) { + userStat = new UserStat(user); + userStatMap.put(user, userStat); + } + userStat.update(sqlType, sql, sqlRows, netInBytes, netOutBytes, startTime, endTime,resultSetSize); + break; + } + } + + public Map getUserStatMap() { + Map map = new LinkedHashMap(userStatMap.size()); + map.putAll(userStatMap); + return map; + } +} diff --git a/src/main/java/io/mycat/util/ByteBufferUtil.java b/src/main/java/io/mycat/util/ByteBufferUtil.java index 6dd09e581..9f90a2937 100644 --- a/src/main/java/io/mycat/util/ByteBufferUtil.java +++ b/src/main/java/io/mycat/util/ByteBufferUtil.java @@ -194,8 +194,9 @@ else if (startIndex >= buffer.limit()) for (int i = startIndex; i >= buffer.position(); i--) { - if (valueToFind == buffer.get(i)) + if (valueToFind == buffer.get(i)) { return i; + } } return -1; @@ -233,8 +234,9 @@ public static ByteBuffer 
clone(ByteBuffer buffer) { assert buffer != null; - if (buffer.remaining() == 0) + if (buffer.remaining() == 0) { return EMPTY_BYTE_BUFFER; + } ByteBuffer clone = ByteBuffer.allocate(buffer.remaining()); @@ -357,8 +359,9 @@ public static InputStream inputStream(ByteBuffer bytes) { public int read() { - if (!copy.hasRemaining()) + if (!copy.hasRemaining()) { return -1; + } return copy.get() & 0xFF; } @@ -366,8 +369,9 @@ public int read() @Override public int read(byte[] bytes, int off, int len) { - if (!copy.hasRemaining()) + if (!copy.hasRemaining()) { return -1; + } len = Math.min(len, copy.remaining()); copy.get(bytes, off, len); @@ -398,9 +402,12 @@ public int available() */ public static int compareSubArrays(ByteBuffer bytes1, int offset1, ByteBuffer bytes2, int offset2, int length) { - if (bytes1 == null) + if (bytes1 == null) { return bytes2 == null ? 0 : -1; - if (bytes2 == null) return 1; + } + if (bytes2 == null) { + return 1; + } assert bytes1.limit() >= offset1 + length : "The first byte array isn't long enough for the specified offset and length."; assert bytes2.limit() >= offset2 + length : "The second byte array isn't long enough for the specified offset and length."; @@ -408,10 +415,12 @@ public static int compareSubArrays(ByteBuffer bytes1, int offset1, ByteBuffer by { byte byte1 = bytes1.get(offset1 + i); byte byte2 = bytes2.get(offset2 + i); - if (byte1 == byte2) - continue; +// if (byte1 == byte2) +// continue; // compare non-equal bytes as unsigned - return (byte1 & 0xFF) < (byte2 & 0xFF) ? -1 : 1; + if( byte1 != byte2 ) { + return (byte1 & 0xFF) < (byte2 & 0xFF) ? -1 : 1; + } } return 0; } @@ -426,8 +435,9 @@ public static ByteBuffer bytes(InetAddress address) // Returns whether {@code prefix} is a prefix of {@code value}. 
public static boolean isPrefix(ByteBuffer prefix, ByteBuffer value) { - if (prefix.remaining() > value.remaining()) + if (prefix.remaining() > value.remaining()) { return false; + } int diff = value.remaining() - prefix.remaining(); return prefix.equals(value.duplicate().limit(value.remaining() - diff)); diff --git a/src/main/java/io/mycat/util/ByteUtil.java b/src/main/java/io/mycat/util/ByteUtil.java index 7d0e1c313..721e02861 100644 --- a/src/main/java/io/mycat/util/ByteUtil.java +++ b/src/main/java/io/mycat/util/ByteUtil.java @@ -37,10 +37,12 @@ public class ByteUtil { * @return -1 means b1 < b2, or 0 means b1=b2 else return 1 */ public static int compareNumberByte(byte[] b1, byte[] b2) { - if(b1 == null || b1.length == 0) + if(b1 == null || b1.length == 0) { return -1; - else if(b2 == null || b2.length == 0) + } + else if(b2 == null || b2.length == 0) { return 1; + } boolean isNegetive = b1[0] == 45 || b2[0] == 45; if (isNegetive == false && b1.length != b2.length) { return b1.length - b2.length; @@ -85,11 +87,13 @@ public static byte[] compareNumberArray2(byte[] b1, byte[] b2, int order) { } int len = b1.length > b2.length ? b1.length : b2.length; for (int i = 0; i < len; i++) { - if (b1[i] != b2[i]) - if (order == 1) + if (b1[i] != b2[i]) { + if (order == 1) { return ((b1[i] & 0xff) - (b2[i] & 0xff)) > 0 ? b1 : b2; - else + } else { return ((b1[i] & 0xff) - (b2[i] & 0xff)) > 0 ? 
b2 : b1; + } + } } return b1; @@ -151,8 +155,8 @@ public static byte[] getBytes(String data) { } public static short getShort(byte[] bytes) { -// return (short) ((0xff & bytes[0]) | (0xff00 & (bytes[1] << 8))); return Short.parseShort(new String(bytes)); +// return (short) ((0xff & bytes[0]) | (0xff00 & (bytes[1] << 8))); } public static char getChar(byte[] bytes) { @@ -310,5 +314,35 @@ private static byte[] getBytesFromDate(Date date) { } return bytes; } + + // 支持 byte dump + //--------------------------------------------------------------------- + public static String dump(byte[] data, int offset, int length) { + + StringBuilder sb = new StringBuilder(); + sb.append(" byte dump log "); + sb.append(System.lineSeparator()); + sb.append(" offset ").append( offset ); + sb.append(" length ").append( length ); + sb.append(System.lineSeparator()); + int lines = (length - 1) / 16 + 1; + for (int i = 0, pos = 0; i < lines; i++, pos += 16) { + sb.append(String.format("0x%04X ", i * 16)); + for (int j = 0, pos1 = pos; j < 16; j++, pos1++) { + sb.append(pos1 < length ? String.format("%02X ", data[offset + pos1]) : " "); + } + sb.append(" "); + for (int j = 0, pos1 = pos; j < 16; j++, pos1++) { + sb.append(pos1 < length ? print(data[offset + pos1]) : '.'); + } + sb.append(System.lineSeparator()); + } + sb.append(length).append(" bytes").append(System.lineSeparator()); + return sb.toString(); + } + + public static char print(byte b) { + return (b < 32 || b > 127) ? '.' : (char) b; + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/util/CollectionUtil.java b/src/main/java/io/mycat/util/CollectionUtil.java index fc615e8b3..afbb33e7a 100644 --- a/src/main/java/io/mycat/util/CollectionUtil.java +++ b/src/main/java/io/mycat/util/CollectionUtil.java @@ -1,58 +1,61 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.util; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -/** - * @author mycat - */ -public class CollectionUtil { - /** - * @param orig - * if null, return intersect - */ - public static Set intersectSet(Set orig, Set intersect) { - if (orig == null) - return intersect; - if (intersect == null || orig.isEmpty()) - return Collections.emptySet(); - Set set = new HashSet(orig.size()); - for (Object p : orig) { - if (intersect.contains(p)) - set.add(p); - } - return set; - } - public static boolean isEmpty(Collection collection){ - return collection==null || collection.isEmpty(); - } - public static boolean isEmpty(Map map){ - return map==null || map.isEmpty(); - } +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.util; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + * @author mycat + */ +public class CollectionUtil { + /** + * @param orig + * if null, return intersect + */ + public static Set intersectSet(Set orig, Set intersect) { + if (orig == null) { + return intersect; + } + if (intersect == null || orig.isEmpty()) { + return Collections.emptySet(); + } + Set set = new HashSet(orig.size()); + for (Object p : orig) { + if (intersect.contains(p)) { + set.add(p); + } + } + return set; + } + public static boolean isEmpty(Collection collection){ + return collection==null || collection.isEmpty(); + } + public static boolean isEmpty(Map map){ + return map==null || map.isEmpty(); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/util/CompareUtil.java b/src/main/java/io/mycat/util/CompareUtil.java index 046b9765b..9965e7c23 100644 --- a/src/main/java/io/mycat/util/CompareUtil.java +++ 
b/src/main/java/io/mycat/util/CompareUtil.java @@ -77,10 +77,12 @@ public static int compareLong(long l,long r){ public static int compareString(String l,String r){ // return compareStringForChinese(l,r); - if(l == null) + if(l == null) { return -1; - else if(r == null) + } + else if(r == null) { return 1; + } return l.compareTo(r); } @@ -133,7 +135,9 @@ private static int compareStringForChinese(String s1, String s2) { //获取一个汉字/字母的Char值 private static int getCharCode(String s){ - if (s==null || s.length()==0) return -1;//保护代码 + if (s==null || s.length()==0) { + return -1;//保护代码 + } byte [] b = s.getBytes(); int value = 0; //保证取第一个字符(汉字或者英文) diff --git a/src/main/java/io/mycat/util/CompressUtil.java b/src/main/java/io/mycat/util/CompressUtil.java new file mode 100644 index 000000000..2818a3107 --- /dev/null +++ b/src/main/java/io/mycat/util/CompressUtil.java @@ -0,0 +1,307 @@ +package io.mycat.util; + +import com.google.common.collect.Lists; + +import io.mycat.backend.mysql.BufferUtil; +import io.mycat.backend.mysql.MySQLMessage; +import io.mycat.net.AbstractConnection; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.zip.Deflater; +import java.util.zip.Inflater; + + +/** + * 压缩数据包协议 + * + * http://dev.mysql.com/doc/internals/en/compressed-packet-header.html + * + * (包头) + * 3 Bytes 压缩长度 + * 1 Bytes 压缩序列号 + * 3 Bytes 压缩前的长度 + * + * (包体) + * n Bytes 压缩内容 或 未压缩内容 + * + * | -------------------------------------------------------------------------------------- | + * | comp-length | seq-id | uncomp-len | Compressed Payload | + * | ------------------------------------------------ ------------------------------------- | + * | 22 00 00 | 00 | 32 00 00 | compress("\x2e\x00\x00\x00\x03select ...") | + * | -------------------------------------------------------------------------------------- | + * + * 
Q:为什么消息体是 压缩内容 或者未压缩内容? + * A:这是因为mysql内部有一个约定,如果查询语句payload小于50字节时, 对内容不压缩而保持原貌的方式,而mysql此举是为了减少CPU性能开销 + * + */ +public class CompressUtil { + + public static final int MINI_LENGTH_TO_COMPRESS = 50; + public static final int NO_COMPRESS_PACKET_LENGTH = MINI_LENGTH_TO_COMPRESS + 4; + + + /** + * 压缩数据包 + * @param input + * @param con + * @param compressUnfinishedDataQueue + * @return + */ + public static ByteBuffer compressMysqlPacket(ByteBuffer input, AbstractConnection con, + ConcurrentLinkedQueue compressUnfinishedDataQueue) { + + byte[] byteArrayFromBuffer = getByteArrayFromBuffer(input); + con.recycle(input); + + byteArrayFromBuffer = mergeBytes(byteArrayFromBuffer, compressUnfinishedDataQueue); + return compressMysqlPacket(byteArrayFromBuffer, con, compressUnfinishedDataQueue); + } + + + /** + * 压缩数据包 + * @param data + * @param con + * @param compressUnfinishedDataQueue + * @return + */ + private static ByteBuffer compressMysqlPacket(byte[] data, AbstractConnection con, + ConcurrentLinkedQueue compressUnfinishedDataQueue) { + + ByteBuffer byteBuf = con.allocate(); + byteBuf = con.checkWriteBuffer(byteBuf, data.length, false); //TODO: 数据量大的时候, 此处是是性能的堵点 + + MySQLMessage msg = new MySQLMessage(data); + while ( msg.hasRemaining() ) { + + //包体的长度 + int packetLength = 0; + + //可读的长度 + int readLength = msg.length() - msg.position(); + if ( readLength > 3 ) { + packetLength = msg.readUB3(); + msg.move(-3); + } + + //校验数据包完整性 + if ( readLength < packetLength + 4 ) { + byte[] packet = msg.readBytes(readLength); + if (packet.length != 0) { + compressUnfinishedDataQueue.add(packet); //不完整的包 + } + } else { + + byte[] packet = msg.readBytes(packetLength + 4); + if ( packet.length != 0 ) { + + if ( packet.length <= NO_COMPRESS_PACKET_LENGTH ) { + BufferUtil.writeUB3(byteBuf, packet.length); //压缩长度 + byteBuf.put(packet[3]); //压缩序号 + BufferUtil.writeUB3(byteBuf, 0); //压缩前的长度设置为0 + byteBuf.put(packet); //包体 + + } else { + + byte[] compress = compress(packet); //压缩 + + 
BufferUtil.writeUB3(byteBuf, compress.length); + byteBuf.put(packet[3]); + BufferUtil.writeUB3(byteBuf, packet.length); + byteBuf.put(compress); + } + } + } + } + return byteBuf; + } + + /** + * 解压数据包,同时做分包处理 + * + * @param data + * @param decompressUnfinishedDataQueue + * @return + */ + public static List decompressMysqlPacket(byte[] data, + ConcurrentLinkedQueue decompressUnfinishedDataQueue) { + + MySQLMessage msg = new MySQLMessage(data); + + //包头 + //----------------------------------------- + int packetLength = msg.readUB3(); //压缩的包长 + byte packetId = msg.read(); //压缩的包号 + int oldLen = msg.readUB3(); //压缩前的长度 + + //未压缩, 直接返回 + if ( packetLength == data.length - 4 ) { + return Lists.newArrayList(data); + + //压缩不成功的, 直接返回 + } else if (oldLen == 0) { + byte[] readBytes = msg.readBytes(); + return splitPack(readBytes, decompressUnfinishedDataQueue); + + //解压 + } else { + byte[] de = decompress(data, 7, data.length - 7); + return splitPack(de, decompressUnfinishedDataQueue); + } + } + + /** + * 分包处理 + * + * @param in + * @param decompressUnfinishedDataQueue + * @return + */ + private static List splitPack(byte[] in, ConcurrentLinkedQueue decompressUnfinishedDataQueue) { + + //合并 + in = mergeBytes(in, decompressUnfinishedDataQueue); + + List smallPackList = new ArrayList<>(); + + MySQLMessage msg = new MySQLMessage(in); + while ( msg.hasRemaining() ) { + + int readLength = msg.length() - msg.position(); + int packetLength = 0; + if (readLength > 3) { + packetLength = msg.readUB3(); + msg.move(-3); + } + + if (readLength < packetLength + 4) { + byte[] packet = msg.readBytes(readLength); + if ( packet.length != 0 ) { + decompressUnfinishedDataQueue.add(packet); + } + + } else { + byte[] packet = msg.readBytes(packetLength + 4); + if ( packet.length != 0 ) { + smallPackList.add(packet); + } + } + } + + return smallPackList; + } + + /** + * 合并 解压未完成的字节 + */ + private static byte[] mergeBytes(byte[] in, ConcurrentLinkedQueue decompressUnfinishedDataQueue) { + + if ( 
!decompressUnfinishedDataQueue.isEmpty() ) { + + ByteArrayOutputStream out = new ByteArrayOutputStream(); + try { + while ( !decompressUnfinishedDataQueue.isEmpty() ) { + out.write(decompressUnfinishedDataQueue.poll()); + } + out.write(in); + in = out.toByteArray(); + + } catch (IOException e) { + throw new RuntimeException(e); + } finally { + try { + out.close(); + } catch (IOException e) { + } + } + } + return in; + } + + private static byte[] getByteArrayFromBuffer(ByteBuffer byteBuf) { + byteBuf.flip(); + byte[] row = new byte[byteBuf.limit()]; + byteBuf.get(row); + byteBuf.clear(); + return row; + } + + public static byte[] compress(ByteBuffer byteBuf) { + return compress(getByteArrayFromBuffer(byteBuf)); + } + + /** + * 适用于mysql与客户端交互时zlib 压缩 + * + * @param data + * @return + */ + public static byte[] compress(byte[] data) { + + byte[] output = null; + + Deflater compresser = new Deflater(); + compresser.setInput(data); + compresser.finish(); + + ByteArrayOutputStream out = new ByteArrayOutputStream(data.length); + byte[] result = new byte[1024]; + try { + while (!compresser.finished()) { + int length = compresser.deflate(result); + out.write(result, 0, length); + } + output = out.toByteArray(); + } finally { + try { + out.close(); + } catch (Exception e) { + } + compresser.end(); + } + + return output; + } + + /** + * 适用于mysql与客户端交互时zlib解压 + * + * @param data 数据 + * @param off 偏移量 + * @param len 长度 + * @return + */ + public static byte[] decompress(byte[] data, int off, int len) { + + byte[] output = null; + + Inflater decompresser = new Inflater(); + decompresser.reset(); + decompresser.setInput(data, off, len); + + ByteArrayOutputStream out = new ByteArrayOutputStream(data.length); + try { + byte[] result = new byte[1024]; + while (!decompresser.finished()) { + int i = decompresser.inflate(result); + out.write(result, 0, i); + } + output = out.toByteArray(); + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + try { + out.close(); + } 
catch (Exception e) { + } + decompresser.end(); + } + return output; + } + +} diff --git a/src/main/java/io/mycat/util/DecryptUtil.java b/src/main/java/io/mycat/util/DecryptUtil.java new file mode 100644 index 000000000..48fcb54fc --- /dev/null +++ b/src/main/java/io/mycat/util/DecryptUtil.java @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.util; + +import java.security.InvalidKeyException; +import java.security.Key; +import java.security.KeyFactory; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.PublicKey; +import java.security.SecureRandom; +import java.security.interfaces.RSAPrivateKey; +import java.security.interfaces.RSAPublicKey; +import java.security.spec.PKCS8EncodedKeySpec; +import java.security.spec.RSAPrivateKeySpec; +import java.security.spec.RSAPublicKeySpec; +import java.security.spec.X509EncodedKeySpec; + +import javax.crypto.Cipher; + +import io.mycat.config.util.ConfigException; + +/** + * @author songwie + * + */ +public class DecryptUtil { + + private static final String DEFAULT_PRIVATE_KEY_STRING = "MIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAocbCrurZGbC5GArEHKlAfDSZi7gFBnd4yxOt0rwTqKBFzGyhtQLu5PRKjEiOXVa95aeIIBJ6OhC2f8FjqFUpawIDAQABAkAPejKaBYHrwUqUEEOe8lpnB6lBAsQIUFnQI/vXU4MV+MhIzW0BLVZCiarIQqUXeOhThVWXKFt8GxCykrrUsQ6BAiEA4vMVxEHBovz1di3aozzFvSMdsjTcYRRo82hS5Ru2/OECIQC2fAPoXixVTVY7bNMeuxCP4954ZkXp7fEPDINCjcQDywIgcc8XLkkPcs3Jxk7uYofaXaPbg39wuJpEmzPIxi3k0OECIGubmdpOnin3HuCP/bbjbJLNNoUdGiEmFL5hDI4UdwAdAiEAtcAwbm08bKN7pwwvyqaCBC//VnEWaq39DCzxr+Z2EIk="; + public static final String DEFAULT_PUBLIC_KEY_STRING = "MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKHGwq7q2RmwuRgKxBypQHw0mYu4BQZ3eMsTrdK8E6igRcxsobUC7uT0SoxIjl1WveWniCASejoQtn/BY6hVKWsCAwEAAQ=="; + + public static void main(String[] args) throws Exception { + String password = args[0]; + System.out.println(encrypt(password)); + } + + public static String mycatDecrypt(String usingDecrypt,String user ,String passwrod){ + if("1".equals(usingDecrypt)){ + //type:user:password + //0:test:test + boolean flag = false; + try { + String passwrods[] = DecryptUtil.decrypt(passwrod).split(":"); + if("0".equals(passwrods[0]) + && user.equals(passwrods[1])){ + flag = true; + return passwrods[2]; + } + 
if(flag==false){ + throw new ConfigException("user " + user + " passwrod need to decrype ,but decrype password is wrong !"); + } + } catch (Exception e2) { + throw new ConfigException("user " + user + " passwrod need to decrype ,but decrype password is wrong !",e2); + } + } + return passwrod; + } + public static String DBHostDecrypt(String usingDecrypt,String host,String user ,String passwrod){ + if("1".equals(usingDecrypt)){ + //type:host:user:password + //1:myhost1:test:test + boolean flag = false; + try { + String passwrods[] = DecryptUtil.decrypt(passwrod).split(":"); + if("1".equals(passwrods[0]) && host.equals(passwrods[1]) && user.equals(passwrods[2])){ + return passwrods[3]; + } + if(flag==false){ + throw new ConfigException("user " + user + " passwrod need to decrype ,but decrype password is wrong !"); + } + } catch (Exception e2) { + throw new ConfigException("host " + host + ",user " + user + " passwrod need to decrype ,but decrype password is wrong !",e2); + } + } + return passwrod; + } + + + public static String decrypt(String cipherText) throws Exception { + return decrypt((String) null, cipherText); + } + + public static String decrypt(String publicKeyText, String cipherText) + throws Exception { + PublicKey publicKey = getPublicKey(publicKeyText); + + return decrypt(publicKey, cipherText); + } + + public static PublicKey getPublicKey(String publicKeyText) { + if (publicKeyText == null || publicKeyText.length() == 0) { + publicKeyText = DecryptUtil.DEFAULT_PUBLIC_KEY_STRING; + } + + try { + byte[] publicKeyBytes = Base64.base64ToByteArray(publicKeyText); + X509EncodedKeySpec x509KeySpec = new X509EncodedKeySpec( + publicKeyBytes); + + KeyFactory keyFactory = KeyFactory.getInstance("RSA"); + return keyFactory.generatePublic(x509KeySpec); + } catch (Exception e) { + throw new IllegalArgumentException("Failed to get public key", e); + } + } + + + public static String decrypt(PublicKey publicKey, String cipherText) + throws Exception { + Cipher cipher = 
Cipher.getInstance("RSA"); + try { + cipher.init(Cipher.DECRYPT_MODE, publicKey); + } catch (InvalidKeyException e) { + // 因为 IBM JDK 不支持私钥加密, 公钥解密, 所以要反转公私钥 + // 也就是说对于解密, 可以通过公钥的参数伪造一个私钥对象欺骗 IBM JDK + RSAPublicKey rsaPublicKey = (RSAPublicKey) publicKey; + RSAPrivateKeySpec spec = new RSAPrivateKeySpec(rsaPublicKey.getModulus(), rsaPublicKey.getPublicExponent()); + Key fakePrivateKey = KeyFactory.getInstance("RSA").generatePrivate(spec); + cipher = Cipher.getInstance("RSA"); //It is a stateful object. so we need to get new one. + cipher.init(Cipher.DECRYPT_MODE, fakePrivateKey); + } + + if (cipherText == null || cipherText.length() == 0) { + return cipherText; + } + + byte[] cipherBytes = Base64.base64ToByteArray(cipherText); + byte[] plainBytes = cipher.doFinal(cipherBytes); + + return new String(plainBytes); + } + + public static String encrypt(String plainText) throws Exception { + return encrypt((String) null, plainText); + } + + public static String encrypt(String key, String plainText) throws Exception { + if (key == null) { + key = DEFAULT_PRIVATE_KEY_STRING; + } + + byte[] keyBytes = Base64.base64ToByteArray(key); + return encrypt(keyBytes, plainText); + } + + public static String encrypt(byte[] keyBytes, String plainText) + throws Exception { + PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(keyBytes); + KeyFactory factory = KeyFactory.getInstance("RSA"); + PrivateKey privateKey = factory.generatePrivate(spec); + Cipher cipher = Cipher.getInstance("RSA"); + try { + cipher.init(Cipher.ENCRYPT_MODE, privateKey); + } catch (InvalidKeyException e) { + //For IBM JDK, 原因请看解密方法中的说明 + RSAPrivateKey rsaPrivateKey = (RSAPrivateKey) privateKey; + RSAPublicKeySpec publicKeySpec = new RSAPublicKeySpec(rsaPrivateKey.getModulus(), rsaPrivateKey.getPrivateExponent()); + Key fakePublicKey = KeyFactory.getInstance("RSA").generatePublic(publicKeySpec); + cipher = Cipher.getInstance("RSA"); + cipher.init(Cipher.ENCRYPT_MODE, fakePublicKey); + } + + byte[] encryptedBytes 
= cipher.doFinal(plainText.getBytes("UTF-8")); + String encryptedString = Base64.byteArrayToBase64(encryptedBytes); + + return encryptedString; + } + + public static byte[][] genKeyPairBytes(int keySize) + throws NoSuchAlgorithmException { + byte[][] keyPairBytes = new byte[2][]; + + KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA"); + gen.initialize(keySize, new SecureRandom()); + KeyPair pair = gen.generateKeyPair(); + + keyPairBytes[0] = pair.getPrivate().getEncoded(); + keyPairBytes[1] = pair.getPublic().getEncoded(); + + return keyPairBytes; + } + + public static String[] genKeyPair(int keySize) + throws NoSuchAlgorithmException { + byte[][] keyPairBytes = genKeyPairBytes(keySize); + String[] keyPairs = new String[2]; + + keyPairs[0] = Base64.byteArrayToBase64(keyPairBytes[0]); + keyPairs[1] = Base64.byteArrayToBase64(keyPairBytes[1]); + + return keyPairs; + } + + static class Base64 { + + /** + * Translates the specified byte array into a Base64 string as per Preferences.put(byte[]). + */ + public static String byteArrayToBase64(byte[] a) { + return byteArrayToBase64(a, false); + } + + /** + * Translates the specified byte array into an "alternate representation" Base64 string. This non-standard variant + * uses an alphabet that does not contain the uppercase alphabetic characters, which makes it suitable for use in + * situations where case-folding occurs. + */ + public static String byteArrayToAltBase64(byte[] a) { + return byteArrayToBase64(a, true); + } + + private static String byteArrayToBase64(byte[] a, boolean alternate) { + int aLen = a.length; + int numFullGroups = aLen / 3; + int numBytesInPartialGroup = aLen - 3 * numFullGroups; + int resultLen = 4 * ((aLen + 2) / 3); + StringBuilder result = new StringBuilder(resultLen); + char[] intToAlpha = (alternate ? 
intToAltBase64 : intToBase64); + + // Translate all full groups from byte array elements to Base64 + int inCursor = 0; + for (int i = 0; i < numFullGroups; i++) { + int byte0 = a[inCursor++] & 0xff; + int byte1 = a[inCursor++] & 0xff; + int byte2 = a[inCursor++] & 0xff; + result.append(intToAlpha[byte0 >> 2]); + result.append(intToAlpha[(byte0 << 4) & 0x3f | (byte1 >> 4)]); + result.append(intToAlpha[(byte1 << 2) & 0x3f | (byte2 >> 6)]); + result.append(intToAlpha[byte2 & 0x3f]); + } + + // Translate partial group if present + if (numBytesInPartialGroup != 0) { + int byte0 = a[inCursor++] & 0xff; + result.append(intToAlpha[byte0 >> 2]); + if (numBytesInPartialGroup == 1) { + result.append(intToAlpha[(byte0 << 4) & 0x3f]); + result.append("=="); + } else { + // assert numBytesInPartialGroup == 2; + int byte1 = a[inCursor++] & 0xff; + result.append(intToAlpha[(byte0 << 4) & 0x3f | (byte1 >> 4)]); + result.append(intToAlpha[(byte1 << 2) & 0x3f]); + result.append('='); + } + } + // assert inCursor == a.length; + // assert result.length() == resultLen; + return result.toString(); + } + + /** + * This array is a lookup table that translates 6-bit positive integer index values into their "Base64 Alphabet" + * equivalents as specified in Table 1 of RFC 2045. + */ + private static final char intToBase64[] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', + 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', + 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', + '3', '4', '5', '6', '7', '8', '9', '+', '/' }; + + /** + * This array is a lookup table that translates 6-bit positive integer index values into their + * "Alternate Base64 Alphabet" equivalents. This is NOT the real Base64 Alphabet as per in Table 1 of RFC 2045. This + * alternate alphabet does not use the capital letters. 
It is designed for use in environments where "case folding" + * occurs. + */ + private static final char intToAltBase64[] = { '!', '"', '#', '$', '%', '&', '\'', '(', ')', ',', '-', '.', ':', + ';', '<', '>', '@', '[', ']', '^', '`', '_', '{', '|', '}', '~', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', + 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', + '3', '4', '5', '6', '7', '8', '9', '+', '?' }; + + /** + * Translates the specified Base64 string (as per Preferences.get(byte[])) into a byte array. + * + * @throw IllegalArgumentException if s is not a valid Base64 string. + */ + public static byte[] base64ToByteArray(String s) { + return base64ToByteArray(s, false); + } + + /** + * Translates the specified "alternate representation" Base64 string into a byte array. + * + * @throw IllegalArgumentException or ArrayOutOfBoundsException if s is not a valid alternate + * representation Base64 string. + */ + public static byte[] altBase64ToByteArray(String s) { + return base64ToByteArray(s, true); + } + + private static byte[] base64ToByteArray(String s, boolean alternate) { + byte[] alphaToInt = (alternate ? 
altBase64ToInt : base64ToInt); + int sLen = s.length(); + int numGroups = sLen / 4; + if (4 * numGroups != sLen) { + throw new IllegalArgumentException("String length must be a multiple of four."); + } + int missingBytesInLastGroup = 0; + int numFullGroups = numGroups; + if (sLen != 0) { + if (s.charAt(sLen - 1) == '=') { + missingBytesInLastGroup++; + numFullGroups--; + } + if (s.charAt(sLen - 2) == '=') { + missingBytesInLastGroup++; + } + } + byte[] result = new byte[3 * numGroups - missingBytesInLastGroup]; + + // Translate all full groups from base64 to byte array elements + int inCursor = 0, outCursor = 0; + for (int i = 0; i < numFullGroups; i++) { + int ch0 = base64toInt(s.charAt(inCursor++), alphaToInt); + int ch1 = base64toInt(s.charAt(inCursor++), alphaToInt); + int ch2 = base64toInt(s.charAt(inCursor++), alphaToInt); + int ch3 = base64toInt(s.charAt(inCursor++), alphaToInt); + result[outCursor++] = (byte) ((ch0 << 2) | (ch1 >> 4)); + result[outCursor++] = (byte) ((ch1 << 4) | (ch2 >> 2)); + result[outCursor++] = (byte) ((ch2 << 6) | ch3); + } + + // Translate partial group, if present + if (missingBytesInLastGroup != 0) { + int ch0 = base64toInt(s.charAt(inCursor++), alphaToInt); + int ch1 = base64toInt(s.charAt(inCursor++), alphaToInt); + result[outCursor++] = (byte) ((ch0 << 2) | (ch1 >> 4)); + + if (missingBytesInLastGroup == 1) { + int ch2 = base64toInt(s.charAt(inCursor++), alphaToInt); + result[outCursor++] = (byte) ((ch1 << 4) | (ch2 >> 2)); + } + } + // assert inCursor == s.length()-missingBytesInLastGroup; + // assert outCursor == result.length; + return result; + } + + /** + * Translates the specified character, which is assumed to be in the "Base 64 Alphabet" into its equivalent 6-bit + * positive integer. + * + * @throw IllegalArgumentException or ArrayOutOfBoundsException if c is not in the Base64 Alphabet. 
+ */ + private static int base64toInt(char c, byte[] alphaToInt) { + int result = alphaToInt[c]; + if (result < 0) { + throw new IllegalArgumentException("Illegal character " + c); + } + return result; + } + + /** + * This array is a lookup table that translates unicode characters drawn from the "Base64 Alphabet" (as specified in + * Table 1 of RFC 2045) into their 6-bit positive integer equivalents. Characters that are not in the Base64 + * alphabet but fall within the bounds of the array are translated to -1. + */ + private static final byte base64ToInt[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, + -1, -1, -1, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1, -1, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51 }; + + /** + * This array is the analogue of base64ToInt, but for the nonstandard variant that avoids the use of uppercase + * alphabetic characters. 
+ */ + private static final byte altBase64ToInt[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, 62, 9, 10, + 11, -1, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 12, 13, 14, -1, 15, 63, 16, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 17, -1, 18, 19, 21, 20, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 22, 23, 24, 25 }; + + } + +} diff --git a/src/main/java/io/mycat/util/ExecutorUtil.java b/src/main/java/io/mycat/util/ExecutorUtil.java new file mode 100644 index 000000000..4ba1a7a0e --- /dev/null +++ b/src/main/java/io/mycat/util/ExecutorUtil.java @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.util; + +import java.util.concurrent.LinkedTransferQueue; + +/** + * @author mycat + */ +public class ExecutorUtil { + + public static final NameableExecutor create(String name, int size) { + return create(name, size, true); + } + + private static final NameableExecutor create(String name, int size, boolean isDaemon) { + NameableThreadFactory factory = new NameableThreadFactory(name, isDaemon); + return new NameableExecutor(name, size, new LinkedTransferQueue(), factory); + } + + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/util/FastByteOperations.java b/src/main/java/io/mycat/util/FastByteOperations.java index a469ecea9..beec26205 100644 --- a/src/main/java/io/mycat/util/FastByteOperations.java +++ b/src/main/java/io/mycat/util/FastByteOperations.java @@ -106,8 +106,9 @@ static ByteOperations getBest() String arch = System.getProperty("os.arch"); boolean unaligned = arch.equals("i386") || arch.equals("x86") || arch.equals("amd64") || arch.equals("x86_64"); - if (!unaligned) + if (!unaligned) { return new PureJavaOperations(); + } try { Class theClass = Class.forName(UNSAFE_COMPARER_NAME); @@ -220,10 +221,12 @@ public int compare(ByteBuffer buffer1, ByteBuffer buffer2) public void copy(ByteBuffer src, int srcPosition, byte[] trg, int trgPosition, int length) { - if (src.hasArray()) + if (src.hasArray()) { System.arraycopy(src.array(), src.arrayOffset() + srcPosition, trg, trgPosition, length); - else + } + else { copy(null, srcPosition + theUnsafe.getLong(src, DIRECT_BUFFER_ADDRESS_OFFSET), trg, trgPosition, length); + } } public void copy(ByteBuffer srcBuf, int srcPosition, ByteBuffer trgBuf, int trgPosition, int length) @@ -245,18 +248,21 @@ public void copy(ByteBuffer srcBuf, int srcPosition, ByteBuffer trgBuf, int trgP public static void copy(Object src, long srcOffset, ByteBuffer trgBuf, int trgPosition, int length) { - if (trgBuf.hasArray()) + if (trgBuf.hasArray()) { copy(src, srcOffset, trgBuf.array(), 
trgBuf.arrayOffset() + trgPosition, length); - else + } + else { copy(src, srcOffset, null, trgPosition + theUnsafe.getLong(trgBuf, DIRECT_BUFFER_ADDRESS_OFFSET), length); + } } public static void copy(Object src, long srcOffset, byte[] trg, int trgPosition, int length) { if (length <= MIN_COPY_THRESHOLD) { - for (int i = 0 ; i < length ; i++) + for (int i = 0 ; i < length ; i++) { trg[trgPosition + i] = theUnsafe.getByte(src, srcOffset + i); + } } else { @@ -355,8 +361,9 @@ public static int compareTo(Object buffer1, long memoryOffset1, int length1, if (lw != rw) { - if (BIG_ENDIAN) + if (BIG_ENDIAN) { return UnsignedLongs.compare(lw, rw); + } return UnsignedLongs.compare(Long.reverseBytes(lw), Long.reverseBytes(rw)); } @@ -366,8 +373,9 @@ public static int compareTo(Object buffer1, long memoryOffset1, int length1, { int b1 = theUnsafe.getByte(buffer1, memoryOffset1 + i) & 0xFF; int b2 = theUnsafe.getByte(buffer2, memoryOffset2 + i) & 0xFF; - if (b1 != b2) + if (b1 != b2) { return b1 - b2; + } } return length1 - length2; @@ -383,8 +391,9 @@ public int compare(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2, int length2) { // Short circuit equal case - if (buffer1 == buffer2 && offset1 == offset2 && length1 == length2) + if (buffer1 == buffer2 && offset1 == offset2 && length1 == length2) { return 0; + } int end1 = offset1 + length1; int end2 = offset2 + length2; @@ -402,9 +411,10 @@ public int compare(byte[] buffer1, int offset1, int length1, public int compare(ByteBuffer buffer1, byte[] buffer2, int offset2, int length2) { - if (buffer1.hasArray()) + if (buffer1.hasArray()) { return compare(buffer1.array(), buffer1.arrayOffset() + buffer1.position(), buffer1.remaining(), - buffer2, offset2, length2); + buffer2, offset2, length2); + } return compare(buffer1, ByteBuffer.wrap(buffer2, offset2, length2)); } diff --git a/src/main/java/io/mycat/util/HexFormatUtil.java b/src/main/java/io/mycat/util/HexFormatUtil.java index 3f296bee6..82999135d 
100644 --- a/src/main/java/io/mycat/util/HexFormatUtil.java +++ b/src/main/java/io/mycat/util/HexFormatUtil.java @@ -27,6 +27,18 @@ * @author mycat */ public final class HexFormatUtil { + + private final static char[] hexArray = "0123456789ABCDEF".toCharArray(); + + public static String bytesToHexString(byte[] bytes) { + char[] hexChars = new char[bytes.length * 2]; + for ( int j = 0; j < bytes.length; j++ ) { + int v = bytes[j] & 0xFF; + hexChars[j * 2] = hexArray[v >>> 4]; + hexChars[j * 2 + 1] = hexArray[v & 0x0F]; + } + return new String(hexChars); + } public static byte[] fromHex(String src) { String[] hex = src.split(" "); diff --git a/src/main/java/io/mycat/util/IntegerUtil.java b/src/main/java/io/mycat/util/IntegerUtil.java index 5b0392f96..e69e46b5c 100644 --- a/src/main/java/io/mycat/util/IntegerUtil.java +++ b/src/main/java/io/mycat/util/IntegerUtil.java @@ -44,8 +44,9 @@ public final class IntegerUtil { 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' }; public static byte[] toBytes(int i) { - if (i == Integer.MIN_VALUE) + if (i == Integer.MIN_VALUE) { return minValue; + } int size = (i < 0) ? stringSize(-i) + 1 : stringSize(i); byte[] buf = new byte[size]; getBytes(i, size, buf); @@ -54,8 +55,9 @@ public static byte[] toBytes(int i) { static int stringSize(int x) { for (int i = 0;; i++) { - if (x <= sizeTable[i]) + if (x <= sizeTable[i]) { return i + 1; + } } } @@ -86,8 +88,9 @@ static void getBytes(int i, int index, byte[] buf) { r = i - ((q << 3) + (q << 1)); // r = i-(q*10) ... 
buf[--charPos] = digits[r]; i = q; - if (i == 0) + if (i == 0) { break; + } } if (sign != 0) { buf[--charPos] = sign; diff --git a/src/main/java/io/mycat/util/LongUtil.java b/src/main/java/io/mycat/util/LongUtil.java index 498a82eb7..b4020e2b6 100644 --- a/src/main/java/io/mycat/util/LongUtil.java +++ b/src/main/java/io/mycat/util/LongUtil.java @@ -31,8 +31,9 @@ public final class LongUtil { private static final byte[] minValue = "-9223372036854775808".getBytes(); public static byte[] toBytes(long i) { - if (i == Long.MIN_VALUE) + if (i == Long.MIN_VALUE) { return minValue; + } int size = (i < 0) ? stringSize(-i) + 1 : stringSize(i); byte[] buf = new byte[size]; getBytes(i, size, buf); @@ -42,8 +43,9 @@ public static byte[] toBytes(long i) { static int stringSize(long x) { long p = 10; for (int i = 1; i < 19; i++) { - if (x < p) + if (x < p) { return i; + } p = 10 * p; } return 19; @@ -89,8 +91,9 @@ static void getBytes(long i, int index, byte[] buf) { r = i2 - ((q2 << 3) + (q2 << 1)); // r = i2-(q2*10) ... 
buf[--charPos] = IntegerUtil.digits[r]; i2 = q2; - if (i2 == 0) + if (i2 == 0) { break; + } } if (sign != 0) { buf[--charPos] = sign; diff --git a/src/main/java/io/mycat/util/MysqlDefs.java b/src/main/java/io/mycat/util/MysqlDefs.java index c87e61ad7..1bca80439 100644 --- a/src/main/java/io/mycat/util/MysqlDefs.java +++ b/src/main/java/io/mycat/util/MysqlDefs.java @@ -591,6 +591,20 @@ public static String typeToName(int mysqlType) { } } + public static boolean isBianry(byte mysqlType) { + int type = mysqlType; + if(type < 0) { + type += 256; + } + + if(type == MysqlDefs.FIELD_TYPE_BLOB || type == MysqlDefs.FIELD_TYPE_TINY_BLOB || + type == MysqlDefs.FIELD_TYPE_MEDIUM_BLOB || type == MysqlDefs.FIELD_TYPE_LONG_BLOB) { + return true; + } + + return false; + } + private static Map mysqlToJdbcTypesMap = new HashMap(); static { diff --git a/src/main/java/io/mycat/util/NameableExecutor.java b/src/main/java/io/mycat/util/NameableExecutor.java new file mode 100644 index 000000000..80291e4d9 --- /dev/null +++ b/src/main/java/io/mycat/util/NameableExecutor.java @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.util; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +/** + * @author mycat + */ +public class NameableExecutor extends ThreadPoolExecutor { + + protected String name; + + public NameableExecutor(String name, int size, BlockingQueue queue, ThreadFactory factory) { + super(size, size, Long.MAX_VALUE, TimeUnit.NANOSECONDS, queue, factory); + this.name = name; + } + + public String getName() { + return name; + } + +} \ No newline at end of file diff --git a/src/main/java/io/mycat/net/NameableThreadFactory.java b/src/main/java/io/mycat/util/NameableThreadFactory.java similarity index 97% rename from src/main/java/io/mycat/net/NameableThreadFactory.java rename to src/main/java/io/mycat/util/NameableThreadFactory.java index 9f7395b81..0bd17531e 100644 --- a/src/main/java/io/mycat/net/NameableThreadFactory.java +++ b/src/main/java/io/mycat/util/NameableThreadFactory.java @@ -1,4 +1,4 @@ -package io.mycat.net; +package io.mycat.util; import java.util.concurrent.ThreadFactory; import java.util.concurrent.atomic.AtomicInteger; diff --git a/src/main/java/io/mycat/util/ObjectUtil.java b/src/main/java/io/mycat/util/ObjectUtil.java index 38bf1b4bd..fa21f95bd 100644 --- a/src/main/java/io/mycat/util/ObjectUtil.java +++ b/src/main/java/io/mycat/util/ObjectUtil.java @@ -32,6 +32,7 @@ import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; +import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import 
java.util.Arrays; import java.util.List; @@ -44,6 +45,31 @@ */ public final class ObjectUtil { private static final Logger LOGGER = LoggerFactory.getLogger(ObjectUtil.class); + + + public static Object getStaticFieldValue(String className,String fieldName) + { + Class clazz = null; + try + { + clazz = Class.forName(className); + Field field = clazz.getField(fieldName); + if(field!=null) { + return field.get(null); + } + } catch (ClassNotFoundException e) + { + //LOGGER.error("getStaticFieldValue", e); + } catch (NoSuchFieldException e) + { + // LOGGER.error("getStaticFieldValue", e); + } catch (IllegalAccessException e) + { + // LOGGER.error("getStaticFieldValue", e); + } + return null; + } + public static Object copyObject(Object object) { ByteArrayOutputStream b = new ByteArrayOutputStream(); @@ -54,11 +80,11 @@ public static Object copyObject(Object object) { ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(b.toByteArray())); return ois.readObject(); } catch (IOException e) { - LOGGER.error("copyObjectIOError", e); + throw new RuntimeException(e); } catch (ClassNotFoundException e) { - LOGGER.error("copyObjectError", e); + throw new RuntimeException(e); } - return null; + } /** @@ -250,8 +276,8 @@ public static void copyProperties(Object fromObj, Object toObj) { .indexOf(propertyDescriptor)); if (pd.getDisplayName().equals( propertyDescriptor.getDisplayName()) - && !pd.getDisplayName().equals("class")) { - if(propertyDescriptor.getWriteMethod() != null) + && !pd.getDisplayName().equals("class") + && propertyDescriptor.getWriteMethod() != null) { propertyDescriptor.getWriteMethod().invoke(toObj, pd.getReadMethod().invoke(fromObj, null)); } @@ -266,4 +292,4 @@ public static void copyProperties(Object fromObj, Object toObj) { throw new RuntimeException(e); } } -} \ No newline at end of file +} diff --git a/src/main/java/io/mycat/util/ProcessUtil.java b/src/main/java/io/mycat/util/ProcessUtil.java new file mode 100644 index 000000000..49c6eb951 
--- /dev/null +++ b/src/main/java/io/mycat/util/ProcessUtil.java @@ -0,0 +1,107 @@ +package io.mycat.util; + +import io.mycat.migrate.MigrateUtils; +import io.mycat.util.dataMigrator.DataMigratorUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.util.Arrays; +import java.util.List; + +public class ProcessUtil +{ + private static Logger LOGGER = LoggerFactory.getLogger((ProcessUtil.class)); + + + + public static int exec(String cmd) { + Process process = null; + try { + Runtime runtime = Runtime.getRuntime(); + process = runtime.exec(cmd); + new StreamGobble(process.getInputStream(), "INFO").start(); + new StreamGobble(process.getErrorStream(), "ERROR").start(); + return process.waitFor(); + } catch (Throwable t) { + LOGGER.error(t.getMessage()); + } finally { + if (process != null) + process.destroy(); + + } + return 0; + } + public static String execReturnString(List cmd) { + Process process = null; + try { + // Runtime runtime = Runtime.getRuntime(); + // process = runtime.exec(cmd); + ProcessBuilder pb = new ProcessBuilder(cmd); + pb.redirectErrorStream(true); + process=pb.start(); + StreamGobble inputGobble = new StreamGobble(process.getInputStream(), "INFO"); + inputGobble.start(); + new StreamGobble(process.getErrorStream(), "ERROR").start(); + process.waitFor(); + return inputGobble.getResult(); + } catch (Throwable t) { + LOGGER.error(t.getMessage()); + } finally { + if (process != null) + process.destroy(); + + } + return null; + } + public static String execReturnString(String cmd) { + Process process = null; + try { + Runtime runtime = Runtime.getRuntime(); + process = runtime.exec(cmd); + StreamGobble inputGobble = new StreamGobble(process.getInputStream(), "INFO"); + inputGobble.start(); + new StreamGobble(process.getErrorStream(), "ERROR").start(); + process.waitFor(); + return inputGobble.getResult(); + } catch (Throwable t) { + LOGGER.error(t.getMessage()); + } finally { + if (process != null) + 
process.destroy(); + + } + return null; + } + public static int exec(String cmd,File dir) { + Process process = null; + try { + Runtime runtime = Runtime.getRuntime(); + process = runtime.exec(cmd,null,dir); + new StreamGobble(process.getInputStream(), "INFO").start(); + new StreamGobble(process.getErrorStream(), "ERROR").start(); + return process.waitFor(); + + } catch (Throwable t) { + LOGGER.error(t.getMessage()); + } finally { + if (process != null) + process.destroy(); + + } + return 0; + } + + public static void main(String[] args) { + + +// List argss= Arrays.asList("mysqldump", "-h127.0.0.1", "-P3301", "-uczn", +// "-p123", "base1","test", "--single-transaction","-q","--default-character-set=utf8mb4","--hex-blob","--where=(_slot>=100 and _slot<=1000) or (_slot>=2000 and _slot <=100000)", "--master-data=1","-Tc:\\999" +// ,"--fields-enclosed-by=\\\"","--fields-terminated-by=,", "--lines-terminated-by=\\n", "--fields-escaped-by=\\\\"); +// String result= ProcessUtil.execReturnString(argss); +// System.out.println(result); + + } + + +} diff --git a/src/main/java/io/mycat/util/RandomUtil.java b/src/main/java/io/mycat/util/RandomUtil.java index 34db91b90..33027362d 100644 --- a/src/main/java/io/mycat/util/RandomUtil.java +++ b/src/main/java/io/mycat/util/RandomUtil.java @@ -68,4 +68,36 @@ private static long next() { return nextSeed; } + /** + * 随机指定范围内N个不重复的数 + * 最简单最基本的方法 + * @param min 指定范围最小值(包含) + * @param max 指定范围最大值(不包含) + * @param n 随机数个数 + */ + public static int[] getNRandom(int min, int max, int n){ + if (n > (max - min + 1) || max < min) { + return null; + } + int[] result = new int[n]; + for(int i = 0 ; i < n ; i++){ + result[i] = -9999; + } + int count = 0; + while(count < n) { + int num = (int) ((Math.random() * (max - min)) + min); + boolean flag = true; + for (int j = 0; j < n; j++) { + if(num == result[j]){ + flag = false; + break; + } + } + if(flag){ + result[count] = num; + count++; + } + } + return result; + } } \ No newline at end of file 
diff --git a/src/main/java/io/mycat/util/ResultSetUtil.java b/src/main/java/io/mycat/util/ResultSetUtil.java index e04bbdb5b..bd03e1641 100644 --- a/src/main/java/io/mycat/util/ResultSetUtil.java +++ b/src/main/java/io/mycat/util/ResultSetUtil.java @@ -1,13 +1,14 @@ package io.mycat.util; -import io.mycat.server.packet.FieldPacket; -import io.mycat.server.packet.RowDataPacket; - +import java.sql.Connection; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.util.List; +import io.mycat.net.mysql.FieldPacket; +import io.mycat.net.mysql.RowDataPacket; + /** * * @author struct @@ -20,15 +21,15 @@ public static int toFlag(ResultSetMetaData metaData, int column) int flags = 0; if (metaData.isNullable(column) == 1) { - flags |= 0001; + flags |= 1; } if (metaData.isSigned(column)) { - flags |= 0020; + flags |= 16; } if (metaData.isAutoIncrement(column)) { - flags |= 0200; + flags |= 128; } return flags; @@ -58,6 +59,10 @@ public static void resultSetToFieldPacket(String charset, int javaType = MysqlDefs.javaTypeDetect( metaData.getColumnType(j), fieldPacket.decimals); fieldPacket.type = (byte) (MysqlDefs.javaTypeMysql(javaType) & 0xff); + if(MysqlDefs.isBianry((byte) fieldPacket.type)) { + // 63 represent binary character set + fieldPacket.charsetIndex = 63; + } fieldPks.add(fieldPacket); //values+=metaData.getColumnLabel(j)+"|"+metaData.getColumnName(j)+" "; } @@ -79,7 +84,8 @@ public static String getColumnValAsString(byte[] row, RowDataPacket rowDataPkg = new RowDataPacket(fieldValues.size()); rowDataPkg.read(row); byte[] columnData = rowDataPkg.fieldValues.get(columnIndex); - return new String(columnData); + //columnData 为空时,直接返回null + return columnData==null?null:new String(columnData); } public static byte[] getColumnVal(byte[] row, List fieldValues, diff --git a/src/main/java/io/mycat/util/SelectorUtil.java b/src/main/java/io/mycat/util/SelectorUtil.java new file mode 100644 index 000000000..73d6cd4d9 --- 
/dev/null +++ b/src/main/java/io/mycat/util/SelectorUtil.java @@ -0,0 +1,58 @@ +package io.mycat.util; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.channels.SelectableChannel; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.util.ConcurrentModificationException; + +/** + * Selector工具类 + * Created by Hash Zhang on 2017/7/24. + */ +public class SelectorUtil { + private static final Logger logger = LoggerFactory.getLogger(SelectorUtil.class); + + public static final int REBUILD_COUNT_THRESHOLD = 512; + + public static final long MIN_SELECT_TIME_IN_NANO_SECONDS = 500000L; + + public static Selector rebuildSelector(final Selector oldSelector) throws IOException { + final Selector newSelector; + try { + newSelector = Selector.open(); + } catch (Exception e) { + logger.warn("Failed to create a new Selector.", e); + return null; + } + + int nChannels = 0; + for (;;) { + try { + for (SelectionKey key: oldSelector.keys()) { + Object a = key.attachment(); + try { + if (!key.isValid() || key.channel().keyFor(newSelector) != null) { + continue; + } + int interestOps = key.interestOps(); + key.cancel(); + key.channel().register(newSelector, interestOps, a); + nChannels ++; + } catch (Exception e) { + logger.warn("Failed to re-register a Channel to the new Selector.", e); + } + } + } catch (ConcurrentModificationException e) { + // Probably due to concurrent modification of the key set. 
+ continue; + } + break; + } + oldSelector.close(); + return newSelector; + } +} diff --git a/src/main/java/io/mycat/util/SetIgnoreUtil.java b/src/main/java/io/mycat/util/SetIgnoreUtil.java new file mode 100644 index 000000000..4f83f4c44 --- /dev/null +++ b/src/main/java/io/mycat/util/SetIgnoreUtil.java @@ -0,0 +1,48 @@ +package io.mycat.util; + +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * 忽略部分SET 指令 + * + * 实际使用中PHP用户经常会操作多个SET指令组成一个Stmt , 所以该指令检测功能独立出来 + * + * @author zhuam + * + */ +public class SetIgnoreUtil { + + private static List ptrnIgnoreList = new ArrayList(); + + static { + + //TODO: 忽略部分 SET 指令, 避免WARN 不断的刷日志 + String[] ignores = new String[] { + "(?i)set (sql_mode)", + "(?i)set (interactive_timeout|wait_timeout|net_read_timeout|net_write_timeout|lock_wait_timeout|slave_net_timeout)", + "(?i)set (connect_timeout|delayed_insert_timeout|innodb_lock_wait_timeout|innodb_rollback_on_timeout)", + "(?i)set (profiling|profiling_history_size)" + }; + + for (int i = 0; i < ignores.length; ++i) { + ptrnIgnoreList.add(Pattern.compile(ignores[i])); + } + } + + public static boolean isIgnoreStmt(String stmt) { + boolean ignore = false; + Matcher matcherIgnore; + for (Pattern ptrnIgnore : ptrnIgnoreList) { + matcherIgnore = ptrnIgnore.matcher( stmt ); + if (matcherIgnore.find()) { + ignore = true; + break; + } + } + return ignore; + } + +} diff --git a/src/main/java/io/mycat/util/SmallSet.java b/src/main/java/io/mycat/util/SmallSet.java index a2970a268..4dcabe778 100644 --- a/src/main/java/io/mycat/util/SmallSet.java +++ b/src/main/java/io/mycat/util/SmallSet.java @@ -60,8 +60,9 @@ public boolean add(E e) { single = e; return true; case 1: - if (isEquals(e, single)) + if (isEquals(e, single)) { return false; + } list = new ArrayList(initSize); list.add(single); list.add(e); @@ -70,8 +71,9 @@ public boolean add(E e) { default: for (int i = 0; i < list.size(); ++i) { E e1 = 
list.get(i); - if (isEquals(e1, e)) + if (isEquals(e1, e)) { return false; + } } list.add(e); ++size; @@ -80,8 +82,9 @@ public boolean add(E e) { } private boolean isEquals(E e1, E e2) { - if (e1 == null) + if (e1 == null) { return e2 == null; + } return e1.equals(e2); } @@ -128,21 +131,24 @@ public E next() { @Override public void remove() { - if (!next) + if (!next) { throw new IllegalStateException(); + } switch (size) { case 0: throw new IllegalStateException(); case 1: size = i = 0; single = null; - if (list != null && !list.isEmpty()) + if (list != null && !list.isEmpty()) { list.remove(0); + } break; default: list.remove(--i); - if (--size == 1) + if (--size == 1) { single = list.get(0); + } break; } next = false; diff --git a/src/main/java/io/mycat/util/SplitUtil.java b/src/main/java/io/mycat/util/SplitUtil.java index c454fe67c..22865dd74 100644 --- a/src/main/java/io/mycat/util/SplitUtil.java +++ b/src/main/java/io/mycat/util/SplitUtil.java @@ -274,8 +274,9 @@ public static String[] splitByByteSize(String string, int size) { return new String[]{string}; } byte[] bytes = string.getBytes(); - if (bytes.length <= size) - return new String[] { string }; + if (bytes.length <= size) { + return new String[]{string}; + } // 分成的条数不确定(整除的情况下也许会多出一条),所以先用list再转化为array List list = new ArrayList(); int offset = 0;// 偏移量,也就是截取的字符串的首字节的位置 @@ -290,19 +291,21 @@ public static String[] splitByByteSize(String string, int size) { break; } if (bytes[position - 1] > 0 - || (bytes[position - 1] < 0 && bytes[position - 2] < 0)) + || (bytes[position - 1] < 0 && bytes[position - 2] < 0)){ // 截断点是字母,或者是汉字 length = size; - else + } else { // 截断点在汉字中间 length = size - 1; + } String s = new String(bytes, offset, length); list.add(s); offset += length; } String[] array = new String[list.size()]; - for (int i = 0; i < array.length; i++) + for (int i = 0; i < array.length; i++) { array[i] = (String) list.get(i); + } return array; } diff --git a/src/main/java/io/mycat/util/StreamGobble.java 
b/src/main/java/io/mycat/util/StreamGobble.java new file mode 100644 index 000000000..86181eaff --- /dev/null +++ b/src/main/java/io/mycat/util/StreamGobble.java @@ -0,0 +1,43 @@ +package io.mycat.util; + + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; + +public class StreamGobble extends Thread { + InputStream is; + String type; + private StringBuffer result=new StringBuffer(); + + public String getResult() { + return result.toString(); + } + + private static Logger LOG = LoggerFactory.getLogger((StreamGobble.class)); + + StreamGobble(InputStream is, String type) { + this.is = is; + this.type = type; + } + + public void run() { + try { + InputStreamReader isr = new InputStreamReader(is); + BufferedReader br = new BufferedReader(isr); + String line = null; + while ((line = br.readLine()) != null) { + result.append(line).append("\n"); + LOG.info(line); + } + } catch (IOException ioe) { + LOG.error(ioe.getMessage()); + } + } +} + + diff --git a/src/main/java/io/mycat/util/StringUtil.java b/src/main/java/io/mycat/util/StringUtil.java index 9a7ccdb14..9968bbb81 100644 --- a/src/main/java/io/mycat/util/StringUtil.java +++ b/src/main/java/io/mycat/util/StringUtil.java @@ -23,19 +23,23 @@ */ package io.mycat.util; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import io.mycat.sqlengine.mpp.LoadData; import java.io.ByteArrayOutputStream; import java.io.UnsupportedEncodingException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.Random; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * @author mycat */ public class StringUtil { + public static final String TABLE_COLUMN_SEPARATOR = "."; + private static final Logger LOGGER = LoggerFactory.getLogger(StringUtil.class); private static final byte[] EMPTY_BYTE_ARRAY = new byte[0]; private static final Random 
RANDOM = new Random(); @@ -50,7 +54,7 @@ public class StringUtil { * 字符串hash算法:s[0]*31^(n-1) + s[1]*31^(n-2) + ... + s[n-1]
* 其中s[]为字符串的字符数组,换算成程序的表达式为:
* h = 31*h + s.charAt(i); => h = (h << 5) - h + s.charAt(i);
- * + * * @param start * hash for s.substring(start, end) * @param end @@ -119,10 +123,12 @@ public static boolean isEmpty(String str) { public static byte[] hexString2Bytes(char[] hexString, int offset, int length) { - if (hexString == null) + if (hexString == null) { return null; - if (length == 0) + } + if (length == 0) { return EMPTY_BYTE_ARRAY; + } boolean odd = length << 31 == Integer.MIN_VALUE; byte[] bs = new byte[odd ? (length + 1) >> 1 : length >> 1]; for (int i = offset, limit = offset + length; i < limit; ++i) { @@ -265,8 +271,9 @@ public static String dumpAsHex(byte[] src, int length) { int ptemp = p; for (int j = 0; j < 8; j++) { String hexVal = Integer.toHexString(src[ptemp] & 0xff); - if (hexVal.length() == 1) + if (hexVal.length() == 1) { out.append('0'); + } out.append(hexVal).append(' '); ptemp++; } @@ -285,8 +292,9 @@ public static String dumpAsHex(byte[] src, int length) { int n = 0; for (int i = p; i < length; i++) { String hexVal = Integer.toHexString(src[i] & 0xff); - if (hexVal.length() == 1) + if (hexVal.length() == 1) { out.append('0'); + } out.append(hexVal).append(' '); n++; } @@ -308,8 +316,9 @@ public static String dumpAsHex(byte[] src, int length) { public static byte[] escapeEasternUnicodeByteStream(byte[] src, String srcString, int offset, int length) { - if ((src == null) || (src.length == 0)) + if ((src == null) || (src.length == 0)) { return src; + } int bytesLen = src.length; int bufIndex = 0; int strIndex = 0; @@ -319,37 +328,41 @@ public static byte[] escapeEasternUnicodeByteStream(byte[] src, out.write(src[bufIndex++]); } else {// Grab the first byte int loByte = src[bufIndex]; - if (loByte < 0) + if (loByte < 0) { loByte += 256; // adjust for signedness/wrap-around + } out.write(loByte);// We always write the first byte if (loByte >= 0x80) { if (bufIndex < (bytesLen - 1)) { int hiByte = src[bufIndex + 1]; - if (hiByte < 0) + if (hiByte < 0) { hiByte += 256; // adjust for signedness/wrap-around + } out.write(hiByte);// write 
the high byte here, and // increment the index for the high // byte bufIndex++; - if (hiByte == 0x5C) + if (hiByte == 0x5C) { out.write(hiByte);// escape 0x5c if necessary + } } - } else if (loByte == 0x5c) { - if (bufIndex < (bytesLen - 1)) { + } else if (loByte == 0x5c + && bufIndex < (bytesLen - 1)) { int hiByte = src[bufIndex + 1]; - if (hiByte < 0) + if (hiByte < 0) { hiByte += 256; // adjust for signedness/wrap-around + } if (hiByte == 0x62) {// we need to escape the 0x5c out.write(0x5c); out.write(0x62); bufIndex++; } - } } bufIndex++; } - if (bufIndex >= bytesLen) + if (bufIndex >= bytesLen) { break;// we're done + } strIndex++; } return out.toByteArray(); @@ -374,8 +387,9 @@ public static boolean equalsIgnoreCase(String str1, String str2) { } public static int countChar(String str, char c) { - if (str == null || str.isEmpty()) + if (str == null || str.isEmpty()) { return 0; + } final int len = str.length(); int cnt = 0; for (int i = 0; i < len; ++i) { @@ -459,8 +473,8 @@ public static String replaceChars(String str, String searchChars, /** * insert into tablexxx - * - * @param sql + * + * @param oriSql * @return */ public static String getTableName(String oriSql) { @@ -531,7 +545,7 @@ public static String getTableName(String oriSql) { } return sql.substring(tableStartIndx, tableEndIndex); } - + /** * 移除`符号 * @param str @@ -552,6 +566,35 @@ public static String removeBackquote(String str){ return ""; } + public static String makeString(Object... 
args) { + StringBuilder stringBuilder = new StringBuilder(); + for (Object arg : args) { + stringBuilder.append(arg); + } + return stringBuilder.toString(); + } + + public static boolean isNull(String src) { + if (src == null || src.trim().equals("") || src.trim().equalsIgnoreCase("undefined")) { + return true; + } + return false; + } + + public static String sha1(String data) throws NoSuchAlgorithmException { + MessageDigest md = MessageDigest.getInstance("SHA1"); + md.update(data.getBytes()); + StringBuffer buf = new StringBuffer(); + byte[] bits = md.digest(); + for (int i = 0; i < bits.length; i++) { + int a = bits[i]; + if (a < 0) a += 256; + if (a < 16) buf.append("0"); + buf.append(Integer.toHexString(a)); + } + return buf.toString(); + } + public static void main(String[] args) { System.out.println(getTableName("insert into ssd (id) values (s)")); System.out.println(getTableName("insert into ssd(id) values (s)")); @@ -559,7 +602,7 @@ public static void main(String[] args) { System.out.println(getTableName(" insert into isd(id) values (s)")); System.out.println(getTableName("INSERT INTO test_activity_input (id,vip_no")); System.out.println(getTableName("/* ApplicationName=DBeaver 3.3.1 - Main connection */ insert into employee(id,name,sharding_id) values(4,’myhome’,10011)")); - - } + System.out.println(countChar("insert into ssd (id) values (s) ,(s),(7);",'(')); + } } \ No newline at end of file diff --git a/src/main/java/io/mycat/util/TimeUtil.java b/src/main/java/io/mycat/util/TimeUtil.java index eb1eaf708..e486dbd25 100644 --- a/src/main/java/io/mycat/util/TimeUtil.java +++ b/src/main/java/io/mycat/util/TimeUtil.java @@ -1,14 +1,42 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ package io.mycat.util; /** * 弱精度的计时器,考虑性能不使用同步策略。 - **/ + * + * @author mycat + */ public class TimeUtil { private static volatile long CURRENT_TIME = System.currentTimeMillis(); public static final long currentTimeMillis() { return CURRENT_TIME; } + public static final long currentTimeNanos() { + return System.nanoTime(); + } public static final void update() { CURRENT_TIME = System.currentTimeMillis(); diff --git a/src/main/java/io/mycat/util/ZKUtils.java b/src/main/java/io/mycat/util/ZKUtils.java new file mode 100644 index 000000000..ddccd63bf --- /dev/null +++ b/src/main/java/io/mycat/util/ZKUtils.java @@ -0,0 +1,103 @@ +package io.mycat.util; + +import io.mycat.MycatServer; +import io.mycat.config.loader.zkprocess.comm.ZkConfig; +import io.mycat.config.loader.zkprocess.comm.ZkParamCfg; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.CuratorFrameworkFactory; +import org.apache.curator.framework.recipes.cache.PathChildrenCache; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; +import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; 
+import org.apache.curator.retry.ExponentialBackoffRetry; +import org.apache.curator.retry.RetryForever; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.*; + +public class ZKUtils { + private static final Logger LOGGER = LoggerFactory.getLogger(ZKUtils.class); + static CuratorFramework curatorFramework = null; + static ConcurrentMap watchMap = new ConcurrentHashMap<>(); + + static { + curatorFramework = createConnection(); + Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() { + @Override + public void run() { + if (curatorFramework != null) + curatorFramework.close(); + watchMap.clear(); + } + })); + } + + public static String getZKBasePath() { + String clasterID = ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_CLUSTERID); + + return "/mycat/" + clasterID + "/"; + } + + public static CuratorFramework getConnection() { + return curatorFramework; + } + + private static CuratorFramework createConnection() { + String url = ZkConfig.getInstance().getZkURL(); + + CuratorFramework curatorFramework = CuratorFrameworkFactory.newClient(url, new ExponentialBackoffRetry(100, 6)); + + // start connection + curatorFramework.start(); + // wait 3 second to establish connect + try { + curatorFramework.blockUntilConnected(3, TimeUnit.SECONDS); + if (curatorFramework.getZookeeperClient().isConnected()) { + return curatorFramework; + } + } catch (InterruptedException ignored) { + Thread.currentThread().interrupt(); + } + + // fail situation + curatorFramework.close(); + throw new RuntimeException("failed to connect to zookeeper service : " + url); + } + + public static void closeWatch(List watchs) { + for (String watch : watchs) { + closeWatch(watch); + } + } + + public static void closeWatch(String path) { + PathChildrenCache childrenCache = watchMap.get(path); + if (childrenCache != null) { + try { + childrenCache.close(); + } catch (IOException e) { + throw new 
RuntimeException(e); + } + } + } + + public static void addChildPathCache(String path, PathChildrenCacheListener listener) { + NameableExecutor businessExecutor = MycatServer.getInstance().getBusinessExecutor(); + ExecutorService executor = businessExecutor == null ? Executors.newFixedThreadPool(5) : businessExecutor; + + try { + /** + * 监听子节点的变化情况 + */ + final PathChildrenCache childrenCache = new PathChildrenCache(getConnection(), path, true); + childrenCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT); + childrenCache.getListenable().addListener(listener, executor); + watchMap.put(path, childrenCache); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + +} diff --git a/src/main/java/io/mycat/util/CmdArgs.java b/src/main/java/io/mycat/util/cmd/CmdArgs.java similarity index 97% rename from src/main/java/io/mycat/util/CmdArgs.java rename to src/main/java/io/mycat/util/cmd/CmdArgs.java index 4365274a6..a391787d0 100644 --- a/src/main/java/io/mycat/util/CmdArgs.java +++ b/src/main/java/io/mycat/util/cmd/CmdArgs.java @@ -1,4 +1,4 @@ -package io.mycat.util; +package io.mycat.util.cmd; import java.util.HashMap; import java.util.Map; diff --git a/src/main/java/io/mycat/util/dataMigrator/ConfigComparer.java b/src/main/java/io/mycat/util/dataMigrator/ConfigComparer.java new file mode 100644 index 000000000..528d925b6 --- /dev/null +++ b/src/main/java/io/mycat/util/dataMigrator/ConfigComparer.java @@ -0,0 +1,261 @@ +package io.mycat.util.dataMigrator; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.DBHostConfig; +import io.mycat.config.model.DataHostConfig; 
+import io.mycat.config.model.DataNodeConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.config.model.rule.RuleConfig; +import io.mycat.config.util.ConfigException; +import io.mycat.route.function.AbstractPartitionAlgorithm; + +/** + * 数据迁移新旧配置文件加载、对比 + * @author haonan108 + * + */ +public class ConfigComparer { + + private static final Logger LOGGER = LoggerFactory.getLogger(ConfigComparer.class); + /* + *指定需要进行数据迁移的表及对应schema + * 配置文件格式 + * schema1=tb1,tb2,... + * schema2=all + * ... + */ + private final static String TABLES_FILE = "/migrateTables.properties"; + private final static String NEW_SCHEMA = "/newSchema.xml"; + private final static String NEW_RULE = "/newRule.xml"; + private final static String DN_INDEX_FILE = "/dnindex.properties"; + + private SchemaLoader oldLoader; + private SchemaLoader newLoader; + + private Map oldDataHosts; + private Map oldDataNodes; + private Map oldSchemas; + + private Map newDataHosts; + private Map newDataNodes; + private Map newSchemas; + + //即使发生主备切换也使用主数据源 + private boolean isAwaysUseMaster; + private Properties dnIndexProps; + + //此类主要目的是通过加载新旧配置文件来获取表迁移信息,migratorTables就是最终要获取的迁移信息集合 + private List migratorTables = new ArrayList(); + + public ConfigComparer(boolean isAwaysUseMaster) throws Exception{ + this.isAwaysUseMaster = isAwaysUseMaster; + loadOldConfig(); + loadNewConfig(); + loadTablesFile(); + } + + public List getMigratorTables(){ + return migratorTables; + } + + private void loadOldConfig(){ + try{ + oldLoader = new XMLSchemaLoader(); + oldDataHosts = oldLoader.getDataHosts(); + oldDataNodes = oldLoader.getDataNodes(); + oldSchemas = oldLoader.getSchemas(); + }catch(Exception e){ + throw new ConfigException(" old config for migrate read fail!please check schema.xml or rule.xml "+e); + } + + } + + private void loadNewConfig(){ + try{ + newLoader = new XMLSchemaLoader(NEW_SCHEMA, NEW_RULE); + newDataHosts = newLoader.getDataHosts(); + newDataNodes = 
newLoader.getDataNodes(); + newSchemas = newLoader.getSchemas(); + }catch(Exception e){ + throw new ConfigException(" new config for migrate read fail!please check newSchema.xml or newRule.xml "+e); + } + + } + + + private void loadTablesFile() throws Exception{ + Properties pro = new Properties(); + if(!isAwaysUseMaster){ + dnIndexProps = loadDnIndexProps(); + } + try{ + pro.load(ConfigComparer.class.getResourceAsStream(TABLES_FILE)); + }catch(Exception e){ + throw new ConfigException("tablesFile.properties read fail!"); + } + Iterator> it = pro.entrySet().iterator(); + while(it.hasNext()){ + Entry entry = it.next(); + String schemaName = entry.getKey().toString(); + String tables = entry.getValue().toString(); + loadMigratorTables(schemaName,getTables(tables)); + } + } + + private String[] getTables(String tables){ + if(tables.equalsIgnoreCase("all") || tables.isEmpty()){ + return new String[]{}; + }else{ + return tables.split(","); + } + } + + /* + * 加载迁移表信息,tables大小为0表示迁移schema下所有表 + */ + private void loadMigratorTables(String schemaName,String[] tables){ + if(!DataMigratorUtil.isKeyExistIgnoreCase(oldSchemas, schemaName)){ + throw new ConfigException("oldSchema:"+schemaName+" is not exists!"); + } + if(!DataMigratorUtil.isKeyExistIgnoreCase(newSchemas,schemaName)){ + throw new ConfigException("newSchema:"+schemaName+" is not exists!"); + } + Map oldTables = DataMigratorUtil.getValueIgnoreCase(oldSchemas, schemaName).getTables(); + Map newTables = DataMigratorUtil.getValueIgnoreCase(newSchemas, schemaName).getTables(); + if(tables.length>0){ + //指定schema下的表进行迁移 + for(int i =0;i oldSet = oldTables.keySet(); + Set newSet = newTables.keySet(); + if(!oldSet.equals(newSet)){ + throw new ConfigException("new & old table config is not equal!"); + } + for(String tableName:oldSet){ + TableConfig oldTable = oldTables.get(tableName); + TableConfig newTable = newTables.get(tableName); + loadMigratorTable(oldTable, newTable,schemaName,tableName); + } + } + + } + + + + 
private void loadMigratorTable(TableConfig oldTable,TableConfig newTable,String schemaName,String tableName){ + //禁止配置非拆分表 + if(oldTable == null || newTable == null){ + throw new ConfigException("please check tableFile.properties,make sure "+schemaName+":"+tableName+" is sharding table "); + } + //忽略全局表 + if(oldTable.isGlobalTable()||newTable.isGlobalTable()){ + String message = "global table: "+schemaName+":"+tableName+" is ignore!"; + System.out.println("Warn: "+message); + LOGGER.warn(message); + }else{ + List oldDN = getDataNodes(oldTable,oldDataNodes,oldDataHosts); + List newDN = getDataNodes(newTable,newDataNodes,newDataHosts); + //忽略数据节点分布没有发生变化的表 + if(isNeedMigrate(oldDN,newDN)){ + checkRuleConfig(oldTable.getRule(), newTable.getRule(),schemaName,tableName); + RuleConfig newRC=newTable.getRule(); + TableMigrateInfo tmi = new TableMigrateInfo(schemaName, tableName, oldDN, newDN, newRC.getRuleAlgorithm(), newRC.getColumn()); + migratorTables.add(tmi); + }else{ + String message = schemaName+":"+tableName+" is ignore,no need to migrate!"; + LOGGER.warn(message); + System.out.println("Warn: "+message); + } + + } + } + + //对比前后表数据节点分布是否一致 + private boolean isNeedMigrate(List oldDN,List newDN){ + if(oldDN.size() != newDN.size()){ + return true; + } + return false; + } + + //获取拆分表对应节点列表,具体到实例地址、库 + private List getDataNodes(TableConfig tableConfig,Map dnConfig,Map dhConfig){ + List dataNodes = new ArrayList(); + //TO-DO + ArrayList dataNodeNames = tableConfig.getDataNodes(); + int i = 0; + for(String name:dataNodeNames){ + DataNodeConfig config = dnConfig.get(name); + String db = config.getDatabase(); + String dataHost = config.getDataHost(); + DataHostConfig dh = dhConfig.get(dataHost); + String dbType = dh.getDbType(); + DBHostConfig[] writeHosts = dh.getWriteHosts(); + DBHostConfig currentWriteHost; + if(isAwaysUseMaster){ + currentWriteHost = writeHosts[0]; + }else{ + //迁移数据发生在当前切换后的数据源 + currentWriteHost = 
writeHosts[Integer.valueOf(dnIndexProps.getProperty(dh.getName()))]; + } + DataNode dn = new DataNode(name,currentWriteHost.getIp(), currentWriteHost.getPort(), currentWriteHost.getUser(), currentWriteHost.getPassword(), db, dbType,i++); + dataNodes.add(dn); + } + + return dataNodes; + } + + //校验前后路由规则是否一致 + private void checkRuleConfig(RuleConfig oldRC,RuleConfig newRC,String schemaName,String tableName){ + if(!oldRC.getColumn().equalsIgnoreCase(newRC.getColumn())){ + throw new ConfigException(schemaName+":"+tableName+" old & new partition column is not same!"); + } + AbstractPartitionAlgorithm oldAlg = oldRC.getRuleAlgorithm(); + AbstractPartitionAlgorithm newAlg = newRC.getRuleAlgorithm(); + //判断路由算法前后是否一致 + if(!oldAlg.getClass().isAssignableFrom(newAlg.getClass())){ + throw new ConfigException(schemaName+":"+tableName+" old & new rule Algorithm is not same!"); + } + } + + private Properties loadDnIndexProps() { + Properties prop = new Properties(); + InputStream is = null; + try { + is = ConfigComparer.class.getResourceAsStream(DN_INDEX_FILE); + prop.load(is); + } catch (Exception e) { + throw new ConfigException("please check file \"dnindex.properties\" "+e.getMessage()); + } finally { + try { + if(is !=null){ + is.close(); + } + } catch (IOException e) { + throw new ConfigException(e.getMessage()); + } + } + return prop; + } +} diff --git a/src/main/java/io/mycat/util/dataMigrator/DataClearRunner.java b/src/main/java/io/mycat/util/dataMigrator/DataClearRunner.java new file mode 100644 index 000000000..0d1563c20 --- /dev/null +++ b/src/main/java/io/mycat/util/dataMigrator/DataClearRunner.java @@ -0,0 +1,84 @@ +package io.mycat.util.dataMigrator; + +import java.io.File; +import java.io.IOException; +import java.sql.Connection; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.alibaba.druid.util.JdbcUtils; + +/** + * 清理数据扩容缩容后的冗余数据 + * @author haonan108 + 
* + */ +public class DataClearRunner implements Runnable{ + + private static final Logger LOGGER = LoggerFactory.getLogger(DataClearRunner.class); + private DataNode srcDn; + private File tempFile; + private TableMigrateInfo tableInfo; + + public DataClearRunner(TableMigrateInfo tableInfo,DataNode srcDn,File tempFile){ + this.tableInfo = tableInfo; + this.srcDn = srcDn; + this.tempFile = tempFile; + } + @Override + public void run() { + String data = ""; + long offset = 0; + Connection con = null; + try { + long start = System.currentTimeMillis(); + con = DataMigratorUtil.getMysqlConnection(srcDn); + if(tableInfo.isExpantion()){ + deleteDataDependFile(data, offset, con); + }else{ + //缩容,移除的节点直接truncate删除数据,非移除的节点按照临时文件的中值进行删除操作 + List list = tableInfo.getRemovedDataNodes(); + boolean isRemovedDn = false; + for(DataNode dn:list){ + if(srcDn.equals(dn)){ + isRemovedDn = true; + } + } + if(isRemovedDn){ + String sql = "truncate "+tableInfo.getTableName(); + JdbcUtils.execute(con, sql, new ArrayList<>()); + }else{ + deleteDataDependFile(data, offset, con); + } + } + long end = System.currentTimeMillis(); + System.out.println(tableInfo.getSchemaAndTableName()+" clean dataNode "+srcDn.getName()+" completed in "+(end-start)+"ms"); + + } catch (Exception e) { + String errMessage = srcDn.toString()+":"+"clean data error!"; + LOGGER.error(errMessage, e); + tableInfo.setError(true); + tableInfo.getErrMessage().append(errMessage+"\n"); + } finally{ + JdbcUtils.close(con); + } + } + + private void deleteDataDependFile(String data,long offset,Connection con) throws IOException, SQLException{ + while((data=DataMigratorUtil.readData(tempFile,offset,DataMigrator.margs.getQueryPageSize())).length()>0){ + offset += data.getBytes().length; + if(data.startsWith(",")){ + data = data.substring(1, data.length()); + } + if(data.endsWith(",")){ + data = data.substring(0,data.length()-1); + } + String sql = "delete from "+tableInfo.getTableName()+" where "+tableInfo.getColumn()+" in 
("+data+")"; + JdbcUtils.execute(con, sql, new ArrayList<>()); + } + } +} diff --git a/src/main/java/io/mycat/util/dataMigrator/DataIO.java b/src/main/java/io/mycat/util/dataMigrator/DataIO.java new file mode 100644 index 000000000..b336cd981 --- /dev/null +++ b/src/main/java/io/mycat/util/dataMigrator/DataIO.java @@ -0,0 +1,35 @@ +package io.mycat.util.dataMigrator; + +import java.io.File; +import java.io.IOException; + +/** + * 数据导入导出接口,mysql、oracle等数据库通过实现此接口提供具体的数据导入导出功能 + * @author haonan108 + * + */ +public interface DataIO { + + /** + * 导入数据 + * @param dn 导入到具体的数据库 + * @param file 导入的文件 + * @throws IOException + * @throws InterruptedException + */ + + void importData(TableMigrateInfo table,DataNode dn,String tableName,File file) throws IOException, InterruptedException; + + /** + * 根据条件导出迁移数据 + * @param dn 导出哪个具体的数据库 + * @param tableName 导出的表名称 + * @param export 文件导出到哪里 + * @param condion 导出文件依赖的具体条件 + * @return + * @throws IOException + * @throws InterruptedException + */ + File exportData(TableMigrateInfo table,DataNode dn,String tableName,File exportPath,File condion) throws IOException, InterruptedException; + +} diff --git a/src/main/java/io/mycat/util/dataMigrator/DataIOFactory.java b/src/main/java/io/mycat/util/dataMigrator/DataIOFactory.java new file mode 100644 index 000000000..60099a372 --- /dev/null +++ b/src/main/java/io/mycat/util/dataMigrator/DataIOFactory.java @@ -0,0 +1,19 @@ +package io.mycat.util.dataMigrator; + +import io.mycat.util.dataMigrator.dataIOImpl.MysqlDataIO; +import io.mycat.util.exception.DataMigratorException; + +public class DataIOFactory { + + public static final String MYSQL = "mysql"; + public static final String ORACLE = "oracle"; + + public static DataIO createDataIO(String dbType){ + switch (dbType) { + case MYSQL: + return new MysqlDataIO(); + default: + throw new DataMigratorException("dbType:"+dbType+" is not support for the moment!"); + } + } +} diff --git 
a/src/main/java/io/mycat/util/dataMigrator/DataMigrateRunner.java b/src/main/java/io/mycat/util/dataMigrator/DataMigrateRunner.java new file mode 100644 index 000000000..ab4f32251 --- /dev/null +++ b/src/main/java/io/mycat/util/dataMigrator/DataMigrateRunner.java @@ -0,0 +1,54 @@ +package io.mycat.util.dataMigrator; + +import java.io.File; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * 数据迁移过程类 + * @author haonan108 + * + */ +public class DataMigrateRunner implements Runnable{ + + private static final Logger LOGGER = LoggerFactory.getLogger(DataMigrateRunner.class); + private DataNode src; + private DataNode target; + private String tableName; + private DataIO dataIO; + private File conditionFile; + private TableMigrateInfo table; + + + + public DataMigrateRunner(TableMigrateInfo table, DataNode src,DataNode target,String tableName,File conditionFile){ + this.tableName = tableName; + this.conditionFile= conditionFile; + this.src = src; + this.target = target; + this.table = table; + dataIO = DataIOFactory.createDataIO(src.getDbType()); + } + + @Override + public void run() { + if(table.isError()) { + return; + } + try { + long start = System.currentTimeMillis(); + File loadFile = dataIO.exportData(table,src, tableName, conditionFile.getParentFile(), conditionFile); + dataIO.importData(table,target,tableName, loadFile); + long end = System.currentTimeMillis(); + System.out.println(table.getSchemaAndTableName()+" "+src.getName()+"->"+target.getName()+" completed in "+(end-start)+"ms"); + } catch (Exception e) { + String errMessage = table.getSchemaAndTableName()+" "+src.getName()+"->"+target.getName()+" migrate err! 
"+e.getMessage(); + LOGGER.error(errMessage, e); + table.setError(true); + table.getErrMessage().append(errMessage); + } + } + +} diff --git a/src/main/java/io/mycat/util/dataMigrator/DataMigrator.java b/src/main/java/io/mycat/util/dataMigrator/DataMigrator.java new file mode 100644 index 000000000..cb94a7ddf --- /dev/null +++ b/src/main/java/io/mycat/util/dataMigrator/DataMigrator.java @@ -0,0 +1,278 @@ +package io.mycat.util.dataMigrator; + +import java.io.File; +import java.sql.SQLException; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * 数据迁移统一调度类,支持扩容缩容 + * 原理:读取需要迁移的数据节点表所有拆分字段数据,按照扩容或缩容后的配置对拆分字段重新计算路由节点, + * 将需要迁移的数据导出,然后导入到扩容或缩容后对应的数据节点 + * @author haonan108 + * + */ +public class DataMigrator { + + private static final Logger LOGGER = LoggerFactory.getLogger(DataMigrator.class); + + public static DataMigratorArgs margs; + + private List migrateTables; + + private ExecutorService executor; + + private List clearGroup = new ArrayList<>(); + + public DataMigrator(String[] args){ + margs = new DataMigratorArgs(args); + executor = new ThreadPoolExecutor(margs.getThreadCount(), margs.getThreadCount(), + 0L, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue(),new ThreadPoolExecutor.CallerRunsPolicy()); + + + try { + createTempParentDir(margs.getTempFileDir()); + ConfigComparer loader = new ConfigComparer(margs.isAwaysUseMaster()); + migrateTables = loader.getMigratorTables(); + //建表 + for(TableMigrateInfo table:migrateTables){ + table.setTableStructure(); + table.createTableToNewDataNodes(); + } + } catch 
(Exception e) { + LOGGER.error(e.getMessage(),e); + System.out.println(e.getMessage()); + //配置错误退出迁移程序 + System.exit(-1); + } + } + + public static void main(String[] args) throws SQLException { + long start = System.currentTimeMillis(); + DateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss:SSS"); + System.out.println("\n"+format.format(new Date())+" [1]-> creating migrator schedule and temp files for migrate..."); + //初始化配置 + DataMigrator migrator = new DataMigrator(args); + + //生成中间文件 + migrator.createTempFiles(); + migrator.changeSize(); + migrator.printInfo(); + + //迁移数据 + System.out.println("\n"+format.format(new Date())+" [2]-> start migrate data..."); + migrator.migrateData(); + + //清除中间临时文件、清除被迁移掉的冗余数据 + System.out.println("\n"+format.format(new Date())+" [3]-> cleaning redundant data..."); + migrator.clear(); + + //校验数据是否迁移成功 + System.out.println("\n"+format.format(new Date())+" [4]-> validating tables migrate result..."); + migrator.validate(); + migrator.clearTempFiles(); + long end = System.currentTimeMillis(); + System.out.println("\n"+format.format(new Date())+" migrate data complete in "+(end-start)+"ms"); + } + + //打印各个表的迁移数据信息 + private void printInfo() { + for(TableMigrateInfo table:migrateTables){ + table.printMigrateInfo(); + table.printMigrateSchedule(); + } + } + + //删除临时文件 + private void clearTempFiles() { + File tempFileDir = new File(margs.getTempFileDir()); + if(tempFileDir.exists() && margs.isDeleteTempDir()){ + DataMigratorUtil.deleteDir(tempFileDir); + } + } + + //生成需要进行迁移的数据依赖的拆分字段值文件 + private void createTempFiles(){ + for(TableMigrateInfo table:migrateTables){ + //创建具体拆分表中间临时文件 + createTableTempFiles(table); + } + executor.shutdown(); + while(true){ + if(executor.isTerminated()){ + break; + } + try { + Thread.sleep(200); + } catch (InterruptedException e) { + LOGGER.error("error",e); + } + } + } + + private void migrateData() throws SQLException{ + executor = new ThreadPoolExecutor(margs.getThreadCount(), 
margs.getThreadCount(), + 0L, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue(),new ThreadPoolExecutor.CallerRunsPolicy()); + for(TableMigrateInfo table:migrateTables){ + if(!table.isError()){ //忽略已出错的拆分表 + List detailList = table.getDataNodesDetail(); + for(DataNodeMigrateInfo info:detailList){ + executor.execute(new DataMigrateRunner(table, info.getSrc(), info.getTarget(), table.getTableName(), info.getTempFile())); + } + } + } + executor.shutdown(); + while(true){ + if(executor.isTerminated()){ + break; + } + try { + Thread.sleep(200); + } catch (InterruptedException e) { + LOGGER.error("error",e); + } + } + } + + //缩容需要重新计算表大小 + private void changeSize() throws SQLException { + for(TableMigrateInfo table:migrateTables){ + if(!table.isExpantion()){ + List oldDn = table.getOldDataNodes(); + long size = 0L; + for(DataNode dn:oldDn){ + size+=DataMigratorUtil.querySize(dn, table.getTableName()); + } + table.setSize(size); + } + } + } + + //校验迁移计划中数据迁移情况同数据实际落盘是否一致 + private void validate() throws SQLException { + for(TableMigrateInfo table:migrateTables){ + if (table.isError()) { + continue; + } + long size = table.getSize().get(); + long factSize = 0L; + for(DataNode dn:table.getNewDataNodes()){ + factSize+=DataMigratorUtil.querySize(dn, table.getTableName()); + } + if(factSize != size){ + String message = "migrate error!after migrate should be:"+size+" but fact is:"+factSize; + table.setError(true); + table.setErrMessage(message); + } + } + + //打印最终迁移结果信息 + String title = "migrate result"; + Map result = new HashMap(); + for(TableMigrateInfo table:migrateTables){ + String resultMessage = table.isError()?"fail! 
reason: "+table.getErrMessage():"success"; + result.put(table.getSchemaAndTableName(), resultMessage); + } + String info = DataMigratorUtil.printMigrateInfo(title, result, "->"); + System.out.println(info); + } + + //清除中间临时文件、导出的迁移数据文件、已被迁移的原始节点冗余数据 + private void clear(){ + for(TableMigrateInfo table:migrateTables){ + makeClearDataGroup(table); + } + for(DataNodeClearGroup group:clearGroup){ + clearData(group.getTempFiles(), group.getTableInfo()); + } + } + + //同一主机上的mysql执行按where条件删除数据并发多了性能反而下降很快 + //按照主机ip进行分组,每个主机ip分配一个线程池,线程池大小可配置,默认为当前主机环境cpu核数的一半 + private void makeClearDataGroup(TableMigrateInfo table){ + List list = table.getDataNodesDetail(); + //将数据节点按主机ip分组,每组分配一个线程池 + for(DataNodeMigrateInfo dnInfo:list){ + DataNode src = dnInfo.getSrc(); + String ip =src.getIp(); + File f = dnInfo.getTempFile(); + DataNodeClearGroup group = getDataNodeClearGroup(ip,table); + if(group == null){ + group = new DataNodeClearGroup(ip, table); + clearGroup.add(group); + } + group.getTempFiles().put(f, src); + } + } + + private DataNodeClearGroup getDataNodeClearGroup(String ip, TableMigrateInfo table){ + DataNodeClearGroup result = null; + for(DataNodeClearGroup group:clearGroup){ + if(group.getIp().equals(ip) && group.getTableInfo().equals(table)){ + result = group; + } + } + return result; + } + + private void clearData(Map map,TableMigrateInfo table){ + if(table.isError()) { + return; + } + ExecutorService executor = new ThreadPoolExecutor(margs.getThreadCount(), margs.getThreadCount(), + 0L, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue(),new ThreadPoolExecutor.CallerRunsPolicy()); + Iterator> it = map.entrySet().iterator(); + while(it.hasNext()){ + Entry et = it.next(); + File f =et.getKey(); + DataNode srcDn = et.getValue(); + executor.execute(new DataClearRunner(table, srcDn, f)); + } + executor.shutdown(); + while(true){ + if(executor.isTerminated()){ + break; + } + try { + Thread.sleep(200); + } catch (InterruptedException e) { + LOGGER.error("error",e); + } + 
} + } + + private void createTempParentDir(String dir){ + File outputDir = new File(dir); + if(outputDir.exists()){ + DataMigratorUtil.deleteDir(outputDir); + } + outputDir.mkdirs(); + outputDir.setWritable(true); + } + + private void createTableTempFiles(TableMigrateInfo table) { + List oldDn = table.getOldDataNodes(); + //生成迁移中间文件,并生成迁移执行计划 + for(DataNode dn:oldDn){ + executor.execute(new MigratorConditonFilesMaker(table,dn,margs.getTempFileDir(),margs.getQueryPageSize())); + } + } +} diff --git a/src/main/java/io/mycat/util/dataMigrator/DataMigratorArgs.java b/src/main/java/io/mycat/util/dataMigrator/DataMigratorArgs.java new file mode 100644 index 000000000..429c2e031 --- /dev/null +++ b/src/main/java/io/mycat/util/dataMigrator/DataMigratorArgs.java @@ -0,0 +1,147 @@ +package io.mycat.util.dataMigrator; + +import java.io.File; + +import io.mycat.config.model.SystemConfig; +import io.mycat.util.cmd.CmdArgs; + + + + +/** + * 数据迁移工具依赖参数 + * @author haonan108 + * + */ +public class DataMigratorArgs { + + /** 并行线程数*/ + public static final String THREAD_COUNT = "threadCount"; + + /** mysqldump命令所在路径 */ + public static final String MYSQL_BIN = "mysqlBin"; + + /** 数据迁移生成的中间文件指定存放目录*/ + public static final String TEMP_FILE_DIR = "tempFileDir"; + + /** 使用主数据源还是当前数据源(如果发生主备切换存在数据源选择问题)*/ + public static final String IS_AWAYS_USE_MASTER = "isAwaysUseMaster"; + + /**生成中间临时文件一次加载的数据量*/ + public static final String QUERY_PAGE_SIZE = "queryPageSize"; + + public static final String DEL_THRAD_COUNT = "delThreadCount"; + + /** mysqldump导出中间文件命令操作系统限制长度 */ + public static final String MYSQL_DUMP_CMD_LENGTH = "cmdLength"; + + public static final String CHARSET = "charset"; + + /**完成扩容缩容后清除临时文件 默认为true*/ + public static final String DELETE_TEMP_FILE_DIR = "deleteTempFileDir"; + + + + private static final int DEFAULT_THREAD_COUNT = Runtime.getRuntime().availableProcessors()*2; + + private static final int DEFAULT_DEL_THRAD_COUNT = Runtime.getRuntime().availableProcessors()/2; + + 
private static final int DEFAULT_CMD_LENGTH = 110*1024;//操作系统命令行限制长度 110KB + + private static final int DEFAULT_PAGE_SIZE = 100000;//默认一次读取10w条数据 + + private static final String DEFAULT_CHARSET = "utf8"; + + private CmdArgs cmdArgs; + + public DataMigratorArgs(String[] args){ + cmdArgs = CmdArgs.getInstance(args); + } + + public String getString(String name){ + return cmdArgs.getString(name); + } + + public String getMysqlBin(){ + String result = getString(MYSQL_BIN); + if(result ==null) { + return ""; + } + if(!result.isEmpty() &&!result.endsWith("/")){ + result +="/"; + } + return result; + } + + public String getTempFileDir(){ + String path = getString(TEMP_FILE_DIR); + if(null == path || path.trim().isEmpty()){ + return SystemConfig.getHomePath()+File.separator+"temp"; + } + return path; + } + + public int getThreadCount(){ + String count =getString(THREAD_COUNT); + if(null == count||count.isEmpty()|| count.equals("0") ){ + return DEFAULT_THREAD_COUNT; + } + return Integer.valueOf(count); + } + + public int getDelThreadCount(){ + String count =getString(DEL_THRAD_COUNT); + if(null == count||count.isEmpty()|| count.equals("0") ){ + return DEFAULT_DEL_THRAD_COUNT; + } + return Integer.valueOf(count); + } + + public boolean isAwaysUseMaster(){ + String result = getString(IS_AWAYS_USE_MASTER); + if(null == result||result.isEmpty()||result.equals("true")){ + return true; + } + return false; + } + + public int getCmdLength(){ + String result = getString(MYSQL_DUMP_CMD_LENGTH); + if(null == result||result.isEmpty()){ + return DEFAULT_CMD_LENGTH; + } + if(result.contains("*")){ + String[] arr = result.split("\\*"); + int j = 1; + for (int i = 0; i < arr.length; i++) { + j *= Integer.valueOf(arr[i]); + } + return j; + } + return Integer.valueOf(result); + } + + public int getQueryPageSize(){ + String result = getString(QUERY_PAGE_SIZE); + if(null == result||result.isEmpty()){ + return DEFAULT_PAGE_SIZE; + } + return Integer.valueOf(result); + } + + public String 
getCharSet(){ + String result = getString(CHARSET); + if(null == result||result.isEmpty()){ + return DEFAULT_CHARSET; + } + return result; + } + + public boolean isDeleteTempDir(){ + String result = getString(DELETE_TEMP_FILE_DIR); + if(null == result||result.isEmpty()||result.equals("true")){ + return true; + } + return false; + } +} diff --git a/src/main/java/io/mycat/util/dataMigrator/DataMigratorUtil.java b/src/main/java/io/mycat/util/dataMigrator/DataMigratorUtil.java new file mode 100644 index 000000000..bc4fccab8 --- /dev/null +++ b/src/main/java/io/mycat/util/dataMigrator/DataMigratorUtil.java @@ -0,0 +1,400 @@ +package io.mycat.util.dataMigrator; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.regex.Matcher; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.alibaba.druid.util.JdbcUtils; + +public class DataMigratorUtil { + + private static final Logger LOGGER = LoggerFactory.getLogger(DataMigratorUtil.class); + + static{ + try { + Class.forName("com.mysql.jdbc.Driver"); + } catch (ClassNotFoundException e) { + LOGGER.error("",e); + } + } + + /** + * 添加数据到文件末尾 + * @param file + * @param content + * @throws IOException + */ + public static void appendDataToFile(File file, String content) throws IOException { + RandomAccessFile randomFile = null; + try { + // 打开一个随机访问文件流,按读写方式 + randomFile = new RandomAccessFile(file, "rw"); + // 文件长度,字节数 + long fileLength = randomFile.length(); + // 将写文件指针移到文件尾。 + randomFile.seek(fileLength); + randomFile.writeBytes(content); + 
content = null; + } catch (IOException e) { + LOGGER.error("appendDataToFile is error!",e); + } finally{ + if(randomFile != null){ + try { + randomFile.close(); + } catch (IOException e) { + LOGGER.error("error",e); + } + } + } + } + + public static String readDataFromFile(File file,long offset,int length) throws IOException{ + RandomAccessFile randomFile = null; + try { + // 打开一个随机访问文件流,按读写方式 + randomFile = new RandomAccessFile(file, "rw"); + randomFile.seek(offset); + byte[] buffer = new byte[length]; + randomFile.read(buffer); + return new String(buffer).trim(); + } catch (IOException e) { + throw e; + } finally{ + if(randomFile != null){ + try { + randomFile.close(); + } catch (IOException e) { + LOGGER.error("error",e); + } + } + } + } + + /** + * 读取逗号分隔的文件数据 + * @param file + * @param start 文件起始位置 + * @param length 读取字节数 + * @return + * @throws IOException + */ + public static String readData(File file,long start,int length) throws IOException{ + String data = readDataFromFile(file, start, length); + if((start+length)<=file.length()){ + data = data.substring(0, data.lastIndexOf(",")); + } + + return data; + } + + public static final int BUFSIZE = 1024 * 8; + + public static void mergeFiles(File outFile, File f) throws IOException { + FileChannel outChannel = null; + FileOutputStream fos = null; + FileInputStream fis = null; + try { + fos = new FileOutputStream(outFile,true); + fis = new FileInputStream(f); + outChannel = fos.getChannel(); + FileChannel fc = fis.getChannel(); + ByteBuffer bb = ByteBuffer.allocate(BUFSIZE); + while(fc.read(bb) != -1){ + bb.flip(); + outChannel.write(bb); + bb.clear(); + } + fc.close(); + } catch (IOException e) { + throw e; + } finally { + try { + if(fos != null){ + fos.close(); + } + if(fis != null){ + fis.close(); + } + if (outChannel != null){ + outChannel.close(); + } + } + catch (IOException e) { + LOGGER.error("error",e); + } + } + } + + /** + * 统计文件有多少行 + * @param file + * @return + */ + public static long countLine(File 
file) throws IOException{ + long count = 0L; + RandomAccessFile randomFile = null; + + // 打开一个随机访问文件流,按读写方式 + try { + randomFile = new RandomAccessFile(file, "rw"); + String s =""; + while((s=randomFile.readLine())!=null && !s.trim().isEmpty()){ + count++; + } + } catch (FileNotFoundException e) { + throw e; + }finally{ + if(randomFile != null){ + try { + randomFile.close(); + } catch (IOException e) { + LOGGER.error("error",e); + } + } + } + + return count; + } + + /** + * 递归删除目录下的所有文件及子目录下所有文件 + * @param dir 将要删除的文件目录 + * @return boolean Returns "true" if all deletions were successful. + * If a deletion fails, the method stops attempting to + * delete and returns "false". + */ + public static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + String[] children = dir.list(); + for (int i=0; i paramList= Arrays.asList(params); + for(Object param:paramList){ + cmd = cmd.replaceFirst("\\"+mark, Matcher.quoteReplacement(param.toString())); + } + return cmd; + } + + public static Connection getMysqlConnection(DataNode dn) throws SQLException{ + Connection con = null; + con = DriverManager.getConnection(dn.getUrl(), dn.getUserName(), dn.getPwd()); + return con; + } + + public static List> executeQuery(Connection conn, String sql,Object... 
parameters) throws SQLException{ + return JdbcUtils.executeQuery(conn, sql, Arrays.asList(parameters)); + } + + //查询表数据量 + public static long querySize(DataNode dn,String tableName) throws SQLException{ + List> list=null; + long size = 0L; + Connection con = null; + try { + con = getMysqlConnection(dn); + list = executeQuery(con, "select count(1) size from "+tableName); + size = (long) list.get(0).get("size"); + } catch (SQLException e) { + throw e; + }finally{ + JdbcUtils.close(con); + } + return size; + } + + public static void createTable(DataNode dn,String table) throws SQLException{ + Connection con = null; + try { + con = getMysqlConnection(dn); + JdbcUtils.execute(con, table, new ArrayList<>()); + } catch (SQLException e) { + throw e; + }finally{ + JdbcUtils.close(con); + } + } + + /** + * 格式化数据迁移信息 + * +---------title-------+ + * |key1 = value1 | + * |key2 = value2 | + * |... | + * +---------------------+ + * @param title + * @param map + * @param mark + * @return + */ + public static String printMigrateInfo(String title,Map map,String mark){ + StringBuilder result = new StringBuilder(" "); + List mergeList = new ArrayList<>(); + + Iterator> itor = map.entrySet().iterator(); + + int maxKeyLength = 0; + int maxValueLength = 0; + while(itor.hasNext()){ + Entry entry = itor.next(); + String key = entry.getKey(); + String value = entry.getValue(); + maxKeyLength = (key.length()>maxKeyLength)?key.length():maxKeyLength; + maxValueLength = (value.length()>maxValueLength)?value.length():maxValueLength; + } + + int maxLength=maxKeyLength+maxValueLength+2+mark.length(); + if(maxLength<= title.length()){ + maxLength = title.length()+8; + } + itor = map.entrySet().iterator(); + //合并key和value,并找出长度最大的字符串 + while(itor.hasNext()){ + Entry entry = itor.next(); + String key = entry.getKey(); + String value = entry.getValue(); + int keyLength = maxKeyLength-key.length(); + StringBuilder keySb = new StringBuilder(key); + for(int i=0;i maxLineLength){ + maxLength = 
maxLineLength; + } + //拼第一行title + StringBuilder titleSb = new StringBuilder("+"); + int halfLength = (maxLength-title.length())/2; + for(int i=0;i changeList = new ArrayList<>(); + //调整内容 + for(int i=0;i=maxLength){ + String[] str = content.split(mark); + String key = str[0]; + String value =str[1]; + String[] values = getValues(value,maxLength-maxKeyLength-1-mark.length()); + for(int j=0;j 0){ + StringBuilder keySb = new StringBuilder(); + for(int k=0;k boolean isKeyExistIgnoreCase(Map map,String key){ + return map.containsKey(key.toLowerCase()) || map.containsKey(key.toUpperCase()); + } + + public static T getValueIgnoreCase(Map map,String key){ + T result = map.get(key.toLowerCase()); + return result==null?map.get(key.toUpperCase()):result; + } + + public static Process exeCmdByOs(String cmd) throws IOException{ + Process process = null; + + Runtime runtime = Runtime.getRuntime(); + + String osName = System.getProperty("os.name"); + + if(osName.toLowerCase().startsWith("win")){ + process = runtime.exec((new String[]{"cmd","/C",cmd})); + }else{ + process = runtime.exec((new String[]{"sh","-c",cmd})); + } + return process; + } + + private static String[] getValues(String value, int maxValueLength) { + int length = value.length()/maxValueLength; + if(value.length()%maxValueLength>0){ + length+=1; + } + String[] result = new String[length]; + for(int i=0;i tempFiles = new HashMap<>(); + private TableMigrateInfo tableInfo; + + public DataNodeClearGroup(String ip, TableMigrateInfo tableInfo) { + super(); + this.ip = ip; + this.tableInfo = tableInfo; + } + public String getIp() { + return ip; + } + public void setIp(String ip) { + this.ip = ip; + } + public Map getTempFiles() { + return tempFiles; + } + public void setTempFiles(Map tempFiles) { + this.tempFiles = tempFiles; + } + public TableMigrateInfo getTableInfo() { + return tableInfo; + } + public void setTableInfo(TableMigrateInfo tableInfo) { + this.tableInfo = tableInfo; + } + +} diff --git 
a/src/main/java/io/mycat/util/dataMigrator/DataNodeMigrateInfo.java b/src/main/java/io/mycat/util/dataMigrator/DataNodeMigrateInfo.java new file mode 100644 index 000000000..c8e51692c --- /dev/null +++ b/src/main/java/io/mycat/util/dataMigrator/DataNodeMigrateInfo.java @@ -0,0 +1,60 @@ +package io.mycat.util.dataMigrator; + +import java.io.File; + +/** + * 数据迁移时数据节点间迁移信息 + * @author haonan108 + * + */ +public class DataNodeMigrateInfo { + + private DataNode src; + private DataNode target; + private File tempFile; + private long size; + private TableMigrateInfo table; + + public DataNodeMigrateInfo(TableMigrateInfo table, DataNode src, DataNode target, File tempFile, long size) { + super(); + this.table = table; + this.src = src; + this.target = target; + this.tempFile = tempFile; + this.size = size; + } + + public TableMigrateInfo getTable() { + return table; + } + + public void setTable(TableMigrateInfo table) { + this.table = table; + } + + public DataNode getSrc() { + return src; + } + public void setSrc(DataNode src) { + this.src = src; + } + public DataNode getTarget() { + return target; + } + public void setTarget(DataNode target) { + this.target = target; + } + public File getTempFile() { + return tempFile; + } + public void setTempFile(File tempFile) { + this.tempFile = tempFile; + } + public long getSize() { + return size; + } + public void setSize(long size) { + this.size = size; + } + +} diff --git a/src/main/java/io/mycat/util/dataMigrator/MigratorConditonFilesMaker.java b/src/main/java/io/mycat/util/dataMigrator/MigratorConditonFilesMaker.java new file mode 100644 index 000000000..6ab7a1862 --- /dev/null +++ b/src/main/java/io/mycat/util/dataMigrator/MigratorConditonFilesMaker.java @@ -0,0 +1,182 @@ +package io.mycat.util.dataMigrator; + +import java.io.File; +import java.io.IOException; +import java.sql.Connection; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; + +import com.alibaba.druid.util.JdbcUtils; + +import io.mycat.route.function.AbstractPartitionAlgorithm; +import io.mycat.util.CollectionUtil; + +/** + * 对具体某个节点重新路由 生成导出数据所依赖的中间文件 + * @author haonan108 + */ +public class MigratorConditonFilesMaker implements Runnable{ + + private static final Logger LOGGER = LoggerFactory.getLogger(MigratorConditonFilesMaker.class); + private DataNode srcDn; + private List newDnList; + private String column; + private String tableName; + private AbstractPartitionAlgorithm alg; + private String tempFileDir; + private TableMigrateInfo tableInfo; + private int newDnSize; + private int pageSize; + + private Map files = new HashMap<>(); + + Map map = new HashMap<>();//存放节点发生变化的拆分字段字符串数据 key:dn索引 value 拆分字段值,以逗号分隔 + + public MigratorConditonFilesMaker(TableMigrateInfo tableInfo,DataNode srcDn,String tempFileDir, int pageSize){ + this.tableInfo = tableInfo; + this.tempFileDir = tempFileDir; + this.srcDn = srcDn; + this.newDnList = tableInfo.getNewDataNodes(); + this.column = tableInfo.getColumn(); + this.tableName = tableInfo.getTableName(); + this.alg = tableInfo.getNewRuleAlgorithm(); + this.newDnSize = newDnList.size(); + this.pageSize = pageSize; + } + + @Override + public void run() { + if(tableInfo.isError()) { + return; + } + + long[] count = new long[newDnSize]; + int page=0; + List> list=null; + + Connection con = null; + try { + con = DataMigratorUtil.getMysqlConnection(srcDn); + //创建空的中间临时文件 + createTempFiles(); + + //暂时只实现mysql的分页查询 + list = DataMigratorUtil.executeQuery(con, "select " + + column+ " from " + tableName + " limit ?,?", page++ * pageSize, + pageSize); + int total = 0; //该节点表总数据量 + + while (!CollectionUtil.isEmpty(list)) { + if(tableInfo.isError()) { + return; + } + flushData(false); + for(int i=0,l=list.size();i sf=list.get(i); + String filedVal = sf.get(column).toString(); + Integer newIndex=alg.calculate(filedVal); + total++; + DataNode newDn = newDnList.get(newIndex); + 
if(!srcDn.equals(newDn)){ + count[newIndex]++; + map.get(newDn).append(filedVal+","); + } + } + list = DataMigratorUtil.executeQuery(con, "select " + + column + " from " + tableName + " limit ?,?", page++ * pageSize, + pageSize); + } + flushData(true); + statisticalData(total,count); + } catch (Exception e) { + //发生错误,终止此拆分表所有节点线程任务,记录错误信息,退出此拆分表迁移任务 + String message = "["+tableInfo.getSchemaName()+":"+tableName+"] src dataNode: "+srcDn.getUrl()+ + " prepare temp files is failed! this table's migrator will exit! "+e.getMessage(); + tableInfo.setError(true); + tableInfo.setErrMessage(message); + System.out.println(message); + LOGGER.error(message, e); + }finally{ + JdbcUtils.close(con); + } + } + + //创建中间临时文件 + private void createTempFiles() throws IOException{ + File parentFile = createDirIfNotExist(); + for(DataNode dn:newDnList){ + if(!srcDn.equals(dn)){ + map.put(dn, new StringBuilder()); + createTempFile(parentFile,dn); + } + } + } + + + //中间临时文件 格式: srcDnName-targetDnName.txt 中间文件存在的话会被清除 + private void createTempFile(File parentFile, DataNode dn) throws IOException { + File f = new File(parentFile,srcDn.getName()+"(old)"+"-"+dn.getName()+"(new).txt"); + if(f.exists()){ + f.delete(); + } + f.createNewFile(); + files.put(dn, f); + } + + //统计各节点数据迁移信息,并移除空文件 + private void statisticalData(int total, long[] count){ + tableInfo.getSize().addAndGet(total); + List list = tableInfo.getDataNodesDetail(); + List sizeList = new ArrayList<>(); + for(int i=0;i0){ + DataNodeMigrateInfo info =new DataNodeMigrateInfo(tableInfo,srcDn, targetDn, files.get(targetDn), c); + list.add(info); + }else{ + File f = files.get(targetDn); + if(f != null && f.exists()){ + f.delete(); + } + files.remove(targetDn); + } + } + Map map = tableInfo.getDnMigrateSize(); + map.put(srcDn.getName()+"["+total+"]", sizeList.toString()); + } + + //将迁移字段值写入中间文件,数据超过1024或者要求强制才写入,避免重复打开关闭写入文件 + private void flushData(boolean isForce) throws IOException { + for(DataNode dn:newDnList){ + StringBuilder sb = 
map.get(dn); + if(sb == null) { + continue; + } + if((isForce || sb.toString().getBytes().length>1024) && sb.length()>0){ + String s = sb.toString(); + if(isForce){//最后一次将末尾的','截掉 + s = s.substring(0, s.length()-1); + } + DataMigratorUtil.appendDataToFile(files.get(dn),s); + sb = new StringBuilder(); + map.put(dn, sb); + } + } + } + + //创建中间临时文件父目录 + private File createDirIfNotExist() { + File f = new File(tempFileDir,tableInfo.getSchemaName()+"-"+tableName); + if(!f.exists()){ + f.mkdirs(); + } + return f; + } +} diff --git a/src/main/java/io/mycat/util/dataMigrator/TableMigrateInfo.java b/src/main/java/io/mycat/util/dataMigrator/TableMigrateInfo.java new file mode 100644 index 000000000..d4269ff06 --- /dev/null +++ b/src/main/java/io/mycat/util/dataMigrator/TableMigrateInfo.java @@ -0,0 +1,243 @@ +package io.mycat.util.dataMigrator; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.atomic.AtomicLong; + +import com.alibaba.druid.util.JdbcUtils; + +import io.mycat.route.function.AbstractPartitionAlgorithm; + + +/** + * 表迁移信息,包括: + * 表名、迁移前后的数据节点、表数据量、迁移前后数据分布对比 + * @author haonan108 + * + */ + +public class TableMigrateInfo { + + private String schemaName; + private String tableName; + private List oldDataNodes; + private List newDataNodes; + private AtomicLong size = new AtomicLong(); + + private List dataNodesDetail = new ArrayList<>();//节点间数据迁移详细信息 + + private AbstractPartitionAlgorithm newRuleAlgorithm; + private String column; + + private boolean isExpantion; //true:扩容 false:缩容 + + private volatile boolean isError; + + private StringBuffer errMessage = new StringBuffer(); + + private String tableStructure = ""; //记录建表信息,迁移后的节点表不存在的话自动建表 + + private Map dnMigrateSize; + + public TableMigrateInfo(String schemaName, String tableName, List oldDataNodes, + List 
newDataNodes, AbstractPartitionAlgorithm newRuleAlgorithm, String column) { + super(); + this.schemaName = schemaName; + this.tableName = tableName; + this.oldDataNodes = oldDataNodes; + this.newDataNodes = newDataNodes; + this.newRuleAlgorithm = newRuleAlgorithm; + this.column = column; + if(newDataNodes.size()>oldDataNodes.size()){ + isExpantion = true; + }else{ + isExpantion = false; + } + dnMigrateSize = new TreeMap<>(new Comparator() { + @Override + public int compare(String o1, String o2) { + return o1.compareTo(o2); + } + }); + } + + //读取表结构 + public void setTableStructure() throws SQLException{ + DataNode dn = this.getOldDataNodes().get(0); + Connection con = null; + try { + con = DataMigratorUtil.getMysqlConnection(dn); + List> list = DataMigratorUtil.executeQuery(con, "show create table "+tableName); + Map m = list.get(0); + String str = m.get("Create Table").toString(); + str = str.replaceAll("CREATE TABLE", "Create Table if not exists"); + setTableStructure(str); + } catch (SQLException e) { + throw e; + }finally { + JdbcUtils.close(con); + } + } + + //缩容后,找出被移除的节点 + public List getRemovedDataNodes(){ + List list = new ArrayList<>(); + list.addAll(oldDataNodes); + list.removeAll(newDataNodes); + return list; + } + + //扩容后,找出除旧节点以外新增加的节点 + public List getNewAddDataNodes(){ + List list = new ArrayList<>(); + list.addAll(newDataNodes); + list.removeAll(oldDataNodes); + return list; + } + + //对新增的节点创建表:create table if not exists + public void createTableToNewDataNodes() throws SQLException{ + if(this.isExpantion){ + List newDataNodes = getNewAddDataNodes(); + for(DataNode dn:newDataNodes){ + DataMigratorUtil.createTable(dn, this.tableStructure); + } + } + } + + //打印迁移信息 + @SuppressWarnings({ "unchecked", "rawtypes" }) + public void printMigrateInfo(){ + Map map = new LinkedHashMap(); + map.put("tableSize", size.get()+""); + map.put("migrate before", oldDataNodes.toString()); + map.put("migrate after", newDataNodes.toString()); + map.put("rule function", 
newRuleAlgorithm.getClass().getSimpleName()); + String title = getSchemaAndTableName()+" migrate info"; + System.out.println(DataMigratorUtil.printMigrateInfo(title, map, "=")); + } + + public void printMigrateSchedule(){ + String title = getSchemaAndTableName()+" migrate schedule"; + System.out.println(DataMigratorUtil.printMigrateInfo(title, dnMigrateSize, "->")); + } + + /** + * 是否为扩容,true:扩容,false:缩容 + * @return + */ + public boolean isExpantion(){ + return isExpantion; + } + + public List getDataNodesDetail() { + return dataNodesDetail; + } + + public void setDataNodesDetail(List dataNodesDetail) { + this.dataNodesDetail = dataNodesDetail; + } + + public String getSchemaName() { + return schemaName; + } + + public void setSchemaName(String schemaName) { + this.schemaName = schemaName; + } + + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public List getOldDataNodes() { + return oldDataNodes; + } + + public void setOldDataNodes(List oldDataNodes) { + this.oldDataNodes = oldDataNodes; + } + + public List getNewDataNodes() { + return newDataNodes; + } + + public void setNewDataNodes(List newDataNodes) { + this.newDataNodes = newDataNodes; + } + + public AbstractPartitionAlgorithm getNewRuleAlgorithm() { + return newRuleAlgorithm; + } + + public void setNewRuleAlgorithm(AbstractPartitionAlgorithm newRuleAlgorithm) { + this.newRuleAlgorithm = newRuleAlgorithm; + } + + public String getColumn() { + return column; + } + + public void setColumn(String column) { + this.column = column; + } + + public String getSchemaAndTableName(){ + return "["+schemaName+":"+tableName+"]"; + } + + public StringBuffer getErrMessage() { + return errMessage; + } + + public void setErrMessage(String errMessage) { + this.errMessage = new StringBuffer(errMessage); + } + + public AtomicLong getSize() { + return size; + } + + public void setSize(long size){ + this.size = new AtomicLong(size); + } + 
+ public boolean isError() { + return isError; + } + + public void setError(boolean isError) { + this.isError = isError; + } + + public String getTableStructure() { + return tableStructure; + } + + public void setTableStructure(String tableStructure) { + this.tableStructure = tableStructure; + } + + public void setSize(AtomicLong size) { + this.size = size; + } + + public Map getDnMigrateSize() { + return dnMigrateSize; + } + + public void setDnMigrateSize(Map dnMigrateSize) { + this.dnMigrateSize = dnMigrateSize; + } + +} diff --git a/src/main/java/io/mycat/util/dataMigrator/dataIOImpl/MysqlDataIO.java b/src/main/java/io/mycat/util/dataMigrator/dataIOImpl/MysqlDataIO.java new file mode 100644 index 000000000..c34baea77 --- /dev/null +++ b/src/main/java/io/mycat/util/dataMigrator/dataIOImpl/MysqlDataIO.java @@ -0,0 +1,129 @@ +package io.mycat.util.dataMigrator.dataIOImpl; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.mycat.util.dataMigrator.DataIO; +import io.mycat.util.dataMigrator.DataMigrator; +import io.mycat.util.dataMigrator.DataMigratorUtil; +import io.mycat.util.dataMigrator.DataNode; +import io.mycat.util.dataMigrator.TableMigrateInfo; +import io.mycat.util.exception.DataMigratorException; + +/** + * mysql导入导出实现类 + * @author haonan108 + * + */ +public class MysqlDataIO implements DataIO{ + + private static final Logger LOGGER = LoggerFactory.getLogger(MysqlDataIO.class); + + private String mysqlBin; + private int cmdLength; + private String charset; + + public MysqlDataIO(){ + cmdLength = DataMigrator.margs.getCmdLength(); + charset = DataMigrator.margs.getCharSet(); + mysqlBin = DataMigrator.margs.getMysqlBin(); + } + + @Override + public void importData(TableMigrateInfo table,DataNode dn,String tableName, File file) throws IOException, InterruptedException { + String ip = dn.getIp(); + int port = 
dn.getPort(); + String user = dn.getUserName(); + String pwd = dn.getPwd(); + String db = dn.getDb(); + +// String loadData ="?mysql -h? -P? -u? -p? -D? --local-infile=1 -e \"load data local infile '?' replace into table ? CHARACTER SET '?' FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\r\\n'\""; + String loadData = "?mysql -h? -P? -u? -p? -D? -f --default-character-set=? -e \"source ?\""; + loadData = DataMigratorUtil.paramsAssignment(loadData,"?",mysqlBin,ip,port,user,pwd,db,charset,file.getAbsolutePath()); + LOGGER.info(table.getSchemaAndTableName()+" "+loadData); + Process process = DataMigratorUtil.exeCmdByOs(loadData); + + //获取错误信息 + InputStreamReader in = new InputStreamReader(process.getErrorStream()); + BufferedReader br = new BufferedReader(in); + String errMessage = null; + while ((errMessage = br.readLine()) != null) { + if(errMessage.trim().toLowerCase().contains("err")){ + System.out.println(errMessage+" -> "+loadData); + throw new DataMigratorException(errMessage+" -> "+loadData); + } + } + + process.waitFor(); + } + + @Override + public File exportData(TableMigrateInfo table,DataNode dn, String tableName, File export, File condition) throws IOException, InterruptedException { + String ip = dn.getIp(); + int port = dn.getPort(); + String user = dn.getUserName(); + String pwd = dn.getPwd(); + String db = dn.getDb(); + +// String mysqlDump = "?mysqldump -h? -P? -u? -p? ? ? --no-create-info --default-character-set=? " +// + "--add-locks=false --tab='?' --fields-terminated-by=',' --lines-terminated-by='\\r\\n' --where='? in(?)'"; + //由于mysqldump导出csv格式文件只能导出到本地,暂时替换成导出insert形式的文件 + String mysqlDump = "?mysqldump -h? -P? -u? -p? ? ? --compact --no-create-info --default-character-set=? --add-locks=false --where=\"? 
in (#)\" --result-file=\"?\""; + + String fileName = condition.getName(); + File exportPath = new File(export,fileName.substring(0, fileName.indexOf(".txt"))); + if(!exportPath.exists()){ + exportPath.mkdirs(); + } + File exportFile = new File(exportPath,tableName.toLowerCase()+".txt"); + //拼接mysqldump命令,不拼接where条件:--where=id in(?) + mysqlDump = DataMigratorUtil.paramsAssignment(mysqlDump,"?",mysqlBin,ip,port,user,pwd,db,tableName,charset,table.getColumn(),exportFile); + + String data = ""; + //由于操作系统对命令行长度的限制,导出过程被拆分成多次,最后需要将导出的数据文件合并 + File mergedFile = new File(exportPath,tableName.toLowerCase()+".sql"); + if(!mergedFile.exists()){ + mergedFile.createNewFile(); + } + int offset = 0; + while((data=DataMigratorUtil.readData(condition,offset,cmdLength)).length()>0){ + offset += data.getBytes().length; + if(data.startsWith(",")){ + data = data.substring(1, data.length()); + } + if(data.endsWith(",")){ + data = data.substring(0,data.length()-1); + } + String mysqlDumpCmd = DataMigratorUtil.paramsAssignment(mysqlDump,"#",data); + LOGGER.info(table.getSchemaAndTableName()+mysqlDump); + LOGGER.debug(table.getSchemaAndTableName()+" "+mysqlDumpCmd); + + Process process = DataMigratorUtil.exeCmdByOs(mysqlDumpCmd); + //获取错误信息 + InputStreamReader in = new InputStreamReader(process.getErrorStream()); + BufferedReader br = new BufferedReader(in); + String errMessage = null; + while ((errMessage = br.readLine()) != null) { + if(errMessage.trim().toLowerCase().contains("err")){ + System.out.println(errMessage+" -> "+mysqlDump); + throw new DataMigratorException(errMessage+" -> "+mysqlDump); + }else{ + LOGGER.info(table.getSchemaAndTableName()+mysqlDump+" exe info:"+errMessage); + } + } + process.waitFor(); + + //合并文件 + DataMigratorUtil.mergeFiles(mergedFile, exportFile); + if(exportFile.exists()){ + exportFile.delete(); + } + } + return mergedFile; + } +} diff --git a/src/main/java/io/mycat/util/exception/DataMigratorException.java 
b/src/main/java/io/mycat/util/exception/DataMigratorException.java new file mode 100644 index 000000000..332ff62e9 --- /dev/null +++ b/src/main/java/io/mycat/util/exception/DataMigratorException.java @@ -0,0 +1,36 @@ +package io.mycat.util.exception; +/** + * + * @author haonan108 + * + */ +public class DataMigratorException extends RuntimeException{ + + private static final long serialVersionUID = -6706826479467595980L; + + public DataMigratorException() { + super(); + + } + + public DataMigratorException(String message, Throwable cause, + boolean enableSuppression, boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + + } + + public DataMigratorException(String message, Throwable cause) { + super(message, cause); + + } + + public DataMigratorException(String message) { + super(message); + + } + + public DataMigratorException(Throwable cause) { + super(cause); + + } +} diff --git a/src/main/java/io/mycat/server/exception/ErrorPacketException.java b/src/main/java/io/mycat/util/exception/ErrorPacketException.java similarity index 97% rename from src/main/java/io/mycat/server/exception/ErrorPacketException.java rename to src/main/java/io/mycat/util/exception/ErrorPacketException.java index 2fd3727c7..006c28646 100644 --- a/src/main/java/io/mycat/server/exception/ErrorPacketException.java +++ b/src/main/java/io/mycat/util/exception/ErrorPacketException.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.exception; +package io.mycat.util.exception; /** * @author mycat diff --git a/src/main/java/io/mycat/server/exception/HeartbeatException.java b/src/main/java/io/mycat/util/exception/HeartbeatException.java similarity index 97% rename from src/main/java/io/mycat/server/exception/HeartbeatException.java rename to src/main/java/io/mycat/util/exception/HeartbeatException.java index e22800c23..a30ceada2 100644 --- a/src/main/java/io/mycat/server/exception/HeartbeatException.java +++ b/src/main/java/io/mycat/util/exception/HeartbeatException.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.exception; +package io.mycat.util.exception; /** * @author mycat diff --git a/src/main/java/io/mycat/server/exception/MurmurHashException.java b/src/main/java/io/mycat/util/exception/MurmurHashException.java similarity index 94% rename from src/main/java/io/mycat/server/exception/MurmurHashException.java rename to src/main/java/io/mycat/util/exception/MurmurHashException.java index 8664dd87b..be24d18db 100644 --- a/src/main/java/io/mycat/server/exception/MurmurHashException.java +++ b/src/main/java/io/mycat/util/exception/MurmurHashException.java @@ -1,4 +1,4 @@ -package io.mycat.server.exception; +package io.mycat.util.exception; public class MurmurHashException extends RuntimeException{ diff --git a/src/main/java/io/mycat/server/exception/RehashException.java b/src/main/java/io/mycat/util/exception/RehashException.java similarity index 94% rename from src/main/java/io/mycat/server/exception/RehashException.java rename to src/main/java/io/mycat/util/exception/RehashException.java index f36219659..4f8b47bcc 100644 --- a/src/main/java/io/mycat/server/exception/RehashException.java +++ b/src/main/java/io/mycat/util/exception/RehashException.java @@ -1,4 +1,4 @@ -package io.mycat.server.exception; +package io.mycat.util.exception; public class RehashException extends RuntimeException{ diff --git 
a/src/main/java/io/mycat/server/exception/UnknownCharsetException.java b/src/main/java/io/mycat/util/exception/UnknownCharsetException.java similarity index 97% rename from src/main/java/io/mycat/server/exception/UnknownCharsetException.java rename to src/main/java/io/mycat/util/exception/UnknownCharsetException.java index 84e8ea8a1..9cbf7b1d2 100644 --- a/src/main/java/io/mycat/server/exception/UnknownCharsetException.java +++ b/src/main/java/io/mycat/util/exception/UnknownCharsetException.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.exception; +package io.mycat.util.exception; /** * 未知字符集异常 diff --git a/src/main/java/io/mycat/server/exception/UnknownDataNodeException.java b/src/main/java/io/mycat/util/exception/UnknownDataNodeException.java similarity index 97% rename from src/main/java/io/mycat/server/exception/UnknownDataNodeException.java rename to src/main/java/io/mycat/util/exception/UnknownDataNodeException.java index 03ba86a47..66419182e 100644 --- a/src/main/java/io/mycat/server/exception/UnknownDataNodeException.java +++ b/src/main/java/io/mycat/util/exception/UnknownDataNodeException.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.exception; +package io.mycat.util.exception; /** * @author mycat diff --git a/src/main/java/io/mycat/server/exception/UnknownPacketException.java b/src/main/java/io/mycat/util/exception/UnknownPacketException.java similarity index 97% rename from src/main/java/io/mycat/server/exception/UnknownPacketException.java rename to src/main/java/io/mycat/util/exception/UnknownPacketException.java index 5c0f07cdc..1bfc3dad1 100644 --- a/src/main/java/io/mycat/server/exception/UnknownPacketException.java +++ b/src/main/java/io/mycat/util/exception/UnknownPacketException.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server.exception; +package io.mycat.util.exception; /** * 未知数据包异常 diff --git a/src/main/java/io/mycat/server/exception/UnknownTxIsolationException.java b/src/main/java/io/mycat/util/exception/UnknownTxIsolationException.java similarity index 97% rename from src/main/java/io/mycat/server/exception/UnknownTxIsolationException.java rename to src/main/java/io/mycat/util/exception/UnknownTxIsolationException.java index 79efb24c3..00d65cfc4 100644 --- a/src/main/java/io/mycat/server/exception/UnknownTxIsolationException.java +++ b/src/main/java/io/mycat/util/exception/UnknownTxIsolationException.java @@ -21,7 +21,7 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server.exception; +package io.mycat.util.exception; /** * 未知事物隔离级别异常 diff --git a/src/main/java/io/mycat/util/rehasher/RehashCmdArgs.java b/src/main/java/io/mycat/util/rehasher/RehashCmdArgs.java index 6058d4b68..becfb5a3c 100644 --- a/src/main/java/io/mycat/util/rehasher/RehashCmdArgs.java +++ b/src/main/java/io/mycat/util/rehasher/RehashCmdArgs.java @@ -1,8 +1,5 @@ package io.mycat.util.rehasher; -import io.mycat.util.CmdArgs; -import io.mycat.util.StringUtil; - import java.io.BufferedReader; import java.io.FileInputStream; import java.io.IOException; @@ -10,6 +7,9 @@ import java.util.ArrayList; import java.util.List; +import io.mycat.util.StringUtil; +import io.mycat.util.cmd.CmdArgs; + public class RehashCmdArgs { public static final String JDBC_DRIVER="jdbcDriver"; public static final String JDBC_URL="jdbcUrl"; diff --git a/src/main/java/io/mycat/util/rehasher/RehashLauncher.java b/src/main/java/io/mycat/util/rehasher/RehashLauncher.java index 3b195c9d6..379a6c5f7 100644 --- a/src/main/java/io/mycat/util/rehasher/RehashLauncher.java +++ b/src/main/java/io/mycat/util/rehasher/RehashLauncher.java @@ -1,11 +1,5 @@ package io.mycat.util.rehasher; -import io.mycat.route.function.AbstractPartitionAlgorithm; -import io.mycat.route.function.PartitionByMod; -import 
io.mycat.route.function.PartitionByMurmurHash; -import io.mycat.server.exception.RehashException; -import io.mycat.util.CollectionUtil; - import java.io.File; import java.io.IOException; import java.io.PrintStream; @@ -21,6 +15,12 @@ import com.alibaba.druid.pool.DruidDataSource; import com.alibaba.druid.util.JdbcUtils; +import io.mycat.route.function.AbstractPartitionAlgorithm; +import io.mycat.route.function.PartitionByMod; +import io.mycat.route.function.PartitionByMurmurHash; +import io.mycat.util.CollectionUtil; +import io.mycat.util.exception.RehashException; + /** * 本工具依赖druid,Mycat已经包含druid,druid配置请查阅相关文档。相关参数请看RehashCmdArgs * @author wujingrun @@ -101,6 +101,7 @@ private void initHashAlg() throws IOException{ murmur.setCount(rehashHosts.length); murmur.setSeed(args.getMurmurHashSeed()); murmur.setVirtualBucketTimes(args.getMurmurHashVirtualBucketTimes()); + murmur.setWeightMapFile(args.getMurmurWeightMapFile()); murmur.init(); } else if (HashType.MOD.equals(args.getHashType())) { alg=new PartitionByMod(); diff --git a/src/main/resources/auto-sharding-long.txt b/src/main/resources/auto-sharding-long.txt new file mode 100644 index 000000000..c0bb3db9b --- /dev/null +++ b/src/main/resources/auto-sharding-long.txt @@ -0,0 +1,3 @@ +2000001-4000000=1 +0-2000000=0 +4000001-8000000=2 diff --git a/src/main/resources/auto-sharding-rang-mod.txt b/src/main/resources/auto-sharding-rang-mod.txt new file mode 100644 index 000000000..871972387 --- /dev/null +++ b/src/main/resources/auto-sharding-rang-mod.txt @@ -0,0 +1,5 @@ +800M1-1000M=6 +600M1-800M=4 +200M1-400M=1 +0-200M=5 +400M1-600M=4 diff --git a/src/main/resources/autopartition-long.txt b/src/main/resources/autopartition-long.txt new file mode 100644 index 000000000..9936faa2f --- /dev/null +++ b/src/main/resources/autopartition-long.txt @@ -0,0 +1,5 @@ +# range start-end ,data node index +# K=1000,M=10000. 
+0-500M=0 +500M-1000M=1 +1000M-1500M=2 \ No newline at end of file diff --git a/src/main/resources/caches.properties b/src/main/resources/cacheservice.properties similarity index 100% rename from src/main/resources/caches.properties rename to src/main/resources/cacheservice.properties diff --git a/src/main/resources/dbseq.sql b/src/main/resources/dbseq.sql new file mode 100644 index 000000000..134e592e0 --- /dev/null +++ b/src/main/resources/dbseq.sql @@ -0,0 +1,95 @@ +DROP TABLE IF EXISTS MYCAT_SEQUENCE; +CREATE TABLE MYCAT_SEQUENCE ( name VARCHAR(64) NOT NULL, current_value BIGINT(20) NOT NULL, increment INT NOT NULL DEFAULT 1, PRIMARY KEY (name) ) ENGINE=InnoDB; + +-- ---------------------------- +-- Function structure for `mycat_seq_currval` +-- ---------------------------- +DROP FUNCTION IF EXISTS `mycat_seq_currval`; +DELIMITER ;; +CREATE FUNCTION `mycat_seq_currval`(seq_name VARCHAR(64)) RETURNS varchar(64) CHARSET latin1 + DETERMINISTIC +BEGIN + DECLARE retval VARCHAR(64); + SET retval="-1,0"; + SELECT concat(CAST(current_value AS CHAR),",",CAST(increment AS CHAR) ) INTO retval FROM MYCAT_SEQUENCE WHERE name = seq_name; + RETURN retval ; +END +;; +DELIMITER ; + +-- ---------------------------- +-- Function structure for `mycat_seq_nextval` +-- ---------------------------- +DROP FUNCTION IF EXISTS `mycat_seq_nextval`; +DELIMITER ;; +CREATE FUNCTION `mycat_seq_nextval`(seq_name VARCHAR(64)) RETURNS varchar(64) CHARSET latin1 + DETERMINISTIC +BEGIN + DECLARE retval VARCHAR(64); + DECLARE val BIGINT; + DECLARE inc INT; + DECLARE seq_lock INT; + set val = -1; + set inc = 0; + SET seq_lock = -1; + SELECT GET_LOCK(seq_name, 15) into seq_lock; + if seq_lock = 1 then + SELECT current_value + increment, increment INTO val, inc FROM MYCAT_SEQUENCE WHERE name = seq_name for update; + if val != -1 then + UPDATE MYCAT_SEQUENCE SET current_value = val WHERE name = seq_name; + end if; + SELECT RELEASE_LOCK(seq_name) into seq_lock; + end if; + SELECT concat(CAST((val - inc 
+ 1) as CHAR),",",CAST(inc as CHAR)) INTO retval; + RETURN retval; +END +;; +DELIMITER ; + +-- ---------------------------- +-- Function structure for `mycat_seq_setvals` +-- ---------------------------- +DROP FUNCTION IF EXISTS `mycat_seq_nextvals`; +DELIMITER ;; +CREATE FUNCTION `mycat_seq_nextvals`(seq_name VARCHAR(64), count INT) RETURNS VARCHAR(64) CHARSET latin1 + DETERMINISTIC +BEGIN + DECLARE retval VARCHAR(64); + DECLARE val BIGINT; + DECLARE seq_lock INT; + SET val = -1; + SET seq_lock = -1; + SELECT GET_LOCK(seq_name, 15) into seq_lock; + if seq_lock = 1 then + SELECT current_value + count INTO val FROM MYCAT_SEQUENCE WHERE name = seq_name for update; + IF val != -1 THEN + UPDATE MYCAT_SEQUENCE SET current_value = val WHERE name = seq_name; + END IF; + SELECT RELEASE_LOCK(seq_name) into seq_lock; + end if; + SELECT CONCAT(CAST((val - count + 1) as CHAR), ",", CAST(val as CHAR)) INTO retval; + RETURN retval; +END +;; +DELIMITER ; + +-- ---------------------------- +-- Function structure for `mycat_seq_setval` +-- ---------------------------- +DROP FUNCTION IF EXISTS `mycat_seq_setval`; +DELIMITER ;; +CREATE FUNCTION `mycat_seq_setval`(seq_name VARCHAR(64), value BIGINT) RETURNS varchar(64) CHARSET latin1 + DETERMINISTIC +BEGIN + DECLARE retval VARCHAR(64); + DECLARE inc INT; + SET inc = 0; + SELECT increment INTO inc FROM MYCAT_SEQUENCE WHERE name = seq_name; + UPDATE MYCAT_SEQUENCE SET current_value = value WHERE name = seq_name; + SELECT concat(CAST(value as CHAR),",",CAST(inc as CHAR)) INTO retval; + RETURN retval; +END +;; +DELIMITER ; + +INSERT INTO MYCAT_SEQUENCE VALUES ('GLOBAL', 1, 1); \ No newline at end of file diff --git a/src/main/resources/dnindex.properties b/src/main/resources/dnindex.properties deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/main/resources/index_to_charset.properties b/src/main/resources/index_to_charset.properties new file mode 100644 index 000000000..b990ce466 --- /dev/null +++ 
b/src/main/resources/index_to_charset.properties @@ -0,0 +1,219 @@ +1=big5 +2=latin2 +3=dec8 +4=cp850 +5=latin1 +6=hp8 +7=koi8r +8=latin1 +9=latin2 +10=swe7 +11=ascii +12=ujis +13=sjis +14=cp1251 +15=latin1 +16=hebrew +18=tis620 +19=euckr +20=latin7 +21=latin2 +22=koi8u +23=cp1251 +24=gb2312 +25=greek +26=cp1250 +27=latin2 +28=gbk +29=cp1257 +30=latin5 +31=latin1 +32=armscii8 +33=utf8 +34=cp1250 +35=ucs2 +36=cp866 +37=keybcs2 +38=macce +39=macroman +40=cp852 +41=latin7 +42=latin7 +43=macce +44=cp1250 +45=utf8mb4 +46=utf8mb4 +47=latin1 +48=latin1 +49=latin1 +50=cp1251 +51=cp1251 +52=cp1251 +53=macroman +54=utf16 +55=utf16 +56=utf16le +57=cp1256 +58=cp1257 +59=cp1257 +60=utf32 +61=utf32 +62=utf16le +63=binary +64=armscii8 +65=ascii +66=cp1250 +67=cp1256 +68=cp866 +69=dec8 +70=greek +71=hebrew +72=hp8 +73=keybcs2 +74=koi8r +75=koi8u +77=latin2 +78=latin5 +79=latin7 +80=cp850 +81=cp852 +82=swe7 +83=utf8 +84=big5 +85=euckr +86=gb2312 +87=gbk +88=sjis +89=tis620 +90=ucs2 +91=ujis +92=geostd8 +93=geostd8 +94=latin1 +95=cp932 +96=cp932 +97=eucjpms +98=eucjpms +99=cp1250 +101=utf16 +102=utf16 +103=utf16 +104=utf16 +105=utf16 +106=utf16 +107=utf16 +108=utf16 +109=utf16 +110=utf16 +111=utf16 +112=utf16 +113=utf16 +114=utf16 +115=utf16 +116=utf16 +117=utf16 +118=utf16 +119=utf16 +120=utf16 +121=utf16 +122=utf16 +123=utf16 +124=utf16 +128=ucs2 +129=ucs2 +130=ucs2 +131=ucs2 +132=ucs2 +133=ucs2 +134=ucs2 +135=ucs2 +136=ucs2 +137=ucs2 +138=ucs2 +139=ucs2 +140=ucs2 +141=ucs2 +142=ucs2 +143=ucs2 +144=ucs2 +145=ucs2 +146=ucs2 +147=ucs2 +148=ucs2 +149=ucs2 +150=ucs2 +151=ucs2 +159=ucs2 +160=utf32 +161=utf32 +162=utf32 +163=utf32 +164=utf32 +165=utf32 +166=utf32 +167=utf32 +168=utf32 +169=utf32 +170=utf32 +171=utf32 +172=utf32 +173=utf32 +174=utf32 +175=utf32 +176=utf32 +177=utf32 +178=utf32 +179=utf32 +180=utf32 +181=utf32 +182=utf32 +183=utf32 +192=utf8 +193=utf8 +194=utf8 +195=utf8 +196=utf8 +197=utf8 +198=utf8 +199=utf8 +200=utf8 +201=utf8 +202=utf8 +203=utf8 +204=utf8 +205=utf8 
+206=utf8 +207=utf8 +208=utf8 +209=utf8 +210=utf8 +211=utf8 +212=utf8 +213=utf8 +214=utf8 +215=utf8 +223=utf8 +224=utf8mb4 +225=utf8mb4 +226=utf8mb4 +227=utf8mb4 +228=utf8mb4 +229=utf8mb4 +230=utf8mb4 +231=utf8mb4 +232=utf8mb4 +233=utf8mb4 +234=utf8mb4 +235=utf8mb4 +236=utf8mb4 +237=utf8mb4 +238=utf8mb4 +239=utf8mb4 +240=utf8mb4 +241=utf8mb4 +242=utf8mb4 +243=utf8mb4 +244=utf8mb4 +245=utf8mb4 +246=utf8mb4 +247=utf8mb4 \ No newline at end of file diff --git a/src/main/resources/log4j2.xml b/src/main/resources/log4j2.xml index 15fd0a78a..b952e524f 100644 --- a/src/main/resources/log4j2.xml +++ b/src/main/resources/log4j2.xml @@ -2,13 +2,13 @@ - + - - %d{YYYY-MM-dd HH:mm:ss.SSS} %5p [%t] (%l) - %m%n + %d{yyyy-MM-dd HH:mm:ss.SSS} %5p [%t] (%l) - %m%n @@ -18,9 +18,15 @@ - - - - + + + + + + + + + + diff --git a/src/main/resources/migrateTables.properties b/src/main/resources/migrateTables.properties new file mode 100644 index 000000000..0bf89ec5e --- /dev/null +++ b/src/main/resources/migrateTables.properties @@ -0,0 +1,6 @@ +#schema1=tb1,tb2,... +#schema2=all(写all或者不写将对此schema下拆分节点变化的拆分表全部进行重新路由) +#... + +#sample +#TESTDB=travelrecord,company,goods \ No newline at end of file diff --git a/src/main/resources/mycat.xml b/src/main/resources/mycat.xml deleted file mode 100644 index bcb59bbcd..000000000 --- a/src/main/resources/mycat.xml +++ /dev/null @@ -1,92 +0,0 @@ - - - - - - 2 - 8066 - 9066 - utf8 - - - digdeep - testdb - - - - - - -
-
- - - - - - - - select user() - - - - - - - - - - - select 1 - - - - - - - 0 - - - 0 - 1 - 2 - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/src/main/resources/myid.properties b/src/main/resources/myid.properties new file mode 100644 index 000000000..ad52dda15 --- /dev/null +++ b/src/main/resources/myid.properties @@ -0,0 +1,9 @@ +loadZk=false +zkURL=127.0.0.1:2181 +clusterId=mycat-cluster-1 +myid=mycat_fz_01 +clusterSize=3 +clusterNodes=mycat_fz_01,mycat_fz_02,mycat_fz_04 +#server booster ; booster install on db same server,will reset all minCon to 2 +type=server +boosterDataHosts=dataHost1 diff --git a/src/main/resources/partition-hash-int.txt b/src/main/resources/partition-hash-int.txt new file mode 100644 index 000000000..c13b35142 --- /dev/null +++ b/src/main/resources/partition-hash-int.txt @@ -0,0 +1,2 @@ +10000=0 +10010=1 \ No newline at end of file diff --git a/src/main/resources/partition-range-mod.txt b/src/main/resources/partition-range-mod.txt new file mode 100644 index 000000000..c649b53b3 --- /dev/null +++ b/src/main/resources/partition-range-mod.txt @@ -0,0 +1,6 @@ +# range start-end ,data node group size +0-200M=5 +200M1-400M=1 +400M1-600M=4 +600M1-800M=4 +800M1-1000M=6 diff --git a/src/main/resources/rule.dtd b/src/main/resources/rule.dtd new file mode 100644 index 000000000..303acef9f --- /dev/null +++ b/src/main/resources/rule.dtd @@ -0,0 +1,35 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/main/resources/rule.xml b/src/main/resources/rule.xml new file mode 100644 index 000000000..94679cdbd --- /dev/null +++ b/src/main/resources/rule.xml @@ -0,0 +1,131 @@ + + + + + + + id + func1 + + + + + + user_id + func1 + + + + + + sharding_id + hash-int + + + + + id + rang-long + + + + + id + mod-long + + + + + id + murmur + + + + + id + crc32slot + + + + + create_time + partbymonth + + + + + calldate + latestMonth + + + + + + id + rang-mod + + + + + + id + jump-consistent-hash + + + + + 0 + 2 + 160 + + + + + + 2 
+ + + partition-hash-int.txt + + + autopartition-long.txt + + + + 3 + + + + 8 + 128 + + + 24 + + + yyyy-MM-dd + 2015-01-01 + + + + partition-range-mod.txt + + + + 3 + + diff --git a/src/main/resources/schema.dtd b/src/main/resources/schema.dtd new file mode 100644 index 000000000..144cc032f --- /dev/null +++ b/src/main/resources/schema.dtd @@ -0,0 +1,87 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/main/resources/schema.xml b/src/main/resources/schema.xml new file mode 100644 index 000000000..d5d5f58e2 --- /dev/null +++ b/src/main/resources/schema.xml @@ -0,0 +1,78 @@ + + + + + + +
+ + +
+
+ +
+ +
+
+ + + + +
+ +
+ + + + + + + select user() + + + + + + + + + + + + \ No newline at end of file diff --git a/src/main/resources/sequence_conf.properties b/src/main/resources/sequence_conf.properties index e64f28b1f..b96c3acea 100644 --- a/src/main/resources/sequence_conf.properties +++ b/src/main/resources/sequence_conf.properties @@ -1,8 +1,27 @@ -GLOBAL_SEQ.HISIDS= -GLOBAL_SEQ.MINID=1001 -GLOBAL_SEQ.MAXID=1000000000 -GLOBAL_SEQ.CURID=1000 -TUSER.HISIDS= -TUSER.MINID=1001 -TUSER.MAXID=1000000000 -TUSER.CURID=1000 \ No newline at end of file +#default global sequence +GLOBAL.HISIDS= +GLOBAL.MINID=10001 +GLOBAL.MAXID=20000 +GLOBAL.CURID=10000 + +# self define sequence +COMPANY.HISIDS= +COMPANY.MINID=1001 +COMPANY.MAXID=2000 +COMPANY.CURID=1000 + +CUSTOMER.HISIDS= +CUSTOMER.MINID=1001 +CUSTOMER.MAXID=2000 +CUSTOMER.CURID=1000 + +ORDER.HISIDS= +ORDER.MINID=1001 +ORDER.MAXID=2000 +ORDER.CURID=1000 + +HOTNEWS.HISIDS= +HOTNEWS.MINID=1001 +HOTNEWS.MAXID=2000 +HOTNEWS.CURID=1000 + diff --git a/src/main/resources/sequence_db_conf.properties b/src/main/resources/sequence_db_conf.properties new file mode 100644 index 000000000..b51fd1233 --- /dev/null +++ b/src/main/resources/sequence_db_conf.properties @@ -0,0 +1,5 @@ +#sequence stored in datanode +GLOBAL=dn1 +COMPANY=dn1 +CUSTOMER=dn1 +ORDERS=dn1 \ No newline at end of file diff --git a/src/main/resources/sequence_distributed_conf.properties b/src/main/resources/sequence_distributed_conf.properties new file mode 100644 index 000000000..1cacd659c --- /dev/null +++ b/src/main/resources/sequence_distributed_conf.properties @@ -0,0 +1,2 @@ +INSTANCEID=01 +CLUSTERID=01 diff --git a/src/main/resources/sequence_time_conf.properties b/src/main/resources/sequence_time_conf.properties new file mode 100644 index 000000000..6c9b5b271 --- /dev/null +++ b/src/main/resources/sequence_time_conf.properties @@ -0,0 +1,3 @@ +#sequence depend on TIME +WORKID=01 +DATAACENTERID=01 \ No newline at end of file diff --git a/src/main/resources/server.dtd 
b/src/main/resources/server.dtd new file mode 100644 index 000000000..c805a9337 --- /dev/null +++ b/src/main/resources/server.dtd @@ -0,0 +1,62 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/main/resources/server.xml b/src/main/resources/server.xml new file mode 100644 index 000000000..7bb02ce6f --- /dev/null +++ b/src/main/resources/server.xml @@ -0,0 +1,114 @@ + + + + + + 0 + 1 + 0 + 0 + + 2 + false + + + + + + 0 + + + + + + + + + 0 + + + 1 + + + 64k + + + 1k + + 0 + + + 384m + + + + false + + + + + + + + + + + + + + + + + + + + 123456 + TESTDB + + + + + + + user + TESTDB + true + + + diff --git a/src/main/resources/sharding-by-enum.txt b/src/main/resources/sharding-by-enum.txt new file mode 100644 index 000000000..f2a00424e --- /dev/null +++ b/src/main/resources/sharding-by-enum.txt @@ -0,0 +1,2 @@ +10000=0 +10010=1 diff --git a/src/main/resources/wrapper.conf b/src/main/resources/wrapper.conf deleted file mode 100644 index f771ae15e..000000000 --- a/src/main/resources/wrapper.conf +++ /dev/null @@ -1,123 +0,0 @@ -#******************************************************************** -# Wrapper Properties -#******************************************************************** -# Java Application -wrapper.java.command=java -wrapper.working.dir=.. - -# Java Main class. This class must implement the WrapperListener interface -# or guarantee that the WrapperManager class is initialized. Helper -# classes are provided to do this for you. See the Integration section -# of the documentation for details. -wrapper.java.mainclass=org.tanukisoftware.wrapper.WrapperSimpleApp -set.default.REPO_DIR=lib -set.APP_BASE=. 
- -# Java Classpath (include wrapper.jar) Add class path elements as -# needed starting from 1 -wrapper.java.classpath.1=lib/wrapper.jar -wrapper.java.classpath.2=conf -wrapper.java.classpath.3=%REPO_DIR%/* - -# Java Library Path (location of Wrapper.DLL or libwrapper.so) -wrapper.java.library.path.1=lib - -# Java Additional Parameters -#wrapper.java.additional.1= -wrapper.java.additional.1=-DMYCAT_HOME=. -wrapper.java.additional.2=-server -wrapper.java.additional.3=-XX:MaxPermSize=64M -wrapper.java.additional.4=-XX:+AggressiveOpts -wrapper.java.additional.5=-XX:MaxDirectMemorySize=2G -wrapper.java.additional.6=-Dcom.sun.management.jmxremote -wrapper.java.additional.7=-Dcom.sun.management.jmxremote.port=1984 -wrapper.java.additional.8=-Dcom.sun.management.jmxremote.authenticate=false -wrapper.java.additional.9=-Dcom.sun.management.jmxremote.ssl=false - -# Initial Java Heap Size (in MB) -#wrapper.java.initmemory=3 -wrapper.java.initmemory=2048 - -# Maximum Java Heap Size (in MB) -#wrapper.java.maxmemory=64 -wrapper.java.maxmemory=2048 - -# Application parameters. Add parameters as needed starting from 1 -wrapper.app.parameter.1=org.opencloudb.MycatStartup -wrapper.app.parameter.2=start - -#******************************************************************** -# Wrapper Logging Properties -#******************************************************************** -# Format of output for the console. (See docs for formats) -wrapper.console.format=PM - -# Log Level for console output. (See docs for log levels) -wrapper.console.loglevel=INFO - -# Log file to use for wrapper output logging. -wrapper.logfile=logs/wrapper.log - -# Format of output for the log file. (See docs for formats) -wrapper.logfile.format=LPTM - -# Log Level for log file output. (See docs for log levels) -wrapper.logfile.loglevel=INFO - -# Maximum size that the log file will be allowed to grow to before -# the log is rolled. Size is specified in bytes. The default value -# of 0, disables log rolling. 
May abbreviate with the 'k' (kb) or -# 'm' (mb) suffix. For example: 10m = 10 megabytes. -wrapper.logfile.maxsize=0 - -# Maximum number of rolled log files which will be allowed before old -# files are deleted. The default value of 0 implies no limit. -wrapper.logfile.maxfiles=0 - -# Log Level for sys/event log output. (See docs for log levels) -wrapper.syslog.loglevel=NONE - -#******************************************************************** -# Wrapper Windows Properties -#******************************************************************** -# Title to use when running as a console -wrapper.console.title=Mycat-server - -#******************************************************************** -# Wrapper Windows NT/2000/XP Service Properties -#******************************************************************** -# WARNING - Do not modify any of these properties when an application -# using this configuration file has been installed as a service. -# Please uninstall the service before modifying this section. The -# service can then be reinstalled. - -# Name of the service -wrapper.ntservice.name=mycat - -# Display name of the service -wrapper.ntservice.displayname=Mycat-server - -# Description of the service -wrapper.ntservice.description=The project of Mycat-server - -# Service dependencies. Add dependencies as needed starting from 1 -wrapper.ntservice.dependency.1= - -# Mode in which the service is installed. AUTO_START or DEMAND_START -wrapper.ntservice.starttype=AUTO_START - -# Allow the service to interact with the desktop. 
-wrapper.ntservice.interactive=false - -configuration.directory.in.classpath.first=conf - -mycat.registry.address=zookeeper://121.40.121.133:4181 -#mycat.registry.address=jdbc:mysql://121.40.121.133:3306:blog - - - - - - - - diff --git a/src/main/resources/zk.yaml b/src/main/resources/zk.yaml deleted file mode 100644 index 9949ad8f2..000000000 --- a/src/main/resources/zk.yaml +++ /dev/null @@ -1,2 +0,0 @@ -zkURL : 127.0.0.1:2181 -myID : mycat_fz_01 diff --git a/src/main/resources/zkconf/auto-sharding-long.txt b/src/main/resources/zkconf/auto-sharding-long.txt new file mode 100644 index 000000000..c0bb3db9b --- /dev/null +++ b/src/main/resources/zkconf/auto-sharding-long.txt @@ -0,0 +1,3 @@ +2000001-4000000=1 +0-2000000=0 +4000001-8000000=2 diff --git a/src/main/resources/zkconf/auto-sharding-rang-mod.txt b/src/main/resources/zkconf/auto-sharding-rang-mod.txt new file mode 100644 index 000000000..871972387 --- /dev/null +++ b/src/main/resources/zkconf/auto-sharding-rang-mod.txt @@ -0,0 +1,5 @@ +800M1-1000M=6 +600M1-800M=4 +200M1-400M=1 +0-200M=5 +400M1-600M=4 diff --git a/src/main/resources/zkconf/autopartition-long.txt b/src/main/resources/zkconf/autopartition-long.txt new file mode 100644 index 000000000..9936faa2f --- /dev/null +++ b/src/main/resources/zkconf/autopartition-long.txt @@ -0,0 +1,5 @@ +# range start-end ,data node index +# K=1000,M=10000. 
+0-500M=0 +500M-1000M=1 +1000M-1500M=2 \ No newline at end of file diff --git a/src/main/resources/zkconf/cacheservice.properties b/src/main/resources/zkconf/cacheservice.properties new file mode 100644 index 000000000..03e0e5852 --- /dev/null +++ b/src/main/resources/zkconf/cacheservice.properties @@ -0,0 +1,7 @@ +#used for mycat cache service conf +factory.encache=io.mycat.cache.impl.EnchachePooFactory +#key is pool name ,value is type,max size, expire seconds +pool.SQLRouteCache=encache,10000,1800 +pool.ER_SQL2PARENTID=encache,1000,1800 +layedpool.TableID2DataNodeCache=encache,10000,18000 +layedpool.TableID2DataNodeCache.TESTDB_ORDERS=50000,18000 \ No newline at end of file diff --git a/src/main/resources/zkconf/ehcache.xml b/src/main/resources/zkconf/ehcache.xml new file mode 100644 index 000000000..c7496f63f --- /dev/null +++ b/src/main/resources/zkconf/ehcache.xml @@ -0,0 +1,8 @@ + + + \ No newline at end of file diff --git a/src/main/resources/zkconf/index_to_charset.properties b/src/main/resources/zkconf/index_to_charset.properties new file mode 100644 index 000000000..b990ce466 --- /dev/null +++ b/src/main/resources/zkconf/index_to_charset.properties @@ -0,0 +1,219 @@ +1=big5 +2=latin2 +3=dec8 +4=cp850 +5=latin1 +6=hp8 +7=koi8r +8=latin1 +9=latin2 +10=swe7 +11=ascii +12=ujis +13=sjis +14=cp1251 +15=latin1 +16=hebrew +18=tis620 +19=euckr +20=latin7 +21=latin2 +22=koi8u +23=cp1251 +24=gb2312 +25=greek +26=cp1250 +27=latin2 +28=gbk +29=cp1257 +30=latin5 +31=latin1 +32=armscii8 +33=utf8 +34=cp1250 +35=ucs2 +36=cp866 +37=keybcs2 +38=macce +39=macroman +40=cp852 +41=latin7 +42=latin7 +43=macce +44=cp1250 +45=utf8mb4 +46=utf8mb4 +47=latin1 +48=latin1 +49=latin1 +50=cp1251 +51=cp1251 +52=cp1251 +53=macroman +54=utf16 +55=utf16 +56=utf16le +57=cp1256 +58=cp1257 +59=cp1257 +60=utf32 +61=utf32 +62=utf16le +63=binary +64=armscii8 +65=ascii +66=cp1250 +67=cp1256 +68=cp866 +69=dec8 +70=greek +71=hebrew +72=hp8 +73=keybcs2 +74=koi8r +75=koi8u +77=latin2 +78=latin5 
+79=latin7 +80=cp850 +81=cp852 +82=swe7 +83=utf8 +84=big5 +85=euckr +86=gb2312 +87=gbk +88=sjis +89=tis620 +90=ucs2 +91=ujis +92=geostd8 +93=geostd8 +94=latin1 +95=cp932 +96=cp932 +97=eucjpms +98=eucjpms +99=cp1250 +101=utf16 +102=utf16 +103=utf16 +104=utf16 +105=utf16 +106=utf16 +107=utf16 +108=utf16 +109=utf16 +110=utf16 +111=utf16 +112=utf16 +113=utf16 +114=utf16 +115=utf16 +116=utf16 +117=utf16 +118=utf16 +119=utf16 +120=utf16 +121=utf16 +122=utf16 +123=utf16 +124=utf16 +128=ucs2 +129=ucs2 +130=ucs2 +131=ucs2 +132=ucs2 +133=ucs2 +134=ucs2 +135=ucs2 +136=ucs2 +137=ucs2 +138=ucs2 +139=ucs2 +140=ucs2 +141=ucs2 +142=ucs2 +143=ucs2 +144=ucs2 +145=ucs2 +146=ucs2 +147=ucs2 +148=ucs2 +149=ucs2 +150=ucs2 +151=ucs2 +159=ucs2 +160=utf32 +161=utf32 +162=utf32 +163=utf32 +164=utf32 +165=utf32 +166=utf32 +167=utf32 +168=utf32 +169=utf32 +170=utf32 +171=utf32 +172=utf32 +173=utf32 +174=utf32 +175=utf32 +176=utf32 +177=utf32 +178=utf32 +179=utf32 +180=utf32 +181=utf32 +182=utf32 +183=utf32 +192=utf8 +193=utf8 +194=utf8 +195=utf8 +196=utf8 +197=utf8 +198=utf8 +199=utf8 +200=utf8 +201=utf8 +202=utf8 +203=utf8 +204=utf8 +205=utf8 +206=utf8 +207=utf8 +208=utf8 +209=utf8 +210=utf8 +211=utf8 +212=utf8 +213=utf8 +214=utf8 +215=utf8 +223=utf8 +224=utf8mb4 +225=utf8mb4 +226=utf8mb4 +227=utf8mb4 +228=utf8mb4 +229=utf8mb4 +230=utf8mb4 +231=utf8mb4 +232=utf8mb4 +233=utf8mb4 +234=utf8mb4 +235=utf8mb4 +236=utf8mb4 +237=utf8mb4 +238=utf8mb4 +239=utf8mb4 +240=utf8mb4 +241=utf8mb4 +242=utf8mb4 +243=utf8mb4 +244=utf8mb4 +245=utf8mb4 +246=utf8mb4 +247=utf8mb4 \ No newline at end of file diff --git a/src/main/resources/zkconf/partition-hash-int.txt b/src/main/resources/zkconf/partition-hash-int.txt new file mode 100644 index 000000000..c13b35142 --- /dev/null +++ b/src/main/resources/zkconf/partition-hash-int.txt @@ -0,0 +1,2 @@ +10000=0 +10010=1 \ No newline at end of file diff --git a/src/main/resources/zkconf/partition-range-mod.txt b/src/main/resources/zkconf/partition-range-mod.txt new file 
mode 100644 index 000000000..c649b53b3 --- /dev/null +++ b/src/main/resources/zkconf/partition-range-mod.txt @@ -0,0 +1,6 @@ +# range start-end ,data node group size +0-200M=5 +200M1-400M=1 +400M1-600M=4 +600M1-800M=4 +800M1-1000M=6 diff --git a/src/main/resources/zkconf/rule.xml b/src/main/resources/zkconf/rule.xml new file mode 100644 index 000000000..a37bab403 --- /dev/null +++ b/src/main/resources/zkconf/rule.xml @@ -0,0 +1,120 @@ + + + + + + + id + func1 + + + + + + user_id + func1 + + + + + + sharding_id + hash-int + + + + + id + rang-long + + + + + id + mod-long + + + + + id + murmur + + + + + create_date + partbymonth + + + + + calldate + latestMonth + + + + + + id + rang-mod + + + + + + id + jump-consistent-hash + + + + + 0 + 2 + 160 + + + + + partition-hash-int.txt + + + autopartition-long.txt + + + + 3 + + + + 8 + 128 + + + 24 + + + yyyy-MM-dd + 2015-01-01 + + + + partition-range-mod.txt + + + + 3 + + diff --git a/src/main/resources/zkconf/schema.xml b/src/main/resources/zkconf/schema.xml new file mode 100644 index 000000000..d5d5f58e2 --- /dev/null +++ b/src/main/resources/zkconf/schema.xml @@ -0,0 +1,78 @@ + + + + + + + + + +
+
+ +
+ +
+
+ + + + +
+ +
+ + + + + + + select user() + + + + + + + + + + + +
\ No newline at end of file diff --git a/src/main/resources/zkconf/sequence_conf.properties b/src/main/resources/zkconf/sequence_conf.properties new file mode 100644 index 000000000..b96c3acea --- /dev/null +++ b/src/main/resources/zkconf/sequence_conf.properties @@ -0,0 +1,27 @@ +#default global sequence +GLOBAL.HISIDS= +GLOBAL.MINID=10001 +GLOBAL.MAXID=20000 +GLOBAL.CURID=10000 + +# self define sequence +COMPANY.HISIDS= +COMPANY.MINID=1001 +COMPANY.MAXID=2000 +COMPANY.CURID=1000 + +CUSTOMER.HISIDS= +CUSTOMER.MINID=1001 +CUSTOMER.MAXID=2000 +CUSTOMER.CURID=1000 + +ORDER.HISIDS= +ORDER.MINID=1001 +ORDER.MAXID=2000 +ORDER.CURID=1000 + +HOTNEWS.HISIDS= +HOTNEWS.MINID=1001 +HOTNEWS.MAXID=2000 +HOTNEWS.CURID=1000 + diff --git a/src/main/resources/zkconf/sequence_db_conf.properties b/src/main/resources/zkconf/sequence_db_conf.properties new file mode 100644 index 000000000..b51fd1233 --- /dev/null +++ b/src/main/resources/zkconf/sequence_db_conf.properties @@ -0,0 +1,5 @@ +#sequence stored in datanode +GLOBAL=dn1 +COMPANY=dn1 +CUSTOMER=dn1 +ORDERS=dn1 \ No newline at end of file diff --git a/src/main/resources/zkconf/sequence_distributed_conf-mycat_fz_01.properties b/src/main/resources/zkconf/sequence_distributed_conf-mycat_fz_01.properties new file mode 100644 index 000000000..e7a857033 --- /dev/null +++ b/src/main/resources/zkconf/sequence_distributed_conf-mycat_fz_01.properties @@ -0,0 +1,2 @@ +INSTANCEID=02 +CLUSTERID=02 diff --git a/src/main/resources/zkconf/sequence_distributed_conf.properties b/src/main/resources/zkconf/sequence_distributed_conf.properties new file mode 100644 index 000000000..1cacd659c --- /dev/null +++ b/src/main/resources/zkconf/sequence_distributed_conf.properties @@ -0,0 +1,2 @@ +INSTANCEID=01 +CLUSTERID=01 diff --git a/src/main/resources/zkconf/sequence_time_conf-mycat_fz_01.properties b/src/main/resources/zkconf/sequence_time_conf-mycat_fz_01.properties new file mode 100644 index 000000000..c123f570e --- /dev/null +++ 
b/src/main/resources/zkconf/sequence_time_conf-mycat_fz_01.properties @@ -0,0 +1,3 @@ +#sequence depend on TIME +WORKID=03 +DATAACENTERID=03 \ No newline at end of file diff --git a/src/main/resources/zkconf/sequence_time_conf.properties b/src/main/resources/zkconf/sequence_time_conf.properties new file mode 100644 index 000000000..6c9b5b271 --- /dev/null +++ b/src/main/resources/zkconf/sequence_time_conf.properties @@ -0,0 +1,3 @@ +#sequence depend on TIME +WORKID=01 +DATAACENTERID=01 \ No newline at end of file diff --git a/src/main/resources/zkconf/server-mycat_fz_01.xml b/src/main/resources/zkconf/server-mycat_fz_01.xml new file mode 100644 index 000000000..6c773cbd6 --- /dev/null +++ b/src/main/resources/zkconf/server-mycat_fz_01.xml @@ -0,0 +1,84 @@ + + + + + + 1 + 0 + druidparser + 2 + + + + + + 0 + + + + + + + + + 0 + + + 1 + + + 1m + + + 1k + + 0 + + + 389m + + + digdeep + TESTDB + + + + user + TESTDB + true + + + + diff --git a/src/main/resources/zkconf/server.xml b/src/main/resources/zkconf/server.xml new file mode 100644 index 000000000..d18205989 --- /dev/null +++ b/src/main/resources/zkconf/server.xml @@ -0,0 +1,84 @@ + + + + + + 1 + 0 + druidparser + 2 + + + + + + 0 + + + + + + + + + 0 + + + 1 + + + 1m + + + 1k + + 0 + + + 384m + + + digdeep + TESTDB + + + + user + TESTDB + true + + + + diff --git a/src/main/resources/zkconf/sharding-by-enum.txt b/src/main/resources/zkconf/sharding-by-enum.txt new file mode 100644 index 000000000..f2a00424e --- /dev/null +++ b/src/main/resources/zkconf/sharding-by-enum.txt @@ -0,0 +1,2 @@ +10000=0 +10010=1 diff --git a/src/main/resources/zkdownload/auto-sharding-long.txt b/src/main/resources/zkdownload/auto-sharding-long.txt new file mode 100644 index 000000000..c0bb3db9b --- /dev/null +++ b/src/main/resources/zkdownload/auto-sharding-long.txt @@ -0,0 +1,3 @@ +2000001-4000000=1 +0-2000000=0 +4000001-8000000=2 diff --git a/src/main/java/demo/catlets/MyHellowJoin.java b/src/test/java/demo/catlets/MyHellowJoin.java 
similarity index 86% rename from src/main/java/demo/catlets/MyHellowJoin.java rename to src/test/java/demo/catlets/MyHellowJoin.java index 593b402a5..486f1e7ad 100644 --- a/src/main/java/demo/catlets/MyHellowJoin.java +++ b/src/test/java/demo/catlets/MyHellowJoin.java @@ -1,155 +1,155 @@ -package demo.catlets; - -import io.mycat.cache.LayerCachePool; -import io.mycat.server.MySQLFrontConnection; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.packet.RowDataPacket; -import io.mycat.sqlengine.AllJobFinishedListener; -import io.mycat.sqlengine.Catlet; -import io.mycat.sqlengine.EngineCtx; -import io.mycat.sqlengine.SQLJobHandler; -import io.mycat.util.ByteUtil; -import io.mycat.util.ResultSetUtil; - -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; - -public class MyHellowJoin implements Catlet { - - public void processSQL(String sql, EngineCtx ctx) { - - DirectDBJoinHandler joinHandler = new DirectDBJoinHandler(ctx); - String[] dataNodes = { "dn1", "dn2", "dn3" }; - ctx.executeNativeSQLSequnceJob(dataNodes, sql, joinHandler); - ctx.setAllJobFinishedListener(new AllJobFinishedListener() { - - @Override - public void onAllJobFinished(EngineCtx ctx) { - ctx.writeEof(); - - } - }); - } - - @Override - public void route(SystemConfig sysConfig, SchemaConfig schema, int sqlType, - String realSQL, String charset, MySQLFrontConnection sc, - LayerCachePool cachePool) { - - - } -} - -class DirectDBJoinHandler implements SQLJobHandler { - private List fields; - private final EngineCtx ctx; - - public DirectDBJoinHandler(EngineCtx ctx) { - super(); - this.ctx = ctx; - } - - private Map rows = new ConcurrentHashMap(); - private ConcurrentLinkedQueue ids = new ConcurrentLinkedQueue(); - - @Override - public void onHeader(String dataNode, byte[] header, List fields) { - this.fields = fields; - - } - - private 
void createQryJob(int batchSize) { - int count = 0; - Map batchRows = new ConcurrentHashMap(); - String theId = null; - StringBuilder sb = new StringBuilder().append('('); - while ((theId = ids.poll()) != null) { - batchRows.put(theId, rows.remove(theId)); - sb.append(theId).append(','); - if (count++ > batchSize) { - break; - } - } - if (count == 0) { - return; - } - sb.deleteCharAt(sb.length() - 1).append(')'); - String querySQL = "select b.id, b.title from hotnews b where id in " - + sb; - ctx.executeNativeSQLParallJob(new String[] { "dn1", "dn2", "dn3" }, - querySQL, new MyRowOutPutDataHandler(fields, ctx, batchRows)); - } - - @Override - public boolean onRowData(String dataNode, byte[] rowData) { - - String id = ResultSetUtil.getColumnValAsString(rowData, fields, 0); - // 放入结果集 - rows.put(id, rowData); - ids.offer(id); - - int batchSize = 999; - // 满1000条,发送一个查询请求 - if (ids.size() > batchSize) { - createQryJob(batchSize); - } - - return false; - } - - @Override - public void finished(String dataNode, boolean failed) { - if (!failed) { - createQryJob(Integer.MAX_VALUE); - } - // no more jobs - ctx.endJobInput(); - } - -} - -class MyRowOutPutDataHandler implements SQLJobHandler { - private final List afields; - private List bfields; - private final EngineCtx ctx; - private final Map arows; - - public MyRowOutPutDataHandler(List afields, EngineCtx ctx, - Map arows) { - super(); - this.afields = afields; - this.ctx = ctx; - this.arows = arows; - } - - @Override - public void onHeader(String dataNode, byte[] header, List bfields) { - this.bfields=bfields; - ctx.writeHeader(afields, bfields); - } - - @Override - public boolean onRowData(String dataNode, byte[] rowData) { - RowDataPacket rowDataPkg = ResultSetUtil.parseRowData(rowData, bfields); - // 获取Id字段, - String id = ByteUtil.getString(rowDataPkg.fieldValues.get(0)); - byte[] bname = rowDataPkg.fieldValues.get(1); - // 查找ID对应的A表的记录 - byte[] arow = arows.remove(id); - rowDataPkg = ResultSetUtil.parseRowData(arow, 
afields); - // 设置b.name 字段 - rowDataPkg.add(bname); - - ctx.writeRow(rowDataPkg); - // EngineCtx.LOGGER.info("out put row "); - return false; - } - - @Override - public void finished(String dataNode, boolean failed) { - - } -} +package demo.catlets; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; + +import io.mycat.cache.LayerCachePool; +import io.mycat.catlets.Catlet; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.net.mysql.RowDataPacket; +import io.mycat.server.ServerConnection; +import io.mycat.sqlengine.AllJobFinishedListener; +import io.mycat.sqlengine.EngineCtx; +import io.mycat.sqlengine.SQLJobHandler; +import io.mycat.util.ByteUtil; +import io.mycat.util.ResultSetUtil; + +public class MyHellowJoin implements Catlet { + + public void processSQL(String sql, EngineCtx ctx) { + + DirectDBJoinHandler joinHandler = new DirectDBJoinHandler(ctx); + String[] dataNodes = { "dn1", "dn2", "dn3" }; + ctx.executeNativeSQLSequnceJob(dataNodes, sql, joinHandler); + ctx.setAllJobFinishedListener(new AllJobFinishedListener() { + + @Override + public void onAllJobFinished(EngineCtx ctx) { + ctx.writeEof(); + + } + }); + } + + @Override + public void route(SystemConfig sysConfig, SchemaConfig schema, int sqlType, + String realSQL, String charset, ServerConnection sc, + LayerCachePool cachePool) { + + + } +} + +class DirectDBJoinHandler implements SQLJobHandler { + private List fields; + private final EngineCtx ctx; + + public DirectDBJoinHandler(EngineCtx ctx) { + super(); + this.ctx = ctx; + } + + private Map rows = new ConcurrentHashMap(); + private ConcurrentLinkedQueue ids = new ConcurrentLinkedQueue(); + + @Override + public void onHeader(String dataNode, byte[] header, List fields) { + this.fields = fields; + + } + + private void createQryJob(int batchSize) { + int count = 0; + Map batchRows = new 
ConcurrentHashMap(); + String theId = null; + StringBuilder sb = new StringBuilder().append('('); + while ((theId = ids.poll()) != null) { + batchRows.put(theId, rows.remove(theId)); + sb.append(theId).append(','); + if (count++ > batchSize) { + break; + } + } + if (count == 0) { + return; + } + sb.deleteCharAt(sb.length() - 1).append(')'); + String querySQL = "select b.id, b.title from hotnews b where id in " + + sb; + ctx.executeNativeSQLParallJob(new String[] { "dn1", "dn2", "dn3" }, + querySQL, new MyRowOutPutDataHandler(fields, ctx, batchRows)); + } + + @Override + public boolean onRowData(String dataNode, byte[] rowData) { + + String id = ResultSetUtil.getColumnValAsString(rowData, fields, 0); + // 放入结果集 + rows.put(id, rowData); + ids.offer(id); + + int batchSize = 999; + // 满1000条,发送一个查询请求 + if (ids.size() > batchSize) { + createQryJob(batchSize); + } + + return false; + } + + @Override + public void finished(String dataNode, boolean failed, String errorMsg) { + if (!failed) { + createQryJob(Integer.MAX_VALUE); + } + // no more jobs + ctx.endJobInput(); + } + +} + +class MyRowOutPutDataHandler implements SQLJobHandler { + private final List afields; + private List bfields; + private final EngineCtx ctx; + private final Map arows; + + public MyRowOutPutDataHandler(List afields, EngineCtx ctx, + Map arows) { + super(); + this.afields = afields; + this.ctx = ctx; + this.arows = arows; + } + + @Override + public void onHeader(String dataNode, byte[] header, List bfields) { + this.bfields=bfields; + ctx.writeHeader(afields, bfields); + } + + @Override + public boolean onRowData(String dataNode, byte[] rowData) { + RowDataPacket rowDataPkg = ResultSetUtil.parseRowData(rowData, bfields); + // 获取Id字段, + String id = ByteUtil.getString(rowDataPkg.fieldValues.get(0)); + byte[] bname = rowDataPkg.fieldValues.get(1); + // 查找ID对应的A表的记录 + byte[] arow = arows.remove(id); + rowDataPkg = ResultSetUtil.parseRowData(arow, afields); + // 设置b.name 字段 + rowDataPkg.add(bname); + + 
ctx.writeRow(rowDataPkg); + // EngineCtx.LOGGER.info("out put row "); + return false; + } + + @Override + public void finished(String dataNode, boolean failed, String errorMsg) { + + } +} diff --git a/src/test/java/demo/test/TestClass1.java b/src/test/java/demo/test/TestClass1.java index e05fe270f..53bd54fce 100644 --- a/src/test/java/demo/test/TestClass1.java +++ b/src/test/java/demo/test/TestClass1.java @@ -1,13 +1,49 @@ package demo.test; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; + /** - * for dnyclass load test only ,dont delete - * @author wuzhih + * @author mycat * */ public class TestClass1 { - public TestClass1() - { - System.out.println(TestClass1.class.getName()+ " created"); - } + public static void main( String args[] ) throws SQLException , ClassNotFoundException { + String jdbcdriver="com.mysql.jdbc.Driver"; + String jdbcurl="jdbc:mysql://127.0.0.1:8066/TESTDB?useUnicode=true&characterEncoding=utf-8"; + String username="test"; + String password="test"; + System.out.println("开始连接mysql:"+jdbcurl); + Class.forName(jdbcdriver); + Connection c = DriverManager.getConnection(jdbcurl,username,password); + Statement st = c.createStatement(); + print( "test jdbc " , st.executeQuery("select count(*) from travelrecord ")); + System.out.println("OK......"); + } + + static void print( String name , ResultSet res ) + throws SQLException { + System.out.println( name); + ResultSetMetaData meta=res.getMetaData(); + //System.out.println( "\t"+res.getRow()+"条记录"); + String str=""; + for(int i=1;i<=meta.getColumnCount();i++){ + str+=meta.getColumnName(i)+" "; + //System.out.println( meta.getColumnName(i)+" "); + } + System.out.println("\t"+str); + str=""; + while ( res.next() ){ + for(int i=1;i<=meta.getColumnCount();i++){ + str+= res.getString(i)+" "; + } + System.out.println("\t"+str); + str=""; + } + } } diff --git 
a/src/test/java/io/mycat/ConfigInitializerTest.java b/src/test/java/io/mycat/ConfigInitializerTest.java index 4bf3df863..6a364ce4b 100644 --- a/src/test/java/io/mycat/ConfigInitializerTest.java +++ b/src/test/java/io/mycat/ConfigInitializerTest.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,16 +16,16 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. 
* */ package io.mycat; -import io.mycat.server.config.loader.ConfigInitializer; import org.junit.Test; +import io.mycat.config.ConfigInitializer; /** * @author mycat diff --git a/src/test/java/io/mycat/EchoBioServer.java b/src/test/java/io/mycat/EchoBioServer.java index 6e9b8198e..37f220f14 100644 --- a/src/test/java/io/mycat/EchoBioServer.java +++ b/src/test/java/io/mycat/EchoBioServer.java @@ -84,13 +84,14 @@ public void run() { } catch (IOException e) { e.printStackTrace(); - if (socket != null) + if (socket != null) { try { socket.close(); } catch (IOException e1) { - + e1.printStackTrace(); } + } } } } diff --git a/src/test/java/io/mycat/ExecutorTestMain.java b/src/test/java/io/mycat/ExecutorTestMain.java index e4b74f128..6c64a2ffc 100644 --- a/src/test/java/io/mycat/ExecutorTestMain.java +++ b/src/test/java/io/mycat/ExecutorTestMain.java @@ -23,11 +23,11 @@ */ package io.mycat; -import io.mycat.net.ExecutorUtil; - import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.atomic.AtomicLong; +import io.mycat.util.ExecutorUtil; + /** * @author mycat */ diff --git a/src/test/java/io/mycat/SimpleCachePool.java b/src/test/java/io/mycat/SimpleCachePool.java index fdbc92519..e36e4940d 100644 --- a/src/test/java/io/mycat/SimpleCachePool.java +++ b/src/test/java/io/mycat/SimpleCachePool.java @@ -23,12 +23,12 @@ */ package io.mycat; -import io.mycat.cache.CacheStatic; -import io.mycat.cache.LayerCachePool; - import java.util.HashMap; import java.util.Map; +import io.mycat.cache.CacheStatic; +import io.mycat.cache.LayerCachePool; + public class SimpleCachePool implements LayerCachePool { private HashMap cacheMap = new HashMap(); diff --git a/src/test/java/io/mycat/backend/jdbc/mongodb/MongoClientPropertyHelperTest.java b/src/test/java/io/mycat/backend/jdbc/mongodb/MongoClientPropertyHelperTest.java new file mode 100644 index 000000000..c5162eb02 --- /dev/null +++ b/src/test/java/io/mycat/backend/jdbc/mongodb/MongoClientPropertyHelperTest.java @@ -0,0 
+1,23 @@ +package io.mycat.backend.jdbc.mongodb; + +import org.junit.Test; + +import java.util.Properties; + +/** + * @author liuxinsi + * @mail akalxs@gmail.com + */ +public class MongoClientPropertyHelperTest { + @Test + public void testFormatProperties() { + Properties pro = new Properties(); + pro.put("authMechanism", "SCRAM-SHA-1"); + pro.put("readPreference", "nearest"); + pro.put("maxPoolSize", 10); + pro.put("ssl", true); + String options = MongoClientPropertyHelper.formatProperties(pro); + System.out.println(options); + + } +} diff --git a/src/test/java/io/mycat/backend/jdbc/mongodb/MongoEmbeddedObjectProcessorTest.java b/src/test/java/io/mycat/backend/jdbc/mongodb/MongoEmbeddedObjectProcessorTest.java new file mode 100644 index 000000000..169c75067 --- /dev/null +++ b/src/test/java/io/mycat/backend/jdbc/mongodb/MongoEmbeddedObjectProcessorTest.java @@ -0,0 +1,212 @@ +package io.mycat.backend.jdbc.mongodb; + +import com.google.common.collect.Lists; +import com.mongodb.BasicDBObject; +import org.bson.types.ObjectId; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Date; +import java.util.List; +import java.util.Set; + +/** + * @author liuxinsi + * @mail akalxs@gmail.com + */ +public class MongoEmbeddedObjectProcessorTest { + @Test + public void testValueMapperWithObjectId() { + String id = "5978776b8d69f75e091067ed"; + + Object obj = MongoEmbeddedObjectProcessor.valueMapper("_id", id, ObjectId.class); + if (!(obj instanceof ObjectId)) { + Assert.fail("not objectId"); + } + } + + @Test + public void testValueMapperWithEmbeddedObject() { + BasicDBObject dbObj = new BasicDBObject(); + dbObj.put("str", "t1"); + dbObj.put("inte", 1); + dbObj.put("date", new Date()); + dbObj.put("lon", 100L); + dbObj.put("bool", true); + dbObj.put("strs", new String[]{"a", "b", "c"}); + dbObj.put("intes", new Integer[]{1, 2, 3}); + dbObj.put("bytes", "ttt".getBytes()); + dbObj.put("b", "a".getBytes()[0]); + + Object o = 
MongoEmbeddedObjectProcessor.valueMapper("embObj", dbObj, TestObject.class); + if (!(o instanceof TestObject)) { + Assert.fail("not emb obj"); + } + } + + @Test + public void testValueMapperWithDeepEmbeddedObject() { + BasicDBObject dbObj = new BasicDBObject(); + dbObj.put("str", "t1"); + dbObj.put("inte", 1); + dbObj.put("date", new Date()); + dbObj.put("lon", 100L); + dbObj.put("bool", true); + dbObj.put("strs", new String[]{"a", "b", "c"}); + dbObj.put("intes", new Integer[]{1, 2, 3}); + dbObj.put("bytes", "ttt".getBytes()); + dbObj.put("b", "a".getBytes()[0]); + + BasicDBObject embedObj = new BasicDBObject(); + embedObj.put("embeddedStr", "e1"); + + BasicDBObject deepEmbedObj1 = new BasicDBObject(); + deepEmbedObj1.put("str", "aaa"); + + BasicDBObject deepEmbedObj2 = new BasicDBObject(); + deepEmbedObj2.put("str", "bbb"); + + + embedObj.put("testObjectList", Lists.newArrayList(deepEmbedObj1, deepEmbedObj2)); + + dbObj.put("embeddedObject", embedObj); + + Object o = MongoEmbeddedObjectProcessor.valueMapper("embObj", dbObj, TestObject.class); + if (!(o instanceof TestObject)) { + Assert.fail("not emb obj"); + } + System.out.println(o); + } +} + +class TestObject { + private ObjectId _id; + private String str; + private Integer inte; + private Date date; + private Long lon; + private Boolean bool; + private String[] strs; + private Integer[] intes; + private byte[] bytes; + private Byte b; + private EmbeddedObject embeddedObject; + + public ObjectId get_id() { + return _id; + } + + public void set_id(ObjectId _id) { + this._id = _id; + } + + public String getStr() { + return str; + } + + public void setStr(String str) { + this.str = str; + } + + public Integer getInte() { + return inte; + } + + public void setInte(Integer inte) { + this.inte = inte; + } + + public Date getDate() { + return date; + } + + public void setDate(Date date) { + this.date = date; + } + + public Long getLon() { + return lon; + } + + public void setLon(Long lon) { + this.lon = lon; + } + + 
public Boolean getBool() { + return bool; + } + + public void setBool(Boolean bool) { + this.bool = bool; + } + + public String[] getStrs() { + return strs; + } + + public void setStrs(String[] strs) { + this.strs = strs; + } + + public Integer[] getIntes() { + return intes; + } + + public void setIntes(Integer[] intes) { + this.intes = intes; + } + + public byte[] getBytes() { + return bytes; + } + + public void setBytes(byte[] bytes) { + this.bytes = bytes; + } + + public Byte getB() { + return b; + } + + public void setB(Byte b) { + this.b = b; + } + + public EmbeddedObject getEmbeddedObject() { + return embeddedObject; + } + + public void setEmbeddedObject(EmbeddedObject embeddedObject) { + this.embeddedObject = embeddedObject; + } +} + +class EmbeddedObject { + private String embeddedStr; + private List testObjectList; + private Set someCodeSet; + + public String getEmbeddedStr() { + return embeddedStr; + } + + public void setEmbeddedStr(String embeddedStr) { + this.embeddedStr = embeddedStr; + } + + public List getTestObjectList() { + return testObjectList; + } + + public void setTestObjectList(List testObjectList) { + this.testObjectList = testObjectList; + } + + public Set getSomeCodeSet() { + return someCodeSet; + } + + public void setSomeCodeSet(Set someCodeSet) { + this.someCodeSet = someCodeSet; + } +} \ No newline at end of file diff --git a/src/test/java/io/mycat/backend/postgresql/EchoServer.java b/src/test/java/io/mycat/backend/postgresql/EchoServer.java deleted file mode 100644 index 5b6496beb..000000000 --- a/src/test/java/io/mycat/backend/postgresql/EchoServer.java +++ /dev/null @@ -1,63 +0,0 @@ -package io.mycat.backend.postgresql; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.ServerSocket; -import java.net.Socket; - -import org.slf4j.LoggerFactory; - -public class EchoServer { - public static class EchoThor extends Thread { - org.slf4j.Logger logger = 
LoggerFactory.getLogger(EchoThor.class); - - private Socket socket; - - public EchoThor(Socket socket) { - this.socket = socket; - } - - /* - * (non-Javadoc) - * - * @see java.lang.Thread#run() - */ - @Override - public void run() { - try { - InputStream in = socket.getInputStream(); - OutputStream out = socket.getOutputStream(); - for (int i = 0; i < 1000000; i++) { - byte[] b = new byte[1024 * 5]; - in.read(b); - logger.info("读到了数据...."); - out.write("测试一下".getBytes()); - } - } catch (IOException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - - } - - } - - public static void main(String[] args) { - try { - @SuppressWarnings("resource") - ServerSocket serverSocket = new ServerSocket(5210); - - for (;;) { - Socket socket = serverSocket.accept(); - new EchoThor(socket).start(); - ; - } - - } catch (IOException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - - } -} diff --git a/src/test/java/io/mycat/backend/postgresql/PostgresqlKnightriders.java b/src/test/java/io/mycat/backend/postgresql/PostgresqlKnightriders.java deleted file mode 100644 index ad25248ea..000000000 --- a/src/test/java/io/mycat/backend/postgresql/PostgresqlKnightriders.java +++ /dev/null @@ -1,433 +0,0 @@ -package io.mycat.backend.postgresql; - -import io.mycat.backend.postgresql.packet.AuthenticationPacket; -import io.mycat.backend.postgresql.packet.AuthenticationPacket.AuthType; -import io.mycat.backend.postgresql.packet.BackendKeyData; -import io.mycat.backend.postgresql.packet.DataRow; -import io.mycat.backend.postgresql.packet.DataRow.DataColumn; -import io.mycat.backend.postgresql.packet.Parse; -import io.mycat.backend.postgresql.packet.PasswordMessage; -import io.mycat.backend.postgresql.packet.PostgreSQLPacket; -import io.mycat.backend.postgresql.packet.PostgreSQLPacket.DateType; -import io.mycat.backend.postgresql.packet.Query; -import io.mycat.backend.postgresql.packet.Terminate; -import io.mycat.backend.postgresql.utils.PIOUtils; -import 
io.mycat.backend.postgresql.utils.PacketUtils; - -import java.io.IOException; -import java.io.OutputStream; -import java.net.InetSocketAddress; -import java.net.Socket; -import java.nio.ByteBuffer; -import java.nio.channels.SelectionKey; -import java.nio.channels.Selector; -import java.nio.channels.SocketChannel; -import java.util.ArrayList; -import java.util.List; -import java.util.TimeZone; -import java.util.UUID; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.alibaba.fastjson.JSON; - -/************* - * 提交代码.. - * - * @author Coollf - * - */ -public class PostgresqlKnightriders { - - private static Logger logger = LoggerFactory - .getLogger(PostgresqlKnightriders.class); - - public static void main(String[] args) { - - List paramList = new ArrayList(); - String user = "postgres"; - String password = "coollf"; - String database = "mycat"; - String appName = "MyCat-Server"; - - paramList.add(new String[] { "user", user }); - paramList.add(new String[] { "database", database }); - paramList.add(new String[] { "client_encoding", "UTF8" }); - paramList.add(new String[] { "DateStyle", "ISO" }); - paramList.add(new String[] { "TimeZone", createPostgresTimeZone() }); - paramList.add(new String[] { "extra_float_digits", "3" }); - paramList.add(new String[] { "application_name", appName }); - - boolean nio = false; - - try { - Socket socket = new Socket("localhost", 5432); - if (nio) { - - SocketChannel channel = SocketChannel - .open(new InetSocketAddress("localhost", 5210)); - - channel.configureBlocking(false); - - // 打开并注册选择器到信道 - Selector selector = Selector.open(); - channel.register(selector, SelectionKey.OP_READ - | SelectionKey.OP_WRITE); - - // 启动读取线程 - new TCPClientReadThread(selector); - - // sendStartupPacket(channel, paramList.toArray(new - // String[0][])); - ByteBuffer in = ByteBuffer.allocate(10); - channel.read(in); - // System.out.println(in); - - } else { - sendStartupPacket(socket, paramList.toArray(new String[0][])); - 
PostgreSQLPacket packet = readParsePacket(socket).get(0); - if (packet instanceof AuthenticationPacket) { - AuthType aut = ((AuthenticationPacket) packet) - .getAuthType(); - if (aut != AuthType.Ok) { - PasswordMessage pak = new PasswordMessage(user, - password, aut, - ((AuthenticationPacket) packet).getSalt()); - ByteBuffer buffer = ByteBuffer - .allocate(pak.getLength() + 1); - pak.write(buffer); - socket.getOutputStream().write(buffer.array()); - List sqlPacket = readParsePacket(socket); - System.out.println(JSON.toJSONString(sqlPacket)); - int pid = 0; - int secretKey = 0; - for (PostgreSQLPacket p : sqlPacket) { - if (p instanceof BackendKeyData) { - pid = ((BackendKeyData) p).getPid(); - secretKey = ((BackendKeyData) p).getSecretKey(); - } - } - - Query query = new Query( - "SELECT text_,timestamp_ from ump_types"); - // Query query = new Query("SELECT 1"+"\0"); - - ByteBuffer oby = ByteBuffer - .allocate(query.getLength() + 1); - query.write(oby); - - socket.getOutputStream().write(oby.array()); - - sqlPacket = readParsePacket(socket); - for (PostgreSQLPacket p : sqlPacket) { - if (p instanceof DataRow) { - ; - for (DataColumn c : ((DataRow) p).getColumns()) { - System.out.println(new String(c.getData(), - "utf-8")); - } - } - } - System.out.println(JSON.toJSONString(sqlPacket)); - query = new Query(""); - oby = ByteBuffer.allocate(query.getLength() + 1); - query.write(oby); - - socket.getOutputStream().write(oby.array()); - - sqlPacket = readParsePacket(socket); - System.out.println(JSON.toJSONString(sqlPacket)); - - // CancelRequest cancelRequest = new CancelRequest(pid, - // secretKey); - // oby = ByteBuffer.allocate(cancelRequest.getLength()); - // cancelRequest.write(oby); - // socket.getOutputStream().write(oby.array()); - // List pgs = readParsePacket(socket); - // System.out.println(JSON.toJSONString(pgs)); - - // 解析sql - String uuid = UUID.randomUUID().toString(); - - String sql = "INSERT into ump_coupon(id_,name_,time) VALUES (4 , ? 
, now());"; - Parse parse = new Parse(null, sql,DateType.UNKNOWN); - oby = ByteBuffer.allocate(parse.getPacketSize()); - - parse.write(oby); - socket.getOutputStream().write(oby.array()); - socket.getOutputStream().write(new byte[]{0}); - List tre = readParsePacket(socket); - System.out.println(JSON.toJSONString(tre)); - -// Terminate terminate = new Terminate(); -// oby = ByteBuffer.allocate(terminate.getLength() + 1); -// terminate.write(oby); -// socket.getOutputStream().write(oby.array()); - tre = readParsePacket(socket); - System.out.println(tre); - - } - } - } - - System.in.read(); - System.in.read(); - - } catch (Exception e) { - e.printStackTrace(); - } - } - - private static List readParsePacket(Socket socket) - throws IOException, IllegalAccessException { - byte[] bytes = new byte[1024 * 10]; - int leg = socket.getInputStream().read(bytes, 0, bytes.length); - - int offset = 0; - return PacketUtils.parsePacket(bytes, offset, leg); - } - - /** - * Convert Java time zone to postgres time zone. All others stay the same - * except that GMT+nn changes to GMT-nn and vise versa. - * - * @return The current JVM time zone in postgresql format. 
- */ - private static String createPostgresTimeZone() { - String tz = TimeZone.getDefault().getID(); - if (tz.length() <= 3 || !tz.startsWith("GMT")) { - return tz; - } - char sign = tz.charAt(3); - String start; - if (sign == '+') { - start = "GMT-"; - } else if (sign == '-') { - start = "GMT+"; - } else { - // unknown type - return tz; - } - - return start + tz.substring(4); - } - - private static void sendStartupPacket(Socket socket, String[][] params) - throws IOException { - OutputStream sout = socket.getOutputStream(); - if (logger.isDebugEnabled()) { - StringBuilder details = new StringBuilder(); - for (int i = 0; i < params.length; ++i) { - if (i != 0) - details.append(", "); - details.append(params[i][0]); - details.append("="); - details.append(params[i][1]); - } - logger.debug(" FE=> StartupPacket(" + details + ")"); - } - - /* - * Precalculate message length and encode params. - */ - int length = 4 + 4; - byte[][] encodedParams = new byte[params.length * 2][]; - for (int i = 0; i < params.length; ++i) { - encodedParams[i * 2] = params[i][0].getBytes("UTF-8"); - encodedParams[i * 2 + 1] = params[i][1].getBytes("UTF-8"); - length += encodedParams[i * 2].length + 1 - + encodedParams[i * 2 + 1].length + 1; - } - - length += 1; // Terminating \0 - - ByteBuffer buffer = ByteBuffer.allocate(length); - - /* - * Send the startup message. 
- */ - PIOUtils.SendInteger4(length, buffer); - PIOUtils.SendInteger2(3, buffer); // protocol major - PIOUtils.SendInteger2(0, buffer); // protocol minor - for (byte[] encodedParam : encodedParams) { - PIOUtils.Send(encodedParam, buffer); - PIOUtils.SendChar(0, buffer); - } - sout.write(buffer.array()); - } - -} - -class TCPClientReadThread implements Runnable { - - private static Logger logger = LoggerFactory - .getLogger(TCPClientReadThread.class); - private Selector selector; - - - private ByteBuffer bs ; - - public TCPClientReadThread(Selector selector) { - this.selector = selector; - - new Thread(this).start(); - } - - public void run() { - boolean a = false; - try { - while (selector.select() > 0) { - System.out.println("....."); - // 遍历每个有可用IO操作Channel对应的SelectionKey - for (SelectionKey sk : selector.selectedKeys()) { - - if (sk.isWritable()) { - SocketChannel sc = (SocketChannel) sk.channel(); - if (!a) { - sendStartupPacket(sc); - a = true; - } - if(this.bs!= null){ - sc.write(bs); - } - // 删除正在处理的SelectionKey - // selector.selectedKeys().remove(sk); - sk.interestOps(SelectionKey.OP_READ); - } - if (sk.isReadable()) { - // 使用NIO读取Channel中的数据 - SocketChannel sc = (SocketChannel) sk.channel(); - ByteBuffer buffer = ByteBuffer.allocate(1024); - sc.read(buffer); - buffer.flip(); - - byte[] array = buffer.array(); - List ls = PacketUtils.parsePacket( - array, 0, buffer.limit()); - if (ls.size() > 0) { - if (ls.get(0) instanceof AuthenticationPacket) { - AuthenticationPacket aut = (AuthenticationPacket) ls - .get(0); - if (aut.getAuthType() != AuthType.Ok) { - PasswordMessage pak = new PasswordMessage( - "postgres", "coollf", - aut.getAuthType(), aut.getSalt()); - ByteBuffer _buffer = ByteBuffer - .allocate(pak.getLength() + 2); - pak.write(_buffer); - //_buffer.put((byte)0); - _buffer.flip(); - this.bs = _buffer; - // sk.interestOps(SelectionKey.OP_READ); - }else{ - logger.error("登陆成功啦啦啦...."); - } - } - } - sk.interestOps(SelectionKey.OP_WRITE); - - // 控制台打印出来 
- System.out.println("接收到来自服务器" + JSON.toJSONString(ls)); - - // 为下一次读取作准备 - // sk.interestOps(SelectionKey.OP_WRITE); - } - - } - } - System.out.println("熄火了....."); - } catch (IOException ex) { - ex.printStackTrace(); - } - } - - private static void sendStartupPacket(SocketChannel socketChannel) - throws IOException { - List paramList = new ArrayList(); - String user = "postgres"; - String password = "coollf"; - String database = "odoo"; - String appName = "MyCat-Server"; - String assumeMinServerVersion = "9.0.0"; - - paramList.add(new String[] { "user", user }); - paramList.add(new String[] { "database", database }); - paramList.add(new String[] { "client_encoding", "UTF8" }); - paramList.add(new String[] { "DateStyle", "ISO" }); - paramList.add(new String[] { "TimeZone", createPostgresTimeZone() }); - paramList.add(new String[] { "extra_float_digits", "3" }); - paramList.add(new String[] { "application_name", appName }); - - String[][] params = paramList.toArray(new String[0][]); - - if (logger.isDebugEnabled()) { - StringBuilder details = new StringBuilder(); - for (int i = 0; i < params.length; ++i) { - if (i != 0) - details.append(", "); - details.append(params[i][0]); - details.append("="); - details.append(params[i][1]); - } - logger.debug(" FE=> StartupPacket(" + details + ")"); - } - - /* - * Precalculate message length and encode params. - */ - int length = 4 + 4; - byte[][] encodedParams = new byte[params.length * 2][]; - for (int i = 0; i < params.length; ++i) { - encodedParams[i * 2] = params[i][0].getBytes("UTF-8"); - encodedParams[i * 2 + 1] = params[i][1].getBytes("UTF-8"); - length += encodedParams[i * 2].length + 1 - + encodedParams[i * 2 + 1].length + 1; - } - - length += 1; // Terminating \0 - - ByteBuffer buffer = ByteBuffer.allocate(length); - - /* - * Send the startup message. 
- */ - PIOUtils.SendInteger4(length, buffer); - PIOUtils.SendInteger2(3, buffer); // protocol major - PIOUtils.SendInteger2(0, buffer); // protocol minor - for (byte[] encodedParam : encodedParams) { - PIOUtils.Send(encodedParam, buffer); - PIOUtils.SendChar(0, buffer); - } - PIOUtils.Send(new byte[] { 0 }, buffer); - - buffer.flip(); - socketChannel.write(buffer); - - } - - /** - * Convert Java time zone to postgres time zone. All others stay the same - * except that GMT+nn changes to GMT-nn and vise versa. - * - * @return The current JVM time zone in postgresql format. - */ - private static String createPostgresTimeZone() { - String tz = TimeZone.getDefault().getID(); - if (tz.length() <= 3 || !tz.startsWith("GMT")) { - return tz; - } - char sign = tz.charAt(3); - String start; - if (sign == '+') { - start = "GMT-"; - } else if (sign == '-') { - start = "GMT+"; - } else { - // unknown type - return tz; - } - - return start + tz.substring(4); - } -} \ No newline at end of file diff --git a/src/test/java/io/mycat/buffer/TestByteBufferArena.java b/src/test/java/io/mycat/buffer/TestByteBufferArena.java new file mode 100644 index 000000000..55bce2456 --- /dev/null +++ b/src/test/java/io/mycat/buffer/TestByteBufferArena.java @@ -0,0 +1,141 @@ +package io.mycat.buffer; + +import junit.framework.Assert; +import org.junit.Test; +import sun.nio.ch.DirectBuffer; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * 仿照Netty的思路,针对MyCat内存缓冲策略优化 + * 测试ByteBufferArena + * + * @author Hash Zhang + * @version 1.0 + * @time 17:19 2016/5/17 + * @see @https://github.com/netty/netty + */ +public class TestByteBufferArena { + int pageSize = 256; + int chunkSize = 1024 * 8; + int chunkCount = 8*128; + @Test + public void testAllocate() { + int allocTimes = 1024 ; + ByteBufferArena byteBufferArena = new ByteBufferArena(chunkSize,pageSize,chunkCount,8); + long start = System.currentTimeMillis(); + 
for (int i = 0; i < allocTimes; i++) { +// System.out.println("allocate "+i); +// long start=System.nanoTime(); + int size = (i % 1024) + 1 ; + ByteBuffer byteBufer = byteBufferArena.allocate(size); + ByteBuffer byteBufer2 = byteBufferArena.allocate(size); + ByteBuffer byteBufer3 = byteBufferArena.allocate(size); +// System.out.println("alloc "+size+" usage "+(System.nanoTime()-start)); +// start=System.nanoTime(); + byteBufferArena.recycle(byteBufer); + byteBufferArena.recycle(byteBufer3); +// System.out.println("recycle usage "+(System.nanoTime()-start)); + } + long used = (System.currentTimeMillis() - start); + System.out.println("ByteBufferArena total used time " + used + " avg speed " + allocTimes / used); + } + + @Test + public void testAllocateDirect() { + int pageSize = 1024 ; + int allocTimes = 100; + DirectByteBufferPool pool = new DirectByteBufferPool(pageSize, (short) 256, (short) 8,0); + long start = System.currentTimeMillis(); + for (int i = 0; i < allocTimes; i++) { + //System.out.println("allocate "+i); + //long start=System.nanoTime(); + int size = (i % 1024) + 1 ; + ByteBuffer byteBufer = pool.allocate(size); + ByteBuffer byteBufer2 = pool.allocate(size); + ByteBuffer byteBufer3 = pool.allocate(size); + //System.out.println("alloc "+size+" usage "+(System.nanoTime()-start)); + //start=System.nanoTime(); + pool.recycle(byteBufer); + pool.recycle(byteBufer3); + //System.out.println("recycle usage "+(System.nanoTime()-start)); + } + long used = (System.currentTimeMillis() - start); +// System.out.println("DirectByteBufferPool total used time " + used + " avg speed " + allocTimes / used); + } + + @Test + public void testExpansion(){ + ByteBufferArena byteBufferArena = new ByteBufferArena(1024,8,1,8); + for (int i = 0; i < 1 ; i++) { + ByteBuffer byteBufer = byteBufferArena.allocate(256); + ByteBuffer byteBufer2 = byteBufferArena.allocate(256); + ByteBuffer byteBufer3 = byteBufferArena.allocate(256); + + byteBufferArena.recycle(byteBufer); + } + } + + 
@Test + public void testAllocateWithDifferentAddress() { + int size = 256; + int pageSize = size * 4; + int allocTimes = 8; + ByteBufferArena byteBufferArena = new ByteBufferArena(256*4,256,2,8); + Map buffs = new HashMap(8); + ByteBuffer byteBuffer = null; + DirectBuffer directBuffer = null; + ByteBuffer temp = null; + long address; + boolean failure = false; + for (int i = 0; i < allocTimes; i++) { + byteBuffer = byteBufferArena.allocate(size); + if (byteBuffer == null) { + Assert.fail("Should have enough memory"); + } + directBuffer = (DirectBuffer) byteBuffer; + address = directBuffer.address(); + System.out.println(address); + temp = buffs.get(address); + buffs.put(address, byteBuffer); + if (null != temp) { + failure = true; + break; + } + } + + for (ByteBuffer buff : buffs.values()) { + byteBufferArena.recycle(buff); + } + + if (failure == true) { + Assert.fail("Allocate with same address"); + } + } + + @Test + public void testAllocateNullWhenOutOfMemory() { + int size = 256; + int pageSize = size * 4; + int allocTimes = 9; + ByteBufferArena pool = new ByteBufferArena(256*4,256,2,8);; + long start = System.currentTimeMillis(); + ByteBuffer byteBuffer = null; + List buffs = new ArrayList(); + int i = 0; + for (; i < allocTimes; i++) { + byteBuffer = pool.allocate(size); + if (byteBuffer == null) { + break; + } + buffs.add(byteBuffer); + } + for (ByteBuffer buff : buffs) { + pool.recycle(buff); + } + } +} diff --git a/src/test/java/io/mycat/buffer/TestDirectByteBufferPool.java b/src/test/java/io/mycat/buffer/TestDirectByteBufferPool.java new file mode 100644 index 000000000..76ba5ebdc --- /dev/null +++ b/src/test/java/io/mycat/buffer/TestDirectByteBufferPool.java @@ -0,0 +1,164 @@ +package io.mycat.buffer; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import junit.framework.Assert; +import org.junit.Test; + +import sun.nio.ch.DirectBuffer; + +public class 
TestDirectByteBufferPool { + + @Test + public void testAllocate() { + int pageSize = 1024 ; + int allocTimes = 1024; + DirectByteBufferPool pool = new DirectByteBufferPool(pageSize, (short) 256, (short) 8,0); + long start = System.currentTimeMillis(); + for (int i = 0; i < allocTimes; i++) { + //System.out.println("allocate "+i); + //long start=System.nanoTime(); + int size = (i % 1024) + 1 ; + ByteBuffer byteBufer = pool.allocate(size); + ByteBuffer byteBufer2 = pool.allocate(size); + ByteBuffer byteBufer3 = pool.allocate(size); + //System.out.println("alloc "+size+" usage "+(System.nanoTime()-start)); + //start=System.nanoTime(); + pool.recycle(byteBufer); + pool.recycle(byteBufer3); + //System.out.println("recycle usage "+(System.nanoTime()-start)); + } + long used = (System.currentTimeMillis() - start); + System.out.println("total used time " + used + " avg speed " + allocTimes / used); + } + + @Test + public void testAllocateWithDifferentAddress() { + int size = 256; + int pageSize = size * 4; + int allocTimes = 8; + DirectByteBufferPool pool = new DirectByteBufferPool(pageSize, (short) 256, (short) 2,0); + + Map buffs = new HashMap(8); + ByteBuffer byteBuffer = null; + DirectBuffer directBuffer = null; + ByteBuffer temp = null; + long address; + boolean failure = false; + for (int i = 0; i < allocTimes; i++) { + byteBuffer = pool.allocate(size); + if (byteBuffer == null) { + Assert.fail("Should have enough memory"); + } + directBuffer = (DirectBuffer) byteBuffer; + address = directBuffer.address(); + System.out.println(address); + temp = buffs.get(address); + buffs.put(address, byteBuffer); + if (null != temp) { + failure = true; + break; + } + } + + for (ByteBuffer buff : buffs.values()) { + pool.recycle(buff); + } + + if (failure == true) { + Assert.fail("Allocate with same address"); + } + } + + @Test + public void testAllocateNullWhenOutOfMemory() { + int size = 256; + int pageSize = size * 4; + int allocTimes = 9; + DirectByteBufferPool pool = new 
DirectByteBufferPool(pageSize, (short) 256, (short) 2,0); + long start = System.currentTimeMillis(); + ByteBuffer byteBuffer = null; + List buffs = new ArrayList(); + int i = 0; + for (; i < allocTimes; i++) { + byteBuffer = pool.allocate(size); + if (byteBuffer == null||!(byteBuffer instanceof DirectBuffer) ) { + break; + } + buffs.add(byteBuffer); + } + for (ByteBuffer buff : buffs) { + pool.recycle(buff); + } + + Assert.assertEquals("Should out of memory when i = " + 8, i, 8); + } + + @Test + public void testAllocateSign() { + int size = 256; + int pageSize = size * 4; + int allocTimes = 9; + DirectByteBufferPool pool = new DirectByteBufferPool(pageSize, (short) 256, (short) 2,0); + long start = System.currentTimeMillis(); + ByteBuffer byteBuffer = null; + List buffs = new ArrayList(); + int i = 0; + for (; i < allocTimes; i++) { + byteBuffer = pool.allocate(size); + if (byteBuffer == null||!(byteBuffer instanceof DirectBuffer) ) { + break; + } + buffs.add(byteBuffer); + } + for (ByteBuffer buff : buffs) { + pool.recycle(buff); + } + + Assert.assertEquals("Should out of memory when i = " + 8, i, 8); + } + + @Test + public void testExpandBuffer(){ + int size = 512; + int pageSize = 1024*1024; + int allocTimes = 9; + DirectByteBufferPool pool = new DirectByteBufferPool(pageSize, (short) 512, (short) 64,0); + ByteBuffer byteBuffer = pool.allocate(1024); + String str = "DirectByteBufferPool pool = new DirectByteBufferPool(pageSize, (short) 256, (short) 8)"; + ByteBuffer newByteBuffer = null; + int i = 0; + while (i<10){ + if(byteBuffer.remaining() freeMaps = new ConcurrentHashMap<>(); + final MyCatMemoryAllocator memoryAllocator = + new MyCatMemoryAllocator(Runtime.getRuntime().availableProcessors()*2); + @Test + public void testMemAlloc(){ + + for (int i = 0; i <10000/**20000000*/; i++) { + ByteBuffer byteBuffer = getBuffer(8194); + byteBuffer.put("helll world".getBytes()); + byteBuffer.flip(); + byte [] src= new byte[byteBuffer.remaining()]; + byteBuffer.get(src); 
+ Assert.assertEquals("helll world",new String(src)); + free(byteBuffer); + } + } + + + public ByteBuffer getBuffer(int len) + { + ByteBuf byteBuf = memoryAllocator.directBuffer(len); + ByteBuffer byteBuffer = byteBuf.nioBuffer(0,len); + freeMaps.put(PlatformDependent.directBufferAddress(byteBuffer),byteBuf); + return byteBuffer; + } + + public void free(ByteBuffer byteBuffer) + { + ByteBuf byteBuf1 = freeMaps.get(PlatformDependent.directBufferAddress(byteBuffer)); + byteBuf1.release(); + Assert.assertEquals(0,byteBuf1.refCnt()); + } + + + public static String getString(ByteBuffer buffer) { + Charset charset = null; + CharsetDecoder decoder = null; + CharBuffer charBuffer = null; + try { + charset = Charset.forName("UTF-8"); + decoder = charset.newDecoder(); + charBuffer = decoder.decode(buffer.asReadOnlyBuffer()); + return charBuffer.toString(); + } catch (Exception ex) { + ex.printStackTrace(); + return "error"; + } + } + + public static ByteBuffer getByteBuffer(String str) + { + return ByteBuffer.wrap(str.getBytes()); + } +} diff --git a/src/test/java/io/mycat/cache/DefaultLayedCachePoolTest.java b/src/test/java/io/mycat/cache/DefaultLayedCachePoolTest.java index 1551c090b..1ea7dd0b8 100644 --- a/src/test/java/io/mycat/cache/DefaultLayedCachePoolTest.java +++ b/src/test/java/io/mycat/cache/DefaultLayedCachePoolTest.java @@ -23,11 +23,14 @@ */ package io.mycat.cache; -import io.mycat.cache.impl.EnchachePooFactory; import junit.framework.Assert; import org.junit.Test; +import io.mycat.cache.CacheStatic; +import io.mycat.cache.DefaultLayedCachePool; +import io.mycat.cache.impl.EnchachePooFactory; + public class DefaultLayedCachePoolTest { private static DefaultLayedCachePool layedCachePool; diff --git a/src/test/java/io/mycat/cache/EnCachePoolTest.java b/src/test/java/io/mycat/cache/EnCachePoolTest.java index b52ce5f70..a8135da54 100644 --- a/src/test/java/io/mycat/cache/EnCachePoolTest.java +++ b/src/test/java/io/mycat/cache/EnCachePoolTest.java @@ -23,7 +23,6 @@ 
*/ package io.mycat.cache; -import io.mycat.cache.impl.EnchachePool; import junit.framework.Assert; import net.sf.ehcache.Cache; import net.sf.ehcache.CacheManager; @@ -32,6 +31,9 @@ import org.junit.Test; +import io.mycat.cache.CacheStatic; +import io.mycat.cache.impl.EnchachePool; + public class EnCachePoolTest { private static EnchachePool enCachePool; diff --git a/src/test/java/io/mycat/cache/TestCachePoolPerformance.java b/src/test/java/io/mycat/cache/TestCachePoolPerformance.java index 6d520a009..3f97ab952 100644 --- a/src/test/java/io/mycat/cache/TestCachePoolPerformance.java +++ b/src/test/java/io/mycat/cache/TestCachePoolPerformance.java @@ -23,12 +23,14 @@ */ package io.mycat.cache; +import io.mycat.cache.CachePool; +import io.mycat.cache.CacheStatic; +import io.mycat.cache.impl.EnchachePool; +import io.mycat.cache.impl.MapDBCachePooFactory; /** * test cache performance ,for encache test set VM param -server -Xms1100M -Xmx1100M * for mapdb set vm param -server -Xms100M -Xmx100M -XX:MaxPermSize=1G */ -import io.mycat.cache.impl.EnchachePool; -import io.mycat.cache.impl.MapDBCachePooFactory; import net.sf.ehcache.Cache; import net.sf.ehcache.CacheManager; import net.sf.ehcache.config.CacheConfiguration; diff --git a/src/test/java/io/mycat/classload/TestDynClassLoad.java b/src/test/java/io/mycat/classload/TestDynClassLoad.java index 90330041c..dfac03f35 100644 --- a/src/test/java/io/mycat/classload/TestDynClassLoad.java +++ b/src/test/java/io/mycat/classload/TestDynClassLoad.java @@ -1,10 +1,10 @@ package io.mycat.classload; -import io.mycat.server.classloader.DynaClassLoader; - import org.junit.Assert; import org.junit.Test; +import io.mycat.config.classloader.DynaClassLoader; + public class TestDynClassLoad { @Test diff --git a/src/test/java/io/mycat/config/ConfigTest.java b/src/test/java/io/mycat/config/ConfigTest.java new file mode 100644 index 000000000..6bcefb201 --- /dev/null +++ b/src/test/java/io/mycat/config/ConfigTest.java @@ -0,0 +1,153 @@ 
+package io.mycat.config; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; + +import org.junit.Test; + +import io.mycat.backend.datasource.PhysicalDBPool; +import io.mycat.backend.datasource.PhysicalDatasource; +import io.mycat.backend.jdbc.JDBCDatasource; +import io.mycat.backend.mysql.nio.MySQLDataSource; +import io.mycat.config.loader.ConfigLoader; +import io.mycat.config.loader.xml.XMLConfigLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.DBHostConfig; +import io.mycat.config.model.DataHostConfig; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.config.model.UserConfig; +import io.mycat.config.util.ConfigException; +import junit.framework.Assert; + +public class ConfigTest { + + private SystemConfig system; + private final Map users; + private Map schemas; + private Map dataHosts; + + public ConfigTest() { + + String schemaFile = "/config/schema.xml"; + String ruleFile = "/config/rule.xml"; + + XMLSchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + XMLConfigLoader configLoader = new XMLConfigLoader(schemaLoader); + + this.system = configLoader.getSystemConfig(); + this.users = configLoader.getUserConfigs(); + this.schemas = configLoader.getSchemaConfigs(); + this.dataHosts = initDataHosts(configLoader); + + } + + /** + * 测试 临时读可用 配置 + */ + @Test + public void testTempReadHostAvailable() { + PhysicalDBPool pool = this.dataHosts.get("localhost2"); + DataHostConfig hostConfig = pool.getSource().getHostConfig(); + Assert.assertTrue( hostConfig.isTempReadHostAvailable() == true ); + } + + /** + * 测试 用户服务降级 拒连 配置 + */ + @Test + public void testReadUserBenchmark() { + UserConfig userConfig = this.users.get("test"); + int benchmark = userConfig.getBenchmark(); + Assert.assertTrue( benchmark == 11111 ); + } + + + /** + * 测试 读服务的 权重 + * + * @throws Exception + */ + @Test + public 
void testReadHostWeight() throws Exception { + + ArrayList okSources = new ArrayList(); + + PhysicalDBPool pool = this.dataHosts.get("localhost2"); + okSources.addAll(pool.getAllDataSources()); + PhysicalDatasource source = pool.randomSelect( okSources ); + + Assert.assertTrue( source != null ); + } + + /** + * 测试 动态日期表 + * + * @throws Exception + */ + @Test + public void testDynamicYYYYMMTable() throws Exception { + SchemaConfig sc = this.schemas.get("dbtest1"); + Map tbm = sc.getTables(); + Assert.assertTrue( tbm.size() == 32); + } + + private Map initDataHosts(ConfigLoader configLoader) { + Map nodeConfs = configLoader.getDataHosts(); + Map nodes = new HashMap( + nodeConfs.size()); + for (DataHostConfig conf : nodeConfs.values()) { + PhysicalDBPool pool = getPhysicalDBPool(conf, configLoader); + nodes.put(pool.getHostName(), pool); + } + return nodes; + } + + private PhysicalDatasource[] createDataSource(DataHostConfig conf, + String hostName, String dbType, String dbDriver, + DBHostConfig[] nodes, boolean isRead) { + PhysicalDatasource[] dataSources = new PhysicalDatasource[nodes.length]; + if (dbType.equals("mysql") && dbDriver.equals("native")) { + for (int i = 0; i < nodes.length; i++) { + nodes[i].setIdleTimeout(system.getIdleTimeout()); + MySQLDataSource ds = new MySQLDataSource(nodes[i], conf, isRead); + dataSources[i] = ds; + } + + } else if(dbDriver.equals("jdbc")) + { + for (int i = 0; i < nodes.length; i++) { + nodes[i].setIdleTimeout(system.getIdleTimeout()); + JDBCDatasource ds = new JDBCDatasource(nodes[i], conf, isRead); + dataSources[i] = ds; + } + } + else { + throw new ConfigException("not supported yet !" 
+ hostName); + } + return dataSources; + } + + private PhysicalDBPool getPhysicalDBPool(DataHostConfig conf, + ConfigLoader configLoader) { + String name = conf.getName(); + String dbType = conf.getDbType(); + String dbDriver = conf.getDbDriver(); + PhysicalDatasource[] writeSources = createDataSource(conf, name, + dbType, dbDriver, conf.getWriteHosts(), false); + Map readHostsMap = conf.getReadHosts(); + Map readSourcesMap = new HashMap( + readHostsMap.size()); + for (Map.Entry entry : readHostsMap.entrySet()) { + PhysicalDatasource[] readSources = createDataSource(conf, name, + dbType, dbDriver, entry.getValue(), true); + readSourcesMap.put(entry.getKey(), readSources); + } + PhysicalDBPool pool = new PhysicalDBPool(conf.getName(),conf, writeSources, + readSourcesMap, conf.getBalance(), conf.getWriteType()); + return pool; + } + +} diff --git a/src/test/java/io/mycat/memory/unsafe/PlatformUtilSuite.java b/src/test/java/io/mycat/memory/unsafe/PlatformUtilSuite.java new file mode 100644 index 000000000..78f6bccd4 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/PlatformUtilSuite.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.mycat.memory.unsafe; + +import org.junit.Assert; +import org.junit.Test; + +public class PlatformUtilSuite { + + @Test + public void overlappingCopyMemory() { + byte[] data = new byte[3 * 1024 * 1024]; + int size = 2 * 1024 * 1024; + for (int i = 0; i < data.length; ++i) { + data[i] = (byte)i; + } + + Platform.copyMemory(data, Platform.BYTE_ARRAY_OFFSET, data, Platform.BYTE_ARRAY_OFFSET, size); + for (int i = 0; i < data.length; ++i) { + Assert.assertEquals((byte)i, data[i]); + } + + Platform.copyMemory( + data, + Platform.BYTE_ARRAY_OFFSET + 1, + data, + Platform.BYTE_ARRAY_OFFSET, + size); + for (int i = 0; i < size; ++i) { + Assert.assertEquals((byte)(i + 1), data[i]); + } + + for (int i = 0; i < data.length; ++i) { + data[i] = (byte)i; + } + Platform.copyMemory( + data, + Platform.BYTE_ARRAY_OFFSET, + data, + Platform.BYTE_ARRAY_OFFSET + 1, + size); + for (int i = 0; i < size; ++i) { + Assert.assertEquals((byte)i, data[i + 1]); + } + } +} diff --git a/src/test/java/io/mycat/memory/unsafe/array/LongArraySuite.java b/src/test/java/io/mycat/memory/unsafe/array/LongArraySuite.java new file mode 100644 index 000000000..2d3868a7e --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/array/LongArraySuite.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.array; + +import io.mycat.memory.unsafe.memory.MemoryBlock; +import org.junit.Assert; +import org.junit.Test; + + +public class LongArraySuite { + + @Test + public void basicTest() { + long[] bytes = new long[2]; + LongArray arr = new LongArray(MemoryBlock.fromLongArray(bytes)); + arr.set(0, 1L); + arr.set(1, 2L); + arr.set(1, 3L); + Assert.assertEquals(2, arr.size()); + Assert.assertEquals(1L, arr.get(0)); + Assert.assertEquals(3L, arr.get(1)); + + arr.zeroOut(); + Assert.assertEquals(0L, arr.get(0)); + Assert.assertEquals(0L, arr.get(1)); + } +} diff --git a/src/test/java/io/mycat/memory/unsafe/hash/Murmur3_x86_32Suite.java b/src/test/java/io/mycat/memory/unsafe/hash/Murmur3_x86_32Suite.java new file mode 100644 index 000000000..d71e9c4e3 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/hash/Murmur3_x86_32Suite.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.mycat.memory.unsafe.hash; + + +import io.mycat.memory.unsafe.Platform; +import org.junit.Assert; +import org.junit.Test; + + +import java.nio.charset.StandardCharsets; +import java.util.HashSet; +import java.util.Random; +import java.util.Set; + +/** + * Test file based on Guava's Murmur3Hash32Test. + */ +public class Murmur3_x86_32Suite { + + private static final Murmur3_x86_32 hasher = new Murmur3_x86_32(0); + + @Test + public void testKnownIntegerInputs() { + Assert.assertEquals(593689054, hasher.hashInt(0)); + Assert.assertEquals(-189366624, hasher.hashInt(-42)); + Assert.assertEquals(-1134849565, hasher.hashInt(42)); + Assert.assertEquals(-1718298732, hasher.hashInt(Integer.MIN_VALUE)); + Assert.assertEquals(-1653689534, hasher.hashInt(Integer.MAX_VALUE)); + } + + @Test + public void testKnownLongInputs() { + Assert.assertEquals(1669671676, hasher.hashLong(0L)); + Assert.assertEquals(-846261623, hasher.hashLong(-42L)); + Assert.assertEquals(1871679806, hasher.hashLong(42L)); + Assert.assertEquals(1366273829, hasher.hashLong(Long.MIN_VALUE)); + Assert.assertEquals(-2106506049, hasher.hashLong(Long.MAX_VALUE)); + } + + @Test + public void randomizedStressTest() { + int size = 65536; + Random rand = new Random(); + + // A set used to track collision rate. + Set hashcodes = new HashSet(); + for (int i = 0; i < size; i++) { + int vint = rand.nextInt(); + long lint = rand.nextLong(); + Assert.assertEquals(hasher.hashInt(vint), hasher.hashInt(vint)); + Assert.assertEquals(hasher.hashLong(lint), hasher.hashLong(lint)); + + hashcodes.add(hasher.hashLong(lint)); + } + + // A very loose bound. + Assert.assertTrue(hashcodes.size() > size * 0.95); + } + + @Test + public void randomizedStressTestBytes() { + int size = 65536; + Random rand = new Random(); + + // A set used to track collision rate. 
+ Set hashcodes = new HashSet(); + for (int i = 0; i < size; i++) { + int byteArrSize = rand.nextInt(100) * 8; + byte[] bytes = new byte[byteArrSize]; + rand.nextBytes(bytes); + + Assert.assertEquals( + hasher.hashUnsafeWords(bytes, Platform.BYTE_ARRAY_OFFSET, byteArrSize), + hasher.hashUnsafeWords(bytes, Platform.BYTE_ARRAY_OFFSET, byteArrSize)); + + hashcodes.add(hasher.hashUnsafeWords( + bytes, Platform.BYTE_ARRAY_OFFSET, byteArrSize)); + } + + // A very loose bound. + Assert.assertTrue(hashcodes.size() > size * 0.95); + } + + @Test + public void randomizedStressTestPaddedStrings() { + int size = 64000; + // A set used to track collision rate. + Set hashcodes = new HashSet(); + for (int i = 0; i < size; i++) { + int byteArrSize = 8; + byte[] strBytes = String.valueOf(i).getBytes(StandardCharsets.UTF_8); + byte[] paddedBytes = new byte[byteArrSize]; + System.arraycopy(strBytes, 0, paddedBytes, 0, strBytes.length); + + Assert.assertEquals( + hasher.hashUnsafeWords(paddedBytes, Platform.BYTE_ARRAY_OFFSET, byteArrSize), + hasher.hashUnsafeWords(paddedBytes, Platform.BYTE_ARRAY_OFFSET, byteArrSize)); + + hashcodes.add(hasher.hashUnsafeWords( + paddedBytes, Platform.BYTE_ARRAY_OFFSET, byteArrSize)); + } + + // A very loose bound. + Assert.assertTrue(hashcodes.size() > size * 0.95); + } +} diff --git a/src/test/java/io/mycat/memory/unsafe/map/AbstractBytesToBytesMapSuite.java b/src/test/java/io/mycat/memory/unsafe/map/AbstractBytesToBytesMapSuite.java new file mode 100644 index 000000000..73d8adf1e --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/map/AbstractBytesToBytesMapSuite.java @@ -0,0 +1,643 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.map; + +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.array.ByteArrayMethods; +import io.mycat.memory.unsafe.memory.TestMemoryManager; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.storage.DataNodeDiskManager; +import io.mycat.memory.unsafe.storage.SerializerManager; +import io.mycat.memory.unsafe.utils.JavaUtils; +import io.mycat.memory.unsafe.utils.MycatPropertyConf; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mock; + + +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.*; + +import static org.hamcrest.Matchers.greaterThan; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.mockito.Answers.RETURNS_SMART_NULLS; + + +public abstract class AbstractBytesToBytesMapSuite { + + private final Random rand = new Random(42); + MycatPropertyConf conf = new MycatPropertyConf() + .set("mycat.memory.offHeap.enabled", "" + useOffHeapMemoryAllocator()) + .set("mycat.memory.offHeap.size", "256mb"); + private TestMemoryManager memoryManager = + new TestMemoryManager(conf + ); + + private DataNodeMemoryManager dataNodeMemoryManager = + new DataNodeMemoryManager(memoryManager,0); + + private SerializerManager 
serializerManager = new SerializerManager(); + private static final long PAGE_SIZE_BYTES = 1L << 26; // 64 megabytes + + final LinkedList spillFilesCreated = new LinkedList(); + File tempDir; + + DataNodeDiskManager blockManager = new DataNodeDiskManager(conf,true,serializerManager); + + +/* + private static final class CompressStream extends AbstractFunction1 { + @Override + public OutputStream apply(OutputStream stream) { + return stream; + } + } +*/ + @Before + public void setup() { + } + + @After + public void tearDown() throws IOException { + //Utils.deleteRecursively(tempDir); + //tempDir = null; + + if (dataNodeMemoryManager != null) { + Assert.assertEquals(0L, dataNodeMemoryManager.cleanUpAllAllocatedMemory()); + long leakedMemory = dataNodeMemoryManager.getMemoryConsumptionForThisConnection(); + dataNodeMemoryManager = null; + Assert.assertEquals(0L, leakedMemory); + } + } + + protected abstract boolean useOffHeapMemoryAllocator(); + + private static byte[] getByteArray(Object base, long offset, int size) { + final byte[] arr = new byte[size]; + Platform.copyMemory(base, offset, arr, Platform.BYTE_ARRAY_OFFSET, size); + return arr; + } + + private byte[] getRandomByteArray(int numWords) { + Assert.assertTrue(numWords >= 0); + final int lengthInBytes = numWords * 8; + final byte[] bytes = new byte[lengthInBytes]; + rand.nextBytes(bytes); + return bytes; + } + + /** + * Fast equality checking for byte arrays, since these comparisons are a bottleneck + * in our stress tests. 
+ */ + private static boolean arrayEquals( + byte[] expected, + Object base, + long offset, + long actualLengthBytes) { + return (actualLengthBytes == expected.length) && ByteArrayMethods.arrayEquals( + expected, + Platform.BYTE_ARRAY_OFFSET, + base, + offset, + expected.length + ); + } + + @Test + public void emptyMap() { + BytesToBytesMap map = new BytesToBytesMap(dataNodeMemoryManager, 64, PAGE_SIZE_BYTES); + try { + Assert.assertEquals(0, map.numKeys()); + final int keyLengthInWords = 10; + final int keyLengthInBytes = keyLengthInWords * 8; + final byte[] key = getRandomByteArray(keyLengthInWords); + Assert.assertFalse(map.lookup(key, Platform.BYTE_ARRAY_OFFSET, keyLengthInBytes).isDefined()); + Assert.assertFalse(map.iterator().hasNext()); + } finally { + map.free(); + } + } + + @Test + public void setAndRetrieveAKey() { + BytesToBytesMap map = new BytesToBytesMap(dataNodeMemoryManager, 64, PAGE_SIZE_BYTES); + final int recordLengthWords = 10; + final int recordLengthBytes = recordLengthWords * 8; + final byte[] keyData = getRandomByteArray(recordLengthWords); + final byte[] valueData = getRandomByteArray(recordLengthWords); + try { + final BytesToBytesMap.Location loc = + map.lookup(keyData, Platform.BYTE_ARRAY_OFFSET, recordLengthBytes); + Assert.assertFalse(loc.isDefined()); + Assert.assertTrue(loc.append( + keyData, + Platform.BYTE_ARRAY_OFFSET, + recordLengthBytes, + valueData, + Platform.BYTE_ARRAY_OFFSET, + recordLengthBytes + )); + // After storing the key and value, the other location methods should return results that + // reflect the result of this store without us having to call lookup() again on the same key. 
+ Assert.assertEquals(recordLengthBytes, loc.getKeyLength()); + Assert.assertEquals(recordLengthBytes, loc.getValueLength()); + Assert.assertArrayEquals(keyData, + getByteArray(loc.getKeyBase(), loc.getKeyOffset(), recordLengthBytes)); + Assert.assertArrayEquals(valueData, + getByteArray(loc.getValueBase(), loc.getValueOffset(), recordLengthBytes)); + + // After calling lookup() the location should still point to the correct data. + Assert.assertTrue( + map.lookup(keyData, Platform.BYTE_ARRAY_OFFSET, recordLengthBytes).isDefined()); + Assert.assertEquals(recordLengthBytes, loc.getKeyLength()); + Assert.assertEquals(recordLengthBytes, loc.getValueLength()); + Assert.assertArrayEquals(keyData, + getByteArray(loc.getKeyBase(), loc.getKeyOffset(), recordLengthBytes)); + Assert.assertArrayEquals(valueData, + getByteArray(loc.getValueBase(), loc.getValueOffset(), recordLengthBytes)); + + try { + Assert.assertTrue(loc.append( + keyData, + Platform.BYTE_ARRAY_OFFSET, + recordLengthBytes, + valueData, + Platform.BYTE_ARRAY_OFFSET, + recordLengthBytes + )); + Assert.fail("Should not be able to set a new value for a key"); + } catch (AssertionError e) { + // Expected exception; do nothing. 
+ } + } finally { + map.free(); + } + } + + private void iteratorTestBase(boolean destructive) throws Exception { + final int size = 4096; + BytesToBytesMap map = new BytesToBytesMap(dataNodeMemoryManager, size / 2, PAGE_SIZE_BYTES); + try { + for (long i = 0; i < size; i++) { + final long[] value = new long[] { i }; + final BytesToBytesMap.Location loc = + map.lookup(value, Platform.LONG_ARRAY_OFFSET, 8); + Assert.assertFalse(loc.isDefined()); + // Ensure that we store some zero-length keys + if (i % 5 == 0) { + Assert.assertTrue(loc.append( + null, + Platform.LONG_ARRAY_OFFSET, + 0, + value, + Platform.LONG_ARRAY_OFFSET, + 8 + )); + } else { + Assert.assertTrue(loc.append( + value, + Platform.LONG_ARRAY_OFFSET, + 8, + value, + Platform.LONG_ARRAY_OFFSET, + 8 + )); + } + } + final BitSet valuesSeen = new BitSet(size); + final Iterator iter; + if (destructive) { + iter = map.destructiveIterator(); + } else { + iter = map.iterator(); + } + int numPages = map.getNumDataPages(); + int countFreedPages = 0; + while (iter.hasNext()) { + final BytesToBytesMap.Location loc = iter.next(); + Assert.assertTrue(loc.isDefined()); + final long value = Platform.getLong(loc.getValueBase(), loc.getValueOffset()); + final long keyLength = loc.getKeyLength(); + if (keyLength == 0) { + Assert.assertTrue("value " + value + " was not divisible by 5", value % 5 == 0); + } else { + final long key = Platform.getLong(loc.getKeyBase(), loc.getKeyOffset()); + Assert.assertEquals(value, key); + } + valuesSeen.set((int) value); + if (destructive) { + // The iterator moves onto next page and frees previous page + if (map.getNumDataPages() < numPages) { + numPages = map.getNumDataPages(); + countFreedPages++; + } + } + } + if (destructive) { + // Latest page is not freed by iterator but by map itself + Assert.assertEquals(countFreedPages, numPages - 1); + } + Assert.assertEquals(size, valuesSeen.cardinality()); + } finally { + map.free(); + } + } + + @Test + public void iteratorTest() throws 
Exception { + iteratorTestBase(false); + } + + @Test + public void destructiveIteratorTest() throws Exception { + iteratorTestBase(true); + } + + @Test + public void iteratingOverDataPagesWithWastedSpace() throws Exception { + final int NUM_ENTRIES = 1000 * 1000; + final int KEY_LENGTH = 24; + final int VALUE_LENGTH = 40; + final BytesToBytesMap map = + new BytesToBytesMap(dataNodeMemoryManager, NUM_ENTRIES, PAGE_SIZE_BYTES); + // Each record will take 8 + 24 + 40 = 72 bytes of space in the data page. Our 64-megabyte + // pages won't be evenly-divisible by records of this size, which will cause us to waste some + // space at the end of the page. This is necessary in order for us to take the end-of-record + // handling branch in iterator(). + try { + for (int i = 0; i < NUM_ENTRIES; i++) { + final long[] key = new long[] { i, i, i }; // 3 * 8 = 24 bytes + final long[] value = new long[] { i, i, i, i, i }; // 5 * 8 = 40 bytes + + final BytesToBytesMap.Location loc = map.lookup( + key, + Platform.LONG_ARRAY_OFFSET, + KEY_LENGTH + ); + + Assert.assertFalse(loc.isDefined()); + Assert.assertTrue(loc.append( + key, + Platform.LONG_ARRAY_OFFSET, + KEY_LENGTH, + value, + Platform.LONG_ARRAY_OFFSET, + VALUE_LENGTH + )); + } + Assert.assertEquals(2, map.getNumDataPages()); + + final BitSet valuesSeen = new BitSet(NUM_ENTRIES); + final Iterator iter = map.iterator(); + final long[] key = new long[KEY_LENGTH / 8]; + final long[] value = new long[VALUE_LENGTH / 8]; + while (iter.hasNext()) { + final BytesToBytesMap.Location loc = iter.next(); + Assert.assertTrue(loc.isDefined()); + Assert.assertEquals(KEY_LENGTH, loc.getKeyLength()); + Assert.assertEquals(VALUE_LENGTH, loc.getValueLength()); + Platform.copyMemory( + loc.getKeyBase(), + loc.getKeyOffset(), + key, + Platform.LONG_ARRAY_OFFSET, + KEY_LENGTH + ); + Platform.copyMemory( + loc.getValueBase(), + loc.getValueOffset(), + value, + Platform.LONG_ARRAY_OFFSET, + VALUE_LENGTH + ); + for (long j : key) { + 
Assert.assertEquals(key[0], j); + } + for (long j : value) { + Assert.assertEquals(key[0], j); + } + valuesSeen.set((int) key[0]); + } + Assert.assertEquals(NUM_ENTRIES, valuesSeen.cardinality()); + } finally { + map.free(); + } + } + + @Test + public void randomizedStressTest() { + final int size = 65536; + // Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays + // into ByteBuffers in order to use them as keys here. + final Map expected = new HashMap(); + final BytesToBytesMap map = new BytesToBytesMap(dataNodeMemoryManager, size, PAGE_SIZE_BYTES); + try { + // Fill the map to 90% full so that we can trigger probing + for (int i = 0; i < size * 0.9; i++) { + final byte[] key = getRandomByteArray(rand.nextInt(10) + 1); + final byte[] value = getRandomByteArray(rand.nextInt(10) + 1); + + if (!expected.containsKey(ByteBuffer.wrap(key))) { + expected.put(ByteBuffer.wrap(key), value); + final BytesToBytesMap.Location loc = map.lookup( + key, + Platform.BYTE_ARRAY_OFFSET, + key.length + ); + Assert.assertFalse(loc.isDefined()); + Assert.assertTrue(loc.append( + key, + Platform.BYTE_ARRAY_OFFSET, + key.length, + value, + Platform.BYTE_ARRAY_OFFSET, + value.length + )); + // After calling putNewKey, the following should be true, even before calling + // lookup(): + Assert.assertTrue(loc.isDefined()); + Assert.assertEquals(key.length, loc.getKeyLength()); + Assert.assertEquals(value.length, loc.getValueLength()); + Assert.assertTrue(arrayEquals(key, loc.getKeyBase(), loc.getKeyOffset(), key.length)); + Assert.assertTrue( + arrayEquals(value, loc.getValueBase(), loc.getValueOffset(), value.length)); + } + } + +/** + for (Map.Entry entry : expected.entrySet()) { + final byte[] key = JavaUtils.bufferToArray(entry.getKey()); + final byte[] value = entry.getValue(); + final BytesToBytesMap.Location loc = + map.lookup(key, Platform.BYTE_ARRAY_OFFSET, key.length); + Assert.assertTrue(loc.isDefined()); + Assert.assertTrue( + arrayEquals(key, 
loc.getKeyBase(), loc.getKeyOffset(), loc.getKeyLength())); + Assert.assertTrue( + arrayEquals(value, loc.getValueBase(), loc.getValueOffset(), loc.getValueLength())); + } +*/ + } finally { + map.free(); + } + } + + @Test + public void randomizedTestWithRecordsLargerThanPageSize() { + final long pageSizeBytes = 128; + final BytesToBytesMap map = new BytesToBytesMap(dataNodeMemoryManager, 64, pageSizeBytes); + // Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays + // into ByteBuffers in order to use them as keys here. + final Map expected = new HashMap(); + try { + for (int i = 0; i < 1000; i++) { + final byte[] key = getRandomByteArray(rand.nextInt(128)); + final byte[] value = getRandomByteArray(rand.nextInt(128)); + if (!expected.containsKey(ByteBuffer.wrap(key))) { + expected.put(ByteBuffer.wrap(key), value); + final BytesToBytesMap.Location loc = map.lookup( + key, + Platform.BYTE_ARRAY_OFFSET, + key.length + ); + Assert.assertFalse(loc.isDefined()); + Assert.assertTrue(loc.append( + key, + Platform.BYTE_ARRAY_OFFSET, + key.length, + value, + Platform.BYTE_ARRAY_OFFSET, + value.length + )); + // After calling putNewKey, the following should be true, even before calling + // lookup(): + Assert.assertTrue(loc.isDefined()); + Assert.assertEquals(key.length, loc.getKeyLength()); + Assert.assertEquals(value.length, loc.getValueLength()); + Assert.assertTrue(arrayEquals(key, loc.getKeyBase(), loc.getKeyOffset(), key.length)); + Assert.assertTrue( + arrayEquals(value, loc.getValueBase(), loc.getValueOffset(), value.length)); + } + } +/** + for (Map.Entry entry : expected.entrySet()) { + final byte[] key = JavaUtils.bufferToArray(entry.getKey()); + final byte[] value = entry.getValue(); + final BytesToBytesMap.Location loc = + map.lookup(key, Platform.BYTE_ARRAY_OFFSET, key.length); + Assert.assertTrue(loc.isDefined()); + Assert.assertTrue( + arrayEquals(key, loc.getKeyBase(), loc.getKeyOffset(), loc.getKeyLength())); + 
Assert.assertTrue( + arrayEquals(value, loc.getValueBase(), loc.getValueOffset(), loc.getValueLength())); + } +*/ + } finally { + map.free(); + } + } + + @Test + public void failureToAllocateFirstPage() { + memoryManager.limit(1024); // longArray + BytesToBytesMap map = new BytesToBytesMap(dataNodeMemoryManager, 1, PAGE_SIZE_BYTES); + try { + final long[] emptyArray = new long[0]; + final BytesToBytesMap.Location loc = + map.lookup(emptyArray, Platform.LONG_ARRAY_OFFSET, 0); + Assert.assertFalse(loc.isDefined()); + Assert.assertFalse(loc.append( + emptyArray, Platform.LONG_ARRAY_OFFSET, 0, emptyArray, Platform.LONG_ARRAY_OFFSET, 0)); + } finally { + map.free(); + } + } + + + @Test + public void failureToGrow() { + BytesToBytesMap map = new BytesToBytesMap(dataNodeMemoryManager, 1, 1024); + try { + boolean success = true; + int i; + for (i = 0; i < 127; i++) { + if (i > 0) { + memoryManager.limit(0); + } + final long[] arr = new long[]{i}; + final BytesToBytesMap.Location loc = map.lookup(arr, Platform.LONG_ARRAY_OFFSET, 8); + success = + loc.append(arr, Platform.LONG_ARRAY_OFFSET, 8, arr, Platform.LONG_ARRAY_OFFSET, 8); + if (!success) { + break; + } + } + Assert.assertThat(i, greaterThan(0)); + Assert.assertFalse(success); + } finally { + map.free(); + } + } + + @Test + public void spillInIterator() throws IOException { + BytesToBytesMap map = new BytesToBytesMap( + dataNodeMemoryManager, blockManager, serializerManager, 1, 0.75, 1024, false); + try { + int i; + for (i = 0; i < 1024; i++) { + final long[] arr = new long[]{i}; + final BytesToBytesMap.Location loc = map.lookup(arr, Platform.LONG_ARRAY_OFFSET, 8); + loc.append(arr, Platform.LONG_ARRAY_OFFSET, 8, arr, Platform.LONG_ARRAY_OFFSET, 8); + } + BytesToBytesMap.MapIterator iter = map.iterator(); + for (i = 0; i < 100; i++) { + iter.next(); + } + // Non-destructive iterator is not spillable + Assert.assertEquals(0, iter.spill(1024L * 10)); + for (i = 100; i < 1024; i++) { + iter.next(); + } + + 
BytesToBytesMap.MapIterator iter2 = map.destructiveIterator(); + for (i = 0; i < 100; i++) { + iter2.next(); + } + Assert.assertTrue(iter2.spill(1024) >= 1024); + for (i = 100; i < 1024; i++) { + iter2.next(); + } + assertFalse(iter2.hasNext()); + } finally { + map.free(); + for (File spillFile : spillFilesCreated) { + assertFalse("Spill file " + spillFile.getPath() + " was not cleaned up", + spillFile.exists()); + } + } + } + + @Test + public void multipleValuesForSameKey() { + BytesToBytesMap map = + new BytesToBytesMap(dataNodeMemoryManager, blockManager, serializerManager, 1, 0.75, 1024, false); + try { + int i; + for (i = 0; i < 1024; i++) { + final long[] arr = new long[]{i}; + map.lookup(arr, Platform.LONG_ARRAY_OFFSET, 8) + .append(arr, Platform.LONG_ARRAY_OFFSET, 8, arr, Platform.LONG_ARRAY_OFFSET, 8); + } + assert map.numKeys() == 1024; + assert map.numValues() == 1024; + for (i = 0; i < 1024; i++) { + final long[] arr = new long[]{i}; + map.lookup(arr, Platform.LONG_ARRAY_OFFSET, 8) + .append(arr, Platform.LONG_ARRAY_OFFSET, 8, arr, Platform.LONG_ARRAY_OFFSET, 8); + } + assert map.numKeys() == 1024; + assert map.numValues() == 2048; + for (i = 0; i < 1024; i++) { + final long[] arr = new long[]{i}; + final BytesToBytesMap.Location loc = map.lookup(arr, Platform.LONG_ARRAY_OFFSET, 8); + assert loc.isDefined(); + assert loc.nextValue(); + assert !loc.nextValue(); + } + BytesToBytesMap.MapIterator iter = map.iterator(); + for (i = 0; i < 2048; i++) { + assert iter.hasNext(); + final BytesToBytesMap.Location loc = iter.next(); + assert loc.isDefined(); + } + } finally { + map.free(); + } + } + + @Test + public void initialCapacityBoundsChecking() { + try { + new BytesToBytesMap(dataNodeMemoryManager, 0, PAGE_SIZE_BYTES); + Assert.fail("Expected IllegalArgumentException to be thrown"); + } catch (IllegalArgumentException e) { + // expected exception + } + + try { + new BytesToBytesMap( + dataNodeMemoryManager, + BytesToBytesMap.MAX_CAPACITY + 1, + 
PAGE_SIZE_BYTES); + Assert.fail("Expected IllegalArgumentException to be thrown"); + } catch (IllegalArgumentException e) { + // expected exception + } + } + + @Test + public void testPeakMemoryUsed() { + final long recordLengthBytes = 32; + final long pageSizeBytes = 256 + 8; // 8 bytes for end-of-page marker + final long numRecordsPerPage = (pageSizeBytes - 8) / recordLengthBytes; + final BytesToBytesMap map = new BytesToBytesMap(dataNodeMemoryManager, 1024, pageSizeBytes); + + // Since BytesToBytesMap is append-only, we expect the total memory consumption to be + // monotonically increasing. More specifically, every time we allocate a new page it + // should increase by exactly the size of the page. In this regard, the memory usage + // at any given time is also the peak memory used. + long previousPeakMemory = map.getPeakMemoryUsedBytes(); + long newPeakMemory; + try { + for (long i = 0; i < numRecordsPerPage * 10; i++) { + final long[] value = new long[]{i}; + map.lookup(value, Platform.LONG_ARRAY_OFFSET, 8).append( + value, + Platform.LONG_ARRAY_OFFSET, + 8, + value, + Platform.LONG_ARRAY_OFFSET, + 8); + newPeakMemory = map.getPeakMemoryUsedBytes(); + if (i % numRecordsPerPage == 0) { + // We allocated a new page for this record, so peak memory should change + assertEquals(previousPeakMemory + pageSizeBytes, newPeakMemory); + } else { + assertEquals(previousPeakMemory, newPeakMemory); + } + previousPeakMemory = newPeakMemory; + } + + // Freeing the map should not change the peak memory + map.free(); + newPeakMemory = map.getPeakMemoryUsedBytes(); + assertEquals(previousPeakMemory, newPeakMemory); + + } finally { + map.free(); + } + } + +} diff --git a/src/test/java/io/mycat/memory/unsafe/map/BytesToBytesMapOffHeapSuite.java b/src/test/java/io/mycat/memory/unsafe/map/BytesToBytesMapOffHeapSuite.java new file mode 100644 index 000000000..ec6a27883 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/map/BytesToBytesMapOffHeapSuite.java @@ -0,0 +1,26 @@ +/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.map; + +public class BytesToBytesMapOffHeapSuite extends AbstractBytesToBytesMapSuite { + + @Override + protected boolean useOffHeapMemoryAllocator() { + return true; + } +} diff --git a/src/test/java/io/mycat/memory/unsafe/map/BytesToBytesMapOnHeapSuite.java b/src/test/java/io/mycat/memory/unsafe/map/BytesToBytesMapOnHeapSuite.java new file mode 100644 index 000000000..f248f2a51 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/map/BytesToBytesMapOnHeapSuite.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.map; + +public class BytesToBytesMapOnHeapSuite extends AbstractBytesToBytesMapSuite { + + @Override + protected boolean useOffHeapMemoryAllocator() { + return false; + } +} diff --git a/src/test/java/io/mycat/memory/unsafe/map/MapSorterByValueTest.java b/src/test/java/io/mycat/memory/unsafe/map/MapSorterByValueTest.java new file mode 100644 index 000000000..ab5e3eaa0 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/map/MapSorterByValueTest.java @@ -0,0 +1,54 @@ +package io.mycat.memory.unsafe.map; + +import org.junit.Test; + +import java.util.*; + +/** + * Created by znix on 2016/7/4. 
+ */ +public class MapSorterByValueTest { + @Test + public void testMapSorterByValue(){ + Map map = new HashMap(); + map.put("q",23); + map.put("b",4); + map.put("c",5); + map.put("d",6); + + Map resultMap = mapSorterByValue(map); //按Value进行排序 + + for (Map.Entry entry : resultMap.entrySet()) { + System.out.println(entry.getKey() + " " + entry.getValue()); + } + } + + private Map mapSorterByValue(Map map) { + if (map == null || map.isEmpty()) { + return null; + } + + Map sortedMap = new LinkedHashMap(); + + List> entryList = new ArrayList< + Map.Entry>( + map.entrySet()); + + Collections.sort(entryList, new Comparator>() { + @Override + public int compare(Map.Entry o1, Map.Entry o2) { + return o1.getValue().compareTo(o2.getValue()); + } + }); + + Iterator> iter = entryList.iterator(); + Map.Entry tmpEntry = null; + while (iter.hasNext()) { + tmpEntry = iter.next(); + sortedMap.put(tmpEntry.getKey(), tmpEntry.getValue()); + } + return sortedMap; + } +} + + diff --git a/src/test/java/io/mycat/memory/unsafe/map/UnsafeFixedWidthAggregationMapSuite.java b/src/test/java/io/mycat/memory/unsafe/map/UnsafeFixedWidthAggregationMapSuite.java new file mode 100644 index 000000000..8f3a5d1d0 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/map/UnsafeFixedWidthAggregationMapSuite.java @@ -0,0 +1,314 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.map; + +import io.mycat.memory.MyCatMemory; +import io.mycat.memory.unsafe.KVIterator; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.memory.mm.MemoryManager; +import io.mycat.memory.unsafe.row.BufferHolder; +import io.mycat.memory.unsafe.row.StructType; +import io.mycat.memory.unsafe.row.UnsafeRow; +import io.mycat.memory.unsafe.row.UnsafeRowWriter; +import io.mycat.memory.unsafe.utils.BytesTools; +import io.mycat.sqlengine.mpp.ColMeta; +import io.mycat.sqlengine.mpp.OrderCol; +import org.apache.log4j.Logger; +import org.junit.Assert; +import org.junit.Test; + + +import java.io.IOException; +import java.util.*; + +/** + * Created by zagnix on 2016/6/4. 
+ */ +public class UnsafeFixedWidthAggregationMapSuite { + private StructType groupKeySchema ; + private StructType aggBufferSchema; + private UnsafeRow emptyAggregationBuffer; + private long PAGE_SIZE_BYTES = 1L << 20; + + private final Random rand = new Random(42); + + private static Logger LOGGER = Logger.getLogger(UnsafeFixedWidthAggregationMapSuite.class); + @Test + public void testAggregateMap() throws NoSuchFieldException, IllegalAccessException, IOException { + /** + * 创造上文环境 + */ + MyCatMemory myCatMemory = new MyCatMemory(); + MemoryManager memoryManager = myCatMemory.getResultMergeMemoryManager(); + DataNodeMemoryManager dataNodeMemoryManager = new DataNodeMemoryManager(memoryManager, Thread.currentThread().getId()); + + /** + * 构造数据字段group key + */ + + int fieldCount = 2; + ColMeta colMeta = null; + Map colMetaMap = new HashMap(fieldCount); + colMeta = new ColMeta(0,ColMeta.COL_TYPE_STRING); + colMetaMap.put("id",colMeta); + colMeta = new ColMeta(1,ColMeta.COL_TYPE_STRING); + colMetaMap.put("name",colMeta); + + OrderCol[] orderCols = new OrderCol[1]; + OrderCol orderCol = new OrderCol(colMetaMap.get("id"),OrderCol.COL_ORDER_TYPE_DESC); + orderCols[0] = orderCol; + + groupKeySchema = new StructType(colMetaMap,fieldCount); + groupKeySchema.setOrderCols(orderCols); + + + /** + * 构造数据字段value key + */ + fieldCount = 4; + colMeta = null; + colMetaMap = new HashMap(fieldCount); + colMeta = new ColMeta(0,ColMeta.COL_TYPE_STRING); + colMetaMap.put("id",colMeta); + colMeta = new ColMeta(1,ColMeta.COL_TYPE_STRING); + colMetaMap.put("name",colMeta); + colMeta = new ColMeta(2,ColMeta.COL_TYPE_INT); + colMetaMap.put("age",colMeta); + + colMeta = new ColMeta(3,ColMeta.COL_TYPE_LONGLONG); + colMetaMap.put("score",colMeta); + + + orderCols = new OrderCol[1]; + orderCol = new OrderCol(colMetaMap.get("id"),OrderCol.COL_ORDER_TYPE_DESC); + orderCols[0] = orderCol; + + aggBufferSchema = new StructType(colMetaMap,fieldCount); + aggBufferSchema.setOrderCols(orderCols); + + 
/** + *emtpy Row value + */ + BufferHolder bufferHolder ; + emptyAggregationBuffer = new UnsafeRow(4); + bufferHolder = new BufferHolder(emptyAggregationBuffer,0); + UnsafeRowWriter unsafeRowWriter = new UnsafeRowWriter(bufferHolder,4); + bufferHolder.reset(); + String value = "o"; + unsafeRowWriter.write(0,value.getBytes()); + unsafeRowWriter.write(1,value.getBytes()); + emptyAggregationBuffer.setInt(2,0); + emptyAggregationBuffer.setLong(3,0); + emptyAggregationBuffer.setTotalSize(bufferHolder.totalSize()); + + + UnsafeFixedWidthAggregationMap map = new UnsafeFixedWidthAggregationMap( + emptyAggregationBuffer, + aggBufferSchema, + groupKeySchema, + dataNodeMemoryManager, + 2*1024, + PAGE_SIZE_BYTES, + true); + + + /** + * 造数据 + */ + + int i; + + List rows = new ArrayList(); + for ( i = 0; i < 100000; i++) { + /** + * key + */ + UnsafeRow groupKey = new UnsafeRow(2); + bufferHolder = new BufferHolder(groupKey,0); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder,2); + bufferHolder.reset(); + + unsafeRowWriter.write(0, BytesTools.toBytes(rand.nextInt(10000000))); + unsafeRowWriter.write(1,BytesTools.toBytes(rand.nextInt(10000000))); + + groupKey.setTotalSize(bufferHolder.totalSize()); + + UnsafeRow valueKey = new UnsafeRow(4); + bufferHolder = new BufferHolder(valueKey,0); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder,4); + bufferHolder.reset(); + + unsafeRowWriter.write(0, BytesTools.toBytes(rand.nextInt(10))); + unsafeRowWriter.write(1,BytesTools.toBytes(rand.nextInt(10))); + valueKey.setInt(2,i); + valueKey.setLong(3,1); + valueKey.setTotalSize(bufferHolder.totalSize()); + + if(map.find(groupKey)){ + UnsafeRow rs = map.getAggregationBuffer(groupKey); + rs.setLong(3,i+valueKey.getLong(3)); + rs.setInt(2,100+valueKey.getInt(2)); + }else { + map.put(groupKey,valueKey); + } + rows.add(valueKey); + } + + + KVIterator iter = map.iterator(); + int j = 0; + while (iter.next()){ + Assert.assertEquals(j,iter.getValue().getInt(2)); + j++; + 
iter.getValue().setInt(2,5000000); + iter.getValue().setLong(3,600000); + } + + Assert.assertEquals(rows.size(),j); + int k = 0; + KVIterator iter1 = map.iterator(); + while (iter1.next()){ + k++; + // LOGGER.error("(" + BytesTools.toInt(iter1.getKey().getBinary(0)) + "," + + // iter1.getValue().getInt(2) +"," +iter1.getValue().getLong(3)+")"); + + Assert.assertEquals(5000000,iter1.getValue().getInt(2)); + Assert.assertEquals(600000,iter1.getValue().getLong(3)); + } + + Assert.assertEquals(j,k); + + map.free(); + + } +@Test +public void testWithMemoryLeakDetection() throws IOException, NoSuchFieldException, IllegalAccessException { + MyCatMemory myCatMemory = new MyCatMemory(); + MemoryManager memoryManager = myCatMemory.getResultMergeMemoryManager(); + DataNodeMemoryManager dataNodeMemoryManager = new DataNodeMemoryManager(memoryManager, + Thread.currentThread().getId()); + int fieldCount = 3; + ColMeta colMeta = null; + Map colMetaMap = new HashMap(fieldCount); + colMeta = new ColMeta(0,ColMeta.COL_TYPE_STRING); + colMetaMap.put("id",colMeta); + colMeta = new ColMeta(1,ColMeta.COL_TYPE_STRING); + colMetaMap.put("name",colMeta); + colMeta = new ColMeta(2,ColMeta.COL_TYPE_STRING); + colMetaMap.put("age",colMeta); + + + OrderCol[] orderCols = new OrderCol[1]; + OrderCol orderCol = new OrderCol(colMetaMap.get("id"),OrderCol.COL_ORDER_TYPE_DESC); + orderCols[0] = orderCol; + + groupKeySchema = new StructType(colMetaMap,fieldCount); + groupKeySchema.setOrderCols(orderCols); + + + + fieldCount = 3; + colMeta = null; + colMetaMap = new HashMap(fieldCount); + colMeta = new ColMeta(0,ColMeta.COL_TYPE_LONGLONG); + colMetaMap.put("age",colMeta); + colMeta = new ColMeta(1,ColMeta.COL_TYPE_LONGLONG); + colMetaMap.put("age1",colMeta); + colMeta = new ColMeta(2,ColMeta.COL_TYPE_STRING); + colMetaMap.put("name",colMeta); + + orderCols = new OrderCol[1]; + orderCol = new OrderCol(colMetaMap.get("id"),OrderCol.COL_ORDER_TYPE_DESC); + orderCols[0] = orderCol; + + aggBufferSchema = 
new StructType(colMetaMap,fieldCount); + aggBufferSchema.setOrderCols(orderCols); + + /** + * value + */ + BufferHolder bufferHolder ; + emptyAggregationBuffer = new UnsafeRow(3); + bufferHolder = new BufferHolder(emptyAggregationBuffer,0); + UnsafeRowWriter unsafeRowWriter = new UnsafeRowWriter(bufferHolder,3); + bufferHolder.reset(); + String value = "ok,hello"; + emptyAggregationBuffer.setLong(0,0); + emptyAggregationBuffer.setLong(1,0); + unsafeRowWriter.write(2,value.getBytes()); + emptyAggregationBuffer.setTotalSize(bufferHolder.totalSize()); + + UnsafeFixedWidthAggregationMap map = new UnsafeFixedWidthAggregationMap( + emptyAggregationBuffer, + aggBufferSchema, + groupKeySchema, + dataNodeMemoryManager, + 2*1024, + PAGE_SIZE_BYTES, + false + ); + + + int i; + + List rows = new ArrayList(); + for ( i = 0; i < 1000; i++) { + String line = "testUnsafeRow" + i; + /** + * key + */ + UnsafeRow groupKey = new UnsafeRow(3); + bufferHolder = new BufferHolder(groupKey,0); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder,3); + bufferHolder.reset(); + + final byte[] key = getRandomByteArray(rand.nextInt(8)); + String age = "5"+i; + unsafeRowWriter.write(0,key); + unsafeRowWriter.write(1,line.getBytes()); + unsafeRowWriter.write(2,age.getBytes()); + groupKey.setTotalSize(bufferHolder.totalSize()); + + map.getAggregationBuffer(groupKey); + + rows.add(groupKey); + } + + Assert.assertEquals(i ,rows.size() ); + + + + UnsafeRow row = rows.get(12); + UnsafeRow rs = map.getAggregationBuffer(row); + rs.setLong(0,12); + rs = map.getAggregationBuffer(row); + Assert.assertEquals(12,rs.getLong(0)); + + map.free(); + + } + + private byte[] getRandomByteArray(int numWords) { + Assert.assertTrue(numWords >= 0); + final int lengthInBytes = numWords * 8; + final byte[] bytes = new byte[lengthInBytes]; + rand.nextBytes(bytes); + return bytes; + } + +} diff --git a/src/test/java/io/mycat/memory/unsafe/memory/MemoryManagerSuite.java 
b/src/test/java/io/mycat/memory/unsafe/memory/MemoryManagerSuite.java new file mode 100644 index 000000000..41272b076 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/memory/MemoryManagerSuite.java @@ -0,0 +1,7 @@ +package io.mycat.memory.unsafe.memory; + +/** + * Created by zagnix on 2016/6/6. + */ +public interface MemoryManagerSuite { +} diff --git a/src/test/java/io/mycat/memory/unsafe/memory/MycatMemoryTest.java b/src/test/java/io/mycat/memory/unsafe/memory/MycatMemoryTest.java new file mode 100644 index 000000000..cca83996c --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/memory/MycatMemoryTest.java @@ -0,0 +1,24 @@ +package io.mycat.memory.unsafe.memory; + + +import io.mycat.memory.MyCatMemory; +import io.mycat.memory.unsafe.Platform; +import org.junit.Test; + +/** + * Created by zagnix on 2016/6/12. + */ +public class MycatMemoryTest { + + /** + * -Xmx1024m -XX:MaxDirectMemorySize=1G + */ + @Test + public void testMycatMemory() throws NoSuchFieldException, IllegalAccessException { + MyCatMemory myCatMemory = new MyCatMemory(); + System.out.println(myCatMemory.getResultSetBufferSize()); + System.out.println(Platform.getMaxHeapMemory()); + System.out.println(Platform.getMaxDirectMemory()); + } + +} diff --git a/src/test/java/io/mycat/memory/unsafe/memory/TaskMemoryManagerSuite.java b/src/test/java/io/mycat/memory/unsafe/memory/TaskMemoryManagerSuite.java new file mode 100644 index 000000000..4ab0aaccb --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/memory/TaskMemoryManagerSuite.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.memory; + +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.memory.mm.MemoryMode; +import io.mycat.memory.unsafe.memory.mm.ResultMergeMemoryManager; +import io.mycat.memory.unsafe.utils.MycatPropertyConf; +import org.junit.Assert; +import org.junit.Test; + +public class TaskMemoryManagerSuite { + + @Test + public void leakedPageMemoryIsDetected() { + final DataNodeMemoryManager manager = new DataNodeMemoryManager( + new ResultMergeMemoryManager( + new MycatPropertyConf().set("mycat.memory.offHeap.enabled", "false") + .set("mycat.memory.offHeap.size","32768"), + 1, + Long.MAX_VALUE + ), + 0); + manager.allocatePage(4096, null); // leak memory + Assert.assertEquals(4096, manager.getMemoryConsumptionForThisConnection()); + Assert.assertEquals(4096, manager.cleanUpAllAllocatedMemory()); + } + + @Test + public void encodePageNumberAndOffsetOffHeap() { + final MycatPropertyConf conf = new MycatPropertyConf() + .set("mycat.memory.offHeap.enabled", "true") + .set("mycat.memory.offHeap.size", "1000"); + final DataNodeMemoryManager manager = new DataNodeMemoryManager(new TestMemoryManager(conf), 0); + final MemoryBlock dataPage = manager.allocatePage(256, null); + // In off-heap mode, an offset is an absolute address that may require more than 51 bits to + // encode. 
This map exercises that corner-case: + final long offset = ((1L << DataNodeMemoryManager.OFFSET_BITS) + 10); + final long encodedAddress = manager.encodePageNumberAndOffset(dataPage, offset); + Assert.assertEquals(null, manager.getPage(encodedAddress)); + Assert.assertEquals(offset, manager.getOffsetInPage(encodedAddress)); + } + + @Test + public void encodePageNumberAndOffsetOnHeap() { + final DataNodeMemoryManager manager = new DataNodeMemoryManager( + new TestMemoryManager(new MycatPropertyConf().set("mycat.memory.offHeap.enabled", "false")), 0); + final MemoryBlock dataPage = manager.allocatePage(256, null); + final long encodedAddress = manager.encodePageNumberAndOffset(dataPage, 64); + Assert.assertEquals(dataPage.getBaseObject(), manager.getPage(encodedAddress)); + Assert.assertEquals(64, manager.getOffsetInPage(encodedAddress)); + } + + @Test + public void cooperativeSpilling() throws InterruptedException { + final TestMemoryManager memoryManager = new TestMemoryManager(new MycatPropertyConf()); + memoryManager.limit(100); + final DataNodeMemoryManager manager = new DataNodeMemoryManager(memoryManager, 0); + + TestMemoryConsumer c1 = new TestMemoryConsumer(manager); + TestMemoryConsumer c2 = new TestMemoryConsumer(manager); + c1.use(100); + Assert.assertEquals(100, c1.getUsed()); + c2.use(100); + Assert.assertEquals(100, c2.getUsed()); + Assert.assertEquals(0, c1.getUsed()); // spilled + c1.use(100); + Assert.assertEquals(100, c1.getUsed()); + Assert.assertEquals(0, c2.getUsed()); // spilled + + c1.use(50); + Assert.assertEquals(50, c1.getUsed()); // spilled + Assert.assertEquals(0, c2.getUsed()); + c2.use(50); + Assert.assertEquals(50, c1.getUsed()); + Assert.assertEquals(50, c2.getUsed()); + + c1.use(100); + Assert.assertEquals(100, c1.getUsed()); + Assert.assertEquals(0, c2.getUsed()); // spilled + + c1.free(20); + Assert.assertEquals(80, c1.getUsed()); + c2.use(10); + Assert.assertEquals(80, c1.getUsed()); + Assert.assertEquals(10, c2.getUsed()); + 
c2.use(100); + Assert.assertEquals(100, c2.getUsed()); + Assert.assertEquals(0, c1.getUsed()); // spilled + + c1.free(0); + c2.free(100); + Assert.assertEquals(0, manager.cleanUpAllAllocatedMemory()); + } + + @Test + public void offHeapConfigurationBackwardsCompatibility() { + final MycatPropertyConf conf = new MycatPropertyConf() + .set("mycat.memory.offHeap.enabled", "true") + .set("mycat.memory.offHeap.size","1000"); + final DataNodeMemoryManager manager = new DataNodeMemoryManager(new TestMemoryManager(conf), 0); + Assert.assertSame(MemoryMode.OFF_HEAP, manager.tungstenMemoryMode); + } + +} diff --git a/src/test/java/io/mycat/memory/unsafe/memory/TestMemoryConsumer.java b/src/test/java/io/mycat/memory/unsafe/memory/TestMemoryConsumer.java new file mode 100644 index 000000000..ae2b321e1 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/memory/TestMemoryConsumer.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.mycat.memory.unsafe.memory; + +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.memory.mm.MemoryConsumer; + +import java.io.IOException; + +public class TestMemoryConsumer extends MemoryConsumer { + public TestMemoryConsumer(DataNodeMemoryManager memoryManager) { + super(memoryManager); + } + + @Override + public long spill(long size, MemoryConsumer trigger) throws IOException { + long used = getUsed(); + free(used); + return used; + } + + void use(long size) throws InterruptedException { + long got = dataNodeMemoryManager.acquireExecutionMemory( + size, + dataNodeMemoryManager.tungstenMemoryMode, + this); + used += got; + } + + void free(long size) { + used -= size; + dataNodeMemoryManager.releaseExecutionMemory( + size, + dataNodeMemoryManager.tungstenMemoryMode, + this); + } +} + + diff --git a/src/test/java/io/mycat/memory/unsafe/memory/TestMemoryManager.java b/src/test/java/io/mycat/memory/unsafe/memory/TestMemoryManager.java new file mode 100644 index 000000000..662247042 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/memory/TestMemoryManager.java @@ -0,0 +1,56 @@ + + +package io.mycat.memory.unsafe.memory; + + +import io.mycat.memory.unsafe.memory.mm.MemoryManager; +import io.mycat.memory.unsafe.memory.mm.MemoryMode; +import io.mycat.memory.unsafe.utils.MycatPropertyConf; + +public class TestMemoryManager extends MemoryManager { + + public TestMemoryManager(MycatPropertyConf conf){ + super(conf,1, Long.MAX_VALUE); + } + + private boolean oomOnce = false; + private long available = Long.MAX_VALUE; + + + + @Override + protected long acquireExecutionMemory( + long numBytes, + long taskAttemptId, + MemoryMode memoryMode){ + if (oomOnce) { + oomOnce = false; + return 0; + } else if (available >= numBytes) { + available -= numBytes; + return numBytes; + } else { + long grant = available; + available = 0; + return grant; + } + } + +@Override +public void releaseExecutionMemory( + long 
numBytes, + long taskAttemptId, + MemoryMode memoryMode){ + available += numBytes; + } + + + public void markExecutionAsOutOfMemoryOnce(){ + oomOnce = true; + } + + public void limit(long avail){ + available = avail; + } + +} diff --git a/src/test/java/io/mycat/memory/unsafe/row/UnsafeRowList.java b/src/test/java/io/mycat/memory/unsafe/row/UnsafeRowList.java new file mode 100644 index 000000000..c564472cf --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/row/UnsafeRowList.java @@ -0,0 +1,48 @@ +package io.mycat.memory.unsafe.row; + + +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; + +/** + * Created by zagnix on 2016/6/27. + */ +public class UnsafeRowList { + @Test + public void testUnsafeRowList(){ + ArrayList list = new ArrayList(); + UnsafeRow unsafeRow ; + BufferHolder bufferHolder ; + UnsafeRowWriter unsafeRowWriter; + String line = "testUnsafeRow"; + + for (int i = 0; i <10; i++) { + unsafeRow = new UnsafeRow(3); + bufferHolder = new BufferHolder(unsafeRow); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder,3); + bufferHolder.reset(); + + unsafeRow.setInt(0,89); + unsafeRowWriter.write(1,line.getBytes(),0,line.length()); + unsafeRow.setInt(2,23); + + unsafeRow.setTotalSize(bufferHolder.totalSize()); + list.add(unsafeRow); + } + + + for (int i = 0; i <10; i++) { + UnsafeRow row = list.get(i); + row.setInt(0,1000+i); + } + + + for (int i = 0; i <10; i++) { + UnsafeRow row = list.get(i); + Assert.assertEquals(1000+i,row.getInt(0)); + } + + } +} diff --git a/src/test/java/io/mycat/memory/unsafe/row/UnsafeRowSuite.java b/src/test/java/io/mycat/memory/unsafe/row/UnsafeRowSuite.java new file mode 100644 index 000000000..ad0da8dd5 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/row/UnsafeRowSuite.java @@ -0,0 +1,91 @@ +package io.mycat.memory.unsafe.row; + + +import junit.framework.Assert; + +import static org.junit.Assert.assertEquals; + +import java.math.BigDecimal; + +import org.junit.Test; + +/** + * Created by 
zagnix on 2016/6/10. + */ +public class UnsafeRowSuite { + + + @Test + public void testUnsafeRowSingle(){ + UnsafeRow unsafeRow = new UnsafeRow(5); + BufferHolder bufferHolder = new BufferHolder(unsafeRow,64); + UnsafeRowWriter unsafeRowWriter = new UnsafeRowWriter(bufferHolder,5); + bufferHolder.reset(); + + String line2 = "testUnsafeRow3"; + unsafeRow.setFloat(0, 7.4f); + unsafeRow.setInt(1, 7); + unsafeRow.setLong(2,455555); + unsafeRowWriter.write(3,line2.getBytes(),0, line2.length()); + unsafeRow.setNullAt(4); + + unsafeRow.setInt(1, 9); + + assert(unsafeRow.getFloat(0) == 7.4f); + assert(unsafeRow.getInt(1) == 9); + assert(unsafeRow.getLong(2) == 455555); + Assert.assertEquals("testUnsafeRow3",new String(unsafeRow.getBinary(3))); + assert (false==unsafeRow.isNullAt(3)); + assert (true==unsafeRow.isNullAt(4)); + } + + public void testUnsafeRowWithDecimal() { + + int fieldCount = 4; + + String value = "12345678901234567890123456789.0123456789"; + String value1 = "100"; + BigDecimal decimal = new BigDecimal(value); + BigDecimal decimal1 = new BigDecimal(value1); + System.out.println("decimal precision : " + decimal.precision() + ", scale : " + decimal.scale()); + + UnsafeRow unsafeRow = new UnsafeRow(fieldCount); + BufferHolder bufferHolder = new BufferHolder(unsafeRow,64); + UnsafeRowWriter unsafeRowWriter = new UnsafeRowWriter(bufferHolder,fieldCount); + bufferHolder.reset(); + + unsafeRow.setInt(0, 100); + unsafeRow.setDouble(1, 0.99); + unsafeRow.setLong(2, 1000); + unsafeRowWriter.write(3, decimal); + + assertEquals(100, unsafeRow.getInt(0)); + assertEquals("0.99", String.valueOf(unsafeRow.getDouble(1))); + assertEquals(1000, unsafeRow.getLong(2)); + assertEquals(decimal, unsafeRow.getDecimal(3, decimal.scale())); + + unsafeRow.updateDecimal(3, decimal1); + assertEquals(decimal1, unsafeRow.getDecimal(3, decimal1.scale())); + + // update null decimal + BigDecimal nullDecimal = null; + unsafeRow.updateDecimal(3, nullDecimal); + assertEquals(nullDecimal, 
unsafeRow.getDecimal(3, 0)); + + unsafeRow.updateDecimal(3, decimal); + assertEquals(decimal, unsafeRow.getDecimal(3, decimal.scale())); + + } + + +// @Test +// public void testUnsafeRowInsert(){ +// UnsafeRow unsafeRow = new UnsafeRow(4); +// +// assert(unsafeRow.getFloat(0) == 7.4f); +// assert(unsafeRow.getInt(1) == 9); +// assert(unsafeRow.getLong(2) == 455555); +// Assert.assertEquals("testUnsafeRow3",new String(unsafeRow.getBinary(3))); +// } + +}; diff --git a/src/test/java/io/mycat/memory/unsafe/sort/HashPartitioner.java b/src/test/java/io/mycat/memory/unsafe/sort/HashPartitioner.java new file mode 100644 index 000000000..06da865e0 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/sort/HashPartitioner.java @@ -0,0 +1,18 @@ +package io.mycat.memory.unsafe.sort; + + +import io.mycat.memory.unsafe.utils.JavaUtils; + +/** + * Created by zagnix on 2016/6/6. + */ +public class HashPartitioner { + private int index =0; + public HashPartitioner(int i) { + this.index = i; + } + public int getPartition(String key){ + return JavaUtils.nonNegativeMod(key.hashCode(), index); + } + +} diff --git a/src/test/java/io/mycat/memory/unsafe/sort/TestTimSort.java b/src/test/java/io/mycat/memory/unsafe/sort/TestTimSort.java new file mode 100644 index 000000000..ecdd17ce7 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/sort/TestTimSort.java @@ -0,0 +1,136 @@ +/** + * Copyright 2015 Stijn de Gouw + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +package io.mycat.memory.unsafe.sort; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * This codes generates a int array which fails the standard TimSort. + * + * The blog that reported the bug + * http://www.envisage-project.eu/timsort-specification-and-verification/ + * + * This codes was originally wrote by Stijn de Gouw, modified by Evan Yu to adapt to + * our test suite. + * + * https://github.com/abstools/java-timsort-bug + * https://github.com/abstools/java-timsort-bug/blob/master/LICENSE + */ +public class TestTimSort { + + private static final int MIN_MERGE = 32; + + /** + * Returns an array of integers that demonstrate the bug in TimSort + */ + public static int[] getTimSortBugTestSet(int length) { + int minRun = minRunLength(length); + List runs = runsJDKWorstCase(minRun, length); + return createArray(runs, length); + } + + private static int minRunLength(int n) { + int r = 0; // Becomes 1 if any 1 bits are shifted off + while (n >= MIN_MERGE) { + r |= (n & 1); + n >>= 1; + } + return n + r; + } + + private static int[] createArray(List runs, int length) { + int[] a = new int[length]; + Arrays.fill(a, 0); + int endRun = -1; + for (long len : runs) { + a[endRun += len] = 1; + } + a[length - 1] = 0; + return a; + } + + /** + * Fills runs with a sequence of run lengths of the form
+ * Y_n x_{n,1} x_{n,2} ... x_{n,l_n}
+ * Y_{n-1} x_{n-1,1} x_{n-1,2} ... x_{n-1,l_{n-1}}
+ * ...
+ * Y_1 x_{1,1} x_{1,2} ... x_{1,l_1}
+ * The Y_i's are chosen to satisfy the invariant throughout execution, + * but the x_{i,j}'s are merged (by TimSort.mergeCollapse) + * into an X_i that violates the invariant. + * + * @param length The sum of all run lengths that will be added to runs. + */ + private static List runsJDKWorstCase(int minRun, int length) { + List runs = new ArrayList<>(); + + long runningTotal = 0, Y = minRun + 4, X = minRun; + + while (runningTotal + Y + X <= length) { + runningTotal += X + Y; + generateJDKWrongElem(runs, minRun, X); + runs.add(0, Y); + // X_{i+1} = Y_i + x_{i,1} + 1, since runs.get(1) = x_{i,1} + X = Y + runs.get(1) + 1; + // Y_{i+1} = X_{i+1} + Y_i + 1 + Y += X + 1; + } + + if (runningTotal + X <= length) { + runningTotal += X; + generateJDKWrongElem(runs, minRun, X); + } + + runs.add(length - runningTotal); + return runs; + } + + /** + * Adds a sequence x_1, ..., x_n of run lengths to runs such that:
+ * 1. X = x_1 + ... + x_n
+ * 2. x_j >= minRun for all j
+ * 3. x_1 + ... + x_{j-2} < x_j < x_1 + ... + x_{j-1} for all j
+ * These conditions guarantee that TimSort merges all x_j's one by one + * (resulting in X) using only merges on the second-to-last element. + * + * @param X The sum of the sequence that should be added to runs. + */ + private static void generateJDKWrongElem(List runs, int minRun, long X) { + for (long newTotal; X >= 2 * minRun + 1; X = newTotal) { + //Default strategy + newTotal = X / 2 + 1; + //Specialized strategies + if (3 * minRun + 3 <= X && X <= 4 * minRun + 1) { + // add x_1=MIN+1, x_2=MIN, x_3=X-newTotal to runs + newTotal = 2 * minRun + 1; + } else if (5 * minRun + 5 <= X && X <= 6 * minRun + 5) { + // add x_1=MIN+1, x_2=MIN, x_3=MIN+2, x_4=X-newTotal to runs + newTotal = 3 * minRun + 3; + } else if (8 * minRun + 9 <= X && X <= 10 * minRun + 9) { + // add x_1=MIN+1, x_2=MIN, x_3=MIN+2, x_4=2MIN+2, x_5=X-newTotal to runs + newTotal = 5 * minRun + 5; + } else if (13 * minRun + 15 <= X && X <= 16 * minRun + 17) { + // add x_1=MIN+1, x_2=MIN, x_3=MIN+2, x_4=2MIN+2, x_5=3MIN+4, x_6=X-newTotal to runs + newTotal = 8 * minRun + 9; + } + runs.add(0, X - newTotal); + } + runs.add(0, X); + } +} diff --git a/src/test/java/io/mycat/memory/unsafe/sort/UnsafeExternalRowSorterTest.java b/src/test/java/io/mycat/memory/unsafe/sort/UnsafeExternalRowSorterTest.java new file mode 100644 index 000000000..17a9f38f7 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/sort/UnsafeExternalRowSorterTest.java @@ -0,0 +1,169 @@ +package io.mycat.memory.unsafe.sort; + +import io.mycat.memory.MyCatMemory; +import io.mycat.memory.unsafe.array.ByteArrayMethods; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.memory.mm.MemoryManager; +import io.mycat.memory.unsafe.row.BufferHolder; +import io.mycat.memory.unsafe.row.StructType; +import io.mycat.memory.unsafe.row.UnsafeRow; +import io.mycat.memory.unsafe.row.UnsafeRowWriter; +import io.mycat.memory.unsafe.storage.DataNodeDiskManager; +import 
io.mycat.memory.unsafe.storage.SerializerManager; +import io.mycat.memory.unsafe.utils.BytesTools; +import io.mycat.memory.unsafe.utils.MycatPropertyConf; +import io.mycat.memory.unsafe.utils.sort.PrefixComparator; +import io.mycat.memory.unsafe.utils.sort.PrefixComparators; +import io.mycat.memory.unsafe.utils.sort.RowPrefixComputer; +import io.mycat.memory.unsafe.utils.sort.UnsafeExternalRowSorter; +import io.mycat.sqlengine.mpp.ColMeta; +import io.mycat.sqlengine.mpp.OrderCol; +import io.mycat.util.ExecutorUtil; +import io.mycat.util.NameableExecutor; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +import java.io.IOException; + +import java.lang.reflect.Array; +import java.util.*; +import java.util.concurrent.Future; + +/** + * Created by zagnix on 2016/6/19. + */ +public class UnsafeExternalRowSorterTest { + + private static final int TEST_SIZE = 100000; + public static final Logger LOGGER = LoggerFactory.getLogger(UnsafeExternalRowSorterTest.class); + + /** + * 测试类型 LONG,INT,SHORT,Float,Double,String,Binary + * 经测试基数排序可以适用上述数据类型,大大提高排序速度 + + */ + @Test + public void testUnsafeExternalRowSorter() throws NoSuchFieldException, IllegalAccessException, IOException { + MyCatMemory myCatMemory = new MyCatMemory(); + MemoryManager memoryManager = myCatMemory.getResultMergeMemoryManager(); + DataNodeDiskManager blockManager = myCatMemory.getBlockManager(); + SerializerManager serializerManager = myCatMemory.getSerializerManager(); + MycatPropertyConf conf = myCatMemory.getConf(); + DataNodeMemoryManager dataNodeMemoryManager = new DataNodeMemoryManager(memoryManager, + Thread.currentThread().getId()); + /** + * 1.schema ,模拟一个field字段值 + * + */ + int fieldCount = 3; + ColMeta colMeta = null; + Map colMetaMap = new HashMap(fieldCount); + colMeta = new ColMeta(0, ColMeta.COL_TYPE_STRING); + colMetaMap.put("id", colMeta); + colMeta = new ColMeta(1, ColMeta.COL_TYPE_STRING); + colMetaMap.put("name", colMeta); 
+ colMeta = new ColMeta(2, ColMeta.COL_TYPE_STRING); + colMetaMap.put("age", colMeta); + + + OrderCol[] orderCols = new OrderCol[1]; + OrderCol orderCol = new OrderCol(colMetaMap.get("id"), + OrderCol.COL_ORDER_TYPE_ASC); + orderCols[0] = orderCol; + /** + * 2 .PrefixComputer + */ + StructType schema = new StructType(colMetaMap, fieldCount); + schema.setOrderCols(orderCols); + + UnsafeExternalRowSorter.PrefixComputer prefixComputer = + new RowPrefixComputer(schema); + + /** + * 3 .PrefixComparator 默认是ASC,可以选择DESC + */ + final PrefixComparator prefixComparator = PrefixComparators.LONG; + + UnsafeExternalRowSorter sorter = + new UnsafeExternalRowSorter(dataNodeMemoryManager, + myCatMemory, + schema, + prefixComparator, + prefixComputer, + conf.getSizeAsBytes("mycat.buffer.pageSize","1m"), + true, /**使用基数排序?true or false*/ + true); + + UnsafeRow unsafeRow; + BufferHolder bufferHolder; + UnsafeRowWriter unsafeRowWriter; + String line = "testUnsafeRow"; + // List floats = new ArrayList(); + List longs = new ArrayList(); + final Random rand = new Random(42); + for (int i = 0; i < TEST_SIZE; i++) { + unsafeRow = new UnsafeRow(3); + bufferHolder = new BufferHolder(unsafeRow); + unsafeRowWriter = new UnsafeRowWriter(bufferHolder,3); + bufferHolder.reset(); + + String key = getRandomString(rand.nextInt(300)+100); + + //long v = rand.nextLong(); + // longs.add(v); + unsafeRowWriter.write(0,key.getBytes()); + // unsafeRowWriter.write(0, BytesTools.toBytes(v)); + unsafeRowWriter.write(1, line.getBytes()); + unsafeRowWriter.write(2, ("35" + 1).getBytes()); + + unsafeRow.setTotalSize(bufferHolder.totalSize()); + sorter.insertRow(unsafeRow); + } + + Iterator iter = sorter.sort(); +/* + float [] com = new float[floats.size()]; + for (int i = 0; i = 2); + UnsafeExternalSorter.SpillableIterator iter = + (UnsafeExternalSorter.SpillableIterator) sorter.getSortedIterator(); + int lastv = 0; + for (int i = 0; i < n / 3; i++) { + iter.hasNext(); + iter.loadNext(); + 
assertTrue(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == i); + lastv = i; + } + assertTrue(iter.spill() > 0); + assertEquals(0, iter.spill()); + assertTrue(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == lastv); + for (int i = n / 3; i < n; i++) { + iter.hasNext(); + iter.loadNext(); + assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset())); + } + sorter.cleanupResources(); + assertSpillFilesWereCleanedUp(); + } + + @Test + public void forcedSpillingWithNotReadIterator() throws Exception { + final UnsafeExternalSorter sorter = newSorter(); + long[] record = new long[100]; + int recordSize = record.length * 8; + int n = (int) pageSizeBytes / recordSize * 3; + for (int i = 0; i < n; i++) { + record[0] = (long) i; + sorter.insertRecord(record, Platform.LONG_ARRAY_OFFSET, recordSize, 0); + } + assertTrue(sorter.getNumberOfAllocatedPages() >= 2); + UnsafeExternalSorter.SpillableIterator iter = + (UnsafeExternalSorter.SpillableIterator) sorter.getSortedIterator(); + assertTrue(iter.spill() > 0); + assertEquals(0, iter.spill()); + for (int i = 0; i < n; i++) { + iter.hasNext(); + iter.loadNext(); + assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset())); + } + sorter.cleanupResources(); + assertSpillFilesWereCleanedUp(); + } + + @Test + public void forcedSpillingWithoutComparator() throws Exception { + final UnsafeExternalSorter sorter = UnsafeExternalSorter.create( + DATA_NODE_MEMORY_MANAGER, + blockManager, + serializerManager, + null, + null, + /* initialSize */ 1024, + pageSizeBytes, + shouldUseRadixSort(),true); + long[] record = new long[100]; + int recordSize = record.length * 8; + int n = (int) pageSizeBytes / recordSize * 3; + int batch = n / 4; + for (int i = 0; i < n; i++) { + record[0] = (long) i; + sorter.insertRecord(record, Platform.LONG_ARRAY_OFFSET, recordSize, 0); + if (i % batch == batch - 1) { + sorter.spill(); + } + } + UnsafeSorterIterator iter = sorter.getIterator(); + for 
(int i = 0; i < n; i++) { + iter.hasNext(); + iter.loadNext(); + assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset())); + } + sorter.cleanupResources(); + assertSpillFilesWereCleanedUp(); + } + + @Test + public void testPeakMemoryUsed() throws Exception { + final long recordLengthBytes = 8; + final long pageSizeBytes = 256; + final long numRecordsPerPage = pageSizeBytes / recordLengthBytes; + final UnsafeExternalSorter sorter = UnsafeExternalSorter.create( + DATA_NODE_MEMORY_MANAGER, + blockManager, + serializerManager, + recordComparator, + prefixComparator, + 1024, + pageSizeBytes, + shouldUseRadixSort(),true); + + // Peak memory should be monotonically increasing. More specifically, every time + // we allocate a new page it should increase by exactly the size of the page. + long previousPeakMemory = sorter.getPeakMemoryUsedBytes(); + long newPeakMemory; + try { + for (int i = 0; i < numRecordsPerPage * 10; i++) { + insertNumber(sorter, i); + newPeakMemory = sorter.getPeakMemoryUsedBytes(); + if (i % numRecordsPerPage == 0) { + // We allocated a new page for this record, so peak memory should change + assertEquals(previousPeakMemory + pageSizeBytes, newPeakMemory); + } else { + assertEquals(previousPeakMemory, newPeakMemory); + } + previousPeakMemory = newPeakMemory; + } + + // Spilling should not change peak memory + sorter.spill(); + newPeakMemory = sorter.getPeakMemoryUsedBytes(); + assertEquals(previousPeakMemory, newPeakMemory); + for (int i = 0; i < numRecordsPerPage; i++) { + insertNumber(sorter, i); + } + newPeakMemory = sorter.getPeakMemoryUsedBytes(); + assertEquals(previousPeakMemory, newPeakMemory); + } finally { + sorter.cleanupResources(); + assertSpillFilesWereCleanedUp(); + } + } + +} + diff --git a/src/test/java/io/mycat/memory/unsafe/sort/UnsafeInMemorySorterRadixSortSuite.java b/src/test/java/io/mycat/memory/unsafe/sort/UnsafeInMemorySorterRadixSortSuite.java new file mode 100644 index 000000000..d08c1b0f8 --- /dev/null 
+++ b/src/test/java/io/mycat/memory/unsafe/sort/UnsafeInMemorySorterRadixSortSuite.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.sort; + +public class UnsafeInMemorySorterRadixSortSuite extends UnsafeInMemorySorterSuite { + @Override + protected boolean shouldUseRadixSort() { return true; } +} diff --git a/src/test/java/io/mycat/memory/unsafe/sort/UnsafeInMemorySorterSuite.java b/src/test/java/io/mycat/memory/unsafe/sort/UnsafeInMemorySorterSuite.java new file mode 100644 index 000000000..ed41f48f2 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/sort/UnsafeInMemorySorterSuite.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.mycat.memory.unsafe.sort; + +import io.mycat.memory.unsafe.Platform; +import io.mycat.memory.unsafe.memory.MemoryBlock; +import io.mycat.memory.unsafe.memory.TestMemoryConsumer; +import io.mycat.memory.unsafe.memory.TestMemoryManager; +import io.mycat.memory.unsafe.memory.mm.DataNodeMemoryManager; +import io.mycat.memory.unsafe.utils.MycatPropertyConf; +import io.mycat.memory.unsafe.utils.sort.*; +import org.junit.Assert; +import org.junit.Test; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.isIn; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.mockito.Mockito.mock; + +public class UnsafeInMemorySorterSuite { + + protected boolean shouldUseRadixSort() { return true; } + + private static String getStringFromDataPage(Object baseObject,long baseOffset,int length) { + final byte[] strBytes = new byte[length]; + Platform.copyMemory(baseObject,baseOffset,strBytes, Platform.BYTE_ARRAY_OFFSET,length); + return new String(strBytes,StandardCharsets.UTF_8); + } + + @Test + public void testSortingEmptyInput() { + final DataNodeMemoryManager memoryManager = new DataNodeMemoryManager( + new TestMemoryManager(new MycatPropertyConf().set("mycat.memory.offHeap.enabled", "false")), 0); + final TestMemoryConsumer consumer = new TestMemoryConsumer(memoryManager); + final UnsafeInMemorySorter sorter = new UnsafeInMemorySorter(consumer, + memoryManager, + 
mock(RecordComparator.class), + mock(PrefixComparator.class), + 100, + shouldUseRadixSort(),true); + final UnsafeSorterIterator iter = sorter.getSortedIterator(); + Assert.assertFalse(iter.hasNext()); + } + + @Test + public void testSortingOnlyByIntegerPrefix() throws Exception { + final String[] dataToSort = new String[] { + "Boba", + "Pearls", + "Tapioca", + "Taho", + "Condensed Milk", + "Jasmine", + "Milk Tea", + "Lychee", + "Mango" + }; + final DataNodeMemoryManager memoryManager = new DataNodeMemoryManager( + new TestMemoryManager(new MycatPropertyConf().set("mycat.memory.offHeap.enabled","false")), 0); + final TestMemoryConsumer consumer = new TestMemoryConsumer(memoryManager); + final MemoryBlock dataPage = memoryManager.allocatePage(2048, null); + + final Object baseObject = dataPage.getBaseObject(); + + // Write the records into the data page: + long position = dataPage.getBaseOffset(); + + for (String str : dataToSort) { + final byte[] strBytes = str.getBytes(StandardCharsets.UTF_8); + Platform.putInt(baseObject, position, strBytes.length); + position += 4; + Platform.copyMemory(strBytes,Platform.BYTE_ARRAY_OFFSET,baseObject, position, strBytes.length); + position += strBytes.length; + } + + // Since the key fits within the 8-byte prefix, we don't need to do any record comparison, so + // use a dummy comparator + final RecordComparator recordComparator = new RecordComparator() { + @Override + public int compare( + Object leftBaseObject, + long leftBaseOffset, + Object rightBaseObject, + long rightBaseOffset) { + return 0; + } + }; + // Compute key prefixes based on the records' partition ids + + final HashPartitioner hashPartitioner = new HashPartitioner(4); + + // Use integer comparison for comparing prefixes (which are partition ids, in this case) + final PrefixComparator prefixComparator = PrefixComparators.LONG; + + UnsafeInMemorySorter sorter = new UnsafeInMemorySorter( + consumer,memoryManager,recordComparator, + prefixComparator, dataToSort.length, 
+ shouldUseRadixSort(),true); + + // Given a page of records, insert those records into the sorter one-by-one: + position = dataPage.getBaseOffset(); + System.out.println("(0)address = " + position); + + for (int i = 0; i < dataToSort.length; i++) { + + if (!sorter.hasSpaceForAnotherRecord()) { + sorter.expandPointerArray(consumer.allocateLongArray(sorter.getMemoryUsage() / 8 * 2)); + } + + // position now points to the start of a record (which holds its length). + final int recordLength = Platform.getInt(baseObject,position); + + final long address = memoryManager.encodePageNumberAndOffset(dataPage,position); + + + final String str = getStringFromDataPage(baseObject,position+4,recordLength); + + final int partitionId = hashPartitioner.getPartition(str); + System.out.println("(" + partitionId + "," + str + ")"); + + sorter.insertRecord(address,partitionId); + + position += 4 + recordLength; + } + + + + final UnsafeSorterIterator iter = sorter.getSortedIterator(); + + int iterLength = 0; + long prevPrefix = -1; + + Arrays.sort(dataToSort); + + + + while (iter.hasNext()) { + iter.loadNext(); + + final String str = getStringFromDataPage(iter.getBaseObject(), iter.getBaseOffset(), iter.getRecordLength()); + + final long keyPrefix = iter.getKeyPrefix(); + + assertThat(str, isIn(Arrays.asList(dataToSort))); + assertThat(keyPrefix, greaterThanOrEqualTo(prevPrefix)); + + prevPrefix = keyPrefix; + + iterLength++; + } + + + + assertEquals(dataToSort.length, iterLength); + } +} diff --git a/src/test/java/io/mycat/memory/unsafe/storage/BlockManagerTest.java b/src/test/java/io/mycat/memory/unsafe/storage/BlockManagerTest.java new file mode 100644 index 000000000..73f62cb88 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/storage/BlockManagerTest.java @@ -0,0 +1,108 @@ +package io.mycat.memory.unsafe.storage; + +import com.google.common.io.Closeables; +import io.mycat.memory.unsafe.utils.MycatPropertyConf; +import org.junit.Assert; +import org.junit.Test; +import 
org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.*; + +/** + * Created by zagnix on 2016/6/4. + */ +public class BlockManagerTest { + private static final Logger logger = LoggerFactory.getLogger(BlockManagerTest.class); + + @Test + public void testNewDiskBlockManager() throws IOException { + MycatPropertyConf conf = new MycatPropertyConf(); + SerializerManager serializerManager = new SerializerManager(); + DataNodeDiskManager blockManager = new DataNodeDiskManager(conf,true,serializerManager); + DataNodeFileManager diskBlockManager = blockManager.diskBlockManager(); + /** + * 生成一个文本文件 + */ + File file = diskBlockManager.getFile("mycat1"); + FileOutputStream fos = new FileOutputStream(file); + BufferedOutputStream bos = new BufferedOutputStream(fos); + + bos.write("KOKKKKKK".getBytes()); + bos.flush(); + bos.close(); + fos.close(); + + + /** + * 读刚刚写入的文件 + */ + File file1 = diskBlockManager.getFile("mycat1"); + FileInputStream ios = new FileInputStream(file1); + + BufferedInputStream bin = new BufferedInputStream(ios); + byte[] str = new byte["KOKKKKKK".getBytes().length]; + int size = bin.read(str); + bin.close(); + ios.close(); + + Assert.assertEquals("KOKKKKKK",new String(str)); + + + + File file2 = diskBlockManager.getFile("mycat1"); + + DiskRowWriter writer = blockManager. + getDiskWriter(null,file2,DummySerializerInstance.INSTANCE,1024*1024); + byte [] writeBuffer = new byte[4]; + int v =4; + writeBuffer[0] = (byte)(v >>> 24); + writeBuffer[1] = (byte)(v >>> 16); + writeBuffer[2] = (byte)(v >>> 8); + writeBuffer[3] = (byte)(v >>> 0); + writer.write(writeBuffer,0,4); + + + writer.write("you are ok? 1111111111111".getBytes(),0,"you are ok? 1111111111111".getBytes().length); + writer.write("you are ok? 1111111111111".getBytes(),0,"you are ok? 1111111111111".getBytes().length); + writer.write("you are ok? 1111111111111".getBytes(),0,"you are ok? 1111111111111".getBytes().length); + writer.write("you are ok? 
1111111111111".getBytes(),0,"you are ok? 1111111111111".getBytes().length); + writer.write("you are ok? 1111111111111".getBytes(),0,"you are ok? 1111111111111".getBytes().length); + writer.write("you are ok? 1111111111111".getBytes(),0,"you are ok? 1111111111111".getBytes().length); + writer.write("you are ok? 1111111111111".getBytes(),0,"you are ok? 1111111111111".getBytes().length); + writer.write("you are ok? 1111111111111".getBytes(),0,"you are ok? 1111111111111".getBytes().length); + + writer.close(); + + + try { + Thread.sleep(100); + } catch (InterruptedException e) { + logger.error(e.getMessage()); + } + + assert (file2.length() > 0); + final BufferedInputStream bs = new BufferedInputStream(new FileInputStream(file2)); + try { + InputStream in = serializerManager.wrapForCompression(null,bs); + DataInputStream din= new DataInputStream(in); + int numRecords = din.readInt(); + Assert.assertEquals(4,numRecords); + din.close(); + in.close(); + bs.close(); + + } catch (IOException e) { + Closeables.close(bs, /* swallowIOException = */ true); + throw e; + } + + } + + @Test + public void testNewDiskBlockWriter(){ + MycatPropertyConf conf = new MycatPropertyConf(); + SerializerManager serializerManager = new SerializerManager(); + } + +} diff --git a/src/test/java/io/mycat/memory/unsafe/storage/SerializerManagerTest.java b/src/test/java/io/mycat/memory/unsafe/storage/SerializerManagerTest.java new file mode 100644 index 000000000..00f5f5ff2 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/storage/SerializerManagerTest.java @@ -0,0 +1,36 @@ +package io.mycat.memory.unsafe.storage; + +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +/** + * Created by zagnix on 2016/6/4. 
+ */ +public class SerializerManagerTest { + @Test + public void testNewSerializerManager() throws IOException { + SerializerManager serializerManager = new SerializerManager(); + final int[] value = new int[1]; + OutputStream s = serializerManager.wrapForCompression(null, new OutputStream() { + @Override + public void write(int b) throws IOException { + value[0] = b; + } + }); + + s.write(10); + Assert.assertEquals(10,value[0]); + + InputStream in = serializerManager.wrapForCompression(null, new InputStream() { + @Override + public int read() throws IOException { + return 10; + } + }); + Assert.assertEquals(10,in.read()); + } +} diff --git a/src/test/java/io/mycat/memory/unsafe/types/CalendarIntervalSuite.java b/src/test/java/io/mycat/memory/unsafe/types/CalendarIntervalSuite.java new file mode 100644 index 000000000..14d26fb9e --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/types/CalendarIntervalSuite.java @@ -0,0 +1,239 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +package io.mycat.memory.unsafe.types; + +import org.junit.Test; + +import static org.junit.Assert.*; + +public class CalendarIntervalSuite { + + @Test + public void equalsTest() { + CalendarInterval i1 = new CalendarInterval(3, 123); + CalendarInterval i2 = new CalendarInterval(3, 321); + CalendarInterval i3 = new CalendarInterval(1, 123); + CalendarInterval i4 = new CalendarInterval(3, 123); + + assertNotSame(i1, i2); + assertNotSame(i1, i3); + assertNotSame(i2, i3); + assertEquals(i1, i4); + } + + @Test + public void toStringTest() { + CalendarInterval i; + + i = new CalendarInterval(34, 0); + assertEquals("interval 2 years 10 months", i.toString()); + + i = new CalendarInterval(-34, 0); + assertEquals("interval -2 years -10 months", i.toString()); + + i = new CalendarInterval(0, 3 * CalendarInterval.MICROS_PER_WEEK + 13 * CalendarInterval.MICROS_PER_HOUR + 123); + assertEquals("interval 3 weeks 13 hours 123 microseconds", i.toString()); + + i = new CalendarInterval(0, -3 * CalendarInterval.MICROS_PER_WEEK - 13 * CalendarInterval.MICROS_PER_HOUR - 123); + assertEquals("interval -3 weeks -13 hours -123 microseconds", i.toString()); + + i = new CalendarInterval(34, 3 * CalendarInterval.MICROS_PER_WEEK + 13 * CalendarInterval.MICROS_PER_HOUR + 123); + assertEquals("interval 2 years 10 months 3 weeks 13 hours 123 microseconds", i.toString()); + } + + @Test + public void fromStringTest() { + testSingleUnit("year", 3, 36, 0); + testSingleUnit("month", 3, 3, 0); + testSingleUnit("week", 3, 0, 3 * CalendarInterval.MICROS_PER_WEEK); + testSingleUnit("day", 3, 0, 3 * CalendarInterval.MICROS_PER_DAY); + testSingleUnit("hour", 3, 0, 3 * CalendarInterval.MICROS_PER_HOUR); + testSingleUnit("minute", 3, 0, 3 *CalendarInterval. MICROS_PER_MINUTE); + testSingleUnit("second", 3, 0, 3 * CalendarInterval.MICROS_PER_SECOND); + testSingleUnit("millisecond", 3, 0, 3 *CalendarInterval. 
MICROS_PER_MILLI); + testSingleUnit("microsecond", 3, 0, 3); + + String input; + + input = "interval -5 years 23 month"; + CalendarInterval result = new CalendarInterval(-5 * 12 + 23, 0); + assertEquals(CalendarInterval.fromString(input), result); + + input = "interval -5 years 23 month "; + assertEquals(CalendarInterval.fromString(input), result); + + input = " interval -5 years 23 month "; + assertEquals(CalendarInterval.fromString(input), result); + + // Error cases + input = "interval 3month 1 hour"; + assertNull(CalendarInterval.fromString(input)); + + input = "interval 3 moth 1 hour"; + assertNull(CalendarInterval.fromString(input)); + + input = "interval"; + assertNull(CalendarInterval.fromString(input)); + + input = "int"; + assertNull(CalendarInterval.fromString(input)); + + input = ""; + assertNull(CalendarInterval.fromString(input)); + + input = null; + assertNull(CalendarInterval.fromString(input)); + } + + @Test + public void fromYearMonthStringTest() { + String input; + CalendarInterval i; + + input = "99-10"; + i = new CalendarInterval(99 * 12 + 10, 0L); + assertEquals(CalendarInterval.fromYearMonthString(input), i); + + input = "-8-10"; + i = new CalendarInterval(-8 * 12 - 10, 0L); + assertEquals(CalendarInterval.fromYearMonthString(input), i); + + try { + input = "99-15"; + CalendarInterval.fromYearMonthString(input); + fail("Expected to throw an exception for the invalid input"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("month 15 outside range")); + } + } + + @Test + public void fromDayTimeStringTest() { + String input; + CalendarInterval i; + + input = "5 12:40:30.999999999"; + i = new CalendarInterval(0, 5 * CalendarInterval.MICROS_PER_DAY + 12 * CalendarInterval.MICROS_PER_HOUR + + 40 *CalendarInterval. MICROS_PER_MINUTE + 30 *CalendarInterval. 
MICROS_PER_SECOND + 999999L); + assertEquals(CalendarInterval.fromDayTimeString(input), i); + + input = "10 0:12:0.888"; + i = new CalendarInterval(0, 10 * CalendarInterval.MICROS_PER_DAY + 12 * CalendarInterval.MICROS_PER_MINUTE); + assertEquals(CalendarInterval.fromDayTimeString(input), i); + + input = "-3 0:0:0"; + i = new CalendarInterval(0, -3 * CalendarInterval.MICROS_PER_DAY); + assertEquals(CalendarInterval.fromDayTimeString(input), i); + + try { + input = "5 30:12:20"; + CalendarInterval.fromDayTimeString(input); + fail("Expected to throw an exception for the invalid input"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("hour 30 outside range")); + } + + try { + input = "5 30-12"; + CalendarInterval.fromDayTimeString(input); + fail("Expected to throw an exception for the invalid input"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("not match day-time format")); + } + } + + @Test + public void fromSingleUnitStringTest() { + String input; + CalendarInterval i; + + input = "12"; + i = new CalendarInterval(12 * 12, 0L); + assertEquals(CalendarInterval.fromSingleUnitString("year", input), i); + + input = "100"; + i = new CalendarInterval(0, 100 * CalendarInterval.MICROS_PER_DAY); + assertEquals(CalendarInterval.fromSingleUnitString("day", input), i); + + input = "1999.38888"; + i = new CalendarInterval(0, 1999 *CalendarInterval. 
MICROS_PER_SECOND + 38); + assertEquals(CalendarInterval.fromSingleUnitString("second", input), i); + + try { + input = String.valueOf(Integer.MAX_VALUE); + CalendarInterval.fromSingleUnitString("year", input); + fail("Expected to throw an exception for the invalid input"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("outside range")); + } + + try { + input = String.valueOf(Long.MAX_VALUE / CalendarInterval.MICROS_PER_HOUR + 1); + CalendarInterval.fromSingleUnitString("hour", input); + fail("Expected to throw an exception for the invalid input"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("outside range")); + } + } + + @Test + public void addTest() { + String input = "interval 3 month 1 hour"; + String input2 = "interval 2 month 100 hour"; + + CalendarInterval interval = CalendarInterval.fromString(input); + CalendarInterval interval2 = CalendarInterval.fromString(input2); + + assertEquals(interval.add(interval2), new CalendarInterval(5, 101 * CalendarInterval.MICROS_PER_HOUR)); + + input = "interval -10 month -81 hour"; + input2 = "interval 75 month 200 hour"; + + interval = CalendarInterval.fromString(input); + interval2 = CalendarInterval.fromString(input2); + + assertEquals(interval.add(interval2), new CalendarInterval(65, 119 * CalendarInterval.MICROS_PER_HOUR)); + } + + @Test + public void subtractTest() { + String input = "interval 3 month 1 hour"; + String input2 = "interval 2 month 100 hour"; + + CalendarInterval interval = CalendarInterval.fromString(input); + CalendarInterval interval2 = CalendarInterval.fromString(input2); + + assertEquals(interval.subtract(interval2), new CalendarInterval(1, -99 * CalendarInterval.MICROS_PER_HOUR)); + + input = "interval -10 month -81 hour"; + input2 = "interval 75 month 200 hour"; + + interval = CalendarInterval.fromString(input); + interval2 = CalendarInterval.fromString(input2); + + assertEquals(interval.subtract(interval2), new 
CalendarInterval(-85, -281 * CalendarInterval.MICROS_PER_HOUR)); + } + + private static void testSingleUnit(String unit, int number, int months, long microseconds) { + String input1 = "interval " + number + " " + unit; + String input2 = "interval " + number + " " + unit + "s"; + CalendarInterval result = new CalendarInterval(months, microseconds); + assertEquals(CalendarInterval.fromString(input1), result); + assertEquals(CalendarInterval.fromString(input2), result); + } +} diff --git a/src/test/java/io/mycat/memory/unsafe/types/UTF8StringSuite.java b/src/test/java/io/mycat/memory/unsafe/types/UTF8StringSuite.java new file mode 100644 index 000000000..038ac1a09 --- /dev/null +++ b/src/test/java/io/mycat/memory/unsafe/types/UTF8StringSuite.java @@ -0,0 +1,492 @@ +package io.mycat.memory.unsafe.types; + +/* +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + + +import com.google.common.collect.ImmutableMap; +import org.junit.Test; + +import java.io.UnsupportedEncodingException; +import java.util.Arrays; +import java.util.HashMap; + +import static org.junit.Assert.*; + + +public class UTF8StringSuite { + + private static void checkBasic(String str, int len) throws UnsupportedEncodingException { + UTF8String s1 = UTF8String.fromString(str); + UTF8String s2 = UTF8String.fromBytes(str.getBytes("utf8")); + assertEquals(s1.numChars(), len); + assertEquals(s2.numChars(), len); + + assertEquals(s1.toString(), str); + assertEquals(s2.toString(), str); + assertEquals(s1, s2); + + assertEquals(s1.hashCode(), s2.hashCode()); + + assertEquals(0, s1.compareTo(s2)); + + assertTrue(s1.contains(s2)); + assertTrue(s2.contains(s1)); + assertTrue(s1.startsWith(s1)); + assertTrue(s1.endsWith(s1)); + } + + @Test + public void basicTest() throws UnsupportedEncodingException { + checkBasic("", 0); + checkBasic("hello", 5); + checkBasic("大 千 世 界", 7); + } + + @Test + public void emptyStringTest() { + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.fromString("")); + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.fromBytes(new byte[0])); + assertEquals(0, UTF8String.EMPTY_UTF8.numChars()); + assertEquals(0, UTF8String.EMPTY_UTF8.numBytes()); + } + + @Test + public void prefix() { + assertTrue(UTF8String.fromString("a").getPrefix() - UTF8String.fromString("b").getPrefix() < 0); + assertTrue(UTF8String.fromString("ab").getPrefix() - UTF8String.fromString("b").getPrefix() < 0); + assertTrue( + UTF8String.fromString("abbbbbbbbbbbasdf").getPrefix() - UTF8String.fromString("bbbbbbbbbbbbasdf").getPrefix() < 0); + assertTrue(UTF8String.fromString("").getPrefix() - UTF8String.fromString("a").getPrefix() < 0); + assertTrue(UTF8String.fromString("你好").getPrefix() - UTF8String.fromString("世界").getPrefix() > 0); + + byte[] buf1 = {1, 2, 3, 4, 5, 6, 7, 8, 9}; + byte[] buf2 = {1, 2, 3}; + UTF8String str1 = UTF8String.fromBytes(buf1, 0, 3); + UTF8String 
str2 = UTF8String.fromBytes(buf1, 0, 8); + UTF8String str3 = UTF8String.fromBytes(buf2); + assertTrue(str1.getPrefix() - str2.getPrefix() < 0); + assertEquals(str1.getPrefix(), str3.getPrefix()); + } + + @Test + public void compareTo() { + assertTrue(UTF8String.fromString("").compareTo(UTF8String.fromString("a")) < 0); + assertTrue(UTF8String.fromString("abc").compareTo(UTF8String.fromString("ABC")) > 0); + assertTrue(UTF8String.fromString("abc0").compareTo(UTF8String.fromString("abc")) > 0); + assertTrue(UTF8String.fromString("abcabcabc").compareTo(UTF8String.fromString("abcabcabc")) == 0); + assertTrue(UTF8String.fromString("aBcabcabc").compareTo(UTF8String.fromString("Abcabcabc")) > 0); + assertTrue(UTF8String.fromString("Abcabcabc").compareTo(UTF8String.fromString("abcabcabC")) < 0); + assertTrue(UTF8String.fromString("abcabcabc").compareTo(UTF8String.fromString("abcabcabC")) > 0); + + assertTrue(UTF8String.fromString("abc").compareTo(UTF8String.fromString("世界")) < 0); + assertTrue(UTF8String.fromString("你好").compareTo(UTF8String.fromString("世界")) > 0); + assertTrue(UTF8String.fromString("你好123").compareTo(UTF8String.fromString("你好122")) > 0); + } + + protected static void testUpperandLower(String upper, String lower) { + UTF8String us = UTF8String.fromString(upper); + UTF8String ls = UTF8String.fromString(lower); + assertEquals(ls, us.toLowerCase()); + assertEquals(us, ls.toUpperCase()); + assertEquals(us, us.toUpperCase()); + assertEquals(ls, ls.toLowerCase()); + } + + @Test + public void upperAndLower() { + testUpperandLower("", ""); + testUpperandLower("0123456", "0123456"); + testUpperandLower("ABCXYZ", "abcxyz"); + testUpperandLower("ЀЁЂѺΏỀ", "ѐёђѻώề"); + testUpperandLower("大千世界 数据砖头", "大千世界 数据砖头"); + } + + @Test + public void titleCase() { + assertEquals(UTF8String.fromString(""), UTF8String.fromString("").toTitleCase()); + assertEquals(UTF8String.fromString("Ab Bc Cd"), UTF8String.fromString("ab bc cd").toTitleCase()); + 
assertEquals(UTF8String.fromString("Ѐ Ё Ђ Ѻ Ώ Ề"), UTF8String.fromString("ѐ ё ђ ѻ ώ ề").toTitleCase()); + assertEquals(UTF8String.fromString("大千世界 数据砖头"), UTF8String.fromString("大千世界 数据砖头").toTitleCase()); + } + + @Test + public void concatTest() { + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.concat()); + assertNull(UTF8String.concat((UTF8String) null)); + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.concat(UTF8String.EMPTY_UTF8)); + assertEquals(UTF8String.fromString("ab"), UTF8String.concat(UTF8String.fromString("ab"))); + assertEquals(UTF8String.fromString("ab"), UTF8String.concat(UTF8String.fromString("a"), UTF8String.fromString("b"))); + assertEquals(UTF8String.fromString("abc"), UTF8String.concat(UTF8String.fromString("a"), UTF8String.fromString("b"), UTF8String.fromString("c"))); + assertNull(UTF8String.concat(UTF8String.fromString("a"), null, UTF8String.fromString("c"))); + assertNull(UTF8String.concat(UTF8String.fromString("a"), null, null)); + assertNull(UTF8String.concat(null, null, null)); + assertEquals(UTF8String.fromString("数据砖头"), UTF8String.concat(UTF8String.fromString("数据"), UTF8String.fromString("砖头"))); + } + + @Test + public void concatWsTest() { + // Returns null if the separator is null + assertNull(UTF8String.concatWs(null, (UTF8String) null)); + assertNull(UTF8String.concatWs(null, UTF8String.fromString("a"))); + + // If separator is null, concatWs should skip all null inputs and never return null. 
+ UTF8String sep = UTF8String.fromString("哈哈"); + assertEquals( + UTF8String.EMPTY_UTF8, + UTF8String.concatWs(sep, UTF8String.EMPTY_UTF8)); + assertEquals( + UTF8String.fromString("ab"), + UTF8String.concatWs(sep, UTF8String.fromString("ab"))); + assertEquals( + UTF8String.fromString("a哈哈b"), + UTF8String.concatWs(sep, UTF8String.fromString("a"), UTF8String.fromString("b"))); + assertEquals( + UTF8String.fromString("a哈哈b哈哈c"), + UTF8String.concatWs(sep, UTF8String.fromString("a"), UTF8String.fromString("b"), UTF8String.fromString("c"))); + assertEquals( + UTF8String.fromString("a哈哈c"), + UTF8String.concatWs(sep, UTF8String.fromString("a"), null, UTF8String.fromString("c"))); + assertEquals( + UTF8String.fromString("a"), + UTF8String.concatWs(sep, UTF8String.fromString("a"), null, null)); + assertEquals( + UTF8String.EMPTY_UTF8, + UTF8String.concatWs(sep, null, null, null)); + assertEquals( + UTF8String.fromString("数据哈哈砖头"), + UTF8String.concatWs(sep, UTF8String.fromString("数据"), UTF8String.fromString("砖头"))); + } + + @Test + public void contains() { + assertTrue(UTF8String.EMPTY_UTF8.contains(UTF8String.EMPTY_UTF8)); + assertTrue(UTF8String.fromString("hello").contains(UTF8String.fromString("ello"))); + assertFalse(UTF8String.fromString("hello").contains(UTF8String.fromString("vello"))); + assertFalse(UTF8String.fromString("hello").contains(UTF8String.fromString("hellooo"))); + assertTrue(UTF8String.fromString("大千世界").contains(UTF8String.fromString("千世界"))); + assertFalse(UTF8String.fromString("大千世界").contains(UTF8String.fromString("世千"))); + assertFalse(UTF8String.fromString("大千世界").contains(UTF8String.fromString("大千世界好"))); + } + + @Test + public void startsWith() { + assertTrue(UTF8String.EMPTY_UTF8.startsWith(UTF8String.EMPTY_UTF8)); + assertTrue(UTF8String.fromString("hello").startsWith(UTF8String.fromString("hell"))); + assertFalse(UTF8String.fromString("hello").startsWith(UTF8String.fromString("ell"))); + 
assertFalse(UTF8String.fromString("hello").startsWith(UTF8String.fromString("hellooo"))); + assertTrue(UTF8String.fromString("数据砖头").startsWith(UTF8String.fromString("数据"))); + assertFalse(UTF8String.fromString("大千世界").startsWith(UTF8String.fromString("千"))); + assertFalse(UTF8String.fromString("大千世界").startsWith(UTF8String.fromString("大千世界好"))); + } + + @Test + public void endsWith() { + assertTrue(UTF8String.EMPTY_UTF8.endsWith(UTF8String.EMPTY_UTF8)); + assertTrue(UTF8String.fromString("hello").endsWith(UTF8String.fromString("ello"))); + assertFalse(UTF8String.fromString("hello").endsWith(UTF8String.fromString("ellov"))); + assertFalse(UTF8String.fromString("hello").endsWith(UTF8String.fromString("hhhello"))); + assertTrue(UTF8String.fromString("大千世界").endsWith(UTF8String.fromString("世界"))); + assertFalse(UTF8String.fromString("大千世界").endsWith(UTF8String.fromString("世"))); + assertFalse(UTF8String.fromString("数据砖头").endsWith(UTF8String.fromString("我的数据砖头"))); + } + + @Test + public void substring() { + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.fromString("hello").substring(0, 0)); + assertEquals(UTF8String.fromString("el"), UTF8String.fromString("hello").substring(1, 3)); + assertEquals(UTF8String.fromString("数"), UTF8String.fromString("数据砖头").substring(0, 1)); + assertEquals(UTF8String.fromString("据砖"), UTF8String.fromString("数据砖头").substring(1, 3)); + assertEquals(UTF8String.fromString("头"), UTF8String.fromString("数据砖头").substring(3, 5)); + assertEquals(UTF8String.fromString("ߵ梷"), UTF8String.fromString("ߵ梷").substring(0, 2)); + } + + @Test + public void trims() { + assertEquals(UTF8String.fromString("hello"), UTF8String.fromString(" hello ").trim()); + assertEquals(UTF8String.fromString("hello "), UTF8String.fromString(" hello ").trimLeft()); + assertEquals(UTF8String.fromString(" hello"), UTF8String.fromString(" hello ").trimRight()); + + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.fromString(" ").trim()); + assertEquals(UTF8String.EMPTY_UTF8, 
UTF8String.fromString(" ").trimLeft()); + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.fromString(" ").trimRight()); + + assertEquals(UTF8String.fromString("数据砖头"), UTF8String.fromString(" 数据砖头 ").trim()); + assertEquals(UTF8String.fromString("数据砖头 "), UTF8String.fromString(" 数据砖头 ").trimLeft()); + assertEquals(UTF8String.fromString(" 数据砖头"), UTF8String.fromString(" 数据砖头 ").trimRight()); + + assertEquals(UTF8String.fromString("数据砖头"), UTF8String.fromString("数据砖头").trim()); + assertEquals(UTF8String.fromString("数据砖头"), UTF8String.fromString("数据砖头").trimLeft()); + assertEquals(UTF8String.fromString("数据砖头"), UTF8String.fromString("数据砖头").trimRight()); + } + + @Test + public void indexOf() { + assertEquals(0, UTF8String.EMPTY_UTF8.indexOf(UTF8String.EMPTY_UTF8, 0)); + assertEquals(-1, UTF8String.EMPTY_UTF8.indexOf(UTF8String.fromString("l"), 0)); + assertEquals(0, UTF8String.fromString("hello").indexOf(UTF8String.EMPTY_UTF8, 0)); + assertEquals(2, UTF8String.fromString("hello").indexOf(UTF8String.fromString("l"), 0)); + assertEquals(3, UTF8String.fromString("hello").indexOf(UTF8String.fromString("l"), 3)); + assertEquals(-1, UTF8String.fromString("hello").indexOf(UTF8String.fromString("a"), 0)); + assertEquals(2, UTF8String.fromString("hello").indexOf(UTF8String.fromString("ll"), 0)); + assertEquals(-1, UTF8String.fromString("hello").indexOf(UTF8String.fromString("ll"), 4)); + assertEquals(1, UTF8String.fromString("数据砖头").indexOf(UTF8String.fromString("据砖"), 0)); + assertEquals(-1, UTF8String.fromString("数据砖头").indexOf(UTF8String.fromString("数"), 3)); + assertEquals(0, UTF8String.fromString("数据砖头").indexOf(UTF8String.fromString("数"), 0)); + assertEquals(3, UTF8String.fromString("数据砖头").indexOf(UTF8String.fromString("头"), 0)); + } + + @Test + public void substring_index() { + assertEquals(UTF8String.fromString("www.apache.org"), + UTF8String.fromString("www.apache.org").subStringIndex(UTF8String.fromString("."), 3)); + assertEquals(UTF8String.fromString("www.apache"), 
+ UTF8String.fromString("www.apache.org").subStringIndex(UTF8String.fromString("."), 2)); + assertEquals(UTF8String.fromString("www"), + UTF8String.fromString("www.apache.org").subStringIndex(UTF8String.fromString("."), 1)); + assertEquals(UTF8String.fromString(""), + UTF8String.fromString("www.apache.org").subStringIndex(UTF8String.fromString("."), 0)); + assertEquals(UTF8String.fromString("org"), + UTF8String.fromString("www.apache.org").subStringIndex(UTF8String.fromString("."), -1)); + assertEquals(UTF8String.fromString("apache.org"), + UTF8String.fromString("www.apache.org").subStringIndex(UTF8String.fromString("."), -2)); + assertEquals(UTF8String.fromString("www.apache.org"), + UTF8String.fromString("www.apache.org").subStringIndex(UTF8String.fromString("."), -3)); + // str is empty string + assertEquals(UTF8String.fromString(""), + UTF8String.fromString("").subStringIndex(UTF8String.fromString("."), 1)); + // empty string delim + assertEquals(UTF8String.fromString(""), + UTF8String.fromString("www.apache.org").subStringIndex(UTF8String.fromString(""), 1)); + // delim does not exist in str + assertEquals(UTF8String.fromString("www.apache.org"), + UTF8String.fromString("www.apache.org").subStringIndex(UTF8String.fromString("#"), 2)); + // delim is 2 chars + assertEquals(UTF8String.fromString("www||apache"), + UTF8String.fromString("www||apache||org").subStringIndex(UTF8String.fromString("||"), 2)); + assertEquals(UTF8String.fromString("apache||org"), + UTF8String.fromString("www||apache||org").subStringIndex(UTF8String.fromString("||"), -2)); + // non ascii chars + assertEquals(UTF8String.fromString("大千世界大"), + UTF8String.fromString("大千世界大千世界").subStringIndex(UTF8String.fromString("千"), 2)); + // overlapped delim + assertEquals(UTF8String.fromString("||"), UTF8String.fromString("||||||").subStringIndex(UTF8String.fromString("|||"), 3)); + assertEquals(UTF8String.fromString("|||"), UTF8String.fromString("||||||").subStringIndex(UTF8String.fromString("|||"), 
-4)); + } + + @Test + public void reverse() { + assertEquals(UTF8String.fromString("olleh"), UTF8String.fromString("hello").reverse()); + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.EMPTY_UTF8.reverse()); + assertEquals(UTF8String.fromString("者行孙"), UTF8String.fromString("孙行者").reverse()); + assertEquals(UTF8String.fromString("者行孙 olleh"), UTF8String.fromString("hello 孙行者").reverse()); + } + + @Test + public void repeat() { + assertEquals(UTF8String.fromString("数d数d数d数d数d"), UTF8String.fromString("数d").repeat(5)); + assertEquals(UTF8String.fromString("数d"), UTF8String.fromString("数d").repeat(1)); + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.fromString("数d").repeat(-1)); + } + + @Test + public void pad() { + assertEquals(UTF8String.fromString("hel"), UTF8String.fromString("hello").lpad(3, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("hello"), UTF8String.fromString("hello").lpad(5, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("?hello"), UTF8String.fromString("hello").lpad(6, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("???????hello"), UTF8String.fromString("hello").lpad(12, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("?????hello"), UTF8String.fromString("hello").lpad(10, UTF8String.fromString("?????"))); + assertEquals(UTF8String.fromString("???????"), UTF8String.EMPTY_UTF8.lpad(7, UTF8String.fromString("?????"))); + + assertEquals(UTF8String.fromString("hel"), UTF8String.fromString("hello").rpad(3, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("hello"), UTF8String.fromString("hello").rpad(5, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("hello?"), UTF8String.fromString("hello").rpad(6, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("hello???????"), UTF8String.fromString("hello").rpad(12, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("hello?????"), 
UTF8String.fromString("hello").rpad(10, UTF8String.fromString("?????"))); + assertEquals(UTF8String.fromString("???????"), UTF8String.EMPTY_UTF8.rpad(7, UTF8String.fromString("?????"))); + + assertEquals(UTF8String.fromString("数据砖"), UTF8String.fromString("数据砖头").lpad(3, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("?数据砖头"), UTF8String.fromString("数据砖头").lpad(5, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("??数据砖头"), UTF8String.fromString("数据砖头").lpad(6, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("孙行数据砖头"), UTF8String.fromString("数据砖头").lpad(6, UTF8String.fromString("孙行者"))); + assertEquals(UTF8String.fromString("孙行者数据砖头"), UTF8String.fromString("数据砖头").lpad(7, UTF8String.fromString("孙行者"))); + assertEquals( + UTF8String.fromString("孙行者孙行者孙行数据砖头"), + UTF8String.fromString("数据砖头").lpad(12, UTF8String.fromString("孙行者"))); + + assertEquals(UTF8String.fromString("数据砖"), UTF8String.fromString("数据砖头").rpad(3, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("数据砖头?"), UTF8String.fromString("数据砖头").rpad(5, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("数据砖头??"), UTF8String.fromString("数据砖头").rpad(6, UTF8String.fromString("????"))); + assertEquals(UTF8String.fromString("数据砖头孙行"), UTF8String.fromString("数据砖头").rpad(6, UTF8String.fromString("孙行者"))); + assertEquals(UTF8String.fromString("数据砖头孙行者"), UTF8String.fromString("数据砖头").rpad(7, UTF8String.fromString("孙行者"))); + assertEquals( + UTF8String.fromString("数据砖头孙行者孙行者孙行"), + UTF8String.fromString("数据砖头").rpad(12, UTF8String.fromString("孙行者"))); + + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.fromString("数据砖头").lpad(-10, UTF8String.fromString("孙行者"))); + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.fromString("数据砖头").lpad(-10, UTF8String.EMPTY_UTF8)); + assertEquals(UTF8String.fromString("数据砖头"), UTF8String.fromString("数据砖头").lpad(5, UTF8String.EMPTY_UTF8)); + assertEquals(UTF8String.fromString("数据砖"), 
UTF8String.fromString("数据砖头").lpad(3, UTF8String.EMPTY_UTF8)); + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.EMPTY_UTF8.lpad(3, UTF8String.EMPTY_UTF8)); + + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.fromString("数据砖头").rpad(-10, UTF8String.fromString("孙行者"))); + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.fromString("数据砖头").rpad(-10, UTF8String.EMPTY_UTF8)); + assertEquals(UTF8String.fromString("数据砖头"), UTF8String.fromString("数据砖头").rpad(5, UTF8String.EMPTY_UTF8)); + assertEquals(UTF8String.fromString("数据砖"), UTF8String.fromString("数据砖头").rpad(3, UTF8String.EMPTY_UTF8)); + assertEquals(UTF8String.EMPTY_UTF8, UTF8String.EMPTY_UTF8.rpad(3, UTF8String.EMPTY_UTF8)); + } + + @Test + public void substringSQL() { + UTF8String e = UTF8String.fromString("example"); + assertEquals(e.substringSQL(0, 2), UTF8String.fromString("ex")); + assertEquals(e.substringSQL(1, 2), UTF8String.fromString("ex")); + assertEquals(e.substringSQL(0, 7), UTF8String.fromString("example")); + assertEquals(e.substringSQL(1, 2), UTF8String.fromString("ex")); + assertEquals(e.substringSQL(0, 100), UTF8String.fromString("example")); + assertEquals(e.substringSQL(1, 100), UTF8String.fromString("example")); + assertEquals(e.substringSQL(2, 2), UTF8String.fromString("xa")); + assertEquals(e.substringSQL(1, 6), UTF8String.fromString("exampl")); + assertEquals(e.substringSQL(2, 100), UTF8String.fromString("xample")); + assertEquals(e.substringSQL(0, 0), UTF8String.fromString("")); + assertEquals(e.substringSQL(100, 4), UTF8String.EMPTY_UTF8); + assertEquals(e.substringSQL(0, Integer.MAX_VALUE), UTF8String.fromString("example")); + assertEquals(e.substringSQL(1, Integer.MAX_VALUE), UTF8String.fromString("example")); + assertEquals(e.substringSQL(2, Integer.MAX_VALUE), UTF8String.fromString("xample")); + } + + @Test + public void split() { + assertTrue(Arrays.equals(UTF8String.fromString("ab,def,ghi").split(UTF8String.fromString(","), -1), + new UTF8String[]{UTF8String.fromString("ab"), 
UTF8String.fromString("def"), UTF8String.fromString("ghi")})); + assertTrue(Arrays.equals(UTF8String.fromString("ab,def,ghi").split(UTF8String.fromString(","), 2), + new UTF8String[]{UTF8String.fromString("ab"), UTF8String.fromString("def,ghi")})); + assertTrue(Arrays.equals(UTF8String.fromString("ab,def,ghi").split(UTF8String.fromString(","), 2), + new UTF8String[]{UTF8String.fromString("ab"), UTF8String.fromString("def,ghi")})); + } + + @Test + public void levenshteinDistance() { + assertEquals(0, UTF8String.EMPTY_UTF8.levenshteinDistance(UTF8String.EMPTY_UTF8)); + assertEquals(1, UTF8String.EMPTY_UTF8.levenshteinDistance(UTF8String.fromString("a"))); + assertEquals(7, UTF8String.fromString("aaapppp").levenshteinDistance(UTF8String.EMPTY_UTF8)); + assertEquals(1, UTF8String.fromString("frog").levenshteinDistance(UTF8String.fromString("fog"))); + assertEquals(3, UTF8String.fromString("fly").levenshteinDistance(UTF8String.fromString("ant"))); + assertEquals(7, UTF8String.fromString("elephant").levenshteinDistance(UTF8String.fromString("hippo"))); + assertEquals(7, UTF8String.fromString("hippo").levenshteinDistance(UTF8String.fromString("elephant"))); + assertEquals(8, UTF8String.fromString("hippo").levenshteinDistance(UTF8String.fromString("zzzzzzzz"))); + assertEquals(1, UTF8String.fromString("hello").levenshteinDistance(UTF8String.fromString("hallo"))); + assertEquals(4, UTF8String.fromString("世界千世").levenshteinDistance(UTF8String.fromString("千a世b"))); + } + + @Test + public void translate() { + assertEquals( + UTF8String.fromString("1a2s3ae"), + UTF8String.fromString("translate").translate(ImmutableMap.of( + 'r', '1', + 'n', '2', + 'l', '3', + 't', '\0' + ))); + assertEquals( + UTF8String.fromString("translate"), + UTF8String.fromString("translate").translate(new HashMap())); + assertEquals( + UTF8String.fromString("asae"), + UTF8String.fromString("translate").translate(ImmutableMap.of( + 'r', '\0', + 'n', '\0', + 'l', '\0', + 't', '\0' + ))); + assertEquals( + 
UTF8String.fromString("aa世b"), + UTF8String.fromString("花花世界").translate(ImmutableMap.of( + '花', 'a', + '界', 'b' + ))); + } + + @Test + public void createBlankString() { + assertEquals(UTF8String.fromString(" "), UTF8String.blankString(1)); + assertEquals(UTF8String.fromString(" "), UTF8String.blankString(2)); + assertEquals(UTF8String.fromString(" "), UTF8String.blankString(3)); + assertEquals(UTF8String.fromString(""), UTF8String.blankString(0)); + } + + @Test + public void findInSet() { + assertEquals(1, UTF8String.fromString("ab").findInSet(UTF8String.fromString("ab"))); + assertEquals(2, UTF8String.fromString("a,b").findInSet(UTF8String.fromString("b"))); + assertEquals(3, UTF8String.fromString("abc,b,ab,c,def").findInSet(UTF8String.fromString("ab"))); + assertEquals(1, UTF8String.fromString("ab,abc,b,ab,c,def").findInSet(UTF8String.fromString("ab"))); + assertEquals(4, UTF8String.fromString(",,,ab,abc,b,ab,c,def").findInSet(UTF8String.fromString("ab"))); + assertEquals(1, UTF8String.fromString(",ab,abc,b,ab,c,def").findInSet(UTF8String.fromString(""))); + assertEquals(4, UTF8String.fromString("数据砖头,abc,b,ab,c,def").findInSet(UTF8String.fromString("ab"))); + assertEquals(6, UTF8String.fromString("数据砖头,abc,b,ab,c,def").findInSet(UTF8String.fromString("def"))); + } + + @Test + public void soundex() { + assertEquals(UTF8String.fromString("Robert").soundex(), UTF8String.fromString("R163")); + assertEquals(UTF8String.fromString("Rupert").soundex(), UTF8String.fromString("R163")); + assertEquals(UTF8String.fromString("Rubin").soundex(), UTF8String.fromString("R150")); + assertEquals(UTF8String.fromString("Ashcraft").soundex(), UTF8String.fromString("A261")); + assertEquals(UTF8String.fromString("Ashcroft").soundex(), UTF8String.fromString("A261")); + assertEquals(UTF8String.fromString("Burroughs").soundex(), UTF8String.fromString("B620")); + assertEquals(UTF8String.fromString("Burrows").soundex(), UTF8String.fromString("B620")); + 
assertEquals(UTF8String.fromString("Ekzampul").soundex(), UTF8String.fromString("E251")); + assertEquals(UTF8String.fromString("Example").soundex(), UTF8String.fromString("E251")); + assertEquals(UTF8String.fromString("Ellery").soundex(), UTF8String.fromString("E460")); + assertEquals(UTF8String.fromString("Euler").soundex(), UTF8String.fromString("E460")); + assertEquals(UTF8String.fromString("Ghosh").soundex(), UTF8String.fromString("G200")); + assertEquals(UTF8String.fromString("Gauss").soundex(), UTF8String.fromString("G200")); + assertEquals(UTF8String.fromString("Gutierrez").soundex(), UTF8String.fromString("G362")); + assertEquals(UTF8String.fromString("Heilbronn").soundex(), UTF8String.fromString("H416")); + assertEquals(UTF8String.fromString("Hilbert").soundex(), UTF8String.fromString("H416")); + assertEquals(UTF8String.fromString("Jackson").soundex(), UTF8String.fromString("J250")); + assertEquals(UTF8String.fromString("Kant").soundex(), UTF8String.fromString("K530")); + assertEquals(UTF8String.fromString("Knuth").soundex(), UTF8String.fromString("K530")); + assertEquals(UTF8String.fromString("Lee").soundex(), UTF8String.fromString("L000")); + assertEquals(UTF8String.fromString("Lukasiewicz").soundex(), UTF8String.fromString("L222")); + assertEquals(UTF8String.fromString("Lissajous").soundex(), UTF8String.fromString("L222")); + assertEquals(UTF8String.fromString("Ladd").soundex(), UTF8String.fromString("L300")); + assertEquals(UTF8String.fromString("Lloyd").soundex(), UTF8String.fromString("L300")); + assertEquals(UTF8String.fromString("Moses").soundex(), UTF8String.fromString("M220")); + assertEquals(UTF8String.fromString("O'Hara").soundex(), UTF8String.fromString("O600")); + assertEquals(UTF8String.fromString("Pfister").soundex(), UTF8String.fromString("P236")); + assertEquals(UTF8String.fromString("Rubin").soundex(), UTF8String.fromString("R150")); + assertEquals(UTF8String.fromString("Robert").soundex(), UTF8String.fromString("R163")); + 
assertEquals(UTF8String.fromString("Rupert").soundex(), UTF8String.fromString("R163")); + assertEquals(UTF8String.fromString("Soundex").soundex(), UTF8String.fromString("S532")); + assertEquals(UTF8String.fromString("Sownteks").soundex(), UTF8String.fromString("S532")); + assertEquals(UTF8String.fromString("Tymczak").soundex(), UTF8String.fromString("T522")); + assertEquals(UTF8String.fromString("VanDeusen").soundex(), UTF8String.fromString("V532")); + assertEquals(UTF8String.fromString("Washington").soundex(), UTF8String.fromString("W252")); + assertEquals(UTF8String.fromString("Wheaton").soundex(), UTF8String.fromString("W350")); + + assertEquals(UTF8String.fromString("a").soundex(), UTF8String.fromString("A000")); + assertEquals(UTF8String.fromString("ab").soundex(), UTF8String.fromString("A100")); + assertEquals(UTF8String.fromString("abc").soundex(), UTF8String.fromString("A120")); + assertEquals(UTF8String.fromString("abcd").soundex(), UTF8String.fromString("A123")); + assertEquals(UTF8String.fromString("").soundex(), UTF8String.fromString("")); + assertEquals(UTF8String.fromString("123").soundex(), UTF8String.fromString("123")); + assertEquals(UTF8String.fromString("世界千世").soundex(), UTF8String.fromString("世界千世")); + } +} diff --git a/src/test/java/io/mycat/migrate/MigrateUtilsTest.java b/src/test/java/io/mycat/migrate/MigrateUtilsTest.java new file mode 100644 index 000000000..c58a2d9b8 --- /dev/null +++ b/src/test/java/io/mycat/migrate/MigrateUtilsTest.java @@ -0,0 +1,270 @@ +package io.mycat.migrate; + +import com.google.common.collect.Lists; +import io.mycat.migrate.MigrateTask; +import io.mycat.migrate.MigrateUtils; +import org.junit.Assert; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.util.*; + +import static io.mycat.route.function.PartitionByCRC32PreSlot.Range; + +/** + * Created by magicdoom on 2016/9/16. 
+ */ +public class MigrateUtilsTest { + @Test + public void balanceExpand() + { String table="test"; + Map> integerListMap = new TreeMap<>(); + integerListMap.put(0,Lists.newArrayList(new Range(0,32))) ; + integerListMap.put(1,Lists.newArrayList(new Range(33,65))) ; + integerListMap.put(2,Lists.newArrayList(new Range(66,99))) ; + pringList("beforse balance :",integerListMap); + //dn1=0-32 dn2=33-65 dn3=66-99 + int totalSlots=100; + List oldDataNodes = Lists.newArrayList("dn1","dn2","dn3"); + List newDataNodes = Lists.newArrayList("dn4","dn5"); + Map> tasks= MigrateUtils + .balanceExpand(table, integerListMap, oldDataNodes, newDataNodes,totalSlots); + for (Map.Entry> stringListEntry : tasks.entrySet()) { + String key=stringListEntry.getKey(); + List rangeList=new ArrayList<>(); + List value=stringListEntry.getValue(); + for (MigrateTask task : value) { + rangeList.addAll(task.getSlots()); + } + Assert.assertEquals(true,value.size()==2); + if("dn4".equals(key)) { + Assert.assertEquals(0, rangeList.get(0).start); + Assert.assertEquals(12, rangeList.get(0).end); + Assert.assertEquals(33, rangeList.get(1).start); + Assert.assertEquals(39, rangeList.get(1).end); + } else if("dn5".equals(key)) { + Assert.assertEquals(40, rangeList.get(0).start); + Assert.assertEquals(45, rangeList.get(0).end); + Assert.assertEquals(66, rangeList.get(1).start); + Assert.assertEquals(79, rangeList.get(1).end); + } + integerListMap.put(Integer.parseInt(key.substring(2))-1,rangeList); + } + + pringList("after balance :",integerListMap); + + System.out.println("agin balance ....................."); + + + + + oldDataNodes = Lists.newArrayList("dn1","dn2","dn3","dn4","dn5"); + newDataNodes = Lists.newArrayList("dn6","dn7","dn8","dn9"); + Map> tasks1= MigrateUtils.balanceExpand(table, integerListMap, oldDataNodes, newDataNodes,totalSlots); + for (Map.Entry> stringListEntry : tasks1.entrySet()) { + String key=stringListEntry.getKey(); + List rangeList=new ArrayList<>(); + List 
value=stringListEntry.getValue(); + for (MigrateTask task : value) { + rangeList.addAll(task.getSlots()); + } + if("dn6".equals(key)) { + Assert.assertEquals(13, rangeList.get(0).start); + Assert.assertEquals(21, rangeList.get(0).end); + Assert.assertEquals(46, rangeList.get(1).start); + Assert.assertEquals(48, rangeList.get(1).end); + } else if("dn7".equals(key)) { + Assert.assertEquals(49, rangeList.get(0).start); + Assert.assertEquals(54, rangeList.get(0).end); + Assert.assertEquals(80, rangeList.get(1).start); + Assert.assertEquals(84, rangeList.get(1).end); + } else if("dn8".equals(key)) { + Assert.assertEquals(85, rangeList.get(0).start); + Assert.assertEquals(88, rangeList.get(0).end); + Assert.assertEquals(0, rangeList.get(1).start); + Assert.assertEquals(6, rangeList.get(1).end); + } else if("dn9".equals(key)) { + Assert.assertEquals(7, rangeList.get(0).start); + Assert.assertEquals(8, rangeList.get(0).end); + Assert.assertEquals(40, rangeList.get(1).start); + Assert.assertEquals(45, rangeList.get(1).end); + } + + integerListMap.put(Integer.parseInt(key.substring(2))-1,rangeList); + } + + pringList("agin balance :",integerListMap); + + + oldDataNodes = Lists.newArrayList("dn1","dn2","dn3","dn4","dn5","dn6","dn7","dn8","dn9"); + newDataNodes = Lists.newArrayList("dn10","dn11","dn12","dn13","dn14","dn15","dn16","dn17","dn18","dn19","dn20","dn21","dn22","dn23","dn24","dn25","dn26","dn27","dn28","dn29","dn30","dn31","dn32","dn33","dn34","dn35","dn36","dn37","dn38","dn39","dn40","dn41","dn42","dn43","dn44","dn45","dn46","dn47","dn48","dn49","dn50","dn51","dn52","dn53","dn54","dn55","dn56","dn57","dn58","dn59","dn60","dn61","dn62","dn63","dn64","dn65","dn66","dn67","dn68","dn69","dn70","dn71","dn72","dn73","dn74","dn75","dn76","dn77","dn78","dn79","dn80","dn81","dn82","dn83","dn84","dn85","dn86","dn87","dn88","dn89","dn90","dn91","dn92","dn93","dn94","dn95","dn96","dn97","dn98","dn99","dn100" + ); + Map> tasks2= MigrateUtils.balanceExpand(table, integerListMap, 
oldDataNodes, newDataNodes,totalSlots); + for (Map.Entry> stringListEntry : tasks2.entrySet()) { + String key=stringListEntry.getKey(); + List rangeList=new ArrayList<>(); + List value=stringListEntry.getValue(); + for (MigrateTask task : value) { + rangeList.addAll(task.getSlots()); + } + + if("dn10".equals(key)) { + Assert.assertEquals(22, rangeList.get(0).start); + Assert.assertEquals(22, rangeList.get(0).end); + } else if("dn100".equals(key)) { + Assert.assertEquals(67, rangeList.get(0).start); + Assert.assertEquals(67, rangeList.get(0).end); + } else if("dn50".equals(key)) { + Assert.assertEquals(69, rangeList.get(0).start); + Assert.assertEquals(69, rangeList.get(0).end); + } else if("dn99".equals(key)) { + Assert.assertEquals(66, rangeList.get(0).start); + Assert.assertEquals(66, rangeList.get(0).end); + + } + integerListMap.put(Integer.parseInt(key.substring(2))-1,rangeList); + } + + pringList("agin agin balance :",integerListMap); + + } + @Test + public void balanceExpand1() { + String table = "test1"; + //4=81920-102399 + // 3=61440-81919 + // 2=40960-61439 + // 1=20480-40959 + // 0=0-20479 + Map> integerListMap = new TreeMap<>(); + integerListMap.put(0, Lists.newArrayList(new Range(0, 20479))); + integerListMap.put(1, Lists.newArrayList(new Range(20480, 40959))); + integerListMap.put(2, Lists.newArrayList(new Range(40960, 61439))); + integerListMap.put(3, Lists.newArrayList(new Range(61440, 81919))); + integerListMap.put(4, Lists.newArrayList(new Range(81920, 102399))); + pringList("beforse balance :", integerListMap); + //dn1=0-32 dn2=33-65 dn3=66-99 + int totalSlots = 102400; + List oldDataNodes = Lists.newArrayList("dn1", "dn2", "dn3","dn4", "dn5"); + List newDataNodes = Lists.newArrayList("dn6", "dn7", "dn8","dn9", "dn10"); + Map> tasks = MigrateUtils + .balanceExpand(table, integerListMap, oldDataNodes, newDataNodes, totalSlots); + + List allTaskList=new ArrayList<>(); + + for (Map.Entry> stringListEntry : tasks.entrySet()) { + String 
key=stringListEntry.getKey(); + List rangeList=new ArrayList<>(); + List value=stringListEntry.getValue(); + allTaskList.addAll(value); + for (MigrateTask task : value) { + rangeList.addAll(task.getSlots()); + } + + + integerListMap.put(Integer.parseInt(key.substring(2))-1,rangeList); + } + pringList("after balance :", integerListMap); + + + List allNewDataNodes=new ArrayList<>(); + allNewDataNodes.addAll(oldDataNodes); + allNewDataNodes.addAll(newDataNodes); + Properties prop = new Properties(); + prop.put("0","0-20479"); + prop.put("1","20480-40959"); + prop.put("2","40960-61439"); + prop.put("3","61440-81919"); + prop.put("4","81920-102399"); + for (MigrateTask migrateTask : allTaskList) { + modifyRuleData(prop,migrateTask,allNewDataNodes); + } + + System.out.println(); + } + + private void modifyRuleData( Properties prop ,MigrateTask task ,List allNewDataNodes){ + int fromIndex=-1; + int toIndex=-1; + List dataNodes= allNewDataNodes; + for (int i = 0; i < dataNodes.size(); i++) { + String dataNode = dataNodes.get(i); + if(dataNode.equalsIgnoreCase(task.getFrom())){ + fromIndex=i; + } else + if(dataNode.equalsIgnoreCase(task.getTo())){ + toIndex=i; + } + } + String from= prop.getProperty(String.valueOf(fromIndex)) ; + String to= prop.getProperty(String.valueOf(toIndex)) ; + String fromRemain=removeRangesRemain(from,task.getSlots()); + String taskRanges = MigrateUtils.convertRangeListToString(task.getSlots()); + String newTo=to==null? 
taskRanges : to+","+taskRanges; + prop.setProperty(String.valueOf(fromIndex),fromRemain); + prop.setProperty(String.valueOf(toIndex),newTo); + } + + private String removeRangesRemain(String ori,List rangeList){ + List ranges=MigrateUtils.convertRangeStringToList(ori); + List ramain= MigrateUtils.removeAndGetRemain(ranges,rangeList); + return MigrateUtils.convertRangeListToString(ramain); + } + + private void pringList(String comm,Map> integerListMap) { + System.out.println(comm); + for (Map.Entry> integerListEntry : integerListMap.entrySet()) { + Integer key=integerListEntry.getKey(); + List value=integerListEntry.getValue(); + System.out.println(key+":"+listToString(value)+":"+MigrateUtils.getCurTotalSize(value)); + } + } + + private String listToString(List rangeList) + { String rtn=""; + for (Range range : rangeList) { + rtn=rtn+range.start+"-"+range.end+","; + } + + return rtn; + } + + + + @Test + public void removeAndGetRemain(){ + List oldRangeList1=Lists.newArrayList(new Range(0,51199)); + List newRangeList1=Lists.newArrayList(new Range(0,20479),new Range(20480,30719)); + List result1=MigrateUtils.removeAndGetRemain(oldRangeList1,newRangeList1); + Assert.assertEquals(1,result1.size()); + Assert.assertEquals(30720,result1.get(0).start); + Assert.assertEquals(51199,result1.get(0).end); + + List oldRangeList2=Lists.newArrayList(new Range(51200,102399)); + List newRangeList2=Lists.newArrayList(new Range(61440,81919),new Range(51200,61439)); + List result2=MigrateUtils.removeAndGetRemain(oldRangeList2,newRangeList2); + Assert.assertEquals(1,result2.size()); + Assert.assertEquals(81920,result2.get(0).start); + Assert.assertEquals(102399,result2.get(0).end); + + } + @Test + public void removeAndGetRemain1(){ + List oldRangeList1=Lists.newArrayList(new Range(0,0),new Range(1,5),new Range(6,40000),new Range(40001,51199)); + List newRangeList1=Lists.newArrayList(new Range(0,3),new Range(20480,30719)); + List 
result1=MigrateUtils.removeAndGetRemain(oldRangeList1,newRangeList1); + Assert.assertEquals(4,result1.size()); + Assert.assertEquals(4,result1.get(0).start); + Assert.assertEquals(5,result1.get(0).end); + Assert.assertEquals(6,result1.get(1).start); + Assert.assertEquals(20479,result1.get(1).end); + Assert.assertEquals(30720,result1.get(2).start); + Assert.assertEquals(40000,result1.get(2).end); + Assert.assertEquals(40001,result1.get(3).start); + Assert.assertEquals(51199,result1.get(3).end); + + + + } + +} diff --git a/src/test/java/io/mycat/model/M2.java b/src/test/java/io/mycat/model/M2.java index 4f7e571db..851ec9f52 100644 --- a/src/test/java/io/mycat/model/M2.java +++ b/src/test/java/io/mycat/model/M2.java @@ -23,11 +23,10 @@ */ package io.mycat.model; -import io.mycat.net.ExecutorUtil; - import java.util.concurrent.BlockingQueue; import java.util.concurrent.ThreadPoolExecutor; +import io.mycat.util.ExecutorUtil; import jsr166y.LinkedTransferQueue; /** diff --git a/src/test/java/io/mycat/mpp/TestSorter.java b/src/test/java/io/mycat/mpp/TestSorter.java index f5be14f2c..422c3e282 100644 --- a/src/test/java/io/mycat/mpp/TestSorter.java +++ b/src/test/java/io/mycat/mpp/TestSorter.java @@ -23,11 +23,11 @@ */ package io.mycat.mpp; -import io.mycat.util.ByteUtil; - import org.junit.Assert; import org.junit.Test; +import io.mycat.util.ByteUtil; + public class TestSorter { @Test diff --git a/src/test/java/io/mycat/mysql/MySQLMessageTest.java b/src/test/java/io/mycat/mysql/MySQLMessageTest.java index 912c44cb1..d261fe766 100644 --- a/src/test/java/io/mycat/mysql/MySQLMessageTest.java +++ b/src/test/java/io/mycat/mysql/MySQLMessageTest.java @@ -23,11 +23,11 @@ */ package io.mycat.mysql; -import io.mycat.server.packet.MySQLMessage; - import org.junit.Assert; import org.junit.Test; +import io.mycat.backend.mysql.MySQLMessage; + /** * @author mycat */ diff --git a/src/test/java/io/mycat/parser/ManagerParserTest.java b/src/test/java/io/mycat/parser/ManagerParserTest.java 
index 813e55915..3b3ccef43 100644 --- a/src/test/java/io/mycat/parser/ManagerParserTest.java +++ b/src/test/java/io/mycat/parser/ManagerParserTest.java @@ -23,16 +23,16 @@ */ package io.mycat.parser; -import io.mycat.server.parser.ManagerParse; -import io.mycat.server.parser.ManagerParseClear; -import io.mycat.server.parser.ManagerParseReload; -import io.mycat.server.parser.ManagerParseRollback; -import io.mycat.server.parser.ManagerParseShow; -import io.mycat.server.parser.ManagerParseStop; - import org.junit.Assert; import org.junit.Test; +import io.mycat.route.parser.ManagerParse; +import io.mycat.route.parser.ManagerParseClear; +import io.mycat.route.parser.ManagerParseReload; +import io.mycat.route.parser.ManagerParseRollback; +import io.mycat.route.parser.ManagerParseShow; +import io.mycat.route.parser.ManagerParseStop; + /** * @author mycat */ @@ -394,5 +394,20 @@ public void testclearSlowDataNode() { Assert.assertEquals(ManagerParseClear.SLOW_DATANODE, 0xff & ManagerParseClear.parse("clear @@SLOW where DATANODE= d", 5)); } + @Test + public void testHeartBearDetail() { + Assert.assertEquals(ManagerParseShow.HEARTBEAT_DETAIL, + 0xff & ManagerParseShow.parse("show @@heartbeat.detail where name=master",5)); + } + @Test + public void testSynStatus() { + Assert.assertEquals(ManagerParseShow.DATASOURCE_SYNC, + 0xff & ManagerParseShow.parse("show @@datasource.synstatus",5)); + } + @Test + public void testSynDetail() { + Assert.assertEquals(ManagerParseShow.DATASOURCE_SYNC_DETAIL, + 0xff & ManagerParseShow.parse("show @@datasource.syndetail where name=slave",5)); + } } \ No newline at end of file diff --git a/src/test/java/io/mycat/parser/ManagerParserTestPerf.java b/src/test/java/io/mycat/parser/ManagerParserTestPerf.java index ee447be69..4cc6bbb41 100644 --- a/src/test/java/io/mycat/parser/ManagerParserTestPerf.java +++ b/src/test/java/io/mycat/parser/ManagerParserTestPerf.java @@ -23,7 +23,7 @@ */ package io.mycat.parser; -import 
io.mycat.server.parser.ManagerParse; +import io.mycat.route.parser.ManagerParse; /** * @author mycat diff --git a/src/test/java/io/mycat/parser/ServerParseTest.java b/src/test/java/io/mycat/parser/ServerParseTest.java index 1600cc306..24581e1c0 100644 --- a/src/test/java/io/mycat/parser/ServerParseTest.java +++ b/src/test/java/io/mycat/parser/ServerParseTest.java @@ -1,10 +1,11 @@ package io.mycat.parser; -import io.mycat.server.parser.ServerParse; import junit.framework.Assert; import org.junit.Test; +import io.mycat.server.parser.ServerParse; + public class ServerParseTest { /** * public static final int OTHER = -1; diff --git a/src/test/java/io/mycat/parser/ServerParserTest.java b/src/test/java/io/mycat/parser/ServerParserTest.java index 5306f18d3..bb94cd13d 100644 --- a/src/test/java/io/mycat/parser/ServerParserTest.java +++ b/src/test/java/io/mycat/parser/ServerParserTest.java @@ -23,15 +23,15 @@ */ package io.mycat.parser; +import org.junit.Assert; +import org.junit.Test; + import io.mycat.server.parser.ServerParse; import io.mycat.server.parser.ServerParseSelect; import io.mycat.server.parser.ServerParseSet; import io.mycat.server.parser.ServerParseShow; import io.mycat.server.parser.ServerParseStart; -import org.junit.Assert; -import org.junit.Test; - /** * @author mycat */ @@ -63,6 +63,20 @@ public void testComment() { Assert.assertEquals(ServerParse.MYSQL_COMMENT, ServerParse.parse("/*SET @saved_cs_client = @@character_set_client */")); } + @Test + public void testMycatComment() { + Assert.assertEquals(ServerParse.SELECT, 0xff & ServerParse.parse("/*#mycat:schema=DN1*/SELECT ...")); + Assert.assertEquals(ServerParse.UPDATE, 0xff & ServerParse.parse("/*#mycat: schema = DN1 */ UPDATE ...")); + Assert.assertEquals(ServerParse.DELETE, 0xff & ServerParse.parse("/*#mycat: sql = SELECT id FROM user */ DELETE ...")); + } + + @Test + public void testOldMycatComment() { + Assert.assertEquals(ServerParse.SELECT, 0xff & ServerParse.parse("/*!mycat:schema=DN1*/SELECT 
...")); + Assert.assertEquals(ServerParse.UPDATE, 0xff & ServerParse.parse("/*!mycat: schema = DN1 */ UPDATE ...")); + Assert.assertEquals(ServerParse.DELETE, 0xff & ServerParse.parse("/*!mycat: sql = SELECT id FROM user */ DELETE ...")); + } + @Test public void testIsDelete() { Assert.assertEquals(ServerParse.DELETE, ServerParse.parse("delete ...")); @@ -453,5 +467,32 @@ public void testLastInsertId() { stmt = "select last_insert_id(#\n\r) as 'a"; Assert.assertEquals(ServerParseSelect.OTHER, ServerParseSelect.parse(stmt, 6)); } + + @Test + public void testLockTable() { + Assert.assertEquals(ServerParse.LOCK, ServerParse.parse("lock tables ttt write;")); + Assert.assertEquals(ServerParse.LOCK, ServerParse.parse(" lock tables ttt read;")); + Assert.assertEquals(ServerParse.LOCK, ServerParse.parse("lock tables")); + } + + @Test + public void testUnlockTable() { + Assert.assertEquals(ServerParse.UNLOCK, ServerParse.parse("unlock tables")); + Assert.assertEquals(ServerParse.UNLOCK, ServerParse.parse(" unlock tables")); + } + + @Test + public void testSetXAOn() { + Assert.assertEquals(ServerParseSet.XA_FLAG_ON, ServerParseSet.parse("set xa=on", 3)); + Assert.assertEquals(ServerParseSet.XA_FLAG_ON, ServerParseSet.parse("set xa = on", 3)); + Assert.assertEquals(ServerParseSet.XA_FLAG_ON, ServerParseSet.parse("set xa \t\n\r = \t\n\r on", 3)); + } + + @Test + public void testSetXAOff() { + Assert.assertEquals(ServerParseSet.XA_FLAG_OFF, ServerParseSet.parse("set xa=off", 3)); + Assert.assertEquals(ServerParseSet.XA_FLAG_OFF, ServerParseSet.parse("set xa = off", 3)); + Assert.assertEquals(ServerParseSet.XA_FLAG_OFF, ServerParseSet.parse("set xa \t\n\r = \t\n\r off", 3)); + } } \ No newline at end of file diff --git a/src/test/java/io/mycat/parser/TestEscapeProcess.java b/src/test/java/io/mycat/parser/TestEscapeProcess.java index 6a6939af7..829eafff0 100644 --- a/src/test/java/io/mycat/parser/TestEscapeProcess.java +++ b/src/test/java/io/mycat/parser/TestEscapeProcess.java @@ 
-17,6 +17,7 @@ package io.mycat.parser; import static org.junit.Assert.assertEquals; +import io.mycat.MycatServer; import io.mycat.server.interceptor.impl.DefaultSqlInterceptor; import org.junit.Test; @@ -38,6 +39,7 @@ public class TestEscapeProcess { @Test public void testEscapeProcess() { + MycatServer.getInstance().getConfig().getSystem().setDefaultSqlParser("fdbparser"); String sqlProcessed = DefaultSqlInterceptor.processEscape(sql); assertEquals(sqlProcessed, sqlret); String sqlProcessed1 = DefaultSqlInterceptor diff --git a/src/test/java/io/mycat/parser/druid/DruidSelectParserTest.java b/src/test/java/io/mycat/parser/druid/DruidSelectParserTest.java new file mode 100644 index 000000000..f36b3fc51 --- /dev/null +++ b/src/test/java/io/mycat/parser/druid/DruidSelectParserTest.java @@ -0,0 +1,70 @@ +package io.mycat.parser.druid; + +import com.alibaba.druid.sql.ast.SQLExpr; +import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; +import io.mycat.route.parser.druid.impl.DruidSelectParser; +import org.junit.Assert; +import org.junit.Test; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Created by Hash Zhang on 2016/4/29. + * Modified by Hash Zhang on 2016/5/25 add testGroupByWithViewAlias. 
+ */ +public class DruidSelectParserTest { + DruidSelectParser druidSelectParser = new DruidSelectParser(); + + /** + * 此方法检测DruidSelectParser的buildGroupByCols方法是否修改了函数列 + * 因为select的函数列并不做alias处理, + * 所以在groupby也对函数列不做修改 + * + * @throws NoSuchMethodException + * @throws InvocationTargetException + * @throws IllegalAccessException + */ + @Test + public void testGroupByWithAlias() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException { + String functionColumn = "DATE_FORMAT(h.times,'%b %d %Y %h:%i %p')"; + Object result = invokeGroupBy(functionColumn); + Assert.assertEquals(functionColumn, ((String[]) result)[0]); + } + + /** + * 此方法检测DruidSelectParser对于子查询别名的全局解析 + * + * @throws NoSuchMethodException + * @throws InvocationTargetException + * @throws IllegalAccessException + */ + @Test + public void testGroupByWithViewAlias() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException { + String functionColumn = "select id from (select h.id from hotnews h union select h.title from hotnews h ) as t1 group by t1.id;"; + Object result = invokeGroupBy(functionColumn); + Assert.assertEquals(functionColumn, ((String[]) result)[0]); + } + + public Object invokeGroupBy(String functionColumn) throws NoSuchMethodException, InvocationTargetException, IllegalAccessException { + Map aliaColumns = new TreeMap<>(); + SQLIdentifierExpr sqlExpr = mock(SQLIdentifierExpr.class); + SQLIdentifierExpr expr = mock(SQLIdentifierExpr.class); + List groupByItems = new ArrayList<>(); + groupByItems.add(sqlExpr); + when((sqlExpr).getName()).thenReturn(functionColumn); + Class c = DruidSelectParser.class; + Method method = c.getDeclaredMethod("buildGroupByCols", new Class[]{List.class, Map.class}); + method.setAccessible(true); + return method.invoke(druidSelectParser, groupByItems, aliaColumns); + } + + +} \ No newline at end of file diff --git a/src/test/java/io/mycat/parser/druid/DruidSequenceHandlerTest.java 
b/src/test/java/io/mycat/parser/druid/DruidSequenceHandlerTest.java index c9e255cc9..68aa17f58 100644 --- a/src/test/java/io/mycat/parser/druid/DruidSequenceHandlerTest.java +++ b/src/test/java/io/mycat/parser/druid/DruidSequenceHandlerTest.java @@ -1,58 +1,59 @@ -package io.mycat.parser.druid; - -import static junit.framework.Assert.assertEquals; -import io.mycat.route.parser.druid.DruidSequenceHandler; -import io.mycat.server.config.node.SystemConfig; - -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.junit.Test; - -/** - * 获取MYCAT SEQ 表名。 - */ -public class DruidSequenceHandlerTest { - - @Test - public void test() { - DruidSequenceHandler handler = new DruidSequenceHandler(SystemConfig.SEQUENCEHANDLER_LOCALFILE); - - String sql = "select next value for mycatseq_xxxx".toUpperCase(); - String tableName = handler.getTableName(sql); - assertEquals(tableName, "XXXX"); - - - sql = " insert into test(id,sid)values(next value for MYCATSEQ_TEST,1)".toUpperCase(); - tableName = handler.getTableName(sql); - assertEquals(tableName, "TEST"); - - sql = " insert into test(id,sid)values(next value for MYCATSEQ_TEST ,1)".toUpperCase(); - tableName = handler.getTableName(sql); - assertEquals(tableName, "TEST"); - - sql = " insert into test(id)values(next value for MYCATSEQ_TEST )".toUpperCase(); - tableName = handler.getTableName(sql); - assertEquals(tableName, "TEST"); - } - - @Test - public void test2() { - DruidSequenceHandler handler = new DruidSequenceHandler(SystemConfig.SEQUENCEHANDLER_LOCALFILE); - - String sql = "/* APPLICATIONNAME=DBEAVER 3.3.2 - MAIN CONNECTION */ SELECT NEXT VALUE FOR MYCATSEQ_XXXX".toUpperCase(); - String tableName = handler.getTableName(sql); - assertEquals(tableName, "XXXX"); - } - - public static void main(String[] args) - { - String patten="(?:(\\s*next\\s+value\\s+for\\s*MYCATSEQ_(\\w+))(,|\\)|\\s)*)+"; - Pattern pattern = Pattern.compile(patten,Pattern.CASE_INSENSITIVE); - String sql="insert into 
test(id,sid)values( next value for MYCATSEQ_TEST ,1)"; - Matcher matcher = pattern.matcher(sql); - System.out.println(matcher.find()); - System.out.println(matcher.group(1)); - System.out.println(matcher.group(2)); - } -} +package io.mycat.parser.druid; + +import static junit.framework.Assert.assertEquals; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.junit.Test; + +import io.mycat.config.model.SystemConfig; +import io.mycat.route.parser.druid.DruidSequenceHandler; + +/** + * 获取MYCAT SEQ 表名。 + */ +public class DruidSequenceHandlerTest { + + @Test + public void test() { + DruidSequenceHandler handler = new DruidSequenceHandler(SystemConfig.SEQUENCEHANDLER_LOCALFILE); + + String sql = "select next value for mycatseq_xxxx".toUpperCase(); + String tableName = handler.getTableName(sql); + assertEquals(tableName, "XXXX"); + + + sql = " insert into test(id,sid)values(next value for MYCATSEQ_TEST,1)".toUpperCase(); + tableName = handler.getTableName(sql); + assertEquals(tableName, "TEST"); + + sql = " insert into test(id,sid)values(next value for MYCATSEQ_TEST ,1)".toUpperCase(); + tableName = handler.getTableName(sql); + assertEquals(tableName, "TEST"); + + sql = " insert into test(id)values(next value for MYCATSEQ_TEST )".toUpperCase(); + tableName = handler.getTableName(sql); + assertEquals(tableName, "TEST"); + } + + @Test + public void test2() { + DruidSequenceHandler handler = new DruidSequenceHandler(SystemConfig.SEQUENCEHANDLER_LOCALFILE); + + String sql = "/* APPLICATIONNAME=DBEAVER 3.3.2 - MAIN CONNECTION */ SELECT NEXT VALUE FOR MYCATSEQ_XXXX".toUpperCase(); + String tableName = handler.getTableName(sql); + assertEquals(tableName, "XXXX"); + } + + public static void main(String[] args) + { + String patten="(?:(\\s*next\\s+value\\s+for\\s*MYCATSEQ_(\\w+))(,|\\)|\\s)*)+"; + Pattern pattern = Pattern.compile(patten,Pattern.CASE_INSENSITIVE); + String sql="insert into test(id,sid)values( next value for MYCATSEQ_TEST ,1)"; + 
Matcher matcher = pattern.matcher(sql); + System.out.println(matcher.find()); + System.out.println(matcher.group(1)); + System.out.println(matcher.group(2)); + } +} diff --git a/src/test/java/io/mycat/parser/druid/DruidUpdateParserTest.java b/src/test/java/io/mycat/parser/druid/DruidUpdateParserTest.java new file mode 100644 index 000000000..208244998 --- /dev/null +++ b/src/test/java/io/mycat/parser/druid/DruidUpdateParserTest.java @@ -0,0 +1,178 @@ +package io.mycat.parser.druid; + +import com.alibaba.druid.sql.ast.SQLExpr; +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr; +import com.alibaba.druid.sql.ast.statement.SQLUpdateStatement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlUpdateStatement; +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.route.RouteResultset; +import io.mycat.route.parser.druid.impl.DruidUpdateParser; +import org.junit.Assert; +import org.junit.Test; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.sql.SQLNonTransientException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * @author Hash Zhang + * @version 1.0.0 + * @date 2016/7/7 + */ + +public class DruidUpdateParserTest { + /** + * 测试单表更新分片字段 + * @throws NoSuchMethodException + */ + @Test + public void testUpdateShardColumn() throws NoSuchMethodException{ + throwExceptionParse("update hotnews set id = 1 where name = 234;", true); + throwExceptionParse("update hotnews set id = 1 where id = 3;", true); + throwExceptionParse("update hotnews set id = 1, name = '123' where id = 1 and name = '234'", false); + throwExceptionParse("update hotnews set id = 1, name = '123' where id = 1 or name = '234'", true); + 
throwExceptionParse("update hotnews set id = 'A', name = '123' where id = 'A' and name = '234'", false); + throwExceptionParse("update hotnews set id = 'A', name = '123' where id = 'A' or name = '234'", true); + throwExceptionParse("update hotnews set id = 1.5, name = '123' where id = 1.5 and name = '234'", false); + throwExceptionParse("update hotnews set id = 1.5, name = '123' where id = 1.5 or name = '234'", true); + + throwExceptionParse("update hotnews set id = 1, name = '123' where name = '234' and (id = 1 or age > 3)", true); + throwExceptionParse("update hotnews set id = 1, name = '123' where id = 1 and (name = '234' or age > 3)", false); + + // 子查询,特殊的运算符between等情况 + throwExceptionParse("update hotnews set id = 1, name = '123' where id = 1 and name in (select name from test)", false); + throwExceptionParse("update hotnews set id = 1, name = '123' where name = '123' and id in (select id from test)", true); + throwExceptionParse("update hotnews set id = 1, name = '123' where id between 1 and 3", true); + throwExceptionParse("update hotnews set id = 1, name = '123' where id between 1 and 3 and name = '234'", true); + throwExceptionParse("update hotnews set id = 1, name = '123' where id between 1 and 3 or name = '234'", true); + throwExceptionParse("update hotnews set id = 1, name = '123' where id = 1 and name between '124' and '234'", false); + } + + /** + * 测试单表别名更新分片字段 + * @throws NoSuchMethodException + */ + @Test + public void testAliasUpdateShardColumn() throws NoSuchMethodException{ + throwExceptionParse("update hotnews h set h.id = 1 where h.name = 234;", true); + throwExceptionParse("update hotnews h set h.id = 1 where h.id = 3;", true); + throwExceptionParse("update hotnews h set h.id = 1, h.name = '123' where h.id = 1 and h.name = '234'", false); + throwExceptionParse("update hotnews h set h.id = 1, h.name = '123' where h.id = 1 or h.name = '234'", true); + throwExceptionParse("update hotnews h set h.id = 'A', h.name = '123' where h.id = 'A' and 
h.name = '234'", false); + throwExceptionParse("update hotnews h set h.id = 'A', h.name = '123' where h.id = 'A' or h.name = '234'", true); + throwExceptionParse("update hotnews h set h.id = 1.5, h.name = '123' where h.id = 1.5 and h.name = '234'", false); + throwExceptionParse("update hotnews h set h.id = 1.5, h.name = '123' where h.id = 1.5 or h.name = '234'", true); + + throwExceptionParse("update hotnews h set id = 1, h.name = '123' where h.id = 1 and h.name = '234'", false); + throwExceptionParse("update hotnews h set h.id = 1, h.name = '123' where id = 1 or h.name = '234'", true); + + throwExceptionParse("update hotnews h set h.id = 1, h.name = '123' where h.name = '234' and (h.id = 1 or h.age > 3)", true); + throwExceptionParse("update hotnews h set h.id = 1, h.name = '123' where h.id = 1 and (h.name = '234' or h.age > 3)", false); + } + + public void throwExceptionParse(String sql, boolean throwException) throws NoSuchMethodException { + MySqlStatementParser parser = new MySqlStatementParser(sql); + List statementList = parser.parseStatementList(); + SQLStatement sqlStatement = statementList.get(0); + MySqlUpdateStatement update = (MySqlUpdateStatement) sqlStatement; + SchemaConfig schemaConfig = mock(SchemaConfig.class); + Map tables = mock(Map.class); + TableConfig tableConfig = mock(TableConfig.class); + String tableName = "hotnews"; + when((schemaConfig).getTables()).thenReturn(tables); + when(tables.get(tableName)).thenReturn(tableConfig); + when(tableConfig.getParentTC()).thenReturn(null); + RouteResultset routeResultset = new RouteResultset(sql, 11); + Class c = DruidUpdateParser.class; + Method method = c.getDeclaredMethod("confirmShardColumnNotUpdated", new Class[]{SQLUpdateStatement.class, SchemaConfig.class, String.class, String.class, String.class, RouteResultset.class}); + method.setAccessible(true); + try { + method.invoke(c.newInstance(), update, schemaConfig, tableName, "ID", "", routeResultset); + if (throwException) { + 
System.out.println("未抛异常,解析通过则不对!"); + Assert.assertTrue(false); + } else { + System.out.println("未抛异常,解析通过,此情况分片字段可能在update语句中但是实际不会被更新"); + Assert.assertTrue(true); + } + } catch (Exception e) { + if (throwException) { + System.out.println(e.getCause().getClass()); + Assert.assertTrue(e.getCause() instanceof SQLNonTransientException); + System.out.println("抛异常原因为SQLNonTransientException则正确"); + } else { + System.out.println("抛异常,需要检查"); + Assert.assertTrue(false); + } + } + } + + /* + * 添加一个static方法用于打印一个SQL的where子句,比如这样的一条SQL: + * update mytab t set t.ptn_col = 'A', col1 = 3 where ptn_col = 'A' and (col1 = 4 or col2 > 5); + * where子句的语法树如下 + * AND + * / \ + * = OR + * / \ / \ + * ptn_col 'A' = > + * / \ / \ + * col1 4 col2 5 + * 其输出如下,(按层输出,并且每层最后输出下一层的节点数目) + * BooleanAnd Num of nodes in next level: 2 + * Equality BooleanOr Num of nodes in next level: 4 + * ptn_col 'A' Equality Equality Num of nodes in next level: 4 + * col1 4 col2 5 Num of nodes in next level: 0 + * + * 因为大部分的update的where子句都比较简单,按层次打印应该足够清晰,未来可以完全按照逻辑打印类似上面的整棵树结构 + */ + public static void printWhereClauseAST(SQLExpr sqlExpr) { + // where子句的AST sqlExpr可以通过 MySqlUpdateStatement.getWhere(); 获得 + if (sqlExpr == null) + return; + ArrayList exprNode = new ArrayList<>(); + int i = 0, curLevel = 1, nextLevel = 0; + SQLExpr iterExpr; + exprNode.add(sqlExpr); + while (true) { + iterExpr = exprNode.get(i++); + if (iterExpr == null) + break; + + if (iterExpr instanceof SQLBinaryOpExpr) { + System.out.print(((SQLBinaryOpExpr) iterExpr).getOperator()); + } else { + System.out.print(iterExpr.toString()); + } + System.out.print("\t"); + curLevel--; + + if (iterExpr instanceof SQLBinaryOpExpr) { + if (((SQLBinaryOpExpr) iterExpr).getLeft() != null) { + exprNode.add(((SQLBinaryOpExpr) iterExpr).getLeft()); + nextLevel++; + } + if (((SQLBinaryOpExpr) iterExpr).getRight() != null) { + exprNode.add(((SQLBinaryOpExpr) iterExpr).getRight()); + nextLevel++; + } + } + if (curLevel == 0) { + System.out.println("\t\tNum 
of nodes in next level: " + nextLevel); + curLevel = nextLevel; + nextLevel = 0; + } + if (exprNode.size() == i) + break; + } + } +} diff --git a/src/test/java/io/mycat/parser/druid/MycatSchemaStatVisitorTest.java b/src/test/java/io/mycat/parser/druid/MycatSchemaStatVisitorTest.java index 65c642310..df84c15a9 100644 --- a/src/test/java/io/mycat/parser/druid/MycatSchemaStatVisitorTest.java +++ b/src/test/java/io/mycat/parser/druid/MycatSchemaStatVisitorTest.java @@ -1,7 +1,5 @@ package io.mycat.parser.druid; -import io.mycat.route.parser.druid.MycatSchemaStatVisitor; - import java.util.ArrayList; import java.util.List; @@ -14,16 +12,33 @@ import com.alibaba.druid.sql.parser.SQLStatementParser; import com.alibaba.druid.stat.TableStat.Condition; +import io.mycat.route.parser.druid.MycatSchemaStatVisitor; + /** * TODO: 增加描述 - * + * * @author user * @date 2015-6-2 下午5:50:25 - * @version 0.1.0 + * @version 0.1.0 * @copyright wonhigh.cn */ public class MycatSchemaStatVisitorTest { - + + /** + * 从注解中解析 mycat schema + */ + @Test + public void test4() { + String sql = "/*!mycat:schema = helper1 */update adm_task a set a.remark = 'test' where id=1"; + Assert.assertEquals(getSchema(sql), "helper1."); + sql = "/*!mycat:schema=helper1*/update adm_task a set a.remark = 'test' where id=1"; + Assert.assertEquals(getSchema(sql), "helper1."); + sql = "/*!mycat:schema= helper1*/update adm_task a set a.remark = 'test' where id=1"; + Assert.assertEquals(getSchema(sql), "helper1."); + System.out.println(getSchema(sql)); + } + + /** * 3层嵌套or语句 */ @@ -39,29 +54,29 @@ public void test1() { Assert.assertEquals(list.get(2).size(), 3); Assert.assertEquals(list.get(3).size(), 4); Assert.assertEquals(list.get(4).size(), 3); - + Assert.assertEquals(list.get(0).get(0).toString(), "travelrecord.days = 5"); Assert.assertEquals(list.get(0).get(1).toString(), "travelrecord.id = (1, 2)"); - + Assert.assertEquals(list.get(1).get(0).toString(), "travelrecord.fee = 3"); 
Assert.assertEquals(list.get(1).get(1).toString(), "travelrecord.id = (1, 2)"); - + Assert.assertEquals(list.get(2).get(0).toString(), "travelrecord.fee = 0"); Assert.assertEquals(list.get(2).get(1).toString(), "travelrecord.traveldate = 2015-05-04 00:00:07.375"); Assert.assertEquals(list.get(2).get(2).toString(), "travelrecord.id = (1, 2)"); - + Assert.assertEquals(list.get(3).get(0).toString(), "travelrecord.fee = null"); Assert.assertEquals(list.get(3).get(1).toString(), "travelrecord.days = null"); Assert.assertEquals(list.get(3).get(2).toString(), "travelrecord.traveldate = 2015-05-04 00:00:07.375"); Assert.assertEquals(list.get(3).get(3).toString(), "travelrecord.id = (1, 2)"); - + Assert.assertEquals(list.get(4).get(0).toString(), "travelrecord.user_id = 2"); Assert.assertEquals(list.get(4).get(1).toString(), "travelrecord.traveldate = 2015-05-04 00:00:07.375"); Assert.assertEquals(list.get(4).get(2).toString(), "travelrecord.id = (1, 2)"); System.out.println(list.size()); } - + /** * 1层嵌套or语句 */ @@ -70,23 +85,23 @@ public void test2() { String sql = "select id from travelrecord " + " where id = 1 and ( fee=3 or days=5 or name = 'zhangsan')" ; List> list = getConditionList(sql); - + Assert.assertEquals(list.size(), 3); Assert.assertEquals(list.get(0).size(), 2); Assert.assertEquals(list.get(1).size(), 2); Assert.assertEquals(list.get(2).size(), 2); - + Assert.assertEquals(list.get(0).get(0).toString(), "travelrecord.name = zhangsan"); Assert.assertEquals(list.get(0).get(1).toString(), "travelrecord.id = 1"); - + Assert.assertEquals(list.get(1).get(0).toString(), "travelrecord.days = 5"); Assert.assertEquals(list.get(1).get(1).toString(), "travelrecord.id = 1"); - + Assert.assertEquals(list.get(2).get(0).toString(), "travelrecord.fee = 3"); Assert.assertEquals(list.get(2).get(1).toString(), "travelrecord.id = 1"); } - + /** * 1层嵌套or语句 */ @@ -95,21 +110,165 @@ public void test3() { String sql = "select id from travelrecord " + " where id = 1 and fee=3 or 
days=5 or name = 'zhangsan'" ; List> list = getConditionList(sql); - + Assert.assertEquals(list.size(), 3); - + Assert.assertEquals(list.get(0).size(), 1); Assert.assertEquals(list.get(1).size(), 1); Assert.assertEquals(list.get(2).size(), 2); Assert.assertEquals(list.get(0).get(0).toString(), "travelrecord.name = zhangsan"); - + Assert.assertEquals(list.get(1).get(0).toString(), "travelrecord.days = 5"); - + Assert.assertEquals(list.get(2).get(0).toString(), "travelrecord.id = 1"); Assert.assertEquals(list.get(2).get(1).toString(), "travelrecord.fee = 3"); } + String sql = "select count(*) count from (select *\r\n" + + " from (select b.sales_count,\r\n" + " b.special_type,\r\n" + + " a.prod_offer_id offer_id,\r\n" + + " a.alias_name as offer_name,\r\n" + + " (select c.attr_value_name\r\n" + + " from attr_value c\r\n" + + " where c.attr_id = '994001448'\r\n" + + " and c.attr_value = b.special_type) as attr_value_name,\r\n" + + " a.offer_type offer_kind,\r\n" + + " a.offer_comments,\r\n" + " a.is_ge\r\n" + + " from prod_offer a, special_offer b\r\n" + + " where a.prod_offer_id = b.prod_offer_id\r\n" + + " and (a.offer_type = '11' or a.offer_type = '10')\r\n" + + " and (b.region_id = '731' or b.region_id = '10000000')\r\n" + + " and a.status_cd = '10'\r\n" + + " and b.special_type = '0'\r\n" + " union all\r\n" + + " select b.sales_count,\r\n" + " b.special_type,\r\n" + + " a.prod_offer_id offer_id,\r\n" + + " a.alias_name as offer_name,\r\n" + + " (select c.attr_value_name\r\n" + + " from attr_value c\r\n" + + " where c.attr_id = '994001448'\r\n" + + " and c.attr_value = b.special_type) as attr_value_name,\r\n" + + " a.offer_type offer_kind,\r\n" + + " a.offer_comments,\r\n" + " a.is_ge\r\n" + + " from prod_offer a, special_offer b\r\n" + + " where a.prod_offer_id = b.prod_offer_id\r\n" + + " and (a.offer_type = '11' or a.offer_type = '10')\r\n" + + " and (b.region_id = '731' or b.region_id = '10000000')\r\n" + + " and a.status_cd = '10'\r\n" + + " and b.special_type 
= '1'\r\n" + + " and exists (select 1\r\n" + + " from prod_offer_channel l\r\n" + + " where a.prod_offer_id = l.prod_offer_id\r\n" + + " and l.channel_id = '11')\r\n" + + " and not exists\r\n" + " (select 1\r\n" + + " from product_offer_cat ml\r\n" + + " where ml.prod_offer_id = a.prod_offer_id\r\n" + + " and ml.offer_cat_type = '89')\r\n" + + " and (exists (select 1\r\n" + + " from sales_restrication\r\n" + + " where object_id = a.prod_offer_id\r\n" + + " and domain_id = '1965868'\r\n" + + " and restrication_flag = '0'\r\n" + + " and domain_type = '19F'\r\n" + + " and state = '00A') or exists\r\n" + + " (select 1\r\n" + + " from sales_restrication\r\n" + + " where object_id = a.prod_offer_id\r\n" + + " and domain_id = '843073100000000'\r\n" + + " and restrication_flag = '0'\r\n" + + " and domain_type = '19E'\r\n" + + " and state = '00A') or exists\r\n" + + " (select 1\r\n" + + " from sales_restrication\r\n" + + " where object_id = a.prod_offer_id\r\n" + + " and domain_id = '1965868'\r\n" + + " and restrication_flag = '0'\r\n" + + " and domain_type = '19X'\r\n" + + " and state = '00A'\r\n" + + " and (max_value = 1 or min_value = 1)\r\n" + + " and extended_field = '731') or exists\r\n" + + " (select 1\r\n" + + " from sales_restrication\r\n" + + " where object_id = a.prod_offer_id\r\n" + + " and domain_id = concat('-', '11')\r\n" + + " and restrication_flag = '0'\r\n" + + " and domain_type = '19F'\r\n" + + " and state = '00A') or not exists\r\n" + + " (select 1\r\n" + + " from sales_restrication\r\n" + + " where object_id = a.prod_offer_id\r\n" + + " and restrication_flag = '0'\r\n" + + " and (domain_type in ('19F', '19E') or\r\n" + + " (domain_type = '19X' and\r\n" + + " extended_field = '731' and\r\n" + + " (max_value = 1 or min_value = 1)))\r\n" + + " and state = '00A'))\r\n" + + " and not exists (select 1\r\n" + + " from sales_restrication\r\n" + + " where object_id = a.prod_offer_id\r\n" + + " and domain_id = '1965868'\r\n" + + " and restrication_flag = '1'\r\n" 
+ + " and domain_type = '19F'\r\n" + + " and state = '00A')\r\n" + + " and not exists (select 1\r\n" + + " from sales_restrication\r\n" + + " where object_id = a.prod_offer_id\r\n" + + " and domain_id = '843073100000000'\r\n" + + " and restrication_flag = '1'\r\n" + + " and domain_type = '19E'\r\n" + + " and state = '00A')\r\n" + + " and not exists\r\n" + " (select 1\r\n" + + " from sales_restrication\r\n" + + " where object_id = a.prod_offer_id\r\n" + + " and domain_id = '1965868'\r\n" + + " and restrication_flag = '1'\r\n" + + " and domain_type = '19X'\r\n" + + " and state = '00A'\r\n" + + " and (min_value = 1 or max_value = 1)\r\n" + + " and extended_field = '731')\r\n" + + " and not exists (select 1\r\n" + + " from sales_restrication\r\n" + + " where object_id = a.prod_offer_id\r\n" + + " and domain_id = concat('-', '11')\r\n" + + " and restrication_flag = '1'\r\n" + + " and domain_type = '19F'\r\n" + + " and state = '00A')\r\n" + " and exists\r\n" + + " (select 1\r\n" + " from prod_offer_region v1\r\n" + + " where v1.prod_offer_id = a.prod_offer_id\r\n" + + " and (v1.common_region_id = '731' or\r\n" + + " v1.common_region_id = '10000000' or\r\n" + + " v1.common_region_id = '73101'))) t\r\n" + + " order by t.sales_count desc)"; + + /** + * 8层以上 嵌套or语句 + */ + @Test + public void test5() { + List> list = getConditionList(sql); + + Assert.assertTrue(list.size() < 100); + } + + private String getSchema(String sql) { + SQLStatementParser parser =null; + parser = new MySqlStatementParser(sql); + + MycatSchemaStatVisitor visitor = null; + SQLStatement statement = null; + //解析出现问题统一抛SQL语法错误 + try { + statement = parser.parseStatement(); + visitor = new MycatSchemaStatVisitor(); + } catch (Exception e) { + e.printStackTrace(); + } + statement.accept(visitor); + + + return visitor.getCurrentTable(); + } + private List> getConditionList(String sql) { SQLStatementParser parser =null; parser = new MySqlStatementParser(sql); @@ -124,7 +283,7 @@ private List> 
getConditionList(String sql) { e.printStackTrace(); } statement.accept(visitor); - + List> mergedConditionList = new ArrayList>(); if(visitor.hasOrCondition()) {//包含or语句 //TODO @@ -133,7 +292,7 @@ private List> getConditionList(String sql) { } else {//不包含OR语句 mergedConditionList.add(visitor.getConditions()); } - + return mergedConditionList; } } diff --git a/src/test/java/io/mycat/parser/primitive/TestFunctionParser.java b/src/test/java/io/mycat/parser/primitive/TestFunctionParser.java new file mode 100644 index 000000000..60eb2209a --- /dev/null +++ b/src/test/java/io/mycat/parser/primitive/TestFunctionParser.java @@ -0,0 +1,29 @@ +package io.mycat.parser.primitive; + +import io.mycat.route.parser.primitive.FunctionParser; +import io.mycat.route.parser.primitive.Model.Function; +import junit.framework.Assert; +import org.junit.Test; + +import java.sql.SQLNonTransientException; + +/** + * @author Hash Zhang + * @version 1.0.0 + * @date 2016/7/26 + */ +public class TestFunctionParser { + + @Test + public void testMultiFunctions() throws SQLNonTransientException { + Assert.assertEquals("[arg1, a.t]",testFunctionParse("function1(arg1,a.t)")); + Assert.assertEquals("[arg1, a.t]",testFunctionParse("function1(arg1,a.t,\"ast(,)\")")); + Assert.assertEquals("[arg1, a.t, c.t, x]",testFunctionParse("function1(arg1,a.t,\"ast(,)\",\",\",function2(c.t,function3(x)))")); + Assert.assertEquals("[arg1, a.t, c.t, x]",testFunctionParse("function1(arg1,a.t,\"ast(,)\",\",\",function2(c.t,\"(,)\",function3(function4(x))))")); + } + + public String testFunctionParse(String function) throws SQLNonTransientException { + Function function1 = FunctionParser.parseFunction(function); + return FunctionParser.getFields(function1).toString(); + } +} diff --git a/src/test/java/io/mycat/parser/util/PairUtilTest.java b/src/test/java/io/mycat/parser/util/PairUtilTest.java index e1681e2f8..6b9ada7be 100644 --- a/src/test/java/io/mycat/parser/util/PairUtilTest.java +++ 
b/src/test/java/io/mycat/parser/util/PairUtilTest.java @@ -23,13 +23,14 @@ */ package io.mycat.parser.util; -import io.mycat.util.Pair; -import io.mycat.util.PairUtil; import junit.framework.TestCase; import org.junit.Assert; import org.junit.Test; +import io.mycat.route.parser.util.Pair; +import io.mycat.route.parser.util.PairUtil; + /** * @author mycat */ diff --git a/src/test/java/io/mycat/performance/AbstractMultiTreadBatchTester.java b/src/test/java/io/mycat/performance/AbstractMultiTreadBatchTester.java index bc1a15b4e..4f668e48d 100644 --- a/src/test/java/io/mycat/performance/AbstractMultiTreadBatchTester.java +++ b/src/test/java/io/mycat/performance/AbstractMultiTreadBatchTester.java @@ -23,12 +23,14 @@ */ package io.mycat.performance; +import java.io.FileInputStream; import java.sql.SQLException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; import java.util.Iterator; import java.util.LinkedList; +import java.util.Properties; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicLong; @@ -120,7 +122,7 @@ public ArrayList[] createAllJobs() throws Exception { String sqlTemplate = pros.getProperty("sql"); String batchSizeStr=pros.getProperty("batch"); String autocommitStr=pros.getProperty("autocommit"); - boolean autocommit=autocommitStr==null?false:Boolean.valueOf(autocommitStr); + boolean autocommit= autocommitStr!=null && Boolean.valueOf(autocommitStr); int batchSize=batchSizeStr==null?100:Integer.parseInt(batchSizeStr); System.out.println("total record "+total+ " batch size:"+batchSize+" autocomit "+autocommit); return createSQLTemplateJobs(total, sqlTemplate,batchSize,autocommit); diff --git a/src/main/java/io/mycat/server/Versions.template b/src/test/java/io/mycat/postgres/PostgresTest.java similarity index 83% rename from src/main/java/io/mycat/server/Versions.template rename to src/test/java/io/mycat/postgres/PostgresTest.java index 
a3dc4c5d6..91b744e3b 100644 --- a/src/main/java/io/mycat/server/Versions.template +++ b/src/test/java/io/mycat/postgres/PostgresTest.java @@ -21,17 +21,17 @@ * https://code.google.com/p/opencloudb/. * */ -package io.mycat.server; +package io.mycat.postgres; + +import org.junit.Test; /** * @author mycat */ -public interface Versions { - - /**协议版本**/ - public static final byte PROTOCOL_VERSION = 10; +public class PostgresTest { - /**服务器版**/ - public static final byte[] SERVER_VERSION = "@server-version@".getBytes(); + @Test + public void testNothing() { + } } \ No newline at end of file diff --git a/src/test/java/io/mycat/queue/Queue.java b/src/test/java/io/mycat/queue/Queue.java index 593613df3..c9bf38e18 100644 --- a/src/test/java/io/mycat/queue/Queue.java +++ b/src/test/java/io/mycat/queue/Queue.java @@ -197,8 +197,9 @@ public String toString() { buf.append(", elements={"); for (int i = 0; i < count; i++) { int pos = (i + start) % size; - if (i > 0) + if (i > 0) { buf.append(", "); + } buf.append(items[pos]); } buf.append("}]"); diff --git a/src/test/java/io/mycat/queue/QueueSimpleMain.java b/src/test/java/io/mycat/queue/QueueSimpleMain.java index d9234cbe5..14df5c974 100644 --- a/src/test/java/io/mycat/queue/QueueSimpleMain.java +++ b/src/test/java/io/mycat/queue/QueueSimpleMain.java @@ -62,8 +62,9 @@ public void run() { @Override public void run() { for (;;) { - if (queue.offer("A")) + if (queue.offer("A")) { putCount++; + } } } }.start(); @@ -73,8 +74,9 @@ public void run() { public void run() { for (;;) { // try { - if (queue.poll() != null) + if (queue.poll() != null) { takeCount++; + } // } catch (InterruptedException e) { // e.printStackTrace(); // } diff --git a/src/test/java/io/mycat/route/.gitignore b/src/test/java/io/mycat/route/.gitignore new file mode 100644 index 000000000..4350f9380 --- /dev/null +++ b/src/test/java/io/mycat/route/.gitignore @@ -0,0 +1 @@ +/DruidMysqlSqlSubqueriesParserTest.java diff --git 
a/src/test/java/io/mycat/route/DDLRouteTest.java b/src/test/java/io/mycat/route/DDLRouteTest.java index d572d3e35..dd3dbf33f 100644 --- a/src/test/java/io/mycat/route/DDLRouteTest.java +++ b/src/test/java/io/mycat/route/DDLRouteTest.java @@ -1,32 +1,71 @@ package io.mycat.route; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.junit.Test; + +import io.mycat.MycatServer; import io.mycat.SimpleCachePool; import io.mycat.cache.CacheService; import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.config.model.TableConfig; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteService; +import io.mycat.route.RouteStrategy; import io.mycat.route.factory.RouteStrategyFactory; import io.mycat.route.util.RouterUtil; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.config.node.TableConfig; import io.mycat.server.parser.ServerParse; import junit.framework.Assert; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; public class DDLRouteTest { protected Map schemaMap; protected LayerCachePool cachePool = new SimpleCachePool(); - protected RouteStrategy routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); + protected RouteStrategy routeStrategy ; public DDLRouteTest() { - ConfigInitializer confInit = new ConfigInitializer(true); - schemaMap = confInit.getSchemas(); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); + MycatServer.getInstance().getConfig().getSchemas().putAll(schemaMap); + 
MycatServer.getInstance().getConfig().getSystem().setUseGlobleTableCheck(0); //DDL Route Test 不开启全局表一致性检查 + RouteStrategyFactory.init(); + routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); } + + @Test + public void testSpecialCharDDL() throws Exception { + SchemaConfig schema = schemaMap.get("TESTDB"); + CacheService cacheService = new CacheService(); + RouteService routerService = new RouteService(cacheService); + + // alter table test + String sql = " ALTER TABLE COMPANY\r\nADD COLUMN TEST VARCHAR(255) NULL AFTER CREATE_DATE,\r\n CHARACTER SET = UTF8"; + sql = RouterUtil.getFixedSql(sql); + List dataNodes = new ArrayList<>(); + String tablename = RouterUtil.getTableName(sql, RouterUtil.getAlterTablePos(sql, 0)); + Map tables = schema.getTables(); + TableConfig tc; + if (tables != null && (tc = tables.get(tablename)) != null) { + dataNodes = tc.getDataNodes(); + } + int nodeSize = dataNodes.size(); + + int rs = ServerParse.parse(sql); + int sqlType = rs & 0xff; + RouteResultset rrs = routerService.route(new SystemConfig(), schema, sqlType, sql, "UTF-8", null); + Assert.assertTrue("COMPANY".equals(tablename)); + Assert.assertTrue(rrs.getNodes().length == nodeSize); + } + + /** * ddl deal test * @@ -37,7 +76,7 @@ public void testDDL() throws Exception { SchemaConfig schema = schemaMap.get("TESTDB"); CacheService cacheService = new CacheService(); RouteService routerService = new RouteService(cacheService); - + // create table/view/function/.. 
String sql = " create table company(idd int)"; sql = RouterUtil.getFixedSql(sql); @@ -47,6 +86,7 @@ public void testDDL() throws Exception { // 小写表名,需要额外转为 大写 做比较 String tablename = RouterUtil.getTableName(sql, RouterUtil.getCreateTablePos(upsql, 0)); tablename = tablename.toUpperCase(); + List dataNodes = new ArrayList<>(); Map tables = schema.getTables(); TableConfig tc; @@ -54,15 +94,18 @@ public void testDDL() throws Exception { dataNodes = tc.getDataNodes(); } int nodeSize = dataNodes.size(); + int rs = ServerParse.parse(sql); int sqlType = rs & 0xff; RouteResultset rrs = routerService.route(new SystemConfig(), schema, sqlType, sql, "UTF-8", null); + Assert.assertTrue("COMPANY".equals(tablename)); Assert.assertTrue(rrs.getNodes().length == nodeSize); - // drop table company - sql = " drop table company"; + // drop table test + sql = " drop table COMPANY"; sql = RouterUtil.getFixedSql(sql); upsql = sql.toUpperCase(); + tablename = RouterUtil.getTableName(sql, RouterUtil.getDropTablePos(upsql, 0)); tables = schema.getTables(); if (tables != null && (tc = tables.get(tablename)) != null) { @@ -71,9 +114,9 @@ public void testDDL() throws Exception { nodeSize = dataNodes.size(); rs = ServerParse.parse(sql); - sqlType = rs & 0xff; + sqlType = rs & 0xff; rrs = routerService.route(new SystemConfig(), schema, sqlType, sql, "UTF-8", null); - Assert.assertTrue("company".equals(tablename)); + Assert.assertTrue("COMPANY".equals(tablename)); Assert.assertTrue(rrs.getNodes().length == nodeSize); //alter table @@ -87,7 +130,7 @@ public void testDDL() throws Exception { } nodeSize = dataNodes.size(); rs = ServerParse.parse(sql); - sqlType = rs & 0xff; + sqlType = rs & 0xff; rrs = routerService.route(new SystemConfig(), schema, sqlType, sql, "UTF-8", null); Assert.assertTrue("COMPANY".equals(tablename)); Assert.assertTrue(rrs.getNodes().length == nodeSize); @@ -103,14 +146,18 @@ public void testDDL() throws Exception { } nodeSize = dataNodes.size(); rs = ServerParse.parse(sql); 
- sqlType = rs & 0xff; + sqlType = rs & 0xff; rrs = routerService.route(new SystemConfig(), schema, sqlType, sql, "UTF-8", null); Assert.assertTrue("COMPANY".equals(tablename)); Assert.assertTrue(rrs.getNodes().length == nodeSize); + + } + + @Test public void testDDLDefaultNode() throws Exception { SchemaConfig schema = schemaMap.get("solo1"); @@ -124,7 +171,7 @@ public void testDDLDefaultNode() throws Exception { //TODO:modify by zhuam 小写表名,转为大写比较 String tablename = RouterUtil.getTableName(sql, RouterUtil.getCreateTablePos(upsql, 0)); - tablename = tablename.toUpperCase(); + tablename = tablename.toUpperCase(); List dataNodes = new ArrayList<>(); Map tables = schema.getTables(); @@ -224,4 +271,81 @@ public void testDDLDefaultNode() throws Exception { } + + + + @Test + public void testTableMetaRead() throws Exception { + final SchemaConfig schema = schemaMap.get("cndb"); + + String sql = "desc offer"; + RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.DESCRIBE, sql, null, null, + cachePool); + Assert.assertEquals(false, rrs.isCacheAble()); + Assert.assertEquals(-1L, rrs.getLimitSize()); + Assert.assertEquals(1, rrs.getNodes().length); + // random return one node + // Assert.assertEquals("offer_dn[0]", rrs.getNodes()[0].getName()); + Assert.assertEquals("desc offer", rrs.getNodes()[0].getStatement()); + + sql = " desc cndb.offer"; + rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.DESCRIBE, sql, null, null, cachePool); + Assert.assertEquals(false, rrs.isCacheAble()); + Assert.assertEquals(-1L, rrs.getLimitSize()); + Assert.assertEquals(1, rrs.getNodes().length); + // random return one node + // Assert.assertEquals("offer_dn[0]", rrs.getNodes()[0].getName()); + Assert.assertEquals("desc offer", rrs.getNodes()[0].getStatement()); + + sql = " desc cndb.offer col1"; + rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.DESCRIBE, sql, null, null, cachePool); + Assert.assertEquals(false, rrs.isCacheAble()); + 
Assert.assertEquals(-1L, rrs.getLimitSize()); + Assert.assertEquals(1, rrs.getNodes().length); + // random return one node + // Assert.assertEquals("offer_dn[0]", rrs.getNodes()[0].getName()); + Assert.assertEquals("desc offer col1", rrs.getNodes()[0].getStatement()); + + sql = "SHOW FULL COLUMNS FROM offer IN db_name WHERE true"; + rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SHOW, sql, null, null, + cachePool); + Assert.assertEquals(false, rrs.isCacheAble()); + Assert.assertEquals(-1L, rrs.getLimitSize()); + Assert.assertEquals(1, rrs.getNodes().length); + // random return one node + // Assert.assertEquals("offer_dn[0]", rrs.getNodes()[0].getName()); + Assert.assertEquals("SHOW FULL COLUMNS FROM offer WHERE true", + rrs.getNodes()[0].getStatement()); + + sql = "SHOW FULL COLUMNS FROM db.offer IN db_name WHERE true"; + rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SHOW, sql, null, null, + cachePool); + Assert.assertEquals(-1L, rrs.getLimitSize()); + Assert.assertEquals(false, rrs.isCacheAble()); + Assert.assertEquals(1, rrs.getNodes().length); + // random return one node + // Assert.assertEquals("offer_dn[0]", rrs.getNodes()[0].getName()); + Assert.assertEquals("SHOW FULL COLUMNS FROM offer WHERE true", + rrs.getNodes()[0].getStatement()); + + + sql = "SHOW FULL TABLES FROM `TESTDB` WHERE Table_type != 'VIEW'"; + rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SHOW, sql, null, null, + cachePool); + Assert.assertEquals(-1L, rrs.getLimitSize()); + Assert.assertEquals(false, rrs.isCacheAble()); + Assert.assertEquals("SHOW FULL TABLES WHERE Table_type != 'VIEW'", rrs.getNodes()[0].getStatement()); + + sql = "SHOW INDEX IN offer FROM db_name"; + rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SHOW, sql, null, null, + cachePool); + Assert.assertEquals(false, rrs.isCacheAble()); + Assert.assertEquals(-1L, rrs.getLimitSize()); + Assert.assertEquals(1, rrs.getNodes().length); + // random return 
one node + // Assert.assertEquals("offer_dn[0]", rrs.getNodes()[0].getName()); + Assert.assertEquals("SHOW INDEX FROM offer", + rrs.getNodes()[0].getStatement()); + } + } diff --git a/src/test/java/io/mycat/route/DQLRouteTest.java b/src/test/java/io/mycat/route/DQLRouteTest.java new file mode 100644 index 000000000..b0acbd858 --- /dev/null +++ b/src/test/java/io/mycat/route/DQLRouteTest.java @@ -0,0 +1,139 @@ +package io.mycat.route; + +import java.lang.reflect.Method; +import java.sql.SQLSyntaxErrorException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.junit.Test; + +import io.mycat.MycatServer; +import io.mycat.SimpleCachePool; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.route.parser.druid.DruidShardingParseInfo; +import io.mycat.route.parser.druid.MycatSchemaStatVisitor; +import io.mycat.route.parser.druid.MycatStatementParser; +import io.mycat.route.parser.druid.RouteCalculateUnit; +import io.mycat.route.factory.RouteStrategyFactory; + + +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import com.alibaba.druid.sql.parser.SQLStatementParser; +import com.alibaba.druid.sql.visitor.SchemaStatVisitor; +import com.alibaba.druid.stat.TableStat.Condition; + +import junit.framework.Assert; + +public class DQLRouteTest { + + protected Map schemaMap; + protected LayerCachePool cachePool = new SimpleCachePool(); + protected RouteStrategy routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); + private Map tableAliasMap = new HashMap(); + + protected DruidShardingParseInfo ctx; + + public DQLRouteTest() { + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + 
schemaMap = schemaLoader.getSchemas(); + MycatServer.getInstance().getConfig().getSchemas().putAll(schemaMap); + } + + @Test + public void test() throws Exception { + String stmt = "select * from `offer` where id = 100"; + SchemaConfig schema = schemaMap.get("mysqldb"); + RouteResultset rrs = new RouteResultset(stmt, 7); + SQLStatementParser parser = null; + if (schema.isNeedSupportMultiDBType()) { + parser = new MycatStatementParser(stmt); + } else { + parser = new MySqlStatementParser(stmt); + } + SQLStatement statement; + MycatSchemaStatVisitor visitor = null; + + try { + statement = parser.parseStatement(); + visitor = new MycatSchemaStatVisitor(); + } catch (Exception t) { + throw new SQLSyntaxErrorException(t); + } + ctx = new DruidShardingParseInfo(); + ctx.setSql(stmt); + + List taskList = visitorParse(rrs, statement, visitor); + Assert.assertEquals(true, !taskList.get(0).getTablesAndConditions().isEmpty()); + } + + @SuppressWarnings("unchecked") + private List visitorParse(RouteResultset rrs, SQLStatement stmt, MycatSchemaStatVisitor visitor) throws Exception { + + stmt.accept(visitor); + + List> mergedConditionList = new ArrayList>(); + if (visitor.hasOrCondition()) {// 包含or语句 + // TODO + // 根据or拆分 + mergedConditionList = visitor.splitConditions(); + } else {// 不包含OR语句 + mergedConditionList.add(visitor.getConditions()); + } + + if (visitor.getAliasMap() != null) { + for (Map.Entry entry : visitor.getAliasMap().entrySet()) { + String key = entry.getKey(); + String value = entry.getValue(); + if (key != null && key.indexOf("`") >= 0) { + key = key.replaceAll("`", ""); + } + if (value != null && value.indexOf("`") >= 0) { + value = value.replaceAll("`", ""); + } + // 表名前面带database的,去掉 + if (key != null) { + int pos = key.indexOf("."); + if (pos > 0) { + key = key.substring(pos + 1); + } + } + + if (key.equals(value)) { + ctx.addTable(key.toUpperCase()); + } + // else { + // tableAliasMap.put(key, value); + // } + tableAliasMap.put(key.toUpperCase(), value); 
+ } + visitor.getAliasMap().putAll(tableAliasMap); + ctx.setTableAliasMap(tableAliasMap); + } + + //利用反射机制单元测试DefaultDruidParser类的私有方法buildRouteCalculateUnits + Class clazz = Class.forName("io.mycat.route.parser.druid.impl.DefaultDruidParser"); + Method buildRouteCalculateUnits = clazz.getDeclaredMethod("buildRouteCalculateUnits", + new Class[] { SchemaStatVisitor.class, List.class }); + //System.out.println("buildRouteCalculateUnits:\t" + buildRouteCalculateUnits); + Object newInstance = clazz.newInstance(); + buildRouteCalculateUnits.setAccessible(true); + Object returnValue = buildRouteCalculateUnits.invoke(newInstance, + new Object[] { visitor, mergedConditionList }); + List retList = new ArrayList(); + if (returnValue instanceof ArrayList) { + retList.add(((ArrayList)returnValue).get(0)); + //retList = (ArrayList)returnValue; + //System.out.println(taskList.get(0).getTablesAndConditions().values()); + } + return retList; + } + +} diff --git a/src/test/java/io/mycat/route/DeleteSqlParseTest.java b/src/test/java/io/mycat/route/DeleteSqlParseTest.java new file mode 100644 index 000000000..691a17087 --- /dev/null +++ b/src/test/java/io/mycat/route/DeleteSqlParseTest.java @@ -0,0 +1,51 @@ +package io.mycat.route; + +import java.sql.SQLNonTransientException; +import java.util.Map; + +import junit.framework.Assert; + +import org.junit.Test; + +import io.mycat.SimpleCachePool; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.factory.RouteStrategyFactory; + +/** + * 测试删除 + * + * @author huangyiming + * + */ +public class DeleteSqlParseTest { + protected Map schemaMap; + protected LayerCachePool cachePool = new SimpleCachePool(); + protected RouteStrategy routeStrategy; + + public DeleteSqlParseTest() { + String schemaFile = "/route/schema.xml"; + String ruleFile = 
"/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); + RouteStrategyFactory.init(); + routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); + } + + @Test + public void testDeleteToRoute() throws SQLNonTransientException { + String sql = "delete t from offer as t "; + SchemaConfig schema = schemaMap.get("config"); + RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, + null, cachePool); + Assert.assertEquals(128, rrs.getNodes().length); + + } + + + + +} diff --git a/src/test/java/io/mycat/route/DruidDb2SqlParserTest.java b/src/test/java/io/mycat/route/DruidDb2SqlParserTest.java index d3c8bbc41..e9431783e 100644 --- a/src/test/java/io/mycat/route/DruidDb2SqlParserTest.java +++ b/src/test/java/io/mycat/route/DruidDb2SqlParserTest.java @@ -1,36 +1,42 @@ package io.mycat.route; -import io.mycat.SimpleCachePool; -import io.mycat.cache.LayerCachePool; -import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; - import java.sql.SQLNonTransientException; import java.util.Map; -import junit.framework.Assert; - import org.junit.Test; +import io.mycat.MycatServer; +import io.mycat.SimpleCachePool; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.factory.RouteStrategyFactory; +import junit.framework.Assert; + public class DruidDb2SqlParserTest { protected Map schemaMap; protected LayerCachePool cachePool = new SimpleCachePool(); - protected RouteStrategy routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); + protected RouteStrategy routeStrategy; public DruidDb2SqlParserTest() { - 
ConfigInitializer confInit = new ConfigInitializer(true); - schemaMap = confInit.getSchemas(); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); + MycatServer.getInstance().getConfig().getSchemas().putAll(schemaMap); + RouteStrategyFactory.init(); + routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); } @Test public void testLimitToDb2Page() throws SQLNonTransientException { String sql = "select * from offer order by id desc limit 5,10"; SchemaConfig schema = schemaMap.get("db2db"); - RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, - null, cachePool); + RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, + null, cachePool); Assert.assertEquals(2, rrs.getNodes().length); Assert.assertEquals(5, rrs.getLimitStart()); Assert.assertEquals(10, rrs.getLimitSize()); @@ -46,7 +52,7 @@ public void testLimitToDb2Page() throws SQLNonTransientException { Assert.assertEquals(15, rrs.getNodes()[0].getLimitSize()); Assert.assertEquals(0, rrs.getLimitStart()); Assert.assertEquals(15, rrs.getLimitSize()); - + sql="select * from offer1 order by id desc limit 5,10" ; rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, null, cachePool); diff --git a/src/test/java/io/mycat/route/DruidMysqlCreateTableTest.java b/src/test/java/io/mycat/route/DruidMysqlCreateTableTest.java new file mode 100644 index 000000000..03598d0b5 --- /dev/null +++ b/src/test/java/io/mycat/route/DruidMysqlCreateTableTest.java @@ -0,0 +1,121 @@ +package io.mycat.route; + +import com.alibaba.druid.sql.ast.SQLExpr; +import com.alibaba.druid.sql.ast.SQLName; +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.druid.sql.ast.statement.SQLColumnDefinition; +import 
com.alibaba.druid.sql.ast.statement.SQLCreateTableStatement; +import com.alibaba.druid.sql.ast.statement.SQLTableElement; +import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement; +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; + +import io.mycat.MycatServer; +import io.mycat.SimpleCachePool; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.factory.RouteStrategyFactory; +import io.mycat.server.interceptor.impl.GlobalTableUtil; +import io.mycat.util.StringUtil; +import junit.framework.Assert; +import org.junit.Test; + +import java.sql.SQLNonTransientException; +import java.util.List; +import java.util.Map; + +public class DruidMysqlCreateTableTest +{ + protected Map schemaMap; + protected LayerCachePool cachePool = new SimpleCachePool(); + protected RouteStrategy routeStrategy; + private static final String originSql1 = "CREATE TABLE autoslot" + + "(" + + " ID BIGINT AUTO_INCREMENT," + + " CHANNEL_ID INT(11)," + + " CHANNEL_INFO varchar(128)," + + " CONSTRAINT RETL_MARK_ID PRIMARY KEY (ID)" + + ") ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;"; + + + public DruidMysqlCreateTableTest() { + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); + MycatServer.getInstance().getConfig().getSchemas().putAll(schemaMap); + RouteStrategyFactory.init(); + routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); + } + + @Test + public void testCreate() throws SQLNonTransientException { + + SchemaConfig schema = schemaMap.get("mysqldb"); + RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, -1, originSql1, null, + null, cachePool); + Assert.assertEquals(2, 
rrs.getNodes().length); + String sql= rrs.getNodes()[0].getStatement(); + + Assert.assertTrue(parseSql(sql)); + + + + + + } + + // @Test + public void testInsert() throws SQLNonTransientException { + + SchemaConfig schema = schemaMap.get("mysqldb"); + RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, -1, "insert into autoslot (id,sid) values(1,2) ", null, + null, cachePool); + Assert.assertEquals(1, rrs.getNodes().length); + + Assert.assertTrue(isInsertHasSlot(rrs.getStatement())); + + + + + + } + + private boolean isInsertHasSlot(String sql) + { + MySqlStatementParser parser = new MySqlStatementParser(sql); + MySqlInsertStatement insertStatement= (MySqlInsertStatement)parser.parseStatement(); + List cc= insertStatement.getColumns(); + for (SQLExpr sqlExpr : cc) { + SQLIdentifierExpr c= (SQLIdentifierExpr) sqlExpr; + if("_slot".equalsIgnoreCase(c.getName()) &&cc.size()==insertStatement.getValues().getValues().size()) return true; + } + return false; + } + + public boolean parseSql(String sql) { + MySqlStatementParser parser = new MySqlStatementParser(sql); + SQLStatement statement = parser.parseStatement(); + return hasColumn(statement); + } + + private static boolean hasColumn(SQLStatement statement){ + for (SQLTableElement tableElement : ((SQLCreateTableStatement)statement).getTableElementList()) { + SQLName sqlName = null; + if (tableElement instanceof SQLColumnDefinition) { + sqlName = ((SQLColumnDefinition)tableElement).getName(); + } + if (sqlName != null) { + String simpleName = sqlName.getSimpleName(); + simpleName = StringUtil.removeBackquote(simpleName); + if (tableElement instanceof SQLColumnDefinition && "_slot".equalsIgnoreCase(simpleName)) { + return true; + } + } + } + return false; + } +} diff --git a/src/test/java/io/mycat/route/DruidMysqlHavingTest.java b/src/test/java/io/mycat/route/DruidMysqlHavingTest.java new file mode 100644 index 000000000..62b1f1218 --- /dev/null +++ 
b/src/test/java/io/mycat/route/DruidMysqlHavingTest.java @@ -0,0 +1,58 @@ +package io.mycat.route; + +import java.sql.SQLNonTransientException; +import java.util.Map; + +import org.junit.Test; + +import io.mycat.MycatServer; +import io.mycat.SimpleCachePool; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.factory.RouteStrategyFactory; +import junit.framework.Assert; + +public class DruidMysqlHavingTest +{ + protected Map schemaMap; + protected LayerCachePool cachePool = new SimpleCachePool(); + protected RouteStrategy routeStrategy; + + public DruidMysqlHavingTest() { + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); + MycatServer.getInstance().getConfig().getSchemas().putAll(schemaMap); + RouteStrategyFactory.init(); + routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); + } + + @Test + public void testHaving() throws SQLNonTransientException { + String sql = "select avg(offer_id) avgofferid, member_id from offer_detail group by member_id having avgofferid > 100"; + SchemaConfig schema = schemaMap.get("cndb"); + RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, + null, cachePool); + Assert.assertEquals(3, rrs.getSqlMerge().getHavingColsName().length); + + sql = "select avg(offer_id) avgofferid, member_id from offer_detail group by member_id having avg(offer_id) > 100"; + rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, + null, cachePool); + Assert.assertEquals(3, rrs.getSqlMerge().getHavingColsName().length); + + sql = "select count(offer_id) countofferid, member_id from offer_detail group by member_id having countofferid > 100"; + rrs = 
routeStrategy.route(new SystemConfig(), schema, -1, sql, null, + null, cachePool); + Assert.assertEquals(3, rrs.getSqlMerge().getHavingColsName().length); + + sql = "select count(offer_id) countofferid, member_id from offer_detail group by member_id having count(offer_id) > 100"; + rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, + null, cachePool); + Assert.assertEquals(3, rrs.getSqlMerge().getHavingColsName().length); + + } +} diff --git a/src/test/java/io/mycat/route/DruidMysqlRouteStrategyTest.java b/src/test/java/io/mycat/route/DruidMysqlRouteStrategyTest.java index 1aa6776d1..0851cb213 100644 --- a/src/test/java/io/mycat/route/DruidMysqlRouteStrategyTest.java +++ b/src/test/java/io/mycat/route/DruidMysqlRouteStrategyTest.java @@ -1,13 +1,5 @@ package io.mycat.route; -import io.mycat.SimpleCachePool; -import io.mycat.cache.LayerCachePool; -import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.parser.ServerParse; - import java.sql.SQLNonTransientException; import java.util.Collection; import java.util.HashMap; @@ -16,27 +8,46 @@ import java.util.NoSuchElementException; import java.util.Set; -import junit.framework.Assert; -import junit.framework.TestCase; - +import org.junit.Before; import org.junit.Test; import com.alibaba.druid.sql.ast.SQLStatement; import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import io.mycat.MycatServer; +import io.mycat.SimpleCachePool; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteResultsetNode; +import io.mycat.route.RouteStrategy; +import 
io.mycat.route.factory.RouteStrategyFactory; +import io.mycat.server.parser.ServerParse; +import junit.framework.Assert; +import junit.framework.TestCase; + public class DruidMysqlRouteStrategyTest extends TestCase { protected Map schemaMap; protected LayerCachePool cachePool = new SimpleCachePool(); - protected RouteStrategy routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); + protected RouteStrategy routeStrategy ; public DruidMysqlRouteStrategyTest() { - ConfigInitializer confInit = new ConfigInitializer(true); - schemaMap = confInit.getSchemas(); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); + MycatServer.getInstance().getConfig().getSchemas().putAll(schemaMap); + RouteStrategyFactory.init(); + routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); } protected void setUp() throws Exception { // super.setUp(); // schemaMap = CobarServer.getInstance().getConfig().getSchemas(); + } // public void testAlias() throws Exception { @@ -46,6 +57,7 @@ protected void setUp() throws Exception { // null, cachePool); // } + public void testRouteInsertShort() throws Exception { String sql = "inSErt into offer_detail (`offer_id`, gmt) values (123,now())"; SchemaConfig schema = schemaMap.get("cndb"); @@ -90,6 +102,8 @@ public void testRouteInsertShort() throws Exception { rrs.getNodes()[0].getStatement()); + + sql = "\n" + " INSERT INTO \n" + "`offer` \n" + @@ -100,6 +114,7 @@ public void testRouteInsertShort() throws Exception { rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, null, cachePool); Assert.assertEquals(1, rrs.getNodes().length); + } public void testGlobalTableroute() throws Exception { @@ -125,7 +140,7 @@ public void testGlobalTableroute() throws Exception { rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, null, cachePool); 
Assert.assertEquals(3, rrs.getNodes().length); Assert.assertEquals(false, rrs.isCacheAble()); - + // delete of global table route to every datanode defined sql = "delete from company where id = 1"; schema = schemaMap.get("TESTDB"); @@ -148,23 +163,6 @@ public void testGlobalTableroute() throws Exception { } - - /** - * 测试 global table 的or语句 - * - * - * @throws Exception - */ - @Test - public void testGlobalTableOr() throws Exception { - SchemaConfig schema = schemaMap.get("TESTDB"); - String sql = "select id from company where 1 = 1 and name ='company1' or name = 'company2'" ; - for(int i = 0; i < 20; i++) { - RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); - Assert.assertTrue(rrs.getNodes().length == 1); - } - } - public void testMoreGlobalTableroute() throws Exception { String sql = null; SchemaConfig schema = schemaMap.get("TESTDB"); @@ -174,7 +172,7 @@ public void testMoreGlobalTableroute() throws Exception { schema = schemaMap.get("TESTDB"); rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, null, cachePool); Assert.assertEquals(1, rrs.getNodes().length); - Assert.assertEquals(true, rrs.isCacheAble()); + Assert.assertEquals(false, rrs.isCacheAble()); // 全局表涉及到多个节点时,不缓存路由结果 } @@ -344,7 +342,6 @@ public void testroute() throws Exception { SchemaConfig schema = schemaMap.get("cndb"); String sql = "select * from independent where member='abc'"; - RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, 1, sql, null, null, cachePool); Assert.assertEquals(true, rrs.isCacheAble()); @@ -408,7 +405,7 @@ public void testERroute() throws Exception { } Assert.assertEquals( true, - err.startsWith("parent relation column can't be updated ORDERS->CUSTOMER_ID")); + err.startsWith("Parent relevant column can't be updated ORDERS->CUSTOMER_ID")); // route by parent rule ,update sql sql = "update orders set id=1 ,name='aaa' where customer_id=2000001"; @@ -537,7 +534,7 @@ public 
void testModifySQLLimit() throws Exception { String sql = null; RouteResultset rrs = null; - //SQL span multi datanode + //SQL span multi datanode sql = "select * from orders limit 2,3"; rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); Assert.assertEquals(true, rrs.isCacheAble()); @@ -614,7 +611,7 @@ public void testGroupLimit() throws Exception { public void testTableMetaRead() throws Exception { final SchemaConfig schema = schemaMap.get("cndb"); - String sql = "desc offer"; + String sql = " desc offer"; RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.DESCRIBE, sql, null, null, cachePool); Assert.assertEquals(false, rrs.isCacheAble()); @@ -795,6 +792,17 @@ public void testNonPartitionSQL() throws Exception { SchemaConfig schema = schemaMap.get("cndb"); String sql = null; RouteResultset rrs = null; + + schema = schemaMap.get("dubbo2"); + sql = "SHOW TABLES from db_name like 'solo'"; + rrs = routeStrategy.route(new SystemConfig(), schema, 9, sql, null, null, cachePool); + Assert.assertEquals(false, rrs.isCacheAble()); + Assert.assertEquals(-1L, rrs.getLimitSize()); + Assert.assertEquals(1, rrs.getNodes().length); + Assert.assertEquals("dn1", rrs.getNodes()[0].getName()); + Assert.assertEquals("SHOW TABLES like 'solo'", + rrs.getNodes()[0].getStatement()); + schema = schemaMap.get("dubbo"); sql = "SHOW TABLES from db_name like 'solo'"; rrs = routeStrategy.route(new SystemConfig(), schema, 9, sql, null, null, cachePool); @@ -805,6 +813,8 @@ public void testNonPartitionSQL() throws Exception { Assert.assertEquals("SHOW TABLES like 'solo'", rrs.getNodes()[0].getStatement()); + + sql = "desc cndb.offer"; rrs = routeStrategy.route(new SystemConfig(), schema, 1, sql, null, null, cachePool); Assert.assertEquals(false, rrs.isCacheAble()); @@ -927,7 +937,7 @@ public void testInsertOnDuplicateKey() throws Exception { Assert.assertEquals(1, rrs.getNodes().length); Assert.assertEquals("dn1", 
rrs.getNodes()[0].getName()); - //insert ... on duplicate key ,partion key can't be updated + //insert ... on duplicate key ,sharding key can't be updated sql = "insert into employee (id,name,sharding_id) values(1,'testonly',10000) " + "on duplicate key update name=VALUES(name),id = VALUES(id),sharding_id = VALUES(sharding_id)"; @@ -935,7 +945,7 @@ public void testInsertOnDuplicateKey() throws Exception { rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); } catch (Exception e) { - Assert.assertEquals("partion key can't be updated: EMPLOYEE -> SHARDING_ID", e.getMessage()); + Assert.assertEquals("Sharding column can't be updated: EMPLOYEE -> SHARDING_ID", e.getMessage()); } @@ -961,7 +971,7 @@ public void testAggregateExpr() throws Exception { rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); Assert.assertTrue(rrs.getMergeCols().containsKey("c")); } - + /** * 测试between语句的路由 * @@ -974,7 +984,7 @@ public void testBetweenExpr() throws Exception { // 400M1-600M=2 // 600M1-800M=3 // 800M1-1000M=4 - + SchemaConfig schema = schemaMap.get("TESTDB"); String sql = "select * from customer where id between 1 and 5;"; RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); @@ -984,24 +994,24 @@ public void testBetweenExpr() throws Exception { sql = "select * from customer where id between 1 and 2000001;"; rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); Assert.assertTrue(rrs.getNodes().length == 2); - + sql = "select * from customer where id between 2000001 and 3000001;"; rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); Assert.assertTrue(rrs.getNodes().length == 1); Assert.assertTrue(rrs.getNodes()[0].getName().equals("dn2")); - + sql = "delete from customer where id between 2000001 and 3000001;"; rrs = 
routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); Assert.assertTrue(rrs.getNodes().length == 1); Assert.assertTrue(rrs.getNodes()[0].getName().equals("dn2")); - + sql = "update customer set name='newName' where id between 2000001 and 3000001;"; rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); Assert.assertTrue(rrs.getNodes().length == 1); Assert.assertTrue(rrs.getNodes()[0].getName().equals("dn2")); - + } - + /** * 测试or语句的路由 * @@ -1014,7 +1024,7 @@ public void testOr() throws Exception { // 400M1-600M=2 // 600M1-800M=3 // 800M1-1000M=4 - + SchemaConfig schema = schemaMap.get("TESTDB"); String sql = "select * from customer where sharding_id=10000 or 1=1;"; RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); @@ -1026,13 +1036,13 @@ public void testOr() throws Exception { rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); Assert.assertTrue(rrs.getNodes()[0].getName().equals("dn1")); Assert.assertTrue(rrs.getNodes()[1].getName().equals("dn2")); - + sql = "select * from customer where sharding_id = 10000 or user_id = 'wangwu'"; rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); Assert.assertTrue(rrs.getNodes()[0].getName().equals("dn1")); Assert.assertTrue(rrs.getNodes()[1].getName().equals("dn2")); } - + /** * 测试父子表,查询子表的语句路由到多个节点 * @throws Exception @@ -1046,7 +1056,7 @@ public void testERRouteMutiNode() throws Exception { Assert.assertTrue(rrs.getNodes()[0].getName().equals("dn1")); Assert.assertTrue(rrs.getNodes()[1].getName().equals("dn2")); } - + /** * 测试多层or语句 * @@ -1061,22 +1071,68 @@ public void testMultiLevelOr() throws Exception { RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); Assert.assertTrue(rrs.getNodes().length == 1); 
- + sql = "select id from travelrecord " + " where id = 1 and ( fee=3 or days=5 or (traveldate = '2015-05-04 00:00:07.375' " + " and (user_id=2 or fee=days or fee = 0))) and name = 'zhangsan' or id = 2000001" ; rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); - + Assert.assertTrue(rrs.getNodes().length == 2); - + sql = "select id from travelrecord " + " where id = 1 and ( fee=3 or days=5 or (traveldate = '2015-05-04 00:00:07.375' " + " and (user_id=2 or fee=days or fee = 0))) and name = 'zhangsan' or id = 2000001 or id = 4000001" ; rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); - + Assert.assertTrue(rrs.getNodes().length == 3); } + + /** + * 测试 global table 的or语句 + * + * + * @throws Exception + */ + @Test + public void testGlobalTableOr() throws Exception { + SchemaConfig schema = schemaMap.get("TESTDB"); + String sql = "select id from company where 1 = 1 and name ='company1' or name = 'company2'" ; + for(int i = 0; i < 20; i++) { + RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.SELECT, sql, null, null, cachePool); + Assert.assertTrue(rrs.getNodes().length == 1); + } + } + + /** + * 测试别名路由 + * + * @throws Exception + */ + public void testAlias() throws Exception { + + SchemaConfig schema = schemaMap.get("TESTDB"); + RouteResultset rrs = null; + //不支持childtable 批量插入 + //update 全局表 + String sql = "update company a set name = '' where a.id = 1;"; + rrs = routeStrategy.route(new SystemConfig(), schema, 1, sql, null, null, + cachePool); + Assert.assertEquals(3, rrs.getNodes().length); + + //update带别名时的路由 + sql = "update travelrecord a set name = '' where a.id = 1;"; + rrs = routeStrategy.route(new SystemConfig(), schema, 1, sql, null, null, + cachePool); + Assert.assertEquals(1, rrs.getNodes().length); + + //别名大小写路由 + sql = "select * from travelrecord A where a.id = 1;"; + rrs = routeStrategy.route(new SystemConfig(), schema, 1, 
sql, null, null, + cachePool); + Assert.assertEquals(1, rrs.getNodes().length); + } + private String formatSql(String sql) { MySqlStatementParser parser = new MySqlStatementParser(sql); SQLStatement stmt = parser.parseStatement(); diff --git a/src/test/java/io/mycat/route/DruidMysqlSqlParserTest.java b/src/test/java/io/mycat/route/DruidMysqlSqlParserTest.java index 128cf89c5..b41eeef86 100644 --- a/src/test/java/io/mycat/route/DruidMysqlSqlParserTest.java +++ b/src/test/java/io/mycat/route/DruidMysqlSqlParserTest.java @@ -1,28 +1,35 @@ package io.mycat.route; -import io.mycat.SimpleCachePool; -import io.mycat.cache.LayerCachePool; -import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; - import java.sql.SQLNonTransientException; import java.util.Map; -import junit.framework.Assert; - import org.junit.Test; +import io.mycat.MycatServer; +import io.mycat.SimpleCachePool; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.factory.RouteStrategyFactory; +import io.mycat.server.parser.ServerParse; +import junit.framework.Assert; + public class DruidMysqlSqlParserTest { protected Map schemaMap; protected LayerCachePool cachePool = new SimpleCachePool(); - protected RouteStrategy routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); + protected RouteStrategy routeStrategy; public DruidMysqlSqlParserTest() { - ConfigInitializer confInit = new ConfigInitializer(true); - schemaMap = confInit.getSchemas(); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); + 
MycatServer.getInstance().getConfig().getSchemas().putAll(schemaMap); + RouteStrategyFactory.init(); + routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); } @Test @@ -46,7 +53,7 @@ public void testLimitPage() throws SQLNonTransientException { Assert.assertEquals(15, rrs.getNodes()[0].getLimitSize()); Assert.assertEquals(0, rrs.getLimitStart()); Assert.assertEquals(15, rrs.getLimitSize()); - + sql="select * from offer1 order by id desc limit 5,10" ; rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, null, cachePool); @@ -67,10 +74,15 @@ public void testLimitPage() throws SQLNonTransientException { Assert.assertEquals(0, rrs.getNodes()[0].getLimitStart()); Assert.assertEquals(10, rrs.getNodes()[0].getLimitSize()); Assert.assertEquals("dn1", rrs.getNodes()[0].getName()); - } - + @Test + public void testLockTableSql() throws SQLNonTransientException{ + String sql = "lock tables goods write"; + SchemaConfig schema = schemaMap.get("TESTDB"); + RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, ServerParse.LOCK, sql, null, null, cachePool); + Assert.assertEquals(3, rrs.getNodes().length); + } } diff --git a/src/test/java/io/mycat/route/DruidOracleSqlParserTest.java b/src/test/java/io/mycat/route/DruidOracleSqlParserTest.java index 597e71662..f5fd17f9e 100644 --- a/src/test/java/io/mycat/route/DruidOracleSqlParserTest.java +++ b/src/test/java/io/mycat/route/DruidOracleSqlParserTest.java @@ -1,29 +1,35 @@ package io.mycat.route; -import io.mycat.SimpleCachePool; -import io.mycat.cache.LayerCachePool; -import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; - import java.sql.SQLNonTransientException; import java.util.Map; -import junit.framework.Assert; - import org.junit.Test; +import io.mycat.MycatServer; +import io.mycat.SimpleCachePool; +import 
io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.factory.RouteStrategyFactory; +import junit.framework.Assert; + public class DruidOracleSqlParserTest { protected Map schemaMap; protected LayerCachePool cachePool = new SimpleCachePool(); - protected RouteStrategy routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); + protected RouteStrategy routeStrategy; public DruidOracleSqlParserTest() { - ConfigInitializer confInit = new ConfigInitializer(true); - schemaMap = confInit.getSchemas(); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); + MycatServer.getInstance().getConfig().getSchemas().putAll(schemaMap); + RouteStrategyFactory.init(); + routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); } @Test @@ -75,7 +81,7 @@ public void testLimitToOraclePage() throws SQLNonTransientException { Assert.assertEquals(15, rrs.getNodes()[0].getLimitSize()); Assert.assertEquals(0, rrs.getLimitStart()); Assert.assertEquals(15, rrs.getLimitSize()); - + sql="select * from offer1 order by id desc limit 5,10" ; rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, null, cachePool); diff --git a/src/test/java/io/mycat/route/DruidPostgresqlSqlParserTest.java b/src/test/java/io/mycat/route/DruidPostgresqlSqlParserTest.java index 248b469c9..8ae49dcf9 100644 --- a/src/test/java/io/mycat/route/DruidPostgresqlSqlParserTest.java +++ b/src/test/java/io/mycat/route/DruidPostgresqlSqlParserTest.java @@ -1,28 +1,34 @@ package io.mycat.route; -import io.mycat.SimpleCachePool; -import io.mycat.cache.LayerCachePool; -import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.config.loader.ConfigInitializer; 
-import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; - import java.sql.SQLNonTransientException; import java.util.Map; -import junit.framework.Assert; - import org.junit.Test; +import io.mycat.MycatServer; +import io.mycat.SimpleCachePool; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.factory.RouteStrategyFactory; +import junit.framework.Assert; + public class DruidPostgresqlSqlParserTest { protected Map schemaMap; protected LayerCachePool cachePool = new SimpleCachePool(); - protected RouteStrategy routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); + protected RouteStrategy routeStrategy; public DruidPostgresqlSqlParserTest() { - ConfigInitializer confInit = new ConfigInitializer(true); - schemaMap = confInit.getSchemas(); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); + MycatServer.getInstance().getConfig().getSchemas().putAll(schemaMap); + RouteStrategyFactory.init(); + routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); } @Test @@ -44,7 +50,7 @@ public void testLimitToPgPage() throws SQLNonTransientException { Assert.assertEquals(15, rrs.getNodes()[0].getLimitSize()); Assert.assertEquals(0, rrs.getLimitStart()); Assert.assertEquals(15, rrs.getLimitSize()); - + sql="select * from offer1 order by id desc limit 5,10" ; rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, null, cachePool); diff --git a/src/test/java/io/mycat/route/DruidSqlServerSqlParserTest.java b/src/test/java/io/mycat/route/DruidSqlServerSqlParserTest.java index e68f77e87..eb23d8830 100644 --- 
a/src/test/java/io/mycat/route/DruidSqlServerSqlParserTest.java +++ b/src/test/java/io/mycat/route/DruidSqlServerSqlParserTest.java @@ -1,28 +1,34 @@ package io.mycat.route; -import io.mycat.SimpleCachePool; -import io.mycat.cache.LayerCachePool; -import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; - import java.sql.SQLNonTransientException; import java.util.Map; -import junit.framework.Assert; - import org.junit.Test; +import io.mycat.MycatServer; +import io.mycat.SimpleCachePool; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.factory.RouteStrategyFactory; +import junit.framework.Assert; + public class DruidSqlServerSqlParserTest { protected Map schemaMap; protected LayerCachePool cachePool = new SimpleCachePool(); - protected RouteStrategy routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); + protected RouteStrategy routeStrategy; public DruidSqlServerSqlParserTest() { - ConfigInitializer confInit = new ConfigInitializer(true); - schemaMap = confInit.getSchemas(); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); + MycatServer.getInstance().getConfig().getSchemas().putAll(schemaMap); + RouteStrategyFactory.init(); + routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); } @Test @@ -47,7 +53,7 @@ public void testLimitToSqlServerPage() throws SQLNonTransientException { Assert.assertEquals(15, rrs.getNodes()[0].getLimitSize()); Assert.assertEquals(0, rrs.getLimitStart()); Assert.assertEquals(15, rrs.getLimitSize()); - + sql="select * from 
offer1 order by id desc limit 5,10" ; rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, null, cachePool); @@ -151,8 +157,7 @@ public void testSqlServerPageSQL() throws SQLNonTransientException { @Test - public void testTopPageSQL() throws SQLNonTransientException - { + public void testTopPageSQL() throws SQLNonTransientException { SchemaConfig schema = schemaMap.get("sqlserverdb"); RouteResultset rrs = null; diff --git a/src/test/java/io/mycat/route/HintDBTypeTest.java b/src/test/java/io/mycat/route/HintDBTypeTest.java new file mode 100644 index 000000000..0d1303522 --- /dev/null +++ b/src/test/java/io/mycat/route/HintDBTypeTest.java @@ -0,0 +1,64 @@ +package io.mycat.route; + +import io.mycat.MycatServer; +import io.mycat.SimpleCachePool; +import io.mycat.cache.CacheService; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.factory.RouteStrategyFactory; +import io.mycat.server.parser.ServerParse; + +import java.util.Map; + +import junit.framework.Assert; + +import org.junit.Test; + +public class HintDBTypeTest { + protected Map schemaMap; + protected LayerCachePool cachePool = new SimpleCachePool(); + protected RouteStrategy routeStrategy; + + public HintDBTypeTest() { + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); + MycatServer.getInstance().getConfig().getSchemas().putAll(schemaMap); + RouteStrategyFactory.init(); + routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); + } + /** + * 测试注解 + * + * @throws Exception + */ + @Test + public void testHint() throws Exception { + SchemaConfig schema = schemaMap.get("TESTDB"); + //使用注解(新注解,/*!mycat*/),runOnSlave=false 强制走主节点 + String sql = 
"/*!mycat:db_type=master*/select * from employee where sharding_id=1"; + CacheService cacheService = new CacheService(); + RouteService routerService = new RouteService(cacheService); + RouteResultset rrs = routerService.route(new SystemConfig(), schema, ServerParse.SELECT, sql, "UTF-8", null); + Assert.assertTrue(!rrs.getRunOnSlave()); + + //使用注解(新注解,/*#mycat*/),runOnSlave=false 强制走主节点 + sql = "/*#mycat:db_type=master*/select * from employee where sharding_id=1"; + rrs = routerService.route(new SystemConfig(), schema, ServerParse.SELECT, sql, "UTF-8", null); + Assert.assertTrue(!rrs.getRunOnSlave()); + + //使用注解(新注解,/*mycat*/),runOnSlave=false 强制走主节点 + sql = "/*mycat:db_type=master*/select * from employee where sharding_id=1"; + rrs = routerService.route(new SystemConfig(), schema, ServerParse.SELECT, sql, "UTF-8", null); + Assert.assertTrue(!rrs.getRunOnSlave()); + + //不使用注解,runOnSlave=null, 根据读写分离策略走主从库 + sql = "select * from employee where sharding_id=1"; + rrs = routerService.route(new SystemConfig(), schema, ServerParse.SELECT, sql, "UTF-8", null); + Assert.assertTrue(rrs.getRunOnSlave()==null); + } +} diff --git a/src/test/java/io/mycat/route/HintTest.java b/src/test/java/io/mycat/route/HintTest.java index 2bcc35c21..1c33794c8 100644 --- a/src/test/java/io/mycat/route/HintTest.java +++ b/src/test/java/io/mycat/route/HintTest.java @@ -1,28 +1,38 @@ package io.mycat.route; -import io.mycat.SimpleCachePool; -import io.mycat.cache.CacheService; -import io.mycat.cache.LayerCachePool; -import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; -import io.mycat.server.parser.ServerParse; - import java.util.Map; import junit.framework.Assert; import org.junit.Test; +import io.mycat.MycatServer; +import io.mycat.SimpleCachePool; +import io.mycat.cache.CacheService; +import io.mycat.cache.LayerCachePool; +import 
io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.RouteResultset; +import io.mycat.route.RouteService; +import io.mycat.route.RouteStrategy; +import io.mycat.route.factory.RouteStrategyFactory; +import io.mycat.server.parser.ServerParse; + public class HintTest { protected Map schemaMap; protected LayerCachePool cachePool = new SimpleCachePool(); - protected RouteStrategy routeStrategy = RouteStrategyFactory.getRouteStrategy("fdbparser"); + protected RouteStrategy routeStrategy; public HintTest() { - ConfigInitializer confInit = new ConfigInitializer(true); - schemaMap = confInit.getSchemas(); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); + MycatServer.getInstance().getConfig().getSchemas().putAll(schemaMap); + RouteStrategyFactory.init(); + routeStrategy = RouteStrategyFactory.getRouteStrategy("fdbparser"); } /** * 测试注解 @@ -32,7 +42,7 @@ public HintTest() { @Test public void testHint() throws Exception { SchemaConfig schema = schemaMap.get("TESTDB"); - //使用注解(新注解,/*后面没有空格),路由到1个节点 + //使用注解(新注解,/*后面没有空格),路由到1个节点 String sql = "/*!mycat: sql = select * from employee where sharding_id = 10010 */select * from employee"; CacheService cacheService = new CacheService(); RouteService routerService = new RouteService(cacheService); @@ -43,7 +53,7 @@ public void testHint() throws Exception { sql = "/*#mycat: sql = select * from employee where sharding_id = 10000 */select * from employee"; rrs = routerService.route(new SystemConfig(), schema, ServerParse.SELECT, sql, "UTF-8", null); Assert.assertTrue(rrs.getNodes().length == 1); - + //不用注解,路由到2个节点 sql = "select * from employee"; rrs = routerService.route(new SystemConfig(), schema, ServerParse.SELECT, sql, "UTF-8", null); diff 
--git a/src/test/java/io/mycat/route/TestSelectBetweenSqlParser.java b/src/test/java/io/mycat/route/TestSelectBetweenSqlParser.java index 8d4fa525b..869365520 100644 --- a/src/test/java/io/mycat/route/TestSelectBetweenSqlParser.java +++ b/src/test/java/io/mycat/route/TestSelectBetweenSqlParser.java @@ -1,12 +1,6 @@ package io.mycat.route; -import io.mycat.SimpleCachePool; -import io.mycat.cache.LayerCachePool; -import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; - +import java.io.IOException; import java.sql.SQLNonTransientException; import java.util.Map; @@ -14,9 +8,19 @@ import org.junit.Test; +import io.mycat.MycatServer; +import io.mycat.SimpleCachePool; +import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; +import io.mycat.route.factory.RouteStrategyFactory; +import io.mycat.server.ServerConnection; + /** * 修改内容 - * + * * @author lxy * */ @@ -25,37 +29,42 @@ public class TestSelectBetweenSqlParser { protected LayerCachePool cachePool = new SimpleCachePool(); public TestSelectBetweenSqlParser() { - ConfigInitializer confInit = new ConfigInitializer(true); - schemaMap = confInit.getSchemas(); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); + MycatServer.getInstance().getConfig().getSchemas().putAll(schemaMap); + RouteStrategyFactory.init(); } @Test - public void testBetweenSqlRoute() throws SQLNonTransientException { + public void testBetweenSqlRoute() throws SQLNonTransientException, IOException { String sql = "select * from offer_detail where offer_id between 1 and 33"; SchemaConfig schema = 
schemaMap.get("cndb"); RouteResultset rrs = RouteStrategyFactory.getRouteStrategy().route(new SystemConfig(),schema, -1, sql, null, null, cachePool); Assert.assertEquals(5, rrs.getNodes().length); - + sql = "select * from offer_detail where col_1 = 33 and offer_id between 1 and 33 and col_2 = 18"; schema = schemaMap.get("cndb"); rrs = RouteStrategyFactory.getRouteStrategy().route(new SystemConfig(),schema, -1, sql, null, null, cachePool); Assert.assertEquals(5, rrs.getNodes().length); - + // sql = "select b.* from offer_date b join offer_detail a on a.id=b.id " + // "where b.col_date between '2014-02-02' and '2014-04-12' and col_1 = 3 and offer_id between 1 and 33"; - - + + sql = "select b.* from offer_detail a join offer_date b on a.id=b.id " + "where b.col_date between '2014-02-02' and '2014-04-12' and col_1 = 3 and offer_id between 1 and 33"; // sql = "select a.* from offer_detail a join offer_date b on a.id=b.id " + // "where b.col_date = '2014-04-02' and col_1 = 33 and offer_id =1"; schema = schemaMap.get("cndb"); - rrs = RouteStrategyFactory.getRouteStrategy().route(new SystemConfig(),schema, -1, sql, null, - null, cachePool); - Assert.assertEquals(2, rrs.getNodes().length); //这里2个表都有条件路由,取的是交集 - + // 两个路由规则不一样的表现在 走catlet. 不再取交集, catlet 测试时需要前端连接.这里注释掉. 
+// rrs = RouteStrategyFactory.getRouteStrategy().route(new SystemConfig(),schema, -1, sql, null, +// null, cachePool); +// Assert.assertEquals(2, rrs.getNodes().length); //这里2个表都有条件路由,取的是交集, + //确认大于小于操作符 sql = "select b.* from offer_date b " + "where b.col_date > '2014-02-02'"; @@ -65,7 +74,7 @@ public void testBetweenSqlRoute() throws SQLNonTransientException { rrs = RouteStrategyFactory.getRouteStrategy().route(new SystemConfig(),schema, -1, sql, null, null, cachePool); Assert.assertEquals(128, rrs.getNodes().length); - + sql = "select * from offer_date where col_1 = 33 and col_date between '2014-01-02' and '2014-01-12'"; schema = schemaMap.get("cndb"); rrs = RouteStrategyFactory.getRouteStrategy().route(new SystemConfig(),schema, -1, sql, null, diff --git a/src/test/java/io/mycat/route/function/AutoPartitionByLongTest.java b/src/test/java/io/mycat/route/function/AutoPartitionByLongTest.java index 5849ba740..a5d76cc46 100644 --- a/src/test/java/io/mycat/route/function/AutoPartitionByLongTest.java +++ b/src/test/java/io/mycat/route/function/AutoPartitionByLongTest.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,19 +16,13 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. * */ package io.mycat.route.function; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.RuleConfig; - -import java.util.Map; -import java.util.Set; - import junit.framework.Assert; import org.junit.Test; @@ -36,32 +30,23 @@ public class AutoPartitionByLongTest { @Test - public void test() - { - ConfigInitializer confInit = new ConfigInitializer(true); - Map ruleConfigs = confInit.getTableRules(); - Set sets = ruleConfigs.keySet(); - for(String ruleStr : sets){ - if(ruleConfigs.get(ruleStr).getFunctionName().indexOf("AutoPartitionByLong")!=-1){ - AbstractPartitionAlgorithm autoPartition = ruleConfigs.get(ruleStr).getRuleAlgorithm(); - autoPartition.init(); - String idVal="0"; - Assert.assertEquals(true, 0==autoPartition.calculate(idVal)); - - idVal="2000000"; - Assert.assertEquals(true, 0==autoPartition.calculate(idVal)); - - idVal="2000001"; - Assert.assertEquals(true, 1==autoPartition.calculate(idVal)); - - idVal="4000000"; - Assert.assertEquals(true, 1==autoPartition.calculate(idVal)); - - idVal="4000001"; - Assert.assertEquals(true, 2==autoPartition.calculate(idVal)); - } - - } - + public void test() { + AutoPartitionByLong autoPartition=new AutoPartitionByLong(); + autoPartition.setMapFile("autopartition-long.txt"); + autoPartition.init(); + String idVal="0"; + Assert.assertEquals(true, 0==autoPartition.calculate(idVal)); + + idVal="2000000"; + Assert.assertEquals(true, 0==autoPartition.calculate(idVal)); + + idVal="2000001"; + Assert.assertEquals(true, 1==autoPartition.calculate(idVal)); + + idVal="4000000"; + Assert.assertEquals(true, 1==autoPartition.calculate(idVal)); + + idVal="4000001"; + Assert.assertEquals(true, 2==autoPartition.calculate(idVal)); } } \ No newline at end of file diff --git 
a/src/test/java/io/mycat/route/function/PartitionByCRC32PreSlotTest.java b/src/test/java/io/mycat/route/function/PartitionByCRC32PreSlotTest.java new file mode 100644 index 000000000..fd61dd0f1 --- /dev/null +++ b/src/test/java/io/mycat/route/function/PartitionByCRC32PreSlotTest.java @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. 
+ * + */ +package io.mycat.route.function; + +import org.junit.Assert; +import org.junit.Test; + + +public class PartitionByCRC32PreSlotTest { + + @Test + public void test() { + PartitionByCRC32PreSlot partition = new PartitionByCRC32PreSlot(); + partition.setRuleName("test"); + partition.setCount(1000); + partition.reInit(); + + Assert.assertEquals(true, 521 == partition.calculate("1000316")); + Assert.assertEquals(true, 637 == partition.calculate("2")); + + + partition.setCount(2); + partition.reInit(); + + Assert.assertEquals(true, 0 == partition.calculate("1")); + Assert.assertEquals(true, 1 == partition.calculate("2")); + Assert.assertEquals(true, 0 == partition.calculate("3")); + Assert.assertEquals(true, 1 == partition.calculate("4")); + Assert.assertEquals(true, 0 == partition.calculate("5")); + Assert.assertEquals(true, 0 == partition.calculate("6")); + Assert.assertEquals(true, 0 == partition.calculate("7")); + Assert.assertEquals(true, 0 == partition.calculate("8")); + Assert.assertEquals(true, 0 == partition.calculate("9")); + + Assert.assertEquals(true, 0 == partition.calculate("9999")); + Assert.assertEquals(true, 1 == partition.calculate("123456789")); + Assert.assertEquals(true, 1 == partition.calculate("35565")); + + + partition.setCount(3); + partition.reInit(); + + Assert.assertEquals(true, 1 == partition.calculate("1")); + Assert.assertEquals(true, 1 == partition.calculate("2")); + Assert.assertEquals(true, 0 == partition.calculate("3")); + Assert.assertEquals(true, 2 == partition.calculate("4")); + Assert.assertEquals(true, 0 == partition.calculate("5")); + Assert.assertEquals(true, 1 == partition.calculate("6")); + Assert.assertEquals(true, 1 == partition.calculate("7")); + Assert.assertEquals(true, 0 == partition.calculate("8")); + Assert.assertEquals(true, 0 == partition.calculate("9")); + + Assert.assertEquals(true, 0 == partition.calculate("9999")); + Assert.assertEquals(true, 2 == partition.calculate("123456789")); + 
Assert.assertEquals(true, 2 == partition.calculate("35565")); + } + + public static void main(String[] args) { + + for (int i=0;i<20;i++) + { + int y=9; + int count=3; + long slot=i%y; + int slotSize= y/count; + + Long index = slot / slotSize; + if(slotSize*count!=y&&index>count-1) + { + index=index-1; + } + System.out.println(slot+" "+index); + } + } +} diff --git a/src/test/java/io/mycat/route/function/PartitionByDateTest.java b/src/test/java/io/mycat/route/function/PartitionByDateTest.java index 264b5a355..89afe3680 100644 --- a/src/test/java/io/mycat/route/function/PartitionByDateTest.java +++ b/src/test/java/io/mycat/route/function/PartitionByDateTest.java @@ -1,71 +1,71 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. 
- * - */ -package io.mycat.route.function; - -import org.junit.Assert; -import org.junit.Test; - -public class PartitionByDateTest { - - @Test - public void test() { - PartitionByDate partition=new PartitionByDate(); - - partition.setDateFormat("yyyy-MM-dd"); - partition.setsBeginDate("2014-01-01"); - partition.setsPartionDay("10"); - - partition.init(); - - Assert.assertEquals(true, 0 == partition.calculate("2014-01-01")); - Assert.assertEquals(true, 0 == partition.calculate("2014-01-10")); - Assert.assertEquals(true, 1 == partition.calculate("2014-01-11")); - Assert.assertEquals(true, 12 == partition.calculate("2014-05-01")); - - partition.setDateFormat("yyyy-MM-dd"); - partition.setsBeginDate("2014-01-01"); - partition.setsEndDate("2014-01-31"); - partition.setsPartionDay("10"); - partition.init(); - - /** - * 0 : 01.01-01.10,02.10-02.19 - * 1 : 01.11-01.20,02.20-03.01 - * 2 : 01.21-01.30,03.02-03.12 - * 3 : 01.31-02-09,03.13-03.23 - */ - Assert.assertEquals(true, 0 == partition.calculate("2014-01-01")); - Assert.assertEquals(true, 0 == partition.calculate("2014-01-10")); - Assert.assertEquals(true, 1 == partition.calculate("2014-01-11")); - Assert.assertEquals(true, 3 == partition.calculate("2014-02-01")); - Assert.assertEquals(true, 0 == partition.calculate("2014-02-19")); - Assert.assertEquals(true, 1 == partition.calculate("2014-02-20")); - Assert.assertEquals(true, 1 == partition.calculate("2014-03-01")); - Assert.assertEquals(true, 2 == partition.calculate("2014-03-02")); - Assert.assertEquals(true, 2 == partition.calculate("2014-03-11")); - Assert.assertEquals(true, 3 == partition.calculate("2014-03-20")); - - - } +/* + * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the + * terms of the GNU General Public License version 2 only, as published by the + * Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Any questions about this component can be directed to it's project Web address + * https://code.google.com/p/opencloudb/. + * + */ +package io.mycat.route.function; + +import org.junit.Assert; +import org.junit.Test; + +public class PartitionByDateTest { + + @Test + public void test() { + PartitionByDate partition=new PartitionByDate(); + + partition.setDateFormat("yyyy-MM-dd"); + partition.setsBeginDate("2014-01-01"); + partition.setsPartionDay("10"); + + partition.init(); + + Assert.assertEquals(true, 0 == partition.calculate("2014-01-01")); + Assert.assertEquals(true, 0 == partition.calculate("2014-01-10")); + Assert.assertEquals(true, 1 == partition.calculate("2014-01-11")); + Assert.assertEquals(true, 12 == partition.calculate("2014-05-01")); + + partition.setDateFormat("yyyy-MM-dd"); + partition.setsBeginDate("2014-01-01"); + partition.setsEndDate("2014-01-31"); + partition.setsPartionDay("10"); + partition.init(); + + /** + * 0 : 01.01-01.10,02.10-02.19 + * 1 : 01.11-01.20,02.20-03.01 + * 2 : 01.21-01.30,03.02-03.12 + * 3 : 01.31-02-09,03.13-03.23 + */ + Assert.assertEquals(true, 0 == partition.calculate("2014-01-01")); + Assert.assertEquals(true, 0 == partition.calculate("2014-01-10")); + Assert.assertEquals(true, 1 == 
partition.calculate("2014-01-11")); + Assert.assertEquals(true, 3 == partition.calculate("2014-02-01")); + Assert.assertEquals(true, 0 == partition.calculate("2014-02-19")); + Assert.assertEquals(true, 1 == partition.calculate("2014-02-20")); + Assert.assertEquals(true, 1 == partition.calculate("2014-03-01")); + Assert.assertEquals(true, 2 == partition.calculate("2014-03-02")); + Assert.assertEquals(true, 2 == partition.calculate("2014-03-11")); + Assert.assertEquals(true, 3 == partition.calculate("2014-03-20")); + + + } } \ No newline at end of file diff --git a/src/test/java/io/mycat/route/function/PartitionByHashModTest.java b/src/test/java/io/mycat/route/function/PartitionByHashModTest.java new file mode 100644 index 000000000..644610f44 --- /dev/null +++ b/src/test/java/io/mycat/route/function/PartitionByHashModTest.java @@ -0,0 +1,62 @@ +package io.mycat.route.function; + +import junit.framework.Assert; +import org.junit.Test; + +import java.util.Random; +import java.util.concurrent.CountDownLatch; + +/** + * 哈希值取模单元测试 + * + * @author Hash Zhang + */ +public class PartitionByHashModTest { + String allChar = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; + @Test + public void test() throws InterruptedException { + CountDownLatch countDownLatch = new CountDownLatch(1); + Task task1 = new Task(countDownLatch,63); + Task task2 = new Task(countDownLatch,64); + + task1.start(); + task2.start(); + countDownLatch.countDown(); + task1.join(); + task2.join(); + } + + private class Task extends Thread{ + CountDownLatch countDownLatch; + int count; + + public Task(CountDownLatch countDownLatch,int count) { + this.countDownLatch = countDownLatch; + this.count = count; + } + + @Override + public void run() { + try { + countDownLatch.await(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + PartitionByHashMod partitionByHashMod = new PartitionByHashMod(); + partitionByHashMod.setCount(count); + Random random = new Random(); + StringBuffer 
sb = new StringBuffer(); + long start = System.currentTimeMillis(); + for (int i = 0; i < 1000000; i++) { + for (int j = 0; j < 32; j++) { + sb.append(allChar.charAt(random.nextInt(allChar.length()))); + } + int result = partitionByHashMod.calculate(sb.toString()); + sb = new StringBuffer(); + Assert.assertTrue(0<=result && result ruleConfigs = confInit.getTableRules(); - Set sets = ruleConfigs.keySet(); - for(String ruleStr : sets){ - if(ruleConfigs.get(ruleStr).getFunctionName().indexOf("PartitionByPattern")!=-1){ - AbstractPartitionAlgorithm autoPartition = ruleConfigs.get(ruleStr).getRuleAlgorithm(); - //autoPartition.setPatternValue(256); - //autoPartition.setDefaultNode(2); - autoPartition.init(); - String idVal = "0"; - Assert.assertEquals(true, 7 == autoPartition.calculate(idVal)); - idVal = "45a"; - Assert.assertEquals(true, 2 == autoPartition.calculate(idVal)); - } + PartitionByPattern autoPartition = new PartitionByPattern(); + autoPartition.setPatternValue(256); + autoPartition.setDefaultNode(2); + autoPartition.setMapFile("partition-pattern.txt"); + autoPartition.init(); + String idVal = "0"; + Assert.assertEquals(true, 7 == autoPartition.calculate(idVal)); + idVal = "45a"; + Assert.assertEquals(true, 2 == autoPartition.calculate(idVal)); - } } } \ No newline at end of file diff --git a/src/test/java/io/mycat/route/function/PartitionByPrefixPatternTest.java b/src/test/java/io/mycat/route/function/PartitionByPrefixPatternTest.java index 0b3315e11..bf1ca00f0 100644 --- a/src/test/java/io/mycat/route/function/PartitionByPrefixPatternTest.java +++ b/src/test/java/io/mycat/route/function/PartitionByPrefixPatternTest.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. 
you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,19 +16,13 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. * */ package io.mycat.route.function; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.RuleConfig; - -import java.util.Map; -import java.util.Set; - import junit.framework.Assert; import org.junit.Test; @@ -36,33 +30,27 @@ public class PartitionByPrefixPatternTest { @Test - public void test() - { + public void test() { /** * ASCII编码: * 48-57=0-9阿拉伯数字 - * 64、65-90=@、A-Z + * 64、65-90=@、A-Z * 97-122=a-z - * + * */ - ConfigInitializer confInit = new ConfigInitializer(true); - Map ruleConfigs = confInit.getTableRules(); - Set sets = ruleConfigs.keySet(); - for(String ruleStr : sets){ - if(ruleConfigs.get(ruleStr).getFunctionName().indexOf("PartitionByPrefixPattern")!=-1){ - AbstractPartitionAlgorithm autoPartition = ruleConfigs.get(ruleStr).getRuleAlgorithm(); - //autoPartition.setPatternValue(32); - //autoPartition.setDefaultNode(5); - String idVal="gf89f9a"; - Assert.assertEquals(true, 0==autoPartition.calculate(idVal)); - - idVal="8df99a"; - Assert.assertEquals(true, 4==autoPartition.calculate(idVal)); - - idVal="8dhdf99a"; - Assert.assertEquals(true, 3==autoPartition.calculate(idVal)); - } - - } + PartitionByPrefixPattern autoPartition=new PartitionByPrefixPattern(); + 
autoPartition.setPatternValue(32); + autoPartition.setPrefixLength(5); + autoPartition.setMapFile("partition_prefix_pattern.txt"); + autoPartition.init(); + + String idVal="gf89f9a"; + Assert.assertEquals(true, 0==autoPartition.calculate(idVal)); + + idVal="8df99a"; + Assert.assertEquals(true, 4==autoPartition.calculate(idVal)); + + idVal="8dhdf99a"; + Assert.assertEquals(true, 3==autoPartition.calculate(idVal)); } } \ No newline at end of file diff --git a/src/test/java/io/mycat/route/function/PartitionByRangeDateHashTest.java b/src/test/java/io/mycat/route/function/PartitionByRangeDateHashTest.java index e59efcbbf..a2ea77246 100644 --- a/src/test/java/io/mycat/route/function/PartitionByRangeDateHashTest.java +++ b/src/test/java/io/mycat/route/function/PartitionByRangeDateHashTest.java @@ -1,13 +1,19 @@ package io.mycat.route.function; +import com.google.common.hash.Hashing; + import io.mycat.SimpleCachePool; import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; import io.mycat.route.RouteResultset; import io.mycat.route.RouteStrategy; import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; + +import org.junit.Assert; +import org.junit.Test; import java.sql.SQLNonTransientException; import java.text.ParseException; @@ -17,17 +23,11 @@ import java.util.HashMap; import java.util.Map; -import org.junit.Assert; -import org.junit.Test; - -import com.google.common.hash.Hashing; - public class PartitionByRangeDateHashTest { @Test - public void test() throws ParseException - { + public void test() throws ParseException { PartitionByRangeDateHash partition = new PartitionByRangeDateHash(); partition.setDateFormat("yyyy-MM-dd HH:mm:ss"); @@ -57,8 +57,8 @@ 
public void test() throws ParseException for (int i = 0; i < 60*60*24*3-1; i++) { - cal.add(Calendar.SECOND, 1); - int v = partition.calculate(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(cal.getTime())) ; + cal.add(Calendar.SECOND, 1); + int v= partition.calculate(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(cal.getTime())) ; Assert.assertTrue(v<6); } @@ -72,13 +72,14 @@ public void test() throws ParseException protected RouteStrategy routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); public PartitionByRangeDateHashTest() { - ConfigInitializer confInit = new ConfigInitializer(true); - schemaMap = confInit.getSchemas(); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); } @Test - public void testRange() throws SQLNonTransientException - { + public void testRange() throws SQLNonTransientException { String sql = "select * from offer1 where col_date between '2014-01-01 00:00:00' and '2014-01-03 23:59:59' order by id desc limit 100"; SchemaConfig schema = schemaMap.get("TESTDB"); RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, diff --git a/src/test/java/io/mycat/route/function/PartitionByRangeModTest.java b/src/test/java/io/mycat/route/function/PartitionByRangeModTest.java index bd9a24051..55721fc4f 100644 --- a/src/test/java/io/mycat/route/function/PartitionByRangeModTest.java +++ b/src/test/java/io/mycat/route/function/PartitionByRangeModTest.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. 
you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,72 +16,62 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. * */ package io.mycat.route.function; +import junit.framework.Assert; +import org.junit.Test; + import io.mycat.SimpleCachePool; import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; import io.mycat.route.RouteResultset; import io.mycat.route.RouteStrategy; import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.RuleConfig; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; import java.math.BigInteger; import java.sql.SQLNonTransientException; import java.util.Map; -import java.util.Set; - -import junit.framework.Assert; - -import org.junit.Test; public class PartitionByRangeModTest { @Test - public void test() - { - ConfigInitializer confInit = new ConfigInitializer(true); - Map ruleConfigs = confInit.getTableRules(); - Set sets = ruleConfigs.keySet(); - for(String ruleStr : sets){ - if(ruleConfigs.get(ruleStr).getFunctionName().indexOf("PartitionByRangeMod")!=-1){ - AbstractPartitionAlgorithm autoPartition = 
ruleConfigs.get(ruleStr).getRuleAlgorithm(); - autoPartition.init(); - String idVal = "0"; - Assert.assertEquals(true, 0 == autoPartition.calculate(idVal)); - idVal = "1"; - Assert.assertEquals(true, 1 == autoPartition.calculate(idVal)); - idVal = "2"; - Assert.assertEquals(true, 2 == autoPartition.calculate(idVal)); - idVal = "3"; - Assert.assertEquals(true, 3 == autoPartition.calculate(idVal)); - idVal = "4"; - Assert.assertEquals(true, 4 == autoPartition.calculate(idVal)); - idVal = "5"; - Assert.assertEquals(true, 0 == autoPartition.calculate(idVal)); - - idVal="2000000"; - Assert.assertEquals(true, 0==autoPartition.calculate(idVal)); - - idVal="2000001"; - Assert.assertEquals(true, 5==autoPartition.calculate(idVal)); - - idVal="4000000"; - Assert.assertEquals(true, 5==autoPartition.calculate(idVal)); - - idVal="4000001"; - Assert.assertEquals(true, 7==autoPartition.calculate(idVal)); - } - - } + public void test() { + PartitionByRangeMod autoPartition = new PartitionByRangeMod(); + autoPartition.setMapFile("partition-range-mod.txt"); + autoPartition.init(); + String idVal = "0"; + Assert.assertEquals(true, 0 == autoPartition.calculate(idVal)); + idVal = "1"; + Assert.assertEquals(true, 1 == autoPartition.calculate(idVal)); + idVal = "2"; + Assert.assertEquals(true, 2 == autoPartition.calculate(idVal)); + idVal = "3"; + Assert.assertEquals(true, 3 == autoPartition.calculate(idVal)); + idVal = "4"; + Assert.assertEquals(true, 4 == autoPartition.calculate(idVal)); + idVal = "5"; + Assert.assertEquals(true, 0 == autoPartition.calculate(idVal)); + + idVal="2000000"; + Assert.assertEquals(true, 0==autoPartition.calculate(idVal)); + + idVal="2000001"; + Assert.assertEquals(true, 5==autoPartition.calculate(idVal)); + + idVal="4000000"; + Assert.assertEquals(true, 5==autoPartition.calculate(idVal)); + + idVal="4000001"; + Assert.assertEquals(true, 7==autoPartition.calculate(idVal)); } @@ -96,13 +86,14 @@ private static int mod(long v, int size) protected RouteStrategy 
routeStrategy = RouteStrategyFactory.getRouteStrategy("druidparser"); public PartitionByRangeModTest() { - ConfigInitializer confInit = new ConfigInitializer(true); - schemaMap = confInit.getSchemas(); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schemaMap = schemaLoader.getSchemas(); } @Test - public void testRange() throws SQLNonTransientException - { + public void testRange() throws SQLNonTransientException { String sql = "select * from offer where id between 2000000 and 4000001 order by id desc limit 100"; SchemaConfig schema = schemaMap.get("TESTDB"); RouteResultset rrs = routeStrategy.route(new SystemConfig(), schema, -1, sql, null, diff --git a/src/test/java/io/mycat/route/function/PartitionByStringTest.java b/src/test/java/io/mycat/route/function/PartitionByStringTest.java index 9cd50961c..dcfbeb3fe 100644 --- a/src/test/java/io/mycat/route/function/PartitionByStringTest.java +++ b/src/test/java/io/mycat/route/function/PartitionByStringTest.java @@ -26,6 +26,8 @@ import org.junit.Assert; import org.junit.Test; +import io.mycat.route.function.PartitionByString; + public class PartitionByStringTest { @Test diff --git a/src/test/java/io/mycat/route/function/PartitionByVelocityTest.java b/src/test/java/io/mycat/route/function/PartitionByVelocityTest.java deleted file mode 100644 index 8806f262c..000000000 --- a/src/test/java/io/mycat/route/function/PartitionByVelocityTest.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the - * terms of the GNU General Public License version 2 only, as published by the - * Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address - * https://code.google.com/p/opencloudb/. - * - */ -package io.mycat.route.function; - -import org.junit.Assert; -import org.junit.Test; - -public class PartitionByVelocityTest { - - @Test - public void test() { - PartitionByVelocity rule = new PartitionByVelocity(); - String idVal=null; - rule.setColumnName("id"); - rule.setRule("#set($Integer=0)##\r\n" - + "#set($monthday=$stringUtil.substring($id,2,8))##\r\n" - + "#set($prefix=$monthday.hashCode()%100)##\r\n" - + "$!prefix"); - rule.init(); - - idVal = "201508202330011"; - Assert.assertEquals(true, 94 == rule.calculate(idVal)); - } -} \ No newline at end of file diff --git a/src/test/java/io/mycat/route/function/RuleFunctionSuitTableTest.java b/src/test/java/io/mycat/route/function/RuleFunctionSuitTableTest.java new file mode 100644 index 000000000..80667c3f5 --- /dev/null +++ b/src/test/java/io/mycat/route/function/RuleFunctionSuitTableTest.java @@ -0,0 +1,225 @@ +package io.mycat.route.function; + +import java.util.Arrays; + +import org.junit.Assert; +import org.junit.Test; + +import io.mycat.config.model.TableConfig; +import io.mycat.config.model.rule.RuleConfig; +import io.mycat.util.SplitUtil; + +/** + * 测试分片算法定义是否符合分片表的定义, 主要测试分区数是否符合分片表分片数 + * + * @author CrazyPig + * + */ +public class RuleFunctionSuitTableTest { + + @Test + public void testAutoPartitionByLong() { + 
AutoPartitionByLong autoPartition=new AutoPartitionByLong(); + autoPartition.setMapFile("autopartition-long.txt"); + autoPartition.init(); // partition = 3 + Assert.assertEquals(3, autoPartition.getPartitionNum()); + RuleConfig rule = new RuleConfig("id", "auto-partition-long"); + rule.setRuleAlgorithm(autoPartition); + TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn1,dn2", + null, rule, true, null, false, null, null, null); + int suit1 = autoPartition.suitableFor(tableConf); + Assert.assertEquals(-1, suit1); + + tableConf.getDataNodes().clear(); + tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3")); + + int suit2 = autoPartition.suitableFor(tableConf); + Assert.assertEquals(0, suit2); + + tableConf.getDataNodes().clear(); + tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3", "dn4")); + int suit3 = autoPartition.suitableFor(tableConf); + Assert.assertEquals(1, suit3); + + /* + * autopartition-long-dupl.txt + * 0-1000=0 + * 1001-2000=1 + * 2001-3000=0 + * 3001-4000=1 + */ + AutoPartitionByLong autoPartition2 = new AutoPartitionByLong(); + autoPartition2.setMapFile("autopartition-long-dupl.txt"); + autoPartition2.init(); + Assert.assertEquals(2, autoPartition2.getPartitionNum()); + RuleConfig rule2 = new RuleConfig("id", "auto-partition-long-dupl"); + rule2.setRuleAlgorithm(autoPartition2); + TableConfig tableConf2 = new TableConfig("test2", "id", true, false, -1, "dn1,dn2", + null, rule, true, null, false, null, null, null); + Assert.assertEquals(0, autoPartition2.suitableFor(tableConf2)); + + Assert.assertEquals(0, autoPartition2.calculate("500").intValue()); + Assert.assertEquals(1, autoPartition2.calculate("1500").intValue()); + Assert.assertEquals(1, autoPartition2.calculate("2000").intValue()); + Assert.assertEquals(0, autoPartition2.calculate("3000").intValue()); + Assert.assertEquals(1, autoPartition2.calculate("3001").intValue()); + } + + @Test + public void testPartitionByDate() { + + PartitionByDate 
partition = new PartitionByDate(); + partition.setDateFormat("yyyy-MM-dd"); + partition.setsBeginDate("2014-01-01"); + partition.setsEndDate("2014-01-31"); + partition.setsPartionDay("10"); + partition.init(); // partition = 4 + Assert.assertEquals(4, partition.getPartitionNum()); + + RuleConfig rule = new RuleConfig("col_date", "partition-date"); + rule.setRuleAlgorithm(partition); + TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn1,dn2,dn3", + null, rule, true, null, false, null, null, null); + int suit1 = partition.suitableFor(tableConf); + + Assert.assertEquals(-1, suit1); + + tableConf.getDataNodes().clear(); + tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3", "dn4")); + int suit2 = partition.suitableFor(tableConf); + Assert.assertEquals(0, suit2); + + tableConf.getDataNodes().clear(); + tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3", "dn4", "dn5")); + int suit3 = partition.suitableFor(tableConf); + Assert.assertEquals(1, suit3); + + PartitionByDate partition1 = new PartitionByDate(); + partition.setDateFormat("yyyy-MM-dd"); + partition.setsBeginDate("2014-01-01"); + partition.setsPartionDay("10"); + partition.init(); // partition no limit + + int suit4 = partition1.suitableFor(tableConf); + Assert.assertEquals(0, suit4); + + } + + @Test + public void testPartitionByHashMod() { + + PartitionByHashMod partition = new PartitionByHashMod(); + partition.setCount(3); // partition = 3; + Assert.assertEquals(3, partition.getPartitionNum()); + + RuleConfig rule = new RuleConfig("id", "partition-hash-mod"); + rule.setRuleAlgorithm(partition); + TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn1,dn2,dn3", + null, rule, true, null, false, null, null, null); + int suit1 = partition.suitableFor(tableConf); + Assert.assertEquals(0, suit1); + + tableConf.getDataNodes().clear(); + tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2", "dn3", "dn4")); + int suit2 = 
partition.suitableFor(tableConf); + Assert.assertEquals(1, suit2); + + tableConf.getDataNodes().clear(); + tableConf.getDataNodes().addAll(Arrays.asList("dn1", "dn2")); + int suit3 = partition.suitableFor(tableConf); + Assert.assertEquals(-1, suit3); + } + + @Test + public void testPartitionByRangeMod() { + PartitionByRangeMod partition = new PartitionByRangeMod(); + partition.setMapFile("partition-range-mod.txt"); + partition.init(); + + Assert.assertEquals(20, partition.getPartitionNum()); // partition = 20 + RuleConfig rule = new RuleConfig("id", "partition-range-mod"); + rule.setRuleAlgorithm(partition); + TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn$1-10", + null, rule, true, null, false, null, null, null); + int suit1 = partition.suitableFor(tableConf); + Assert.assertEquals(-1, suit1); + + tableConf.getDataNodes().clear(); + String[] dataNodes = SplitUtil.split("dn$1-20", ',', '$', '-'); + tableConf.getDataNodes().addAll(Arrays.asList(dataNodes)); + int suit2 = partition.suitableFor(tableConf); + Assert.assertEquals(0, suit2); + + tableConf.getDataNodes().clear(); + dataNodes = SplitUtil.split("dn$1-30", ',', '$', '-'); + tableConf.getDataNodes().addAll(Arrays.asList(dataNodes)); + int suit3 = partition.suitableFor(tableConf); + Assert.assertEquals(1, suit3); + + } + + @Test + public void testPartitionByPattern() { + PartitionByPattern partition = new PartitionByPattern(); + partition.setMapFile("partition-pattern.txt"); + partition.init(); + + /* + * partition-pattern.txt + * 1-32=0 + * 33-64=1 + * 65-96=2 + * 97-128=3 + * 129-160=4 + * 161-192=5 + * 193-224=6 + * 225-256=7 + * 0-0=7 + */ + + Assert.assertEquals(8, partition.getPartitionNum()); + + } + + @Test + public void testPartitionByPrefixPattern() { + PartitionByPrefixPattern partition = new PartitionByPrefixPattern(); + partition.setMapFile("partition_prefix_pattern.txt"); + partition.init(); + + + /* + * partition_prefix_pattern.txt + * 1-4=0 + * 5-8=1 + * 9-12=2 + * 
13-16=3 + * 17-20=4 + * 21-24=5 + * 25-28=6 + * 29-32=7 + * 0-0=7 + */ + Assert.assertEquals(8, partition.getPartitionNum()); + + RuleConfig rule = new RuleConfig("id", "partition-prefix-pattern"); + rule.setRuleAlgorithm(partition); + TableConfig tableConf = new TableConfig("test", "id", true, false, -1, "dn1,dn2", + null, rule, true, null, false, null, null, null); + int suit1 = partition.suitableFor(tableConf); + Assert.assertEquals(-1, suit1); + + tableConf.getDataNodes().clear(); + String[] dataNodes = SplitUtil.split("dn$1-8", ',', '$', '-'); + tableConf.getDataNodes().addAll(Arrays.asList(dataNodes)); + int suit2 = partition.suitableFor(tableConf); + Assert.assertEquals(0, suit2); + + tableConf.getDataNodes().clear(); + dataNodes = SplitUtil.split("dn$1-10", ',', '$', '-'); + tableConf.getDataNodes().addAll(Arrays.asList(dataNodes)); + int suit3 = partition.suitableFor(tableConf); + Assert.assertEquals(1, suit3); + } + +} diff --git a/src/test/java/io/mycat/route/function/TestLatestMonthPartion.java b/src/test/java/io/mycat/route/function/TestLatestMonthPartion.java index 96ef15978..73b002251 100644 --- a/src/test/java/io/mycat/route/function/TestLatestMonthPartion.java +++ b/src/test/java/io/mycat/route/function/TestLatestMonthPartion.java @@ -7,7 +7,7 @@ public class TestLatestMonthPartion { @Test - public void testSetDataNodes() { + public void testSetDataNodes() { LatestMonthPartion partion = new LatestMonthPartion(); partion.setSplitOneDay(24); Integer val = partion.calculate("2015020100"); diff --git a/src/test/java/io/mycat/route/function/TestNumberParseUtil.java b/src/test/java/io/mycat/route/function/TestNumberParseUtil.java index 0caf7abcc..b4796e44f 100644 --- a/src/test/java/io/mycat/route/function/TestNumberParseUtil.java +++ b/src/test/java/io/mycat/route/function/TestNumberParseUtil.java @@ -23,10 +23,11 @@ */ package io.mycat.route.function; -import junit.framework.Assert; - import org.junit.Test; +import 
io.mycat.route.function.NumberParseUtil; +import junit.framework.Assert; + public class TestNumberParseUtil { @Test diff --git a/src/test/java/io/mycat/route/parser/druid/impl/DefaultDruidParserTest.java b/src/test/java/io/mycat/route/parser/druid/impl/DefaultDruidParserTest.java new file mode 100644 index 000000000..f42e4cb0b --- /dev/null +++ b/src/test/java/io/mycat/route/parser/druid/impl/DefaultDruidParserTest.java @@ -0,0 +1,68 @@ +package io.mycat.route.parser.druid.impl; + +import static org.junit.Assert.*; +import static org.mockito.Mockito.*; + +import java.sql.SQLNonTransientException; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import io.mycat.cache.LayerCachePool; +import io.mycat.config.model.SchemaConfig; +import io.mycat.route.RouteResultset; +import io.mycat.route.parser.druid.DruidParser; +import io.mycat.route.parser.druid.DruidParserFactory; +import io.mycat.route.parser.druid.DruidShardingParseInfo; +import io.mycat.route.parser.druid.MycatSchemaStatVisitor; +import io.mycat.server.parser.ServerParse; + +import org.junit.Before; +import org.junit.Test; + +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import com.alibaba.druid.sql.parser.SQLStatementParser; + +/** + * sql解析单元测试 + * @author lian + * @date 2016年12月2日 + */ +public class DefaultDruidParserTest { + + private SchemaConfig schema; + private DruidParser druidParser; + @Before + public void setUp(){ + + schema = mock(SchemaConfig.class); + druidParser = new DefaultDruidParser(); + } + @Test + public void testParser() throws Exception { + + assertArrayEquals(getParseTables("select id as id from company t;"), getArr("company".toUpperCase())); + assertArrayEquals(getParseTables("select 1 from (select 1 from company) company;"), getArr("company".toUpperCase())); + } + + private Object[] getParseTables(String sql) throws Exception{ + + SQLStatementParser parser = new 
MySqlStatementParser(sql); + SQLStatement statement = parser.parseStatement(); + MycatSchemaStatVisitor visitor = new MycatSchemaStatVisitor(); + + + LayerCachePool cachePool = mock(LayerCachePool.class); + RouteResultset rrs = new RouteResultset(sql, ServerParse.SELECT); + + druidParser.parser(schema, rrs, statement, sql, cachePool, visitor); + + DruidShardingParseInfo ctx = druidParser.getCtx(); + return ctx.getTables().toArray(); + } + + private Object[] getArr(String...strings){ + return strings; + } +} diff --git a/src/test/java/io/mycat/route/perf/NoShardingSpace.java b/src/test/java/io/mycat/route/perf/NoShardingSpace.java index 38b52caa9..3cb0aad80 100644 --- a/src/test/java/io/mycat/route/perf/NoShardingSpace.java +++ b/src/test/java/io/mycat/route/perf/NoShardingSpace.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,21 +16,22 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. 
* */ package io.mycat.route.perf; +import java.sql.SQLNonTransientException; + import io.mycat.SimpleCachePool; import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; - -import java.sql.SQLNonTransientException; /** * @author mycat @@ -40,8 +41,10 @@ public class NoShardingSpace { private static int total=1000000; protected LayerCachePool cachePool = new SimpleCachePool(); public NoShardingSpace() { - ConfigInitializer confInit = new ConfigInitializer(true); - schema = confInit.getSchemas().get("dubbo"); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schema = schemaLoader.getSchemas().get("dubbo"); } public void testDefaultSpace() throws SQLNonTransientException { diff --git a/src/test/java/io/mycat/route/perf/ShardingDefaultSpace.java b/src/test/java/io/mycat/route/perf/ShardingDefaultSpace.java index 6bb273483..98084eb3c 100644 --- a/src/test/java/io/mycat/route/perf/ShardingDefaultSpace.java +++ b/src/test/java/io/mycat/route/perf/ShardingDefaultSpace.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. 
you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,21 +16,22 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. * */ package io.mycat.route.perf; +import java.sql.SQLNonTransientException; + import io.mycat.SimpleCachePool; import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; - -import java.sql.SQLNonTransientException; /** * @author mycat @@ -40,8 +41,10 @@ public class ShardingDefaultSpace { private static int total=1000000; protected LayerCachePool cachePool = new SimpleCachePool(); public ShardingDefaultSpace() throws InterruptedException { - ConfigInitializer confInit = new ConfigInitializer(true); - schema = confInit.getSchemas().get("cndb"); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schema = schemaLoader.getSchemas().get("cndb"); } /** diff --git a/src/test/java/io/mycat/route/perf/ShardingMultiTableSpace.java b/src/test/java/io/mycat/route/perf/ShardingMultiTableSpace.java index 2610836e2..5abae9a26 100644 --- a/src/test/java/io/mycat/route/perf/ShardingMultiTableSpace.java +++ 
b/src/test/java/io/mycat/route/perf/ShardingMultiTableSpace.java @@ -2,8 +2,8 @@ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This code is free software;Designed and Developed mainly by many Chinese - * opensource volunteers. you can redistribute it and/or modify it under the + * This code is free software;Designed and Developed mainly by many Chinese + * opensource volunteers. you can redistribute it and/or modify it under the * terms of the GNU General Public License version 2 only, as published by the * Free Software Foundation. * @@ -16,21 +16,22 @@ * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Any questions about this component can be directed to it's project Web address + * + * Any questions about this component can be directed to it's project Web address * https://code.google.com/p/opencloudb/. 
* */ package io.mycat.route.perf; +import java.sql.SQLNonTransientException; + import io.mycat.SimpleCachePool; import io.mycat.cache.LayerCachePool; +import io.mycat.config.loader.SchemaLoader; +import io.mycat.config.loader.xml.XMLSchemaLoader; +import io.mycat.config.model.SchemaConfig; +import io.mycat.config.model.SystemConfig; import io.mycat.route.factory.RouteStrategyFactory; -import io.mycat.server.config.loader.ConfigInitializer; -import io.mycat.server.config.node.SchemaConfig; -import io.mycat.server.config.node.SystemConfig; - -import java.sql.SQLNonTransientException; /** * @author mycat @@ -40,13 +41,15 @@ public class ShardingMultiTableSpace { private static int total=1000000; protected LayerCachePool cachePool = new SimpleCachePool(); public ShardingMultiTableSpace() throws InterruptedException { - ConfigInitializer confInit = new ConfigInitializer(true); - schema = confInit.getSchemas().get("cndb"); + String schemaFile = "/route/schema.xml"; + String ruleFile = "/route/rule.xml"; + SchemaLoader schemaLoader = new XMLSchemaLoader(schemaFile, ruleFile); + schema = schemaLoader.getSchemas().get("cndb"); } /** * 路由到tableSpace的性能测试 - * + * * @throws SQLNonTransientException */ public void testTableSpace() throws SQLNonTransientException { diff --git a/src/test/java/io/mycat/route/util/PartitionUtilTest.java b/src/test/java/io/mycat/route/util/PartitionUtilTest.java index 164fd6531..4af0bf25f 100644 --- a/src/test/java/io/mycat/route/util/PartitionUtilTest.java +++ b/src/test/java/io/mycat/route/util/PartitionUtilTest.java @@ -26,6 +26,8 @@ import org.junit.Assert; import org.junit.Test; +import io.mycat.route.util.PartitionUtil; + /** * @author mycat() */ diff --git a/src/test/java/io/mycat/route/util/RouterUtilTest.java b/src/test/java/io/mycat/route/util/RouterUtilTest.java new file mode 100644 index 000000000..6446a6c8b --- /dev/null +++ b/src/test/java/io/mycat/route/util/RouterUtilTest.java @@ -0,0 +1,119 @@ +package io.mycat.route.util; + +import 
io.mycat.util.StringUtil; +import org.junit.Assert; +import org.junit.Test; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.List; +import java.util.Map; + +/** + * @author Hash Zhang + * @version 1.0.0 + * @date 2016/7/19 + */ +public class RouterUtilTest { + + + + @Test + public void testBatchInsert() { + String sql = "insert into hotnews(title,name) values('test1',\"name\"),('(test)',\"(test)\"),('\\\"',\"\\'\"),(\")\",\"\\\"\\')\");"; + List values = RouterUtil.handleBatchInsert(sql, sql.toUpperCase().indexOf("VALUES")); + Assert.assertTrue(values.get(0).equals("insert into hotnews(title,name) values('test1',\"name\")")); + Assert.assertTrue(values.get(1).equals("insert into hotnews(title,name) values('(test)',\"(test)\")")); + Assert.assertTrue(values.get(2).equals("insert into hotnews(title,name) values('\\\"',\"\\'\")")); + Assert.assertTrue(values.get(3).equals("insert into hotnews(title,name) values(\")\",\"\\\"\\')\")")); + } + + + @Test + public void testRemoveSchema() { + String sql = "update test set name='abcdtestx.aa' where id=1 and testx=123"; + + String afterAql= RouterUtil.removeSchema(sql,"testx"); + Assert.assertEquals(sql,afterAql); + System.out.println(afterAql); + + } + @Test + public void testRemoveSchemaSelect() { + String sql = "select id as 'aa' from test where name='abcdtestx.aa' and id=1 and testx=123"; + + String afterAql= RouterUtil.removeSchema(sql,"testx"); + Assert.assertEquals(sql,afterAql); + + } + + @Test + public void testRemoveSchemaSelect2() { + String sql = "select id as 'aa' from testx.test where name='abcd testx.aa' and id=1 and testx=123"; + + String afterAql= RouterUtil.removeSchema(sql,"testx"); + Assert.assertNotSame(sql.indexOf("testx."),afterAql.indexOf("testx.")); + + } + + @Test + public void testRemoveSchema2(){ + String sql = "update testx.test set name='abcd \\' testx.aa' where id=1"; + String sqltrue = "update test set name='abcd \\' testx.aa' where 
id=1"; + String sqlnew = RouterUtil.removeSchema(sql, "testx"); + Assert.assertEquals("处理错误:", sqltrue, sqlnew); + } + + @Test + public void testRemoveSchema3(){ + String sql = "update testx.test set testx.name='abcd testx.aa' where testx.id=1"; + String sqltrue = "update test set name='abcd testx.aa' where id=1"; + String sqlnew = RouterUtil.removeSchema(sql, "testx"); + Assert.assertEquals("处理错误:", sqltrue, sqlnew); + } + + @Test + public void testRemoveSchema4(){ + String sql = "update testx.test set testx.name='abcd testx.aa' and testx.name2='abcd testx.aa' where testx.id=1"; + String sqltrue = "update test set name='abcd testx.aa' and name2='abcd testx.aa' where id=1"; + String sqlnew = RouterUtil.removeSchema(sql, "testx"); + Assert.assertEquals("处理错误:", sqltrue, sqlnew); + } + /** + * @modification 修改支持createTable语句中包含“IF NOT EXISTS”的情况,这里测试下 + * @date 2016/12/8 + * @modifiedBy Hash Zhang + */ + @Test + public void testGetCreateTableStmtTableName(){ + String sql1 = StringUtil.makeString("create table if not exists producer(\n", + "\tid int(11) primary key,\n", + "\tname varchar(32)\n", + ");").toUpperCase(); + String sql2 = StringUtil.makeString("create table good(\n", + "\tid int(11) primary key,\n", + "\tcontent varchar(32),\n", + "\tproducer_id int(11) key\n", + ");").toUpperCase(); + Assert.assertTrue("producer".equalsIgnoreCase(RouterUtil.getTableName(sql1, RouterUtil.getCreateTablePos(sql1, 0)))); + Assert.assertTrue("good".equalsIgnoreCase(RouterUtil.getTableName(sql2, RouterUtil.getCreateTablePos(sql2, 0)))); + } + + /** + * @modification 针对修改RouterUtil的去除schema的方法支持` 进行测试 + * @date 2016/12/29 + * @modifiedBy Hash Zhang + */ + @Test + public void testRemoveSchemaWithHypha(){ + String sql1 = StringUtil.makeString("select `testdb`.`orders`.`id`, `testdb`.`orders`.`customer_id`, `testdb`.`orders`.`goods_id` from `testdb`.`orders` where testdb.`orders`.`id` = 1;").toUpperCase(); + String sql2 = StringUtil.makeString("select `testdb`.`orders`.`id`, 
testdb.`orders`.`customer_id`, `testdb`.`orders`.`goods_id` from testdb.`orders` where `testdb`.`orders`.`id` = 1;").toUpperCase(); + String sql3 = StringUtil.makeString("select testdb.`orders`.`id`, `testdb`.`orders`.`customer_id`, testdb.`orders`.`goods_id` from `testdb`.`orders` where testdb.`orders`.`id` = 1;").toUpperCase(); + String sql4 = StringUtil.makeString("select testdb.`orders`.`id`, testdb.`orders`.`customer_id`, testdb.`orders`.`goods_id` from testdb.`orders` where testdb.`orders`.`id` = 1;").toUpperCase(); + String result = "SELECT `ORDERS`.`ID`, `ORDERS`.`CUSTOMER_ID`, `ORDERS`.`GOODS_ID` FROM `ORDERS` WHERE `ORDERS`.`ID` = 1;"; + Assert.assertTrue(result.equals(RouterUtil.removeSchema(sql1,"testdb"))); + Assert.assertTrue(result.equals(RouterUtil.removeSchema(sql2,"testdb"))); + Assert.assertTrue(result.equals(RouterUtil.removeSchema(sql3,"testdb"))); + Assert.assertTrue(result.equals(RouterUtil.removeSchema(sql4,"testdb"))); + } +} diff --git a/src/test/java/io/mycat/sequence/DistributedSequenceHandlerTest.java b/src/test/java/io/mycat/sequence/DistributedSequenceHandlerTest.java new file mode 100644 index 000000000..7ed629116 --- /dev/null +++ b/src/test/java/io/mycat/sequence/DistributedSequenceHandlerTest.java @@ -0,0 +1,181 @@ +package io.mycat.sequence; + +import io.mycat.config.MycatConfig; +import io.mycat.route.sequence.handler.DistributedSequenceHandler; +import junit.framework.Assert; +import org.apache.curator.test.TestingServer; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +/** + * 基于ZK与本地配置的分布式ID生成器 + * 无悲观锁,吞吐量更高 + * + * @author Hash Zhang + * @version 1.0 + * @time 00:12:05 2016/5/3 + */ +public class DistributedSequenceHandlerTest { + TestingServer testingServer = null; + DistributedSequenceHandler distributedSequenceHandler[]; + + @Before + public void initialize() throws Exception { + 
distributedSequenceHandler = new DistributedSequenceHandler[16]; + MycatConfig mycatConfig = new MycatConfig(); + testingServer = new TestingServer(); + testingServer.start(); + for (int i = 0; i < 16; i++) { + distributedSequenceHandler[i] = new DistributedSequenceHandler(mycatConfig.getSystem()); + distributedSequenceHandler[i].initializeZK(testingServer.getConnectString()); + distributedSequenceHandler[i].nextId(""); + } + } + + /** + * 测试获取的唯一InstanceId + * + * @throws Exception + */ + @Test + public void testUniqueInstanceID() throws Exception { + Set idSet = new HashSet<>(); + for (int i = 0; i < 16; i++) { + idSet.add(distributedSequenceHandler[i].getInstanceId()); + } + Assert.assertEquals(idSet.size(), 16); + } + + /** + * 测试获取的唯一id + * + * @throws Exception + */ + @Test + public void testUniqueID() throws Exception { + final ConcurrentHashMap idSet = new ConcurrentHashMap<>(); + Thread thread[] = new Thread[10]; + long start = System.currentTimeMillis(); + //多少线程,注意线程数不能超过最大线程数(1< idSet = new HashSet<>(); + try { + int leader = failLeader(17); + System.out.println("***断掉一个leader节点后(curator会抛对应的异常断链异常,不用在意)***:"); + for (int i = 0; i < 16; i++) { + if (i == leader) { + System.out.println("Node [" + i + "] used to be leader"); + continue; + } + distributedSequenceHandler[i].nextId(""); + System.out.println("Node [" + i + "]is leader:" + distributedSequenceHandler[i].getLeaderSelector().hasLeadership() ); + System.out.println(" InstanceID:" + distributedSequenceHandler[i].getInstanceId()); + idSet.add(distributedSequenceHandler[i].getInstanceId()); + } + Assert.assertEquals(idSet.size(), 15); + idSet = new HashSet<>(); + int leader2 = failLeader(leader); + System.out.println("***断掉两个leader节点后(curator会抛对应的异常断链异常,不用在意)***:"); + for (int i = 0; i < 16; i++) { + if (i == leader || i == leader2) { + System.out.println("Node ["+i + " used to be leader"); + continue; + } + distributedSequenceHandler[i].nextId(""); + System.out.println("Node ["+i + "]is leader:" + 
distributedSequenceHandler[i].getLeaderSelector().hasLeadership()); + System.out.println(" InstanceID:" + distributedSequenceHandler[i].getInstanceId()); + idSet.add(distributedSequenceHandler[i].getInstanceId()); + } + Assert.assertEquals(idSet.size(), 14); + + idSet = new HashSet<>(); + MycatConfig mycatConfig = new MycatConfig(); + distributedSequenceHandler[leader] = new DistributedSequenceHandler(mycatConfig.getSystem()); + distributedSequenceHandler[leader].initializeZK(testingServer.getConnectString()); + distributedSequenceHandler[leader].nextId(""); + distributedSequenceHandler[leader2] = new DistributedSequenceHandler(mycatConfig.getSystem()); + distributedSequenceHandler[leader2].initializeZK(testingServer.getConnectString()); + distributedSequenceHandler[leader2].nextId(""); + System.out.println("新加入两个节点后"); + for (int i = 0; i < 16; i++) { + System.out.println("Node ["+i + "]is leader:" + distributedSequenceHandler[i].getLeaderSelector().hasLeadership() ); + System.out.println(" InstanceID:" + distributedSequenceHandler[i].getInstanceId()); + idSet.add(distributedSequenceHandler[i].getInstanceId()); + } + } catch (Exception e) { + + } finally { + Assert.assertEquals(idSet.size(), 16); + } + + } + + private int failLeader(int p) { + int leader = 0, follower = 0; + for (int i = 0; i < 16; i++) { + if (i == p) { + continue; + } + if (distributedSequenceHandler[i].getLeaderSelector().hasLeadership()) { + leader = i; + } else { + follower = i; + } + System.out.println("Node ["+i + "]is leader:" + distributedSequenceHandler[i].getLeaderSelector().hasLeadership() ); + System.out.println(" InstanceID:" + distributedSequenceHandler[i].getInstanceId()); + } + try { + distributedSequenceHandler[leader].close(); + } catch (IOException e) { + } + + while (true) { + follower++; + if (follower >= 16) { + follower = 0; + } + if (follower == leader || follower == p) { + continue; + } + if (distributedSequenceHandler[follower].getLeaderSelector().hasLeadership()) { + 
break; + } + } + return leader; + } + +} diff --git a/src/test/java/io/mycat/sequence/IncrSequenceZKHandlerTest.java b/src/test/java/io/mycat/sequence/IncrSequenceZKHandlerTest.java new file mode 100644 index 000000000..234f9cbd6 --- /dev/null +++ b/src/test/java/io/mycat/sequence/IncrSequenceZKHandlerTest.java @@ -0,0 +1,92 @@ +package io.mycat.sequence; + +import io.mycat.route.sequence.handler.IncrSequenceZKHandler; +import io.mycat.route.util.PropertiesUtil; +import junit.framework.Assert; +import org.apache.curator.test.TestingServer; +import org.junit.Before; +import org.junit.Test; + +import java.util.Properties; +import java.util.concurrent.ConcurrentSkipListSet; + +/** + * zookeeper 实现递增序列号 + * 默认测试模拟60个进程,每个进程内20个线程。每个线程调用50次参数为GLOBAL的nextid + * 默认GLOBAL.MINID=1 + * 默认GLOBAL.MAXID=10 + * 表示当前线程内id用光时,每次会取GLOBAL.MINID-GLOBAL.MAXID9个ID + * + * @author Hash Zhang + * @version 1.0 + * @time 23:35 2016/5/6 + */ +public class IncrSequenceZKHandlerTest { + private final static int MAX_CONNECTION = 5; + private final static int threadCount = 5; + private final static int LOOP = 5; + TestingServer testingServer = null; + IncrSequenceZKHandler incrSequenceZKHandler[]; + ConcurrentSkipListSet results; + + @Before + public void initialize() throws Exception { + testingServer = new TestingServer(); + testingServer.start(); + incrSequenceZKHandler = new IncrSequenceZKHandler[MAX_CONNECTION]; + results = new ConcurrentSkipListSet(); + } + + @Test + public void testCorrectnessAndEfficiency() throws InterruptedException { + final Thread threads[] = new Thread[MAX_CONNECTION]; + for (int i = 0; i < MAX_CONNECTION; i++) { + final int a = i; + threads[i] = new Thread() { + @Override + public void run() { + incrSequenceZKHandler[a] = new IncrSequenceZKHandler(); + Properties props = PropertiesUtil.loadProps("sequence_conf.properties"); + try { + incrSequenceZKHandler[a].initializeZK(props, testingServer.getConnectString()); + } catch (Exception e) { + e.printStackTrace(); + } 
+ Thread threads[] = new Thread[threadCount]; + for (int j = 0; j < threadCount; j++) { + threads[j] = new Thread() { + @Override + public void run() { + for (int k = 0; k < LOOP; k++) { + long key = incrSequenceZKHandler[a].nextId("GLOBAL"); + results.add(key); + } + } + }; + threads[j].start(); + } + for (int j = 0; j < threadCount; j++) { + try { + threads[j].join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + } + }; + + } + long start = System.currentTimeMillis(); + for (int i = 0; i < MAX_CONNECTION; i++) { + threads[i].start(); + } + for (int i = 0; i < MAX_CONNECTION; i++) { + threads[i].join(); + } + long end = System.currentTimeMillis(); + Assert.assertEquals(MAX_CONNECTION * LOOP * threadCount, results.size()); +// Assert.assertTrue(results.pollLast().equals(MAX_CONNECTION * LOOP * threadCount + 1L)); +// Assert.assertTrue(results.pollFirst().equals(2L)); + System.out.println("Time elapsed:" + ((double) (end - start + 1) / 1000.0) + "s\n TPS:" + ((double) (MAX_CONNECTION * LOOP * threadCount) / (double) (end - start + 1) * 1000.0) + "/s"); + } +} diff --git a/src/test/java/io/mycat/sequence/SequenceHandlerTest.java b/src/test/java/io/mycat/sequence/SequenceHandlerTest.java index 2f9147746..9092ce7b2 100644 --- a/src/test/java/io/mycat/sequence/SequenceHandlerTest.java +++ b/src/test/java/io/mycat/sequence/SequenceHandlerTest.java @@ -23,10 +23,13 @@ */ package io.mycat.sequence; -import io.mycat.server.sequence.IncrSequencePropHandler; -import io.mycat.server.sequence.SequenceHandler; import junit.framework.Assert; +import org.junit.Test; + +import io.mycat.route.sequence.handler.IncrSequencePropHandler; +import io.mycat.route.sequence.handler.SequenceHandler; + /** * 全局序列号单元测试 * diff --git a/src/test/java/io/mycat/sequence/SequenceTest.java b/src/test/java/io/mycat/sequence/SequenceTest.java new file mode 100644 index 000000000..77b29e7c4 --- /dev/null +++ b/src/test/java/io/mycat/sequence/SequenceTest.java @@ -0,0 +1,72 @@ 
+package io.mycat.sequence; + +import io.mycat.MycatServer; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.util.Set; +import java.util.TreeSet; +import java.util.UUID; + +/** + * 全局序列号测试 + * + * @author Hash Zhang + * @version 1.0 + * @time 00:12:05 2016/5/6 + */ +public class SequenceTest { + private Set sequenceSet; + private long startTime; + private long endTime; + + @Before + public void initialize() { + sequenceSet = new TreeSet<>(); + startTime = System.nanoTime(); + } + +// @Test +// public void testIncrement(){ +// System.out.print("Increment "); +// for (int i = 0; i < 1000000; i++) { +// sequenceSet.add(i+""); +// } +// } +// + @Test + public void testUUID(){ + System.out.print("UUID "); + for (int i = 0; i < 100; i++) { + sequenceSet.add(UUID.randomUUID().toString()); + } + } + + @Test + public void testRandom(){ + TreeSet treeSet= new TreeSet<>(); + System.out.println(Long.toBinaryString(Long.valueOf(System.currentTimeMillis()+"")).length()); + } + + @Test + public void testRandom2(){ + System.out.print("UUID "); + for (int i = 0; i < 100; i++) { + sequenceSet.add("aaassscccddd"+i); + } + } + + @Test + public void testXAXID(){ + String xid = MycatServer.getInstance().getXATXIDGLOBAL(); + System.out.println(xid); + } + + + @After + public void end() { + endTime = System.nanoTime(); + System.out.println("Time elapsed: " + (endTime - startTime)/(1000000L) + "ms"); + } +} diff --git a/src/test/java/io/mycat/server/config/loader/zkloader/ZookeeperLoaderTest.java b/src/test/java/io/mycat/server/config/loader/zkloader/ZookeeperLoaderTest.java deleted file mode 100644 index 1c68651b5..000000000 --- a/src/test/java/io/mycat/server/config/loader/zkloader/ZookeeperLoaderTest.java +++ /dev/null @@ -1,17 +0,0 @@ -package io.mycat.server.config.loader.zkloader; - -import org.junit.Ignore; -import org.junit.Test; - - -/** - * Created by v1.lion on 2015/10/5. 
- */ -public class ZookeeperLoaderTest { - @Ignore - @Test - public void testConstruct() { - ZookeeperLoader zookeeperLoader = new ZookeeperLoader(); - zookeeperLoader.initConfig(); - } -} \ No newline at end of file diff --git a/src/test/java/io/mycat/server/handler/ServerHandlerTest.java b/src/test/java/io/mycat/server/handler/ServerHandlerTest.java new file mode 100644 index 000000000..b8e05587f --- /dev/null +++ b/src/test/java/io/mycat/server/handler/ServerHandlerTest.java @@ -0,0 +1,5 @@ +package io.mycat.server.handler; + +public class ServerHandlerTest { + +} diff --git a/src/test/java/io/mycat/server/interceptor/impl/GlobalTableUtilTest.java b/src/test/java/io/mycat/server/interceptor/impl/GlobalTableUtilTest.java new file mode 100644 index 000000000..049a580b9 --- /dev/null +++ b/src/test/java/io/mycat/server/interceptor/impl/GlobalTableUtilTest.java @@ -0,0 +1,46 @@ +package io.mycat.server.interceptor.impl; + +import org.junit.Test; + +import com.alibaba.druid.sql.ast.SQLStatement; +import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser; + +import junit.framework.Assert; + +public class GlobalTableUtilTest { + + private static final String originSql1 = "CREATE TABLE retl_mark" + + "(" + + " ID BIGINT AUTO_INCREMENT," + + " CHANNEL_ID INT(11)," + + " CHANNEL_INFO varchar(128)," + + " CONSTRAINT RETL_MARK_ID PRIMARY KEY (ID)" + + ") ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;"; + + private static final String originSql2 = "CREATE TABLE retl_mark" + + "(" + + " ID BIGINT AUTO_INCREMENT," + + " CHANNEL_ID INT(11)," + + " CHANNEL_INFO varchar(128)," + + " _MYCAT_OP_TIME int," + + " CONSTRAINT RETL_MARK_ID PRIMARY KEY (ID)" + + ") ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;"; + + @Test + public void addColumnIfCreate() { + String sql = parseSql(originSql1); + System.out.println(sql); + boolean contains = sql.contains("_mycat_op_time "); + Assert.assertTrue(contains); + sql = parseSql(originSql2); + System.out.println(sql); + 
Assert.assertFalse(sql.contains("_mycat_op_time int COMMENT '全局表保存修改时间戳的字段名'")); + } + + public String parseSql(String sql) { + MySqlStatementParser parser = new MySqlStatementParser(sql); + SQLStatement statement = parser.parseStatement(); + return GlobalTableUtil.addColumnIfCreate(sql, statement); + } + +} diff --git a/src/test/java/io/mycat/sqlexecute/MultiThreadSelectTest.java b/src/test/java/io/mycat/sqlexecute/MultiThreadSelectTest.java new file mode 100644 index 000000000..a1d57b17c --- /dev/null +++ b/src/test/java/io/mycat/sqlexecute/MultiThreadSelectTest.java @@ -0,0 +1,96 @@ +package io.mycat.sqlexecute; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; + +public class MultiThreadSelectTest { + private static void testSequnce(Connection theCon) throws SQLException { + boolean autCommit = System.currentTimeMillis() % 2 == 1; + theCon.setAutoCommit(autCommit); + + String sql = "select * from company "; + Statement stmt = theCon.createStatement(); + int charChoise = (int) (System.currentTimeMillis() % 3); + if (charChoise == 0) { + stmt.executeQuery("SET NAMES UTF8;"); + } else if (charChoise == 1) { + stmt.executeQuery("SET NAMES latin1;"); + } + if (charChoise == 2) { + stmt.executeQuery("SET NAMES gb2312;"); + } + ResultSet rs = stmt.executeQuery(sql); + if (rs.next()) { + System.out.println(Thread.currentThread().getName() + " get seq " + rs.getLong(1)); + } else { + System.out.println(Thread.currentThread().getName() + " can't get seq "); + } + if (autCommit == false) { + theCon.commit(); + } + stmt.close(); + + } + + private static Connection getCon(String url, String user, String passwd) throws SQLException { + Connection theCon = DriverManager.getConnection(url, user, passwd); + return theCon; + } + + public static void main(String[] args) { + try { + Class.forName("com.mysql.jdbc.Driver"); + } 
catch (ClassNotFoundException e1) { + e1.printStackTrace(); + } + + final String url = "jdbc:mysql://localhost:8066/TESTDB"; + final String user = "test"; + final String password = "test"; + List threads = new ArrayList(100); + for (int i = 0; i < 50; i++) { + + threads.add(new Thread() { + public void run() { + Connection con; + try { + con = getCon(url, user, password); + for (int j = 0; j < 10000; j++) { + testSequnce(con); + } + } catch (SQLException e) { + + e.printStackTrace(); + } + + } + }); + + } + for (Thread thred : threads) { + thred.start(); + + } + boolean hasRunning = true; + while (hasRunning) { + hasRunning = false; + for (Thread thred : threads) { + if (thred.isAlive()) { + try { + Thread.sleep(1000); + hasRunning = true; + } catch (InterruptedException e) { + + } + } + + } + } + + } +} diff --git a/src/test/java/io/mycat/sqlexecute/MycatMulitJdbcVersionTest.java b/src/test/java/io/mycat/sqlexecute/MycatMulitJdbcVersionTest.java new file mode 100644 index 000000000..3d57cd316 --- /dev/null +++ b/src/test/java/io/mycat/sqlexecute/MycatMulitJdbcVersionTest.java @@ -0,0 +1,157 @@ +package io.mycat.sqlexecute; + +import java.net.URL; +import java.net.URLClassLoader; +import java.sql.Connection; +import java.sql.Driver; +import java.sql.DriverManager; +import java.sql.DriverPropertyInfo; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.Statement; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.logging.Logger; + +/** + * + * 测试mycat对不同版本的mysql jdbc的兼容性 + * + * + *

+ * 关联issue: @see https://github.com/MyCATApache/Mycat-Server/issues/1203 + * + *

+ * Note:
+ * 1. 请将这个类放到新建的project独立运行, mycat pom.xml里面使用的mysql驱动会影响测试结果
+ * 2. 确保project新建lib子目录并且在lib子目录里面放置了各类版本的mysql jdbc驱动 + * 3. 程序会动态加载不同版本的jdbc驱动, 请不要将任何mysql jdbc驱动加入classpath, 否则也可能影响测试结果 + * + * @author CrazyPig + * @since 2016-11-13 + * + */ +public class MycatMulitJdbcVersionTest { + + private static final String JDBC_URL = "jdbc:mysql://localhost:8066/TESTDB"; + private static final String USER = "root"; + private static final String PASSWORD = "123456"; + private static final Map jdbcVersionMap = new HashMap(); + private static final Map tmpDriverMap = new HashMap(); + + // 动态加载jdbc驱动 + private static void dynamicLoadJdbc(String mysqlJdbcFile) throws Exception { + URL u = new URL("jar:file:lib/" + mysqlJdbcFile + "!/"); + String classname = jdbcVersionMap.get(mysqlJdbcFile); + URLClassLoader ucl = new URLClassLoader(new URL[] { u }); + Driver d = (Driver)Class.forName(classname, true, ucl).newInstance(); + DriverShim driver = new DriverShim(d); + DriverManager.registerDriver(driver); + tmpDriverMap.put(mysqlJdbcFile, driver); + } + + // 每一次测试完卸载对应版本的jdbc驱动 + private static void dynamicUnLoadJdbc(String mysqlJdbcFile) throws SQLException { + DriverManager.deregisterDriver(tmpDriverMap.get(mysqlJdbcFile)); + } + + // 进行一次测试 + private static void testOneVersion(String mysqlJdbcFile) { + + System.out.println("start test mysql jdbc version : " + mysqlJdbcFile); + + try { + dynamicLoadJdbc(mysqlJdbcFile); + } catch (Exception e1) { + e1.printStackTrace(); + } + + Connection conn = null; + try { + conn = DriverManager.getConnection(JDBC_URL, USER, PASSWORD); + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("select user()"); + System.out.println("select user() output : "); + while(rs.next()) { + System.out.println(rs.getObject(1)); + } + rs = stmt.executeQuery("show tables"); + System.out.println("show tables output : "); + while(rs.next()) { + System.out.println(rs.getObject(1)); + } + } catch (SQLException e) { + e.printStackTrace(); + } finally { + if(conn != null) { + try { + conn.close(); + } 
catch (SQLException e) { + e.printStackTrace(); + } + } + } + + try { + dynamicUnLoadJdbc(mysqlJdbcFile); + } catch (SQLException e) { + e.printStackTrace(); + } + + System.out.println("end !!!"); + System.out.println(); + } + + public static void main(String[] args) { + + // 多版本mysql jdbc驱动兼容性测试 + + // NOTE: 注意将对应的jar放到lib子目录, 不需要加入classpath!!! + jdbcVersionMap.put("mysql-connector-java-6.0.3.jar", "com.mysql.cj.jdbc.Driver"); + jdbcVersionMap.put("mysql-connector-java-5.1.6.jar", "com.mysql.jdbc.Driver"); + jdbcVersionMap.put("mysql-connector-java-5.1.31.jar", "com.mysql.jdbc.Driver"); + jdbcVersionMap.put("mysql-connector-java-5.1.35.jar", "com.mysql.jdbc.Driver"); + jdbcVersionMap.put("mysql-connector-java-5.1.39.jar", "com.mysql.jdbc.Driver"); + + // 更多的jdbc驱动... + + for(String mysqlJdbcFile : jdbcVersionMap.keySet()) { + testOneVersion(mysqlJdbcFile); + } + + } + +} + +class DriverShim implements Driver { + private Driver driver; + DriverShim(Driver d) { this.driver = d; } + public boolean acceptsURL(String u) throws SQLException { + return this.driver.acceptsURL(u); + } + public Connection connect(String u, Properties p) throws SQLException { + return this.driver.connect(u, p); + } + @Override + public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { + return this.driver.getPropertyInfo(url, info); + } + @Override + public int getMajorVersion() { + return this.driver.getMajorVersion(); + } + @Override + public int getMinorVersion() { + return this.driver.getMinorVersion(); + } + @Override + public boolean jdbcCompliant() { + return this.driver.jdbcCompliant(); + } + @Override + public Logger getParentLogger() throws SQLFeatureNotSupportedException { + return this.driver.getParentLogger(); + } +} diff --git a/src/test/java/io/mycat/sqlexecute/ServerPrepareTest.java b/src/test/java/io/mycat/sqlexecute/ServerPrepareTest.java new file mode 100644 index 000000000..2d9798972 --- /dev/null +++ 
b/src/test/java/io/mycat/sqlexecute/ServerPrepareTest.java @@ -0,0 +1,259 @@ +package io.mycat.sqlexecute; + + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.UnsupportedEncodingException; +import java.sql.*; + +import org.junit.Assert; + +/** + * + * @author CrazyPig + * + */ +public class ServerPrepareTest { + + // JDBC driver name and database URL + static final String JDBC_DRIVER = "com.mysql.jdbc.Driver"; + static final String DB_URL = "jdbc:mysql://localhost:8066/TESTDB?useServerPrepStmts=true"; + + // Database credentials + static final String USER = "root"; + static final String PASS = "mysql"; + + static { + try { + Class.forName("com.mysql.jdbc.Driver"); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + } + } + + /** + * create table hotnews ( + * id int primary key auto_increment, + * title varchar(200), + * content text, + * image0 blob, + * image1 blob, + * image2 mediumblob, + * image3 longblob + * ) engine = innodb default character set = 'utf8'; + */ + + /** + * 测试发送COM_STMT_SEND_LONG_DATA命令 + * @throws IOException + */ + public static void testComStmtSendLondData() throws IOException { + Connection conn = null; + PreparedStatement pstmt = null; + ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); + // 获取待存储图片输入流 + InputStream image0In = classLoader.getResourceAsStream("blob/image0.jpg"); + InputStream image1In = classLoader.getResourceAsStream("blob/image1.png"); + InputStream image2In = classLoader.getResourceAsStream("blob/image2.png"); + InputStream image3In = classLoader.getResourceAsStream("blob/image3.png"); + + // 保存图片字节数据,待后面取回数据进行校验 + byte[] image0Bytes = getBytes(image0In); + byte[] image1Bytes = getBytes(image1In); + byte[] image2Bytes = getBytes(image2In); + byte[] image3Bytes = getBytes(image3In); + + try { + conn = DriverManager.getConnection(DB_URL,USER,PASS); + pstmt = 
conn.prepareStatement("insert into hotnews(id, title, content, image0, image1, image2, image3) values(?,?,?,?,?,?,?)"); + pstmt.setInt(1, 1314); + pstmt.setString(2, "hotnew"); + // text字段设置 + pstmt.setBinaryStream(3, new ByteArrayInputStream("this is a content of hotnew".getBytes("UTF-8"))); + // blob字段构造 + Blob image0Blob = conn.createBlob(); + Blob image1Blob = conn.createBlob(); + Blob image2Blob = conn.createBlob(); + Blob image3Blob = conn.createBlob(); + image0Blob.setBytes(1, image0Bytes); + image1Blob.setBytes(1, image1Bytes); + image2Blob.setBytes(1, image2Bytes); + image3Blob.setBytes(1, image3Bytes); + // blob字段设置 + pstmt.setBlob(4, image0Blob); + pstmt.setBlob(5, image1Blob); + pstmt.setBlob(6, image2Blob); + pstmt.setBlob(7, image3Blob); + // 执行 + pstmt.execute(); + + // 从表里面拿出刚插入的数据, 对blob字段进行校验 + pstmt = conn.prepareStatement("select image0, image1, image2, image3 from hotnews where id = ?"); + pstmt.setInt(1, 1314); + ResultSet rs = pstmt.executeQuery(); + if(rs.next()) { + InputStream _image0In = rs.getBlob(1).getBinaryStream(); + InputStream _image1In = rs.getBlob(2).getBinaryStream(); + InputStream _image2In = rs.getBlob(3).getBinaryStream(); + InputStream _image3In = rs.getBlob(4).getBinaryStream(); + // 断言从数据库取出来的数据,与之前发送的数据是一致的(字节数组内容比较) + Assert.assertArrayEquals(image0Bytes, getBytes(_image0In)); + Assert.assertArrayEquals(image1Bytes, getBytes(_image1In)); + Assert.assertArrayEquals(image2Bytes, getBytes(_image2In)); + Assert.assertArrayEquals(image3Bytes, getBytes(_image3In)); + } + + pstmt.close(); + + } catch (SQLException e) { + e.printStackTrace(); + } catch (UnsupportedEncodingException e) { + e.printStackTrace(); + } finally { + if(conn != null) { + try { + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + } + } + + private static byte[] getBytes(InputStream in) throws IOException { + byte[] bytes = new byte[0]; + byte[] buffer = new byte[1024]; + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + 
int len = 0; + while((len = in.read(buffer)) > -1) { + bout.write(buffer, 0, len); + } + bytes = bout.toByteArray(); + return bytes; + } + + /** + * 测试发送COM_STMT_RESET命令 + */ + public static void testComStmtRest() { + Connection conn = null; + PreparedStatement pstmt = null; + try { + conn = DriverManager.getConnection(DB_URL,USER,PASS); + pstmt = conn.prepareStatement("insert into hotnews(id, title, content) values(?,?,?)"); + pstmt.setInt(1, 1314); + pstmt.setString(2, "hotnew"); + pstmt.setBinaryStream(3, new ByteArrayInputStream("this is a content of hotnew".getBytes("UTF-8"))); + pstmt.execute(); + pstmt.clearParameters(); + pstmt.setInt(1, 1315); + pstmt.setString(2, "hotnew"); + pstmt.setBinaryStream(3, new ByteArrayInputStream("this is a new content of hotnew".getBytes("UTF-8"))); + pstmt.execute(); + pstmt.close(); + } catch (SQLException e) { + e.printStackTrace(); + } catch (UnsupportedEncodingException e) { + e.printStackTrace(); + } finally { + if(conn != null) { + try { + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + } + } + + public static void simpleTest() { + Connection conn = null; + PreparedStatement stmt = null; + try{ + + System.out.println("Connecting to database..."); + conn = DriverManager.getConnection(DB_URL,USER,PASS); + + System.out.println("Creating statement..."); + String sql = "SELECT * FROM test where id ?"; - PreparedStatement pstmt = conn.prepareStatement(sql); - pstmt.setInt(1, 1); - ResultSet rs = pstmt.executeQuery(); - ResultSetMetaData rsmd = rs.getMetaData(); - int columns = rsmd.getColumnCount(); - for(int i = 1; i <= columns; i++) { // 输出列名 - System.out.print(rsmd.getColumnName(i) + "\t"); - } - System.out.println(); - while(rs.next()) { - for(int i = 1; i <= columns; i++) { // 输出行 - System.out.print(rs.getObject(i) + "\t"); - } - System.out.println(); - } - rs.close(); - pstmt.close(); - } catch (SQLException e) { - e.printStackTrace(); - } finally { - if(conn != null) { - try { - 
conn.close(); - } catch (SQLException e) { - e.printStackTrace(); - } - } - } - } - - -// mysql> desc test_numeric; -// +---------------+---------------+------+-----+---------+----------------+ -// | Field | Type | Null | Key | Default | Extra | -// +---------------+---------------+------+-----+---------+----------------+ -// | id | int(11) | NO | PRI | NULL | auto_increment | -// | tinyint_val | tinyint(4) | YES | | NULL | | -// | smallint_val | smallint(6) | YES | | NULL | | -// | mediumint_val | decimal(11,0) | YES | | NULL | | -// | int_val | int(11) | YES | | NULL | | -// | bigint_val | bigint(20) | YES | | NULL | | -// | decimal_val | decimal(7,2) | YES | | NULL | | -// | float_val | float(7,2) | YES | | NULL | | -// | double_val | double(7,2) | YES | | NULL | | -// +---------------+---------------+------+-----+---------+----------------+ -// 9 rows in set (0.00 sec) - - /** - * 测试服务端预处理查询返回Numeric类型数据是否有误 - */ - public static void testServerPrepareSelectWithNumericType() { - String sql = "select * from test_numeric"; - -// mysql> select * from test_numeric; -// +----+-------------+--------------+---------------+-----------+------------+-------------+-----------+------------+ -// | id | tinyint_val | smallint_val | mediumint_val | int_val | bigint_val | decimal_val | float_val | double_val | -// +----+-------------+--------------+---------------+-----------+------------+-------------+-----------+------------+ -// | 1 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | -// | 2 | 123 | 12345 | 123456789 | 123456789 | 123456789 | 1234.33 | 1234.33 | 1234.33 | -// +----+-------------+--------------+---------------+-----------+------------+-------------+-----------+------------+ -// 2 rows in set (0.00 sec) - - testServerPrepareSelectSql(sql); - } - - -// mysql> desc test_date; -// +---------------+-----------+------+-----+-------------------+-----------------------------+ -// | Field | Type | Null | Key | Default | Extra | -// 
+---------------+-----------+------+-----+-------------------+-----------------------------+ -// | id | int(11) | NO | PRI | NULL | auto_increment | -// | date_val | date | YES | | NULL | | -// | datetime_val | datetime | YES | | NULL | | -// | timestamp_val | timestamp | NO | | CURRENT_TIMESTAMP | on update CURRENT_TIMESTAMP | -// +---------------+-----------+------+-----+-------------------+-----------------------------+ -// 4 rows in set (0.01 sec) - - /** - * 测试服务端预处理查询返回Date和Time类型数据是否有误 - */ - public static void testServerPrepareSelectWithDateType() { - String sql = "select * from test_date"; - -// mysql> select * from test_date; -// +----+------------+---------------------+---------------------+ -// | id | date_val | datetime_val | timestamp_val | -// +----+------------+---------------------+---------------------+ -// | 1 | 2015-08-19 | 2015-08-26 16:02:11 | 2015-08-19 16:02:22 | -// | 2 | NULL | NULL | 2015-08-30 16:02:41 | -// +----+------------+---------------------+---------------------+ -// 2 rows in set (0.00 sec) - - testServerPrepareSelectSql(sql); - } - -// mysql> desc test_string; -// +-------------+-------------+------+-----+---------+----------------+ -// | Field | Type | Null | Key | Default | Extra | -// +-------------+-------------+------+-----+---------+----------------+ -// | id | int(11) | NO | PRI | NULL | auto_increment | -// | char_val | char(10) | YES | | NULL | | -// | varchar_val | varchar(10) | YES | | NULL | | -// | text_val | text | YES | | NULL | | -// +-------------+-------------+------+-----+---------+----------------+ -// 4 rows in set (0.01 sec) - - /** - * 测试服务端预处理查询返回String类型数据是否有误 - */ - public static void testServerPrepareSelectWithStringType() { - String sql = "select * from test_string"; - -// mysql> select * from test_string; -// +----+----------+-------------+----------+ -// | id | char_val | varchar_val | text_val | -// +----+----------+-------------+----------+ -// | 1 | AAA | BBB | CCC | -// | 2 | NULL | NULL | NULL 
| -// | 3 | | | | -// +----+----------+-------------+----------+ -// 3 rows in set (0.00 sec) - - testServerPrepareSelectSql(sql); - } - - private static void testServerPrepareSelectSql(String sql) { - Connection conn = null; - try { - conn = DriverManager.getConnection(url, user, password); - PreparedStatement pstmt = conn.prepareStatement(sql); - ResultSet rs = pstmt.executeQuery(); - ResultSetMetaData rsmd = rs.getMetaData(); - int columns = rsmd.getColumnCount(); - for(int i = 1; i <= columns; i++) { // 输出列名 - System.out.print(rsmd.getColumnName(i) + "\t"); - } - System.out.println(); - while(rs.next()) { - for(int i = 1; i <= columns; i++) { // 输出行 - System.out.print(rs.getObject(i) + "\t"); - } - System.out.println(); - } - rs.close(); - pstmt.close(); - } catch (SQLException e) { - e.printStackTrace(); - } finally { - if(conn != null) { - try { - conn.close(); - } catch (SQLException e) { - e.printStackTrace(); - } - } - } - } - } diff --git a/src/main/java/io/mycat/server/Versions.java b/src/test/java/io/mycat/statistic/SQLStatisticsMain.java similarity index 82% rename from src/main/java/io/mycat/server/Versions.java rename to src/test/java/io/mycat/statistic/SQLStatisticsMain.java index 7e6454953..f2417f787 100644 --- a/src/main/java/io/mycat/server/Versions.java +++ b/src/test/java/io/mycat/statistic/SQLStatisticsMain.java @@ -21,17 +21,17 @@ * https://code.google.com/p/opencloudb/. 
* */ -package io.mycat.server; +package io.mycat.statistic; + +import io.mycat.statistic.SQLRecord; +import io.mycat.statistic.SQLRecorder; /** * @author mycat */ -public interface Versions { +public class SQLStatisticsMain { + - /**协议版本**/ - public static final byte PROTOCOL_VERSION = 10; - /**服务器版**/ - public static final byte[] SERVER_VERSION = "5.5.8-mycat-2.0-dev-20151218165648".getBytes(); } \ No newline at end of file diff --git a/src/test/java/io/mycat/statistic/TestConcurrentSafety.java b/src/test/java/io/mycat/statistic/TestConcurrentSafety.java new file mode 100644 index 000000000..a11d0c248 --- /dev/null +++ b/src/test/java/io/mycat/statistic/TestConcurrentSafety.java @@ -0,0 +1,196 @@ +package io.mycat.statistic; + +import io.mycat.server.parser.ServerParse; +import io.mycat.statistic.stat.*; +import org.junit.Assert; +import org.junit.Ignore; +import org.junit.Test; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +/** + * 测试SQLstat相关元素并发安全性 + * + * + * + * + * + * + * 此单元测试会造成服务器上build运行时间过长一直通不过,最多build了6天还没结束,所以先忽略 + * + * + * + * 后续修复好了再打开 + * + * + * + * + * + * + * + * + * + * + * + * + * + * @author Hash Zhang + * @version 1.0 + * @time 08:54 2016/5/16 + */ +public class TestConcurrentSafety { + private static final int THREAD_COUNT = 2; + private static final int LOOP_COUNT = 1000; + + String sql = "SELECT `fnum`, `forg`, `fdst`, `airline`, `ftype` , `ports_of_call`, " + + "`scheduled_deptime`, `scheduled_arrtime`, `actual_deptime`, `actual_arrtime`, " + + "`flight_status_code` FROM dynamic " + + "WHERE `fnum` = 'CA1' AND `forg` = 'PEK' AND `fdst` = 'SHA' " + + "AND `scheduled_deptime` BETWEEN 1212121 AND 232323233 " + + "AND `fservice` = 'J' AND `fcategory` = 1 " + + "AND `share_execute_flag` = 1 ORDER BY scheduled_deptime"; + + String sql2 = "SELECT `fnum`, `forg`, `fdst`, `airline`, `ftype` , `ports_of_call`, " + + "`scheduled_deptime`, `scheduled_arrtime`, `actual_deptime`, 
`actual_arrtime`, " + + "`flight_status_code` FROM dynamic " + + "WHERE `fnum` = 'CA2' AND `forg` = 'PEK' AND `fdst` = 'SHA' " + + "AND `scheduled_deptime` BETWEEN 1212121 AND 232323233 " + + "AND `fservice` = 'J' AND `fcategory` = 1 " + + "AND `share_execute_flag` = 1 ORDER BY scheduled_deptime"; + + String sql3 = "SELECT `fnum`, `forg`, `fdst`, `airline`, `ftype` , `ports_of_call`, " + + "`scheduled_deptime`, `scheduled_arrtime`, `actual_deptime`, `actual_arrtime`, " + + "`flight_status_code` FROM dynamic " + + "WHERE `fnum` = 'CA3' AND `forg` = 'PEK' AND `fdst` = 'SHA' " + + "AND `scheduled_deptime` BETWEEN 1212121 AND 232323233 " + + "AND `fservice` = 'J' AND `fcategory` = 1 " + + "AND `share_execute_flag` = 1 ORDER BY scheduled_deptime"; + + String sql4 = "SELECT `fnum`, `forg`, `fdst`, `airline`, `ftype` , `ports_of_call`, " + + "`scheduled_deptime`, `scheduled_arrtime`, `actual_deptime`, `actual_arrtime`, " + + "`flight_status_code` FROM dynamic " + + "WHERE `fnum` = 'CA3' AND `forg` = 'PEK'"; + + + @Test @Ignore + public void testQueryConditionAnalyzer() throws InterruptedException { + + + final QueryResult qr = new QueryResult("zhuam", ServerParse.SELECT, sql, 0, 0, 0, 0, 0,0); + final QueryResult qr2 = new QueryResult("zhuam", ServerParse.SELECT, sql2, 0, 0, 0, 0, 0,0); + final QueryResult qr3 = new QueryResult("zhuam", ServerParse.SELECT, sql3, 0, 0, 0, 0, 0,0); + + final QueryConditionAnalyzer analyzer = QueryConditionAnalyzer.getInstance(); + analyzer.setCf("dynamic&fnum"); + + Thread thread[] = new Thread[THREAD_COUNT]; + Thread thread2[] = new Thread[THREAD_COUNT]; + Thread thread3[] = new Thread[THREAD_COUNT]; + + for (int i = 0; i < THREAD_COUNT; i++) { + thread[i] = new Thread() { + @Override + public void run() { + for (int j = 0; j < LOOP_COUNT; j++) { + analyzer.onQueryResult(qr); + } + } + }; + + thread2[i] = new Thread() { + @Override + public void run() { + for (int j = 0; j < LOOP_COUNT; j++) { + analyzer.onQueryResult(qr2); + } + } + }; + 
+ thread3[i] = new Thread() { + @Override + public void run() { + for (int j = 0; j < LOOP_COUNT; j++) { + analyzer.onQueryResult(qr3); + } + } + }; + } + + for (int i = 0; i < THREAD_COUNT; i++) { + thread[i].start(); + thread2[i].start(); + thread3[i].start(); + } + + for (int i = 0; i < THREAD_COUNT; i++) { + thread[i].join(); + thread2[i].join(); + thread3[i].join(); + } + + List> list = analyzer.getValues(); + Assert.assertTrue((list.get(0).getValue().get() == (long) THREAD_COUNT * LOOP_COUNT)); + Assert.assertTrue((list.get(1).getValue().get() == (long) THREAD_COUNT * LOOP_COUNT)); + Assert.assertTrue((list.get(2).getValue().get() == (long) THREAD_COUNT * LOOP_COUNT)); + } + + @Test @Ignore + public void testUserSqlHighStat() throws InterruptedException { + final UserSqlHighStat userSqlHighStat = new UserSqlHighStat(); + + Thread thread[] = new Thread[THREAD_COUNT]; + Thread thread2[] = new Thread[THREAD_COUNT]; + Thread thread3[] = new Thread[THREAD_COUNT]; + + for (int i = 0; i < THREAD_COUNT; i++) { + thread[i] = new Thread() { + @Override + public void run() { + for (int j = 0; j < LOOP_COUNT; j++) { + userSqlHighStat.addSql(sql, 10L, 1L, 11L); + } + } + }; + + thread2[i] = new Thread() { + @Override + public void run() { + for (int j = 0; j < LOOP_COUNT; j++) { + userSqlHighStat.addSql(sql2, 10L, 1L, 11L); + } + } + }; + + thread3[i] = new Thread() { + @Override + public void run() { + for (int j = 0; j < LOOP_COUNT; j++) { + userSqlHighStat.addSql(sql4, 10L, 1L, 11L); + } + } + }; + } + + for (int i = 0; i < THREAD_COUNT; i++) { + thread[i].start(); + thread2[i].start(); + thread3[i].start(); + } + + for (int i = 0; i < THREAD_COUNT; i++) { + thread[i].join(); + thread2[i].join(); + thread3[i].join(); + } + + List sqlFrequency = userSqlHighStat.getSqlFrequency(true); + Assert.assertTrue(sqlFrequency.size() == 2); + Assert.assertTrue(sqlFrequency.get(0).getCount() == 2 * THREAD_COUNT *LOOP_COUNT); + Assert.assertTrue(sqlFrequency.get(1).getCount() == 
THREAD_COUNT *LOOP_COUNT); + } + + + +} diff --git a/src/test/java/io/mycat/util/HexFormatUtilMain.java b/src/test/java/io/mycat/util/HexFormatUtilMain.java index 7c946428e..147f277af 100644 --- a/src/test/java/io/mycat/util/HexFormatUtilMain.java +++ b/src/test/java/io/mycat/util/HexFormatUtilMain.java @@ -26,6 +26,8 @@ import java.util.ArrayList; import java.util.List; +import io.mycat.util.HexFormatUtil; + /** * @author mycat */ diff --git a/src/test/java/io/mycat/util/HexFormatUtilTest.java b/src/test/java/io/mycat/util/HexFormatUtilTest.java new file mode 100644 index 000000000..9b8e387b6 --- /dev/null +++ b/src/test/java/io/mycat/util/HexFormatUtilTest.java @@ -0,0 +1,24 @@ +package io.mycat.util; + +import org.junit.Assert; +import org.junit.Test; + +/** + * + * @author CrazyPig + * @since 2016-09-09 + * + */ +public class HexFormatUtilTest { + + @Test + public void testBytesToString() { + byte[] bytes = new byte[]{ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20 + }; + String hexString = HexFormatUtil.bytesToHexString(bytes); + String expected = "0102030405060708090A0B0C0D0E0F1011121314"; + Assert.assertEquals(expected, hexString); + } + +} diff --git a/src/test/java/io/mycat/util/LockPerfMain.java b/src/test/java/io/mycat/util/LockPerfMain.java index cc5286f83..4b2410352 100644 --- a/src/test/java/io/mycat/util/LockPerfMain.java +++ b/src/test/java/io/mycat/util/LockPerfMain.java @@ -37,12 +37,13 @@ public void tReentrantLock() { long t1 = System.currentTimeMillis(); for (int i = 0; i < 10000000; i++) { - if (lock.tryLock()) + if (lock.tryLock()) { try { // ... 
} finally { lock.unlock(); } + } } long t2 = System.currentTimeMillis(); diff --git a/src/test/java/io/mycat/util/SmallSetTest.java b/src/test/java/io/mycat/util/SmallSetTest.java index f7d1427f1..8902f2fb2 100644 --- a/src/test/java/io/mycat/util/SmallSetTest.java +++ b/src/test/java/io/mycat/util/SmallSetTest.java @@ -26,6 +26,7 @@ import java.util.Collection; import java.util.Iterator; +import io.mycat.util.SmallSet; import junit.framework.Assert; import junit.framework.TestCase; @@ -35,8 +36,9 @@ public class SmallSetTest extends TestCase { public void assertListEquals(Collection col, Object... objects) { - if (objects == null) + if (objects == null) { Assert.assertTrue(col.isEmpty()); + } Assert.assertEquals(objects.length, col.size()); int i = 0; for (Object o : col) { diff --git a/src/test/java/io/mycat/util/SplitUtilTest.java b/src/test/java/io/mycat/util/SplitUtilTest.java index b97aa4233..08b3cb62a 100644 --- a/src/test/java/io/mycat/util/SplitUtilTest.java +++ b/src/test/java/io/mycat/util/SplitUtilTest.java @@ -27,6 +27,8 @@ import org.junit.Test; +import io.mycat.util.SplitUtil; + /** * @author mycat */ diff --git a/src/test/java/io/mycat/util/StringHashPerfMain.java b/src/test/java/io/mycat/util/StringHashPerfMain.java index 281cd79b5..6fb59c2f8 100644 --- a/src/test/java/io/mycat/util/StringHashPerfMain.java +++ b/src/test/java/io/mycat/util/StringHashPerfMain.java @@ -23,6 +23,7 @@ */ package io.mycat.util; +import io.mycat.util.StringUtil; /** * @author mycat diff --git a/src/test/java/io/mycat/util/StringUtilTest.java b/src/test/java/io/mycat/util/StringUtilTest.java index a79bad6f5..16af266a2 100644 --- a/src/test/java/io/mycat/util/StringUtilTest.java +++ b/src/test/java/io/mycat/util/StringUtilTest.java @@ -30,6 +30,8 @@ import org.junit.Test; +import io.mycat.util.StringUtil; + /** * @author mycat */ diff --git a/src/test/resources/autopartition-long-dupl.txt b/src/test/resources/autopartition-long-dupl.txt new file mode 100644 index 
000000000..4aaa74195 --- /dev/null +++ b/src/test/resources/autopartition-long-dupl.txt @@ -0,0 +1,5 @@ +# range start-end ,data node index +0-1000=0 +1001-2000=1 +2001-3000=0 +3001-4000=1 diff --git a/src/test/resources/autopartition-long.txt b/src/test/resources/autopartition-long.txt new file mode 100644 index 000000000..83ad8527d --- /dev/null +++ b/src/test/resources/autopartition-long.txt @@ -0,0 +1,6 @@ +# range start-end ,data node index +0-200M=0 +200M1-400M=1 +400M1-600M=2 +#600M1-800M=3 +#800M1-1000M=4 diff --git a/src/test/resources/autopartition-long2.txt b/src/test/resources/autopartition-long2.txt new file mode 100644 index 000000000..64a7f821d --- /dev/null +++ b/src/test/resources/autopartition-long2.txt @@ -0,0 +1,6 @@ +# range start-end ,data node index +0-200M=0 +200M1-400M=1 +#400M1-600M=2 +#600M1-800M=3 +#800M1-1000M=4 diff --git a/src/test/resources/blob/image0.jpg b/src/test/resources/blob/image0.jpg new file mode 100644 index 000000000..65e29f46d Binary files /dev/null and b/src/test/resources/blob/image0.jpg differ diff --git a/src/test/resources/blob/image1.png b/src/test/resources/blob/image1.png new file mode 100644 index 000000000..141c4f984 Binary files /dev/null and b/src/test/resources/blob/image1.png differ diff --git a/src/test/resources/blob/image2.png b/src/test/resources/blob/image2.png new file mode 100644 index 000000000..3b8bdd8e0 Binary files /dev/null and b/src/test/resources/blob/image2.png differ diff --git a/src/test/resources/blob/image3.png b/src/test/resources/blob/image3.png new file mode 100644 index 000000000..ef536faac Binary files /dev/null and b/src/test/resources/blob/image3.png differ diff --git a/src/test/resources/config/rule.xml b/src/test/resources/config/rule.xml new file mode 100644 index 000000000..ad26c1bbb --- /dev/null +++ b/src/test/resources/config/rule.xml @@ -0,0 +1,34 @@ + + + + + + + + + id + func1 + + + + + + 2 + 512 + + + diff --git a/src/test/resources/config/schema.xml 
b/src/test/resources/config/schema.xml new file mode 100644 index 000000000..2293b07dd --- /dev/null +++ b/src/test/resources/config/schema.xml @@ -0,0 +1,65 @@ + + + + + + + + + + + + + +
+
+ + + + + + + + + + + + + select user() + + + + + + + + + + + select user() + + + + + + + + + diff --git a/src/test/resources/dnindex.properties b/src/test/resources/dnindex.properties deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/test/resources/log4j2.xml b/src/test/resources/log4j2.xml new file mode 100644 index 000000000..143ed6d9e --- /dev/null +++ b/src/test/resources/log4j2.xml @@ -0,0 +1,32 @@ + + + + + + + + + + %d{yyyy-MM-dd HH:mm:ss.SSS} %5p [%t] (%l) - %m%n + + + + + + + + + + + + + + + + + + + + + diff --git a/src/test/resources/mycat.xml b/src/test/resources/mycat.xml deleted file mode 100644 index e98224431..000000000 --- a/src/test/resources/mycat.xml +++ /dev/null @@ -1,415 +0,0 @@ - - - - - - 1 - 8066 - 9066 - utf8 - - - - test - cndb - - - - - -
-
-
- -
- - - - -
- - -
- - - - -
- -
-
-
-
-
-
- - - - -
- -
- -
- - - -
-
- - -
-
- - - -
-
- - - - -
-
- - - - -
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - select user() - - - - - - - - select user() - - - - - - - - select user() - - - - - - - - - - select 1 from dual - alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss' - - - - - - select 1 from dual - alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss' - - - - - - - select 1 from SYSIBM.SYSDUMMY1 - - - - - - - select 1 from SYSIBM.SYSDUMMY1 - - - - - - - - select 1 - - - - - - select 1 - - - - - select 1 - - - - - - select 1 - - - - - - - 128 - 8 - :8 - - - 2014-01-01 - 10 - yyyy-MM-dd - - - 128 - 8 - - - 0 - - - 0 - 1 - - - - - - - 0 - 1 - 2 - 3 - 4 - - - - 0 - - - 5 - 1 - 4 - 4 - 6 - - - - 2014-01-01 00:00:00 - 3 - yyyy-MM-dd HH:mm:ss - 6 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - dn1 - dn1 - - - - - - - 127.0.0.1 - 1 - - - - - test - - - - - 0 - - - utf8 - - - \ No newline at end of file diff --git a/src/test/resources/partition-pattern.txt b/src/test/resources/partition-pattern.txt new file mode 100644 index 000000000..ac35370a0 --- /dev/null +++ b/src/test/resources/partition-pattern.txt @@ -0,0 +1,12 @@ +# id partition range start-end ,data node index +###### first host configuration +1-32=0 +33-64=1 +65-96=2 +97-128=3 +######## second host configuration +129-160=4 +161-192=5 +193-224=6 +225-256=7 +0-0=7 \ No newline at end of file diff --git a/src/test/resources/partition-range-mod.txt b/src/test/resources/partition-range-mod.txt new file mode 100644 index 000000000..c649b53b3 --- /dev/null +++ b/src/test/resources/partition-range-mod.txt @@ -0,0 +1,6 @@ +# range start-end ,data node group size +0-200M=5 +200M1-400M=1 +400M1-600M=4 +600M1-800M=4 +800M1-1000M=6 diff --git a/src/test/resources/partition_prefix_pattern.txt b/src/test/resources/partition_prefix_pattern.txt new file mode 100644 index 000000000..553ef13bf --- /dev/null +++ b/src/test/resources/partition_prefix_pattern.txt @@ -0,0 +1,16 @@ +# range start-end ,data node index +# ASCII编码:主要划分出10个数字,小字母26,一共36个字母进行分片 
+# 48-57=0-9阿拉伯数字 +# 64、65-90=@、A-Z +# 97-122=a-z +###### first host configuration +1-4=0 +5-8=1 +9-12=2 +13-16=3 +###### second host configuration +17-20=4 +21-24=5 +25-28=6 +29-32=7 +0-0=7 diff --git a/src/test/resources/route/rule.xml b/src/test/resources/route/rule.xml new file mode 100644 index 000000000..14d656088 --- /dev/null +++ b/src/test/resources/route/rule.xml @@ -0,0 +1,141 @@ + + + + + + + + member_id + func + + + + + + col_date + by-date + + + + + + offer_id + func2 + + + + + sharding_id + func1 + + + + + id + rang-long + + + + + id + rang-long2 + + + + + + id + rang-mod + + + + + + id + partitionByMod + + + + + + col_date + range-date-hash + + + + + id + crc32slot + + + + + + id + mod-long + + + + + 2 + + + 0 + sharding.txt + + + 128 + 8 + :8 + + + 128 + 8 + + + autopartition-long.txt + + + autopartition-long2.txt + + + 2014-01-01 + 10 + yyyy-MM-dd + + + + partition-range-mod.txt + + + + 2014-01-01 00:00:00 + 3 + yyyy-MM-dd HH:mm:ss + 6 + + + + 2 + + + + + 2 + + diff --git a/src/test/resources/route/schema.xml b/src/test/resources/route/schema.xml new file mode 100644 index 000000000..a9b99094e --- /dev/null +++ b/src/test/resources/route/schema.xml @@ -0,0 +1,213 @@ + + + + + + +
+
+
+ +
+ + + +
+
+ + + +
+ + +
+ + + + +
+ +
+
+
+
+
+
+ + + + +
+ +
+ +
+ + + +
+
+
+ + +
+
+ + + +
+
+ + + + +
+
+ + + + +
+
+ + + +
+
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + select user() + + + + + + + + select user() + + + + + + + + select user() + + + + + + + + + + select 1 from dual + alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss' + + + + + + select 1 from dual + alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss' + + + + + + + select 1 from SYSIBM.SYSDUMMY1 + + + + + + + select 1 from SYSIBM.SYSDUMMY1 + + + + + + + + select 1 + + + + + + select 1 + + + + + select 1 + + + + + + select 1 + + + + + diff --git a/src/test/resources/rule.xml b/src/test/resources/rule.xml new file mode 100644 index 000000000..ad26c1bbb --- /dev/null +++ b/src/test/resources/rule.xml @@ -0,0 +1,34 @@ + + + + + + + + + id + func1 + + + + + + 2 + 512 + + + diff --git a/src/test/resources/schema.xml b/src/test/resources/schema.xml new file mode 100644 index 000000000..e82bf9893 --- /dev/null +++ b/src/test/resources/schema.xml @@ -0,0 +1,41 @@ + + + + + + + +
+ + + + + + + + select user() + + + + + + + + + diff --git a/src/test/resources/sequence_conf.properties b/src/test/resources/sequence_conf.properties index e64f28b1f..5ec3ecd47 100644 --- a/src/test/resources/sequence_conf.properties +++ b/src/test/resources/sequence_conf.properties @@ -1,8 +1,11 @@ -GLOBAL_SEQ.HISIDS= -GLOBAL_SEQ.MINID=1001 -GLOBAL_SEQ.MAXID=1000000000 -GLOBAL_SEQ.CURID=1000 -TUSER.HISIDS= -TUSER.MINID=1001 -TUSER.MAXID=1000000000 -TUSER.CURID=1000 \ No newline at end of file +GLOBAL.HISIDS= +GLOBAL.MINID=1 +GLOBAL.MAXID=10 +GLOBAL.CURID=1 + +MY1.HISIDS= +MY1.MINID=1001 +MY1.MAXID=2000 +MY1.CURID=1000 + + diff --git a/src/test/resources/server.xml b/src/test/resources/server.xml new file mode 100644 index 000000000..2836f3703 --- /dev/null +++ b/src/test/resources/server.xml @@ -0,0 +1,66 @@ + + + + + + + + + + + test + dbtest + 11111 + + + + + + + + 127.0.0.1 + 1 + + + + + + + diff --git a/src/test/resources/sharding.txt b/src/test/resources/sharding.txt new file mode 100644 index 000000000..c13b35142 --- /dev/null +++ b/src/test/resources/sharding.txt @@ -0,0 +1,2 @@ +10000=0 +10010=1 \ No newline at end of file diff --git a/src/main/resources/zk-create.yaml b/src/test/resources/zk-create-test.yaml similarity index 88% rename from src/main/resources/zk-create.yaml rename to src/test/resources/zk-create-test.yaml index 3440e0f13..cf2173a67 100644 --- a/src/main/resources/zk-create.yaml +++ b/src/test/resources/zk-create-test.yaml @@ -15,6 +15,7 @@ mycat-cluster: readOnly : true schemas : - testdb + - test mycat : name: mycat password: admin @@ -106,13 +107,13 @@ mycat-cluster: name : company datanode : dn1,dn2,dn3 primaryKey : ID - type : 1 //全局表为 1 + type : 1 #全局表为 1 goods : name : goods datanode : dn1,dn2 primaryKey : ID - type : 1 //全局表为 1 + type : 1 #全局表为 1 hotnews : name : hotnews @@ -134,19 +135,19 @@ mycat-cluster: orders : name : orders - primarykey : ID - joinkey : customer_id - parentkey : ID + primaryKey : ID + joinKey : customer_id + 
parentKey : ID order_items : name : order_items - joinkey : order_id - parentkey : ID + joinKey : order_id + parentKey : ID customer_addr : name : customer_addr - joinkey : customer_id - parentkey : ID + joinKey : customer_id + parentKey : ID offer : name : offer @@ -215,7 +216,7 @@ mycat-nodes: weigth: 1 leader: 1 state: red - system-params: + systemParams: defaultsqlparser : druidparser serverport : 8066 sequncehandlertype : 1 @@ -223,7 +224,21 @@ mycat-nodes: mycat-mysqls: mysql_1: ip: 192.168.8.2 - port: 3366 + port: 3306 + user: mysql + password: mysql + hostId: host + zone: bj + mysql_2: + ip: 192.168.8.3 + port: 3307 + user: mysql + password: mysql + hostId: host + zone: bj + mysql_3: + ip: 192.168.8.4 + port: 3308 user: mysql password: mysql hostId: host @@ -235,9 +250,9 @@ mycat-mysqlgroup : repType: 0 zone: bj servers: - - mysqlId1 - - mysqlId2 - - mysqlId3 - cur-write-server: mysqlId2 + - mysql_1 + - mysql_2 + - mysql_3 + cur-write-server: mysql_1 auto-write-switch: true heartbeatSQL : select user() diff --git a/version.txt b/version.txt index ee966014c..f680a5d56 100644 --- a/version.txt +++ b/version.txt @@ -1,6 +1,6 @@ -BuildTime 2015-12-18 08:56:47 -GitVersion null -MavenVersion 2.0-dev +BuildTime 2017-04-24 09:41:48 +GitVersion 36626e4d819c30da8e281594758559ec13f00679 +MavenVersion 1.6.5-BETA GitUrl https://github.com/MyCATApache/Mycat-Server.git MyCatSite http://www.mycat.org.cn QQGroup 106088787