diff --git a/.travis.yml b/.travis.yml
index 80b6f4b78..b179f30c7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,4 +1,3 @@
language: java
jdk:
- - openjdk7
- - oraclejdk7
+ - openjdk8
diff --git a/LICENSE b/LICENSE
index e06d20818..23cb79033 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,202 +1,339 @@
-Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ {description}
+ Copyright (C) {year} {fullname}
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ {signature of Ty Coon}, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/README.md b/README.md
index 6e9ed7b62..e4fe272f2 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,6 @@ Mycat’s target is to smoothly migrate the current stand-alone database and app
* [Getting Started](https://github.com/MyCATApache/Mycat-doc/tree/master/en)
* [尝试 MyCAT](https://github.com/MyCATApache/Mycat-doc/blob/master/MyCat_In_Action_%E4%B8%AD%E6%96%87%E7%89%88.doc)
-
## Features
@@ -48,45 +47,379 @@ There are some compiled binary installation packages in Mycat-download project o
There are some documents in Mycat-doc project on github at [Mycat-doc](https://github.com/MyCATApache/Mycat-doc).
-## Play with Mycat -- Mycat All In One
-All in one is the integrated developing & testing environment ,a centos 7 virtual machine with Mycat-server,Mycat-WEB,MYSQL,ZooKeeper installed .
-
-You can execute the following steps to get mycat working platform:
-import CentOs7.voa
->* install Oracle VM VirtualBox
->* run Oracle VM VirtualBox
->* download mycat-all-in-one ,[press here to get all-in-one](http://pan.baidu.com/s/1qWMkJPM),password:v63y
->* File -> Import Appliances
->* choose the path you download CentOS 7.ova, choose the ova file
->* you can get everything done,just press next
->
-setup virtual box
->* login user name : root, password: 123456
->* run the multiple mysql instances by the commands shown as follow:
-```
-mysqld_multi start
-```
-setup Mycat
-```
- cd /opt/mycat
- ./bin/mycat start
-```
-run zookeeper
->* cd /opt/zookeeper-3.4.6
->* bin/zkServer.sh start
->* bin/zkCli.sh
-
-connect to Mycat --> this step should be done on you host computer
->* setup Navicat Premium
->* create a connection to Mycat with IP:8066 , username : test , password: test
->* if connection correct , you would see the database's name is TESTDB
->* then you could try everything you like ,just have fun !
-
-pay attention to Mycat's IP in 'connect to Mycat' step , the IP address may different from your own host IP ,
-you need to justify the IP by rewrite the file under /etc/sysconfig/network-scripts/ifcfg-enp0s3 , then run
-
-```
-service network restart
-````
-to let the modified file work
-
\ No newline at end of file
+
+A short Mycat demo; for details, see the Mycat Definitive Guide (《Mycat权威指南》).
+
+Official site: mycat.io
+Official QQ group: 106088787
+Official download of the Mycat Definitive Guide: http://songwie.com/attached/file/mycat_1.5.2.pdf
+wiki: wiki
+
+# The Past and Present of Mycat
+
+In 2013, users in the community ran into some fairly serious problems and usage limitations in Alibaba's Cobar. After a first round of improvements driven by the Mycat founder, the first improved edition -- Mycat -- was born. Once Mycat was open-sourced, some Cobar users joined its development, and Mycat eventually grew into a community open-source project maintained by experienced architects and senior developers from many software companies.
+
+In 2014, Mycat was presented publicly for the first time at the "中华架构师" (Chinese Architects) conference in Shanghai; more people got involved, and more and more projects adopted Mycat afterwards.
+
+In May 2015, the electronic edition of the first official guide, the "Mycat权威指南" (Mycat Definitive Guide), written jointly by the core contributors, was released; more than 500 copies have been distributed in total, a first among open-source projects.
+
+As of October 2015, the Mycat project had 16 committers.
+
+As of November 2015, more than 300 projects had adopted Mycat, covering banking, telecom, e-commerce, logistics, mobile applications, O2O, and many other fields and companies.
+
+As of December 2015, more than 4000 users had joined the groups to discuss, test, or use Mycat.
+
+Mycat evolved from the open-source Cobar. We thoroughly refactored Cobar's code, rewrote the network module on top of NIO, optimized the buffer kernel, and strengthened basic features such as aggregation and joins, while remaining compatible with the vast majority of databases, turning it into a general-purpose database middleware. Since version 1.4 it has completely left the original Cobar kernel behind and, together with Mycat cluster management, automatic scaling, and intelligent optimization, has become a high-performance middleware. We are committed to building high-performance database middleware: never charging fees, never closing the source, and continuously pushing the open-source community forward.
+
+Mycat has attracted a large group of senior engineers in big data and cloud computing. Its growth rests on the continuous effort of open-source volunteers; we thank them for making Mycat stronger, and we welcome more volunteers -- especially companies -- to take part in Mycat's development, push the community forward together, and provide better open-source middleware for everyone.
+
+Mycat is not yet powerful enough and still has many shortcomings; continuous improvements from community volunteers are welcome.
+
+# Key Features
+Supports the SQL92 standard.
+
+Speaks the native MySQL protocol: a general-purpose middleware proxy across languages, platforms, and databases.
+
+Automatic failover based on heartbeats; supports read/write splitting, MySQL master-slave replication, and Galera Cluster.
+
+Supports Galera for MySQL, Percona Cluster, and MariaDB Cluster.
+
+Built on NIO, with effective thread management for high concurrency.
+
+Supports automatic multi-shard routing and aggregation, including common aggregate functions such as sum, count, and max.
+
+Supports arbitrary joins within a single shard, cross-shard two-table joins, and even catlet-based multi-table joins.
+
+Supports global tables and ER-relationship sharding strategies for efficient multi-table join queries.
+
+Supports multi-tenant deployments.
+
+Supports distributed transactions (weak XA).
+
+Supports global sequences, solving primary-key generation in a distributed setup.
+
+Rich sharding rules, developed as plugins and easy to extend.
+
+Powerful web and command-line monitoring.
+
+The front end acts as a generic MySQL proxy; the back end supports Oracle, DB2, SQL Server, MongoDB, and SequoiaDB via JDBC.
+
+Supports password encryption.
+
+Supports service degradation.
+
+Supports IP whitelists.
+
+Supports SQL blacklists and SQL-injection interception.
+
+Supports intra-database table partitioning (1.6).
+
+Cluster management based on ZooKeeper, online upgrades, scaling out, intelligent optimization, and big-data processing (2.0 development branch).
+
+
+# Installing and Using Mycat
+
+## Download:
+[https://github.com/MyCATApache/Mycat-download](https://github.com/MyCATApache/Mycat-download)
+Download whichever version is currently released; 1.4 and 1.5 are recommended.
+
+## Installation:
+Simply unpack the downloaded archive.
+
+## Running:
+### Linux:
+    ./mycat start      # start the server
+
+    ./mycat stop       # stop the server
+
+    ./mycat console    # run in the foreground
+
+    ./mycat install    # register as a service started at boot (not yet implemented)
+
+    ./mycat remove     # unregister the boot-time service (not yet implemented)
+
+    ./mycat restart    # restart the server
+
+    ./mycat pause      # pause
+
+    ./mycat status     # show the startup status
+
+### Windows:
+Run startup_nowrap.bat directly. If the window closes immediately, run it from a cmd prompt to see what went wrong.
+
+## Memory configuration:
+Before starting, you usually need to adjust the JVM parameters. Open conf/wrapper.conf; where the shipped values are 2G and 2048, change them to 512M or other values that fit your machine.
+The settings below map one-to-one onto JVM options and can be tuned like any other JVM parameters.
+
+Java Additional Parameters
+
+    wrapper.java.additional.1=
+    wrapper.java.additional.1=-DMYCAT_HOME=.
+    wrapper.java.additional.2=-server
+    wrapper.java.additional.3=-XX:MaxPermSize=64M
+    wrapper.java.additional.4=-XX:+AggressiveOpts
+    wrapper.java.additional.5=-XX:MaxDirectMemorySize=100m
+    wrapper.java.additional.6=-Dcom.sun.management.jmxremote
+    wrapper.java.additional.7=-Dcom.sun.management.jmxremote.port=1984
+    wrapper.java.additional.8=-Dcom.sun.management.jmxremote.authenticate=false
+    wrapper.java.additional.9=-Dcom.sun.management.jmxremote.ssl=false
+    wrapper.java.additional.10=-Xmx100m
+    wrapper.java.additional.11=-Xms100m
+    wrapper.java.additional.12=-XX:+UseParNewGC
+    wrapper.java.additional.13=-XX:+UseConcMarkSweepGC
+    wrapper.java.additional.14=-XX:+UseCMSCompactAtFullCollection
+    wrapper.java.additional.15=-XX:CMSFullGCsBeforeCompaction=0
+    wrapper.java.additional.16=-XX:CMSInitiatingOccupancyFraction=70
+
+The following settings are deprecated:
+
+    wrapper.java.initmemory=3
+    wrapper.java.maxmemory=64
+
+### Testing the Mycat connection:
+Testing Mycat is exactly the same as testing MySQL: however you connect to MySQL, you connect to Mycat the same way.
+
+Testing from the command line first is recommended:
+
+    mysql -uroot -proot -P8066 -h127.0.0.1
+
+If you connect with a GUI tool, note that some tools currently cannot connect to 1.3 and 1.4 and report "database not selected"; use a newer release and Navicat for testing. 1.5 has fixed the connection problem for some of these tools.
+
+
+# Getting Started with Mycat Configuration
+
+## Configuration:
+--bin    startup directory (launch scripts)
+
+--conf   directory holding the configuration files:
+
+    --server.xml: Mycat server parameters and user authorization.
+
+    --schema.xml: logical schemas, tables, and sharding definitions.
+
+    --rule.xml: sharding rules; the detailed parameters of individual rules live in separate files in the same directory. MyCAT must be restarted after configuration changes.
+
+    --log4j.xml: logging configuration. Logs are written to logs/log, one file per day; raise the level to debug when needed -- at debug level more information is printed, which makes troubleshooting easier.
+
+    --autopartition-long.txt, partition-hash-int.txt, sequence_conf.properties, sequence_db_conf.properties: id and sharding rule configuration files.
+
+--lib    MyCAT's own jars and its dependencies.
+
+--logs   MyCAT log directory; logs are written to logs/log, one file per day.
+
+The three most important Mycat configuration files are server.xml, schema.xml, and rule.xml.
+
+
+
+
+## Logical schema configuration:
+### Configuring server.xml
+Add two Mycat logical schemas, user and pay:
+the system element holds all Mycat parameter settings -- for example the parser, defaultSqlParser (druidparser here); all other parameters follow the same pattern.
+the user element holds the user settings -- here a user named mycat with access to the schemas user,pay -- as sketched below.
+
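+A minimal sketch of such a server.xml fragment, reconstructed for illustration (the element layout follows the usual Mycat conventions; the password value is an assumption):
+
+```xml
+<system>
+    <!-- switch the SQL parser to druid -->
+    <property name="defaultSqlParser">druidparser</property>
+</system>
+
+<!-- one front-end user, allowed to see the two logical schemas user and pay -->
+<user name="mycat">
+    <property name="password">mycat</property>  <!-- assumed password -->
+    <property name="schemas">user,pay</property>
+</user>
+```
+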
+### Editing schema.xml
+Adjust the connection information of the schema and dataHost elements. After the vertical split into user and pay, the configuration looks like the sketch below.
+
+schema configures an actual logical schema; user and pay each map to one logical schema, and multiple schema elements mean multiple logical schemas.
+
+dataNode is the shard a logical schema maps to; to use several shards, simply define several dataNode elements.
+
+dataHost is the address of the actual physical database. It can be set up as multi-master, master-slave, and so on; several dataHost elements describe the physical databases behind the shards, and the writeHost / readHost elements underneath decide whether a shard uses multi-write, master-slave, read/write splitting, or other advanced features.
+
+The example below configures two writeHosts as a master/slave pair with a select 1 heartbeat.
+
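+A minimal schema.xml sketch for the user/pay split described above (host addresses, database names, and credentials are illustrative assumptions):
+
+```xml
+<mycat:schema xmlns:mycat="http://io.mycat/">
+    <!-- two logical schemas, each backed by one shard -->
+    <schema name="user" dataNode="dn-user"/>
+    <schema name="pay"  dataNode="dn-pay"/>
+
+    <!-- shards: aliases for physical databases on a dataHost -->
+    <dataNode name="dn-user" dataHost="localhost1" database="user_db"/>
+    <dataNode name="dn-pay"  dataHost="localhost1" database="pay_db"/>
+
+    <!-- physical host mapping with two writeHosts (master and slave) -->
+    <dataHost name="localhost1" maxCon="1000" minCon="10" balance="1"
+              writeType="0" dbType="mysql" dbDriver="native" switchType="1">
+        <heartbeat>select 1</heartbeat>
+        <writeHost host="hostM1" url="http://github.com/127.0.0.1:3306" user="root" password="123456"/>
+        <writeHost host="hostM2" url="http://github.com/127.0.0.1:3307" user="root" password="123456"/>
+    </dataHost>
+</mycat:schema>
+```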
+
+# Mycat Logical Schema and System Parameter Configuration
+
+## Configuring Mycat system parameters
+
+All Mycat parameter variables are configured in server.xml under the system element -- for example the SQL parser setting defaultSqlParser (druidparser). To set a variable, just add the corresponding property; for instance, to set the listening port (default 8066), add a serverPort property as sketched below. All other variables work the same way.
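+
+A sketch of the system block, assuming the standard property names serverPort and defaultSqlParser:
+
+```xml
+<system>
+    <!-- listening port; 8066 is also the default -->
+    <property name="serverPort">8066</property>
+    <property name="defaultSqlParser">druidparser</property>
+</system>
+```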
+
+## Configuring Mycat logical schemas and users
+
+All mappings between connecting users and logical schemas are likewise configured in server.xml, under the user element. The sketch below defines a user named mycat that applications use when connecting to Mycat, and maps it to the logical schema TESTDB defined in schema.xml.
+
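+A sketch of the corresponding user definition (the password value is an assumption):
+
+```xml
+<user name="mycat">
+    <property name="password">mycat</property>
+    <!-- logical schemas this user may access; TESTDB must be defined in schema.xml -->
+    <property name="schemas">TESTDB</property>
+</user>
+```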
+
+# Logical Schema and Table Sharding Configuration
+
+## Configuring a logical schema (schema)
+
+As a middleware that implements the MySQL protocol, Mycat looks like an ordinary database to the connecting application, so it needs database configuration of its own. Mycat's databases are configured in schema.xml; once configured, they only have to be mapped to a user in server.xml.
+
+The sketch below configures a logical schema TESTDB together with two sharded tables, t_user and ht_jy_login_log; the dataHost it relies on uses show status like 'wsrep%' as its heartbeat (dataHost configuration is covered further down).
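+
+A sketch of such a schema definition (the dataNode names and the rule attribute are illustrative assumptions):
+
+```xml
+<schema name="TESTDB" checkSQLschema="false" sqlMaxLimit="100">
+    <!-- two sharded tables spread over the shards dn1 and dn2 -->
+    <table name="t_user"          dataNode="dn1,dn2" rule="sharding-by-hour"/>
+    <table name="ht_jy_login_log" dataNode="dn1,dn2" rule="sharding-by-hour"/>
+</schema>
+```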
+
+### Logical table configuration
+
+The table element configures a logical table, where:
+
+name is the table name,
+
+dataNode lists the shards the table maps to,
+
+Mycat shards by database by default, i.e. one table is mapped onto different databases,
+
+rule names the data-splitting rule the table uses; the name must match a rule in rule.xml, and it is mandatory if the table is to be sharded.
+
+
+## Configuring shards (dataNode)
+
+After a table is split, you must configure which databases it maps to. A Mycat shard is essentially an alias for a database: the sketch below configures two shards, dn1 and dn2, mapped to two databases on the physical host mapping (dataHost) localhost1.
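+
+A sketch of the two shard definitions (the physical database names are assumptions):
+
+```xml
+<dataNode name="dn1" dataHost="localhost1" database="db1"/>
+<dataNode name="dn2" dataHost="localhost1" database="db2"/>
+```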
+
+## Configuring the physical host mapping (dataHost)
+
+As a database proxy Mycat needs logical schemas and logical users, and once tables are split it needs shards; those shards must in turn be mapped onto real physical hosts. Whether they map to one host or to several instances on a single host does not matter to Mycat -- only the mapping has to be configured. In the sketch below:
+
+a physical host mapping (dataHost) named localhost1 is configured.
+
+The heartbeat element is the statement Mycat runs to health-check the physical database (here show status like 'wsrep%'). Whether a production setup uses master-slave, multi-write, or a single database, Mycat keeps datasource connections to the backends and must periodically verify that they are still usable; the heartbeat statement is what it runs for that check.
+
+The writeHost element maps a logical host (dataHost) to a backend physical host; in the sketch, the write node hostM1 maps to 127.0.0.1:3306. For read/write splitting, multi-write, or master-slave setups, simply configure several writeHost or readHost elements.
+
+Attributes of dataHost such as writeType and balance select different strategies; see the guide for details.
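+
+A sketch of such a dataHost mapping (connection attributes and credentials are illustrative assumptions):
+
+```xml
+<dataHost name="localhost1" maxCon="1000" minCon="10" balance="0"
+          writeType="0" dbType="mysql" dbDriver="native" switchType="1">
+    <!-- heartbeat statement used to probe the backend -->
+    <heartbeat>show status like 'wsrep%'</heartbeat>
+    <!-- write node hostM1 mapped to 127.0.0.1:3306 -->
+    <writeHost host="hostM1" url="http://github.com/127.0.0.1:3306" user="root" password="123456"/>
+</dataHost>
+```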
+
+# Mycat Table-Splitting Rule Configuration
+
+## Table-splitting rules
+
+The splitting rule is the most important part of data sharding: how a table is split determines how well the sharded data performs, which is why this configuration matters most.
+
+The sketch below configures one splitting rule, named sharding-by-hour, whose splitting function shards by date (hour). In that configuration:
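+
+A sketch of the rule.xml fragment being described (the splitOneDay property name is an assumption about how the hour-based function is parameterised):
+
+```xml
+<tableRule name="sharding-by-hour">
+    <rule>
+        <columns>createTime</columns>
+        <algorithm>sharding-by-hour</algorithm>
+    </rule>
+</tableRule>
+
+<function name="sharding-by-hour"
+          class="org.opencloudb.route.function.LatestMonthPartion">
+    <!-- 24 slices per day, i.e. one shard per hour (property name assumed) -->
+    <property name="splitOneDay">24</property>
+</function>
+```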
+
+### tableRule
+
+name corresponds to the rule="sharding-by-hour" attribute of the table element in schema.xml; it is the sharding rule the table uses.
+
+columns is the table's sharding column: createTime, the creation time.
+
+algorithm is the splitting algorithm the rule applies; it maps to the name of a function.
+
+
+### function
+
+A function element configures a sharding algorithm.
+
+name is the algorithm's name; you can pick it freely, but it must match the algorithm referenced by the tableRule.
+
+class is the implementation class of the algorithm; it is fixed per rule type -- configure whichever rule you need. In this example the data is sharded by hour: org.opencloudb.route.function.LatestMonthPartion.
+
+property elements are the parameters of the algorithm; different algorithms take different properties.
+
+
diff --git a/README_Chinese.md b/README_Chinese.md
index 5ff2f24e4..8ff63cf1a 100644
--- a/README_Chinese.md
+++ b/README_Chinese.md
@@ -3,7 +3,7 @@
### 官网:[http://www.mycat.org.cn](http://www.mycat.org.cn)
### github:[https://github.com/MyCATApache](https://github.com/MyCATApache)
-##### 入门: [zh-CN: https://github.com/MyCATApache/Mycat-doc/blob/master/MyCat_In_Action_%E4%B8%AD%E6%96%87%E7%89%88.doc] [English:https://github.com/MyCATApache/Mycat-doc/tree/master/en]
+##### 入门: [zh-CN: https://github.com/MyCATApache/Mycat-doc/blob/master/history/MyCat_In_Action_%E4%B8%AD%E6%96%87%E7%89%88.doc] [English:https://github.com/MyCATApache/Mycat-doc/tree/master/en]
什么是Mycat?简单的说,Mycat就是:
@@ -57,55 +57,3 @@ github上面的Mycat-download项目是编译好的二进制安装包 [https://gi
##### 文档:
github上面的Mycat-doc项目是相关文档 [https://github.com/MyCATApache/Mycat-doc](https://github.com/MyCATApache/Mycat-doc)
-
-##### 尝试 Mycat -- Mycat All in One
-在这里我们为您提供了集 mycat-server,mycat-web,mysql,zookeeper 于一身的测试开发环境,是您开发测试必备良器,
-您只需要执行如下几个步骤便可开启属于您的 mycat 之旅 :
-
-> 导入 OVA
->* 安装Oracle VM VirtualBox
->* 启动Oracle VM VirtualBox
->* 下载 mycat-all-in-one 镜像文件,[戳这里下载all-in-one镜像](http://pan.baidu.com/s/1qWMkJPM),密码:v63y
->* File(管理) -> Import Appliances(导入虚拟电脑)<网络模式首选桥接模式>
->* 选择CentOS 7.ova
->* 一路Next
-
-> 启动虚拟机
->* 登录虚拟机 root/123456
->* 启动多实例Mysql
-
- ```
- mysqld_multi start
- ```
-
-> 启动 Mycat
- ```
- cd /opt/mycat/
- ./bin/mycat start
- ```
- > ZK启动
-
-```
- cd /opt/zookeeper-3.4.6
- bin/zkServer.sh start
- bin/zkCli.sh
-```
-> 体验 Mycat
- >* 启动Navicat Premium
- >* 连接Mycat,IP:8066 test/test
- >* 连接TESTDB
- >* 测试
-
-```
- select * from t_user;
-```
-请留意 '体验 Mycat'该步骤中的 IP 地址的设定,虚拟机中 IP 地址若与主机地址不匹配会引发连接失败的情况,
-此时可以将 虚拟机IP 地址修改静态IP地址来解决,修改位于路径
-````
-/etc/sysconfig/network-scripts/ifcfg-enp0s3
-````
-下面的文件,然后运行命令
-````
-service network restart
-````
-来让刚刚修改过的文件生效即可
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
index 743fc1773..f8190fe96 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2,9 +2,9 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0%20http://maven.apache.org/maven-v4_0_0.xsd">
4.0.0
-	<groupId>io.mycat.mycat</groupId>
+	<groupId>io.mycat</groupId>
 	<artifactId>Mycat-server</artifactId>
-	<version>2.0-dev</version>
+	<version>1.6.5-release</version>
 	<packaging>jar</packaging>
 	<name>Mycat-server</name>
 	<description>The project of Mycat-server</description>
@@ -13,7 +13,7 @@
UTF-8
+ ${maven.build.timestamp}
yyyy-MM-dd HH:mm:ss
version.txt.template
version.txt
@@ -43,26 +43,8 @@
-
-
- org.codehaus.jsr166-mirror
- jsr166y
- 1.7.0
- test
-
-
- junit
- junit
- 4.4
- test
-
-
-
-
- mysql
- mysql-connector-java
- 5.1.35
-
+
org.mongodb
mongo-java-driver
@@ -78,103 +60,177 @@
leveldb-api
0.7
-
- com.sequoiadb
- sequoiadb-driver
- 1.12
-
-
-
com.google.guava
guava
- 18.0
+ 19.0
- com.google.code.findbugs
- jsr305
- 3.0.0
+ com.alibaba
+ druid
+ 1.0.26
- commons-beanutils
- commons-beanutils
- 1.9.2
+ mysql
+ mysql-connector-java
+ 5.1.35
-
-
- com.univocity
- univocity-parsers
- 1.5.4
- jar
+ net.sf.ehcache
+ ehcache-core
+ 2.6.11
+ compile
- com.alibaba
- druid
- 1.0.14
+ org.mapdb
+ mapdb
+ 1.0.7
- org.yaml
- snakeyaml
- 1.16
+ junit
+ junit
+ 4.4
+ provided
+
+ org.apache.velocity
+ velocity
+ 1.7
+
- com.alibaba
- fastjson
- 1.2.7
+ org.codehaus.jsr166-mirror
+ jsr166y
+ 1.7.0
+ test
+
+ com.lmax
+ disruptor
+ 3.3.4
+
+
+ org.apache.logging.log4j
+ log4j-slf4j-impl
+ 2.5
+
+
+ org.apache.logging.log4j
+ log4j-core
+ 2.5
+
+
+ org.apache.logging.log4j
+ log4j-1.2-api
+ 2.5
+
- org.apache.velocity
- velocity
- 1.7
+ com.univocity
+ univocity-parsers
+ 2.2.1
+ jar
-
+
- org.mapdb
- mapdb
- 1.0.7
+ com.sequoiadb
+ sequoiadb-driver
+ 1.12
+
+
+
- net.sf.ehcache
- ehcache-core
- 2.6.11
- compile
+ dom4j
+ dom4j
+ 1.6.1
+
+
+ xml-apis
+ xml-apis
+
+
org.apache.curator
curator-framework
- 2.9.0
-
+ 2.11.0
-
-
- org.slf4j
- slf4j-api
- 1.7.12
- compile
+ org.apache.curator
+ curator-recipes
+ 2.11.0
- org.apache.logging.log4j
- log4j-slf4j-impl
- 2.3
+ org.apache.curator
+ curator-test
+ 2.11.0
+ test
+
+
+ log4j
+ log4j
+
+
- org.apache.logging.log4j
- log4j-core
- 2.3
+ com.alibaba
+ fastjson
+ 1.2.12
-
-
+
joda-time
joda-time
- 2.8.2
+ 2.9.3
+
+ com.github.shyiko
+ mysql-binlog-connector-java
+ 0.6.0
+
+
+
+
+ org.mockito
+ mockito-all
+ 1.8.5
+ test
+
+
+
+ com.google.code.findbugs
+ jsr305
+ 2.0.3
+
+
+
+ com.esotericsoftware.kryo
+ kryo
+ 2.10
+
+
+
+ org.hamcrest
+ hamcrest-library
+ 1.3
+
+
+
+
+
+ commons-lang
+ commons-lang
+ 2.6
+
+
+
+ io.netty
+ netty-buffer
+ 4.1.9.Final
+
@@ -287,12 +343,12 @@
replace
- ${project.basedir}/src/main/java/io/mycat/server/Versions.template
- ${project.basedir}/src/main/java/io/mycat/server/Versions.java
+ ${project.basedir}/src/main/java/io/mycat/config/Versions.template
+ ${project.basedir}/src/main/java/io/mycat/config/Versions.java
@server-version@
- 5.5.8-mycat-${project.version}-${timestamp}
+ 5.6.29-mycat-${project.version}-${timestamp}
@@ -305,11 +361,12 @@
org.apache.maven.plugins
maven-compiler-plugin
-	<source>1.8</source>
-	<target>1.8</target>
+	<source>1.7</source>
+	<target>1.7</target>
 	<encoding>${app.encoding}</encoding>
+
org.apache.maven.plugins
maven-source-plugin
@@ -364,6 +421,7 @@
+
org.codehaus.mojo
@@ -391,15 +449,15 @@
MYCAT_HOME=.
- -server
+ -server
-XX:MaxPermSize=64M
-XX:+AggressiveOpts
-XX:MaxDirectMemorySize=2G
- -Dcom.sun.management.jmxremote
+ -Dcom.sun.management.jmxremote
-Dcom.sun.management.jmxremote.port=1984
- -Dcom.sun.management.jmxremote.authenticate=false
- -Dcom.sun.management.jmxremote.ssl=false
+ -Dcom.sun.management.jmxremote.authenticate=false
+ -Dcom.sun.management.jmxremote.ssl=false
-Xmx4G
-Xms1G
@@ -430,14 +488,22 @@
configuration.directory.in.classpath.first
conf
-
- wrapper.ping.timeout
- 120
-
+
+ wrapper.ping.timeout
+ 120
+
set.default.REPO_DIR
lib
+
+ wrapper.logfile.maxsize
+ 512m
+
+
+ wrapper.logfile.maxfiles
+ 30
+
wrapper.logfile
logs/wrapper.log
diff --git a/src/main/assembly/assembly-linux.xml b/src/main/assembly/assembly-linux.xml
index 1737c8517..618ed4057 100644
--- a/src/main/assembly/assembly-linux.xml
+++ b/src/main/assembly/assembly-linux.xml
@@ -37,7 +37,7 @@
mycat/conf
*.dtd
- log4j.*
+ log4j*
diff --git a/src/main/assembly/assembly-mac.xml b/src/main/assembly/assembly-mac.xml
index 3b410409e..8c06476ba 100644
--- a/src/main/assembly/assembly-mac.xml
+++ b/src/main/assembly/assembly-mac.xml
@@ -37,7 +37,7 @@
mycat/conf
*.dtd
- log4j.*
+ log4j*
diff --git a/src/main/assembly/assembly-solaris.xml b/src/main/assembly/assembly-solaris.xml
index d56385814..2fc2b33e1 100644
--- a/src/main/assembly/assembly-solaris.xml
+++ b/src/main/assembly/assembly-solaris.xml
@@ -37,7 +37,7 @@
mycat/conf
*.dtd
- log4j.*
+ log4j*
diff --git a/src/main/assembly/assembly-unix.xml b/src/main/assembly/assembly-unix.xml
index 4e78dae7b..5b3e0d425 100644
--- a/src/main/assembly/assembly-unix.xml
+++ b/src/main/assembly/assembly-unix.xml
@@ -38,7 +38,7 @@
mycat/conf
*.dtd
- log4j.*
+ log4j*
diff --git a/src/main/assembly/assembly-win.xml b/src/main/assembly/assembly-win.xml
index 53f316ddc..9d10d0e45 100644
--- a/src/main/assembly/assembly-win.xml
+++ b/src/main/assembly/assembly-win.xml
@@ -37,7 +37,7 @@
mycat/conf
*.dtd
- log4j.*
+ log4j*
diff --git a/src/main/assembly/bin/create_zookeeper_data.bat b/src/main/assembly/bin/create_zookeeper_data.bat
deleted file mode 100644
index fd16c7d1f..000000000
--- a/src/main/assembly/bin/create_zookeeper_data.bat
+++ /dev/null
@@ -1,17 +0,0 @@
-
-REM check JAVA_HOME & java
-set "JAVA_CMD="%JAVA_HOME%/bin/java""
-if "%JAVA_HOME%" == "" goto noJavaHome
-if exist "%JAVA_HOME%\bin\java.exe" goto mainEntry
-:noJavaHome
-echo ---------------------------------------------------
-echo WARN: JAVA_HOME environment variable is not set.
-echo ---------------------------------------------------
-set "JAVA_CMD=java"
-:mainEntry
-REM set HOME_DIR
-set "CURR_DIR=%cd%"
-cd ..
-set "MYCAT_HOME=%cd%"
-cd %CURR_DIR%
-"%JAVA_CMD%" -Xms256M -Xmx1G -XX:MaxPermSize=64M -DMYCAT_HOME=%MYCAT_HOME% -cp "..\conf;..\lib\*" demo.ZkCreate
\ No newline at end of file
diff --git a/src/main/assembly/bin/create_zookeeper_data.sh b/src/main/assembly/bin/create_zookeeper_data.sh
deleted file mode 100755
index 2029329ff..000000000
--- a/src/main/assembly/bin/create_zookeeper_data.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-echo "check JAVA_HOME & java"
-JAVA_CMD=$JAVA_HOME/bin/java
-MAIN_CLASS=demo.ZkCreate
-if [ ! -d "$JAVA_HOME" ]; then
- echo ---------------------------------------------------
- echo WARN: JAVA_HOME environment variable is not set.
- echo ---------------------------------------------------
- JAVA_CMD=java
-fi
-
-echo "---------set HOME_DIR------------"
-CURR_DIR=`pwd`
-cd ..
-MYCAT_HOME=`pwd`
-cd $CURR_DIR
-$JAVA_CMD -Xms256M -Xmx1G -XX:MaxPermSize=64M -DMYCAT_HOME=$MYCAT_HOME -cp "$MYCAT_HOME/conf:$MYCAT_HOME/lib/*" $MAIN_CLASS
diff --git a/src/main/assembly/conf/log4j.xml b/src/main/assembly/conf/log4j.xml
deleted file mode 100644
index 60f98a449..000000000
--- a/src/main/assembly/conf/log4j.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/src/main/assembly/conf/log4j2.xml b/src/main/assembly/conf/log4j2.xml
new file mode 100644
index 000000000..dfe0dc212
--- /dev/null
+++ b/src/main/assembly/conf/log4j2.xml
@@ -0,0 +1,32 @@
+
+
+
+
+
+
+
+
+
+ %d{yyyy-MM-dd HH:mm:ss.SSS} %5p [%t] (%l) - %m%n
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/main/conf/dnindex.properties b/src/main/conf/dnindex.properties
deleted file mode 100644
index da2b00c13..000000000
--- a/src/main/conf/dnindex.properties
+++ /dev/null
@@ -1,3 +0,0 @@
-#update
-#Thu Sep 10 16:14:18 CST 2015
-jdbchost=0
diff --git a/src/main/java/io/mycat/MycatServer.java b/src/main/java/io/mycat/MycatServer.java
index 77c61d8b0..484323cc3 100644
--- a/src/main/java/io/mycat/MycatServer.java
+++ b/src/main/java/io/mycat/MycatServer.java
@@ -2,8 +2,8 @@
* Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * This code is free software;Designed and Developed mainly by many Chinese
- * opensource volunteers. you can redistribute it and/or modify it under the
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
* terms of the GNU General Public License version 2 only, as published by the
* Free Software Foundation.
*
@@ -16,95 +16,218 @@
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Any questions about this component can be directed to it's project Web address
+ *
+ * Any questions about this component can be directed to it's project Web address
* https://code.google.com/p/opencloudb/.
*
*/
package io.mycat;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.channels.AsynchronousChannelGroup;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import io.mycat.buffer.NettyBufferPool;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.recipes.locks.InterProcessMutex;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.io.Files;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
-import io.mycat.backend.PhysicalDBPool;
+import io.mycat.backend.BackendConnection;
+import io.mycat.backend.datasource.PhysicalDBNode;
+import io.mycat.backend.datasource.PhysicalDBPool;
+import io.mycat.backend.mysql.nio.handler.MultiNodeCoordinator;
+import io.mycat.backend.mysql.xa.CoordinatorLogEntry;
+import io.mycat.backend.mysql.xa.ParticipantLogEntry;
+import io.mycat.backend.mysql.xa.TxState;
+import io.mycat.backend.mysql.xa.XARollbackCallback;
+import io.mycat.backend.mysql.xa.recovery.Repository;
+import io.mycat.backend.mysql.xa.recovery.impl.FileSystemRepository;
+import io.mycat.buffer.BufferPool;
+import io.mycat.buffer.DirectByteBufferPool;
import io.mycat.cache.CacheService;
-import io.mycat.net.*;
+import io.mycat.config.MycatConfig;
+import io.mycat.config.classloader.DynaClassLoader;
+import io.mycat.config.loader.zkprocess.comm.ZkConfig;
+import io.mycat.config.loader.zkprocess.comm.ZkParamCfg;
+import io.mycat.config.model.SchemaConfig;
+import io.mycat.config.model.SystemConfig;
+import io.mycat.config.model.TableConfig;
+import io.mycat.config.table.structure.MySQLTableStructureDetector;
+import io.mycat.manager.ManagerConnectionFactory;
+import io.mycat.memory.MyCatMemory;
+import io.mycat.net.AIOAcceptor;
+import io.mycat.net.AIOConnector;
+import io.mycat.net.NIOAcceptor;
+import io.mycat.net.NIOConnector;
+import io.mycat.net.NIOProcessor;
+import io.mycat.net.NIOReactorPool;
+import io.mycat.net.SocketAcceptor;
+import io.mycat.net.SocketConnector;
import io.mycat.route.MyCATSequnceProcessor;
import io.mycat.route.RouteService;
-import io.mycat.server.MySQLFrontConnectionFactory;
-import io.mycat.server.MySQLFrontConnectionHandler;
-import io.mycat.server.classloader.DynaClassLoader;
-import io.mycat.server.config.ConfigException;
-import io.mycat.server.config.cluster.ClusterSync;
-import io.mycat.server.config.loader.ConfigFactory;
-import io.mycat.server.config.node.MycatConfig;
-import io.mycat.server.config.node.SystemConfig;
+import io.mycat.route.factory.RouteStrategyFactory;
+import io.mycat.route.sequence.handler.SequenceHandler;
+import io.mycat.server.ServerConnectionFactory;
import io.mycat.server.interceptor.SQLInterceptor;
import io.mycat.server.interceptor.impl.GlobalTableUtil;
+import io.mycat.sqlengine.OneRawSQLQueryResultHandler;
+import io.mycat.sqlengine.SQLJob;
+import io.mycat.statistic.SQLRecorder;
+import io.mycat.statistic.stat.SqlResultSizeRecorder;
+import io.mycat.statistic.stat.UserStat;
+import io.mycat.statistic.stat.UserStatAnalyzer;
+import io.mycat.util.ExecutorUtil;
+import io.mycat.util.NameableExecutor;
import io.mycat.util.TimeUtil;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.channels.AsynchronousChannelGroup;
-import java.util.Map;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
+import io.mycat.util.ZKUtils;
/**
* @author mycat
*/
public class MycatServer {
+
public static final String NAME = "MyCat";
private static final long LOG_WATCH_DELAY = 60000L;
private static final long TIME_UPDATE_PERIOD = 20L;
+ private static final long DEFAULT_SQL_STAT_RECYCLE_PERIOD = 5 * 1000L;
+ private static final long DEFAULT_OLD_CONNECTION_CLEAR_PERIOD = 5 * 1000L;
+
private static final MycatServer INSTANCE = new MycatServer();
private static final Logger LOGGER = LoggerFactory.getLogger("MycatServer");
+ private static final Repository fileRepository = new FileSystemRepository();
private final RouteService routerService;
private final CacheService cacheService;
+ private Properties dnIndexProperties;
+
+ // AIO asynchronous channel groups
private AsynchronousChannelGroup[] asyncChannelGroups;
private volatile int channelIndex = 0;
- private final MyCATSequnceProcessor sequnceProcessor = new MyCATSequnceProcessor();
+
+ // global sequence handling
+// private final MyCATSequnceProcessor sequnceProcessor = new MyCATSequnceProcessor();
private final DynaClassLoader catletClassLoader;
private final SQLInterceptor sqlInterceptor;
+ private volatile int nextProcessor;
+
+ // System Buffer Pool Instance
+ private BufferPool bufferPool;
+ private boolean aio = false;
+
+ // generator for global XA transaction IDs
private final AtomicLong xaIDInc = new AtomicLong();
+ // sequence handler instance
+ private SequenceHandler sequenceHandler;
+
+ /**
+ * Mycat 内存管理类
+ */
+ private MyCatMemory myCatMemory = null;
public static final MycatServer getInstance() {
return INSTANCE;
}
private final MycatConfig config;
- private final Timer timer;
+ private final ScheduledExecutorService scheduler;
+ private final ScheduledExecutorService heartbeatScheduler;
+ private final SQLRecorder sqlRecorder;
private final AtomicBoolean isOnline;
private final long startupTime;
- private NamebleScheduledExecutor timerExecutor;
+ private NIOProcessor[] processors;
+ private SocketConnector connector;
+ private NameableExecutor businessExecutor;
+ private NameableExecutor sequenceExecutor;
+ private NameableExecutor timerExecutor;
private ListeningExecutorService listeningExecutorService;
+ private InterProcessMutex dnindexLock;
+ private long totalNetWorkBufferSize = 0;
- private ClusterSync clusterSync;
-
- public MycatServer() {
+ private final AtomicBoolean startup=new AtomicBoolean(false);
+ private MycatServer() {
+
+ // load the file-based configuration
this.config = new MycatConfig();
- this.timer = new Timer(NAME + "Timer", true);
+
+ // scheduled thread pool, single-threaded
+ scheduler = Executors.newSingleThreadScheduledExecutor();
+
+ // heartbeat scheduling runs on its own executor so other tasks cannot delay it
+ heartbeatScheduler = Executors.newSingleThreadScheduledExecutor();
+
+ // SQL recorder
+ this.sqlRecorder = new SQLRecorder(config.getSystem().getSqlRecordCount());
+
+ /**
+ * online flag, controlled by commands in the MyCat manager:
+ * | offline | Change MyCat status to OFF |
+ * | online | Change MyCat status to ON |
+ */
this.isOnline = new AtomicBoolean(true);
+
+ // initialize the cache service
cacheService = new CacheService();
+
+ // initialize the routing service
routerService = new RouteService(cacheService);
+
+ // load datanode active index from properties
+ dnIndexProperties = loadDnIndexProps();
try {
+ // SQL interceptor
sqlInterceptor = (SQLInterceptor) Class.forName(
config.getSystem().getSqlInterceptor()).newInstance();
} catch (Exception e) {
throw new RuntimeException(e);
}
+
+ // catlet class loader
catletClassLoader = new DynaClassLoader(SystemConfig.getHomePath()
- + File.separator + "catlet", config.getSystem()
- .getCatletClassCheckSeconds());
+ + File.separator + "catlet", config.getSystem().getCatletClassCheckSeconds());
+
+ // record the startup time
+ this.startupTime = TimeUtil.currentTimeMillis();
+ if(isUseZkSwitch()) {
+ String path= ZKUtils.getZKBasePath()+"lock/dnindex.lock";
+ dnindexLock = new InterProcessMutex(ZKUtils.getConnection(), path);
+ }
+ }
- this.startupTime = TimeUtil.currentTimeMillis();
+ public AtomicBoolean getStartup() {
+ return startup;
+ }
+
+ public long getTotalNetWorkBufferSize() {
+ return totalNetWorkBufferSize;
+ }
+
+ public BufferPool getBufferPool() {
+ return bufferPool;
+ }
+
+ public NameableExecutor getTimerExecutor() {
+ return timerExecutor;
}
public DynaClassLoader getCatletClassLoader() {
@@ -112,25 +235,42 @@ public DynaClassLoader getCatletClassLoader() {
}
public MyCATSequnceProcessor getSequnceProcessor() {
- return sequnceProcessor;
+ return MyCATSequnceProcessor.getInstance();
}
public SQLInterceptor getSqlInterceptor() {
return sqlInterceptor;
}
+ public ScheduledExecutorService getScheduler() {
+ return scheduler;
+ }
+
public String genXATXID() {
long seq = this.xaIDInc.incrementAndGet();
if (seq < 0) {
synchronized (xaIDInc) {
- if (xaIDInc.get() < 0) {
+ if ( xaIDInc.get() < 0 ) {
xaIDInc.set(0);
}
seq = xaIDInc.incrementAndGet();
}
}
- return "'Mycat." + this.getConfig().getSystem().getMycatNodeId() + "."
- + seq + "'";
+ return "'Mycat." + this.getConfig().getSystem().getMycatNodeId() + "." + seq + "'";
+ }
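genXATXID() above tolerates the (theoretical) wrap-around of the AtomicLong: once incrementAndGet() goes negative, one thread resets the counter under a lock and callers keep receiving positive sequence numbers. A minimal, self-contained sketch of just that wrap-around handling (class and method names are illustrative, not from this patch):

```java
import java.util.concurrent.atomic.AtomicLong;

public class XaSeqSketch {
    // start just below the wrap point so the overflow branch is actually exercised
    private final AtomicLong seq = new AtomicLong(Long.MAX_VALUE - 1);

    public long nextSeq() {
        long s = seq.incrementAndGet();
        if (s < 0) {                      // overflowed past Long.MAX_VALUE
            synchronized (seq) {
                if (seq.get() < 0) {      // only the first thread in resets the counter
                    seq.set(0);
                }
                s = seq.incrementAndGet();
            }
        }
        return s;
    }

    public static void main(String[] args) {
        XaSeqSketch x = new XaSeqSketch();
        for (int i = 0; i < 4; i++) {
            System.out.println(x.nextSeq()); // 9223372036854775807, then 1, 2, 3
        }
    }
}
```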
+
+ public String getXATXIDGLOBAL(){
+ return "'" + getUUID() + "'";
+ }
+
+ public static String getUUID(){
+ String s = UUID.randomUUID().toString();
+ // strip the "-" separators
+ return s.substring(0,8)+s.substring(9,13)+s.substring(14,18)+s.substring(19,23)+s.substring(24);
+ }
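getUUID() strips the four dashes from a random UUID by concatenating substrings around them. A short sketch (not part of the patch) showing that the result is the same 32-character hex string a plain replace would give:

```java
import java.util.UUID;

public class UuidSketch {
    public static void main(String[] args) {
        String s = UUID.randomUUID().toString();        // e.g. 550e8400-e29b-41d4-a716-446655440000
        String bySubstring = s.substring(0, 8) + s.substring(9, 13)
                + s.substring(14, 18) + s.substring(19, 23) + s.substring(24);
        String byReplace = s.replace("-", "");          // equivalent, and easier to read
        System.out.println(bySubstring.equals(byReplace)); // true
        System.out.println(byReplace.length());            // 32
    }
}
```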
+
+ public MyCatMemory getMyCatMemory() {
+ return myCatMemory;
}
/**
@@ -158,88 +298,291 @@ public MycatConfig getConfig() {
return config;
}
+ public void beforeStart() {
+ String home = SystemConfig.getHomePath();
+
+
+ //ZkConfig.instance().initZk();
+ }
+
public void startup() throws IOException {
SystemConfig system = config.getSystem();
int processorCount = system.getProcessors();
+ //init RouteStrategyFactory first
+ RouteStrategyFactory.init();
+
// server startup
- LOGGER.info("===============================================");
LOGGER.info(NAME + " is ready to startup ...");
String inf = "Startup processors ...,total processors:"
+ system.getProcessors() + ",aio thread pool size:"
+ system.getProcessorExecutor()
+ " \r\n each process allocated socket buffer pool "
- + " bytes ,buffer chunk size:"
- + system.getProcessorBufferChunk()
- + " buffer pool's capacity(buferPool/bufferChunk) is:"
- + system.getProcessorBufferPool()
- / system.getProcessorBufferChunk();
+ + " bytes ,a page size:"
+ + system.getBufferPoolPageSize()
+ + " a page's chunk number(PageSize/ChunkSize) is:"
+ + (system.getBufferPoolPageSize()
+ /system.getBufferPoolChunkSize())
+ + " buffer page's number is:"
+ + system.getBufferPoolPageNumber();
LOGGER.info(inf);
LOGGER.info("sysconfig params:" + system.toString());
+ // startup manager
+ ManagerConnectionFactory mf = new ManagerConnectionFactory();
+ ServerConnectionFactory sf = new ServerConnectionFactory();
+ SocketAcceptor manager = null;
+ SocketAcceptor server = null;
+ aio = (system.getUsingAIO() == 1);
+
+ // startup processors
int threadPoolSize = system.getProcessorExecutor();
- long processBuferPool = system.getProcessorBufferPool();
- int processBufferChunk = system.getProcessorBufferChunk();
+ processors = new NIOProcessor[processorCount];
+ // a page size
+ int bufferPoolPageSize = system.getBufferPoolPageSize();
+ // total page number
+ short bufferPoolPageNumber = system.getBufferPoolPageNumber();
+ //minimum allocation unit
+ short bufferPoolChunkSize = system.getBufferPoolChunkSize();
+
int socketBufferLocalPercent = system.getProcessorBufferLocalPercent();
+ int bufferPoolType = system.getProcessorBufferPoolType();
- // server startup
- LOGGER.info("===============================================");
- LOGGER.info(NAME + " is ready to startup ,network config:" + system);
-
- // message byte buffer pool
- BufferPool bufferPool = new BufferPool(processBuferPool,
- processBufferChunk, system.getFrontSocketSoRcvbuf(),
- socketBufferLocalPercent / processorCount);
- // Business Executor, used to run time-consuming tasks
- NameableExecutor businessExecutor = ExecutorUtil.create(
- "BusinessExecutor", threadPoolSize);
- // Timer executor, used to run scheduled tasks
- timerExecutor = ExecutorUtil.createSheduledExecute("Timer",
- system.getTimerExecutor());
- listeningExecutorService = MoreExecutors
- .listeningDecorator(businessExecutor);
-
- // create netsystem to store our network related objects
- NetSystem netSystem = new NetSystem(bufferPool, businessExecutor,
- timerExecutor);
- netSystem.setNetConfig(system);
- // Reactor pool
- NIOReactorPool reactorPool = new NIOReactorPool(
- BufferPool.LOCAL_BUF_THREAD_PREX + "NIOREACTOR", processorCount);
- NIOConnector connector = new NIOConnector(
- BufferPool.LOCAL_BUF_THREAD_PREX + "NIOConnector", reactorPool);
- connector.start();
- netSystem.setConnector(connector);
-
- MySQLFrontConnectionFactory frontFactory = new MySQLFrontConnectionFactory(
- new MySQLFrontConnectionHandler());
- NIOAcceptor server = new NIOAcceptor(BufferPool.LOCAL_BUF_THREAD_PREX
- + NAME + "Server", system.getBindIp(), system.getServerPort(),
- frontFactory, reactorPool);
+ switch (bufferPoolType){
+ case 0:
+ bufferPool = new DirectByteBufferPool(bufferPoolPageSize,bufferPoolChunkSize,
+ bufferPoolPageNumber,system.getFrontSocketSoRcvbuf());
+
+
+ totalNetWorkBufferSize = bufferPoolPageSize*bufferPoolPageNumber;
+ break;
+ case 1:
+ /**
+ * TODO: align with the Mycat authoritative guide:
+ *
+ * A ByteBufferArena is made up of 6 ByteBufferLists, which together reduce memory fragmentation.
+ * Each ByteBufferList is made up of several ByteBufferChunks and has its own anti-fragmentation scheme.
+ * Each ByteBufferChunk is made up of pages; a balanced binary tree tracks page usage, which keeps the accounting flexible.
+ * The configured pageSize is the buffer length of each ByteBufferChunk in each ByteBufferList of the arena.
+ * bufferPoolChunkSize is the length of each page inside a ByteBufferChunk.
+ * bufferPoolPageNumber is the number of ByteBufferChunks in each ByteBufferList.
+ */
+
+ totalNetWorkBufferSize = 6*bufferPoolPageSize * bufferPoolPageNumber;
+ break;
+ case 2:
+ bufferPool = new NettyBufferPool(bufferPoolChunkSize);
+ LOGGER.info("Use Netty Buffer Pool");
+
+ break;
+ default:
+ bufferPool = new DirectByteBufferPool(bufferPoolPageSize,bufferPoolChunkSize,
+ bufferPoolPageNumber,system.getFrontSocketSoRcvbuf());
+ totalNetWorkBufferSize = bufferPoolPageSize*bufferPoolPageNumber;
+ }
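The page/chunk parameters above drive both the startup log message and totalNetWorkBufferSize. A small sketch of the arithmetic with example values (illustrative numbers, not the configured defaults); it widens to long before multiplying, whereas the patch multiplies an int by a short, which could overflow for very large pools:

```java
public class BufferPoolSizingSketch {
    public static void main(String[] args) {
        int bufferPoolPageSize = 2 * 1024 * 1024;   // one 2 MB page (example value)
        short bufferPoolChunkSize = 4096;           // 4 KB minimum allocation unit (example value)
        short bufferPoolPageNumber = 64;            // 64 pages (example value)

        int chunksPerPage = bufferPoolPageSize / bufferPoolChunkSize;
        long totalNetWorkBufferSize = (long) bufferPoolPageSize * bufferPoolPageNumber;

        System.out.println("chunks per page: " + chunksPerPage);                     // 512
        System.out.println("total network buffer bytes: " + totalNetWorkBufferSize); // 134217728 (128 MB)
    }
}
```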
+
+ /**
+ * Off-heap memory initialization for Merge/Order/Group/Limit
+ */
+ if(system.getUseOffHeapForMerge() == 1){
+ try {
+ myCatMemory = new MyCatMemory(system,totalNetWorkBufferSize);
+ } catch (NoSuchFieldException e) {
+ LOGGER.error("NoSuchFieldException",e);
+ } catch (IllegalAccessException e) {
+ LOGGER.error("Error",e);
+ }
+ }
+ businessExecutor = ExecutorUtil.create("BusinessExecutor",
+ threadPoolSize);
+ sequenceExecutor = ExecutorUtil.create("SequenceExecutor", threadPoolSize);
+ timerExecutor = ExecutorUtil.create("Timer", system.getTimerExecutor());
+ listeningExecutorService = MoreExecutors.listeningDecorator(businessExecutor);
+
+ for (int i = 0; i < processors.length; i++) {
+ processors[i] = new NIOProcessor("Processor" + i, bufferPool,
+ businessExecutor);
+ }
+
+ if (aio) {
+ LOGGER.info("using aio network handler ");
+ asyncChannelGroups = new AsynchronousChannelGroup[processorCount];
+ // startup connector
+ connector = new AIOConnector();
+ for (int i = 0; i < processors.length; i++) {
+ asyncChannelGroups[i] = AsynchronousChannelGroup.withFixedThreadPool(processorCount,
+ new ThreadFactory() {
+ private int inx = 1;
+ @Override
+ public Thread newThread(Runnable r) {
+ Thread th = new Thread(r);
+ //TODO
+ th.setName(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "AIO" + (inx++));
+ LOGGER.info("created new AIO thread "+ th.getName());
+ return th;
+ }
+ }
+ );
+ }
+ manager = new AIOAcceptor(NAME + "Manager", system.getBindIp(),
+ system.getManagerPort(), mf, this.asyncChannelGroups[0]);
+
+ // startup server
+
+ server = new AIOAcceptor(NAME + "Server", system.getBindIp(),
+ system.getServerPort(), sf, this.asyncChannelGroups[0]);
+
+ } else {
+ LOGGER.info("using nio network handler ");
+
+ NIOReactorPool reactorPool = new NIOReactorPool(
+ DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "NIOREACTOR",
+ processors.length);
+ connector = new NIOConnector(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + "NIOConnector", reactorPool);
+ ((NIOConnector) connector).start();
+
+ manager = new NIOAcceptor(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + NAME
+ + "Manager", system.getBindIp(), system.getManagerPort(), mf, reactorPool);
+
+ server = new NIOAcceptor(DirectByteBufferPool.LOCAL_BUF_THREAD_PREX + NAME
+ + "Server", system.getBindIp(), system.getServerPort(), sf, reactorPool);
+ }
+ // manager start
+ manager.start();
+ LOGGER.info(manager.getName() + " is started and listening on " + manager.getPort());
server.start();
+
// server started
- LOGGER.info(server.getName() + " is started and listening on "
- + server.getPort());
+ LOGGER.info(server.getName() + " is started and listening on " + server.getPort());
+
+ LOGGER.info("===============================================");
// init datahost
- config.initDatasource();
+ Map<String, PhysicalDBPool> dataHosts = config.getDataHosts();
+ LOGGER.info("Initialize dataHost ...");
+ for (PhysicalDBPool node : dataHosts.values()) {
+ String index = dnIndexProperties.getProperty(node.getHostName(),"0");
+ if (!"0".equals(index)) {
+ LOGGER.info("init datahost: " + node.getHostName() + " to use datasource index:" + index);
+ }
+ node.init(Integer.parseInt(index));
+ node.startHeartbeat();
+ }
long dataNodeIldeCheckPeriod = system.getDataNodeIdleCheckPeriod();
- timer.schedule(updateTime(), 0L, TIME_UPDATE_PERIOD);
- timer.schedule(processorCheck(), 0L, system.getProcessorCheckPeriod());
- timer.schedule(dataNodeConHeartBeatCheck(dataNodeIldeCheckPeriod), 0L,
- dataNodeIldeCheckPeriod);
- timer.schedule(dataNodeHeartbeat(), 0L,
- system.getDataNodeHeartbeatPeriod());
- timer.schedule(glableTableConsistencyCheck(), 0L,
- system.getGlableTableCheckPeriod());
- timer.schedule(catletClassClear(), 30000);
-
+
+ heartbeatScheduler.scheduleAtFixedRate(updateTime(), 0L, TIME_UPDATE_PERIOD,TimeUnit.MILLISECONDS);
+ heartbeatScheduler.scheduleAtFixedRate(processorCheck(), 0L, system.getProcessorCheckPeriod(),TimeUnit.MILLISECONDS);
+ heartbeatScheduler.scheduleAtFixedRate(dataNodeConHeartBeatCheck(dataNodeIldeCheckPeriod), 0L, dataNodeIldeCheckPeriod,TimeUnit.MILLISECONDS);
+ heartbeatScheduler.scheduleAtFixedRate(dataNodeHeartbeat(), 0L, system.getDataNodeHeartbeatPeriod(),TimeUnit.MILLISECONDS);
+ heartbeatScheduler.scheduleAtFixedRate(dataSourceOldConsClear(), 0L, DEFAULT_OLD_CONNECTION_CLEAR_PERIOD, TimeUnit.MILLISECONDS);
+ scheduler.schedule(catletClassClear(), 30000,TimeUnit.MILLISECONDS);
+
+ if(system.getCheckTableConsistency()==1) {
+ scheduler.scheduleAtFixedRate(tableStructureCheck(), 0L, system.getCheckTableConsistencyPeriod(), TimeUnit.MILLISECONDS);
+ }
+
+ if(system.getUseSqlStat()==1) {
+ scheduler.scheduleAtFixedRate(recycleSqlStat(), 0L, DEFAULT_SQL_STAT_RECYCLE_PERIOD, TimeUnit.MILLISECONDS);
+ }
+
+ if(system.getUseGlobleTableCheck() == 1){ // global-table consistency check enabled?
+ scheduler.scheduleAtFixedRate(glableTableConsistencyCheck(), 0L, system.getGlableTableCheckPeriod(), TimeUnit.MILLISECONDS);
+ }
+
+ // periodically clear the big result-set ranking map and keep the rejection policy in check
+ scheduler.scheduleAtFixedRate(resultSetMapClear(),0L, system.getClearBigSqLResultSetMapMs(),TimeUnit.MILLISECONDS);
+
+
+// new Thread(tableStructureCheck()).start();
+
+ //XA Init recovery Log
+ LOGGER.info("===============================================");
+ LOGGER.info("Perform XA recovery log ...");
+ performXARecoveryLog();
+
+ if(isUseZkSwitch()) {
+ // on first startup, if dnindex does not exist in ZK yet, upload the local copy to ZK
+ initZkDnindex();
+ }
+ initRuleData();
+
+ startup.set(true);
+ }
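All of the periodic jobs registered above share one shape: the single-threaded scheduler (or heartbeatScheduler) only fires the tick, and the returned Runnable immediately hands the real work to timerExecutor, so one slow check cannot push later ticks off schedule. A self-contained sketch of that hand-off pattern (pool names and periods are illustrative):

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class SchedulerHandoffSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService ticker = Executors.newSingleThreadScheduledExecutor();
        ExecutorService worker = Executors.newFixedThreadPool(2);

        Runnable check = () -> worker.execute(() -> {
            // the potentially slow work runs on the worker pool;
            // the ticker thread returns immediately and stays on schedule
            System.out.println("periodic check on " + Thread.currentThread().getName());
        });

        ticker.scheduleAtFixedRate(check, 0L, 100L, TimeUnit.MILLISECONDS);

        Thread.sleep(350);
        ticker.shutdown();
        worker.shutdown();
    }
}
```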
+
+ public void initRuleData() {
+ if(!isUseZk()) return;
+ InterProcessMutex ruleDataLock =null;
+ try {
+ File file = new File(SystemConfig.getHomePath(), "conf" + File.separator + "ruledata");
+ String path= ZKUtils.getZKBasePath()+"lock/ruledata.lock";
+ ruleDataLock= new InterProcessMutex(ZKUtils.getConnection(), path);
+ ruleDataLock.acquire(30, TimeUnit.SECONDS);
+ File[] childFiles= file.listFiles();
+ if(childFiles!=null&&childFiles.length>0) {
+ String basePath = ZKUtils.getZKBasePath() + "ruledata/";
+ for (File childFile : childFiles) {
+ CuratorFramework zk = ZKUtils.getConnection();
+ if (zk.checkExists().forPath(basePath + childFile.getName()) == null) {
+ zk.create().creatingParentsIfNeeded().forPath(basePath + childFile.getName(), Files.toByteArray(childFile));
+ }
+ }
+ }
+
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ } finally {
+ try {
+ if(ruleDataLock!=null)
+ ruleDataLock.release();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
}
- private TimerTask catletClassClear() {
- return new TimerTask() {
+ private void initZkDnindex() {
+ try {
+ File file = new File(SystemConfig.getHomePath(), "conf" + File.separator + "dnindex.properties");
+ dnindexLock.acquire(30, TimeUnit.SECONDS);
+ String path = ZKUtils.getZKBasePath() + "bindata/dnindex.properties";
+ CuratorFramework zk = ZKUtils.getConnection();
+ if (zk.checkExists().forPath(path) == null) {
+ zk.create().creatingParentsIfNeeded().forPath(path, Files.toByteArray(file));
+ }
+
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ } finally {
+ try {
+ dnindexLock.release();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
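initZkDnindex() and the ZK branch of saveDataHostIndex() both serialize the dnindex.properties upload with a Curator InterProcessMutex. One detail worth noting: acquire(30, TimeUnit.SECONDS) returns a boolean that the patch ignores, so a timed-out acquire still falls through to the write. A hedged sketch of the acquire/check/release shape (connection string and lock path are placeholders, not values from this patch):

```java
import java.util.concurrent.TimeUnit;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class ZkLockSketch {
    public static void main(String[] args) throws Exception {
        CuratorFramework zk = CuratorFrameworkFactory.newClient(
                "127.0.0.1:2181", new ExponentialBackoffRetry(1000, 3)); // placeholder address
        zk.start();

        InterProcessMutex lock = new InterProcessMutex(zk, "/mycat/lock/dnindex.lock");
        // acquire with a timeout returns false when the lock was NOT obtained
        if (lock.acquire(30, TimeUnit.SECONDS)) {
            try {
                // protected section: read-modify-write the shared znode here
            } finally {
                lock.release();
            }
        }
        zk.close();
    }
}
```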
+
+ public void reloadDnIndex()
+ {
+ if(MycatServer.getInstance().getProcessors()==null) return;
+ // load datanode active index from properties
+ dnIndexProperties = loadDnIndexProps();
+ // init datahost
+ Map<String, PhysicalDBPool> dataHosts = config.getDataHosts();
+ LOGGER.info("reInitialize dataHost ...");
+ for (PhysicalDBPool node : dataHosts.values()) {
+ String index = dnIndexProperties.getProperty(node.getHostName(),"0");
+ if (!"0".equals(index)) {
+ LOGGER.info("reinit datahost: " + node.getHostName() + " to use datasource index:" + index);
+ }
+ node.switchSource(Integer.parseInt(index),true,"reload dnindex");
+
+ }
+ }
+
+ private Runnable catletClassClear() {
+ return new Runnable() {
@Override
public void run() {
try {
@@ -251,7 +594,172 @@ public void run() {
};
}
+
+ /**
+ * Clear old backend connections left over after reload @@config_all
+ * @return
+ */
+ private Runnable dataSourceOldConsClear() {
+ return new Runnable() {
+ @Override
+ public void run() {
+ timerExecutor.execute(new Runnable() {
+ @Override
+ public void run() {
+
+ long sqlTimeout = MycatServer.getInstance().getConfig().getSystem().getSqlExecuteTimeout() * 1000L;
+
+ // use lastTime to decide whether the statement is still running; close the connection once it exceeds the sqlExecuteTimeout threshold
+ long currentTime = TimeUtil.currentTimeMillis();
+ Iterator<BackendConnection> iter = NIOProcessor.backends_old.iterator();
+ while( iter.hasNext() ) {
+ BackendConnection con = iter.next();
+ long lastTime = con.getLastTime();
+ if ( currentTime - lastTime > sqlTimeout ) {
+ con.close("clear old backend connection ...");
+ iter.remove();
+ }
+ }
+ }
+ });
+ };
+ };
+ }
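dataSourceOldConsClear() sweeps NIOProcessor.backends_old and closes any connection whose lastTime is older than sqlExecuteTimeout. A tiny stand-alone sketch of that sweep, with plain timestamps standing in for BackendConnection objects (all names and values here are illustrative):

```java
import java.util.Iterator;
import java.util.concurrent.ConcurrentLinkedQueue;

public class OldConnectionSweepSketch {
    public static void main(String[] args) {
        // stand-in for backends_old: each entry is a connection's lastTime
        ConcurrentLinkedQueue<Long> backendsOld = new ConcurrentLinkedQueue<>();
        backendsOld.add(System.currentTimeMillis() - 600_000L); // idle for 10 minutes
        backendsOld.add(System.currentTimeMillis());            // just used

        long sqlTimeoutMillis = 300 * 1000L;                     // e.g. sqlExecuteTimeout = 300s
        long now = System.currentTimeMillis();

        Iterator<Long> it = backendsOld.iterator();
        while (it.hasNext()) {
            long lastTime = it.next();
            if (now - lastTime > sqlTimeoutMillis) {
                it.remove();                                     // the real task also calls con.close(...)
            }
        }
        System.out.println("remaining: " + backendsOld.size());  // 1
    }
}
```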
+
+ /**
+ * Skip cleaning while buffer pool usage is above the usage threshold;
+ * once usage falls below the threshold, clear the recorded large result sets.
+ *
+ */
+ private Runnable resultSetMapClear() {
+ return new Runnable() {
+ @Override
+ public void run() {
+ try {
+ BufferPool bufferPool=getBufferPool();
+ long bufferSize=bufferPool.size();
+ long bufferCapacity=bufferPool.capacity();
+ long bufferUsagePercent=(bufferCapacity-bufferSize)*100/bufferCapacity;
+ if(bufferUsagePercent < config.getSystem().getBufferUsagePercent()){
+ Map<String, UserStat> map = UserStatAnalyzer.getInstance().getUserStatMap();
+ Set<String> userSet = config.getUsers().keySet();
+ for (String user : userSet) {
+ UserStat userStat = map.get(user);
+ if(userStat!=null){
+ SqlResultSizeRecorder recorder=userStat.getSqlResultSizeRecorder();
+ //System.out.println(recorder.getSqlResultSet().size());
+ recorder.clearSqlResultSet();
+ }
+ }
+ }
+ } catch (Exception e) {
+ LOGGER.warn("resultSetMapClear err " + e);
+ }
+ };
+ };
+ }
+
+ private Properties loadDnIndexProps() {
+ Properties prop = new Properties();
+ File file = new File(SystemConfig.getHomePath(), "conf" + File.separator + "dnindex.properties");
+ if (!file.exists()) {
+ return prop;
+ }
+ FileInputStream filein = null;
+ try {
+ filein = new FileInputStream(file);
+ prop.load(filein);
+ } catch (Exception e) {
+ LOGGER.warn("load DataNodeIndex err:" + e);
+ } finally {
+ if (filein != null) {
+ try {
+ filein.close();
+ } catch (IOException e) {
+ }
+ }
+ }
+ return prop;
+ }
+
+ /**
+ * save cur datanode index to properties file
+ *
+ * @param dataHost
+ * @param curIndex
+ */
+ public synchronized void saveDataHostIndex(String dataHost, int curIndex) {
+ File file = new File(SystemConfig.getHomePath(), "conf" + File.separator + "dnindex.properties");
+ FileOutputStream fileOut = null;
+ try {
+ String oldIndex = dnIndexProperties.getProperty(dataHost);
+ String newIndex = String.valueOf(curIndex);
+ if (newIndex.equals(oldIndex)) {
+ return;
+ }
+
+ dnIndexProperties.setProperty(dataHost, newIndex);
+ LOGGER.info("save DataHost index " + dataHost + " cur index " + curIndex);
+
+ File parent = file.getParentFile();
+ if (parent != null && !parent.exists()) {
+ parent.mkdirs();
+ }
+
+ fileOut = new FileOutputStream(file);
+ dnIndexProperties.store(fileOut, "update");
+
+ if(isUseZkSwitch()) {
+ // save to zk
+ try {
+ dnindexLock.acquire(30,TimeUnit.SECONDS) ;
+ String path = ZKUtils.getZKBasePath() + "bindata/dnindex.properties";
+ CuratorFramework zk = ZKUtils.getConnection();
+ if(zk.checkExists().forPath(path)==null) {
+ zk.create().creatingParentsIfNeeded().forPath(path, Files.toByteArray(file));
+ } else{
+ byte[] data= zk.getData().forPath(path);
+ ByteArrayOutputStream out=new ByteArrayOutputStream();
+ Properties properties=new Properties();
+ properties.load(new ByteArrayInputStream(data));
+ if(!String.valueOf(curIndex).equals(properties.getProperty(dataHost))) {
+ properties.setProperty(dataHost, String.valueOf(curIndex));
+ properties.store(out, "update");
+ zk.setData().forPath(path, out.toByteArray());
+ }
+ }
+
+ }finally {
+ dnindexLock.release();
+ }
+ }
+ } catch (Exception e) {
+ LOGGER.warn("saveDataNodeIndex err:", e);
+ } finally {
+ if (fileOut != null) {
+ try {
+ fileOut.close();
+ } catch (IOException e) {
+ }
+ }
+ }
+
+ }
+
+
+ private boolean isUseZk(){
+ String loadZk=ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_FLAG);
+ return "true".equalsIgnoreCase(loadZk) ;
+ }
+
+ private boolean isUseZkSwitch()
+ {
+ MycatConfig mycatConfig=config;
+ boolean isUseZkSwitch= mycatConfig.getSystem().isUseZKSwitch();
+ String loadZk=ZkConfig.getInstance().getValue(ZkParamCfg.ZK_CFG_FLAG);
+ return (isUseZkSwitch&&"true".equalsIgnoreCase(loadZk)) ;
+ }
public RouteService getRouterService() {
return routerService;
@@ -261,10 +769,34 @@ public CacheService getCacheService() {
return cacheService;
}
+ public NameableExecutor getBusinessExecutor() {
+ return businessExecutor;
+ }
+
public RouteService getRouterservice() {
return routerService;
}
+ public NIOProcessor nextProcessor() {
+ int i = ++nextProcessor;
+ if (i >= processors.length) {
+ i = nextProcessor = 0;
+ }
+ return processors[i];
+ }
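nextProcessor() spreads new connections across the NIOProcessor array with a plain ++ on a volatile int. That is cheap and good enough for load spreading, but it is not atomic, so two racing threads may occasionally land on the same processor. A strictly round-robin variant would use an AtomicInteger, as in this illustrative sketch (not part of the patch):

```java
import java.util.concurrent.atomic.AtomicInteger;

public class RoundRobinSketch {
    private final String[] processors = {"Processor0", "Processor1", "Processor2"};
    private final AtomicInteger next = new AtomicInteger();

    public String nextProcessor() {
        // getAndIncrement is atomic; floorMod keeps the index valid even after int overflow
        int i = Math.floorMod(next.getAndIncrement(), processors.length);
        return processors[i];
    }

    public static void main(String[] args) {
        RoundRobinSketch rr = new RoundRobinSketch();
        for (int k = 0; k < 5; k++) {
            System.out.println(rr.nextProcessor()); // Processor0, 1, 2, 0, 1
        }
    }
}
```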
+
+ public NIOProcessor[] getProcessors() {
+ return processors;
+ }
+
+ public SocketConnector getConnector() {
+ return connector;
+ }
+
+ public SQLRecorder getSqlRecorder() {
+ return sqlRecorder;
+ }
+
public long getStartupTime() {
return startupTime;
}
@@ -282,8 +814,8 @@ public void online() {
}
// periodic system-time update task
- private TimerTask updateTime() {
- return new TimerTask() {
+ private Runnable updateTime() {
+ return new Runnable() {
@Override
public void run() {
TimeUtil.update();
@@ -292,104 +824,204 @@ public void run() {
}
// periodic processor check task
- private TimerTask processorCheck() {
- return new TimerTask() {
+ private Runnable processorCheck() {
+ return new Runnable() {
@Override
public void run() {
timerExecutor.execute(new Runnable() {
@Override
public void run() {
try {
- NetSystem.getInstance().checkConnections();
+ for (NIOProcessor p : processors) {
+ p.checkBackendCons();
+ }
} catch (Exception e) {
- LOGGER.warn("checkBackendCons caught err:", e);
+ LOGGER.warn("checkBackendCons caught err:" + e);
}
}
});
+ timerExecutor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ for (NIOProcessor p : processors) {
+ p.checkFrontCons();
+ }
+ } catch (Exception e) {
+ LOGGER.warn("checkFrontCons caught err:" + e);
+ }
+ }
+ });
}
};
}
// periodic idle-timeout check for datanode connections
- private TimerTask dataNodeConHeartBeatCheck(final long heartPeriod) {
- return new TimerTask() {
+ private Runnable dataNodeConHeartBeatCheck(final long heartPeriod) {
+ return new Runnable() {
@Override
public void run() {
timerExecutor.execute(new Runnable() {
@Override
public void run() {
- Map<String, PhysicalDBPool> nodes = config
- .getDataHosts();
+
+ Map<String, PhysicalDBPool> nodes = config.getDataHosts();
for (PhysicalDBPool node : nodes.values()) {
node.heartbeatCheck(heartPeriod);
}
- Map _nodes = config
- .getBackupDataHosts();
+
+ /*
+ Map _nodes = config.getBackupDataHosts();
if (_nodes != null) {
for (PhysicalDBPool node : _nodes.values()) {
node.heartbeatCheck(heartPeriod);
}
- }
+ }*/
}
});
}
};
}
- // global-table consistency check task
- private TimerTask glableTableConsistencyCheck() {
- return new TimerTask() {
+ // periodic datanode heartbeat task
+ private Runnable dataNodeHeartbeat() {
+ return new Runnable() {
@Override
public void run() {
timerExecutor.execute(new Runnable() {
@Override
public void run() {
- GlobalTableUtil.consistencyCheck();
+ Map<String, PhysicalDBPool> nodes = config.getDataHosts();
+ for (PhysicalDBPool node : nodes.values()) {
+ node.doHeartbeat();
+ }
}
});
}
};
}
+
+ // periodically recycle the data held in SqlStat
+ private Runnable recycleSqlStat(){
+ return new Runnable() {
+ @Override
+ public void run() {
+ Map<String, UserStat> statMap = UserStatAnalyzer.getInstance().getUserStatMap();
+ for (UserStat userStat : statMap.values()) {
+ userStat.getSqlLastStat().recycle();
+ userStat.getSqlRecorder().recycle();
+ userStat.getSqlHigh().recycle();
+ userStat.getSqlLargeRowStat().recycle();
+ }
+ }
+ };
+ }
+
+ // periodically check table-structure consistency across shards
+ private Runnable tableStructureCheck(){
+ return new MySQLTableStructureDetector();
+ }
- // periodic datanode heartbeat task
- private TimerTask dataNodeHeartbeat() {
- return new TimerTask() {
+ // global-table consistency check task
+ private Runnable glableTableConsistencyCheck() {
+ return new Runnable() {
@Override
public void run() {
timerExecutor.execute(new Runnable() {
@Override
public void run() {
- Map<String, PhysicalDBPool> nodes = config
- .getDataHosts();
- for (PhysicalDBPool node : nodes.values()) {
- node.doHeartbeat();
- }
+ GlobalTableUtil.consistencyCheck();
}
});
}
};
}
+
+ //XA recovery log check
+ private void performXARecoveryLog() {
+ //fetch the recovery log
+ CoordinatorLogEntry[] coordinatorLogEntries = getCoordinatorLogEntries();
+
+ for (int i = 0; i < coordinatorLogEntries.length; i++) {
+ // recovery handling for coordinatorLogEntries[i]
+ }
+ }
+
+ private CoordinatorLogEntry[] getCoordinatorLogEntries() {
+ Repository fileRepository = new FileSystemRepository();
+ Collection<CoordinatorLogEntry> allCoordinatorLogEntries = fileRepository.getAllCoordinatorLogEntries();
+ if(allCoordinatorLogEntries == null){return new CoordinatorLogEntry[0];}
+ if(allCoordinatorLogEntries.size()==0){return new CoordinatorLogEntry[0];}
+ return allCoordinatorLogEntries.toArray(new CoordinatorLogEntry[allCoordinatorLogEntries.size()]);
+ }
+
+ public NameableExecutor getSequenceExecutor() {
+ return sequenceExecutor;
+ }
+
+ //huangyiming add
+ public DirectByteBufferPool getDirectByteBufferPool() {
+ return (DirectByteBufferPool)bufferPool;
+ }
+
+ public boolean isAIO() {
+ return aio;
+ }
+
+
public ListeningExecutorService getListeningExecutorService() {
return listeningExecutorService;
}
- /**
- * save cur datanode index to properties file
- *
- * @param dataNode
- * @param curIndex
- */
- public synchronized void saveDataHostIndex(String dataHost, int curIndex) {
- if(clusterSync==null){
- clusterSync = ConfigFactory.instanceCluster();
- }
- boolean isSwitch = clusterSync.switchDataSource(dataHost, curIndex);
- if(isSwitch){
- config.setHostIndex(dataHost, curIndex);
- }else {
- LOGGER.warn("can't switch dataHost"+dataHost +" to curIndex " + curIndex);
- throw new ConfigException("can't switch dataHost"+dataHost +" to curIndex " + curIndex);
- }
+
+ public static void main(String[] args) throws Exception {
+ String path = ZKUtils.getZKBasePath() + "bindata";
+ CuratorFramework zk = ZKUtils.getConnection();
+ if (zk.checkExists().forPath(path) != null) {
+ byte[] data = zk.getData().forPath(path);
+ System.out.println(data.length);
+ }
}
+
}
diff --git a/src/main/java/io/mycat/MycatStartup.java b/src/main/java/io/mycat/MycatStartup.java
index 3438dcaec..fbc1ffd90 100644
--- a/src/main/java/io/mycat/MycatStartup.java
+++ b/src/main/java/io/mycat/MycatStartup.java
@@ -23,44 +23,44 @@
*/
package io.mycat;
-import io.mycat.server.config.node.SystemConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
import java.text.SimpleDateFormat;
import java.util.Date;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.mycat.config.loader.zkprocess.comm.ZkConfig;
+import io.mycat.config.model.SystemConfig;
+
/**
* @author mycat
*/
public final class MycatStartup {
- private static final String dateFormat = "yyyy-MM-dd HH:mm:ss";
-
- private static final class Holder {
- private static final Logger LOGGER = LoggerFactory
- .getLogger(MycatStartup.class);
- }
+ private static final String dateFormat = "yyyy-MM-dd HH:mm:ss";
+ private static final Logger LOGGER = LoggerFactory.getLogger(MycatStartup.class);
+ public static void main(String[] args) {
+ //use zk ?
+ ZkConfig.getInstance().initZk();
+ try {
+ String home = SystemConfig.getHomePath();
+ if (home == null) {
+ System.out.println(SystemConfig.SYS_HOME + " is not set.");
+ System.exit(-1);
+ }
+ // init
+ MycatServer server = MycatServer.getInstance();
+ server.beforeStart();
- public static void main(String[] args) {
- try {
- String home = SystemConfig.getHomePath();
- if (home == null) {
- System.out.println(SystemConfig.SYS_HOME + " is not set.");
- System.exit(-1);
- }
- // init
- MycatServer server = MycatServer.getInstance();
+ // startup
+ server.startup();
+ System.out.println("MyCAT Server startup successfully. see logs in logs/mycat.log");
- // startup
- server.startup();
- System.out.println("MyCAT Server startup successfully. see logs in logs/mycat.log");
- while (true) {
- Thread.sleep(300 * 1000);
- }
- } catch (Exception e) {
- SimpleDateFormat sdf = new SimpleDateFormat(dateFormat);
- Holder.LOGGER.error(sdf.format(new Date()) + " startup error", e);
- System.exit(-1);
- }
- }
-}
\ No newline at end of file
+ } catch (Exception e) {
+ SimpleDateFormat sdf = new SimpleDateFormat(dateFormat);
+ LOGGER.error(sdf.format(new Date()) + " startup error", e);
+ System.exit(-1);
+ }
+ }
+}
diff --git a/src/main/java/io/mycat/backend/BackendConnection.java b/src/main/java/io/mycat/backend/BackendConnection.java
index 4d64b9e3c..3440a793b 100644
--- a/src/main/java/io/mycat/backend/BackendConnection.java
+++ b/src/main/java/io/mycat/backend/BackendConnection.java
@@ -1,64 +1,65 @@
-package io.mycat.backend;
-
-import io.mycat.net.ClosableConnection;
-import io.mycat.route.RouteResultsetNode;
-import io.mycat.server.MySQLFrontConnection;
-import io.mycat.server.executors.ResponseHandler;
-
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-
-public interface BackendConnection extends ClosableConnection{
- public boolean isModifiedSQLExecuted();
-
- public boolean isFromSlaveDB();
-
- public String getSchema();
-
- public void setSchema(String newSchema);
-
- public long getLastTime();
-
- public boolean isClosedOrQuit();
-
- public void setAttachment(Object attachment);
-
- public void quit();
-
- public void setLastTime(long currentTimeMillis);
-
- public void release();
-
- public void setResponseHandler(ResponseHandler commandHandler);
-
- public void commit();
-
- public void query(String sql) throws UnsupportedEncodingException;
-
- public Object getAttachment();
-
- // public long getThreadId();
-
- public void execute(RouteResultsetNode node, MySQLFrontConnection source,
- boolean autocommit) throws IOException;
-
- public boolean syncAndExcute();
-
- public void rollback();
-
- public boolean isBorrowed();
-
- public void setBorrowed(boolean borrowed);
-
- public int getTxIsolation();
-
- public boolean isAutocommit();
-
- public long getId();
-
- public void close(String reason);
-
- public String getCharset();
-
- public PhysicalDatasource getPool();
-}
+package io.mycat.backend;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+
+import io.mycat.backend.mysql.nio.handler.ResponseHandler;
+import io.mycat.net.ClosableConnection;
+import io.mycat.route.RouteResultsetNode;
+import io.mycat.server.ServerConnection;
+
+public interface BackendConnection extends ClosableConnection {
+ public boolean isModifiedSQLExecuted();
+
+ public boolean isFromSlaveDB();
+
+ public String getSchema();
+
+ public void setSchema(String newSchema);
+
+ public long getLastTime();
+
+ public boolean isClosedOrQuit();
+
+ public void setAttachment(Object attachment);
+
+ public void quit();
+
+ public void setLastTime(long currentTimeMillis);
+
+ public void release();
+
+ public boolean setResponseHandler(ResponseHandler commandHandler);
+
+ public void commit();
+
+ public void query(String sql) throws UnsupportedEncodingException;
+
+ public Object getAttachment();
+
+ // public long getThreadId();
+
+
+
+ public void execute(RouteResultsetNode node, ServerConnection source,
+ boolean autocommit) throws IOException;
+
+ public void recordSql(String host, String schema, String statement);
+
+ public boolean syncAndExcute();
+
+ public void rollback();
+
+ public boolean isBorrowed();
+
+ public void setBorrowed(boolean borrowed);
+
+ public int getTxIsolation();
+
+ public boolean isAutocommit();
+
+ public long getId();
+
+ public void discardClose(String reason);
+
+}
diff --git a/src/main/java/io/mycat/backend/ConMap.java b/src/main/java/io/mycat/backend/ConMap.java
index 37a58f512..140e6bdd0 100644
--- a/src/main/java/io/mycat/backend/ConMap.java
+++ b/src/main/java/io/mycat/backend/ConMap.java
@@ -1,14 +1,19 @@
package io.mycat.backend;
-import io.mycat.net.Connection;
-import io.mycat.net.NetSystem;
-
import java.util.Collection;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import io.mycat.MycatServer;
+import io.mycat.backend.datasource.PhysicalDatasource;
+import io.mycat.backend.jdbc.JDBCConnection;
+import io.mycat.backend.mysql.nio.MySQLConnection;
+import io.mycat.net.NIOProcessor;
public class ConMap {
+
// key -schema
private final ConcurrentHashMap<String, ConQueue> items = new ConcurrentHashMap<String, ConQueue>();
@@ -59,56 +64,73 @@ public Collection getAllConQueue() {
public int getActiveCountForSchema(String schema,
PhysicalDatasource dataSouce) {
int total = 0;
- for (Connection conn : NetSystem.getInstance().getAllConnectios()
- .values()) {
- if (conn instanceof BackendConnection) {
- BackendConnection theCon = (BackendConnection) conn;
- if (theCon.getSchema().equals(schema)
- && theCon.getPool() == dataSouce) {
- if (theCon.isBorrowed()) {
- total++;
+ for (NIOProcessor processor : MycatServer.getInstance().getProcessors()) {
+ for (BackendConnection con : processor.getBackends().values()) {
+ if (con instanceof MySQLConnection) {
+ MySQLConnection mysqlCon = (MySQLConnection) con;
+
+ if (mysqlCon.getSchema().equals(schema)
+ && mysqlCon.getPool() == dataSouce
+ && mysqlCon.isBorrowed()) {
+ total++;
}
- }
- }
- }
- return total;
- }
+ }else if (con instanceof JDBCConnection) {
+ JDBCConnection jdbcCon = (JDBCConnection) con;
+ if (jdbcCon.getSchema().equals(schema) && jdbcCon.getPool() == dataSouce
+ && jdbcCon.isBorrowed()) {
+ total++;
+ }
+ }
+ }
+ }
+ return total;
+ }
public int getActiveCountForDs(PhysicalDatasource dataSouce) {
-
int total = 0;
- for (Connection conn : NetSystem.getInstance().getAllConnectios()
- .values()) {
- if (conn instanceof BackendConnection) {
- BackendConnection theCon = (BackendConnection) conn;
- if (theCon.getPool() == dataSouce) {
- if (theCon.isBorrowed()) {
- total++;
+ for (NIOProcessor processor : MycatServer.getInstance().getProcessors()) {
+ for (BackendConnection con : processor.getBackends().values()) {
+ if (con instanceof MySQLConnection) {
+ MySQLConnection mysqlCon = (MySQLConnection) con;
+
+ if (mysqlCon.getPool() == dataSouce
+ && mysqlCon.isBorrowed() && !mysqlCon.isClosed()) {
+ total++;
}
- }
- }
- }
- return total;
- }
-
- public void clearConnections(String reason, PhysicalDatasource dataSouce) {
-
- Iterator<Entry<Long, Connection>> itor = NetSystem.getInstance()
- .getAllConnectios().entrySet().iterator();
- while (itor.hasNext()) {
- Entry<Long, Connection> entry = itor.next();
- Connection con = entry.getValue();
- if (con instanceof BackendConnection) {
- if (((BackendConnection) con).getPool() == dataSouce) {
- con.close(reason);
- itor.remove();
- }
- }
+ } else if (con instanceof JDBCConnection) {
+ JDBCConnection jdbcCon = (JDBCConnection) con;
+ if (jdbcCon.getPool() == dataSouce
+ && jdbcCon.isBorrowed() && !jdbcCon.isClosed()) {
+ total++;
+ }
+ }
+ }
+ }
+ return total;
+ }
+
+ public void clearConnections(String reason, PhysicalDatasource dataSouce) {
+ for (NIOProcessor processor : MycatServer.getInstance().getProcessors()) {
+ ConcurrentMap<Long, BackendConnection> map = processor.getBackends();
+ Iterator<Entry<Long, BackendConnection>> itor = map.entrySet().iterator();
+ while (itor.hasNext()) {
+ Entry<Long, BackendConnection> entry = itor.next();
+ BackendConnection con = entry.getValue();
+ if (con instanceof MySQLConnection) {
+ if (((MySQLConnection) con).getPool() == dataSouce) {
+ con.close(reason);
+ itor.remove();
+ }
+ }else if((con instanceof JDBCConnection)
+ && (((JDBCConnection) con).getPool() == dataSouce)){
+ con.close(reason);
+ itor.remove();
+ }
+ }
}
- items.clear();
+ items.clear();
}
-
-}
+}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/backend/ConQueue.java b/src/main/java/io/mycat/backend/ConQueue.java
index 9072c30ba..38ab556c1 100644
--- a/src/main/java/io/mycat/backend/ConQueue.java
+++ b/src/main/java/io/mycat/backend/ConQueue.java
@@ -37,10 +37,12 @@ public void incExecuteCount() {
this.executeCount++;
}
- public void removeCon(BackendConnection con) {
- if (!autoCommitCons.remove(con)) {
- manCommitCons.remove(con);
+ public boolean removeCon(BackendConnection con) {
+ boolean removed = autoCommitCons.remove(con);
+ if (!removed) {
+ return manCommitCons.remove(con);
}
+ return removed;
}
public boolean isSameCon(BackendConnection con) {
@@ -65,13 +67,13 @@ public ArrayList getIdleConsToClose(int count) {
count);
while (!manCommitCons.isEmpty() && readyCloseCons.size() < count) {
BackendConnection theCon = manCommitCons.poll();
- if (theCon != null) {
+ if (theCon != null&&!theCon.isBorrowed()) {
readyCloseCons.add(theCon);
}
}
while (!autoCommitCons.isEmpty() && readyCloseCons.size() < count) {
BackendConnection theCon = autoCommitCons.poll();
- if (theCon != null) {
+ if (theCon != null&&!theCon.isBorrowed()) {
readyCloseCons.add(theCon);
}
diff --git a/src/main/java/io/mycat/backend/PhysicalDBNode.java b/src/main/java/io/mycat/backend/PhysicalDBNode.java
deleted file mode 100644
index 5119fa5f8..000000000
--- a/src/main/java/io/mycat/backend/PhysicalDBNode.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software;Designed and Developed mainly by many Chinese
- * opensource volunteers. you can redistribute it and/or modify it under the
- * terms of the GNU General Public License version 2 only, as published by the
- * Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Any questions about this component can be directed to it's project Web address
- * https://code.google.com/p/opencloudb/.
- *
- */
-package io.mycat.backend;
-
-import io.mycat.route.RouteResultsetNode;
-import io.mycat.server.executors.ResponseHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class PhysicalDBNode {
- protected static final Logger LOGGER = LoggerFactory
- .getLogger(PhysicalDBNode.class);
-
- protected final String name;
- protected final String database;
- protected final PhysicalDBPool dbPool;
-
- public PhysicalDBNode(String hostName, String database,
- PhysicalDBPool dbPool) {
- this.name = hostName;
- this.database = database;
- this.dbPool = dbPool;
- }
-
- public String getName() {
- return name;
- }
-
- public PhysicalDBPool getDbPool() {
- return dbPool;
- }
-
- public String getDatabase() {
- return database;
- }
-
- /**
- * get connection from the same datasource
- *
- * @param exitsCon
- * @throws Exception
- */
- public void getConnectionFromSameSource(String schema,boolean autocommit,
- BackendConnection exitsCon, ResponseHandler handler,
- Object attachment) throws Exception {
-
- PhysicalDatasource ds = this.dbPool.findDatasouce(exitsCon);
- if (ds == null) {
- throw new RuntimeException(
- "can't find exits connection,maybe fininshed " + exitsCon);
- } else {
- ds.getConnection(schema,autocommit, handler, attachment);
- }
-
- }
-
- private void checkRequest(String schema){
- if (schema != null
- && !schema.equals(this.database)) {
- throw new RuntimeException(
- "invalid param ,connection request db is :"
- + schema + " and datanode db is "
- + this.database);
- }
- if (!dbPool.isInitSuccess()) {
- dbPool.init(dbPool.activedIndex);
- }
- }
-
- public void getConnection(String schema,boolean autoCommit, RouteResultsetNode rrs,
- ResponseHandler handler, Object attachment) throws Exception {
- checkRequest(schema);
- if (dbPool.isInitSuccess()) {
- if (rrs.canRunnINReadDB(autoCommit)) {
- dbPool.getRWBanlanceCon(schema,autoCommit, handler, attachment,
- this.database);
- } else {
- dbPool.getSource().getConnection(schema,autoCommit, handler, attachment);
- }
-
- } else {
- throw new IllegalArgumentException("Invalid DataSource:"
- + dbPool.getActivedIndex());
- }
- }
-}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/backend/PhysicalDBPool.java b/src/main/java/io/mycat/backend/PhysicalDBPool.java
deleted file mode 100644
index b3dd5bac8..000000000
--- a/src/main/java/io/mycat/backend/PhysicalDBPool.java
+++ /dev/null
@@ -1,601 +0,0 @@
-/*
- * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software;Designed and Developed mainly by many Chinese
- * opensource volunteers. you can redistribute it and/or modify it under the
- * terms of the GNU General Public License version 2 only, as published by the
- * Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Any questions about this component can be directed to it's project Web address
- * https://code.google.com/p/opencloudb/.
- *
- */
-package io.mycat.backend;
-
-import io.mycat.MycatServer;
-import io.mycat.backend.heartbeat.DBHeartbeat;
-import io.mycat.server.Alarms;
-import io.mycat.server.config.node.DataHostConfig;
-import io.mycat.server.executors.GetConnectionHandler;
-import io.mycat.server.executors.ResponseHandler;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.*;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.locks.ReentrantLock;
-
-public class PhysicalDBPool {
- public static final int BALANCE_NONE = 0;
- public static final int BALANCE_ALL_BACK = 1;
- public static final int BALANCE_ALL = 2;
- public static final int BALANCE_ALL_READ = 3;
- public static final int WRITE_ONLYONE_NODE = 0;
- public static final int WRITE_RANDOM_NODE = 1;
- public static final int WRITE_ALL_NODE = 2;
- public static final long LONG_TIME = 300000;
- public static final int WEIGHT = 0;
-
- protected static final Logger LOGGER = LoggerFactory
- .getLogger(PhysicalDBPool.class);
- private final String hostName;
- protected PhysicalDatasource[] writeSources;
- protected Map readSources;
- protected volatile int activedIndex;
- protected volatile boolean initSuccess;
- protected final ReentrantLock switchLock = new ReentrantLock();
- private final Collection allDs;
- private final int banlance;
- private final int writeType;
- private final Random random = new Random();
- private final Random wnrandom = new Random();
- private String[] schemas;
- private final DataHostConfig dataHostConfig;
-
- public PhysicalDBPool(String name, DataHostConfig conf,
- PhysicalDatasource[] writeSources,
- Map readSources, int balance,
- int writeType) {
- this.hostName = name;
- this.dataHostConfig = conf;
- this.writeSources = writeSources;
- this.banlance = balance;
- this.writeType = writeType;
- Iterator<Entry<Integer, PhysicalDatasource[]>> entryItor = readSources
- .entrySet().iterator();
- while (entryItor.hasNext()) {
- PhysicalDatasource[] values = entryItor.next().getValue();
- if (values.length == 0) {
- entryItor.remove();
- }
- }
- this.readSources = readSources;
- this.allDs = this.genAllDataSources();
- LOGGER.info("total resouces of dataHost " + this.hostName + " is :"
- + allDs.size());
- setDataSourceProps();
- }
-
- public int getWriteType() {
- return writeType;
- }
- public int getBalance() {
- return banlance;
- }
- private void setDataSourceProps() {
- for (PhysicalDatasource ds : this.allDs) {
- ds.setDbPool(this);
- }
- }
-
- public PhysicalDatasource findDatasouce(BackendConnection exitsCon) {
-
- for (PhysicalDatasource ds : this.allDs) {
- if (ds.isReadNode() == exitsCon.isFromSlaveDB()) {
- if (ds.isMyConnection(exitsCon)) {
- return ds;
- }
- }
- }
- LOGGER.warn("can't find connection in pool " + this.hostName + " con:"
- + exitsCon);
- return null;
- }
-
- public String getHostName() {
- return hostName;
- }
-
- /**
- * all write datanodes
- *
- * @return
- */
- public PhysicalDatasource[] getSources() {
- return writeSources;
- }
-
- public PhysicalDatasource getSource() {
- switch (writeType) {
- case WRITE_ONLYONE_NODE: {
- return writeSources[activedIndex];
- }
- case WRITE_RANDOM_NODE: {
-
- int index = Math.abs(wnrandom.nextInt()) % writeSources.length;
- PhysicalDatasource result = writeSources[index];
- if (!this.isAlive(result)) {
- // find all live nodes
- ArrayList alives = new ArrayList(
- writeSources.length - 1);
- for (int i = 0; i < writeSources.length; i++) {
- if (i != index) {
- if (this.isAlive(writeSources[i])) {
- alives.add(i);
- }
- }
- }
- if (alives.isEmpty()) {
- result = writeSources[0];
- } else {
- // random select one
- index = Math.abs(wnrandom.nextInt()) % alives.size();
- result = writeSources[alives.get(index)];
-
- }
- }
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("select write source " + result.getName()
- + " for dataHost:" + this.getHostName());
- }
- return result;
- }
- default: {
- throw new java.lang.IllegalArgumentException("writeType is "
- + writeType + " ,so can't return one write datasource ");
- }
- }
-
- }
-
- public int getActivedIndex() {
- return activedIndex;
- }
-
- public boolean isInitSuccess() {
- return initSuccess;
- }
-
- public int next(int i) {
- if (checkIndex(i)) {
- return (++i == writeSources.length) ? 0 : i;
- } else {
- return 0;
- }
- }
-
- /**
- * Switch the data source
- */
- public boolean switchSource(int newIndex, boolean isAlarm, String reason) {
- if (this.writeType != PhysicalDBPool.WRITE_ONLYONE_NODE
- || !checkIndex(newIndex)) {
- return false;
- }
- final ReentrantLock lock = this.switchLock;
- lock.lock();
- try {
- int current = activedIndex;
- if (current != newIndex) {
- // switch index
- activedIndex = newIndex;
- // init again
- this.init(activedIndex);
- // clear all connections
- this.getSources()[current].clearCons("switch datasource");
- // write log
- LOGGER.warn(switchMessage(current, newIndex, false, reason));
- return true;
- }
- } finally {
- lock.unlock();
- }
- return false;
- }
-
- private String switchMessage(int current, int newIndex, boolean alarm,
- String reason) {
- StringBuilder s = new StringBuilder();
- if (alarm) {
- s.append(Alarms.DATANODE_SWITCH);
- }
- s.append("[Host=").append(hostName).append(",result=[").append(current)
- .append("->");
- s.append(newIndex).append("],reason=").append(reason).append(']');
- return s.toString();
- }
-
- private int loop(int i) {
- return i < writeSources.length ? i : (i - writeSources.length);
- }
-
- public void init(int index) {
- if (!checkIndex(index)) {
- index = 0;
- }
- int active = -1;
- for (int i = 0; i < writeSources.length; i++) {
- int j = loop(i + index);
- if (initSource(j, writeSources[j])) {
- // when switchType is -1 (no switching), do not fail over even if the primary write host is down
- if(dataHostConfig.getSwitchType()==DataHostConfig.NOT_SWITCH_DS&&j>0)
- {
- break;
- }
- active = j;
- activedIndex = active;
- initSuccess = true;
- LOGGER.info(getMessage(active, " init success"));
-
- if (this.writeType == WRITE_ONLYONE_NODE) {
- // only init one write datasource
- MycatServer.getInstance().saveDataHostIndex(hostName, activedIndex);
- break;
- }
- }
- }
- if (!checkIndex(active)) {
- initSuccess = false;
- StringBuilder s = new StringBuilder();
- s.append(Alarms.DEFAULT).append(hostName).append(" init failure");
- LOGGER.error(s.toString());
- }
- }
-
- private boolean checkIndex(int i) {
- return i >= 0 && i < writeSources.length;
- }
-
- private String getMessage(int index, String info) {
- return new StringBuilder().append(hostName).append(" index:")
- .append(index).append(info).toString();
- }
-
- private boolean initSource(int index, PhysicalDatasource ds) {
- int initSize = ds.getConfig().getMinCon();
- LOGGER.info("init backend myqsl source ,create connections total "
- + initSize + " for " + ds.getName() + " index :" + index);
- CopyOnWriteArrayList list = new CopyOnWriteArrayList();
- GetConnectionHandler getConHandler = new GetConnectionHandler(list,
- initSize);
- // long start=System.currentTimeMillis();
- // long timeOut=start+5000*1000L;
-
- for (int i = 0; i < initSize; i++) {
- try {
-
- ds.getConnection(this.schemas[i % schemas.length], true,
- getConHandler, null);
- } catch (Exception e) {
- LOGGER.warn(getMessage(index, " init connection error."), e);
- }
- }
- long timeOut = System.currentTimeMillis() + 60 * 1000;
-
- // waiting for finish
- while (!getConHandler.finished()
- && (System.currentTimeMillis() < timeOut)) {
- try {
- Thread.sleep(100);
-
- } catch (InterruptedException e) {
- LOGGER.error("initError", e);
- }
- }
- LOGGER.info("init result :" + getConHandler.getStatusInfo());
-// for (BackendConnection c : list) {
-// c.release();
-// }
- return !list.isEmpty();
- }
-
- public void doHeartbeat() {
- if (writeSources == null || writeSources.length == 0) {
- return;
- }
-
- for (PhysicalDatasource source : this.allDs) {
- if (source != null) {
- source.doHeartbeat();
- } else {
- StringBuilder s = new StringBuilder();
- s.append(Alarms.DEFAULT).append(hostName).append(" current dataSource is null!");
- LOGGER.error(s.toString());
- }
- }
- }
-
- /**
- * back physical connection heartbeat check
- */
- public void heartbeatCheck(long ildCheckPeriod) {
- for (PhysicalDatasource ds : allDs) {
- // only readnode or all write node or writetype=WRITE_ONLYONE_NODE
- // and current write node will check
- if (ds != null
- && (ds.getHeartbeat().getStatus() == DBHeartbeat.OK_STATUS)
- && (ds.isReadNode()
- || (this.writeType != WRITE_ONLYONE_NODE) || (this.writeType == WRITE_ONLYONE_NODE && ds == this
- .getSource()))) {
- ds.heatBeatCheck(ds.getConfig().getIdleTimeout(),
- ildCheckPeriod);
- }
- }
- }
-
- public void startHeartbeat() {
- for (PhysicalDatasource source : this.allDs) {
- source.startHeartbeat();
- }
- }
-
- public void stopHeartbeat() {
- for (PhysicalDatasource source : this.allDs) {
- source.stopHeartbeat();
- }
- }
-
- public void clearDataSources(String reason) {
- LOGGER.info("clear datasours of pool " + this.hostName);
- for (PhysicalDatasource source : this.allDs) {
- LOGGER.info("clear datasoure of pool " + this.hostName + " ds:"
- + source.getConfig());
- source.clearCons(reason);
- source.stopHeartbeat();
- }
-
- }
-
- public Collection genAllDataSources() {
- LinkedList allSources = new LinkedList();
- for (PhysicalDatasource ds : writeSources) {
- if (ds != null) {
- allSources.add(ds);
- }
- }
- for (PhysicalDatasource[] dataSources : this.readSources.values()) {
- for (PhysicalDatasource ds : dataSources) {
- if (ds != null) {
- allSources.add(ds);
- }
- }
- }
- return allSources;
- }
-
- public Collection getAllDataSources() {
- return this.allDs;
- }
-
- /**
- * return connection for read balance
- *
- * @param handler
- * @param attachment
- * @param database
- * @throws Exception
- */
- public void getRWBanlanceCon(String schema, boolean autocommit,
- ResponseHandler handler, Object attachment, String database)
- throws Exception {
- PhysicalDatasource theNode = null;
- ArrayList okSources = null;
- switch (banlance) {
- case BALANCE_ALL_BACK: {// all read nodes and the standard by masters
-
- okSources = getAllActiveRWSources(true, false, checkSlaveSynStatus());
- if (okSources.isEmpty()) {
- theNode = this.getSource();
- } else {
- theNode = randomSelect(okSources);
- }
- break;
- }
- case BALANCE_ALL: {
- okSources = getAllActiveRWSources(true, true, checkSlaveSynStatus());
- theNode = randomSelect(okSources);
- break;
- }
- case BALANCE_ALL_READ: {
- okSources = getAllActiveRWSources(false, false, checkSlaveSynStatus());
- theNode = randomSelect(okSources);
- break;
- }
- case BALANCE_NONE:
- default:
- // return default write data source
- theNode = this.getSource();
- }
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("select read source " + theNode.getName()
- + " for dataHost:" + this.getHostName());
- }
- theNode.getConnection(schema, autocommit, handler, attachment);
- }
-
- private boolean checkSlaveSynStatus() {
- return (dataHostConfig.getSlaveThreshold() != -1)
- && (dataHostConfig.getSwitchType() == DataHostConfig.SYN_STATUS_SWITCH_DS);
- }
-
- /**
- * TODO: modify by zhuam
- *
- * Weighted random selection: the random probability follows the configured weights.
- * Collisions are likely in any single snapshot, but the distribution evens out as call volume grows;
- * weighting by probability also stays fairly uniform, which makes it easy to adjust provider weights dynamically.
- * @param okSources
- * @return
- */
- public PhysicalDatasource randomSelect(ArrayList okSources) {
-
- if (okSources.isEmpty()) {
- return this.getSource();
-
- } else {
-
- int length = okSources.size(); // total number of sources
- int totalWeight = 0; // total weight
- boolean sameWeight = true; // whether every weight is the same
- for (int i = 0; i < length; i++) {
- int weight = okSources.get(i).getConfig().getWeight();
- totalWeight += weight; // accumulate total weight
- if (sameWeight && i > 0
- && weight != okSources.get(i-1).getConfig().getWeight() ) { // check whether all weights are equal
- sameWeight = false;
- }
- }
-
- if (totalWeight > 0 && !sameWeight ) {
-
- // weights differ and total weight > 0: pick a random offset within the total weight
- int offset = random.nextInt(totalWeight);
-
- // find the segment the random offset falls into
- for (int i = 0; i < length; i++) {
- offset -= okSources.get(i).getConfig().getWeight();
- if (offset < 0) {
- return okSources.get(i);
- }
- }
- }
-
- // weights all equal (or zero): pick uniformly at random
- return okSources.get( random.nextInt(length) );
-
- //int index = Math.abs(random.nextInt()) % okSources.size();
- //return okSources.get(index);
- }
- }
-
- private boolean isAlive(PhysicalDatasource theSource) {
- return (theSource.getHeartbeat().getStatus() == DBHeartbeat.OK_STATUS);
- }
-
- private boolean canSelectAsReadNode(PhysicalDatasource theSource) {
-
- if(theSource.getHeartbeat().getSlaveBehindMaster()==null
- ||theSource.getHeartbeat().getDbSynStatus()==DBHeartbeat.DB_SYN_ERROR){
- return false;
- }
- return (theSource.getHeartbeat().getDbSynStatus() == DBHeartbeat.DB_SYN_NORMAL)
- && (theSource.getHeartbeat().getSlaveBehindMaster() < this.dataHostConfig
- .getSlaveThreshold());
-
- }
-
- /**
- * return all backup write sources
- *
- * @param includeWriteNode if include write nodes
- * @param includeCurWriteNode if include current active write node. invalid when includeWriteNode is false
- * @param filterWithSlaveThreshold
- *
- * @return
- */
- private ArrayList getAllActiveRWSources(
- boolean includeWriteNode,
- boolean includeCurWriteNode, boolean filterWithSlaveThreshold) {
- int curActive = activedIndex;
- ArrayList okSources = new ArrayList(
- this.allDs.size());
- for (int i = 0; i < this.writeSources.length; i++) {
- PhysicalDatasource theSource = writeSources[i];
- if (isAlive(theSource)) {// write node is active
- if (includeWriteNode) {
- if (i == curActive && includeCurWriteNode == false) {
- // not include cur active source
- } else if (filterWithSlaveThreshold) {
- if (canSelectAsReadNode(theSource)) {
- okSources.add(theSource);
- } else {
- continue;
- }
- } else {
- okSources.add(theSource);
- }
- }
- if (!readSources.isEmpty()) {
- // check all slave nodes
- PhysicalDatasource[] allSlaves = this.readSources.get(i);
- if (allSlaves != null) {
- for (PhysicalDatasource slave : allSlaves) {
- if (isAlive(slave)) {
- if (filterWithSlaveThreshold) {
- if (canSelectAsReadNode(slave)) {
- okSources.add(slave);
- } else {
- continue;
- }
- } else {
- okSources.add(slave);
- }
- }
- }
- }
- }
-
- } else {
-
- // TODO : add by zhuam
- // if the write node is down, still keep temporary read service available
- if ( this.dataHostConfig.isTempReadHostAvailable() ) {
-
- if (!readSources.isEmpty()) {
- // check all slave nodes
- PhysicalDatasource[] allSlaves = this.readSources.get(i);
- if (allSlaves != null) {
- for (PhysicalDatasource slave : allSlaves) {
- if (isAlive(slave)) {
-
- if (filterWithSlaveThreshold) {
- if (canSelectAsReadNode(slave)) {
- okSources.add(slave);
- } else {
- continue;
- }
-
- } else {
- okSources.add(slave);
- }
- }
- }
- }
- }
- }
- }
-
- }
- return okSources;
- }
-
- public String[] getSchemas() {
- return schemas;
- }
-
- public void setSchemas(String[] mySchemas) {
- this.schemas = mySchemas;
- }
-
-}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/backend/PhysicalDatasource.java b/src/main/java/io/mycat/backend/PhysicalDatasource.java
deleted file mode 100644
index 6da2469bc..000000000
--- a/src/main/java/io/mycat/backend/PhysicalDatasource.java
+++ /dev/null
@@ -1,429 +0,0 @@
-/*
- * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software;Designed and Developed mainly by many Chinese
- * opensource volunteers. you can redistribute it and/or modify it under the
- * terms of the GNU General Public License version 2 only, as published by the
- * Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Any questions about this component can be directed to it's project Web address
- * https://code.google.com/p/opencloudb/.
- *
- */
-package io.mycat.backend;
-
-import io.mycat.backend.heartbeat.DBHeartbeat;
-import io.mycat.net.NetSystem;
-import io.mycat.server.Alarms;
-import io.mycat.server.config.node.DBHostConfig;
-import io.mycat.server.config.node.DataHostConfig;
-import io.mycat.server.executors.ConnectionHeartBeatHandler;
-import io.mycat.server.executors.DelegateResponseHandler;
-import io.mycat.server.executors.NewConnectionRespHandler;
-import io.mycat.server.executors.ResponseHandler;
-import io.mycat.util.TimeUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.ConcurrentLinkedQueue;
-
-public abstract class PhysicalDatasource {
- public static final Logger LOGGER = LoggerFactory
- .getLogger(PhysicalDatasource.class);
-
- private final String name;
- private final int size;
- private final DBHostConfig config;
- private final ConMap conMap = new ConMap();
- private DBHeartbeat heartbeat;
- private final boolean readNode;
- private volatile long heartbeatRecoveryTime;
- private final DataHostConfig hostConfig;
- private final ConnectionHeartBeatHandler conHeartBeatHanler = new ConnectionHeartBeatHandler();
- private PhysicalDBPool dbPool;
-
- public PhysicalDatasource(DBHostConfig config, DataHostConfig hostConfig,
- boolean isReadNode) {
- this.size = config.getMaxCon();
- this.config = config;
- this.name = config.getHostName();
- this.hostConfig = hostConfig;
- heartbeat = this.createHeartBeat();
- this.readNode = isReadNode;
- }
-
- public boolean isMyConnection(BackendConnection con) {
- return (con.getPool() == this);
- }
-
- public DataHostConfig getHostConfig() {
- return hostConfig;
- }
-
- public boolean isReadNode() {
- return readNode;
- }
-
- public int getSize() {
- return size;
- }
-
- public void setDbPool(PhysicalDBPool dbPool) {
- this.dbPool = dbPool;
- }
-
- public PhysicalDBPool getDbPool() {
- return dbPool;
- }
-
- public abstract DBHeartbeat createHeartBeat();
-
- public String getName() {
- return name;
- }
- public int getIndex(){
- int currentIndex = 0;
- for (int i = 0; i < dbPool.getSources().length; i++) {
- PhysicalDatasource writeHostDatasource = dbPool.getSources()[i];
- if (writeHostDatasource.getName().equals(getName())) {
- currentIndex = i;
- break;
- }
- }
- return currentIndex;
- }
-
- private void checkIfNeedHeartBeat(
- LinkedList<BackendConnection> heartBeatCons, ConQueue queue,
- ConcurrentLinkedQueue<BackendConnection> checkLis,
- long hearBeatTime, long hearBeatTime2) {
- int maxConsInOneCheck = 10;
- Iterator checkListItor = checkLis.iterator();
- while (checkListItor.hasNext()) {
- BackendConnection con = checkListItor.next();
- if (con.isClosedOrQuit()) {
- checkListItor.remove();
- continue;
- }
- if (validSchema(con.getSchema())) {
- if (con.getLastTime() < hearBeatTime
- && heartBeatCons.size() < maxConsInOneCheck) {
- checkListItor.remove();
- // Heart beat check
- con.setBorrowed(true);
- heartBeatCons.add(con);
- }
- } else if (con.getLastTime() < hearBeatTime2) {
- // not valid schema conntion should close for idle
- // exceed 2*conHeartBeatPeriod
- checkListItor.remove();
- con.close(" heart beate idle ");
- }
-
- }
-
- }
-
- public void heatBeatCheck(long timeout, long conHeartBeatPeriod) {
- int ildeCloseCount = hostConfig.getMinCon() * 3;
- int maxConsInOneCheck = 5;
- LinkedList<BackendConnection> heartBeatCons = new LinkedList<BackendConnection>();
-
- long hearBeatTime = TimeUtil.currentTimeMillis() - conHeartBeatPeriod;
- long hearBeatTime2 = TimeUtil.currentTimeMillis() - 2
- * conHeartBeatPeriod;
- for (ConQueue queue : conMap.getAllConQueue()) {
- checkIfNeedHeartBeat(heartBeatCons, queue,
- queue.getAutoCommitCons(), hearBeatTime, hearBeatTime2);
- if (heartBeatCons.size() < maxConsInOneCheck) {
- checkIfNeedHeartBeat(heartBeatCons, queue,
- queue.getManCommitCons(), hearBeatTime, hearBeatTime2);
- } else if (heartBeatCons.size() >= maxConsInOneCheck) {
- break;
- }
- }
-
- if (!heartBeatCons.isEmpty()) {
- for (BackendConnection con : heartBeatCons) {
- conHeartBeatHanler
- .doHeartBeat(con, hostConfig.getHeartbeatSQL());
- }
- }
-
- // check if there has timeouted heatbeat cons
- conHeartBeatHanler.abandTimeOuttedConns();
- int idleCons = getIdleCount();
- int activeCons = this.getActiveCount();
- int createCount = (hostConfig.getMinCon() - idleCons) / 3;
- // create if idle too little
- if ((createCount > 0) && (idleCons + activeCons < size)
- && (idleCons < hostConfig.getMinCon())) {
- createByIdleLitte(idleCons, createCount);
- } else if (idleCons > hostConfig.getMinCon()) {
- closeByIdleMany(idleCons-hostConfig.getMinCon());
- } else {
- int activeCount = this.getActiveCount();
- if (activeCount > size) {
- StringBuilder s = new StringBuilder();
- s.append(Alarms.DEFAULT).append("DATASOURCE EXCEED [name=")
- .append(name).append(",active=");
- s.append(activeCount).append(",size=").append(size).append(']');
- LOGGER.warn(s.toString());
- }
- }
- }
-
- private void closeByIdleMany(int ildeCloseCount) {
- LOGGER.info("too many ilde cons ,close some for datasouce " + name);
- List<BackendConnection> readyCloseCons = new ArrayList<BackendConnection>(
- ildeCloseCount);
- for (ConQueue queue : conMap.getAllConQueue()) {
- readyCloseCons.addAll(queue.getIdleConsToClose(ildeCloseCount));
- if (readyCloseCons.size() >= ildeCloseCount) {
- break;
- }
- }
-
- for (BackendConnection idleCon : readyCloseCons) {
- if (idleCon.isBorrowed()) {
- LOGGER.warn("find idle con is using " + idleCon);
- }
- idleCon.close("too many idle con");
- }
- }
-
- private void createByIdleLitte(int idleCons, int createCount) {
- LOGGER.info("create connections ,because idle connection not enough ,cur is "
- + idleCons
- + ", minCon is "
- + hostConfig.getMinCon()
- + " for "
- + name);
- NewConnectionRespHandler simpleHandler = new NewConnectionRespHandler();
-
- final String[] schemas = dbPool.getSchemas();
- for (int i = 0; i < createCount; i++) {
- if (this.getActiveCount() + this.getIdleCount() >= size) {
- break;
- }
- try {
- // creat new connection
- this.createNewConnection(simpleHandler, null, schemas[i
- % schemas.length]);
- } catch (IOException e) {
- LOGGER.warn("create connection err " + e);
- }
-
- }
- }
-
- public int getActiveCount() {
- return this.conMap.getActiveCountForDs(this);
- }
-
- public void clearCons(String reason) {
- this.conMap.clearConnections(reason, this);
- }
-
- public void startHeartbeat() {
- heartbeat.start();
- }
-
- public void stopHeartbeat() {
- heartbeat.stop();
- }
-
- public void doHeartbeat() {
- // skip the heartbeat if the scheduled recovery time has not been reached yet
- if (TimeUtil.currentTimeMillis() < heartbeatRecoveryTime) {
- return;
- }
- if (!heartbeat.isStop()) {
- try {
- heartbeat.heartbeat();
- } catch (Exception e) {
- LOGGER.error(name + " heartbeat error.", e);
- }
- }
- }
-
- private BackendConnection takeCon(BackendConnection conn,
- final ResponseHandler handler, final Object attachment,
- String schema) {
-
- conn.setBorrowed(true);
- if (!conn.getSchema().equals(schema)) {
- // need do schema syn in before sql send
- conn.setSchema(schema);
- }
- ConQueue queue = conMap.getSchemaConQueue(schema);
- queue.incExecuteCount();
- conn.setAttachment(attachment);
- conn.setLastTime(System.currentTimeMillis()); // refresh lastTime every time the connection is taken, so the front-end idle check does not close it and fail the running SQL
- handler.connectionAcquired(conn);
- return conn;
- }
-
- private void createNewConnection(final ResponseHandler handler,
- final Object attachment, final String schema) throws IOException {
- // aysn create connection
- NetSystem.getInstance().getExecutor().execute(new Runnable() {
- public void run() {
- try {
- createNewConnection(new DelegateResponseHandler(handler) {
- @Override
- public void connectionError(Throwable e,
- BackendConnection conn) {
- handler.connectionError(e, conn);
- }
-
- @Override
- public void connectionAcquired(BackendConnection conn) {
- takeCon(conn, handler, attachment, schema);
- }
- }, schema);
- } catch (IOException e) {
- handler.connectionError(e, null);
- }
- }
- });
- }
-
- public void getConnection(String schema, boolean autocommit,
- final ResponseHandler handler, final Object attachment)
- throws IOException {
- BackendConnection con = this.conMap.tryTakeCon(schema, autocommit);
- if (con != null) {
- takeCon(con, handler, attachment, schema);
- return;
- } else {
- int activeCons = this.getActiveCount();// current number of active connections
- if(activeCons+1>size){// the next connection would exceed the maximum connection count
- LOGGER.error("the max activeConnnections size can not be max than maxconnections");
- throw new IOException("the max activeConnnections size can not be max than maxconnections");
- }else{ // create connection
- LOGGER.info("no ilde connection in pool,create new connection for " + this.name
- + " of schema "+schema);
- createNewConnection(handler, attachment, schema);
- }
- }
-
- }
-
- private void returnCon(BackendConnection c) {
- c.setAttachment(null);
- c.setBorrowed(false);
- c.setLastTime(TimeUtil.currentTimeMillis());
- ConQueue queue = this.conMap.getSchemaConQueue(c.getSchema());
-
- boolean ok = false;
- if (c.isAutocommit()) {
- ok = queue.getAutoCommitCons().offer(c);
- } else {
- ok = queue.getManCommitCons().offer(c);
- }
- if (!ok) {
-
- LOGGER.warn("can't return to pool ,so close con " + c);
- c.close("can't return to pool ");
- }
- }
-
- public void releaseChannel(BackendConnection c) {
- returnCon(c);
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("release channel " + c);
- }
- }
-
- public void connectionClosed(BackendConnection conn) {
- ConQueue queue = this.conMap.getSchemaConQueue(conn.getSchema());
- if (queue != null) {
- queue.removeCon(conn);
- }
-
- }
-
- public abstract void createNewConnection(ResponseHandler handler,
- String schema) throws IOException;
-
- public long getHeartbeatRecoveryTime() {
- return heartbeatRecoveryTime;
- }
-
- public void setHeartbeatRecoveryTime(long heartbeatRecoveryTime) {
- this.heartbeatRecoveryTime = heartbeatRecoveryTime;
- }
-
- public DBHostConfig getConfig() {
- return config;
- }
-}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/backend/datasource/PhysicalDBNode.java b/src/main/java/io/mycat/backend/datasource/PhysicalDBNode.java
new file mode 100644
index 000000000..71deb7ed2
--- /dev/null
+++ b/src/main/java/io/mycat/backend/datasource/PhysicalDBNode.java
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.backend.datasource;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.BackendConnection;
+import io.mycat.backend.mysql.nio.handler.ResponseHandler;
+import io.mycat.route.RouteResultsetNode;
+
+public class PhysicalDBNode {
+ protected static final Logger LOGGER = LoggerFactory
+ .getLogger(PhysicalDBNode.class);
+
+ protected final String name;
+ protected final String database;
+ protected final PhysicalDBPool dbPool;
+
+ public PhysicalDBNode(String hostName, String database,
+ PhysicalDBPool dbPool) {
+ this.name = hostName;
+ this.database = database;
+ this.dbPool = dbPool;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public PhysicalDBPool getDbPool() {
+ return dbPool;
+ }
+
+ public String getDatabase() {
+ return database;
+ }
+
+ /**
+ * get connection from the same datasource
+ *
+ * @param exitsCon
+ * @throws Exception
+ */
+ public void getConnectionFromSameSource(String schema,boolean autocommit,
+ BackendConnection exitsCon, ResponseHandler handler,
+ Object attachment) throws Exception {
+
+ PhysicalDatasource ds = this.dbPool.findDatasouce(exitsCon);
+ if (ds == null) {
+ throw new RuntimeException(
+ "can't find exits connection,maybe fininshed " + exitsCon);
+ } else {
+ ds.getConnection(schema,autocommit, handler, attachment);
+ }
+
+ }
+
+ private void checkRequest(String schema){
+ if (schema != null
+ && !schema.equals(this.database)) {
+ throw new RuntimeException(
+ "invalid param ,connection request db is :"
+ + schema + " and datanode db is "
+ + this.database);
+ }
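+ // lazily (re)initialise the backing pool if it has not been brought up yet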
+ if (!dbPool.isInitSuccess()) {
+ dbPool.init(dbPool.activedIndex);
+ }
+ }
+
+ public void getConnection(String schema,boolean autoCommit, RouteResultsetNode rrs,
+ ResponseHandler handler, Object attachment) throws Exception {
+ checkRequest(schema);
+ if (dbPool.isInitSuccess()) {
+ LOGGER.debug("rrs.getRunOnSlave() " + rrs.getRunOnSlave());
+ if(rrs.getRunOnSlave() != null){ // statement carries a /*db_type=master/slave*/ annotation
+ // forced to run on a slave
+ if(rrs.getRunOnSlave()){
+ LOGGER.debug("rrs.isHasBlanceFlag() " + rrs.isHasBlanceFlag());
+ if (rrs.isHasBlanceFlag()) { // statement also carries a /*balance*/ annotation (currently only one annotation seems to be supported...)
+ dbPool.getReadBanlanceCon(schema,autoCommit,handler, attachment, this.database);
+ }else{ // no /*balance*/ annotation
+ LOGGER.debug("rrs.isHasBlanceFlag()" + rrs.isHasBlanceFlag());
+ if(!dbPool.getReadCon(schema, autoCommit, handler, attachment, this.database)){
+ LOGGER.warn("Do not have slave connection to use, use master connection instead.");
+ PhysicalDatasource writeSource=dbPool.getSource();
+ // record a write hit against the write node
+ writeSource.setWriteCount();
+ writeSource.getConnection(schema,
+ autoCommit, handler, attachment);
+ rrs.setRunOnSlave(false);
+ rrs.setCanRunInReadDB(false);
+ }
+ }
+ }else{ // forced to run on the master
+ // by default we get the writeSource, i.e. the statement goes to the master
+ LOGGER.debug("rrs.getRunOnSlave() " + rrs.getRunOnSlave());
+ PhysicalDatasource writeSource=dbPool.getSource();
+ // record a read hit against the write node (the read is forced onto the master)
+ writeSource.setReadCount();
+ writeSource.getConnection(schema, autoCommit,
+ handler, attachment);
+ rrs.setCanRunInReadDB(false);
+ }
+ }else{ // no /*db_type=master/slave*/ annotation, fall back to the original routing behaviour
+ LOGGER.debug("rrs.getRunOnSlave() " + rrs.getRunOnSlave()); // null
+ if (rrs.canRunnINReadDB(autoCommit)) {
+ dbPool.getRWBanlanceCon(schema,autoCommit, handler, attachment, this.database);
+ } else {
+ PhysicalDatasource writeSource =dbPool.getSource();
+ // record a write hit against the write node
+ writeSource.setWriteCount();
+ writeSource.getConnection(schema, autoCommit,
+ handler, attachment);
+ }
+ }
+
+ } else {
+ throw new IllegalArgumentException("Invalid DataSource:" + dbPool.getActivedIndex());
+ }
+ }
+
+// public void getConnection(String schema,boolean autoCommit, RouteResultsetNode rrs,
+// ResponseHandler handler, Object attachment) throws Exception {
+// checkRequest(schema);
+// if (dbPool.isInitSuccess()) {
+// if (rrs.canRunnINReadDB(autoCommit)) {
+// dbPool.getRWBanlanceCon(schema,autoCommit, handler, attachment,
+// this.database);
+// } else {
+// dbPool.getSource().getConnection(schema,autoCommit, handler, attachment);
+// }
+//
+// } else {
+// throw new IllegalArgumentException("Invalid DataSource:"
+// + dbPool.getActivedIndex());
+// }
+// }
+}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/backend/datasource/PhysicalDBPool.java b/src/main/java/io/mycat/backend/datasource/PhysicalDBPool.java
new file mode 100644
index 000000000..56968f2c8
--- /dev/null
+++ b/src/main/java/io/mycat/backend/datasource/PhysicalDBPool.java
@@ -0,0 +1,721 @@
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.backend.datasource;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.mycat.MycatServer;
+import io.mycat.backend.BackendConnection;
+import io.mycat.backend.heartbeat.DBHeartbeat;
+import io.mycat.backend.mysql.nio.handler.GetConnectionHandler;
+import io.mycat.backend.mysql.nio.handler.ResponseHandler;
+import io.mycat.config.Alarms;
+import io.mycat.config.model.DataHostConfig;
+
+public class PhysicalDBPool {
+
+ protected static final Logger LOGGER = LoggerFactory.getLogger(PhysicalDBPool.class);
+
+ public static final int BALANCE_NONE = 0;
+ public static final int BALANCE_ALL_BACK = 1;
+ public static final int BALANCE_ALL = 2;
+ public static final int BALANCE_ALL_READ = 3;
+
+ public static final int WRITE_ONLYONE_NODE = 0;
+ public static final int WRITE_RANDOM_NODE = 1;
+ public static final int WRITE_ALL_NODE = 2;
+
+ public static final long LONG_TIME = 300000;
+ public static final int WEIGHT = 0;
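+
+ // A rough summary of the balance modes, based on how getRWBanlanceCon() below uses them:
+ // BALANCE_NONE - reads always go to the current write source
+ // BALANCE_ALL_BACK - reads go to read nodes and standby write nodes (not the active write node)
+ // BALANCE_ALL - reads go to every live node, including the active write node
+ // BALANCE_ALL_READ - reads go to read nodes only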
+
+ private final String hostName;
+
+ protected PhysicalDatasource[] writeSources;
+ protected Map<Integer, PhysicalDatasource[]> readSources;
+
+ protected volatile int activedIndex;
+ protected volatile boolean initSuccess;
+
+ protected final ReentrantLock switchLock = new ReentrantLock();
+ private final Collection<PhysicalDatasource> allDs;
+ private final int banlance;
+ private final int writeType;
+ private final Random random = new Random();
+ private final Random wnrandom = new Random();
+ private String[] schemas;
+ private final DataHostConfig dataHostConfig;
+ private String slaveIDs;
+
+ public PhysicalDBPool(String name, DataHostConfig conf,
+ PhysicalDatasource[] writeSources,
+ Map<Integer, PhysicalDatasource[]> readSources, int balance,
+ int writeType) {
+
+ this.hostName = name;
+ this.dataHostConfig = conf;
+ this.writeSources = writeSources;
+ this.banlance = balance;
+ this.writeType = writeType;
+
+ Iterator<Map.Entry<Integer, PhysicalDatasource[]>> entryItor = readSources.entrySet().iterator();
+ while (entryItor.hasNext()) {
+ PhysicalDatasource[] values = entryItor.next().getValue();
+ if (values.length == 0) {
+ entryItor.remove();
+ }
+ }
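+ // readHost groups with no datasources are dropped above so that balancing only ever sees usable sources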
+
+ this.readSources = readSources;
+ this.allDs = this.genAllDataSources();
+
+ LOGGER.info("total resouces of dataHost " + this.hostName + " is :" + allDs.size());
+
+ setDataSourceProps();
+ }
+
+ public int getWriteType() {
+ return writeType;
+ }
+
+ private void setDataSourceProps() {
+ for (PhysicalDatasource ds : this.allDs) {
+ ds.setDbPool(this);
+ }
+ }
+
+ public PhysicalDatasource findDatasouce(BackendConnection exitsCon) {
+ for (PhysicalDatasource ds : this.allDs) {
+ if ((ds.isReadNode() == exitsCon.isFromSlaveDB())
+ && ds.isMyConnection(exitsCon)) {
+ return ds;
+ }
+ }
+
+ LOGGER.warn("can't find connection in pool " + this.hostName + " con:" + exitsCon);
+ return null;
+ }
+
+ public String getSlaveIDs() {
+ return slaveIDs;
+ }
+
+ public void setSlaveIDs(String slaveIDs) {
+ this.slaveIDs = slaveIDs;
+ }
+
+ public String getHostName() {
+ return hostName;
+ }
+
+ /**
+ * all write datanodes
+ * @return
+ */
+ public PhysicalDatasource[] getSources() {
+ return writeSources;
+ }
+
+ public PhysicalDatasource getSource() {
+
+ switch (writeType) {
+ case WRITE_ONLYONE_NODE: {
+ return writeSources[activedIndex];
+ }
+ case WRITE_RANDOM_NODE: {
+
+ int index = Math.abs(wnrandom.nextInt(Integer.MAX_VALUE)) % writeSources.length;
+ PhysicalDatasource result = writeSources[index];
+ if (!this.isAlive(result)) {
+
+ // find all live nodes
+ ArrayList<Integer> alives = new ArrayList<Integer>(writeSources.length - 1);
+ for (int i = 0; i < writeSources.length; i++) {
+ if (i != index
+ && this.isAlive(writeSources[i])) {
+ alives.add(i);
+ }
+ }
+
+ if (alives.isEmpty()) {
+ result = writeSources[0];
+ } else {
+ // random select one
+ index = Math.abs(wnrandom.nextInt(Integer.MAX_VALUE)) % alives.size();
+ result = writeSources[alives.get(index)];
+
+ }
+ }
+
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("select write source " + result.getName()
+ + " for dataHost:" + this.getHostName());
+ }
+ return result;
+ }
+ default: {
+ throw new java.lang.IllegalArgumentException("writeType is "
+ + writeType + " ,so can't return one write datasource ");
+ }
+ }
+
+ }
+
+ public int getActivedIndex() {
+ return activedIndex;
+ }
+
+ public boolean isInitSuccess() {
+ return initSuccess;
+ }
+
+ public int next(int i) {
+ if (checkIndex(i)) {
+ return (++i == writeSources.length) ? 0 : i;
+ } else {
+ return 0;
+ }
+ }
+
+ public boolean switchSource(int newIndex, boolean isAlarm, String reason) {
+ if (this.writeType != PhysicalDBPool.WRITE_ONLYONE_NODE || !checkIndex(newIndex)) {
+ return false;
+ }
+
+ final ReentrantLock lock = this.switchLock;
+ lock.lock();
+ try {
+ int current = activedIndex;
+ if (current != newIndex) {
+
+ // switch index
+ activedIndex = newIndex;
+
+ // init again
+ this.init(activedIndex);
+
+ // clear all connections
+ this.getSources()[current].clearCons("switch datasource");
+
+ // write log
+ LOGGER.warn(switchMessage(current, newIndex, false, reason));
+
+ return true;
+ }
+ } finally {
+ lock.unlock();
+ }
+ return false;
+ }
+
+ private String switchMessage(int current, int newIndex, boolean alarm, String reason) {
+ StringBuilder s = new StringBuilder();
+ if (alarm) {
+ s.append(Alarms.DATANODE_SWITCH);
+ }
+ s.append("[Host=").append(hostName).append(",result=[").append(current).append("->");
+ s.append(newIndex).append("],reason=").append(reason).append(']');
+ return s.toString();
+ }
+
+ private int loop(int i) {
+ return i < writeSources.length ? i : (i - writeSources.length);
+ }
+
+ public void init(int index) {
+
+ if (!checkIndex(index)) {
+ index = 0;
+ }
+
+ int active = -1;
+ for (int i = 0; i < writeSources.length; i++) {
+ int j = loop(i + index);
+ if ( initSource(j, writeSources[j]) ) {
+
+ // when switchType is -1 (no switching), do not fail over to another write host if the primary write host is down
+ boolean isNotSwitchDs = ( dataHostConfig.getSwitchType() == DataHostConfig.NOT_SWITCH_DS );
+ if ( isNotSwitchDs && j > 0 ) {
+ break;
+ }
+
+ active = j;
+ activedIndex = active;
+ initSuccess = true;
+ LOGGER.info(getMessage(active, " init success"));
+
+ if (this.writeType == WRITE_ONLYONE_NODE) {
+ // only init one write datasource
+ MycatServer.getInstance().saveDataHostIndex(hostName, activedIndex);
+ break;
+ }
+ }
+ }
+
+ if (!checkIndex(active)) {
+ initSuccess = false;
+ StringBuilder s = new StringBuilder();
+ s.append(Alarms.DEFAULT).append(hostName).append(" init failure");
+ LOGGER.error(s.toString());
+ }
+ }
+
+ private boolean checkIndex(int i) {
+ return i >= 0 && i < writeSources.length;
+ }
+
+ private String getMessage(int index, String info) {
+ return new StringBuilder().append(hostName).append(" index:").append(index).append(info).toString();
+ }
+
+ private boolean initSource(int index, PhysicalDatasource ds) {
+ int initSize = ds.getConfig().getMinCon();
+
+ LOGGER.info("init backend myqsl source ,create connections total " + initSize + " for " + ds.getName() + " index :" + index);
+
+ CopyOnWriteArrayList<BackendConnection> list = new CopyOnWriteArrayList<BackendConnection>();
+ GetConnectionHandler getConHandler = new GetConnectionHandler(list, initSize);
+ // long start = System.currentTimeMillis();
+ // long timeOut = start + 5000 * 1000L;
+
+ for (int i = 0; i < initSize; i++) {
+ try {
+ ds.getConnection(this.schemas[i % schemas.length], true, getConHandler, null);
+ } catch (Exception e) {
+ LOGGER.warn(getMessage(index, " init connection error."), e);
+ }
+ }
+ long timeOut = System.currentTimeMillis() + 60 * 1000;
+
+ // waiting for finish
+ while (!getConHandler.finished() && (System.currentTimeMillis() < timeOut)) {
+ try {
+ Thread.sleep(100);
+
+ } catch (InterruptedException e) {
+ LOGGER.error("initError", e);
+ }
+ }
+ LOGGER.info("init result :" + getConHandler.getStatusInfo());
+// for (BackendConnection c : list) {
+// c.release();
+// }
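+ // the pool counts as successfully initialised if at least one connection came up before the timeout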
+ return !list.isEmpty();
+ }
+
+ public void doHeartbeat() {
+
+
+ if (writeSources == null || writeSources.length == 0) {
+ return;
+ }
+
+ for (PhysicalDatasource source : this.allDs) {
+
+ if (source != null) {
+ source.doHeartbeat();
+ } else {
+ StringBuilder s = new StringBuilder();
+ s.append(Alarms.DEFAULT).append(hostName).append(" current dataSource is null!");
+ LOGGER.error(s.toString());
+ }
+ }
+
+ }
+
+ /**
+ * back physical connection heartbeat check
+ */
+ public void heartbeatCheck(long ildCheckPeriod) {
+
+ for (PhysicalDatasource ds : allDs) {
+ // only readnode or all write node or writetype=WRITE_ONLYONE_NODE
+ // and current write node will check
+ if (ds != null
+ && (ds.getHeartbeat().getStatus() == DBHeartbeat.OK_STATUS)
+ && (ds.isReadNode()
+ || (this.writeType != WRITE_ONLYONE_NODE)
+ || (this.writeType == WRITE_ONLYONE_NODE
+ && ds == this.getSource()))) {
+
+ ds.heatBeatCheck(ds.getConfig().getIdleTimeout(), ildCheckPeriod);
+ }
+ }
+ }
+
+ public void startHeartbeat() {
+ for (PhysicalDatasource source : this.allDs) {
+ source.startHeartbeat();
+ }
+ }
+
+ public void stopHeartbeat() {
+ for (PhysicalDatasource source : this.allDs) {
+ source.stopHeartbeat();
+ }
+ }
+
+ /**
+ * forcibly clear all dataSources
+ * @param reason
+ */
+ public void clearDataSources(String reason) {
+ LOGGER.info("clear datasours of pool " + this.hostName);
+ for (PhysicalDatasource source : this.allDs) {
+ LOGGER.info("clear datasoure of pool " + this.hostName + " ds:" + source.getConfig());
+ source.clearCons(reason);
+ source.stopHeartbeat();
+ }
+ }
+
+ public Collection<PhysicalDatasource> genAllDataSources() {
+
+ LinkedList<PhysicalDatasource> allSources = new LinkedList<PhysicalDatasource>();
+ for (PhysicalDatasource ds : writeSources) {
+ if (ds != null) {
+ allSources.add(ds);
+ }
+ }
+
+ for (PhysicalDatasource[] dataSources : this.readSources.values()) {
+ for (PhysicalDatasource ds : dataSources) {
+ if (ds != null) {
+ allSources.add(ds);
+ }
+ }
+ }
+ return allSources;
+ }
+
+ public Collection<PhysicalDatasource> getAllDataSources() {
+ return this.allDs;
+ }
+
+ /**
+ * return connection for read balance
+ *
+ * @param handler
+ * @param attachment
+ * @param database
+ * @throws Exception
+ */
+ public void getRWBanlanceCon(String schema, boolean autocommit,
+ ResponseHandler handler, Object attachment, String database) throws Exception {
+
+ PhysicalDatasource theNode = null;
+ ArrayList<PhysicalDatasource> okSources = null;
+ switch (banlance) {
+ case BALANCE_ALL_BACK: {
+ // all read nodes plus the standby masters (excluding the currently active write node)
+ okSources = getAllActiveRWSources(true, false, checkSlaveSynStatus());
+ if (okSources.isEmpty()) {
+ theNode = this.getSource();
+
+ } else {
+ theNode = randomSelect(okSources);
+ }
+ break;
+ }
+ case BALANCE_ALL: {
+ okSources = getAllActiveRWSources(true, true, checkSlaveSynStatus());
+ theNode = randomSelect(okSources);
+ break;
+ }
+ case BALANCE_ALL_READ: {
+ okSources = getAllActiveRWSources(false, false, checkSlaveSynStatus());
+ theNode = randomSelect(okSources);
+ break;
+ }
+ case BALANCE_NONE:
+ default:
+ // return default write data source
+ theNode = this.getSource();
+ }
+
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("select read source " + theNode.getName() + " for dataHost:" + this.getHostName());
+ }
+ // count a read operation against the selected node
+ theNode.setReadCount();
+ theNode.getConnection(schema, autocommit, handler, attachment);
+ }
+
+ /**
+ * read load balancing among slaves, i.e. balancing across the readSources
+ * @param schema
+ * @param autocommit
+ * @param handler
+ * @param attachment
+ * @param database
+ * @throws Exception
+ */
+ public void getReadBanlanceCon(String schema, boolean autocommit, ResponseHandler handler,
+ Object attachment, String database)throws Exception {
+ PhysicalDatasource theNode = null;
+ ArrayList<PhysicalDatasource> okSources = null;
+ okSources = getAllActiveRWSources(false, false, checkSlaveSynStatus());
+ theNode = randomSelect(okSources);
+ // count a read operation against the selected node
+ theNode.setReadCount();
+ theNode.getConnection(schema, autocommit, handler, attachment);
+ }
+
+ /**
+ * randomly pick a connection from the readHosts under a writeHost; used for the slave annotation
+ * @param schema
+ * @param autocommit
+ * @param handler
+ * @param attachment
+ * @param database
+ * @return
+ * @throws Exception
+ */
+ public boolean getReadCon(String schema, boolean autocommit, ResponseHandler handler,
+ Object attachment, String database)throws Exception {
+ PhysicalDatasource theNode = null;
+
+ LOGGER.debug("!readSources.isEmpty() " + !readSources.isEmpty());
+ if (!readSources.isEmpty()) {
+ int index = Math.abs(random.nextInt(Integer.MAX_VALUE)) % readSources.size();
+ PhysicalDatasource[] allSlaves = this.readSources.get(index);
+// System.out.println("allSlaves.length " + allSlaves.length);
+ if (allSlaves != null) {
+ index = Math.abs(random.nextInt(Integer.MAX_VALUE)) % readSources.size();
+ PhysicalDatasource slave = allSlaves[index];
+
+ for (int i = 0; i < allSlaves.length; i++) {
+ if (isAlive(allSlaves[i])) {
+ slave = allSlaves[i];
+ break;
+ }
+ }
+ theNode = slave;
+ theNode.setReadCount();
+ theNode.getConnection(schema, autocommit, handler, attachment);
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public PhysicalDatasource randomSelect(ArrayList<PhysicalDatasource> okSources) {
+
+ if (okSources.isEmpty()) {
+ return this.getSource();
+
+ } else {
+
+ int length = okSources.size(); // number of candidate sources
+ int totalWeight = 0; // sum of all weights
+ boolean sameWeight = true; // whether every source has the same weight
+ for (int i = 0; i < length; i++) {
+ int weight = okSources.get(i).getConfig().getWeight();
+ totalWeight += weight; // accumulate the total weight
+ if (sameWeight && i > 0
+ && weight != okSources.get(i-1).getConfig().getWeight() ) { // detect whether all weights are equal
+ sameWeight = false;
+ }
+ }
+
+ if (totalWeight > 0 && !sameWeight ) {
+
+ // weights differ and the total is positive: draw a random offset within the total weight
+ int offset = random.nextInt(totalWeight);
+
+ // find which segment the random offset falls into
+ for (int i = 0; i < length; i++) {
+ offset -= okSources.get(i).getConfig().getWeight();
+ if (offset < 0) {
+ return okSources.get(i);
+ }
+ }
+ }
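+ // e.g. (illustrative) with weights {1,2,3}: totalWeight=6 and offset is drawn from [0,6);
+ // offset 0 maps to source 0, offsets 1-2 to source 1, offsets 3-5 to source 2, i.e. weight-proportional selection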
+
+ // all weights equal (or total weight is 0): pick uniformly at random
+ return okSources.get( random.nextInt(length) );
+
+ //int index = Math.abs(random.nextInt()) % okSources.size();
+ //return okSources.get(index);
+ }
+ }
+
+ //
+ public int getBalance() {
+ return banlance;
+ }
+
+ private boolean isAlive(PhysicalDatasource theSource) {
+ return (theSource.getHeartbeat().getStatus() == DBHeartbeat.OK_STATUS);
+ }
+
+ private boolean canSelectAsReadNode(PhysicalDatasource theSource) {
+
+ Integer slaveBehindMaster = theSource.getHeartbeat().getSlaveBehindMaster();
+ int dbSynStatus = theSource.getHeartbeat().getDbSynStatus();
+
+ if ( slaveBehindMaster == null || dbSynStatus == DBHeartbeat.DB_SYN_ERROR) {
+ return false;
+ }
+ boolean isSync = dbSynStatus == DBHeartbeat.DB_SYN_NORMAL;
+ boolean isNotDelay = slaveBehindMaster < this.dataHostConfig.getSlaveThreshold();
+ return isSync && isNotDelay;
+ }
+
+ /**
+ * return all backup write sources
+ *
+ * @param includeWriteNode if include write nodes
+ * @param includeCurWriteNode if include current active write node. invalid when includeWriteNode is false
+ * @param filterWithSlaveThreshold
+ *
+ * @return
+ */
+ private ArrayList<PhysicalDatasource> getAllActiveRWSources(
+ boolean includeWriteNode, boolean includeCurWriteNode, boolean filterWithSlaveThreshold) {
+
+ int curActive = activedIndex;
+ ArrayList<PhysicalDatasource> okSources = new ArrayList<PhysicalDatasource>(this.allDs.size());
+
+ for (int i = 0; i < this.writeSources.length; i++) {
+ PhysicalDatasource theSource = writeSources[i];
+ if (isAlive(theSource)) {// write node is active
+
+ if (includeWriteNode) {
+ boolean isCurWriteNode = ( i == curActive );
+ if ( isCurWriteNode && includeCurWriteNode == false) {
+ // not include cur active source
+ } else if (filterWithSlaveThreshold && theSource.isSalveOrRead() ) {
+ boolean selected = canSelectAsReadNode(theSource);
+ if ( selected ) {
+ okSources.add(theSource);
+ } else {
+ continue;
+ }
+ } else {
+ okSources.add(theSource);
+ }
+ }
+
+ if (!readSources.isEmpty()) {
+ // check all slave nodes
+ PhysicalDatasource[] allSlaves = this.readSources.get(i);
+ if (allSlaves != null) {
+ for (PhysicalDatasource slave : allSlaves) {
+ if (isAlive(slave)) {
+ if (filterWithSlaveThreshold) {
+ boolean selected = canSelectAsReadNode(slave);
+ if ( selected ) {
+ okSources.add(slave);
+ } else {
+ continue;
+ }
+ } else {
+ okSources.add(slave);
+ }
+ }
+ }
+ }
+ }
+
+ } else {
+
+ // TODO : add by zhuam
+ // if the write node is not OK, still keep temporary read service available through its slaves
+ if ( this.dataHostConfig.isTempReadHostAvailable()
+ && !readSources.isEmpty()) {
+
+ // check all slave nodes
+ PhysicalDatasource[] allSlaves = this.readSources.get(i);
+ if (allSlaves != null) {
+ for (PhysicalDatasource slave : allSlaves) {
+ if (isAlive(slave)) {
+
+ if (filterWithSlaveThreshold) {
+ if (canSelectAsReadNode(slave)) {
+ okSources.add(slave);
+ } else {
+ continue;
+ }
+
+ } else {
+ okSources.add(slave);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ }
+ return okSources;
+ }
+
+ public String[] getSchemas() {
+ return schemas;
+ }
+
+ public void setSchemas(String[] mySchemas) {
+ this.schemas = mySchemas;
+ }
+}
diff --git a/src/main/java/io/mycat/backend/datasource/PhysicalDatasource.java b/src/main/java/io/mycat/backend/datasource/PhysicalDatasource.java
new file mode 100644
index 000000000..6851f0900
--- /dev/null
+++ b/src/main/java/io/mycat/backend/datasource/PhysicalDatasource.java
@@ -0,0 +1,635 @@
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.backend.datasource;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.mycat.MycatServer;
+import io.mycat.backend.BackendConnection;
+import io.mycat.backend.ConMap;
+import io.mycat.backend.ConQueue;
+import io.mycat.backend.heartbeat.DBHeartbeat;
+import io.mycat.backend.mysql.nio.MySQLConnection;
+import io.mycat.backend.mysql.nio.handler.ConnectionHeartBeatHandler;
+import io.mycat.backend.mysql.nio.handler.DelegateResponseHandler;
+import io.mycat.backend.mysql.nio.handler.NewConnectionRespHandler;
+import io.mycat.backend.mysql.nio.handler.ResponseHandler;
+import io.mycat.config.Alarms;
+import io.mycat.config.model.DBHostConfig;
+import io.mycat.config.model.DataHostConfig;
+import io.mycat.util.TimeUtil;
+
+
+public abstract class PhysicalDatasource {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(PhysicalDatasource.class);
+
+ private final String name;
+ private final int size;
+ private final DBHostConfig config;
+ private final ConMap conMap = new ConMap();
+ private DBHeartbeat heartbeat;
+ private final boolean readNode;
+ private volatile long heartbeatRecoveryTime;
+ private final DataHostConfig hostConfig;
+ private final ConnectionHeartBeatHandler conHeartBeatHanler = new ConnectionHeartBeatHandler();
+ private PhysicalDBPool dbPool;
+
+ // per-DataSource read counter
+ private AtomicLong readCount = new AtomicLong(0);
+
+ // per-DataSource write counter
+ private AtomicLong writeCount = new AtomicLong(0);
+
+
+ /**
+ * edit by dingw at 2017.06.08
+ * @see https://github.com/MyCATApache/Mycat-Server/issues/1524
+ *
+ */
+ // current number of active connections
+ //private volatile AtomicInteger activeCount = new AtomicInteger(0);
+
+ // total number of live connections; activeCount alone is not enough because connections are created asynchronously
+ //private volatile AtomicInteger totalConnection = new AtomicInteger(0);
+
+ /**
+ * In Mycat, returnCon can be called multiple times (it is not paired one-to-one with takeCon),
+ * so activeCount and totalConnection can easily drop below zero.
+ */
+ //private static final String TAKE_CONNECTION_FLAG = "1";
+ //private ConcurrentMap takeConnectionContext = new ConcurrentHashMap<>();
+
+
+
+ public PhysicalDatasource(DBHostConfig config, DataHostConfig hostConfig,
+ boolean isReadNode) {
+ this.size = config.getMaxCon();
+ this.config = config;
+ this.name = config.getHostName();
+ this.hostConfig = hostConfig;
+ heartbeat = this.createHeartBeat();
+ this.readNode = isReadNode;
+ }
+
+ public boolean isMyConnection(BackendConnection con) {
+ if (con instanceof MySQLConnection) {
+ return ((MySQLConnection) con).getPool() == this;
+ } else {
+ return false;
+ }
+
+ }
+
+ public long getReadCount() {
+ return readCount.get();
+ }
+
+ public void setReadCount() {
+ readCount.addAndGet(1);
+ }
+
+ public long getWriteCount() {
+ return writeCount.get();
+ }
+
+ public void setWriteCount() {
+ writeCount.addAndGet(1);
+ }
+
+ public DataHostConfig getHostConfig() {
+ return hostConfig;
+ }
+
+ public boolean isReadNode() {
+ return readNode;
+ }
+
+ public int getSize() {
+ return size;
+ }
+
+ public void setDbPool(PhysicalDBPool dbPool) {
+ this.dbPool = dbPool;
+ }
+
+ public PhysicalDBPool getDbPool() {
+ return dbPool;
+ }
+
+ public abstract DBHeartbeat createHeartBeat();
+
+ public String getName() {
+ return name;
+ }
+
+ public long getExecuteCount() {
+ long executeCount = 0;
+ for (ConQueue queue : conMap.getAllConQueue()) {
+ executeCount += queue.getExecuteCount();
+
+ }
+ return executeCount;
+ }
+
+ public long getExecuteCountForSchema(String schema) {
+ return conMap.getSchemaConQueue(schema).getExecuteCount();
+
+ }
+
+ public int getActiveCountForSchema(String schema) {
+ return conMap.getActiveCountForSchema(schema, this);
+ }
+
+ public int getIdleCountForSchema(String schema) {
+ ConQueue queue = conMap.getSchemaConQueue(schema);
+ int total = 0;
+ total += queue.getAutoCommitCons().size()
+ + queue.getManCommitCons().size();
+ return total;
+ }
+
+ public DBHeartbeat getHeartbeat() {
+ return heartbeat;
+ }
+
+ public int getIdleCount() {
+ int total = 0;
+ for (ConQueue queue : conMap.getAllConQueue()) {
+ total += queue.getAutoCommitCons().size()
+ + queue.getManCommitCons().size();
+ }
+ return total;
+ }
+
+ /**
+ * this method is not exact either, since the operation is not atomic, but it is faster and more accurate than getIdleCount
+ * @return
+ */
+// public int getIdleCountSafe() {
+// return getTotalConnectionsSafe() - getActiveCountSafe();
+// }
+
+ /**
+ * whether more idle connections still need to be closed
+ * @return
+ */
+// private boolean needCloseIdleConnection() {
+// return getIdleCountSafe() > hostConfig.getMinCon();
+// }
+
+ private boolean validSchema(String schema) {
+ String theSchema = schema;
+ return theSchema != null && !"".equals(theSchema)
+ && !"snyn...".equals(theSchema);
+ }
+
+ private void checkIfNeedHeartBeat(
+ LinkedList<BackendConnection> heartBeatCons, ConQueue queue,
+ ConcurrentLinkedQueue<BackendConnection> checkLis,
+ long hearBeatTime, long hearBeatTime2) {
+ int maxConsInOneCheck = 10;
+ Iterator checkListItor = checkLis.iterator();
+ while (checkListItor.hasNext()) {
+ BackendConnection con = checkListItor.next();
+ if (con.isClosedOrQuit()) {
+ checkListItor.remove();
+ continue;
+ }
+ if (validSchema(con.getSchema())) {
+ if (con.getLastTime() < hearBeatTime
+ && heartBeatCons.size() < maxConsInOneCheck) {
+ if(checkLis.remove(con)) {
+ // if the removal succeeds, queue the connection for a heartbeat; if it fails, the connection is already in use by another thread, so skip this heartbeat check
+ con.setBorrowed(true);
+ heartBeatCons.add(con);
+ }
+ }
+ } else if (con.getLastTime() < hearBeatTime2) {
+ // a connection without a valid schema should be closed once it has been idle
+ // for more than 2*conHeartBeatPeriod
+ // likewise, remove it from the queue first so a business request cannot grab it
+ if(checkLis.remove(con)) {
+ con.close(" heart beate idle ");
+ }
+ }
+
+ }
+
+ }
+
+ public int getIndex() {
+ int currentIndex = 0;
+ for (int i = 0; i < dbPool.getSources().length; i++) {
+ PhysicalDatasource writeHostDatasource = dbPool.getSources()[i];
+ if (writeHostDatasource.getName().equals(getName())) {
+ currentIndex = i;
+ break;
+ }
+ }
+ return currentIndex;
+ }
+
+ public boolean isSalveOrRead() {
+ int currentIndex = getIndex();
+ if (currentIndex != dbPool.activedIndex || this.readNode) {
+ return true;
+ }
+ return false;
+ }
+
+ public void heatBeatCheck(long timeout, long conHeartBeatPeriod) {
+// int ildeCloseCount = hostConfig.getMinCon() * 3;
+ int maxConsInOneCheck = 5;
+ LinkedList<BackendConnection> heartBeatCons = new LinkedList<BackendConnection>();
+
+ long hearBeatTime = TimeUtil.currentTimeMillis() - conHeartBeatPeriod;
+ long hearBeatTime2 = TimeUtil.currentTimeMillis() - 2
+ * conHeartBeatPeriod;
+ for (ConQueue queue : conMap.getAllConQueue()) {
+ checkIfNeedHeartBeat(heartBeatCons, queue,
+ queue.getAutoCommitCons(), hearBeatTime, hearBeatTime2);
+ if (heartBeatCons.size() < maxConsInOneCheck) {
+ checkIfNeedHeartBeat(heartBeatCons, queue,
+ queue.getManCommitCons(), hearBeatTime, hearBeatTime2);
+ } else if (heartBeatCons.size() >= maxConsInOneCheck) {
+ break;
+ }
+ }
+
+ if (!heartBeatCons.isEmpty()) {
+ for (BackendConnection con : heartBeatCons) {
+ conHeartBeatHanler
+ .doHeartBeat(con, hostConfig.getHearbeatSQL());
+ }
+ }
+
+ // check whether any heartbeat connections have timed out
+ conHeartBeatHanler.abandTimeOuttedConns();
+ int idleCons = getIdleCount();
+ int activeCons = this.getActiveCount();
+ int createCount = (hostConfig.getMinCon() - idleCons) / 3;
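+ // grow back towards minCon gradually: create at most a third of the idle deficit per check round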
+ // create if idle too little
+ if ((createCount > 0) && (idleCons + activeCons < size)
+ && (idleCons < hostConfig.getMinCon())) {
+ createByIdleLitte(idleCons, createCount);
+ } else if (idleCons > hostConfig.getMinCon()) {
+ closeByIdleMany(idleCons - hostConfig.getMinCon());
+ } else {
+ int activeCount = this.getActiveCount();
+ if (activeCount > size) {
+ StringBuilder s = new StringBuilder();
+ s.append(Alarms.DEFAULT).append("DATASOURCE EXCEED [name=")
+ .append(name).append(",active=");
+ s.append(activeCount).append(",size=").append(size).append(']');
+ LOGGER.warn(s.toString());
+ }
+ }
+ }
+
+ /**
+ *
+ * @param ildeCloseCount
+ * First, decide how many idle connections this heartbeat round should close
+ * (current connection count minus the configured minimum), then close them one by one.
+ * Because idle heartbeat checking runs concurrently with business traffic, some connections
+ * may be taken while closing, which reduces the number of idle connections left to close.
+ *
+ * So before closing each connection, first check whether the current idle count is still above
+ * the configured minimum; if not, stop closing idle connections for this round.
+ * Before this change:
+ * ildeCloseCount connections were fetched from ConnMap and then closed; while closing, more
+ * connections might get taken, so too many connections could end up closed,
+ * leading to relatively frequent creation and closing of connections.
+ *
+ * After this change:
+ * ildeCloseCount is the expected number of connections to close.
+ * In a loop, re-check whether closing is still needed before each close, then take one idle
+ * connection at a time from ConnMap and close it.
+ * edit by dingw at 2017.06.16
+ */
+ private void closeByIdleMany(int ildeCloseCount) {
+ LOGGER.info("too many ilde cons ,close some for datasouce " + name);
+ List<BackendConnection> readyCloseCons = new ArrayList<BackendConnection>(
+ ildeCloseCount);
+ for (ConQueue queue : conMap.getAllConQueue()) {
+ readyCloseCons.addAll(queue.getIdleConsToClose(ildeCloseCount));
+ if (readyCloseCons.size() >= ildeCloseCount) {
+ break;
+ }
+ }
+
+ for (BackendConnection idleCon : readyCloseCons) {
+ if (idleCon.isBorrowed()) {
+ LOGGER.warn("find idle con is using " + idleCon);
+ }
+ idleCon.close("too many idle con");
+ }
+
+// LOGGER.info("too many ilde cons ,close some for datasouce " + name);
+//
+// Iterator conQueueIt = conMap.getAllConQueue().iterator();
+// ConQueue queue = null;
+// if(conQueueIt.hasNext()) {
+// queue = conQueueIt.next();
+// }
+//
+// for(int i = 0; i < ildeCloseCount; i ++ ) {
+//
+// if(!needCloseIdleConnection() || queue == null) {
+// break; // if the idle connection count no longer exceeds the configured minimum, stop closing connections for this round
+// }
+//
+// LOGGER.info("cur conns:" + getTotalConnectionsSafe() );
+//
+// BackendConnection idleCon = queue.takeIdleCon(false);
+//
+// while(idleCon == null && conQueueIt.hasNext()) {
+// queue = conQueueIt.next();
+// idleCon = queue.takeIdleCon(false);
+// }
+//
+// if(idleCon == null) {
+// break;
+// }
+//
+// if (idleCon.isBorrowed() ) {
+// LOGGER.warn("find idle con is using " + idleCon);
+// }
+// idleCon.close("too many idle con");
+//
+// }
+
+ }
+
+ private void createByIdleLitte(int idleCons, int createCount) {
+ LOGGER.info("create connections ,because idle connection not enough ,cur is "
+ + idleCons
+ + ", minCon is "
+ + hostConfig.getMinCon()
+ + " for "
+ + name);
+ NewConnectionRespHandler simpleHandler = new NewConnectionRespHandler();
+
+ final String[] schemas = dbPool.getSchemas();
+ for (int i = 0; i < createCount; i++) {
+ if (this.getActiveCount() + this.getIdleCount() >= size) {
+ break;
+ }
+ try {
+ // create a new connection
+ this.createNewConnection(simpleHandler, null, schemas[i
+ % schemas.length]);
+ } catch (IOException e) {
+ LOGGER.warn("create connection err " + e);
+ }
+
+ }
+ }
+
+ public int getActiveCount() {
+ return this.conMap.getActiveCountForDs(this);
+ }
+
+
+
+ public void clearCons(String reason) {
+ this.conMap.clearConnections(reason, this);
+ }
+
+ public void startHeartbeat() {
+ heartbeat.start();
+ }
+
+ public void stopHeartbeat() {
+ heartbeat.stop();
+ }
+
+ public void doHeartbeat() {
+ // skip the heartbeat if the scheduled recovery time has not been reached yet
+ if (TimeUtil.currentTimeMillis() < heartbeatRecoveryTime) {
+ return;
+ }
+
+ if (!heartbeat.isStop()) {
+ try {
+ heartbeat.heartbeat();
+ } catch (Exception e) {
+ LOGGER.error(name + " heartbeat error.", e);
+ }
+ }
+ }
+
+ private BackendConnection takeCon(BackendConnection conn,
+ final ResponseHandler handler, final Object attachment,
+ String schema) {
+
+ conn.setBorrowed(true);
+
+// if(takeConnectionContext.putIfAbsent(conn.getId(), TAKE_CONNECTION_FLAG) == null) {
+// incrementActiveCountSafe();
+// }
+
+
+ if (!conn.getSchema().equals(schema)) {
+ // the schema needs to be synced before the SQL is sent
+ conn.setSchema(schema);
+ }
+ ConQueue queue = conMap.getSchemaConQueue(schema);
+ queue.incExecuteCount();
+ conn.setAttachment(attachment);
+ conn.setLastTime(System.currentTimeMillis()); // refresh lastTime every time the connection is taken, so the front-end idle check does not close it and fail the running SQL
+ handler.connectionAcquired(conn);
+ return conn;
+ }
+
+ private void createNewConnection(final ResponseHandler handler,
+ final Object attachment, final String schema) throws IOException {
+ // asynchronously create the connection
+ MycatServer.getInstance().getBusinessExecutor().execute(new Runnable() {
+ public void run() {
+ try {
+ createNewConnection(new DelegateResponseHandler(handler) {
+ @Override
+ public void connectionError(Throwable e, BackendConnection conn) {
+ //decrementTotalConnectionsSafe(); // if the connection could not be created, decrement the current connection count
+ handler.connectionError(e, conn);
+ }
+
+ @Override
+ public void connectionAcquired(BackendConnection conn) {
+ takeCon(conn, handler, attachment, schema);
+ }
+ }, schema);
+ } catch (IOException e) {
+ handler.connectionError(e, null);
+ }
+ }
+ });
+ }
+
+ public void getConnection(String schema, boolean autocommit,
+ final ResponseHandler handler, final Object attachment)
+ throws IOException {
+
+ // try to take an already-established backend connection from the current connection map
+ BackendConnection con = this.conMap.tryTakeCon(schema, autocommit);
+ if (con != null) {
+ // if one is available, bind it to the handler of the corresponding front-end request
+ takeCon(con, handler, attachment, schema);
+ return;
+
+ } else { // this.getActiveCount is not thread-safe (strictly speaking, the count it returns is not accurate),
+// int curTotalConnection = this.totalConnection.get();
+// while(curTotalConnection + 1 <= size) {
+//
+// if (this.totalConnection.compareAndSet(curTotalConnection, curTotalConnection + 1)) {
+// LOGGER.info("no ilde connection in pool,create new connection for " + this.name + " of schema " + schema);
+// createNewConnection(handler, attachment, schema);
+// return;
+// }
+//
+// curTotalConnection = this.totalConnection.get(); // CAS update failed, so re-check whether the current connection count exceeds the maximum
+//
+// }
+//
+// // when no backend connection can be made available, fail fast by throwing a max-connections-exceeded exception
+// LOGGER.error("the max activeConnnections size can not be max than maxconnections:" + curTotalConnection);
+// throw new IOException("the max activeConnnections size can not be max than maxconnections:" + curTotalConnection);
+
+ int activeCons = this.getActiveCount();// current number of active connections
+ if (activeCons + 1 > size) {// the next connection would exceed the maximum connection count
+ LOGGER.error("the max activeConnnections size can not be max than maxconnections");
+ throw new IOException("the max activeConnnections size can not be max than maxconnections");
+ } else { // create connection
+ LOGGER.info("no ilde connection in pool,create new connection for " + this.name + " of schema " + schema);
+ createNewConnection(handler, attachment, schema);
+ }
+ }
+ }
+
+ /**
+ * whether the maximum connection count has been exceeded
+ * @return
+ */
+// private boolean exceedMaxConnections() {
+// return this.totalConnection.get() + 1 > size;
+// }
+//
+// public int decrementActiveCountSafe() {
+// return this.activeCount.decrementAndGet();
+// }
+//
+// public int incrementActiveCountSafe() {
+// return this.activeCount.incrementAndGet();
+// }
+//
+// public int getActiveCountSafe() {
+// return this.activeCount.get();
+// }
+//
+// public int getTotalConnectionsSafe() {
+// return this.totalConnection.get();
+// }
+//
+// public int decrementTotalConnectionsSafe() {
+// return this.totalConnection.decrementAndGet();
+// }
+//
+// public int incrementTotalConnectionSafe() {
+// return this.totalConnection.incrementAndGet();
+// }
+
+ private void returnCon(BackendConnection c) {
+
+ c.setAttachment(null);
+ c.setBorrowed(false);
+ c.setLastTime(TimeUtil.currentTimeMillis());
+ ConQueue queue = this.conMap.getSchemaConQueue(c.getSchema());
+
+ boolean ok = false;
+ if (c.isAutocommit()) {
+ ok = queue.getAutoCommitCons().offer(c);
+ } else {
+ ok = queue.getManCommitCons().offer(c);
+ }
+
+// if(c.getId() > 0 && takeConnectionContext.remove(c.getId(), TAKE_CONNECTION_FLAG) ) {
+// decrementActiveCountSafe();
+// }
+
+ if(!ok) {
+ LOGGER.warn("can't return to pool ,so close con " + c);
+ c.close("can't return to pool ");
+
+ }
+
+ }
+
+ public void releaseChannel(BackendConnection c) {
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("release channel " + c);
+ }
+ // release connection
+ returnCon(c);
+ }
+
+ public void connectionClosed(BackendConnection conn) {
+ ConQueue queue = this.conMap.getSchemaConQueue(conn.getSchema());
+ if (queue != null ) {
+ queue.removeCon(conn);
+ }
+
+// decrementTotalConnectionsSafe();
+ }
+
+ /**
+ * create a new backend connection
+ */
+ public abstract void createNewConnection(ResponseHandler handler, String schema) throws IOException;
+
+ /**
+ * test a connection; used during initialisation and for hot-reload configuration checks
+ */
+ public abstract boolean testConnection(String schema) throws IOException;
+
+ public long getHeartbeatRecoveryTime() {
+ return heartbeatRecoveryTime;
+ }
+
+ public void setHeartbeatRecoveryTime(long heartbeatRecoveryTime) {
+ this.heartbeatRecoveryTime = heartbeatRecoveryTime;
+ }
+
+ public DBHostConfig getConfig() {
+ return config;
+ }
+
+ public boolean isAlive() {
+ return getHeartbeat().getStatus() == DBHeartbeat.OK_STATUS;
+ }
+}
diff --git a/src/main/java/io/mycat/backend/heartbeat/DBHeartbeat.java b/src/main/java/io/mycat/backend/heartbeat/DBHeartbeat.java
index cb265c2a1..285517438 100644
--- a/src/main/java/io/mycat/backend/heartbeat/DBHeartbeat.java
+++ b/src/main/java/io/mycat/backend/heartbeat/DBHeartbeat.java
@@ -23,10 +23,11 @@
*/
package io.mycat.backend.heartbeat;
-import io.mycat.backend.HeartbeatRecorder;
-
import java.util.concurrent.atomic.AtomicBoolean;
+import io.mycat.statistic.DataSourceSyncRecorder;
+import io.mycat.statistic.HeartbeatRecorder;
+
public abstract class DBHeartbeat {
public static final int DB_SYN_ERROR = -1;
public static final int DB_SYN_NORMAL = 1;
@@ -46,6 +47,7 @@ public abstract class DBHeartbeat {
protected int errorCount;
protected volatile int status;
protected final HeartbeatRecorder recorder = new HeartbeatRecorder();
+ protected final DataSourceSyncRecorder asynRecorder = new DataSourceSyncRecorder();
private volatile Integer slaveBehindMaster;
private volatile int dbSynStatus = DB_SYN_NORMAL;
@@ -124,4 +126,8 @@ public boolean isNeedHeartbeat() {
return heartbeatSQL != null;
}
+ public DataSourceSyncRecorder getAsynRecorder() {
+ return this.asynRecorder;
+ }
+
}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyChecker.java b/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyChecker.java
index 2756fa1ab..ed8699041 100644
--- a/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyChecker.java
+++ b/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyChecker.java
@@ -23,7 +23,7 @@
*/
package io.mycat.backend.heartbeat;
-import io.mycat.backend.MySQLDataSource;
+import io.mycat.backend.mysql.nio.MySQLDataSource;
import io.mycat.server.interceptor.impl.GlobalTableUtil;
import io.mycat.sqlengine.OneRawSQLQueryResultHandler;
import io.mycat.sqlengine.SQLJob;
@@ -54,9 +54,14 @@ public class MySQLConsistencyChecker{
private String maxSQL;
private String tableName; // global table name
private long beginTime;
- private String columnExistSQL = "select count(*) as "+GlobalTableUtil.INNER_COLUMN
- + " from information_schema.columns where column_name='"
- + GlobalTableUtil.GLOBAL_TABLE_MYCAT_COLUMN + "' and table_name='";
+// private String columnExistSQL = "select count(*) as "+GlobalTableUtil.INNER_COLUMN
+// + " from information_schema.columns where column_name='"
+// + GlobalTableUtil.GLOBAL_TABLE_MYCAT_COLUMN + "' and table_name='";
+
+ // use MySQL's group_concat to fold multiple rows into a single row, mainly to simplify handling of the result
+ // the result looks like: id,name,_mycat_op_time
+ private String columnExistSQL = "select group_concat(COLUMN_NAME separator ',') as "
+ + GlobalTableUtil.INNER_COLUMN +" from information_schema.columns where TABLE_NAME='"; //user' and TABLE_SCHEMA='db1';
+ private List<SQLQueryResult<Map<String, String>>> list = new ArrayList<>();
@@ -74,34 +79,44 @@ public MySQLConsistencyChecker(MySQLDataSource source, String tableName) {
public void checkRecordCout() {
// ["db3","db2","db1"]
- this.jobCount.set(0);
- beginTime = new Date().getTime();
- String[] physicalSchemas = source.getDbPool().getSchemas();
- for(String dbName : physicalSchemas){
- MySQLConsistencyHelper detector = new MySQLConsistencyHelper(this, null);
- OneRawSQLQueryResultHandler resultHandler =
- new OneRawSQLQueryResultHandler(new String[] {GlobalTableUtil.COUNT_COLUMN}, detector);
- SQLJob sqlJob = new SQLJob(this.getCountSQL(), dbName, resultHandler, source);
- detector.setSqlJob(sqlJob);
- sqlJob.run();
- this.jobCount.incrementAndGet();
- }
+ lock.lock();
+ try{
+ this.jobCount.set(0);
+ beginTime = new Date().getTime();
+ String[] physicalSchemas = source.getDbPool().getSchemas();
+ for(String dbName : physicalSchemas){
+ MySQLConsistencyHelper detector = new MySQLConsistencyHelper(this, null);
+ OneRawSQLQueryResultHandler resultHandler =
+ new OneRawSQLQueryResultHandler(new String[] {GlobalTableUtil.COUNT_COLUMN}, detector);
+ SQLJob sqlJob = new SQLJob(this.getCountSQL(), dbName, resultHandler, source);
+ detector.setSqlJob(sqlJob);
+ sqlJob.run();
+ this.jobCount.incrementAndGet();
+ }
+ }finally{
+ lock.unlock();
+ }
}
public void checkMaxTimeStamp() {
// ["db3","db2","db1"]
- this.jobCount.set(0);
- beginTime = new Date().getTime();
- String[] physicalSchemas = source.getDbPool().getSchemas();
- for(String dbName : physicalSchemas){
- MySQLConsistencyHelper detector = new MySQLConsistencyHelper(this, null);
- OneRawSQLQueryResultHandler resultHandler =
- new OneRawSQLQueryResultHandler(new String[] {GlobalTableUtil.MAX_COLUMN}, detector);
- SQLJob sqlJob = new SQLJob(this.getMaxSQL(), dbName, resultHandler, source);
- detector.setSqlJob(sqlJob);
- sqlJob.run();
- this.jobCount.incrementAndGet();
- }
+ lock.lock();
+ try{
+ this.jobCount.set(0);
+ beginTime = new Date().getTime();
+ String[] physicalSchemas = source.getDbPool().getSchemas();
+ for(String dbName : physicalSchemas){
+ MySQLConsistencyHelper detector = new MySQLConsistencyHelper(this, null);
+ OneRawSQLQueryResultHandler resultHandler =
+ new OneRawSQLQueryResultHandler(new String[] {GlobalTableUtil.MAX_COLUMN}, detector);
+ SQLJob sqlJob = new SQLJob(this.getMaxSQL(), dbName, resultHandler, source);
+ detector.setSqlJob(sqlJob);
+ sqlJob.run();
+ this.jobCount.incrementAndGet();
+ }
+ }finally{
+ lock.unlock();
+ }
}
/**
@@ -109,20 +124,25 @@ public void checkMaxTimeStamp() {
*/
public void checkInnerColumnExist() {
// ["db3","db2","db1"]
- this.jobCount.set(0);
- beginTime = new Date().getTime();
- String[] physicalSchemas = source.getDbPool().getSchemas();
- for(String dbName : physicalSchemas){
- MySQLConsistencyHelper detector = new MySQLConsistencyHelper(this, null, 1);
- OneRawSQLQueryResultHandler resultHandler =
- new OneRawSQLQueryResultHandler(new String[] {GlobalTableUtil.INNER_COLUMN}, detector);
- String db = " and table_schema='" + dbName + "'";
- SQLJob sqlJob = new SQLJob(this.columnExistSQL + db , dbName, resultHandler, source);
- detector.setSqlJob(sqlJob);//table_schema='db1'
- LOGGER.debug(sqlJob.toString());
- sqlJob.run();
- this.jobCount.incrementAndGet();
- }
+ lock.lock();
+ try{
+ this.jobCount.set(0);
+ beginTime = new Date().getTime();
+ String[] physicalSchemas = source.getDbPool().getSchemas();
+ for(String dbName : physicalSchemas){
+ MySQLConsistencyHelper detector = new MySQLConsistencyHelper(this, null, 1);
+ OneRawSQLQueryResultHandler resultHandler =
+ new OneRawSQLQueryResultHandler(new String[] {GlobalTableUtil.INNER_COLUMN}, detector);
+ String db = " and table_schema='" + dbName + "'";
+ SQLJob sqlJob = new SQLJob(this.columnExistSQL + db , dbName, resultHandler, source);
+ detector.setSqlJob(sqlJob);//table_schema='db1'
+ LOGGER.debug(sqlJob.toString());
+ sqlJob.run();
+ this.jobCount.incrementAndGet();
+ }
+ }finally{
+ lock.unlock();
+ }
}
 public void setResult(SQLQueryResult<Map<String, String>> result) {
diff --git a/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyHelper.java b/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyHelper.java
index 21488f9f3..67b6400aa 100644
--- a/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyHelper.java
+++ b/src/main/java/io/mycat/backend/heartbeat/MySQLConsistencyHelper.java
@@ -129,14 +129,4 @@ public void setSqlJob(SQLJob sqlJob) {
this.sqlJob = sqlJob;
}
-// if(count == null){
-// LOGGER.warn(heartbeat.getCountSQL() + " execute failed in db: "
-// + result.getDataNode() +" during global table consistency heartbeat.");
-// }
-// if(maxTimestamp == null){
-// LOGGER.warn(heartbeat.getMaxSQL() + " execute failed in db: "
-// + result.getDataNode() +" during global table consistency heartbeat.");
-// }
-// return;
-// }
}
diff --git a/src/main/java/io/mycat/backend/heartbeat/MySQLDetector.java b/src/main/java/io/mycat/backend/heartbeat/MySQLDetector.java
index 0306907a0..691ae2ad7 100644
--- a/src/main/java/io/mycat/backend/heartbeat/MySQLDetector.java
+++ b/src/main/java/io/mycat/backend/heartbeat/MySQLDetector.java
@@ -2,8 +2,8 @@
* Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * This code is free software;Designed and Developed mainly by many Chinese
- * opensource volunteers. you can redistribute it and/or modify it under the
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
* terms of the GNU General Public License version 2 only, as published by the
* Free Software Foundation.
*
@@ -16,145 +16,201 @@
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Any questions about this component can be directed to it's project Web address
+ *
+ * Any questions about this component can be directed to it's project Web address
* https://code.google.com/p/opencloudb/.
*
*/
package io.mycat.backend.heartbeat;
-import io.mycat.backend.MySQLDataSource;
-import io.mycat.backend.PhysicalDBPool;
-import io.mycat.backend.PhysicalDatasource;
-import io.mycat.server.config.node.DataHostConfig;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import io.mycat.backend.datasource.PhysicalDBPool;
+import io.mycat.backend.datasource.PhysicalDatasource;
+import io.mycat.backend.mysql.nio.MySQLDataSource;
+import io.mycat.config.model.DataHostConfig;
import io.mycat.sqlengine.OneRawSQLQueryResultHandler;
import io.mycat.sqlengine.SQLJob;
import io.mycat.sqlengine.SQLQueryResult;
import io.mycat.sqlengine.SQLQueryResultListener;
import io.mycat.util.TimeUtil;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-
/**
* @author mycat
*/
-public class MySQLDetector implements
-		SQLQueryResultListener<SQLQueryResult<Map<String, String>>> {
- private MySQLHeartbeat heartbeat;
- private long heartbeatTimeout;
- private final AtomicBoolean isQuit;
- private volatile long lastSendQryTime;
- private volatile long lasstReveivedQryTime;
- private volatile SQLJob sqlJob;
- private static final String[] MYSQL_SLAVE_STAUTS_COLMS = new String[] {
- "Seconds_Behind_Master", "Slave_IO_Running", "Slave_SQL_Running" };
-
- public MySQLDetector(MySQLHeartbeat heartbeat) {
- this.heartbeat = heartbeat;
- this.isQuit = new AtomicBoolean(false);
- }
-
- public MySQLHeartbeat getHeartbeat() {
- return heartbeat;
- }
-
- public long getHeartbeatTimeout() {
- return heartbeatTimeout;
- }
-
- public void setHeartbeatTimeout(long heartbeatTimeout) {
- this.heartbeatTimeout = heartbeatTimeout;
- }
-
- public boolean isHeartbeatTimeout() {
- return TimeUtil.currentTimeMillis() > Math.max(lastSendQryTime,
- lasstReveivedQryTime) + heartbeatTimeout;
- }
-
- public long getLastSendQryTime() {
- return lastSendQryTime;
- }
-
- public long getLasstReveivedQryTime() {
- return lasstReveivedQryTime;
- }
-
- public void heartbeat() {
- lastSendQryTime = System.currentTimeMillis();
- MySQLDataSource ds = heartbeat.getSource();
- String databaseName = ds.getDbPool().getSchemas()[0];
- String[] fetchColms={};
- if (heartbeat.getSource().getHostConfig().isShowSlaveSql() ) {
- fetchColms=MYSQL_SLAVE_STAUTS_COLMS;
- }
- OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler(
- fetchColms, this);
- sqlJob = new SQLJob(heartbeat.getHeartbeatSQL(), databaseName,
- resultHandler, ds);
- sqlJob.run();
- }
-
- public void quit() {
- if (isQuit.compareAndSet(false, true)) {
- close("heart beat quit");
- }
-
- }
-
- public boolean isQuit() {
- return isQuit.get();
- }
-
- @Override
-	public void onResult(SQLQueryResult<Map<String, String>> result) {
- if (result.isSuccess()) {
- int balance = heartbeat.getSource().getDbPool().getBalance();
- PhysicalDatasource source = heartbeat.getSource();
+public class MySQLDetector implements SQLQueryResultListener<SQLQueryResult<Map<String, String>>> {
+
+ private MySQLHeartbeat heartbeat;
+
+ private long heartbeatTimeout;
+ private final AtomicBoolean isQuit;
+ private volatile long lastSendQryTime;
+ private volatile long lasstReveivedQryTime;
+ private volatile SQLJob sqlJob;
+
+ private static final String[] MYSQL_SLAVE_STAUTS_COLMS = new String[] {
+ "Seconds_Behind_Master",
+ "Slave_IO_Running",
+ "Slave_SQL_Running",
+ "Slave_IO_State",
+ "Master_Host",
+ "Master_User",
+ "Master_Port",
+ "Connect_Retry",
+ "Last_IO_Error"};
+
+ private static final String[] MYSQL_CLUSTER_STAUTS_COLMS = new String[] {
+ "Variable_name",
+ "Value"};
+
+ public MySQLDetector(MySQLHeartbeat heartbeat) {
+ this.heartbeat = heartbeat;
+ this.isQuit = new AtomicBoolean(false);
+ }
+
+ public MySQLHeartbeat getHeartbeat() {
+ return heartbeat;
+ }
+
+ public long getHeartbeatTimeout() {
+ return heartbeatTimeout;
+ }
+
+ public void setHeartbeatTimeout(long heartbeatTimeout) {
+ this.heartbeatTimeout = heartbeatTimeout;
+ }
+
+ public boolean isHeartbeatTimeout() {
+ return TimeUtil.currentTimeMillis() > Math.max(lastSendQryTime,
+ lasstReveivedQryTime) + heartbeatTimeout;
+ }
+
+ public long getLastSendQryTime() {
+ return lastSendQryTime;
+ }
+
+ public long getLasstReveivedQryTime() {
+ return lasstReveivedQryTime;
+ }
+
+ public void heartbeat() {
+ lastSendQryTime = System.currentTimeMillis();
+ MySQLDataSource ds = heartbeat.getSource();
+ String databaseName = ds.getDbPool().getSchemas()[0];
+ String[] fetchColms={};
+ if (heartbeat.getSource().getHostConfig().isShowSlaveSql() ) {
+ fetchColms=MYSQL_SLAVE_STAUTS_COLMS;
+ }
+ if (heartbeat.getSource().getHostConfig().isShowClusterSql() ) {
+ fetchColms=MYSQL_CLUSTER_STAUTS_COLMS;
+ }
+ OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler( fetchColms, this);
+ sqlJob = new SQLJob(heartbeat.getHeartbeatSQL(), databaseName, resultHandler, ds);
+ sqlJob.run();
+ }
+
+ public void quit() {
+ if (isQuit.compareAndSet(false, true)) {
+ close("heart beat quit");
+ }
+
+ }
+
+ public boolean isQuit() {
+ return isQuit.get();
+ }
+
+ @Override
+	public void onResult(SQLQueryResult<Map<String, String>> result) {
+
+ if (result.isSuccess()) {
+
+ int balance = heartbeat.getSource().getDbPool().getBalance();
+
+ PhysicalDatasource source = heartbeat.getSource();
+ int switchType = source.getHostConfig().getSwitchType();
Map<String, String> resultResult = result.getResult();
- if (source.getHostConfig().isShowSlaveSql()
- &&(source.getHostConfig().getSwitchType() == DataHostConfig.SYN_STATUS_SWITCH_DS ||
- PhysicalDBPool.BALANCE_NONE!=balance )
- )
- {
-
- String Slave_IO_Running =resultResult!=null? resultResult.get(
- "Slave_IO_Running"):null;
- String Slave_SQL_Running = resultResult!=null?resultResult.get(
- "Slave_SQL_Running"):null;
- if (Slave_IO_Running != null
- && Slave_IO_Running.equals(Slave_SQL_Running)
- && Slave_SQL_Running.equals("Yes")) {
- heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_NORMAL);
- String Seconds_Behind_Master = resultResult.get(
- "Seconds_Behind_Master");
- if (null != Seconds_Behind_Master
- && !"".equals(Seconds_Behind_Master)) {
- heartbeat.setSlaveBehindMaster(Integer
- .valueOf(Seconds_Behind_Master));
- }
- } else if(source.isSalveOrRead())
- {
- MySQLHeartbeat.LOGGER
- .warn("found MySQL master/slave Replication err !!! "
- + heartbeat.getSource().getConfig());
- heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_ERROR);
- }
-
- }
- heartbeat.setResult(MySQLHeartbeat.OK_STATUS, this, null);
- } else {
- heartbeat.setResult(MySQLHeartbeat.ERROR_STATUS, this, null);
- }
- lasstReveivedQryTime = System.currentTimeMillis();
- }
-
- public void close(String msg) {
- SQLJob curJob = sqlJob;
- if (curJob != null && !curJob.isFinished()) {
- curJob.teminate(msg);
- sqlJob = null;
- }
- }
-
-}
\ No newline at end of file
+
+ if ( resultResult!=null&& !resultResult.isEmpty() &&switchType == DataHostConfig.SYN_STATUS_SWITCH_DS
+ && source.getHostConfig().isShowSlaveSql()) {
+
+ String Slave_IO_Running = resultResult != null ? resultResult.get("Slave_IO_Running") : null;
+ String Slave_SQL_Running = resultResult != null ? resultResult.get("Slave_SQL_Running") : null;
+
+ if (Slave_IO_Running != null
+ && Slave_IO_Running.equals(Slave_SQL_Running)
+ && Slave_SQL_Running.equals("Yes")) {
+
+ heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_NORMAL);
+ String Seconds_Behind_Master = resultResult.get( "Seconds_Behind_Master");
+ if (null != Seconds_Behind_Master && !"".equals(Seconds_Behind_Master)) {
+
+ int Behind_Master = Integer.parseInt(Seconds_Behind_Master);
+ if ( Behind_Master > source.getHostConfig().getSlaveThreshold() ) {
+ MySQLHeartbeat.LOGGER.warn("found MySQL master/slave Replication delay !!! "
+ + heartbeat.getSource().getConfig() + ", binlog sync time delay: " + Behind_Master + "s" );
+ }
+ heartbeat.setSlaveBehindMaster( Behind_Master );
+ }
+
+ } else if( source.isSalveOrRead() ) {
+ //String Last_IO_Error = resultResult != null ? resultResult.get("Last_IO_Error") : null;
+ MySQLHeartbeat.LOGGER.warn("found MySQL master/slave Replication err !!! "
+ + heartbeat.getSource().getConfig() + ", " + resultResult);
+ heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_ERROR);
+ }
+
+ heartbeat.getAsynRecorder().set(resultResult, switchType);
+ heartbeat.setResult(MySQLHeartbeat.OK_STATUS, this, null);
+
+ } else if ( resultResult!=null&& !resultResult.isEmpty() && switchType==DataHostConfig.CLUSTER_STATUS_SWITCH_DS
+ && source.getHostConfig().isShowClusterSql() ) {
+
+ //String Variable_name = resultResult != null ? resultResult.get("Variable_name") : null;
+ String wsrep_cluster_status = resultResult != null ? resultResult.get("wsrep_cluster_status") : null;// Primary
+ String wsrep_connected = resultResult != null ? resultResult.get("wsrep_connected") : null;// ON
+ String wsrep_ready = resultResult != null ? resultResult.get("wsrep_ready") : null;// ON
+
+ if ("ON".equals(wsrep_connected)
+ && "ON".equals(wsrep_ready)
+ && "Primary".equals(wsrep_cluster_status)) {
+
+ heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_NORMAL);
+ heartbeat.setResult(MySQLHeartbeat.OK_STATUS, this, null);
+
+ } else {
+ MySQLHeartbeat.LOGGER.warn("found MySQL cluster status err !!! "
+ + heartbeat.getSource().getConfig()
+ + " wsrep_cluster_status: "+ wsrep_cluster_status
+ + " wsrep_connected: "+ wsrep_connected
+ + " wsrep_ready: "+ wsrep_ready
+ );
+
+ heartbeat.setDbSynStatus(DBHeartbeat.DB_SYN_ERROR);
+ heartbeat.setResult(MySQLHeartbeat.ERROR_STATUS, this, null);
+ }
+ heartbeat.getAsynRecorder().set(resultResult, switchType);
+
+ } else {
+ heartbeat.setResult(MySQLHeartbeat.OK_STATUS, this, null);
+ }
+			// monitor the replication sync status: even when switchType is -1 or 1, the master/slave state still needs to be collected
+ heartbeat.getAsynRecorder().set(resultResult, switchType);
+
+ } else {
+ heartbeat.setResult(MySQLHeartbeat.ERROR_STATUS, this, null);
+ }
+
+ lasstReveivedQryTime = System.currentTimeMillis();
+ heartbeat.getRecorder().set((lasstReveivedQryTime - lastSendQryTime));
+ }
+
+ public void close(String msg) {
+ SQLJob curJob = sqlJob;
+ if (curJob != null && !curJob.isFinished()) {
+ curJob.teminate(msg);
+ sqlJob = null;
+ }
+ }
+}
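
As a reading aid for the rewritten onResult(): the method now branches on switchType, checking the SHOW SLAVE STATUS columns for replication setups and the Galera wsrep_* variables for cluster setups. A condensed sketch of the two health tests (not the full method; res is the map returned by result.getResult()):

    Map<String, String> res = result.getResult();

    // replication path (switchType == DataHostConfig.SYN_STATUS_SWITCH_DS):
    // both replication threads must report "Yes"; Seconds_Behind_Master is then
    // compared against slaveThreshold to warn about binlog lag
    boolean slaveOk = "Yes".equals(res.get("Slave_IO_Running"))
                   && "Yes".equals(res.get("Slave_SQL_Running"));

    // Galera path (switchType == DataHostConfig.CLUSTER_STATUS_SWITCH_DS):
    // the node must be connected, ready and part of the primary component
    boolean clusterOk = "ON".equals(res.get("wsrep_connected"))
                     && "ON".equals(res.get("wsrep_ready"))
                     && "Primary".equals(res.get("wsrep_cluster_status"));

In both branches the raw map is also handed to heartbeat.getAsynRecorder(), so the sync state is recorded even when no status change is triggered.
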
diff --git a/src/main/java/io/mycat/backend/heartbeat/MySQLHeartbeat.java b/src/main/java/io/mycat/backend/heartbeat/MySQLHeartbeat.java
index bb11de2d2..1bb66a674 100644
--- a/src/main/java/io/mycat/backend/heartbeat/MySQLHeartbeat.java
+++ b/src/main/java/io/mycat/backend/heartbeat/MySQLHeartbeat.java
@@ -23,25 +23,24 @@
*/
package io.mycat.backend.heartbeat;
-import io.mycat.backend.MySQLDataSource;
-import io.mycat.backend.PhysicalDBPool;
-import io.mycat.backend.PhysicalDatasource;
-import io.mycat.server.config.node.DataHostConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.concurrent.locks.ReentrantLock;
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.datasource.PhysicalDBPool;
+import io.mycat.backend.datasource.PhysicalDatasource;
+import io.mycat.backend.mysql.nio.MySQLDataSource;
+import io.mycat.config.model.DataHostConfig;
+
/**
* @author mycat
*/
public class MySQLHeartbeat extends DBHeartbeat {
private static final int MAX_RETRY_COUNT = 5;
- public static final Logger LOGGER = LoggerFactory
- .getLogger(MySQLHeartbeat.class);
+ public static final Logger LOGGER = LoggerFactory.getLogger(MySQLHeartbeat.class);
private final MySQLDataSource source;
@@ -55,7 +54,7 @@ public MySQLHeartbeat(MySQLDataSource source) {
this.lock = new ReentrantLock(false);
this.maxRetryCount = MAX_RETRY_COUNT;
this.status = INIT_STATUS;
- this.heartbeatSQL = source.getHostConfig().getHeartbeatSQL();
+ this.heartbeatSQL = source.getHostConfig().getHearbeatSQL();
}
public MySQLDataSource getSource() {
@@ -171,9 +170,6 @@ public void setResult(int result, MySQLDetector detector, String msg) {
}
private void setOk(MySQLDetector detector) {
-
- recorder.set(detector.getLasstReveivedQryTime()
- - detector.getLastSendQryTime());
switch (status) {
case DBHeartbeat.TIMEOUT_STATUS:
this.status = DBHeartbeat.INIT_STATUS;
@@ -199,19 +195,17 @@ private void setError(MySQLDetector detector) {
// should continues check error status
if (++errorCount < maxRetryCount) {
- if (detector != null && !detector.isQuit()) {
- heartbeat(); // error count not enough, heart beat again
- }
- //return;
- } else
+ if (detector != null && !detector.isQuit()) {
+ heartbeat(); // error count not enough, heart beat again
+ }
+
+ }else
{
if (detector != null ) {
detector.quit();
}
-
this.status = ERROR_STATUS;
this.errorCount = 0;
-
}
}
@@ -243,8 +237,7 @@ private void switchSourceIfNeed(String reason) {
synchronized (pool) {
// try to see if need switch datasource
curDatasourceHB = pool.getSource().getHeartbeat().getStatus();
- if (curDatasourceHB != DBHeartbeat.INIT_STATUS
- && curDatasourceHB != DBHeartbeat.OK_STATUS) {
+ if (curDatasourceHB != DBHeartbeat.INIT_STATUS && curDatasourceHB != DBHeartbeat.OK_STATUS) {
int curIndex = pool.getActivedIndex();
int nextId = pool.next(curIndex);
PhysicalDatasource[] allWriteNodes = pool.getSources();
@@ -257,24 +250,20 @@ private void switchSourceIfNeed(String reason) {
int theSourceHBStatus = theSourceHB.getStatus();
if (theSourceHBStatus == DBHeartbeat.OK_STATUS) {
if (switchType == DataHostConfig.SYN_STATUS_SWITCH_DS) {
- if (Integer.valueOf(0).equals(
- theSourceHB.getSlaveBehindMaster())) {
- LOGGER.info("try to switch datasource ,slave is synchronized to master "
- + theSource.getConfig());
+ if (Integer.valueOf(0).equals( theSourceHB.getSlaveBehindMaster())) {
+ LOGGER.info("try to switch datasource ,slave is synchronized to master " + theSource.getConfig());
pool.switchSource(nextId, true, reason);
break;
} else {
LOGGER.warn("ignored datasource ,slave is not synchronized to master , slave behind master :"
- + theSourceHB
- .getSlaveBehindMaster()
+ + theSourceHB.getSlaveBehindMaster()
+ " " + theSource.getConfig());
}
} else {
// normal switch
- LOGGER.info("try to switch datasource ,not checked slave synchronize status "
- + theSource.getConfig());
+ LOGGER.info("try to switch datasource ,not checked slave synchronize status " + theSource.getConfig());
pool.switchSource(nextId, true, reason);
- break;
+ break;
}
}
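
The failover condition in switchSourceIfNeed is easier to see in isolation. A simplified sketch of the decision (names as in the hunk; the surrounding loop and synchronized block are omitted):

    if (theSourceHB.getStatus() == DBHeartbeat.OK_STATUS) {
        if (switchType == DataHostConfig.SYN_STATUS_SWITCH_DS) {
            // only fail over when the candidate slave has fully caught up
            if (Integer.valueOf(0).equals(theSourceHB.getSlaveBehindMaster())) {
                pool.switchSource(nextId, true, reason);
            }
            // otherwise the candidate is skipped and a warning is logged
        } else {
            // plain switch: replication lag is not checked
            pool.switchSource(nextId, true, reason);
        }
    }
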
diff --git a/src/main/java/io/mycat/backend/jdbc/JDBCConnection.java b/src/main/java/io/mycat/backend/jdbc/JDBCConnection.java
index d2972be7c..11ca234e4 100644
--- a/src/main/java/io/mycat/backend/jdbc/JDBCConnection.java
+++ b/src/main/java/io/mycat/backend/jdbc/JDBCConnection.java
@@ -1,575 +1,877 @@
-package io.mycat.backend.jdbc;
-
-import io.mycat.backend.BackendConnection;
-import io.mycat.net.BufferArray;
-import io.mycat.net.NetSystem;
-import io.mycat.route.RouteResultsetNode;
-import io.mycat.server.ErrorCode;
-import io.mycat.server.Isolations;
-import io.mycat.server.MySQLFrontConnection;
-import io.mycat.server.executors.ConnectionHeartBeatHandler;
-import io.mycat.server.executors.ResponseHandler;
-import io.mycat.server.packet.*;
-import io.mycat.server.parser.ServerParse;
-import io.mycat.server.response.ShowVariables;
-import io.mycat.util.ResultSetUtil;
-import io.mycat.util.StringUtil;
-import io.mycat.util.TimeUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-
-public class JDBCConnection implements BackendConnection {
- protected static final Logger LOGGER = LoggerFactory
- .getLogger(JDBCConnection.class);
- private JDBCDatasource pool;
- private volatile String schema;
- private volatile String dbType;
- private volatile String oldSchema;
- private byte packetId;
- private int txIsolation;
- private volatile boolean running = false;
- private volatile boolean borrowed;
- private long id = 0;
- private String host;
- private int port;
- private Connection con;
- private ResponseHandler respHandler;
- private volatile Object attachement;
-
- boolean headerOutputed = false;
- private volatile boolean modifiedSQLExecuted;
- private final long startTime;
- private long lastTime;
- private boolean isSpark = false;
-
-
- public JDBCConnection() {
- startTime = System.currentTimeMillis();
- }
-
- public Connection getCon() {
- return con;
- }
-
- public void setCon(Connection con) {
- this.con = con;
-
- }
-
- @Override
- public void close(String reason) {
- try {
- con.close();
-
- } catch (SQLException e) {
- }
-
- }
-
- public void setId(long id) {
- this.id = id;
- }
-
- public JDBCDatasource getPool() {
- return pool;
- }
-
- public void setPool(JDBCDatasource pool) {
- this.pool = pool;
- }
-
- public void setHost(String host) {
- this.host = host;
- }
-
- public void setPort(int port) {
- this.port = port;
- }
-
- @Override
- public boolean isClosed() {
- try {
- return con == null || con.isClosed();
- } catch (SQLException e) {
- return true;
- }
- }
-
- @Override
- public void idleCheck() {
- if (TimeUtil.currentTimeMillis() > lastTime
- + pool.getConfig().getIdleTimeout()) {
- close(" idle check");
- }
- }
-
- @Override
- public long getStartupTime() {
- return startTime;
- }
-
- public String getHost() {
- return this.host;
- }
-
- public int getPort() {
- return this.port;
- }
-
- public int getLocalPort() {
- return 0;
- }
-
- public long getNetInBytes() {
-
- return 0;
- }
-
- public long getNetOutBytes() {
- return 0;
- }
-
- @Override
- public boolean isModifiedSQLExecuted() {
- return modifiedSQLExecuted;
- }
-
- @Override
- public boolean isFromSlaveDB() {
- return false;
- }
-
- public String getDbType() {
- return this.dbType;
- }
-
- public void setDbType(String newDbType) {
- this.dbType = newDbType.toUpperCase();
- this.isSpark = dbType.equals("SPARK");
-
- }
-
- @Override
- public String getSchema() {
- return this.schema;
- }
-
- @Override
- public void setSchema(String newSchema) {
- this.oldSchema = this.schema;
- this.schema = newSchema;
-
- }
-
- @Override
- public long getLastTime() {
-
- return lastTime;
- }
-
- @Override
- public boolean isClosedOrQuit() {
- return this.isClosed();
- }
-
- @Override
- public void setAttachment(Object attachment) {
- this.attachement = attachment;
-
- }
-
- @Override
- public void quit() {
- this.close("client quit");
-
- }
-
- @Override
- public void setLastTime(long currentTimeMillis) {
- this.lastTime = currentTimeMillis;
-
- }
-
- @Override
- public void release() {
- modifiedSQLExecuted = false;
- setResponseHandler(null);
- pool.releaseChannel(this);
- }
-
- public void setRunning(boolean running) {
- this.running = running;
-
- }
-
- @Override
- public void setResponseHandler(ResponseHandler commandHandler) {
- respHandler = commandHandler;
- }
-
- @Override
- public void commit() {
- try {
- con.commit();
-
- this.respHandler.okResponse(OkPacket.OK, this);
- } catch (SQLException e) {
- throw new RuntimeException(e);
- }
- }
-
- private int convertNativeIsolationToJDBC(int nativeIsolation) {
- if (nativeIsolation == Isolations.REPEATED_READ) {
- return Connection.TRANSACTION_REPEATABLE_READ;
- } else if (nativeIsolation == Isolations.SERIALIZABLE) {
- return Connection.TRANSACTION_SERIALIZABLE;
- } else {
- return nativeIsolation;
- }
- }
-
- private void syncIsolation(int nativeIsolation) {
- int jdbcIsolation = convertNativeIsolationToJDBC(nativeIsolation);
- int srcJdbcIsolation = getTxIsolation();
- if (jdbcIsolation == srcJdbcIsolation)
- return;
- if ("oracle".equalsIgnoreCase(getDbType())
- && jdbcIsolation != Connection.TRANSACTION_READ_COMMITTED
- && jdbcIsolation != Connection.TRANSACTION_SERIALIZABLE) {
-			// Oracle only supports two isolation levels, and the level can only be changed once, otherwise ORA-01453 is raised
- return;
- }
- try {
- con.setTransactionIsolation(jdbcIsolation);
- } catch (SQLException e) {
- LOGGER.warn("set txisolation error:", e);
- }
- }
-
- private void executeSQL(RouteResultsetNode rrn, MySQLFrontConnection sc,
- boolean autocommit) throws IOException {
- String orgin = rrn.getStatement();
- // String sql = rrn.getStatement().toLowerCase();
- // LOGGER.info("JDBC SQL:"+orgin+"|"+sc.toString());
- if (!modifiedSQLExecuted && rrn.isModifySQL()) {
- modifiedSQLExecuted = true;
- }
-
- try {
-
- syncIsolation(sc.getTxIsolation());
- if (!this.schema.equals(this.oldSchema)) {
- con.setCatalog(schema);
- this.oldSchema = schema;
- }
- if (!this.isSpark) {
- con.setAutoCommit(autocommit);
- }
- int sqlType = rrn.getSqlType();
-
- if (sqlType == ServerParse.SELECT || sqlType == ServerParse.SHOW) {
- if ((sqlType == ServerParse.SHOW) && (!dbType.equals("MYSQL"))) {
- // showCMD(sc, orgin);
- // ShowVariables.execute(sc, orgin);
- ShowVariables.execute(sc);
-// } else if ("SELECT CONNECTION_ID()".equalsIgnoreCase(orgin)) {
-// // ShowVariables.justReturnValue(sc,String.valueOf(sc.getId()));
-// ShowVariables.justReturnValue(sc,
-// String.valueOf(sc.getId()), this);
- } else
- {
- ouputResultSet(sc, orgin);
- }
- } else {
- executeddl(sc, orgin);
- }
-
- } catch (SQLException e) {
-
- String msg = e.getMessage();
- ErrorPacket error = new ErrorPacket();
- error.packetId = ++packetId;
- error.errno = e.getErrorCode();
- error.message = msg.getBytes();
- this.respHandler.errorResponse(error.writeToBytes(), this);
- } catch (Exception e) {
- String msg = e.getMessage();
- ErrorPacket error = new ErrorPacket();
- error.packetId = ++packetId;
- error.errno = ErrorCode.ER_UNKNOWN_ERROR;
- error.message = msg.getBytes();
- this.respHandler.errorResponse(error.writeToBytes(), this);
- } finally {
- this.running = false;
- }
-
- }
-
-
-
- private void executeddl(MySQLFrontConnection sc, String sql)
- throws SQLException {
- Statement stmt = null;
- try {
- stmt = con.createStatement();
- int count = stmt.executeUpdate(sql);
- OkPacket okPck = new OkPacket();
- okPck.affectedRows = count;
- okPck.insertId = 0;
- okPck.packetId = ++packetId;
- okPck.message = " OK!".getBytes();
- this.respHandler.okResponse(okPck.writeToBytes(), this);
- } finally {
- if (stmt != null) {
- try {
- stmt.close();
- } catch (SQLException e) {
-
- }
- }
- }
- }
-
- private void ouputResultSet(MySQLFrontConnection sc, String sql)
- throws SQLException {
- ResultSet rs = null;
- Statement stmt = null;
-
- try {
- stmt = con.createStatement();
- rs = stmt.executeQuery(sql);
-
-			List<FieldPacket> fieldPks = new LinkedList<FieldPacket>();
- ResultSetUtil.resultSetToFieldPacket(sc.getCharset(), fieldPks, rs,
- this.isSpark);
- int colunmCount = fieldPks.size();
- BufferArray bufferArray = NetSystem.getInstance().getBufferPool()
- .allocateArray();
- ResultSetHeaderPacket headerPkg = new ResultSetHeaderPacket();
- headerPkg.fieldCount = fieldPks.size();
- headerPkg.packetId = ++packetId;
-
- headerPkg.write(bufferArray);
-
- byte[] header =bufferArray.writeToByteArrayAndRecycle();
-
-			List<byte[]> fields = new ArrayList<byte[]>(fieldPks.size());
-			Iterator<FieldPacket> itor = fieldPks.iterator();
- while (itor.hasNext()) {
- bufferArray = NetSystem.getInstance().getBufferPool()
- .allocateArray();
- FieldPacket curField = itor.next();
- curField.packetId = ++packetId;
- curField.write(bufferArray);
- byte[] field = bufferArray.writeToByteArrayAndRecycle();
- fields.add(field);
- itor.remove();
- }
-
- bufferArray = NetSystem.getInstance().getBufferPool()
- .allocateArray();
- EOFPacket eofPckg = new EOFPacket();
- eofPckg.packetId = ++packetId;
- eofPckg.write(bufferArray);
- byte[] eof = bufferArray.writeToByteArrayAndRecycle();
- this.respHandler.fieldEofResponse(header, fields, eof, this);
-
- // output row
- while (rs.next()) {
- bufferArray = NetSystem.getInstance().getBufferPool()
- .allocateArray();
- RowDataPacket curRow = new RowDataPacket(colunmCount);
- for (int i = 0; i < colunmCount; i++) {
- int j = i + 1;
- curRow.add(StringUtil.encode(rs.getString(j),
- sc.getCharset()));
- }
- curRow.packetId = ++packetId;
- curRow.write(bufferArray);
- byte[] row =bufferArray.writeToByteArrayAndRecycle();
- this.respHandler.rowResponse(row, this);
- }
-
- // end row
- bufferArray = NetSystem.getInstance().getBufferPool()
- .allocateArray();
- eofPckg = new EOFPacket();
- eofPckg.packetId = ++packetId;
- eofPckg.write(bufferArray);
- eof = bufferArray.writeToByteArrayAndRecycle();
- this.respHandler.rowEofResponse(eof, this);
- } finally {
- if (rs != null) {
- try {
- rs.close();
- } catch (SQLException e) {
-
- }
- }
- if (stmt != null) {
- try {
- stmt.close();
- } catch (SQLException e) {
-
- }
- }
- }
- }
-
- @Override
- public void query(final String sql) throws UnsupportedEncodingException {
- if (respHandler instanceof ConnectionHeartBeatHandler) {
- justForHeartbeat(sql);
- } else {
- throw new UnsupportedEncodingException("unsupported yet ");
- }
- }
-
- private void justForHeartbeat(String sql) {
-
- Statement stmt = null;
-
- try {
- stmt = con.createStatement();
- stmt.execute(sql);
-			if (!isAutocommit()) { // on a write host, a transactional (non-autocommit) connection has to be committed manually
- con.commit();
- }
- this.respHandler.okResponse(OkPacket.OK, this);
-
- } catch (Exception e) {
- String msg = e.getMessage();
- ErrorPacket error = new ErrorPacket();
- error.packetId = ++packetId;
- error.errno = ErrorCode.ER_UNKNOWN_ERROR;
- error.message = msg.getBytes();
- this.respHandler.errorResponse(error.writeToBytes(), this);
- } finally {
- if (stmt != null) {
- try {
- stmt.close();
- } catch (SQLException e) {
-
- }
- }
- }
- }
-
- @Override
- public Object getAttachment() {
- return this.attachement;
- }
-
- @Override
- public String getCharset() {
- return null;
- }
-
- @Override
- public void execute(final RouteResultsetNode node,
- final MySQLFrontConnection source, final boolean autocommit)
- throws IOException {
- Runnable runnable = new Runnable() {
- @Override
- public void run() {
- try {
- executeSQL(node, source, autocommit);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- }
- };
-
- NetSystem.getInstance().getExecutor().execute(runnable);
- }
-
- @Override
- public boolean syncAndExcute() {
- return true;
- }
-
- @Override
- public void rollback() {
- try {
- con.rollback();
-
- this.respHandler.okResponse(OkPacket.OK, this);
- } catch (SQLException e) {
- throw new RuntimeException(e);
- }
- }
-
- public boolean isRunning() {
- return this.running;
- }
-
- @Override
- public boolean isBorrowed() {
- return this.borrowed;
- }
-
- @Override
- public void setBorrowed(boolean borrowed) {
- this.borrowed = borrowed;
-
- }
-
- @Override
- public int getTxIsolation() {
- if (con != null) {
- try {
- return con.getTransactionIsolation();
- } catch (SQLException e) {
- return 0;
- }
- } else {
- return -1;
- }
- }
-
- @Override
- public boolean isAutocommit() {
- if (con == null) {
- return true;
- } else {
- try {
- return con.getAutoCommit();
- } catch (SQLException e) {
-
- }
- }
- return true;
- }
-
- @Override
- public long getId() {
- return id;
- }
-
- @Override
- public String toString() {
- return "JDBCConnection [id=" + id + ",autocommit="
- + this.isAutocommit() + ",pool=" + pool + ", schema=" + schema
- + ", dbType=" + dbType + ", oldSchema=" + oldSchema
- + ", packetId=" + packetId + ", txIsolation=" + txIsolation
- + ", running=" + running + ", borrowed=" + borrowed + ", host="
- + host + ", port=" + port + ", con=" + con + ", respHandler="
- + respHandler + ", attachement=" + attachement
- + ", headerOutputed=" + headerOutputed
- + ", modifiedSQLExecuted=" + modifiedSQLExecuted
- + ", startTime=" + startTime + ", lastTime=" + lastTime
- + ", isSpark=" + isSpark+"]";
- }
-
-}
+package io.mycat.backend.jdbc;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+import java.sql.*;
+import java.util.*;
+
+import io.mycat.backend.mysql.PacketUtil;
+import io.mycat.route.Procedure;
+import io.mycat.route.ProcedureParameter;
+import io.mycat.util.*;
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+
+import io.mycat.MycatServer;
+import io.mycat.backend.BackendConnection;
+import io.mycat.backend.mysql.nio.handler.ConnectionHeartBeatHandler;
+import io.mycat.backend.mysql.nio.handler.ResponseHandler;
+import io.mycat.config.ErrorCode;
+import io.mycat.config.Isolations;
+import io.mycat.net.NIOProcessor;
+import io.mycat.net.mysql.EOFPacket;
+import io.mycat.net.mysql.ErrorPacket;
+import io.mycat.net.mysql.FieldPacket;
+import io.mycat.net.mysql.OkPacket;
+import io.mycat.net.mysql.ResultSetHeaderPacket;
+import io.mycat.net.mysql.RowDataPacket;
+import io.mycat.route.RouteResultsetNode;
+import io.mycat.server.ServerConnection;
+import io.mycat.server.parser.ServerParse;
+
+public class JDBCConnection implements BackendConnection {
+ protected static final Logger LOGGER = LoggerFactory
+ .getLogger(JDBCConnection.class);
+ private JDBCDatasource pool;
+ private volatile String schema;
+ private volatile String dbType;
+ private volatile String oldSchema;
+ private byte packetId;
+ private int txIsolation;
+ private volatile boolean running = false;
+ private volatile boolean borrowed;
+ private long id = 0;
+ private String host;
+ private int port;
+ private Connection con;
+ private ResponseHandler respHandler;
+ private volatile Object attachement;
+
+ boolean headerOutputed = false;
+ private volatile boolean modifiedSQLExecuted;
+ private final long startTime;
+ private long lastTime;
+ private boolean isSpark = false;
+
+ private NIOProcessor processor;
+
+
+
+ public NIOProcessor getProcessor() {
+ return processor;
+ }
+
+ public void setProcessor(NIOProcessor processor) {
+ this.processor = processor;
+ }
+
+ public JDBCConnection() {
+ startTime = System.currentTimeMillis();
+ }
+
+ public Connection getCon() {
+ return con;
+ }
+
+ public void setCon(Connection con) {
+ this.con = con;
+
+ }
+
+ @Override
+ public void close(String reason) {
+ try {
+ con.close();
+ if(processor!=null){
+ processor.removeConnection(this);
+ }
+
+ } catch (SQLException e) {
+ }
+
+ }
+
+ public void setId(long id) {
+ this.id = id;
+ }
+
+ public JDBCDatasource getPool() {
+ return pool;
+ }
+
+ public void setPool(JDBCDatasource pool) {
+ this.pool = pool;
+ }
+
+ public void setHost(String host) {
+ this.host = host;
+ }
+
+ public void setPort(int port) {
+ this.port = port;
+ }
+
+ @Override
+ public boolean isClosed() {
+ try {
+ return con == null || con.isClosed();
+ } catch (SQLException e) {
+ return true;
+ }
+ }
+
+ @Override
+ public void idleCheck() {
+ if(TimeUtil.currentTimeMillis() > lastTime + pool.getConfig().getIdleTimeout()){
+ close(" idle check");
+ }
+ }
+
+ @Override
+ public long getStartupTime() {
+ return startTime;
+ }
+
+ @Override
+ public String getHost() {
+ return this.host;
+ }
+
+ @Override
+ public int getPort() {
+ return this.port;
+ }
+
+ @Override
+ public int getLocalPort() {
+ return 0;
+ }
+
+ @Override
+ public long getNetInBytes() {
+
+ return 0;
+ }
+
+ @Override
+ public long getNetOutBytes() {
+ return 0;
+ }
+
+ @Override
+ public boolean isModifiedSQLExecuted() {
+ return modifiedSQLExecuted;
+ }
+
+ @Override
+ public boolean isFromSlaveDB() {
+ return false;
+ }
+
+ public String getDbType() {
+ return this.dbType;
+ }
+
+ public void setDbType(String newDbType) {
+ this.dbType = newDbType.toUpperCase();
+ this.isSpark = dbType.equals("SPARK");
+
+ }
+
+ @Override
+ public String getSchema() {
+ return this.schema;
+ }
+
+ @Override
+ public void setSchema(String newSchema) {
+ this.oldSchema = this.schema;
+ this.schema = newSchema;
+
+ }
+
+ @Override
+ public long getLastTime() {
+
+ return lastTime;
+ }
+
+ @Override
+ public boolean isClosedOrQuit() {
+ return this.isClosed();
+ }
+
+ @Override
+ public void setAttachment(Object attachment) {
+ this.attachement = attachment;
+
+ }
+
+ @Override
+ public void quit() {
+ this.close("client quit");
+
+ }
+
+ @Override
+ public void setLastTime(long currentTimeMillis) {
+ this.lastTime = currentTimeMillis;
+
+ }
+
+ @Override
+ public void release() {
+ modifiedSQLExecuted = false;
+ setResponseHandler(null);
+ pool.releaseChannel(this);
+ }
+
+ public void setRunning(boolean running) {
+ this.running = running;
+
+ }
+
+ @Override
+ public boolean setResponseHandler(ResponseHandler commandHandler) {
+ respHandler = commandHandler;
+ return false;
+ }
+
+ @Override
+ public void commit() {
+ try {
+ con.commit();
+
+ this.respHandler.okResponse(OkPacket.OK, this);
+ } catch (SQLException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ private int convertNativeIsolationToJDBC(int nativeIsolation)
+ {
+ if(nativeIsolation== Isolations.REPEATED_READ)
+ {
+ return Connection.TRANSACTION_REPEATABLE_READ;
+ }else
+ if(nativeIsolation== Isolations.SERIALIZABLE)
+ {
+ return Connection.TRANSACTION_SERIALIZABLE;
+ } else
+ {
+ return nativeIsolation;
+ }
+ }
+
+
+
+ private void syncIsolation(int nativeIsolation)
+ {
+ int jdbcIsolation=convertNativeIsolationToJDBC(nativeIsolation);
+ int srcJdbcIsolation= getTxIsolation();
+ if (jdbcIsolation == srcJdbcIsolation || "oracle".equalsIgnoreCase(getDbType())
+ && jdbcIsolation != Connection.TRANSACTION_READ_COMMITTED
+ && jdbcIsolation != Connection.TRANSACTION_SERIALIZABLE) {
+ return;
+ }
+ try
+ {
+ con.setTransactionIsolation(jdbcIsolation);
+ } catch (SQLException e)
+ {
+ LOGGER.warn("set txisolation error:",e);
+ }
+ }
+ private void executeSQL(RouteResultsetNode rrn, ServerConnection sc,
+ boolean autocommit) throws IOException {
+ String orgin = rrn.getStatement();
+ // String sql = rrn.getStatement().toLowerCase();
+ // LOGGER.info("JDBC SQL:"+orgin+"|"+sc.toString());
+ if (!modifiedSQLExecuted && rrn.isModifySQL()) {
+ modifiedSQLExecuted = true;
+ }
+
+ try {
+ syncIsolation(sc.getTxIsolation()) ;
+ if (!this.schema.equals(this.oldSchema)) {
+ con.setCatalog(schema);
+ this.oldSchema = schema;
+ }
+ if (!this.isSpark) {
+ con.setAutoCommit(autocommit);
+ }
+ int sqlType = rrn.getSqlType();
+ if(rrn.isCallStatement()&&"oracle".equalsIgnoreCase(getDbType()))
+ {
+            // stored procedure calls are currently only supported on Oracle
+ ouputCallStatement(rrn,sc,orgin);
+ } else
+ if (sqlType == ServerParse.SELECT || sqlType == ServerParse.SHOW) {
+ if ((sqlType == ServerParse.SHOW) && (!dbType.equals("MYSQL"))) {
+ // showCMD(sc, orgin);
+ //ShowVariables.execute(sc, orgin);
+ ShowVariables.execute(sc, orgin,this);
+ } else if ("SELECT CONNECTION_ID()".equalsIgnoreCase(orgin)) {
+ //ShowVariables.justReturnValue(sc,String.valueOf(sc.getId()));
+ ShowVariables.justReturnValue(sc,String.valueOf(sc.getId()),this);
+ } else {
+ ouputResultSet(sc, orgin);
+ }
+ } else {
+ executeddl(sc, orgin);
+ }
+
+ } catch (SQLException e) {
+
+ String msg = e.getMessage();
+ ErrorPacket error = new ErrorPacket();
+ error.packetId = ++packetId;
+ error.errno = e.getErrorCode();
+ error.message = msg.getBytes();
+ this.respHandler.errorResponse(error.writeToBytes(sc), this);
+ }
+ catch (Exception e) {
+ String msg = e.getMessage();
+ ErrorPacket error = new ErrorPacket();
+ error.packetId = ++packetId;
+ error.errno = ErrorCode.ER_UNKNOWN_ERROR;
+ error.message = ((msg == null) ? e.toString().getBytes() : msg.getBytes());
+ String err = null;
+ if(error.message!=null){
+ err = new String(error.message);
+ }
+ LOGGER.error("sql execute error, "+ err , e);
+ this.respHandler.errorResponse(error.writeToBytes(sc), this);
+ }
+ finally {
+ this.running = false;
+ }
+
+ }
+
+ private FieldPacket getNewFieldPacket(String charset, String fieldName) {
+ FieldPacket fieldPacket = new FieldPacket();
+ fieldPacket.orgName = StringUtil.encode(fieldName, charset);
+ fieldPacket.name = StringUtil.encode(fieldName, charset);
+ fieldPacket.length = 20;
+ fieldPacket.flags = 0;
+ fieldPacket.decimals = 0;
+ int javaType = 12;
+ fieldPacket.type = (byte) (MysqlDefs.javaTypeMysql(javaType) & 0xff);
+ return fieldPacket;
+ }
+
+ private void executeddl(ServerConnection sc, String sql)
+ throws SQLException {
+ Statement stmt = null;
+ try {
+ stmt = con.createStatement();
+ int count = stmt.executeUpdate(sql);
+ OkPacket okPck = new OkPacket();
+ okPck.affectedRows = count;
+ okPck.insertId = 0;
+ okPck.packetId = ++packetId;
+ okPck.message = " OK!".getBytes();
+ this.respHandler.okResponse(okPck.writeToBytes(sc), this);
+ } finally {
+ if (stmt != null) {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+
+ }
+ }
+ }
+ }
+
+
+ private static int oracleCURSORTypeValue=-10;
+ static
+ {
+ Object cursor = ObjectUtil.getStaticFieldValue("oracle.jdbc.OracleTypes", "CURSOR");
+ if(cursor!=null) {
+ oracleCURSORTypeValue = (int) cursor;
+ }
+ }
+ private void ouputCallStatement(RouteResultsetNode rrn,ServerConnection sc, String sql)
+ throws SQLException {
+
+ CallableStatement stmt = null;
+ ResultSet rs = null;
+ try {
+ Procedure procedure = rrn.getProcedure();
+        Collection<ProcedureParameter> paramters= procedure.getParamterMap().values();
+ String callSql = procedure.toPreCallSql(null);
+ stmt = con.prepareCall(callSql);
+
+ for (ProcedureParameter paramter : paramters)
+ {
+ if((ProcedureParameter.IN.equalsIgnoreCase(paramter.getParameterType())
+ ||ProcedureParameter.INOUT.equalsIgnoreCase(paramter.getParameterType())))
+ {
+ Object value= paramter.getValue()!=null ?paramter.getValue():paramter.getName();
+ stmt.setObject(paramter.getIndex(),value);
+ }
+
+ if(ProcedureParameter.OUT.equalsIgnoreCase(paramter.getParameterType())
+ ||ProcedureParameter.INOUT.equalsIgnoreCase(paramter.getParameterType()) )
+ {
+ int jdbcType ="oracle".equalsIgnoreCase(getDbType())&& procedure.getListFields().contains(paramter.getName())?oracleCURSORTypeValue: paramter.getJdbcType();
+ stmt.registerOutParameter(paramter.getIndex(), jdbcType);
+ }
+ }
+
+ boolean hadResults= stmt.execute();
+
+ ByteBuffer byteBuf = sc.allocate();
+ if(procedure.getSelectColumns().size()>0)
+ {
+            List<FieldPacket> fieldPks = new LinkedList<FieldPacket>();
+ for (ProcedureParameter paramter : paramters)
+ {
+ if (!procedure.getListFields().contains(paramter.getName())&&(ProcedureParameter.OUT.equalsIgnoreCase(paramter.getParameterType())
+ || ProcedureParameter.INOUT.equalsIgnoreCase(paramter.getParameterType())) )
+ {
+ FieldPacket packet = PacketUtil.getField(paramter.getName(), MysqlDefs.javaTypeMysql(paramter.getJdbcType()));
+ fieldPks.add(packet);
+ }
+ }
+ int colunmCount = fieldPks.size();
+
+ ResultSetHeaderPacket headerPkg = new ResultSetHeaderPacket();
+ headerPkg.fieldCount = fieldPks.size();
+ headerPkg.packetId = ++packetId;
+
+ byteBuf = headerPkg.write(byteBuf, sc, true);
+ byteBuf.flip();
+ byte[] header = new byte[byteBuf.limit()];
+ byteBuf.get(header);
+ byteBuf.clear();
+
+
+            List<byte[]> fields = new ArrayList<byte[]>(fieldPks.size());
+            Iterator<FieldPacket> itor = fieldPks.iterator();
+ while (itor.hasNext()) {
+ FieldPacket curField = itor.next();
+ curField.packetId = ++packetId;
+ byteBuf = curField.write(byteBuf, sc, false);
+ byteBuf.flip();
+ byte[] field = new byte[byteBuf.limit()];
+ byteBuf.get(field);
+ byteBuf.clear();
+ fields.add(field);
+ itor.remove();
+ }
+ EOFPacket eofPckg = new EOFPacket();
+ eofPckg.packetId = ++packetId;
+ byteBuf = eofPckg.write(byteBuf, sc, false);
+ byteBuf.flip();
+ byte[] eof = new byte[byteBuf.limit()];
+ byteBuf.get(eof);
+ byteBuf.clear();
+ this.respHandler.fieldEofResponse(header, fields, eof, this);
+ RowDataPacket curRow = new RowDataPacket(colunmCount);
+ for (String name : procedure.getSelectColumns())
+ {
+ ProcedureParameter procedureParameter= procedure.getParamterMap().get(name);
+ curRow.add(StringUtil.encode(String.valueOf(stmt.getObject(procedureParameter.getIndex())),
+ sc.getCharset()));
+ }
+
+ curRow.packetId = ++packetId;
+ byteBuf = curRow.write(byteBuf, sc, false);
+ byteBuf.flip();
+ byte[] row = new byte[byteBuf.limit()];
+ byteBuf.get(row);
+ byteBuf.clear();
+ this.respHandler.rowResponse(row, this);
+
+ eofPckg = new EOFPacket();
+ eofPckg.packetId = ++packetId;
+ if(procedure.isResultList())
+ {
+ eofPckg.status = 42;
+ }
+ byteBuf = eofPckg.write(byteBuf, sc, false);
+ byteBuf.flip();
+ eof = new byte[byteBuf.limit()];
+ byteBuf.get(eof);
+ byteBuf.clear();
+ this.respHandler.rowEofResponse(eof, this);
+ }
+
+
+ if(procedure.isResultList())
+ {
+                List<FieldPacket> fieldPks = new LinkedList<FieldPacket>();
+ int listSize=procedure.getListFields().size();
+ for (ProcedureParameter paramter : paramters)
+ {
+ if (procedure.getListFields().contains(paramter.getName())&&(ProcedureParameter.OUT.equalsIgnoreCase(paramter.getParameterType())
+ || ProcedureParameter.INOUT.equalsIgnoreCase(paramter.getParameterType())) )
+ {
+ listSize--;
+
+ Object object = stmt.getObject(paramter.getIndex());
+ rs= (ResultSet) object;
+ if(rs==null) {
+ continue;
+ }
+ ResultSetUtil.resultSetToFieldPacket(sc.getCharset(), fieldPks, rs,
+ this.isSpark);
+
+ int colunmCount = fieldPks.size();
+ ResultSetHeaderPacket headerPkg = new ResultSetHeaderPacket();
+ headerPkg.fieldCount = fieldPks.size();
+ headerPkg.packetId = ++packetId;
+
+ byteBuf = headerPkg.write(byteBuf, sc, true);
+ byteBuf.flip();
+ byte[] header = new byte[byteBuf.limit()];
+ byteBuf.get(header);
+ byteBuf.clear();
+
+
+                    List<byte[]> fields = new ArrayList<byte[]>(fieldPks.size());
+                    Iterator<FieldPacket> itor = fieldPks.iterator();
+ while (itor.hasNext()) {
+ FieldPacket curField = itor.next();
+ curField.packetId = ++packetId;
+ byteBuf = curField.write(byteBuf, sc, false);
+ byteBuf.flip();
+ byte[] field = new byte[byteBuf.limit()];
+ byteBuf.get(field);
+ byteBuf.clear();
+ fields.add(field);
+ itor.remove();
+ }
+ EOFPacket eofPckg = new EOFPacket();
+ eofPckg.packetId = ++packetId;
+ byteBuf = eofPckg.write(byteBuf, sc, false);
+ byteBuf.flip();
+ byte[] eof = new byte[byteBuf.limit()];
+ byteBuf.get(eof);
+ byteBuf.clear();
+ this.respHandler.fieldEofResponse(header, fields, eof, this);
+
+ // output row
+ while (rs.next()) {
+ RowDataPacket curRow = new RowDataPacket(colunmCount);
+ for (int i = 0; i < colunmCount; i++) {
+ int j = i + 1;
+ curRow.add(StringUtil.encode(rs.getString(j),
+ sc.getCharset()));
+ }
+ curRow.packetId = ++packetId;
+ byteBuf = curRow.write(byteBuf, sc, false);
+ byteBuf.flip();
+ byte[] row = new byte[byteBuf.limit()];
+ byteBuf.get(row);
+ byteBuf.clear();
+ this.respHandler.rowResponse(row, this);
+ }
+ eofPckg = new EOFPacket();
+ eofPckg.packetId = ++packetId;
+ if(listSize!=0)
+ {
+ eofPckg.status = 42;
+ }
+ byteBuf = eofPckg.write(byteBuf, sc, false);
+ byteBuf.flip();
+ eof = new byte[byteBuf.limit()];
+ byteBuf.get(eof);
+ byteBuf.clear();
+ this.respHandler.rowEofResponse(eof, this);
+ }
+ }
+
+ }
+
+
+
+ if(!procedure.isResultSimpleValue())
+ {
+ byte[] OK = new byte[] { 7, 0, 0, 1, 0, 0, 0, 2, 0, 0,
+ 0 };
+ OK[3]=++packetId;
+ this.respHandler.okResponse(OK,this);
+ }
+ sc.recycle(byteBuf);
+ } finally {
+ if (rs != null) {
+ try {
+ rs.close();
+ } catch (SQLException e) {
+
+ }
+ }
+ if (stmt != null) {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+
+ }
+ }
+ }
+ }
+
+
+ private void ouputResultSet(ServerConnection sc, String sql)
+ throws SQLException {
+ ResultSet rs = null;
+ Statement stmt = null;
+
+ try {
+ stmt = con.createStatement();
+ rs = stmt.executeQuery(sql);
+
+        List<FieldPacket> fieldPks = new LinkedList<FieldPacket>();
+ ResultSetUtil.resultSetToFieldPacket(sc.getCharset(), fieldPks, rs,
+ this.isSpark);
+ int colunmCount = fieldPks.size();
+ ByteBuffer byteBuf = sc.allocate();
+ ResultSetHeaderPacket headerPkg = new ResultSetHeaderPacket();
+ headerPkg.fieldCount = fieldPks.size();
+ headerPkg.packetId = ++packetId;
+
+ byteBuf = headerPkg.write(byteBuf, sc, true);
+ byteBuf.flip();
+ byte[] header = new byte[byteBuf.limit()];
+ byteBuf.get(header);
+ byteBuf.clear();
+        List<byte[]> fields = new ArrayList<byte[]>(fieldPks.size());
+        Iterator<FieldPacket> itor = fieldPks.iterator();
+ while (itor.hasNext()) {
+ FieldPacket curField = itor.next();
+ curField.packetId = ++packetId;
+ byteBuf = curField.write(byteBuf, sc, false);
+ byteBuf.flip();
+ byte[] field = new byte[byteBuf.limit()];
+ byteBuf.get(field);
+ byteBuf.clear();
+ fields.add(field);
+ }
+ EOFPacket eofPckg = new EOFPacket();
+ eofPckg.packetId = ++packetId;
+ byteBuf = eofPckg.write(byteBuf, sc, false);
+ byteBuf.flip();
+ byte[] eof = new byte[byteBuf.limit()];
+ byteBuf.get(eof);
+ byteBuf.clear();
+ this.respHandler.fieldEofResponse(header, fields, eof, this);
+
+ // output row
+ while (rs.next()) {
+ RowDataPacket curRow = new RowDataPacket(colunmCount);
+ for (int i = 0; i < colunmCount; i++) {
+ int j = i + 1;
+ if(MysqlDefs.isBianry((byte) fieldPks.get(i).type)) {
+ curRow.add(rs.getBytes(j));
+ } else if(fieldPks.get(i).type == MysqlDefs.FIELD_TYPE_DECIMAL ||
+ fieldPks.get(i).type == (MysqlDefs.FIELD_TYPE_NEW_DECIMAL - 256)) { // field type is unsigned byte
+                        // make sure the value is never rendered in scientific notation
+ BigDecimal val = rs.getBigDecimal(j);
+ curRow.add(StringUtil.encode(val != null ? val.toPlainString() : null,
+ sc.getCharset()));
+ } else {
+ curRow.add(StringUtil.encode(rs.getString(j),
+ sc.getCharset()));
+ }
+
+ }
+ curRow.packetId = ++packetId;
+ byteBuf = curRow.write(byteBuf, sc, false);
+ byteBuf.flip();
+ byte[] row = new byte[byteBuf.limit()];
+ byteBuf.get(row);
+ byteBuf.clear();
+ this.respHandler.rowResponse(row, this);
+ }
+
+ fieldPks.clear();
+
+ // end row
+ eofPckg = new EOFPacket();
+ eofPckg.packetId = ++packetId;
+ byteBuf = eofPckg.write(byteBuf, sc, false);
+ byteBuf.flip();
+ eof = new byte[byteBuf.limit()];
+ byteBuf.get(eof);
+ sc.recycle(byteBuf);
+ this.respHandler.rowEofResponse(eof, this);
+ } finally {
+ if (rs != null) {
+ try {
+ rs.close();
+ } catch (SQLException e) {
+
+ }
+ }
+ if (stmt != null) {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+
+ }
+ }
+ }
+ }
+
+ @Override
+ public void query(final String sql) throws UnsupportedEncodingException {
+ if(respHandler instanceof ConnectionHeartBeatHandler)
+ {
+ justForHeartbeat(sql);
+ } else
+ {
+ throw new UnsupportedEncodingException("unsupported yet ");
+ }
+ }
+ private void justForHeartbeat(String sql)
+ {
+
+ Statement stmt = null;
+
+ try {
+ stmt = con.createStatement();
+ stmt.execute(sql);
+            if(!isAutocommit()){ // on a write host, a transactional (non-autocommit) connection has to be committed manually
+ con.commit();
+ }
+ this.respHandler.okResponse(OkPacket.OK, this);
+
+ }
+ catch (Exception e)
+ {
+ String msg = e.getMessage();
+ ErrorPacket error = new ErrorPacket();
+ error.packetId = ++packetId;
+ error.errno = ErrorCode.ER_UNKNOWN_ERROR;
+ error.message = msg.getBytes();
+ this.respHandler.errorResponse(error.writeToBytes(), this);
+ }
+ finally {
+ if (stmt != null) {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+
+ }
+ }
+ }
+ }
+ @Override
+ public Object getAttachment() {
+ return this.attachement;
+ }
+
+ @Override
+ public String getCharset() {
+ return null;
+ }
+
+ @Override
+ public void execute(final RouteResultsetNode node,
+ final ServerConnection source, final boolean autocommit)
+ throws IOException {
+ Runnable runnable = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ executeSQL(node, source, autocommit);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+
+ MycatServer.getInstance().getBusinessExecutor().execute(runnable);
+ }
+
+ @Override
+ public void recordSql(String host, String schema, String statement) {
+
+ }
+
+ @Override
+ public boolean syncAndExcute() {
+ return true;
+ }
+
+ @Override
+ public void rollback() {
+ try {
+ con.rollback();
+
+ this.respHandler.okResponse(OkPacket.OK, this);
+ } catch (SQLException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public boolean isRunning() {
+ return this.running;
+ }
+
+ @Override
+ public boolean isBorrowed() {
+ return this.borrowed;
+ }
+
+ @Override
+ public void setBorrowed(boolean borrowed) {
+ this.borrowed = borrowed;
+
+ }
+
+ @Override
+ public int getTxIsolation() {
+ if (con != null) {
+ try {
+ return con.getTransactionIsolation();
+ } catch (SQLException e) {
+ return 0;
+ }
+ } else {
+ return -1;
+ }
+ }
+
+ @Override
+ public boolean isAutocommit() {
+ if (con == null) {
+ return true;
+ } else {
+ try {
+ return con.getAutoCommit();
+ } catch (SQLException e) {
+
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public long getId() {
+ return id;
+ }
+
+ @Override
+ public String toString() {
+ return "JDBCConnection [id=" + id +",autocommit="+this.isAutocommit()+",pool=" + pool + ", schema=" + schema + ", dbType=" + dbType + ", oldSchema="
+ + oldSchema + ", packetId=" + packetId + ", txIsolation=" + txIsolation + ", running=" + running
+ + ", borrowed=" + borrowed + ", host=" + host + ", port=" + port + ", con=" + con
+ + ", respHandler=" + respHandler + ", attachement=" + attachement + ", headerOutputed="
+ + headerOutputed + ", modifiedSQLExecuted=" + modifiedSQLExecuted + ", startTime=" + startTime
+ + ", lastTime=" + lastTime + ", isSpark=" + isSpark + ", processor=" + processor + "]";
+ }
+
+ @Override
+ public void discardClose(String reason) {
+ // TODO Auto-generated method stub
+
+ }
+
+
+
+}
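
For readers new to the JDBC bridge: ouputResultSet and ouputCallStatement emulate the MySQL text protocol on top of a plain java.sql.ResultSet. The packet sequence they emit, and the DECIMAL handling added in this patch, look roughly like this (sketch only; buffer allocation and recycling are omitted):

    // packet order for one result set, each packet with an increasing packetId:
    //   ResultSetHeaderPacket (column count) -> one FieldPacket per column -> EOFPacket
    //   -> one RowDataPacket per row -> EOFPacket

    // DECIMAL / NEW_DECIMAL columns are rendered with toPlainString() so the client
    // never receives scientific notation such as 1E+3
    BigDecimal val = rs.getBigDecimal(j);
    curRow.add(StringUtil.encode(val != null ? val.toPlainString() : null, sc.getCharset()));
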
diff --git a/src/main/java/io/mycat/backend/jdbc/JDBCDatasource.java b/src/main/java/io/mycat/backend/jdbc/JDBCDatasource.java
index 4a8ccd91b..597c50ec3 100644
--- a/src/main/java/io/mycat/backend/jdbc/JDBCDatasource.java
+++ b/src/main/java/io/mycat/backend/jdbc/JDBCDatasource.java
@@ -1,54 +1,48 @@
package io.mycat.backend.jdbc;
-import io.mycat.backend.PhysicalDatasource;
-import io.mycat.backend.heartbeat.DBHeartbeat;
-import io.mycat.net.ConnectIdGenerator;
-import io.mycat.server.config.loader.LocalLoader;
-import io.mycat.server.config.node.DBHostConfig;
-import io.mycat.server.config.node.DataHostConfig;
-import io.mycat.server.config.node.JdbcDriver;
-import io.mycat.server.executors.ResponseHandler;
-
import java.io.IOException;
import java.sql.Connection;
-import java.sql.Driver;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
-import java.util.Enumeration;
-import java.util.Map;
+import java.util.List;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import com.google.common.collect.Lists;
+
+import io.mycat.MycatServer;
+import io.mycat.backend.datasource.PhysicalDatasource;
+import io.mycat.backend.heartbeat.DBHeartbeat;
+import io.mycat.backend.mysql.nio.handler.ResponseHandler;
+import io.mycat.config.model.DBHostConfig;
+import io.mycat.config.model.DataHostConfig;
+import io.mycat.net.NIOConnector;
+import io.mycat.net.NIOProcessor;
public class JDBCDatasource extends PhysicalDatasource {
- public static final Logger logger = LoggerFactory.getLogger(JDBCDatasource.class);
-	private static Map<String, JdbcDriver> jdbcDriverConfig = null;
-	static { // at most three or four database types, so load all driver classes up front
- jdbcDriverConfig = LocalLoader.loadJdbcDriverConfig();
- if(jdbcDriverConfig != null && jdbcDriverConfig.size() > 0){
- for(String key : jdbcDriverConfig.keySet()){
- JdbcDriver driver = jdbcDriverConfig.get(key);
- if(driver != null && StringUtils.isNotBlank(driver.getClassName())){
- try {
- Class.forName(driver.getClassName());
- } catch (ClassNotFoundException e) {
- logger.error("Class.forName load jdbcDriver for "+key+" error: " + e.getMessage());
- }
- }else{
- logger.error(" driver for " + key + " is not exist or className has no value,"
- + " please check jdbcDriver-config element in mycat.xml.");
- }
+ static {
+		// preload the JDBC drivers that may be present on the classpath
+		List<String> drivers = Lists.newArrayList(
+ "com.mysql.jdbc.Driver",
+ "io.mycat.backend.jdbc.mongodb.MongoDriver",
+ "io.mycat.backend.jdbc.sequoiadb.SequoiaDriver",
+ "oracle.jdbc.OracleDriver",
+ "com.microsoft.sqlserver.jdbc.SQLServerDriver",
+ "net.sourceforge.jtds.jdbc.Driver",
+ "org.apache.hive.jdbc.HiveDriver",
+ "com.ibm.db2.jcc.DB2Driver",
+ "org.postgresql.Driver");
+
+ for (String driver : drivers) {
+ try {
+ Class.forName(driver);
+ } catch (ClassNotFoundException ignored) {
}
}
}
-
- public JDBCDatasource(DBHostConfig config, DataHostConfig hostConfig,
- boolean isReadNode) {
+
+ public JDBCDatasource(DBHostConfig config, DataHostConfig hostConfig, boolean isReadNode) {
super(config, hostConfig, isReadNode);
-
}
@Override
@@ -57,69 +51,75 @@ public DBHeartbeat createHeartBeat() {
}
@Override
- public void createNewConnection(ResponseHandler handler, String schema)
- throws IOException {
+ public void createNewConnection(ResponseHandler handler,String schema) throws IOException {
DBHostConfig cfg = getConfig();
+ JDBCConnection c = new JDBCConnection();
+ c.setHost(cfg.getIp());
+ c.setPort(cfg.getPort());
+ c.setPool(this);
+ c.setSchema(schema);
+ c.setDbType(cfg.getDbType());
- JDBCConnection c = null;
+ NIOProcessor processor = (NIOProcessor) MycatServer.getInstance().nextProcessor();
+ c.setProcessor(processor);
+        c.setId(NIOConnector.ID_GENERATOR.getId()); // reuse the MySQL backend ID generator; the connection has to be registered with the processor
+
+ processor.addBackend(c);
try {
-			// TODO: a connection pool still needs to be implemented here
Connection con = getConnection();
- c = new JDBCConnection();
- c.setHost(cfg.getIp());
- c.setPort(cfg.getPort());
- c.setPool(this);
- c.setSchema(schema);
- c.setDbType(cfg.getDbType());
- c.setId(ConnectIdGenerator.getINSTNCE().getId()); // 复用mysql的Backend的ID,需要在process中存储
-
// c.setIdleTimeout(pool.getConfig().getIdleTimeout());
c.setCon(con);
// notify handler
handler.connectionAcquired(c);
-
} catch (Exception e) {
handler.connectionError(e, c);
}
+ }
+
+ @Override
+ public boolean testConnection(String schema) throws IOException {
+ boolean isConnected = false;
+
+ Connection connection = null;
+ Statement statement = null;
+ try {
+ DBHostConfig cfg = getConfig();
+ connection = DriverManager.getConnection(cfg.getUrl(), cfg.getUser(), cfg.getPassword());
+ statement = connection.createStatement();
+ if (connection != null && statement != null) {
+ isConnected = true;
+ }
+ } catch (SQLException e) {
+ e.printStackTrace();
+ } finally {
+ if (statement != null) {
+ try { statement.close(); } catch (SQLException e) {}
+ }
+
+ if (connection != null) {
+ try { connection.close(); } catch (SQLException e) {}
+ }
+ }
+ return isConnected;
}
- Connection getConnection() throws SQLException {
- DBHostConfig cfg = getConfig();
-		Enumeration<Driver> drivers = DriverManager.getDrivers();
- Driver d = drivers.nextElement();
- d.getClass().getName();
- Connection connection = DriverManager.getConnection(cfg.getUrl(),
- cfg.getUser(), cfg.getPassword());
- String initSql = getHostConfig().getConnectionInitSql();
- if (StringUtils.isNotBlank(initSql)) {
- try (Statement statement = connection.createStatement()){
+ Connection getConnection() throws SQLException {
+ DBHostConfig cfg = getConfig();
+ Connection connection = DriverManager.getConnection(cfg.getUrl(), cfg.getUser(), cfg.getPassword());
+ String initSql=getHostConfig().getConnectionInitSql();
+ if (initSql != null && !"".equals(initSql)) {
+ Statement statement = null;
+ try {
+ statement = connection.createStatement();
statement.execute(initSql);
- } catch(SQLException e) {
- logger.warn(" getConnection error: " + e.getMessage());
+ } finally {
+ if (statement != null) {
+ statement.close();
+ }
}
}
return connection;
- }
-
- /**
-	 * look up the JdbcDriver registered for the given dbType
- * @param dbType mysql
- * @return JdbcDriver: {'mysql':'com.mysql.jdbc.Driver'}
- */
- public static JdbcDriver getJdbcDriverBydbType(String dbType){
- if(StringUtils.isNotBlank(dbType)){
-			return jdbcDriverConfig.get(dbType.toLowerCase()); // fetch the JdbcDriver registered for this dbType
- }
- return null;
- }
-
- public static Map getJdbcDriverConfig() {
- return jdbcDriverConfig;
- }
-
- public static void setJdbcDriverConfig(Map jdbcDriverConfig) {
- JDBCDatasource.jdbcDriverConfig = jdbcDriverConfig;
- }
-
+ }
+
}
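
The new testConnection() opens a raw DriverManager connection and closes it again in finally blocks. Since the build now targets Java 8, the same check could also be written with try-with-resources; a sketch, not the committed code:

    public boolean testConnection(String schema) {
        DBHostConfig cfg = getConfig();
        try (Connection con = DriverManager.getConnection(cfg.getUrl(), cfg.getUser(), cfg.getPassword());
             Statement stmt = con.createStatement()) {
            return true;   // both the connection and a statement could be created
        } catch (SQLException e) {
            return false;  // any failure means the data source is unreachable
        }
    }
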
diff --git a/src/main/java/io/mycat/backend/jdbc/JDBCHeartbeat.java b/src/main/java/io/mycat/backend/jdbc/JDBCHeartbeat.java
index 4194c983f..85b2b2524 100644
--- a/src/main/java/io/mycat/backend/jdbc/JDBCHeartbeat.java
+++ b/src/main/java/io/mycat/backend/jdbc/JDBCHeartbeat.java
@@ -1,33 +1,33 @@
package io.mycat.backend.jdbc;
-import io.mycat.backend.HeartbeatRecorder;
-import io.mycat.backend.heartbeat.DBHeartbeat;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import java.sql.Connection;
+import java.sql.SQLException;
import java.sql.Statement;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.concurrent.locks.ReentrantLock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.heartbeat.DBHeartbeat;
+import io.mycat.statistic.HeartbeatRecorder;
+
public class JDBCHeartbeat extends DBHeartbeat{
private final ReentrantLock lock;
private final JDBCDatasource source;
private final boolean heartbeatnull;
private Long lastSendTime = System.currentTimeMillis();
private Long lastReciveTime = System.currentTimeMillis();
-
-
- private static final Logger logger = LoggerFactory
- .getLogger(JDBCHeartbeat.class);
+
+
+ private Logger logger = LoggerFactory.getLogger(this.getClass());
public JDBCHeartbeat(JDBCDatasource source)
{
this.source = source;
lock = new ReentrantLock(false);
this.status = INIT_STATUS;
- this.heartbeatSQL = source.getHostConfig().getHeartbeatSQL().trim();
+ this.heartbeatSQL = source.getHostConfig().getHearbeatSQL().trim();
this.heartbeatnull= heartbeatSQL.length()==0;
}
@@ -88,8 +88,9 @@ public HeartbeatRecorder getRecorder() {
public void heartbeat()
{
- if (isStop.get())
+ if (isStop.get()) {
return;
+ }
lastSendTime = System.currentTimeMillis();
lock.lock();
try
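The heartbeat above boils down to executing the configured heartbeat SQL on a backend connection and mapping success or failure onto a status; a minimal sketch of that core step, assuming a plain java.sql.Connection rather than the pool's JDBCConnection and ignoring the DBHeartbeat status constants and locking.

import java.sql.Connection;
import java.sql.Statement;

public class HeartbeatSketch {
    // Returns true when the heartbeat SQL executes without error (the real class would set OK_STATUS).
    public static boolean heartbeatOnce(Connection con, String heartbeatSQL) {
        if (heartbeatSQL == null || heartbeatSQL.trim().length() == 0) {
            return true; // an empty heartbeat SQL effectively disables the check
        }
        try (Statement stmt = con.createStatement()) {
            stmt.execute(heartbeatSQL.trim());
            return true;
        } catch (Exception e) {
            return false; // the real class would set ERROR_STATUS
        }
    }
}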
diff --git a/src/main/java/io/mycat/backend/jdbc/ShowVariables.java b/src/main/java/io/mycat/backend/jdbc/ShowVariables.java
new file mode 100644
index 000000000..77130c483
--- /dev/null
+++ b/src/main/java/io/mycat/backend/jdbc/ShowVariables.java
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.backend.jdbc;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.BackendConnection;
+import io.mycat.backend.mysql.PacketUtil;
+import io.mycat.config.Fields;
+import io.mycat.net.mysql.EOFPacket;
+import io.mycat.net.mysql.FieldPacket;
+import io.mycat.net.mysql.ResultSetHeaderPacket;
+import io.mycat.net.mysql.RowDataPacket;
+import io.mycat.server.NonBlockingSession;
+import io.mycat.server.ServerConnection;
+import io.mycat.util.StringUtil;
+
+/**
+ * @author mycat
+ */
+public final class ShowVariables
+{
+ private static final Logger LOGGER = LoggerFactory.getLogger(ShowVariables.class);
+ private static final int FIELD_COUNT = 2;
+ private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT);
+ private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT];
+ private static final EOFPacket eof = new EOFPacket();
+ private static final Pattern pattern = Pattern.compile("(?:like|=)\\s*'([^']*(?:\\w+)+[^']*)+'",Pattern.CASE_INSENSITIVE);
+ static {
+ int i = 0;
+ byte packetId = 0;
+ header.packetId = ++packetId;
+
+ fields[i] = PacketUtil.getField("VARIABLE_NAME", Fields.FIELD_TYPE_VAR_STRING);
+ fields[i++].packetId = ++packetId;
+
+ fields[i] = PacketUtil.getField("VALUE", Fields.FIELD_TYPE_VAR_STRING);
+ fields[i++].packetId = ++packetId;
+
+ eof.packetId = ++packetId;
+ }
+ private static List<String> parseVariable(String sql)
+ {
+ List<String> variableList = new ArrayList<>();
+ Matcher matcher = pattern.matcher(sql);
+ while (matcher.find())
+ {
+ variableList.add(matcher.group(1));
+ }
+ return variableList;
+ }
+ public static void execute(ServerConnection c, String sql) {
+ ByteBuffer buffer = c.allocate();
+
+ // write header
+ buffer = header.write(buffer, c,true);
+
+ // write fields
+ for (FieldPacket field : fields) {
+ buffer = field.write(buffer, c,true);
+ }
+
+ // write eof
+ buffer = eof.write(buffer, c,true);
+
+ // write rows
+ byte packetId = eof.packetId;
+
+ List<String> variableList = parseVariable(sql);
+ for (String key : variableList)
+ {
+ String value= variables.get(key) ;
+ if(value!=null)
+ {
+ RowDataPacket row = getRow(key, value, c.getCharset());
+ row.packetId = ++packetId;
+ buffer = row.write(buffer, c,true);
+ }
+ }
+
+
+
+ // write lastEof
+ EOFPacket lastEof = new EOFPacket();
+ lastEof.packetId = ++packetId;
+ buffer = lastEof.write(buffer, c,true);
+
+ // write buffer
+ c.write(buffer);
+ }
+
+ public static void justReturnValue(ServerConnection c, String value) {
+ ByteBuffer buffer = c.allocate();
+
+ // write header
+ buffer = header.write(buffer, c,true);
+
+ // write fields
+ for (FieldPacket field : fields) {
+ buffer = field.write(buffer, c,true);
+ }
+
+ // write eof
+ buffer = eof.write(buffer, c,true);
+
+ // write rows
+ byte packetId = eof.packetId;
+
+
+
+ if(value!=null)
+ {
+
+ RowDataPacket row = new RowDataPacket(1);
+ row.add(StringUtil.encode(value, c.getCharset()));
+ row.packetId = ++packetId;
+ buffer = row.write(buffer, c,true);
+ }
+
+
+
+ // write lastEof
+ EOFPacket lastEof = new EOFPacket();
+ lastEof.packetId = ++packetId;
+ buffer = lastEof.write(buffer, c,true);
+
+ // write buffer
+ c.write(buffer);
+ }
+
+ private static RowDataPacket getRow(String name, String value, String charset) {
+ RowDataPacket row = new RowDataPacket(FIELD_COUNT);
+ row.add(StringUtil.encode(name, charset));
+ row.add(StringUtil.encode(value, charset));
+ return row;
+ }
+
+ private static final Map<String, String> variables = new HashMap<String, String>();
+ static {
+ variables.put("character_set_client", "utf8");
+ variables.put("character_set_connection", "utf8");
+ variables.put("character_set_results", "utf8");
+ variables.put("character_set_server", "utf8");
+ variables.put("init_connect", "");
+ variables.put("interactive_timeout", "172800");
+ variables.put("lower_case_table_names", "1");
+ variables.put("max_allowed_packet", "16777216");
+ variables.put("net_buffer_length", "16384");
+ variables.put("net_write_timeout", "60");
+ variables.put("query_cache_size", "0");
+ variables.put("query_cache_type", "OFF");
+ variables.put("sql_mode", "STRICT_TRANS_TABLES");
+ variables.put("system_time_zone", "CST");
+ variables.put("time_zone", "SYSTEM");
+ variables.put("tx_isolation", "REPEATABLE-READ");
+ variables.put("wait_timeout", "172800");
+ }
+
+ public static void execute(ServerConnection sc, String orgin, BackendConnection jdbcConnection) {
+ execute(sc, orgin);
+ NonBlockingSession session = sc.getSession2();
+ session.releaseConnectionIfSafe(jdbcConnection, LOGGER.isDebugEnabled(), false);
+ }
+ public static void justReturnValue(ServerConnection sc, String orgin, BackendConnection jdbcConnection) {
+ justReturnValue(sc, orgin);
+ NonBlockingSession session = sc.getSession2();
+ session.releaseConnectionIfSafe(jdbcConnection, LOGGER.isDebugEnabled(), false);
+ }
+}
\ No newline at end of file
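A small sketch of what parseVariable() extracts with the pattern above: the quoted variable name following LIKE or '=' in a SHOW VARIABLES statement, which execute() then looks up in the variables map. The sample statement is illustrative.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ParseVariableSketch {
    public static void main(String[] args) {
        Pattern pattern = Pattern.compile("(?:like|=)\\s*'([^']*(?:\\w+)+[^']*)+'", Pattern.CASE_INSENSITIVE);
        Matcher matcher = pattern.matcher("SHOW VARIABLES LIKE 'max_allowed_packet'");
        while (matcher.find()) {
            System.out.println(matcher.group(1)); // prints: max_allowed_packet
        }
    }
}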
diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/DriverPropertyInfoHelper.java b/src/main/java/io/mycat/backend/jdbc/mongodb/DriverPropertyInfoHelper.java
index 483e0ef99..43dcb529f 100644
--- a/src/main/java/io/mycat/backend/jdbc/mongodb/DriverPropertyInfoHelper.java
+++ b/src/main/java/io/mycat/backend/jdbc/mongodb/DriverPropertyInfoHelper.java
@@ -1,70 +1,70 @@
-package io.mycat.backend.jdbc.mongodb;
-
-import java.sql.DriverPropertyInfo;
-import java.util.ArrayList;
-
-
-public class DriverPropertyInfoHelper{
-
- public static final String AUTO_CONNECT_RETRY = "autoConnectRetry";
-
- public static final String CONNECTIONS_PER_HOST = "connecionsPerHost";
-
- public static final String CONNECT_TIMEOUT = "connectTimeout";
-
- public static final String CURSOR_FINALIZER_ENABLED = "cursorFinalizerEnabled";
-
- public static final String MAX_AUTO_CONNECT_RETRY_TIME = "maxAutoConnectRetryTime";
-
- public static final String READ_PREFERENCE = "readPreference";
-
- public static final String SOCKET_TIMEOUT = "socketTimeout";
-
- public DriverPropertyInfo[] getPropertyInfo()
- {
- ArrayList propInfos = new ArrayList();
-
- addPropInfo(
- propInfos,
- AUTO_CONNECT_RETRY,
- "false",
- "If true, the driver will keep trying to connect to the same server in case that the socket "
- + "cannot be established. There is maximum amount of time to keep retrying, which is 15s by "
- + "default.", null);
-
- addPropInfo(propInfos, CONNECTIONS_PER_HOST, "10", "The maximum number of connections allowed per "
- + "host for this Mongo instance. Those connections will be kept in a pool when idle.", null);
-
- addPropInfo(propInfos, CONNECT_TIMEOUT, "10000", "The connection timeout in milliseconds. ", null);
-
- addPropInfo(propInfos, CURSOR_FINALIZER_ENABLED, "true", "Sets whether there is a a finalize "
- + "method created that cleans up instances of DBCursor that the client does not close.",
- null);
-
- addPropInfo(propInfos, MAX_AUTO_CONNECT_RETRY_TIME, "0",
- "The maximum amount of time in MS to spend retrying to open connection to the same server."
- + "Default is 0, which means to use the default 15s if autoConnectRetry is on.", null);
-
- addPropInfo(propInfos, READ_PREFERENCE, "primary",
- "represents preferred replica set members to which a query or command can be sent", new String[] {
- "primary", "primary preferred", "secondary", "secondary preferred", "nearest" });
-
- addPropInfo(propInfos, SOCKET_TIMEOUT, "0", "The socket timeout in milliseconds It is used for "
- + "I/O socket read and write operations "
- + "Socket.setSoTimeout(int) Default is 0 and means no timeout.", null);
-
- return propInfos.toArray(new DriverPropertyInfo[propInfos.size()]);
- }
-
- private void addPropInfo(final ArrayList propInfos, final String propName,
- final String defaultVal, final String description, final String[] choices)
- {
- DriverPropertyInfo newProp = new DriverPropertyInfo(propName, defaultVal);
- newProp.description = description;
- if (choices != null)
- {
- newProp.choices = choices;
- }
- propInfos.add(newProp);
- }
+package io.mycat.backend.jdbc.mongodb;
+
+import java.sql.DriverPropertyInfo;
+import java.util.ArrayList;
+
+
+public class DriverPropertyInfoHelper{
+
+ public static final String AUTO_CONNECT_RETRY = "autoConnectRetry";
+
+ public static final String CONNECTIONS_PER_HOST = "connecionsPerHost";
+
+ public static final String CONNECT_TIMEOUT = "connectTimeout";
+
+ public static final String CURSOR_FINALIZER_ENABLED = "cursorFinalizerEnabled";
+
+ public static final String MAX_AUTO_CONNECT_RETRY_TIME = "maxAutoConnectRetryTime";
+
+ public static final String READ_PREFERENCE = "readPreference";
+
+ public static final String SOCKET_TIMEOUT = "socketTimeout";
+
+ public DriverPropertyInfo[] getPropertyInfo()
+ {
+ ArrayList propInfos = new ArrayList();
+
+ addPropInfo(
+ propInfos,
+ AUTO_CONNECT_RETRY,
+ "false",
+ "If true, the driver will keep trying to connect to the same server in case that the socket "
+ + "cannot be established. There is maximum amount of time to keep retrying, which is 15s by "
+ + "default.", null);
+
+ addPropInfo(propInfos, CONNECTIONS_PER_HOST, "10", "The maximum number of connections allowed per "
+ + "host for this Mongo instance. Those connections will be kept in a pool when idle.", null);
+
+ addPropInfo(propInfos, CONNECT_TIMEOUT, "10000", "The connection timeout in milliseconds. ", null);
+
+ addPropInfo(propInfos, CURSOR_FINALIZER_ENABLED, "true", "Sets whether there is a finalize "
+ + "method created that cleans up instances of DBCursor that the client does not close.",
+ null);
+
+ addPropInfo(propInfos, MAX_AUTO_CONNECT_RETRY_TIME, "0",
+ "The maximum amount of time in MS to spend retrying to open connection to the same server."
+ + "Default is 0, which means to use the default 15s if autoConnectRetry is on.", null);
+
+ addPropInfo(propInfos, READ_PREFERENCE, "primary",
+ "represents preferred replica set members to which a query or command can be sent", new String[] {
+ "primary", "primary preferred", "secondary", "secondary preferred", "nearest" });
+
+ addPropInfo(propInfos, SOCKET_TIMEOUT, "0", "The socket timeout in milliseconds. It is used for "
+ + "I/O socket read and write operations "
+ + "(Socket.setSoTimeout(int)). Default is 0, which means no timeout.", null);
+
+ return propInfos.toArray(new DriverPropertyInfo[propInfos.size()]);
+ }
+
+ private void addPropInfo(final ArrayList propInfos, final String propName,
+ final String defaultVal, final String description, final String[] choices)
+ {
+ DriverPropertyInfo newProp = new DriverPropertyInfo(propName, defaultVal);
+ newProp.description = description;
+ if (choices != null)
+ {
+ newProp.choices = choices;
+ }
+ propInfos.add(newProp);
+ }
}
\ No newline at end of file
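A sketch of how the property metadata above can be listed through the standard java.sql.DriverPropertyInfo fields; it only assumes DriverPropertyInfoHelper is on the classpath.

import java.sql.DriverPropertyInfo;

import io.mycat.backend.jdbc.mongodb.DriverPropertyInfoHelper;

public class PropertyInfoSketch {
    public static void main(String[] args) {
        for (DriverPropertyInfo info : new DriverPropertyInfoHelper().getPropertyInfo()) {
            System.out.println(info.name + " (default: " + info.value + "): " + info.description);
        }
    }
}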
diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoClientPropertyHelper.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoClientPropertyHelper.java
new file mode 100644
index 000000000..bea00ae65
--- /dev/null
+++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoClientPropertyHelper.java
@@ -0,0 +1,35 @@
+package io.mycat.backend.jdbc.mongodb;
+
+
+import com.google.common.base.Joiner;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+
+/**
+ * @author liuxinsi
+ * @mail akalxs@gmail.com
+ */
+public class MongoClientPropertyHelper {
+ /**
+ * Formats the properties in pro into the form required by {@link com.mongodb.MongoClientURI}.
+ *
+ * @param pro configuration properties
+ * @return the formatted option string
+ */
+ public static String formatProperties(Properties pro) {
+ if (pro == null || pro.isEmpty()) {
+ return null;
+ }
+
+ Set<Object> keys = pro.keySet();
+ List<String> props = new ArrayList<>(keys.size());
+ for (Object key : keys) {
+ Object value = pro.get(key);
+ props.add(key + "=" + value.toString());
+ }
+ return Joiner.on(";").join(props);
+ }
+}
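A sketch of the option string formatProperties() builds; MongoDriver (below) appends it to the connection URI after '?'. Key order follows the Properties iteration order, so it is not guaranteed; the property names are illustrative.

import java.util.Properties;

import io.mycat.backend.jdbc.mongodb.MongoClientPropertyHelper;

public class FormatPropertiesSketch {
    public static void main(String[] args) {
        Properties pro = new Properties();
        pro.setProperty("connectTimeout", "10000");
        pro.setProperty("socketTimeout", "0");
        // e.g. "connectTimeout=10000;socketTimeout=0"
        System.out.println(MongoClientPropertyHelper.formatProperties(pro));
    }
}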
diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoConnection.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoConnection.java
index a5166e7b7..16590c7ef 100644
--- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoConnection.java
+++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoConnection.java
@@ -48,7 +48,9 @@ public DB getDB() {
if (this._schema!=null) {
return this.mc.getDB(this._schema);
}
- else return null;
+ else {
+ return null;
+ }
}
@Override
@@ -70,10 +72,9 @@ public String nativeSQL(String sql) throws SQLException {
}
@Override
- public void setAutoCommit(boolean autoCommit) throws SQLException {
-
- if (!autoCommit)
- throw new RuntimeException("autoCommit has to be on");
+ public void setAutoCommit(boolean autoCommit) throws SQLException {
+ //if (!autoCommit)
+ // throw new RuntimeException("autoCommit has to be on");
}
@Override
diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoData.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoData.java
index 5ebf81e8c..86bba46bc 100644
--- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoData.java
+++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoData.java
@@ -1,130 +1,130 @@
-package io.mycat.backend.jdbc.mongodb;
-
-import java.sql.Date;
-import java.sql.Time;
-import java.sql.Timestamp;
-import java.sql.Types;
-import java.util.HashMap;
-
-import com.mongodb.BasicDBList;
-import com.mongodb.DBCursor;
-import com.mongodb.DBObject;
-
-public class MongoData {
-
- private DBCursor cursor;
- private long count;
- private String table;
- private DBObject groupby;
-
- private HashMap map = new HashMap();
- private boolean type=false;
-
- public MongoData(){
- this.count=0;
- this.cursor=null;
- }
-
- public long getCount() {
- return this.count;
- }
-
-
- public void setCount(long count) {
- this.count=count;
- }
-
- public String getTable() {
- return this.table;
- }
-
- public void setTable(String table) {
- this.table=table;
- }
-
- public DBObject getGrouyBy() {
- return this.groupby;
- }
-
- public BasicDBList getGrouyBys() {
- if (this.groupby instanceof BasicDBList) {
- return (BasicDBList)this.groupby;
- }
- else {
- return null;
- }
- }
- public void setGrouyBy(DBObject gb) {
- this.groupby=gb;
- this.type=true;
- if (gb instanceof BasicDBList) {
- Object gb2=((BasicDBList)gb).get(0);
- if (gb2 instanceof DBObject) {
- for (String field :((DBObject)gb2).keySet()) {
- Object val = ((DBObject)gb2).get(field);
- setField(field,getObjectToType(val));
- }
- }
- }
- }
-
- public static int getObjectToType(Object ob){
- if (ob instanceof Integer) {
- return Types.INTEGER;
- }
- else if (ob instanceof Boolean) {
- return Types.BOOLEAN;
- }
- else if (ob instanceof Byte) {
- return Types.BIT;
- }
- else if (ob instanceof Short) {
- return Types.INTEGER;
- }
- else if (ob instanceof Float) {
- return Types.FLOAT;
- }
- else if (ob instanceof Long) {
- return Types.BIGINT;
- }
- else if (ob instanceof Double) {
- return Types.DOUBLE;
- }
- else if (ob instanceof Date) {
- return Types.DATE;
- }
- else if (ob instanceof Time) {
- return Types.TIME;
- }
- else if (ob instanceof Timestamp) {
- return Types.TIMESTAMP;
- }
- else if (ob instanceof String) {
- return Types.VARCHAR;
- }
- else {
- return Types.VARCHAR;
- }
- }
-
- public void setField(String field,int ftype) {
- map.put(field, ftype);
- }
-
- public HashMap getFields() {
- return this.map;
- }
-
- public boolean getType() {
- return this.type;
- }
-
- public DBCursor getCursor() {
- return this.cursor;
- }
-
- public DBCursor setCursor(DBCursor cursor) {
- return this.cursor=cursor;
- }
-
-}
+package io.mycat.backend.jdbc.mongodb;
+
+import java.sql.Date;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.sql.Types;
+import java.util.HashMap;
+
+import com.mongodb.DBCursor;
+import com.mongodb.DBObject;
+import com.mongodb.BasicDBList;
+
+public class MongoData {
+
+ private DBCursor cursor;
+ private long count;
+ private String table;
+ private DBObject groupby;
+
+ private HashMap map = new HashMap();
+ private boolean type=false;
+
+ public MongoData(){
+ this.count=0;
+ this.cursor=null;
+ }
+
+ public long getCount() {
+ return this.count;
+ }
+
+
+ public void setCount(long count) {
+ this.count=count;
+ }
+
+ public String getTable() {
+ return this.table;
+ }
+
+ public void setTable(String table) {
+ this.table=table;
+ }
+
+ public DBObject getGrouyBy() {
+ return this.groupby;
+ }
+
+ public BasicDBList getGrouyBys() {
+ if (this.groupby instanceof BasicDBList) {
+ return (BasicDBList)this.groupby;
+ }
+ else {
+ return null;
+ }
+ }
+ public void setGrouyBy(DBObject gb) {
+ this.groupby=gb;
+ this.type=true;
+ if (gb instanceof BasicDBList) {
+ Object gb2=((BasicDBList)gb).get(0);
+ if (gb2 instanceof DBObject) {
+ for (String field :((DBObject)gb2).keySet()) {
+ Object val = ((DBObject)gb2).get(field);
+ setField(field,getObjectToType(val));
+ }
+ }
+ }
+ }
+
+ public static int getObjectToType(Object ob){
+ if (ob instanceof Integer) {
+ return Types.INTEGER;
+ }
+ else if (ob instanceof Boolean) {
+ return Types.BOOLEAN;
+ }
+ else if (ob instanceof Byte) {
+ return Types.BIT;
+ }
+ else if (ob instanceof Short) {
+ return Types.INTEGER;
+ }
+ else if (ob instanceof Float) {
+ return Types.FLOAT;
+ }
+ else if (ob instanceof Long) {
+ return Types.BIGINT;
+ }
+ else if (ob instanceof Double) {
+ return Types.DOUBLE;
+ }
+ else if (ob instanceof Date) {
+ return Types.DATE;
+ }
+ else if (ob instanceof Time) {
+ return Types.TIME;
+ }
+ else if (ob instanceof Timestamp) {
+ return Types.TIMESTAMP;
+ }
+ else if (ob instanceof String) {
+ return Types.VARCHAR;
+ }
+ else {
+ return Types.VARCHAR;
+ }
+ }
+
+ public void setField(String field,int ftype) {
+ map.put(field, ftype);
+ }
+
+ public HashMap getFields() {
+ return this.map;
+ }
+
+ public boolean getType() {
+ return this.type;
+ }
+
+ public DBCursor getCursor() {
+ return this.cursor;
+ }
+
+ public DBCursor setCursor(DBCursor cursor) {
+ return this.cursor=cursor;
+ }
+
+}
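A sketch of the java.sql.Types codes getObjectToType() above assigns to sample values; anything it does not recognise falls back to VARCHAR.

import java.sql.Types;

import io.mycat.backend.jdbc.mongodb.MongoData;

public class ObjectToTypeSketch {
    public static void main(String[] args) {
        System.out.println(MongoData.getObjectToType(42) == Types.INTEGER);           // true
        System.out.println(MongoData.getObjectToType(42L) == Types.BIGINT);           // true
        System.out.println(MongoData.getObjectToType(3.14d) == Types.DOUBLE);         // true
        System.out.println(MongoData.getObjectToType("abc") == Types.VARCHAR);        // true
        System.out.println(MongoData.getObjectToType(new Object()) == Types.VARCHAR); // fallback
    }
}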
diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoDriver.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoDriver.java
index 6b9db38d3..713aa2f04 100644
--- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoDriver.java
+++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoDriver.java
@@ -63,10 +63,11 @@ private MongoClientURI parseURL(String url, Properties defaults) {
// strip the leading "jdbc:" prefix
//url = url.replace(URL_JDBC, "");
-
+
+ String options = MongoClientPropertyHelper.formatProperties(defaults);
+ LOGGER.debug("the options:{}",options);
try {
- //FIXME 判断defaults中的参数,写入URL中?
- return new MongoClientURI(url);
+ return new MongoClientURI(options == null ? url : url + "?" + options);
} catch (Exception e) {
LOGGER.error("parseURLError",e);
return null;
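The hunk above changes parseURL() so that any JDBC properties are formatted and appended to the Mongo URI after '?'; a sketch of the resulting assembly with illustrative values.

public class MongoUriSketch {
    public static void main(String[] args) {
        String url = "mongodb://localhost:27017/testdb";
        String options = "connectTimeout=10000;socketTimeout=0"; // as produced by MongoClientPropertyHelper.formatProperties()
        String uri = (options == null) ? url : url + "?" + options;
        System.out.println(uri); // mongodb://localhost:27017/testdb?connectTimeout=10000;socketTimeout=0
    }
}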
diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoEmbeddedObjectProcessor.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoEmbeddedObjectProcessor.java
new file mode 100644
index 000000000..88fa0e92b
--- /dev/null
+++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoEmbeddedObjectProcessor.java
@@ -0,0 +1,297 @@
+package io.mycat.backend.jdbc.mongodb;
+
+import com.mongodb.BasicDBList;
+import com.mongodb.BasicDBObject;
+import org.bson.types.ObjectId;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.Array;
+import java.lang.reflect.Field;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Handles embedded objects (Embedded Object | SubDocument) fetched from MongoDB and converts the
+ * MongoDB objects into the corresponding Java objects.
+ *
+ * Supported:
+ *
+ * {@link ObjectId}
+ * primitive/boxed types
+ * enums
+ * embedded objects
+ * embedded arrays
+ *
+ * eg.
+ * public class A{
+ * private ObjectId _id;
+ * private String name;
+ * private Integer age;
+ * private B b;
+ * private Address[] addresses;
+ * private String[] someCode;
+ * ...
+ * }
+ *
+ * Not supported:
+ *
+ * eg.
+ * public class A{
+ * private ObjectId _id;
+ * private String name;
+ * private Integer age;
+ * private B b;
+ * private List<Address> addresses;
+ * private Set<String> someCode;
+ * ...
+ * }
+ *
+ * The generic element type cannot be resolved at this first level, so addresses and someCode above
+ * are not supported and null is returned for them; the same kinds of fields inside the nested B
+ * object are handled fine.
+ *
+ * @author liuxinsi
+ * @mail akalxs@gmail.com
+ */
+public class MongoEmbeddedObjectProcessor {
+ private static final Logger LOG = LoggerFactory.getLogger(MongoEmbeddedObjectProcessor.class);
+
+ /**
+ * Converts the given value into the requested type and returns it.
+ *
+ * @param columnLabel column name
+ * @param value the raw value
+ * @param type the target type
+ * @return the converted object
+ */
+ public static Object valueMapper(String columnLabel, Object value, Class<?> type) {
+ if (value == null) {
+ return null;
+ }
+
+ // mongodb _id field
+ if (type.isAssignableFrom(ObjectId.class)
+ && (value instanceof ObjectId || value instanceof String)) {
+ return new ObjectId(value.toString());
+ }
+
+ // enum
+ if (type.isEnum()) {
+ return value.toString();
+ }
+
+ // embedded collection
+ if ((type.isAssignableFrom(List.class) || type.isAssignableFrom(Set.class))
+ && value instanceof BasicDBList) {
+ // TODO the generic element type cannot be resolved, so the list cannot be converted
+ LOG.debug("column:[{}],type:[{}] is an embedded list; the generic element type cannot be resolved, so it cannot be mapped. return null.", columnLabel, type);
+ return null;
+ }
+
+ // embedded object
+ if (value instanceof BasicDBObject) {
+ BasicDBObject dbObj = (BasicDBObject) value;
+ return beanMapper(dbObj, type);
+ }
+
+ // embedded array
+ if (type.isArray() && value instanceof BasicDBList) {
+ BasicDBList basicDBList = (BasicDBList) value;
+ return arrayMapper(basicDBList, type);
+ }
+
+ LOG.debug("column:[{}],type:[{}] unsupported type yet.return null", columnLabel, type);
+ return null;
+ }
+
+ /**
+ * Loads all fields declared on clazzToMapper.
+ *
+ * @param clazzToMapper class
+ * @return field map, k=field name, v=field
+ */
+ private static Map<String, Field> loadFields(Class<?> clazzToMapper) {
+ Map<String, Field> fieldMap = new HashMap<>();
+ Field[] fields = clazzToMapper.getDeclaredFields();
+ for (Field field : fields) {
+ field.setAccessible(true);
+ fieldMap.put(field.getName(), field);
+ }
+ return fieldMap;
+ }
+
+ /**
+ * Resolves the generic type argument of the given field.
+ *
+ * @param field field
+ * @return null if it cannot be resolved or an exception occurs.
+ */
+ private static Class<?> getParameterizedClass(Field field) {
+ Type type = field.getGenericType();
+ String parameterizedType;
+ if (type instanceof ParameterizedType) {
+ ParameterizedType pt = (ParameterizedType) type;
+ if (pt.getActualTypeArguments() == null || pt.getActualTypeArguments().length == 0) {
+ return null;
+ }
+ parameterizedType = pt.getActualTypeArguments()[0].toString();
+ } else {
+ return null;
+ }
+
+ Class<?> clazz;
+ try {
+ clazz = Class.forName(parameterizedType);
+ } catch (ClassNotFoundException e) {
+ LOG.warn("获取field:{}的范型异常。", field.getName(), e);
+ return null;
+ }
+ return clazz;
+ }
+
+ /**
+ * Creates a collection instance matching the declared type of the field.
+ * Only List and Set are supported.
+ *
+ * @param field field
+ * @param size initial collection size
+ * @return the matching collection implementation
+ */
+ private static Collection createCollection(Field field, int size) {
+ Class<?> fieldType = field.getType();
+ Collection collection = null;
+ if (fieldType.isAssignableFrom(List.class)) {
+ collection = new ArrayList<>(size);
+ } else if (fieldType.isAssignableFrom(Set.class)) {
+ collection = new HashSet<>(size);
+ }
+ return collection;
+ }
+
+ /**
+ * Converts the MongoDB data object dbObj into an instance of the target type clazzToMapper.
+ * key=fieldName.
+ *
+ * @param dbObj MongoDB data object
+ * @param clazzToMapper target class
+ * @return the converted object
+ */
+ private static Object beanMapper(BasicDBObject dbObj, Class<?> clazzToMapper) {
+ // load all fields
+ Map<String, Field> fieldMap = loadFields(clazzToMapper);
+
+ // Map the data in dbObj into beanMap; values that are themselves BasicDBObject are mapped
+ // recursively to the corresponding bean.
+ // k = field name in dbObj, v = the corresponding value or object from dbObj
+ Map<String, Object> beanMap = new HashMap<>();
+ for (String s : dbObj.keySet()) {
+ Object o = dbObj.get(s);
+ // nested object
+ if (o instanceof BasicDBObject) {
+ Field field = fieldMap.get(s);
+ o = beanMapper((BasicDBObject) o, field.getType());
+
+ // nested object list
+ } else if (o instanceof BasicDBList) {
+ Field field = fieldMap.get(s);
+ // resolve the generic element type
+ Class<?> parameterizedClazz = getParameterizedClass(field);
+
+ BasicDBList basicDBs = (BasicDBList) o;
+
+ Collection collection = createCollection(field, basicDBs.size());
+ for (Object basicDbObj : basicDBs) {
+ // primitive/boxed type
+ if (parameterizedClazz.isPrimitive()) {
+ collection.add(basicDbObj);
+ } else if (parameterizedClazz.getName().startsWith("java.lang")) {
+ collection.add(basicDbObj);
+ } else {
+ // object type
+ collection.add(beanMapper((BasicDBObject) basicDbObj, parameterizedClazz));
+ }
+ }
+ o = collection;
+ }
+
+ beanMap.put(s, o);
+ }
+
+ // create
+ Object instance;
+ try {
+ instance = clazzToMapper.newInstance();
+ } catch (InstantiationException | IllegalAccessException e) {
+ LOG.warn("实例化:[{}]对象异常.", clazzToMapper, e);
+ return null;
+ }
+
+ // assign the values
+ Set<String> fieldNames = fieldMap.keySet();
+ for (String fieldName : fieldNames) {
+ if (beanMap.containsKey(fieldName)) {
+ Field field = fieldMap.get(fieldName);
+ Object value = beanMap.get(fieldName);
+
+ try {
+ field.set(instance, value);
+ } catch (IllegalAccessException e) {
+ // should not happen
+ LOG.error("failed to set the value of field:[{}]",
+ fieldName, e);
+ }
+ }
+ }
+ return instance;
+ }
+
+ /**
+ * Converts the MongoDB data object list basicDBList into an array of the type arrayClass.
+ * Primitive/boxed types are copied directly; object types go through {@link #beanMapper(BasicDBObject, Class)}.
+ *
+ * @param basicDBList MongoDB data object list
+ * @param arrayClass target array class
+ * @return the converted array
+ * @see MongoEmbeddedObjectProcessor#beanMapper(BasicDBObject, Class)
+ */
+ private static Object arrayMapper(BasicDBList basicDBList, Class<?> arrayClass) {
+ // the concrete element class
+ Class<?> clazzToMapper;
+ try {
+ clazzToMapper = Class.forName(arrayClass.getName()
+ .replace("[L", "")
+ .replace(";", ""));
+ } catch (ClassNotFoundException e) {
+ LOG.warn("实例化:[{}]对象异常.", arrayClass, e);
+ return null;
+ }
+
+ // create the target array
+ Object array = Array.newInstance(clazzToMapper, basicDBList.size());
+
+ // fill the array
+ int i = 0;
+ for (Object basicDbObj : basicDBList) {
+ Object value;
+ // primitive/boxed type
+ if (clazzToMapper.isPrimitive()) {
+ value = basicDbObj;
+ } else if (clazzToMapper.getName().startsWith("java.lang")) {
+ value = basicDbObj;
+ } else {
+ // object type
+ value = beanMapper((BasicDBObject) basicDbObj, clazzToMapper);
+ }
+
+ Array.set(array, i, value);
+ i++;
+ }
+ return array;
+ }
+}
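A sketch of valueMapper() above mapping an embedded sub-document onto a plain bean, which is what the new typed getObject() overloads in MongoResultSet delegate to; the Address class and its field values are purely illustrative.

import com.mongodb.BasicDBObject;

import io.mycat.backend.jdbc.mongodb.MongoEmbeddedObjectProcessor;

public class EmbeddedMappingSketch {
    public static class Address {
        public String city;
        public Integer zip;
    }

    public static void main(String[] args) {
        BasicDBObject dbObj = new BasicDBObject("city", "Beijing").append("zip", 100000);
        // Not an ObjectId, enum, collection or array, so valueMapper() falls through to beanMapper().
        Address address = (Address) MongoEmbeddedObjectProcessor.valueMapper("address", dbObj, Address.class);
        System.out.println(address.city + " / " + address.zip); // Beijing / 100000
    }
}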
diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoPreparedStatement.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoPreparedStatement.java
index 7880c14e4..6bbe17099 100644
--- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoPreparedStatement.java
+++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoPreparedStatement.java
@@ -1,408 +1,409 @@
-package io.mycat.backend.jdbc.mongodb;
-
-import java.io.InputStream;
-import java.io.Reader;
-import java.math.BigDecimal;
-import java.net.URL;
-import java.sql.Array;
-import java.sql.Blob;
-import java.sql.Clob;
-import java.sql.Date;
-import java.sql.NClob;
-import java.sql.ParameterMetaData;
-import java.sql.PreparedStatement;
-import java.sql.Ref;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
-import java.sql.RowId;
-import java.sql.SQLException;
-import java.sql.SQLXML;
-import java.sql.Time;
-import java.sql.Timestamp;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.List;
-/**
- * 功能详细描述
- * @author sohudo[http://blog.csdn.net/wind520]
- * @create 2014年12月19日 下午6:50:23
- * @version 0.0.1
- */
-public class MongoPreparedStatement extends MongoStatement implements
- PreparedStatement {
- final String _sql;
- final MongoSQLParser _mongosql;
- List _params = new ArrayList();
-
- public MongoPreparedStatement(MongoConnection conn, int type,
- int concurrency, int holdability, String sql)
- throws MongoSQLException {
- super(conn, type, concurrency, holdability);
- this._sql = sql;
- this._mongosql = new MongoSQLParser(conn.getDB(), sql);
- }
-
- @Override
- public ResultSet executeQuery() throws SQLException {
-
- return null;
- }
-
- @Override
- public int executeUpdate() throws SQLException {
-
- this._mongosql.setParams(this._params);
- return this._mongosql.executeUpdate();
- }
-
- public void setValue(int idx, Object o) {
- while (this._params.size() <= idx)
- this._params.add(null);
- this._params.set(idx, o);
- }
-
- @Override
- public void setNull(int parameterIndex, int sqlType) throws SQLException {
-
-
- }
-
- @Override
- public void setBoolean(int parameterIndex, boolean x) throws SQLException {
-
- setValue(parameterIndex, Boolean.valueOf(x));
- }
-
- @Override
- public void setByte(int parameterIndex, byte x) throws SQLException {
-
- setValue(parameterIndex, Byte.valueOf(x));
- }
-
- @Override
- public void setShort(int parameterIndex, short x) throws SQLException {
-
- setValue(parameterIndex, Short.valueOf(x));
- }
-
- @Override
- public void setInt(int parameterIndex, int x) throws SQLException {
-
- setValue(parameterIndex, Integer.valueOf(x));
- }
-
- @Override
- public void setLong(int parameterIndex, long x) throws SQLException {
-
- setValue(parameterIndex, Long.valueOf(x));
- }
-
- @Override
- public void setFloat(int parameterIndex, float x) throws SQLException {
-
- setValue(parameterIndex, Float.valueOf(x));
- }
-
- @Override
- public void setDouble(int parameterIndex, double x) throws SQLException {
-
- setValue(parameterIndex, Double.valueOf(x));
- }
-
- @Override
- public void setBigDecimal(int parameterIndex, BigDecimal x)
- throws SQLException {
-
- setValue(parameterIndex, x);
- }
-
- @Override
- public void setString(int parameterIndex, String x) throws SQLException {
-
- setValue(parameterIndex, x);
- }
-
- @Override
- public void setBytes(int parameterIndex, byte[] x) throws SQLException {
-
- setValue(parameterIndex, x);
- }
-
- @Override
- public void setDate(int parameterIndex, Date x) throws SQLException {
-
- setValue(parameterIndex, x);
- }
-
- @Override
- public void setTime(int parameterIndex, Time x) throws SQLException {
-
- setValue(parameterIndex, x);
- }
-
- @Override
- public void setTimestamp(int parameterIndex, Timestamp x)
- throws SQLException {
-
- setValue(parameterIndex, x);
- }
-
- @Override
- public void setAsciiStream(int parameterIndex, InputStream x, int length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setUnicodeStream(int parameterIndex, InputStream x, int length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setBinaryStream(int parameterIndex, InputStream x, int length)
- throws SQLException {
-
-
- }
-
- @Override
- public void clearParameters() throws SQLException {
-
-
- }
-
- @Override
- public void setObject(int parameterIndex, Object x, int targetSqlType)
- throws SQLException {
-
-
- }
-
- @Override
- public void setObject(int parameterIndex, Object x) throws SQLException {
-
- setValue(parameterIndex,x);
- }
-
- @Override
- public boolean execute() throws SQLException {
-
- return false;
- }
-
- @Override
- public void addBatch() throws SQLException {
-
-
- }
-
- @Override
- public void setCharacterStream(int parameterIndex, Reader reader, int length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setRef(int parameterIndex, Ref x) throws SQLException {
-
-
- }
-
- @Override
- public void setBlob(int parameterIndex, Blob x) throws SQLException {
-
-
- }
-
- @Override
- public void setClob(int parameterIndex, Clob x) throws SQLException {
-
-
- }
-
- @Override
- public void setArray(int parameterIndex, Array x) throws SQLException {
-
-
- }
-
- @Override
- public ResultSetMetaData getMetaData() throws SQLException {
-
- return null;
- }
-
- @Override
- public void setDate(int parameterIndex, Date x, Calendar cal)
- throws SQLException {
-
-
- }
-
- @Override
- public void setTime(int parameterIndex, Time x, Calendar cal)
- throws SQLException {
-
-
- }
-
- @Override
- public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal)
- throws SQLException {
-
-
- }
-
- @Override
- public void setNull(int parameterIndex, int sqlType, String typeName)
- throws SQLException {
-
-
- }
-
- @Override
- public void setURL(int parameterIndex, URL x) throws SQLException {
-
-
- }
-
- @Override
- public ParameterMetaData getParameterMetaData() throws SQLException {
-
- return null;
- }
-
- @Override
- public void setRowId(int parameterIndex, RowId x) throws SQLException {
-
-
- }
-
- @Override
- public void setNString(int parameterIndex, String value)
- throws SQLException {
-
-
- }
-
- @Override
- public void setNCharacterStream(int parameterIndex, Reader value,
- long length) throws SQLException {
-
-
- }
-
- @Override
- public void setNClob(int parameterIndex, NClob value) throws SQLException {
-
-
- }
-
- @Override
- public void setClob(int parameterIndex, Reader reader, long length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setBlob(int parameterIndex, InputStream inputStream, long length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setNClob(int parameterIndex, Reader reader, long length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setSQLXML(int parameterIndex, SQLXML xmlObject)
- throws SQLException {
-
-
- }
-
- @Override
- public void setObject(int parameterIndex, Object x, int targetSqlType,
- int scaleOrLength) throws SQLException {
-
-
- }
-
- @Override
- public void setAsciiStream(int parameterIndex, InputStream x, long length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setBinaryStream(int parameterIndex, InputStream x, long length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setCharacterStream(int parameterIndex, Reader reader,
- long length) throws SQLException {
-
-
- }
-
- @Override
- public void setAsciiStream(int parameterIndex, InputStream x)
- throws SQLException {
-
-
- }
-
- @Override
- public void setBinaryStream(int parameterIndex, InputStream x)
- throws SQLException {
-
-
- }
-
- @Override
- public void setCharacterStream(int parameterIndex, Reader reader)
- throws SQLException {
-
-
- }
-
- @Override
- public void setNCharacterStream(int parameterIndex, Reader value)
- throws SQLException {
-
-
- }
-
- @Override
- public void setClob(int parameterIndex, Reader reader) throws SQLException {
-
-
- }
-
- @Override
- public void setBlob(int parameterIndex, InputStream inputStream)
- throws SQLException {
-
-
- }
-
- @Override
- public void setNClob(int parameterIndex, Reader reader) throws SQLException {
-
-
- }
-
-}
+package io.mycat.backend.jdbc.mongodb;
+
+import java.io.InputStream;
+import java.io.Reader;
+import java.math.BigDecimal;
+import java.net.URL;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.Clob;
+import java.sql.Date;
+import java.sql.NClob;
+import java.sql.ParameterMetaData;
+import java.sql.PreparedStatement;
+import java.sql.Ref;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.RowId;
+import java.sql.SQLException;
+import java.sql.SQLXML;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.List;
+/**
+ * Detailed functional description
+ * @author sohudo[http://blog.csdn.net/wind520]
+ * @create 2014-12-19 18:50:23
+ * @version 0.0.1
+ */
+public class MongoPreparedStatement extends MongoStatement implements
+ PreparedStatement {
+ final String _sql;
+ final MongoSQLParser _mongosql;
+ List _params = new ArrayList();
+
+ public MongoPreparedStatement(MongoConnection conn, int type,
+ int concurrency, int holdability, String sql)
+ throws MongoSQLException {
+ super(conn, type, concurrency, holdability);
+ this._sql = sql;
+ this._mongosql = new MongoSQLParser(conn.getDB(), sql);
+ }
+
+ @Override
+ public ResultSet executeQuery() throws SQLException {
+
+ return null;
+ }
+
+ @Override
+ public int executeUpdate() throws SQLException {
+
+ this._mongosql.setParams(this._params);
+ return this._mongosql.executeUpdate();
+ }
+
+ public void setValue(int idx, Object o) {
+ while (this._params.size() <= idx) {
+ this._params.add(null);
+ }
+ this._params.set(idx, o);
+ }
+
+ @Override
+ public void setNull(int parameterIndex, int sqlType) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBoolean(int parameterIndex, boolean x) throws SQLException {
+
+ setValue(parameterIndex, Boolean.valueOf(x));
+ }
+
+ @Override
+ public void setByte(int parameterIndex, byte x) throws SQLException {
+
+ setValue(parameterIndex, Byte.valueOf(x));
+ }
+
+ @Override
+ public void setShort(int parameterIndex, short x) throws SQLException {
+
+ setValue(parameterIndex, Short.valueOf(x));
+ }
+
+ @Override
+ public void setInt(int parameterIndex, int x) throws SQLException {
+
+ setValue(parameterIndex, Integer.valueOf(x));
+ }
+
+ @Override
+ public void setLong(int parameterIndex, long x) throws SQLException {
+
+ setValue(parameterIndex, Long.valueOf(x));
+ }
+
+ @Override
+ public void setFloat(int parameterIndex, float x) throws SQLException {
+
+ setValue(parameterIndex, Float.valueOf(x));
+ }
+
+ @Override
+ public void setDouble(int parameterIndex, double x) throws SQLException {
+
+ setValue(parameterIndex, Double.valueOf(x));
+ }
+
+ @Override
+ public void setBigDecimal(int parameterIndex, BigDecimal x)
+ throws SQLException {
+
+ setValue(parameterIndex, x);
+ }
+
+ @Override
+ public void setString(int parameterIndex, String x) throws SQLException {
+
+ setValue(parameterIndex, x);
+ }
+
+ @Override
+ public void setBytes(int parameterIndex, byte[] x) throws SQLException {
+
+ setValue(parameterIndex, x);
+ }
+
+ @Override
+ public void setDate(int parameterIndex, Date x) throws SQLException {
+
+ setValue(parameterIndex, x);
+ }
+
+ @Override
+ public void setTime(int parameterIndex, Time x) throws SQLException {
+
+ setValue(parameterIndex, x);
+ }
+
+ @Override
+ public void setTimestamp(int parameterIndex, Timestamp x)
+ throws SQLException {
+
+ setValue(parameterIndex, x);
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x, int length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setUnicodeStream(int parameterIndex, InputStream x, int length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x, int length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void clearParameters() throws SQLException {
+
+
+ }
+
+ @Override
+ public void setObject(int parameterIndex, Object x, int targetSqlType)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setObject(int parameterIndex, Object x) throws SQLException {
+
+ setValue(parameterIndex,x);
+ }
+
+ @Override
+ public boolean execute() throws SQLException {
+
+ return false;
+ }
+
+ @Override
+ public void addBatch() throws SQLException {
+
+
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader, int length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setRef(int parameterIndex, Ref x) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, Blob x) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Clob x) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setArray(int parameterIndex, Array x) throws SQLException {
+
+
+ }
+
+ @Override
+ public ResultSetMetaData getMetaData() throws SQLException {
+
+ return null;
+ }
+
+ @Override
+ public void setDate(int parameterIndex, Date x, Calendar cal)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setTime(int parameterIndex, Time x, Calendar cal)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNull(int parameterIndex, int sqlType, String typeName)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setURL(int parameterIndex, URL x) throws SQLException {
+
+
+ }
+
+ @Override
+ public ParameterMetaData getParameterMetaData() throws SQLException {
+
+ return null;
+ }
+
+ @Override
+ public void setRowId(int parameterIndex, RowId x) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNString(int parameterIndex, String value)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNCharacterStream(int parameterIndex, Reader value,
+ long length) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, NClob value) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Reader reader, long length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, InputStream inputStream, long length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, Reader reader, long length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setSQLXML(int parameterIndex, SQLXML xmlObject)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setObject(int parameterIndex, Object x, int targetSqlType,
+ int scaleOrLength) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x, long length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x, long length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader,
+ long length) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNCharacterStream(int parameterIndex, Reader value)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Reader reader) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, InputStream inputStream)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, Reader reader) throws SQLException {
+
+
+ }
+
+}
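The only non-trivial logic in the class above is the list padding in setValue(); a standalone sketch of that behaviour with an illustrative index and value.

import java.util.ArrayList;
import java.util.List;

public class SetValueSketch {
    public static void main(String[] args) {
        List<Object> params = new ArrayList<>();
        int idx = 3;                   // parameter index as passed to setValue()
        while (params.size() <= idx) { // grow the list with nulls until the index exists
            params.add(null);
        }
        params.set(idx, "hello");
        System.out.println(params);    // [null, null, null, hello]
    }
}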
diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSet.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSet.java
index 8013e2a59..944bfcccc 100644
--- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSet.java
+++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSet.java
@@ -1,5 +1,9 @@
package io.mycat.backend.jdbc.mongodb;
+import com.mongodb.BasicDBList;
+import com.mongodb.DBCursor;
+import com.mongodb.DBObject;
+
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
@@ -26,10 +30,6 @@
//import java.util.HashMap;
import java.util.Map;
import java.util.Set;
-
-import com.mongodb.BasicDBList;
-import com.mongodb.DBCursor;
-import com.mongodb.DBObject;
/**
* 功能详细描述
* @author sohudo[http://blog.csdn.net/wind520]
@@ -101,7 +101,9 @@ public void SetFieldType(boolean isid) throws SQLException {
if (isid) {
fieldtype= new int[Types.VARCHAR];
}
- else fieldtype= new int[this.select.length];
+ else {
+ fieldtype = new int[this.select.length];
+ }
if (_cur!=null) {
for (int i=0;i<this.select.length;i++) {
@@ ... @@
public <T> T getObject(int columnIndex, Class<T> type) throws SQLException {
-
- return null;
+ Object value = getObject(columnIndex);
+ return (T) MongoEmbeddedObjectProcessor.valueMapper(getField(columnIndex), value, type);
}
@Override
public <T> T getObject(String columnLabel, Class<T> type)
throws SQLException {
-
- return null;
+ Object value = getObject(columnLabel);
+ return (T) MongoEmbeddedObjectProcessor.valueMapper(columnLabel, value, type);
}
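How the typed overloads above would be used from caller code once a row containing an embedded document is positioned; the "address" column and the Address type are illustrative only.

import java.sql.ResultSet;
import java.sql.SQLException;

public class TypedGetObjectSketch {
    public static class Address {
        public String city;
    }

    // Reads an embedded sub-document through the new getObject(String, Class) overload,
    // which delegates to MongoEmbeddedObjectProcessor.valueMapper().
    public static String cityOf(ResultSet rs) throws SQLException {
        Address address = rs.getObject("address", Address.class);
        return address == null ? null : address.city;
    }
}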
diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSetMetaData.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSetMetaData.java
index 52d503115..bacbecd82 100644
--- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSetMetaData.java
+++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoResultSetMetaData.java
@@ -1,185 +1,189 @@
-package io.mycat.backend.jdbc.mongodb;
-
-import java.sql.ResultSetMetaData;
-import java.sql.SQLException;
-import java.sql.Types;
-//import java.util.Arrays;
-/**
- * 功能详细描述
- * @author sohudo[http://blog.csdn.net/wind520]
- * @create 2014年12月19日 下午6:50:23
- * @version 0.0.1
- */
-
-public class MongoResultSetMetaData implements ResultSetMetaData {
-
- private String[] keySet ;
- private int[] keytype ;
- private String _schema;
- private String _table;
-
- /*
- public MongoResultSetMetaData(Set keySet,String schema) {
- super();
- this.keySet = new String[keySet.size()];
- this.keySet = keySet.toArray(this.keySet);
- this._schema = schema;
- }
- */
- public MongoResultSetMetaData(String[] select,int [] ftype,String schema,String table) {
- super();
- this.keySet = select;
- this.keytype=ftype;
- this._schema = schema;
- this._table =table;
- }
-
- @Override
- public T unwrap(Class iface) throws SQLException {
-
- return null;
- }
-
- @Override
- public boolean isWrapperFor(Class> iface) throws SQLException {
-
- return false;
- }
-
- @Override
- public int getColumnCount() throws SQLException {
- if (keySet==null) return 0;
- else
- return keySet.length;
- }
-
- @Override
- public boolean isAutoIncrement(int column) throws SQLException {
- // 是否为自动编号的字段
- return false;
- }
-
- @Override
- public boolean isCaseSensitive(int column) throws SQLException {
- //指示列的大小写是否有关系
- return true;
- }
-
- @Override
- public boolean isSearchable(int column) throws SQLException {
- //指示是否可以在 where 子句中使用指定的列
- return true;
- }
-
- @Override
- public boolean isCurrency(int column) throws SQLException {
- // 指示指定的列是否是一个哈希代码值
- return false;
- }
-
- @Override
- public int isNullable(int column) throws SQLException {
- // 指示指定列中的值是否可以为 null。
- return 0;
- }
-
- @Override
- public boolean isSigned(int column) throws SQLException {
- // 指示指定列中的值是否带正负号
- return false;
- }
-
- @Override
- public int getColumnDisplaySize(int column) throws SQLException {
-
- return 50;
- }
-
- @Override
- public String getColumnLabel(int column) throws SQLException {
- return keySet[column-1];
- }
-
- @Override
- public String getColumnName(int column) throws SQLException {
- return keySet[column-1];
- }
-
- @Override
- public String getSchemaName(int column) throws SQLException {
-
- return this._schema;
- }
-
- @Override
- public int getPrecision(int column) throws SQLException {
- //获取指定列的指定列宽
- return 0;
- }
-
- @Override
- public int getScale(int column) throws SQLException {
- // 检索指定参数的小数点右边的位数。
- return 0;
- }
-
- @Override
- public String getTableName(int column) throws SQLException {
-
- return this._table;
- }
-
- @Override
- public String getCatalogName(int column) throws SQLException {
-
- return this._schema;
- }
-
- @Override
- public int getColumnType(int column) throws SQLException {
- // 字段类型
- return keytype[column-1];//Types.VARCHAR;
- }
-
- @Override
- public String getColumnTypeName(int column) throws SQLException {
- // 数据库特定的类型名称
- switch (keytype[column-1]){
- case Types.INTEGER: return "INTEGER";
- case Types.BOOLEAN: return "BOOLEAN";
- case Types.BIT: return "BITT";
- case Types.FLOAT: return "FLOAT";
- case Types.BIGINT: return "BIGINT";
- case Types.DOUBLE: return "DOUBLE";
- case Types.DATE: return "DATE";
- case Types.TIME: return "TIME";
- case Types.TIMESTAMP: return "TIMESTAMP";
- default: return "varchar";
- }
- }
-
- @Override
- public boolean isReadOnly(int column) throws SQLException {
- //指示指定的列是否明确不可写入
- return false;
- }
-
- @Override
- public boolean isWritable(int column) throws SQLException {
-
- return false;
- }
-
- @Override
- public boolean isDefinitelyWritable(int column) throws SQLException {
-
- return false;
- }
-
- @Override
- public String getColumnClassName(int column) throws SQLException {
- // 如果调用方法 ResultSet.getObject 从列中获取值,则返回构造其实例的 Java 类的完全限定名称
- return "Object";
- }
-
-}
+package io.mycat.backend.jdbc.mongodb;
+
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Types;
+//import java.util.Arrays;
+import java.util.Set;
+/**
+ * Detailed functional description
+ * @author sohudo[http://blog.csdn.net/wind520]
+ * @create 2014-12-19 18:50:23
+ * @version 0.0.1
+ */
+
+public class MongoResultSetMetaData implements ResultSetMetaData {
+
+ private String[] keySet ;
+ private int[] keytype ;
+ private String _schema;
+ private String _table;
+
+ /*
+ public MongoResultSetMetaData(Set keySet,String schema) {
+ super();
+ this.keySet = new String[keySet.size()];
+ this.keySet = keySet.toArray(this.keySet);
+ this._schema = schema;
+ }
+ */
+ public MongoResultSetMetaData(String[] select,int [] ftype,String schema,String table) {
+ super();
+ this.keySet = select;
+ this.keytype=ftype;
+ this._schema = schema;
+ this._table =table;
+ }
+
+ @Override
+ public <T> T unwrap(Class<T> iface) throws SQLException {
+
+ return null;
+ }
+
+ @Override
+ public boolean isWrapperFor(Class<?> iface) throws SQLException {
+
+ return false;
+ }
+
+ @Override
+ public int getColumnCount() throws SQLException {
+ if (keySet==null) {
+ return 0;
+ }
+ else {
+ return keySet.length;
+ }
+ }
+
+ @Override
+ public boolean isAutoIncrement(int column) throws SQLException {
+ // whether the column is an auto-increment field
+ return false;
+ }
+
+ @Override
+ public boolean isCaseSensitive(int column) throws SQLException {
+ // indicates whether the column's case matters
+ return true;
+ }
+
+ @Override
+ public boolean isSearchable(int column) throws SQLException {
+ // indicates whether the specified column can be used in a WHERE clause
+ return true;
+ }
+
+ @Override
+ public boolean isCurrency(int column) throws SQLException {
+ // indicates whether the specified column is a cash value
+ return false;
+ }
+
+ @Override
+ public int isNullable(int column) throws SQLException {
+ // indicates whether values in the specified column can be null
+ return 0;
+ }
+
+ @Override
+ public boolean isSigned(int column) throws SQLException {
+ // indicates whether values in the specified column are signed
+ return false;
+ }
+
+ @Override
+ public int getColumnDisplaySize(int column) throws SQLException {
+
+ return 50;
+ }
+
+ @Override
+ public String getColumnLabel(int column) throws SQLException {
+ return keySet[column-1];
+ }
+
+ @Override
+ public String getColumnName(int column) throws SQLException {
+ return keySet[column-1];
+ }
+
+ @Override
+ public String getSchemaName(int column) throws SQLException {
+
+ return this._schema;
+ }
+
+ @Override
+ public int getPrecision(int column) throws SQLException {
+ // gets the column width (precision) of the specified column
+ return 0;
+ }
+
+ @Override
+ public int getScale(int column) throws SQLException {
+ // retrieves the number of digits to the right of the decimal point
+ return 0;
+ }
+
+ @Override
+ public String getTableName(int column) throws SQLException {
+
+ return this._table;
+ }
+
+ @Override
+ public String getCatalogName(int column) throws SQLException {
+
+ return this._schema;
+ }
+
+ @Override
+ public int getColumnType(int column) throws SQLException {
+ // column type
+ return keytype[column-1];//Types.VARCHAR;
+ }
+
+ @Override
+ public String getColumnTypeName(int column) throws SQLException {
+ // database-specific type name
+ switch (keytype[column-1]){
+ case Types.INTEGER: return "INTEGER";
+ case Types.BOOLEAN: return "BOOLEAN";
+ case Types.BIT: return "BITT";
+ case Types.FLOAT: return "FLOAT";
+ case Types.BIGINT: return "BIGINT";
+ case Types.DOUBLE: return "DOUBLE";
+ case Types.DATE: return "DATE";
+ case Types.TIME: return "TIME";
+ case Types.TIMESTAMP: return "TIMESTAMP";
+ default: return "varchar";
+ }
+ }
+
+ @Override
+ public boolean isReadOnly(int column) throws SQLException {
+ // indicates whether the specified column is definitely not writable
+ return false;
+ }
+
+ @Override
+ public boolean isWritable(int column) throws SQLException {
+
+ return false;
+ }
+
+ @Override
+ public boolean isDefinitelyWritable(int column) throws SQLException {
+
+ return false;
+ }
+
+ @Override
+ public String getColumnClassName(int column) throws SQLException {
+ // the fully qualified name of the Java class whose instances are constructed when ResultSet.getObject is called to fetch values from the column
+ return "Object";
+ }
+
+}
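A sketch of the metadata above answering column queries for a two-column result; the column names and type codes are illustrative and chosen to exercise getColumnTypeName()'s switch (note the default branch reports lowercase "varchar").

import java.sql.SQLException;
import java.sql.Types;

import io.mycat.backend.jdbc.mongodb.MongoResultSetMetaData;

public class MetaDataSketch {
    public static void main(String[] args) throws SQLException {
        String[] select = {"_id", "age"};
        int[] types = {Types.VARCHAR, Types.INTEGER};
        MongoResultSetMetaData md = new MongoResultSetMetaData(select, types, "testdb", "customer");
        for (int column = 1; column <= md.getColumnCount(); column++) {
            System.out.println(md.getColumnName(column) + " : " + md.getColumnTypeName(column));
        }
        // _id : varchar
        // age : INTEGER
    }
}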
diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoSQLParser.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoSQLParser.java
index dd8544478..3f033c21d 100644
--- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoSQLParser.java
+++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoSQLParser.java
@@ -1,428 +1,438 @@
-package io.mycat.backend.jdbc.mongodb;
-
-
-
-import java.sql.Types;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.alibaba.druid.sql.ast.SQLExpr;
-import com.alibaba.druid.sql.ast.SQLOrderBy;
-import com.alibaba.druid.sql.ast.SQLOrderingSpecification;
-import com.alibaba.druid.sql.ast.SQLStatement;
-import com.alibaba.druid.sql.ast.expr.SQLAggregateExpr;
-import com.alibaba.druid.sql.ast.expr.SQLAllColumnExpr;
-import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr;
-import com.alibaba.druid.sql.ast.expr.SQLBooleanExpr;
-import com.alibaba.druid.sql.ast.expr.SQLCharExpr;
-import com.alibaba.druid.sql.ast.expr.SQLIntegerExpr;
-import com.alibaba.druid.sql.ast.expr.SQLNullExpr;
-import com.alibaba.druid.sql.ast.expr.SQLNumberExpr;
-import com.alibaba.druid.sql.ast.expr.SQLVariantRefExpr;
-import com.alibaba.druid.sql.ast.statement.SQLCreateTableStatement;
-import com.alibaba.druid.sql.ast.statement.SQLDeleteStatement;
-import com.alibaba.druid.sql.ast.statement.SQLDropTableStatement;
-import com.alibaba.druid.sql.ast.statement.SQLInsertStatement;
-import com.alibaba.druid.sql.ast.statement.SQLSelectGroupByClause;
-import com.alibaba.druid.sql.ast.statement.SQLSelectItem;
-import com.alibaba.druid.sql.ast.statement.SQLSelectOrderByItem;
-import com.alibaba.druid.sql.ast.statement.SQLSelectQuery;
-import com.alibaba.druid.sql.ast.statement.SQLSelectStatement;
-import com.alibaba.druid.sql.ast.statement.SQLTableSource;
-import com.alibaba.druid.sql.ast.statement.SQLUpdateSetItem;
-import com.alibaba.druid.sql.ast.statement.SQLUpdateStatement;
-import com.alibaba.druid.sql.dialect.mysql.ast.expr.MySqlSelectGroupByExpr;
-import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock;
-import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;
-import com.mongodb.BasicDBList;
-import com.mongodb.BasicDBObject;
-import com.mongodb.DB;
-import com.mongodb.DBCollection;
-import com.mongodb.DBCursor;
-import com.mongodb.DBObject;
-/**
- * 功能详细描述
- * @author sohudo[http://blog.csdn.net/wind520]
- * @create 2014年12月19日 下午6:50:23
- * @version 0.0.1
- */
-public class MongoSQLParser {
- private static final Logger LOGGER = LoggerFactory.getLogger(MongoSQLParser.class);
- private final DB _db;
-// private final String _sql;
- private final SQLStatement statement;
- private List _params;
- private int _pos;
- public MongoSQLParser(DB db, String sql) throws MongoSQLException
- {
- this._db = db;
- // this._sql = sql;
- this.statement = parser(sql);
- }
-
- public SQLStatement parser(String s) throws MongoSQLException
- {
- s = s.trim();
- try
- {
- MySqlStatementParser parser = new MySqlStatementParser(s);
- return parser.parseStatement();
- }
- catch (Exception e)
- {
- LOGGER.error("MongoSQLParser.parserError", e);
- }
- throw new MongoSQLException.ErrorSQL(s);
- }
-
- public void setParams(List params)
- {
- this._pos = 1;
- this._params = params;
- }
-
- public MongoData query() throws MongoSQLException{
- if (!(statement instanceof SQLSelectStatement)) {
- //return null;
- throw new IllegalArgumentException("not a query sql statement");
- }
- MongoData mongo=new MongoData();
- DBCursor c=null;
- SQLSelectStatement selectStmt = (SQLSelectStatement)statement;
- SQLSelectQuery sqlSelectQuery =selectStmt.getSelect().getQuery();
- int icount=0;
- if(sqlSelectQuery instanceof MySqlSelectQueryBlock) {
- MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery();
-
- BasicDBObject fields = new BasicDBObject();
- //显示的字段
- for(SQLSelectItem item : mysqlSelectQuery.getSelectList()) {
- //System.out.println(item.toString());
- if (!(item.getExpr() instanceof SQLAllColumnExpr)) {
- if (item.getExpr() instanceof SQLAggregateExpr) {
- SQLAggregateExpr expr =(SQLAggregateExpr)item.getExpr();
- if (expr.getMethodName().equals("COUNT")) {
- icount=1;
- mongo.setField(getExprFieldName(expr), Types.BIGINT);
- }
- fields.put(getExprFieldName(expr), Integer.valueOf(1));
- }
- else {
- fields.put(getFieldName(item), Integer.valueOf(1));
- }
- }
-
- }
-
- //表名
- SQLTableSource table=mysqlSelectQuery.getFrom();
- DBCollection coll =this._db.getCollection(table.toString());
- mongo.setTable(table.toString());
-
- SQLExpr expr=mysqlSelectQuery.getWhere();
- DBObject query = parserWhere(expr);
- //System.out.println(query);
- SQLSelectGroupByClause groupby=mysqlSelectQuery.getGroupBy();
- BasicDBObject gbkey = new BasicDBObject();
- if (groupby!=null) {
- for (SQLExpr gbexpr:groupby.getItems()){
- if (gbexpr instanceof MySqlSelectGroupByExpr) {
- SQLExpr gbyexpr=((MySqlSelectGroupByExpr) gbexpr).getExpr();
- gbkey.put(getFieldName2(gbyexpr), Integer.valueOf(1));
- }
- }
- icount=2;
- }
- int limitoff=0;
- int limitnum=0;
- if (mysqlSelectQuery.getLimit()!=null) {
- limitoff=getSQLExprToInt(mysqlSelectQuery.getLimit().getOffset());
- limitnum=getSQLExprToInt(mysqlSelectQuery.getLimit().getRowCount());
- }
-
- if (icount==1) {
- mongo.setCount(coll.count(query));
- }
- else if (icount==2){
- BasicDBObject initial = new BasicDBObject();
- initial.put("num", 0);
- String reduce="function (obj, prev) { "
- +" prev.num++}";
- mongo.setGrouyBy(coll.group(gbkey, query, initial, reduce));
- }
- else {
- if ((limitoff>0) || (limitnum>0)) {
- c = coll.find(query, fields).skip(limitoff).limit(limitnum);
- }
- else {
- c = coll.find(query, fields);
- }
-
- SQLOrderBy orderby=mysqlSelectQuery.getOrderBy();
- if (orderby != null ){
- BasicDBObject order = new BasicDBObject();
- for (int i = 0; i < orderby.getItems().size(); i++)
- {
- SQLSelectOrderByItem orderitem = orderby.getItems().get(i);
- order.put(orderitem.getExpr().toString(), Integer.valueOf(getSQLExprToAsc(orderitem.getType())));
- }
- c.sort(order);
- // System.out.println(order);
- }
- }
- mongo.setCursor(c);
- }
- return mongo;
- }
-
- public int executeUpdate() throws MongoSQLException {
- if (statement instanceof SQLInsertStatement) {
- return InsertData((SQLInsertStatement)statement);
- }
- if (statement instanceof SQLUpdateStatement) {
- return UpData((SQLUpdateStatement)statement);
- }
- if (statement instanceof SQLDropTableStatement) {
- return dropTable((SQLDropTableStatement)statement);
- }
- if (statement instanceof SQLDeleteStatement) {
- return DeleteDate((SQLDeleteStatement)statement);
- }
- if (statement instanceof SQLCreateTableStatement) {
- return 1;
- }
- return 1;
-
- }
- private int InsertData(SQLInsertStatement state) {
- if (state.getValues().getValues().size() ==0 ){
- throw new RuntimeException("number of columns error");
- }
- if (state.getValues().getValues().size() != state.getColumns().size()){
- throw new RuntimeException("number of values and columns have to match");
- }
- SQLTableSource table=state.getTableSource();
- BasicDBObject o = new BasicDBObject();
- int i=0;
- for(SQLExpr col : state.getColumns()) {
- o.put(getFieldName2(col), getExpValue(state.getValues().getValues().get(i)));
- i++;
- }
- DBCollection coll =this._db.getCollection(table.toString());
- coll.insert(new DBObject[] { o });
- return 1;
- }
- private int UpData(SQLUpdateStatement state) {
- SQLTableSource table=state.getTableSource();
- DBCollection coll =this._db.getCollection(table.toString());
-
- SQLExpr expr=state.getWhere();
- DBObject query = parserWhere(expr);
-
- BasicDBObject set = new BasicDBObject();
- for(SQLUpdateSetItem col : state.getItems()){
- set.put(getFieldName2(col.getColumn()), getExpValue(col.getValue()));
- }
- DBObject mod = new BasicDBObject("$set", set);
- coll.updateMulti(query, mod);
- //System.out.println("changs count:"+coll.getStats().size());
- return 1;
- }
- private int DeleteDate(SQLDeleteStatement state) {
- SQLTableSource table=state.getTableSource();
- DBCollection coll =this._db.getCollection(table.toString());
-
- SQLExpr expr=state.getWhere();
- if (expr==null) {
- throw new RuntimeException("not where of sql");
- }
- DBObject query = parserWhere(expr);
-
- coll.remove(query);
-
- return 1;
-
- }
- private int dropTable(SQLDropTableStatement state) {
- for (SQLTableSource table : state.getTableSources()){
- DBCollection coll =this._db.getCollection(table.toString());
- coll.drop();
- }
- return 1;
-
- }
-
- private int getSQLExprToInt(SQLExpr expr){
- if (expr instanceof SQLIntegerExpr){
- return ((SQLIntegerExpr)expr).getNumber().intValue();
- }
- return 0;
- }
- private int getSQLExprToAsc(SQLOrderingSpecification ASC){
- if (ASC==null ) return 1;
- if (ASC==SQLOrderingSpecification.DESC){
- return -1;
- }
- else {
- return 1;
- }
- }
- public String remove(String resource,char ch)
- {
- StringBuffer buffer=new StringBuffer();
- int position=0;
- char currentChar;
-
- while(position")) op="$gt";
- if (expr.getOperator().getName().equals(">=")) op="$gte";
-
- if (expr.getOperator().getName().equals("!=")) op="$ne";
- if (expr.getOperator().getName().equals("<>")) op="$ne";
- //xo.put(op, getExpValue(expr.getRight()));
- // o.put(exprL.toString(),xo);
- parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight()));
- }
- }
- }
- private void parserWhere(SQLExpr aexpr,BasicDBObject o){
- if(aexpr instanceof SQLBinaryOpExpr){
- SQLBinaryOpExpr expr=(SQLBinaryOpExpr)aexpr;
- SQLExpr exprL=expr.getLeft();
- if (!(exprL instanceof SQLBinaryOpExpr))
- {
- //opSQLExpr((SQLBinaryOpExpr)aexpr,o);
- if (expr.getOperator().getName().equals("=")) {
- o.put(exprL.toString(), getExpValue(expr.getRight()));
- }
- else {
- String op="";
- if (expr.getOperator().getName().equals("<")) op="$lt";
- if (expr.getOperator().getName().equals("<=")) op="$lte";
- if (expr.getOperator().getName().equals(">")) op="$gt";
- if (expr.getOperator().getName().equals(">=")) op="$gte";
-
- if (expr.getOperator().getName().equals("!=")) op="$ne";
- if (expr.getOperator().getName().equals("<>")) op="$ne";
-
- parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight()));
- }
-
- }
- else {
- if (expr.getOperator().getName().equals("AND")) {
- parserWhere(exprL,o);
- parserWhere(expr.getRight(),o);
- }
- else if (expr.getOperator().getName().equals("OR")) {
- orWhere(exprL,expr.getRight(),o);
- }
- else {
- throw new RuntimeException("Can't identify the operation of of where");
- }
- }
- }
-
- }
-
-
- private void orWhere(SQLExpr exprL,SQLExpr exprR ,BasicDBObject ob){
- BasicDBObject xo = new BasicDBObject();
- BasicDBObject yo = new BasicDBObject();
- parserWhere(exprL,xo);
- parserWhere(exprR,yo);
- ob.put("$or",new Object[]{xo,yo});
- }
-}
+package io.mycat.backend.jdbc.mongodb;
+
+
+
+import java.sql.Types;
+import java.util.List;
+
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.mongodb.BasicDBList;
+import com.mongodb.BasicDBObject;
+import com.mongodb.DB;
+import com.mongodb.DBCollection;
+import com.mongodb.DBCursor;
+import com.mongodb.DBObject;
+import com.alibaba.druid.sql.ast.SQLExpr;
+import com.alibaba.druid.sql.ast.SQLOrderingSpecification;
+import com.alibaba.druid.sql.ast.SQLStatement;
+
+import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock;
+import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;
+import com.alibaba.druid.sql.ast.statement.*;
+import com.alibaba.druid.sql.ast.expr.*;
+import com.alibaba.druid.sql.ast.*;
+/**
+ * Detailed function description
+ * @author sohudo[http://blog.csdn.net/wind520]
+ * @create 2014-12-19 18:50:23
+ * @version 0.0.1
+ */
+public class MongoSQLParser {
+ private static final Logger LOGGER = LoggerFactory.getLogger(MongoSQLParser.class);
+ private final DB _db;
+// private final String _sql;
+ private final SQLStatement statement;
+ private List _params;
+ private int _pos;
+ public MongoSQLParser(DB db, String sql) throws MongoSQLException
+ {
+ this._db = db;
+ // this._sql = sql;
+ this.statement = parser(sql);
+ }
+
+ public SQLStatement parser(String s) throws MongoSQLException
+ {
+ s = s.trim();
+ try
+ {
+ MySqlStatementParser parser = new MySqlStatementParser(s);
+ return parser.parseStatement();
+ }
+ catch (Exception e)
+ {
+ LOGGER.error("MongoSQLParser.parserError", e);
+ }
+ throw new MongoSQLException.ErrorSQL(s);
+ }
+
+ public void setParams(List params)
+ {
+ this._pos = 1;
+ this._params = params;
+ }
+
+ public MongoData query() throws MongoSQLException{
+ if (!(statement instanceof SQLSelectStatement)) {
+ //return null;
+ throw new IllegalArgumentException("not a query sql statement");
+ }
+ MongoData mongo=new MongoData();
+ DBCursor c=null;
+ SQLSelectStatement selectStmt = (SQLSelectStatement)statement;
+ SQLSelectQuery sqlSelectQuery =selectStmt.getSelect().getQuery();
+ int icount=0;
+ if(sqlSelectQuery instanceof MySqlSelectQueryBlock) {
+ MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery();
+
+ BasicDBObject fields = new BasicDBObject();
+ //Columns to project in the result
+ for(SQLSelectItem item : mysqlSelectQuery.getSelectList()) {
+ //System.out.println(item.toString());
+ if (!(item.getExpr() instanceof SQLAllColumnExpr)) {
+ if (item.getExpr() instanceof SQLAggregateExpr) {
+ SQLAggregateExpr expr =(SQLAggregateExpr)item.getExpr();
+ if (expr.getMethodName().equals("COUNT")) {
+ icount=1;
+ mongo.setField(getExprFieldName(expr), Types.BIGINT);
+ }
+ fields.put(getExprFieldName(expr), Integer.valueOf(1));
+ }
+ else {
+ fields.put(getFieldName(item), Integer.valueOf(1));
+ }
+ }
+
+ }
+
+ //Table name
+ SQLTableSource table=mysqlSelectQuery.getFrom();
+ DBCollection coll =this._db.getCollection(table.toString());
+ mongo.setTable(table.toString());
+
+ SQLExpr expr=mysqlSelectQuery.getWhere();
+ DBObject query = parserWhere(expr);
+ //System.out.println(query);
+ SQLSelectGroupByClause groupby=mysqlSelectQuery.getGroupBy();
+ BasicDBObject gbkey = new BasicDBObject();
+ if (groupby!=null) {
+ for (SQLExpr gbexpr:groupby.getItems()){
+ if (gbexpr instanceof SQLIdentifierExpr) {
+ String name=((SQLIdentifierExpr) gbexpr).getName();
+ gbkey.put(name, Integer.valueOf(1));
+ }
+ }
+ icount=2;
+ }
+ int limitoff=0;
+ int limitnum=0;
+ if (mysqlSelectQuery.getLimit()!=null) {
+ limitoff=getSQLExprToInt(mysqlSelectQuery.getLimit().getOffset());
+ limitnum=getSQLExprToInt(mysqlSelectQuery.getLimit().getRowCount());
+ }
+
+ if (icount==1) {
+ mongo.setCount(coll.count(query));
+ }
+ else if (icount==2){
+ BasicDBObject initial = new BasicDBObject();
+ initial.put("num", 0);
+ String reduce="function (obj, prev) { "
+ +" prev.num++}";
+ mongo.setGrouyBy(coll.group(gbkey, query, initial, reduce));
+ }
+ else {
+ if ((limitoff>0) || (limitnum>0)) {
+ c = coll.find(query, fields).skip(limitoff).limit(limitnum);
+ }
+ else {
+ c = coll.find(query, fields);
+ }
+
+ SQLOrderBy orderby=mysqlSelectQuery.getOrderBy();
+ if (orderby != null ){
+ BasicDBObject order = new BasicDBObject();
+ for (int i = 0; i < orderby.getItems().size(); i++)
+ {
+ SQLSelectOrderByItem orderitem = orderby.getItems().get(i);
+ order.put(orderitem.getExpr().toString(), Integer.valueOf(getSQLExprToAsc(orderitem.getType())));
+ }
+ c.sort(order);
+ // System.out.println(order);
+ }
+ }
+ mongo.setCursor(c);
+ }
+ return mongo;
+ }
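+
+ // Rough sketch of the translation performed by query() above (sample SQL
+ // assumed for illustration, not taken from the project):
+ //   SELECT name, age FROM user WHERE age > 18 ORDER BY age DESC LIMIT 0, 10
+ // maps approximately to
+ //   db.user.find({age: {$gt: 18}}, {name: 1, age: 1}).skip(0).limit(10).sort({age: -1})
+ // COUNT(...) queries short-circuit to coll.count(query), and GROUP BY queries
+ // go through coll.group() with the key/initial/reduce arguments built above.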
+
+ public int executeUpdate() throws MongoSQLException {
+ if (statement instanceof SQLInsertStatement) {
+ return InsertData((SQLInsertStatement)statement);
+ }
+ if (statement instanceof SQLUpdateStatement) {
+ return UpData((SQLUpdateStatement)statement);
+ }
+ if (statement instanceof SQLDropTableStatement) {
+ return dropTable((SQLDropTableStatement)statement);
+ }
+ if (statement instanceof SQLDeleteStatement) {
+ return DeleteDate((SQLDeleteStatement)statement);
+ }
+ if (statement instanceof SQLCreateTableStatement) {
+ return 1;
+ }
+ return 1;
+
+ }
+ private int InsertData(SQLInsertStatement state) {
+ if (state.getValues().getValues().size() ==0 ){
+ throw new RuntimeException("number of columns error");
+ }
+ if (state.getValues().getValues().size() != state.getColumns().size()){
+ throw new RuntimeException("number of values and columns have to match");
+ }
+ SQLTableSource table=state.getTableSource();
+ BasicDBObject o = new BasicDBObject();
+ int i=0;
+ for(SQLExpr col : state.getColumns()) {
+ o.put(getFieldName2(col), getExpValue(state.getValues().getValues().get(i)));
+ i++;
+ }
+ DBCollection coll =this._db.getCollection(table.toString());
+ coll.insert(new DBObject[] { o });
+ return 1;
+ }
+ private int UpData(SQLUpdateStatement state) {
+ SQLTableSource table=state.getTableSource();
+ DBCollection coll =this._db.getCollection(table.toString());
+
+ SQLExpr expr=state.getWhere();
+ DBObject query = parserWhere(expr);
+
+ BasicDBObject set = new BasicDBObject();
+ for(SQLUpdateSetItem col : state.getItems()){
+ set.put(getFieldName2(col.getColumn()), getExpValue(col.getValue()));
+ }
+ DBObject mod = new BasicDBObject("$set", set);
+ coll.updateMulti(query, mod);
+ //System.out.println("changs count:"+coll.getStats().size());
+ return 1;
+ }
+ private int DeleteDate(SQLDeleteStatement state) {
+ SQLTableSource table=state.getTableSource();
+ DBCollection coll =this._db.getCollection(table.toString());
+
+ SQLExpr expr=state.getWhere();
+ if (expr==null) {
+ throw new RuntimeException("not where of sql");
+ }
+ DBObject query = parserWhere(expr);
+
+ coll.remove(query);
+
+ return 1;
+
+ }
+ private int dropTable(SQLDropTableStatement state) {
+ for (SQLTableSource table : state.getTableSources()){
+ DBCollection coll =this._db.getCollection(table.toString());
+ coll.drop();
+ }
+ return 1;
+
+ }
+
+ private int getSQLExprToInt(SQLExpr expr){
+ if (expr instanceof SQLIntegerExpr){
+ return ((SQLIntegerExpr)expr).getNumber().intValue();
+ }
+ return 0;
+ }
+ private int getSQLExprToAsc(SQLOrderingSpecification ASC){
+ if (ASC==null ) {
+ return 1;
+ }
+ if (ASC==SQLOrderingSpecification.DESC){
+ return -1;
+ }
+ else {
+ return 1;
+ }
+ }
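+
+ // getSQLExprToAsc above follows MongoDB's sort-document convention:
+ // ORDER BY ... DESC maps to -1, while ASC or no explicit direction maps to 1.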
+ public String remove(String resource,char ch)
+ {
+ StringBuffer buffer=new StringBuffer();
+ int position=0;
+ char currentChar;
+
+ while(position")) {
+ op = "$gt";
+ }
+ if (expr.getOperator().getName().equals(">=")) {
+ op = "$gte";
+ }
+
+ if (expr.getOperator().getName().equals("!=")) {
+ op = "$ne";
+ }
+ if (expr.getOperator().getName().equals("<>")) {
+ op = "$ne";
+ }
+ //xo.put(op, getExpValue(expr.getRight()));
+ // o.put(exprL.toString(),xo);
+ parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight()));
+ }
+ }
+ }
+ private void parserWhere(SQLExpr aexpr,BasicDBObject o){
+ if(aexpr instanceof SQLBinaryOpExpr){
+ SQLBinaryOpExpr expr=(SQLBinaryOpExpr)aexpr;
+ SQLExpr exprL=expr.getLeft();
+ if (!(exprL instanceof SQLBinaryOpExpr))
+ {
+ //opSQLExpr((SQLBinaryOpExpr)aexpr,o);
+ if (expr.getOperator().getName().equals("=")) {
+ o.put(exprL.toString(), getExpValue(expr.getRight()));
+ }
+ else {
+ String op="";
+ if (expr.getOperator().getName().equals("<")) {
+ op = "$lt";
+ }
+ if (expr.getOperator().getName().equals("<=")) {
+ op = "$lte";
+ }
+ if (expr.getOperator().getName().equals(">")) {
+ op = "$gt";
+ }
+ if (expr.getOperator().getName().equals(">=")) {
+ op = "$gte";
+ }
+
+ if (expr.getOperator().getName().equals("!=")) {
+ op = "$ne";
+ }
+ if (expr.getOperator().getName().equals("<>")) {
+ op = "$ne";
+ }
+
+ parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight()));
+ }
+
+ }
+ else {
+ if (expr.getOperator().getName().equals("AND")) {
+ parserWhere(exprL,o);
+ parserWhere(expr.getRight(),o);
+ }
+ else if (expr.getOperator().getName().equals("OR")) {
+ orWhere(exprL,expr.getRight(),o);
+ }
+ else {
+ throw new RuntimeException("Can't identify the operation of of where");
+ }
+ }
+ }
+
+ }
+
+
+ private void orWhere(SQLExpr exprL,SQLExpr exprR ,BasicDBObject ob){
+ BasicDBObject xo = new BasicDBObject();
+ BasicDBObject yo = new BasicDBObject();
+ parserWhere(exprL,xo);
+ parserWhere(exprR,yo);
+ ob.put("$or",new Object[]{xo,yo});
+ }
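+
+ // Example of the WHERE translation above (assumed input, for illustration):
+ //   a = 1 AND b > 2   ->  {a: 1, b: {$gt: 2}}
+ //   a = 1 OR  b > 2   ->  {$or: [{a: 1}, {b: {$gt: 2}}]}   (via orWhere)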
+}
diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoStatement.java b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoStatement.java
index 48c880159..833af9609 100644
--- a/src/main/java/io/mycat/backend/jdbc/mongodb/MongoStatement.java
+++ b/src/main/java/io/mycat/backend/jdbc/mongodb/MongoStatement.java
@@ -1,5 +1,7 @@
package io.mycat.backend.jdbc.mongodb;
+import com.mongodb.DBCursor;
+
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
@@ -28,12 +30,15 @@ public MongoStatement(MongoConnection conn, int type, int concurrency, int holda
this._concurrency = concurrency;
this._holdability = holdability;
- if (this._type != 0)
- throw new UnsupportedOperationException("type not supported yet");
- if (this._concurrency != 0)
- throw new UnsupportedOperationException("concurrency not supported yet");
- if (this._holdability != 0)
- throw new UnsupportedOperationException("holdability not supported yet");
+ if (this._type != 0) {
+ throw new UnsupportedOperationException("type not supported yet");
+ }
+ if (this._concurrency != 0) {
+ throw new UnsupportedOperationException("concurrency not supported yet");
+ }
+ if (this._holdability != 0) {
+ throw new UnsupportedOperationException("holdability not supported yet");
+ }
}
@Override
@@ -52,12 +57,11 @@ public boolean isWrapperFor(Class<?> iface) throws SQLException {
public ResultSet executeQuery(String sql) throws SQLException {
MongoData mongo= new MongoSQLParser(this._conn.getDB(), sql).query();
- if (this._fetchSize > 0) {
+ if ((this._fetchSize > 0)
+ && (mongo.getCursor()!=null)) {
 //Set the maximum number of records fetched per network round trip
- if (mongo.getCursor()!=null) {
mongo.getCursor().batchSize(this._fetchSize);
- }
- }
+ }
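+ // For example, a caller that did stmt.setFetchSize(100) (hypothetical usage)
+ // would cap each MongoDB batch at 100 documents via DBCursor.batchSize().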
/*
if (this._maxRows > 0)
{
diff --git a/src/main/java/io/mycat/backend/jdbc/mongodb/StringUtils.java b/src/main/java/io/mycat/backend/jdbc/mongodb/StringUtils.java
index 6b6ea4ccd..fcf4243e8 100644
--- a/src/main/java/io/mycat/backend/jdbc/mongodb/StringUtils.java
+++ b/src/main/java/io/mycat/backend/jdbc/mongodb/StringUtils.java
@@ -1,16 +1,16 @@
-package io.mycat.backend.jdbc.mongodb;
-
-
-public class StringUtils {
-
-
- public static boolean startsWithIgnoreCase(String searchIn, int startAt,
- String searchFor) {
- return searchIn.regionMatches(true, startAt, searchFor, 0, searchFor
- .length());
- }
-
- public static boolean startsWithIgnoreCase(String searchIn, String searchFor) {
- return startsWithIgnoreCase(searchIn, 0, searchFor);
- }
+package io.mycat.backend.jdbc.mongodb;
+
+
+public class StringUtils {
+
+
+ public static boolean startsWithIgnoreCase(String searchIn, int startAt,
+ String searchFor) {
+ return searchIn.regionMatches(true, startAt, searchFor, 0, searchFor
+ .length());
+ }
+
+ public static boolean startsWithIgnoreCase(String searchIn, String searchFor) {
+ return startsWithIgnoreCase(searchIn, 0, searchFor);
+ }
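+
+ // Usage sketch (values assumed): startsWithIgnoreCase("SELECT * FROM t", "select")
+ // returns true, because the region match starting at offset 0 ignores case.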
}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/DriverPropertyInfoHelper.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/DriverPropertyInfoHelper.java
index 4948cee89..16d25f395 100644
--- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/DriverPropertyInfoHelper.java
+++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/DriverPropertyInfoHelper.java
@@ -1,70 +1,70 @@
-package io.mycat.backend.jdbc.sequoiadb;
-
-import java.sql.DriverPropertyInfo;
-import java.util.ArrayList;
-
-
-public class DriverPropertyInfoHelper{
-
- public static final String AUTO_CONNECT_RETRY = "autoConnectRetry";
-
- public static final String CONNECTIONS_PER_HOST = "connecionsPerHost";
-
- public static final String CONNECT_TIMEOUT = "connectTimeout";
-
- public static final String CURSOR_FINALIZER_ENABLED = "cursorFinalizerEnabled";
-
- public static final String MAX_AUTO_CONNECT_RETRY_TIME = "maxAutoConnectRetryTime";
-
- public static final String READ_PREFERENCE = "readPreference";
-
- public static final String SOCKET_TIMEOUT = "socketTimeout";
-
- public DriverPropertyInfo[] getPropertyInfo()
- {
- ArrayList propInfos = new ArrayList();
-
- addPropInfo(
- propInfos,
- AUTO_CONNECT_RETRY,
- "false",
- "If true, the driver will keep trying to connect to the same server in case that the socket "
- + "cannot be established. There is maximum amount of time to keep retrying, which is 15s by "
- + "default.", null);
-
- addPropInfo(propInfos, CONNECTIONS_PER_HOST, "10", "The maximum number of connections allowed per "
- + "host for this Mongo instance. Those connections will be kept in a pool when idle.", null);
-
- addPropInfo(propInfos, CONNECT_TIMEOUT, "10000", "The connection timeout in milliseconds. ", null);
-
- addPropInfo(propInfos, CURSOR_FINALIZER_ENABLED, "true", "Sets whether there is a a finalize "
- + "method created that cleans up instances of DBCursor that the client does not close.",
- null);
-
- addPropInfo(propInfos, MAX_AUTO_CONNECT_RETRY_TIME, "0",
- "The maximum amount of time in MS to spend retrying to open connection to the same server."
- + "Default is 0, which means to use the default 15s if autoConnectRetry is on.", null);
-
- addPropInfo(propInfos, READ_PREFERENCE, "primary",
- "represents preferred replica set members to which a query or command can be sent", new String[] {
- "primary", "primary preferred", "secondary", "secondary preferred", "nearest" });
-
- addPropInfo(propInfos, SOCKET_TIMEOUT, "0", "The socket timeout in milliseconds It is used for "
- + "I/O socket read and write operations "
- + "Socket.setSoTimeout(int) Default is 0 and means no timeout.", null);
-
- return propInfos.toArray(new DriverPropertyInfo[propInfos.size()]);
- }
-
- private void addPropInfo(final ArrayList propInfos, final String propName,
- final String defaultVal, final String description, final String[] choices)
- {
- DriverPropertyInfo newProp = new DriverPropertyInfo(propName, defaultVal);
- newProp.description = description;
- if (choices != null)
- {
- newProp.choices = choices;
- }
- propInfos.add(newProp);
- }
+package io.mycat.backend.jdbc.sequoiadb;
+
+import java.sql.DriverPropertyInfo;
+import java.util.ArrayList;
+
+
+public class DriverPropertyInfoHelper{
+
+ public static final String AUTO_CONNECT_RETRY = "autoConnectRetry";
+
+ public static final String CONNECTIONS_PER_HOST = "connectionsPerHost";
+
+ public static final String CONNECT_TIMEOUT = "connectTimeout";
+
+ public static final String CURSOR_FINALIZER_ENABLED = "cursorFinalizerEnabled";
+
+ public static final String MAX_AUTO_CONNECT_RETRY_TIME = "maxAutoConnectRetryTime";
+
+ public static final String READ_PREFERENCE = "readPreference";
+
+ public static final String SOCKET_TIMEOUT = "socketTimeout";
+
+ public DriverPropertyInfo[] getPropertyInfo()
+ {
+ ArrayList propInfos = new ArrayList();
+
+ addPropInfo(
+ propInfos,
+ AUTO_CONNECT_RETRY,
+ "false",
+ "If true, the driver will keep trying to connect to the same server in case that the socket "
+ + "cannot be established. There is maximum amount of time to keep retrying, which is 15s by "
+ + "default.", null);
+
+ addPropInfo(propInfos, CONNECTIONS_PER_HOST, "10", "The maximum number of connections allowed per "
+ + "host for this Mongo instance. Those connections will be kept in a pool when idle.", null);
+
+ addPropInfo(propInfos, CONNECT_TIMEOUT, "10000", "The connection timeout in milliseconds. ", null);
+
+ addPropInfo(propInfos, CURSOR_FINALIZER_ENABLED, "true", "Sets whether there is a finalize "
+ + "method created that cleans up instances of DBCursor that the client does not close.",
+ null);
+
+ addPropInfo(propInfos, MAX_AUTO_CONNECT_RETRY_TIME, "0",
+ "The maximum amount of time in MS to spend retrying to open connection to the same server."
+ + "Default is 0, which means to use the default 15s if autoConnectRetry is on.", null);
+
+ addPropInfo(propInfos, READ_PREFERENCE, "primary",
+ "represents preferred replica set members to which a query or command can be sent", new String[] {
+ "primary", "primary preferred", "secondary", "secondary preferred", "nearest" });
+
+ addPropInfo(propInfos, SOCKET_TIMEOUT, "0", "The socket timeout in milliseconds. It is used for "
+ + "I/O socket read and write operations "
+ + "(Socket.setSoTimeout(int)). Default is 0, which means no timeout.", null);
+
+ return propInfos.toArray(new DriverPropertyInfo[propInfos.size()]);
+ }
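+
+ // A java.sql.Driver implementation could surface these entries roughly like
+ // this (the enclosing Driver class is assumed, not part of this change):
+ //   public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) {
+ //       return new DriverPropertyInfoHelper().getPropertyInfo();
+ //   }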
+
+ private void addPropInfo(final ArrayList propInfos, final String propName,
+ final String defaultVal, final String description, final String[] choices)
+ {
+ DriverPropertyInfo newProp = new DriverPropertyInfo(propName, defaultVal);
+ newProp.description = description;
+ if (choices != null)
+ {
+ newProp.choices = choices;
+ }
+ propInfos.add(newProp);
+ }
}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaConnection.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaConnection.java
index 368b4b261..e99a2be88 100644
--- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaConnection.java
+++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaConnection.java
@@ -20,8 +20,8 @@
import java.util.Properties;
import java.util.concurrent.Executor;
-import com.sequoiadb.base.CollectionSpace;
import com.sequoiadb.base.Sequoiadb;
+import com.sequoiadb.base.CollectionSpace;
import com.sequoiadb.exception.BaseException;
/**
 * Detailed function description
@@ -51,12 +51,16 @@ public SequoiaConnection(String url, String db) throws UnknownHostException {
public CollectionSpace getDB() {
if (this._schema!=null) {
- if (mc.isCollectionSpaceExist(this._schema))
+ if (mc.isCollectionSpaceExist(this._schema)) {
return this.mc.getCollectionSpace(this._schema);
- else
+ }
+ else {
return this.mc.createCollectionSpace(this._schema);
+ }
+ }
+ else {
+ return null;
}
- else return null;
}
@Override
@@ -80,8 +84,9 @@ public String nativeSQL(String sql) throws SQLException {
@Override
public void setAutoCommit(boolean autoCommit) throws SQLException {
- if (!autoCommit)
- throw new RuntimeException("autoCommit has to be on");
+ if (!autoCommit) {
+ throw new RuntimeException("autoCommit has to be on");
+ }
}
@Override
diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaData.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaData.java
index 357620069..3de061c8b 100644
--- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaData.java
+++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaData.java
@@ -1,131 +1,131 @@
-package io.mycat.backend.jdbc.sequoiadb;
-
-import java.sql.Date;
-import java.sql.Time;
-import java.sql.Timestamp;
-import java.sql.Types;
-import java.util.HashMap;
-
-import org.bson.BSONObject;
-import org.bson.types.BasicBSONList;
-
-import com.sequoiadb.base.DBCursor;
-
-public class SequoiaData {
-
- private DBCursor cursor;
- private long count;
- private String table;
- private BSONObject groupby;
-
- private HashMap map = new HashMap();
- private boolean type=false;
-
- public SequoiaData(){
- this.count=0;
- this.cursor=null;
- }
-
- public long getCount() {
- return this.count;
- }
-
-
- public void setCount(long count) {
- this.count=count;
- }
-
- public String getTable() {
- return this.table;
- }
-
- public void setTable(String table) {
- this.table=table;
- }
-
- public BSONObject getGrouyBy() {
- return this.groupby;
- }
-
- public BasicBSONList getGrouyBys() {
- if (this.groupby instanceof BasicBSONList) {
- return (BasicBSONList)this.groupby;
- }
- else {
- return null;
- }
- }
- public void setGrouyBy(BSONObject gb) {
- this.groupby=gb;
- this.type=true;
- if (gb instanceof BasicBSONList) {
- Object gb2=((BasicBSONList)gb).get(0);
- if (gb2 instanceof BSONObject) {
- for (String field :((BSONObject)gb2).keySet()) {
- Object val = ((BSONObject)gb2).get(field);
- setField(field,getObjectToType(val));
- }
- }
- }
- }
-
- public static int getObjectToType(Object ob){
- if (ob instanceof Integer) {
- return Types.INTEGER;
- }
- else if (ob instanceof Boolean) {
- return Types.BOOLEAN;
- }
- else if (ob instanceof Byte) {
- return Types.BIT;
- }
- else if (ob instanceof Short) {
- return Types.INTEGER;
- }
- else if (ob instanceof Float) {
- return Types.FLOAT;
- }
- else if (ob instanceof Long) {
- return Types.BIGINT;
- }
- else if (ob instanceof Double) {
- return Types.DOUBLE;
- }
- else if (ob instanceof Date) {
- return Types.DATE;
- }
- else if (ob instanceof Time) {
- return Types.TIME;
- }
- else if (ob instanceof Timestamp) {
- return Types.TIMESTAMP;
- }
- else if (ob instanceof String) {
- return Types.VARCHAR;
- }
- else {
- return Types.VARCHAR;
- }
- }
-
- public void setField(String field,int ftype) {
- map.put(field, ftype);
- }
-
- public HashMap getFields() {
- return this.map;
- }
-
- public boolean getType() {
- return this.type;
- }
-
- public DBCursor getCursor() {
- return this.cursor;
- }
-
- public DBCursor setCursor(DBCursor cursor) {
- return this.cursor=cursor;
- }
-
-}
+package io.mycat.backend.jdbc.sequoiadb;
+
+import java.sql.Date;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.sql.Types;
+import java.util.HashMap;
+
+import org.bson.BSONObject;
+import org.bson.BasicBSONObject;
+import org.bson.types.BasicBSONList;
+import com.sequoiadb.base.DBCursor;
+
+public class SequoiaData {
+
+ private DBCursor cursor;
+ private long count;
+ private String table;
+ private BSONObject groupby;
+
+ private HashMap map = new HashMap();
+ private boolean type=false;
+
+ public SequoiaData(){
+ this.count=0;
+ this.cursor=null;
+ }
+
+ public long getCount() {
+ return this.count;
+ }
+
+
+ public void setCount(long count) {
+ this.count=count;
+ }
+
+ public String getTable() {
+ return this.table;
+ }
+
+ public void setTable(String table) {
+ this.table=table;
+ }
+
+ public BSONObject getGrouyBy() {
+ return this.groupby;
+ }
+
+ public BasicBSONList getGrouyBys() {
+ if (this.groupby instanceof BasicBSONList) {
+ return (BasicBSONList)this.groupby;
+ }
+ else {
+ return null;
+ }
+ }
+ public void setGrouyBy(BSONObject gb) {
+ this.groupby=gb;
+ this.type=true;
+ if (gb instanceof BasicBSONList) {
+ Object gb2=((BasicBSONList)gb).get(0);
+ if (gb2 instanceof BSONObject) {
+ for (String field :((BSONObject)gb2).keySet()) {
+ Object val = ((BSONObject)gb2).get(field);
+ setField(field,getObjectToType(val));
+ }
+ }
+ }
+ }
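+
+ // Note on setGrouyBy above: when the grouped result is a BasicBSONList, the
+ // keys of its first element seed the field/type map so that result-set
+ // metadata can be derived later via getFields().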
+
+ public static int getObjectToType(Object ob){
+ if (ob instanceof Integer) {
+ return Types.INTEGER;
+ }
+ else if (ob instanceof Boolean) {
+ return Types.BOOLEAN;
+ }
+ else if (ob instanceof Byte) {
+ return Types.BIT;
+ }
+ else if (ob instanceof Short) {
+ return Types.INTEGER;
+ }
+ else if (ob instanceof Float) {
+ return Types.FLOAT;
+ }
+ else if (ob instanceof Long) {
+ return Types.BIGINT;
+ }
+ else if (ob instanceof Double) {
+ return Types.DOUBLE;
+ }
+ else if (ob instanceof Date) {
+ return Types.DATE;
+ }
+ else if (ob instanceof Time) {
+ return Types.TIME;
+ }
+ else if (ob instanceof Timestamp) {
+ return Types.TIMESTAMP;
+ }
+ else if (ob instanceof String) {
+ return Types.VARCHAR;
+ }
+ else {
+ return Types.VARCHAR;
+ }
+ }
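+
+ // Example (assumed values): getObjectToType(Long.valueOf(1L)) yields
+ // Types.BIGINT; any type not listed above falls back to Types.VARCHAR.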
+
+ public void setField(String field,int ftype) {
+ map.put(field, ftype);
+ }
+
+ public HashMap getFields() {
+ return this.map;
+ }
+
+ public boolean getType() {
+ return this.type;
+ }
+
+ public DBCursor getCursor() {
+ return this.cursor;
+ }
+
+ public DBCursor setCursor(DBCursor cursor) {
+ return this.cursor=cursor;
+ }
+
+}
diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaPreparedStatement.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaPreparedStatement.java
index 0b0ee4098..25f5a4f93 100644
--- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaPreparedStatement.java
+++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaPreparedStatement.java
@@ -1,408 +1,409 @@
-package io.mycat.backend.jdbc.sequoiadb;
-
-import java.io.InputStream;
-import java.io.Reader;
-import java.math.BigDecimal;
-import java.net.URL;
-import java.sql.Array;
-import java.sql.Blob;
-import java.sql.Clob;
-import java.sql.Date;
-import java.sql.NClob;
-import java.sql.ParameterMetaData;
-import java.sql.PreparedStatement;
-import java.sql.Ref;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
-import java.sql.RowId;
-import java.sql.SQLException;
-import java.sql.SQLXML;
-import java.sql.Time;
-import java.sql.Timestamp;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.List;
-/**
- * 功能详细描述
- * @author sohudo[http://blog.csdn.net/wind520]
- * @create 2014年12月19日 下午6:50:23
- * @version 0.0.1
- */
-public class SequoiaPreparedStatement extends SequoiaStatement implements
- PreparedStatement {
- final String _sql;
- final SequoiaSQLParser _mongosql;
- List _params = new ArrayList();
-
- public SequoiaPreparedStatement(SequoiaConnection conn, int type,
- int concurrency, int holdability, String sql)
- throws SequoiaSQLException {
- super(conn, type, concurrency, holdability);
- this._sql = sql;
- this._mongosql = new SequoiaSQLParser(conn.getDB(), sql);
- }
-
- @Override
- public ResultSet executeQuery() throws SQLException {
-
- return null;
- }
-
- @Override
- public int executeUpdate() throws SQLException {
-
- this._mongosql.setParams(this._params);
- return this._mongosql.executeUpdate();
- }
-
- public void setValue(int idx, Object o) {
- while (this._params.size() <= idx)
- this._params.add(null);
- this._params.set(idx, o);
- }
-
- @Override
- public void setNull(int parameterIndex, int sqlType) throws SQLException {
-
-
- }
-
- @Override
- public void setBoolean(int parameterIndex, boolean x) throws SQLException {
-
- setValue(parameterIndex, Boolean.valueOf(x));
- }
-
- @Override
- public void setByte(int parameterIndex, byte x) throws SQLException {
-
- setValue(parameterIndex, Byte.valueOf(x));
- }
-
- @Override
- public void setShort(int parameterIndex, short x) throws SQLException {
-
- setValue(parameterIndex, Short.valueOf(x));
- }
-
- @Override
- public void setInt(int parameterIndex, int x) throws SQLException {
-
- setValue(parameterIndex, Integer.valueOf(x));
- }
-
- @Override
- public void setLong(int parameterIndex, long x) throws SQLException {
-
- setValue(parameterIndex, Long.valueOf(x));
- }
-
- @Override
- public void setFloat(int parameterIndex, float x) throws SQLException {
-
- setValue(parameterIndex, Float.valueOf(x));
- }
-
- @Override
- public void setDouble(int parameterIndex, double x) throws SQLException {
-
- setValue(parameterIndex, Double.valueOf(x));
- }
-
- @Override
- public void setBigDecimal(int parameterIndex, BigDecimal x)
- throws SQLException {
-
- setValue(parameterIndex, x);
- }
-
- @Override
- public void setString(int parameterIndex, String x) throws SQLException {
-
- setValue(parameterIndex, x);
- }
-
- @Override
- public void setBytes(int parameterIndex, byte[] x) throws SQLException {
-
- setValue(parameterIndex, x);
- }
-
- @Override
- public void setDate(int parameterIndex, Date x) throws SQLException {
-
- setValue(parameterIndex, x);
- }
-
- @Override
- public void setTime(int parameterIndex, Time x) throws SQLException {
-
- setValue(parameterIndex, x);
- }
-
- @Override
- public void setTimestamp(int parameterIndex, Timestamp x)
- throws SQLException {
-
- setValue(parameterIndex, x);
- }
-
- @Override
- public void setAsciiStream(int parameterIndex, InputStream x, int length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setUnicodeStream(int parameterIndex, InputStream x, int length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setBinaryStream(int parameterIndex, InputStream x, int length)
- throws SQLException {
-
-
- }
-
- @Override
- public void clearParameters() throws SQLException {
-
-
- }
-
- @Override
- public void setObject(int parameterIndex, Object x, int targetSqlType)
- throws SQLException {
-
-
- }
-
- @Override
- public void setObject(int parameterIndex, Object x) throws SQLException {
-
- setValue(parameterIndex,x);
- }
-
- @Override
- public boolean execute() throws SQLException {
-
- return false;
- }
-
- @Override
- public void addBatch() throws SQLException {
-
-
- }
-
- @Override
- public void setCharacterStream(int parameterIndex, Reader reader, int length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setRef(int parameterIndex, Ref x) throws SQLException {
-
-
- }
-
- @Override
- public void setBlob(int parameterIndex, Blob x) throws SQLException {
-
-
- }
-
- @Override
- public void setClob(int parameterIndex, Clob x) throws SQLException {
-
-
- }
-
- @Override
- public void setArray(int parameterIndex, Array x) throws SQLException {
-
-
- }
-
- @Override
- public ResultSetMetaData getMetaData() throws SQLException {
-
- return null;
- }
-
- @Override
- public void setDate(int parameterIndex, Date x, Calendar cal)
- throws SQLException {
-
-
- }
-
- @Override
- public void setTime(int parameterIndex, Time x, Calendar cal)
- throws SQLException {
-
-
- }
-
- @Override
- public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal)
- throws SQLException {
-
-
- }
-
- @Override
- public void setNull(int parameterIndex, int sqlType, String typeName)
- throws SQLException {
-
-
- }
-
- @Override
- public void setURL(int parameterIndex, URL x) throws SQLException {
-
-
- }
-
- @Override
- public ParameterMetaData getParameterMetaData() throws SQLException {
-
- return null;
- }
-
- @Override
- public void setRowId(int parameterIndex, RowId x) throws SQLException {
-
-
- }
-
- @Override
- public void setNString(int parameterIndex, String value)
- throws SQLException {
-
-
- }
-
- @Override
- public void setNCharacterStream(int parameterIndex, Reader value,
- long length) throws SQLException {
-
-
- }
-
- @Override
- public void setNClob(int parameterIndex, NClob value) throws SQLException {
-
-
- }
-
- @Override
- public void setClob(int parameterIndex, Reader reader, long length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setBlob(int parameterIndex, InputStream inputStream, long length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setNClob(int parameterIndex, Reader reader, long length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setSQLXML(int parameterIndex, SQLXML xmlObject)
- throws SQLException {
-
-
- }
-
- @Override
- public void setObject(int parameterIndex, Object x, int targetSqlType,
- int scaleOrLength) throws SQLException {
-
-
- }
-
- @Override
- public void setAsciiStream(int parameterIndex, InputStream x, long length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setBinaryStream(int parameterIndex, InputStream x, long length)
- throws SQLException {
-
-
- }
-
- @Override
- public void setCharacterStream(int parameterIndex, Reader reader,
- long length) throws SQLException {
-
-
- }
-
- @Override
- public void setAsciiStream(int parameterIndex, InputStream x)
- throws SQLException {
-
-
- }
-
- @Override
- public void setBinaryStream(int parameterIndex, InputStream x)
- throws SQLException {
-
-
- }
-
- @Override
- public void setCharacterStream(int parameterIndex, Reader reader)
- throws SQLException {
-
-
- }
-
- @Override
- public void setNCharacterStream(int parameterIndex, Reader value)
- throws SQLException {
-
-
- }
-
- @Override
- public void setClob(int parameterIndex, Reader reader) throws SQLException {
-
-
- }
-
- @Override
- public void setBlob(int parameterIndex, InputStream inputStream)
- throws SQLException {
-
-
- }
-
- @Override
- public void setNClob(int parameterIndex, Reader reader) throws SQLException {
-
-
- }
-
-}
+package io.mycat.backend.jdbc.sequoiadb;
+
+import java.io.InputStream;
+import java.io.Reader;
+import java.math.BigDecimal;
+import java.net.URL;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.Clob;
+import java.sql.Date;
+import java.sql.NClob;
+import java.sql.ParameterMetaData;
+import java.sql.PreparedStatement;
+import java.sql.Ref;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.RowId;
+import java.sql.SQLException;
+import java.sql.SQLXML;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.List;
+/**
+ * Detailed function description
+ * @author sohudo[http://blog.csdn.net/wind520]
+ * @create 2014-12-19 18:50:23
+ * @version 0.0.1
+ */
+public class SequoiaPreparedStatement extends SequoiaStatement implements
+ PreparedStatement {
+ final String _sql;
+ final SequoiaSQLParser _mongosql;
+ List _params = new ArrayList();
+
+ public SequoiaPreparedStatement(SequoiaConnection conn, int type,
+ int concurrency, int holdability, String sql)
+ throws SequoiaSQLException {
+ super(conn, type, concurrency, holdability);
+ this._sql = sql;
+ this._mongosql = new SequoiaSQLParser(conn.getDB(), sql);
+ }
+
+ @Override
+ public ResultSet executeQuery() throws SQLException {
+
+ return null;
+ }
+
+ @Override
+ public int executeUpdate() throws SQLException {
+
+ this._mongosql.setParams(this._params);
+ return this._mongosql.executeUpdate();
+ }
+
+ public void setValue(int idx, Object o) {
+ while (this._params.size() <= idx) {
+ this._params.add(null);
+ }
+ this._params.set(idx, o);
+ }
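+
+ // Example (assumed call): setInt(2, 5) stores Integer.valueOf(5) at index 2,
+ // padding the parameter list with nulls up to that index first.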
+
+ @Override
+ public void setNull(int parameterIndex, int sqlType) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBoolean(int parameterIndex, boolean x) throws SQLException {
+
+ setValue(parameterIndex, Boolean.valueOf(x));
+ }
+
+ @Override
+ public void setByte(int parameterIndex, byte x) throws SQLException {
+
+ setValue(parameterIndex, Byte.valueOf(x));
+ }
+
+ @Override
+ public void setShort(int parameterIndex, short x) throws SQLException {
+
+ setValue(parameterIndex, Short.valueOf(x));
+ }
+
+ @Override
+ public void setInt(int parameterIndex, int x) throws SQLException {
+
+ setValue(parameterIndex, Integer.valueOf(x));
+ }
+
+ @Override
+ public void setLong(int parameterIndex, long x) throws SQLException {
+
+ setValue(parameterIndex, Long.valueOf(x));
+ }
+
+ @Override
+ public void setFloat(int parameterIndex, float x) throws SQLException {
+
+ setValue(parameterIndex, Float.valueOf(x));
+ }
+
+ @Override
+ public void setDouble(int parameterIndex, double x) throws SQLException {
+
+ setValue(parameterIndex, Double.valueOf(x));
+ }
+
+ @Override
+ public void setBigDecimal(int parameterIndex, BigDecimal x)
+ throws SQLException {
+
+ setValue(parameterIndex, x);
+ }
+
+ @Override
+ public void setString(int parameterIndex, String x) throws SQLException {
+
+ setValue(parameterIndex, x);
+ }
+
+ @Override
+ public void setBytes(int parameterIndex, byte[] x) throws SQLException {
+
+ setValue(parameterIndex, x);
+ }
+
+ @Override
+ public void setDate(int parameterIndex, Date x) throws SQLException {
+
+ setValue(parameterIndex, x);
+ }
+
+ @Override
+ public void setTime(int parameterIndex, Time x) throws SQLException {
+
+ setValue(parameterIndex, x);
+ }
+
+ @Override
+ public void setTimestamp(int parameterIndex, Timestamp x)
+ throws SQLException {
+
+ setValue(parameterIndex, x);
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x, int length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setUnicodeStream(int parameterIndex, InputStream x, int length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x, int length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void clearParameters() throws SQLException {
+
+
+ }
+
+ @Override
+ public void setObject(int parameterIndex, Object x, int targetSqlType)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setObject(int parameterIndex, Object x) throws SQLException {
+
+ setValue(parameterIndex,x);
+ }
+
+ @Override
+ public boolean execute() throws SQLException {
+
+ return false;
+ }
+
+ @Override
+ public void addBatch() throws SQLException {
+
+
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader, int length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setRef(int parameterIndex, Ref x) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, Blob x) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Clob x) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setArray(int parameterIndex, Array x) throws SQLException {
+
+
+ }
+
+ @Override
+ public ResultSetMetaData getMetaData() throws SQLException {
+
+ return null;
+ }
+
+ @Override
+ public void setDate(int parameterIndex, Date x, Calendar cal)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setTime(int parameterIndex, Time x, Calendar cal)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNull(int parameterIndex, int sqlType, String typeName)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setURL(int parameterIndex, URL x) throws SQLException {
+
+
+ }
+
+ @Override
+ public ParameterMetaData getParameterMetaData() throws SQLException {
+
+ return null;
+ }
+
+ @Override
+ public void setRowId(int parameterIndex, RowId x) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNString(int parameterIndex, String value)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNCharacterStream(int parameterIndex, Reader value,
+ long length) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, NClob value) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Reader reader, long length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, InputStream inputStream, long length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, Reader reader, long length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setSQLXML(int parameterIndex, SQLXML xmlObject)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setObject(int parameterIndex, Object x, int targetSqlType,
+ int scaleOrLength) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x, long length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x, long length)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader,
+ long length) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNCharacterStream(int parameterIndex, Reader value)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Reader reader) throws SQLException {
+
+
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, InputStream inputStream)
+ throws SQLException {
+
+
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, Reader reader) throws SQLException {
+
+
+ }
+
+}
diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaResultSet.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaResultSet.java
index 6501e52a5..8cda05568 100644
--- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaResultSet.java
+++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaResultSet.java
@@ -1,6 +1,10 @@
package io.mycat.backend.jdbc.sequoiadb;
+import com.sequoiadb.base.DBCursor;
+import org.bson.BSONObject;
+import org.bson.types.BasicBSONList;
+
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
@@ -27,11 +31,6 @@
//import java.util.HashMap;
import java.util.Map;
import java.util.Set;
-
-import org.bson.BSONObject;
-import org.bson.types.BasicBSONList;
-
-import com.sequoiadb.base.DBCursor;
/**
 * Detailed function description
* @author sohudo[http://blog.csdn.net/wind520]
@@ -103,7 +102,9 @@ public void SetFieldType(boolean isid) throws SQLException {
if (isid) {
fieldtype= new int[Types.VARCHAR];
}
- else fieldtype= new int[this.select.length];
+ else {
+ fieldtype = new int[this.select.length];
+ }
if (_cur!=null) {
for (int i=0;i keySet,String schema) {
- super();
- this.keySet = new String[keySet.size()];
- this.keySet = keySet.toArray(this.keySet);
- this._schema = schema;
- }
- */
- public SequoiaResultSetMetaData(String[] select,int [] ftype,String schema,String table) {
- super();
- this.keySet = select;
- this.keytype=ftype;
- this._schema = schema;
- this._table =table;
- }
-
- @Override
- public T unwrap(Class iface) throws SQLException {
-
- return null;
- }
-
- @Override
- public boolean isWrapperFor(Class> iface) throws SQLException {
-
- return false;
- }
-
- @Override
- public int getColumnCount() throws SQLException {
- if (keySet==null) return 0;
- else
- return keySet.length;
- }
-
- @Override
- public boolean isAutoIncrement(int column) throws SQLException {
- // 是否为自动编号的字段
- return false;
- }
-
- @Override
- public boolean isCaseSensitive(int column) throws SQLException {
- //指示列的大小写是否有关系
- return true;
- }
-
- @Override
- public boolean isSearchable(int column) throws SQLException {
- //指示是否可以在 where 子句中使用指定的列
- return true;
- }
-
- @Override
- public boolean isCurrency(int column) throws SQLException {
- // 指示指定的列是否是一个哈希代码值
- return false;
- }
-
- @Override
- public int isNullable(int column) throws SQLException {
- // 指示指定列中的值是否可以为 null。
- return 0;
- }
-
- @Override
- public boolean isSigned(int column) throws SQLException {
- // 指示指定列中的值是否带正负号
- return false;
- }
-
- @Override
- public int getColumnDisplaySize(int column) throws SQLException {
-
- return 50;
- }
-
- @Override
- public String getColumnLabel(int column) throws SQLException {
- return keySet[column-1];
- }
-
- @Override
- public String getColumnName(int column) throws SQLException {
- return keySet[column-1];
- }
-
- @Override
- public String getSchemaName(int column) throws SQLException {
-
- return this._schema;
- }
-
- @Override
- public int getPrecision(int column) throws SQLException {
- //获取指定列的指定列宽
- return 0;
- }
-
- @Override
- public int getScale(int column) throws SQLException {
- // 检索指定参数的小数点右边的位数。
- return 0;
- }
-
- @Override
- public String getTableName(int column) throws SQLException {
-
- return this._table;
- }
-
- @Override
- public String getCatalogName(int column) throws SQLException {
-
- return this._schema;
- }
-
- @Override
- public int getColumnType(int column) throws SQLException {
- // 字段类型
- return keytype[column-1];//Types.VARCHAR;
- }
-
- @Override
- public String getColumnTypeName(int column) throws SQLException {
- // 数据库特定的类型名称
- switch (keytype[column-1]){
- case Types.INTEGER: return "INTEGER";
- case Types.BOOLEAN: return "BOOLEAN";
- case Types.BIT: return "BITT";
- case Types.FLOAT: return "FLOAT";
- case Types.BIGINT: return "BIGINT";
- case Types.DOUBLE: return "DOUBLE";
- case Types.DATE: return "DATE";
- case Types.TIME: return "TIME";
- case Types.TIMESTAMP: return "TIMESTAMP";
- default: return "varchar";
- }
- }
-
- @Override
- public boolean isReadOnly(int column) throws SQLException {
- //指示指定的列是否明确不可写入
- return false;
- }
-
- @Override
- public boolean isWritable(int column) throws SQLException {
-
- return false;
- }
-
- @Override
- public boolean isDefinitelyWritable(int column) throws SQLException {
-
- return false;
- }
-
- @Override
- public String getColumnClassName(int column) throws SQLException {
- // 如果调用方法 ResultSet.getObject 从列中获取值,则返回构造其实例的 Java 类的完全限定名称
- return "Object";
- }
-
-}
+package io.mycat.backend.jdbc.sequoiadb;
+
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Types;
+
+/**
+ * Detailed function description
+ * @author sohudo[http://blog.csdn.net/wind520]
+ * @create 2014-12-19 18:50:23
+ * @version 0.0.1
+ */
+
+public class SequoiaResultSetMetaData implements ResultSetMetaData {
+
+ private String[] keySet ;
+ private int[] keytype ;
+ private String _schema;
+ private String _table;
+
+ /*
+ public MongoResultSetMetaData(Set keySet,String schema) {
+ super();
+ this.keySet = new String[keySet.size()];
+ this.keySet = keySet.toArray(this.keySet);
+ this._schema = schema;
+ }
+ */
+ public SequoiaResultSetMetaData(String[] select,int [] ftype,String schema,String table) {
+ super();
+ this.keySet = select;
+ this.keytype=ftype;
+ this._schema = schema;
+ this._table =table;
+ }
+
+ @Override
+ public <T> T unwrap(Class<T> iface) throws SQLException {
+
+ return null;
+ }
+
+ @Override
+ public boolean isWrapperFor(Class<?> iface) throws SQLException {
+
+ return false;
+ }
+
+ @Override
+ public int getColumnCount() throws SQLException {
+ if (keySet==null) {
+ return 0;
+ }
+ else {
+ return keySet.length;
+ }
+ }
+
+ @Override
+ public boolean isAutoIncrement(int column) throws SQLException {
+ // Whether the column is automatically numbered (auto-increment)
+ return false;
+ }
+
+ @Override
+ public boolean isCaseSensitive(int column) throws SQLException {
+ //Indicates whether the column's case matters (case sensitivity)
+ return true;
+ }
+
+ @Override
+ public boolean isSearchable(int column) throws SQLException {
+ //Indicates whether the specified column can be used in a WHERE clause
+ return true;
+ }
+
+ @Override
+ public boolean isCurrency(int column) throws SQLException {
+ // Indicates whether the specified column is a cash (currency) value
+ return false;
+ }
+
+ @Override
+ public int isNullable(int column) throws SQLException {
+ // Indicates whether values in the specified column can be null.
+ return 0;
+ }
+
+ @Override
+ public boolean isSigned(int column) throws SQLException {
+ // Indicates whether values in the specified column are signed numbers
+ return false;
+ }
+
+ @Override
+ public int getColumnDisplaySize(int column) throws SQLException {
+
+ return 50;
+ }
+
+ @Override
+ public String getColumnLabel(int column) throws SQLException {
+ return keySet[column-1];
+ }
+
+ @Override
+ public String getColumnName(int column) throws SQLException {
+ return keySet[column-1];
+ }
+
+ @Override
+ public String getSchemaName(int column) throws SQLException {
+
+ return this._schema;
+ }
+
+ @Override
+ public int getPrecision(int column) throws SQLException {
+ //Gets the designated column's specified column size (precision)
+ return 0;
+ }
+
+ @Override
+ public int getScale(int column) throws SQLException {
+ // Retrieves the number of digits to the right of the decimal point for the column.
+ return 0;
+ }
+
+ @Override
+ public String getTableName(int column) throws SQLException {
+
+ return this._table;
+ }
+
+ @Override
+ public String getCatalogName(int column) throws SQLException {
+
+ return this._schema;
+ }
+
+ @Override
+ public int getColumnType(int column) throws SQLException {
+ // Column type
+ return keytype[column-1];//Types.VARCHAR;
+ }
+
+ @Override
+ public String getColumnTypeName(int column) throws SQLException {
+ // Database-specific type name
+ switch (keytype[column-1]){
+ case Types.INTEGER: return "INTEGER";
+ case Types.BOOLEAN: return "BOOLEAN";
+ case Types.BIT: return "BITT";
+ case Types.FLOAT: return "FLOAT";
+ case Types.BIGINT: return "BIGINT";
+ case Types.DOUBLE: return "DOUBLE";
+ case Types.DATE: return "DATE";
+ case Types.TIME: return "TIME";
+ case Types.TIMESTAMP: return "TIMESTAMP";
+ default: return "varchar";
+ }
+ }
+
+ @Override
+ public boolean isReadOnly(int column) throws SQLException {
+		//indicates whether the column is definitely not writable
+ return false;
+ }
+
+ @Override
+ public boolean isWritable(int column) throws SQLException {
+
+ return false;
+ }
+
+ @Override
+ public boolean isDefinitelyWritable(int column) throws SQLException {
+
+ return false;
+ }
+
+ @Override
+ public String getColumnClassName(int column) throws SQLException {
+		// returns the fully qualified name of the Java class whose instances are created when ResultSet.getObject is called to fetch a value from this column
+		return "java.lang.Object";
+ }
+
+}
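For reference, a minimal sketch of how a ResultSetMetaData implementation like the one above is typically consumed; the MetaDataDump class name is illustrative and not part of this patch. Column indexes are 1-based, matching the keySet[column-1] lookups above.

    import java.sql.ResultSetMetaData;
    import java.sql.SQLException;

    public class MetaDataDump {
        // Print each column's name and the SQL type reported by the driver.
        public static void dump(ResultSetMetaData md) throws SQLException {
            for (int i = 1; i <= md.getColumnCount(); i++) {
                System.out.println(md.getColumnName(i) + " -> " + md.getColumnTypeName(i)
                        + " (java.sql.Types code " + md.getColumnType(i) + ")");
            }
        }
    }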
diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaSQLParser.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaSQLParser.java
index c430ac572..17b11c442 100644
--- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaSQLParser.java
+++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaSQLParser.java
@@ -1,440 +1,451 @@
-package io.mycat.backend.jdbc.sequoiadb;
-
-
-
-import java.sql.Types;
-import java.util.List;
-
-import org.bson.BSONObject;
-import org.bson.BasicBSONObject;
-import org.bson.types.BasicBSONList;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.alibaba.druid.sql.ast.SQLExpr;
-import com.alibaba.druid.sql.ast.SQLOrderBy;
-import com.alibaba.druid.sql.ast.SQLOrderingSpecification;
-import com.alibaba.druid.sql.ast.SQLStatement;
-import com.alibaba.druid.sql.ast.expr.SQLAggregateExpr;
-import com.alibaba.druid.sql.ast.expr.SQLAllColumnExpr;
-import com.alibaba.druid.sql.ast.expr.SQLBinaryOpExpr;
-import com.alibaba.druid.sql.ast.expr.SQLBooleanExpr;
-import com.alibaba.druid.sql.ast.expr.SQLCharExpr;
-import com.alibaba.druid.sql.ast.expr.SQLIntegerExpr;
-import com.alibaba.druid.sql.ast.expr.SQLNullExpr;
-import com.alibaba.druid.sql.ast.expr.SQLNumberExpr;
-import com.alibaba.druid.sql.ast.expr.SQLVariantRefExpr;
-import com.alibaba.druid.sql.ast.statement.SQLCreateTableStatement;
-import com.alibaba.druid.sql.ast.statement.SQLDeleteStatement;
-import com.alibaba.druid.sql.ast.statement.SQLDropTableStatement;
-import com.alibaba.druid.sql.ast.statement.SQLInsertStatement;
-import com.alibaba.druid.sql.ast.statement.SQLSelectGroupByClause;
-import com.alibaba.druid.sql.ast.statement.SQLSelectItem;
-import com.alibaba.druid.sql.ast.statement.SQLSelectOrderByItem;
-import com.alibaba.druid.sql.ast.statement.SQLSelectQuery;
-import com.alibaba.druid.sql.ast.statement.SQLSelectStatement;
-import com.alibaba.druid.sql.ast.statement.SQLTableSource;
-import com.alibaba.druid.sql.ast.statement.SQLUpdateSetItem;
-import com.alibaba.druid.sql.ast.statement.SQLUpdateStatement;
-import com.alibaba.druid.sql.dialect.mysql.ast.expr.MySqlSelectGroupByExpr;
-import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock;
-import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;
-import com.sequoiadb.base.CollectionSpace;
-import com.sequoiadb.base.DBCollection;
-import com.sequoiadb.base.DBCursor;
-/**
- * SQL parser that maps SQL statements onto SequoiaDB collection operations.
- * @author sohudo[http://blog.csdn.net/wind520]
- * @create 2014-12-19 18:50:23
- * @version 0.0.1
- */
-public class SequoiaSQLParser {
- private static final Logger LOGGER = LoggerFactory.getLogger(SequoiaSQLParser.class);
- private final CollectionSpace _db;
-// private final String _sql;
- private final SQLStatement statement;
- private List _params;
- private int _pos;
- public SequoiaSQLParser(CollectionSpace db, String sql) throws SequoiaSQLException
- {
- this._db = db;
- // this._sql = sql;
- this.statement = parser(sql);
- }
-
- public SQLStatement parser(String s) throws SequoiaSQLException
- {
- s = s.trim();
- try
- {
- MySqlStatementParser parser = new MySqlStatementParser(s);
- return parser.parseStatement();
- }
- catch (Exception e)
- {
- LOGGER.error("MongoSQLParser.parserError", e);
- }
- throw new SequoiaSQLException.ErrorSQL(s);
- }
-
- public void setParams(List params)
- {
- this._pos = 1;
- this._params = params;
- }
-
- public SequoiaData query() throws SequoiaSQLException{
- if (!(statement instanceof SQLSelectStatement)) {
- //return null;
- throw new IllegalArgumentException("not a query sql statement");
- }
- SequoiaData mongo=new SequoiaData();
- DBCursor c=null;
- SQLSelectStatement selectStmt = (SQLSelectStatement)statement;
- SQLSelectQuery sqlSelectQuery =selectStmt.getSelect().getQuery();
- int icount=0;
- if(sqlSelectQuery instanceof MySqlSelectQueryBlock) {
- MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery();
-
- BasicBSONObject fields = new BasicBSONObject();
-            //fields to project (select list)
- for(SQLSelectItem item : mysqlSelectQuery.getSelectList()) {
- //System.out.println(item.toString());
- if (!(item.getExpr() instanceof SQLAllColumnExpr)) {
- if (item.getExpr() instanceof SQLAggregateExpr) {
- SQLAggregateExpr expr =(SQLAggregateExpr)item.getExpr();
- if (expr.getMethodName().equals("COUNT")) {
- icount=1;
- mongo.setField(getExprFieldName(expr), Types.BIGINT);
- }
- fields.put(getExprFieldName(expr), Integer.valueOf(1));
- }
- else {
- fields.put(getFieldName(item), Integer.valueOf(1));
- }
- }
-
- }
-
-            //table name
- SQLTableSource table=mysqlSelectQuery.getFrom();
- DBCollection coll =this._db.getCollection(table.toString());
- mongo.setTable(table.toString());
-
- SQLExpr expr=mysqlSelectQuery.getWhere();
- BSONObject query = parserWhere(expr);
- //System.out.println(query);
- SQLSelectGroupByClause groupby=mysqlSelectQuery.getGroupBy();
- BasicBSONObject gbkey = new BasicBSONObject();
- if (groupby!=null) {
- for (SQLExpr gbexpr:groupby.getItems()){
- if (gbexpr instanceof MySqlSelectGroupByExpr) {
- SQLExpr gbyexpr=((MySqlSelectGroupByExpr) gbexpr).getExpr();
- gbkey.put(getFieldName2(gbyexpr), Integer.valueOf(1));
- }
- }
- icount=2;
- }
- int limitoff=0;
- int limitnum=0;
- if (mysqlSelectQuery.getLimit()!=null) {
- limitoff=getSQLExprToInt(mysqlSelectQuery.getLimit().getOffset());
- limitnum=getSQLExprToInt(mysqlSelectQuery.getLimit().getRowCount());
- }
-
- SQLOrderBy orderby=mysqlSelectQuery.getOrderBy();
- BasicBSONObject order = new BasicBSONObject();
- if (orderby != null ){
- for (int i = 0; i < orderby.getItems().size(); i++)
- {
- SQLSelectOrderByItem orderitem = orderby.getItems().get(i);
- order.put(orderitem.getExpr().toString(), Integer.valueOf(getSQLExprToAsc(orderitem.getType())));
- }
- // c.sort(order);
- // System.out.println(order);
- }
-
- if (icount==1) {
- mongo.setCount(coll.getCount(query));
- }
- else if (icount==2){
- BasicBSONObject initial = new BasicBSONObject();
- initial.put("num", 0);
- String reduce="function (obj, prev) { "
- +" prev.num++}";
- //mongo.setGrouyBy(coll.group(gbkey, query, initial, reduce));
- }
- else {
- if ((limitoff>0) || (limitnum>0)) {
- c = coll.query(query, fields, order,null, limitoff, limitnum);//.skip(limitoff).limit(limitnum);
- }
- else {
- c = coll.query(query, fields, order,null, 0, -1);
- }
-
-
- }
- mongo.setCursor(c);
- }
- return mongo;
- }
-
- public int executeUpdate() throws SequoiaSQLException {
- if (statement instanceof SQLInsertStatement) {
- return InsertData((SQLInsertStatement)statement);
- }
- if (statement instanceof SQLUpdateStatement) {
- return UpData((SQLUpdateStatement)statement);
- }
- if (statement instanceof SQLDropTableStatement) {
- return dropTable((SQLDropTableStatement)statement);
- }
- if (statement instanceof SQLDeleteStatement) {
- return DeleteDate((SQLDeleteStatement)statement);
- }
- if (statement instanceof SQLCreateTableStatement) {
- return createTable((SQLCreateTableStatement)statement);
- }
- return 1;
-
- }
- private int InsertData(SQLInsertStatement state) {
- if (state.getValues().getValues().size() ==0 ){
- throw new RuntimeException("number of columns error");
- }
- if (state.getValues().getValues().size() != state.getColumns().size()){
- throw new RuntimeException("number of values and columns have to match");
- }
- SQLTableSource table=state.getTableSource();
- BSONObject o = new BasicBSONObject();
- int i=0;
- for(SQLExpr col : state.getColumns()) {
- o.put(getFieldName2(col), getExpValue(state.getValues().getValues().get(i)));
- i++;
- }
- DBCollection coll =this._db.getCollection(table.toString());
- //coll.insert(new DBObject[] { o });
- coll.insert(o);
- return 1;
- }
- private int UpData(SQLUpdateStatement state) {
- SQLTableSource table=state.getTableSource();
- DBCollection coll =this._db.getCollection(table.toString());
-
- SQLExpr expr=state.getWhere();
- BSONObject query = parserWhere(expr);
-
- BasicBSONObject set = new BasicBSONObject();
- for(SQLUpdateSetItem col : state.getItems()){
- set.put(getFieldName2(col.getColumn()), getExpValue(col.getValue()));
- }
- BSONObject mod = new BasicBSONObject("$set", set);
- //coll.updateMulti(query, mod);
- coll.update(query, mod, null);
- //System.out.println("changs count:"+coll.getStats().size());
- return 1;
- }
- private int DeleteDate(SQLDeleteStatement state) {
- SQLTableSource table=state.getTableSource();
- DBCollection coll =this._db.getCollection(table.toString());
-
- SQLExpr expr=state.getWhere();
- if (expr==null) {
- throw new RuntimeException("not where of sql");
- }
- BSONObject query = parserWhere(expr);
-
- //coll.remove(query);
- coll.delete(query);
- return 1;
-
- }
- private int dropTable(SQLDropTableStatement state) {
- for (SQLTableSource table : state.getTableSources()){
- //DBCollection coll =this._db.getCollection(table.toString());
- //coll.drop();
- this._db.dropCollection(table.toString());
- }
- return 1;
-
- }
-
- private int createTable(SQLCreateTableStatement state) {
- //for (SQLTableSource table : state.getTableSource()){
- if (!this._db.isCollectionExist(state.getTableSource().toString()))
- this._db.createCollection(state.getTableSource().toString());
- return 1;
- }
-
- private int getSQLExprToInt(SQLExpr expr){
- if (expr instanceof SQLIntegerExpr){
- return ((SQLIntegerExpr)expr).getNumber().intValue();
- }
- return 0;
- }
- private int getSQLExprToAsc(SQLOrderingSpecification ASC){
- if (ASC==null ) return 1;
- if (ASC==SQLOrderingSpecification.DESC){
- return -1;
- }
- else {
- return 1;
- }
- }
- public String remove(String resource,char ch)
- {
- StringBuffer buffer=new StringBuffer();
- int position=0;
- char currentChar;
-
- while(position")) op="$gt";
- if (expr.getOperator().getName().equals(">=")) op="$gte";
-
- if (expr.getOperator().getName().equals("!=")) op="$ne";
- if (expr.getOperator().getName().equals("<>")) op="$ne";
- //xo.put(op, getExpValue(expr.getRight()));
- // o.put(exprL.toString(),xo);
- parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight()));
- }
- }
- }
- private void parserWhere(SQLExpr aexpr,BasicBSONObject o){
- if(aexpr instanceof SQLBinaryOpExpr){
- SQLBinaryOpExpr expr=(SQLBinaryOpExpr)aexpr;
- SQLExpr exprL=expr.getLeft();
- if (!(exprL instanceof SQLBinaryOpExpr))
- {
- //opSQLExpr((SQLBinaryOpExpr)aexpr,o);
- if (expr.getOperator().getName().equals("=")) {
- o.put(exprL.toString(), getExpValue(expr.getRight()));
- }
- else {
- String op="";
- if (expr.getOperator().getName().equals("<")) op="$lt";
- if (expr.getOperator().getName().equals("<=")) op="$lte";
- if (expr.getOperator().getName().equals(">")) op="$gt";
- if (expr.getOperator().getName().equals(">=")) op="$gte";
-
- if (expr.getOperator().getName().equals("!=")) op="$ne";
- if (expr.getOperator().getName().equals("<>")) op="$ne";
-
- parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight()));
- }
-
- }
- else {
- if (expr.getOperator().getName().equals("AND")) {
- parserWhere(exprL,o);
- parserWhere(expr.getRight(),o);
- }
- else if (expr.getOperator().getName().equals("OR")) {
- orWhere(exprL,expr.getRight(),o);
- }
- else {
- throw new RuntimeException("Can't identify the operation of of where");
- }
- }
- }
-
- }
-
-
- private void orWhere(SQLExpr exprL,SQLExpr exprR ,BasicBSONObject ob){
- BasicBSONObject xo = new BasicBSONObject();
- BasicBSONObject yo = new BasicBSONObject();
- parserWhere(exprL,xo);
- parserWhere(exprR,yo);
- ob.put("$or",new Object[]{xo,yo});
- }
-}
+package io.mycat.backend.jdbc.sequoiadb;
+
+
+
+import java.sql.Types;
+import java.util.List;
+
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.sequoiadb.base.CollectionSpace;
+import com.sequoiadb.base.DBCollection;
+import com.sequoiadb.base.DBCursor;
+
+import org.bson.BSONObject;
+import org.bson.BasicBSONObject;
+import org.bson.types.BasicBSONList;
+
+import com.alibaba.druid.sql.ast.SQLExpr;
+import com.alibaba.druid.sql.ast.SQLOrderingSpecification;
+import com.alibaba.druid.sql.ast.SQLStatement;
+
+import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlSelectQueryBlock;
+import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;
+import com.alibaba.druid.sql.ast.statement.*;
+import com.alibaba.druid.sql.ast.expr.*;
+import com.alibaba.druid.sql.ast.*;
+/**
+ * SQL parser that maps SQL statements onto SequoiaDB collection operations.
+ * @author sohudo[http://blog.csdn.net/wind520]
+ * @create 2014-12-19 18:50:23
+ * @version 0.0.1
+ */
+public class SequoiaSQLParser {
+ private static final Logger LOGGER = LoggerFactory.getLogger(SequoiaSQLParser.class);
+ private final CollectionSpace _db;
+// private final String _sql;
+ private final SQLStatement statement;
+ private List _params;
+ private int _pos;
+ public SequoiaSQLParser(CollectionSpace db, String sql) throws SequoiaSQLException
+ {
+ this._db = db;
+ // this._sql = sql;
+ this.statement = parser(sql);
+ }
+
+ public SQLStatement parser(String s) throws SequoiaSQLException
+ {
+ s = s.trim();
+ try
+ {
+ MySqlStatementParser parser = new MySqlStatementParser(s);
+ return parser.parseStatement();
+ }
+ catch (Exception e)
+ {
+	    LOGGER.error("SequoiaSQLParser.parserError", e);
+ }
+ throw new SequoiaSQLException.ErrorSQL(s);
+ }
+
+ public void setParams(List params)
+ {
+ this._pos = 1;
+ this._params = params;
+ }
+
+ public SequoiaData query() throws SequoiaSQLException{
+ if (!(statement instanceof SQLSelectStatement)) {
+ //return null;
+ throw new IllegalArgumentException("not a query sql statement");
+ }
+ SequoiaData mongo=new SequoiaData();
+ DBCursor c=null;
+ SQLSelectStatement selectStmt = (SQLSelectStatement)statement;
+ SQLSelectQuery sqlSelectQuery =selectStmt.getSelect().getQuery();
+ int icount=0;
+ if(sqlSelectQuery instanceof MySqlSelectQueryBlock) {
+ MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock)selectStmt.getSelect().getQuery();
+
+ BasicBSONObject fields = new BasicBSONObject();
+            //fields to project (select list)
+ for(SQLSelectItem item : mysqlSelectQuery.getSelectList()) {
+ //System.out.println(item.toString());
+ if (!(item.getExpr() instanceof SQLAllColumnExpr)) {
+ if (item.getExpr() instanceof SQLAggregateExpr) {
+ SQLAggregateExpr expr =(SQLAggregateExpr)item.getExpr();
+ if (expr.getMethodName().equals("COUNT")) {
+ icount=1;
+ mongo.setField(getExprFieldName(expr), Types.BIGINT);
+ }
+ fields.put(getExprFieldName(expr), Integer.valueOf(1));
+ }
+ else {
+ fields.put(getFieldName(item), Integer.valueOf(1));
+ }
+ }
+
+ }
+
+            //table name
+ SQLTableSource table=mysqlSelectQuery.getFrom();
+ DBCollection coll =this._db.getCollection(table.toString());
+ mongo.setTable(table.toString());
+
+ SQLExpr expr=mysqlSelectQuery.getWhere();
+ BSONObject query = parserWhere(expr);
+ //System.out.println(query);
+ SQLSelectGroupByClause groupby=mysqlSelectQuery.getGroupBy();
+ BasicBSONObject gbkey = new BasicBSONObject();
+ if (groupby!=null) {
+ for (SQLExpr gbexpr:groupby.getItems()){
+ if (gbexpr instanceof SQLIdentifierExpr) {
+ String name =((SQLIdentifierExpr) gbexpr).getName();
+ gbkey.put(name, Integer.valueOf(1));
+ }
+ }
+ icount=2;
+ }
+ int limitoff=0;
+ int limitnum=0;
+ if (mysqlSelectQuery.getLimit()!=null) {
+ limitoff=getSQLExprToInt(mysqlSelectQuery.getLimit().getOffset());
+ limitnum=getSQLExprToInt(mysqlSelectQuery.getLimit().getRowCount());
+ }
+
+ SQLOrderBy orderby=mysqlSelectQuery.getOrderBy();
+ BasicBSONObject order = new BasicBSONObject();
+ if (orderby != null ){
+ for (int i = 0; i < orderby.getItems().size(); i++)
+ {
+ SQLSelectOrderByItem orderitem = orderby.getItems().get(i);
+ order.put(orderitem.getExpr().toString(), Integer.valueOf(getSQLExprToAsc(orderitem.getType())));
+ }
+ // c.sort(order);
+ // System.out.println(order);
+ }
+
+ if (icount==1) {
+ mongo.setCount(coll.getCount(query));
+ }
+ else if (icount==2){
+ BasicBSONObject initial = new BasicBSONObject();
+ initial.put("num", 0);
+ String reduce="function (obj, prev) { "
+ +" prev.num++}";
+ //mongo.setGrouyBy(coll.group(gbkey, query, initial, reduce));
+ }
+ else {
+ if ((limitoff>0) || (limitnum>0)) {
+ c = coll.query(query, fields, order,null, limitoff, limitnum);//.skip(limitoff).limit(limitnum);
+ }
+ else {
+ c = coll.query(query, fields, order,null, 0, -1);
+ }
+
+
+ }
+ mongo.setCursor(c);
+ }
+ return mongo;
+ }
+
+ public int executeUpdate() throws SequoiaSQLException {
+ if (statement instanceof SQLInsertStatement) {
+ return InsertData((SQLInsertStatement)statement);
+ }
+ if (statement instanceof SQLUpdateStatement) {
+ return UpData((SQLUpdateStatement)statement);
+ }
+ if (statement instanceof SQLDropTableStatement) {
+ return dropTable((SQLDropTableStatement)statement);
+ }
+ if (statement instanceof SQLDeleteStatement) {
+ return DeleteDate((SQLDeleteStatement)statement);
+ }
+ if (statement instanceof SQLCreateTableStatement) {
+ return createTable((SQLCreateTableStatement)statement);
+ }
+ return 1;
+
+ }
+ private int InsertData(SQLInsertStatement state) {
+ if (state.getValues().getValues().size() ==0 ){
+ throw new RuntimeException("number of columns error");
+ }
+ if (state.getValues().getValues().size() != state.getColumns().size()){
+ throw new RuntimeException("number of values and columns have to match");
+ }
+ SQLTableSource table=state.getTableSource();
+ BSONObject o = new BasicBSONObject();
+ int i=0;
+ for(SQLExpr col : state.getColumns()) {
+ o.put(getFieldName2(col), getExpValue(state.getValues().getValues().get(i)));
+ i++;
+ }
+ DBCollection coll =this._db.getCollection(table.toString());
+ //coll.insert(new DBObject[] { o });
+ coll.insert(o);
+ return 1;
+ }
+ private int UpData(SQLUpdateStatement state) {
+ SQLTableSource table=state.getTableSource();
+ DBCollection coll =this._db.getCollection(table.toString());
+
+ SQLExpr expr=state.getWhere();
+ BSONObject query = parserWhere(expr);
+
+ BasicBSONObject set = new BasicBSONObject();
+ for(SQLUpdateSetItem col : state.getItems()){
+ set.put(getFieldName2(col.getColumn()), getExpValue(col.getValue()));
+ }
+ BSONObject mod = new BasicBSONObject("$set", set);
+ //coll.updateMulti(query, mod);
+ coll.update(query, mod, null);
+ //System.out.println("changs count:"+coll.getStats().size());
+ return 1;
+ }
+ private int DeleteDate(SQLDeleteStatement state) {
+ SQLTableSource table=state.getTableSource();
+ DBCollection coll =this._db.getCollection(table.toString());
+
+ SQLExpr expr=state.getWhere();
+ if (expr==null) {
+		  throw new RuntimeException("delete sql has no where clause");
+ }
+ BSONObject query = parserWhere(expr);
+
+ //coll.remove(query);
+ coll.delete(query);
+ return 1;
+
+ }
+ private int dropTable(SQLDropTableStatement state) {
+ for (SQLTableSource table : state.getTableSources()){
+ //DBCollection coll =this._db.getCollection(table.toString());
+ //coll.drop();
+ this._db.dropCollection(table.toString());
+ }
+ return 1;
+
+ }
+
+ private int createTable(SQLCreateTableStatement state) {
+ //for (SQLTableSource table : state.getTableSource()){
+ if (!this._db.isCollectionExist(state.getTableSource().toString())) {
+ this._db.createCollection(state.getTableSource().toString());
+ }
+ return 1;
+ }
+
+ private int getSQLExprToInt(SQLExpr expr){
+ if (expr instanceof SQLIntegerExpr){
+ return ((SQLIntegerExpr)expr).getNumber().intValue();
+ }
+ return 0;
+ }
+ private int getSQLExprToAsc(SQLOrderingSpecification ASC){
+ if (ASC==null ) {
+ return 1;
+ }
+ if (ASC==SQLOrderingSpecification.DESC){
+ return -1;
+ }
+ else {
+ return 1;
+ }
+ }
+ public String remove(String resource,char ch)
+ {
+ StringBuffer buffer=new StringBuffer();
+ int position=0;
+ char currentChar;
+
+ while(position")) {
+ op = "$gt";
+ }
+ if (expr.getOperator().getName().equals(">=")) {
+ op = "$gte";
+ }
+ if (expr.getOperator().getName().equals("!=")) {
+ op = "$ne";
+ }
+ if (expr.getOperator().getName().equals("<>")) {
+ op = "$ne";
+ }
+ //xo.put(op, getExpValue(expr.getRight()));
+ // o.put(exprL.toString(),xo);
+ parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight()));
+ }
+ }
+ }
+ private void parserWhere(SQLExpr aexpr,BasicBSONObject o){
+ if(aexpr instanceof SQLBinaryOpExpr){
+ SQLBinaryOpExpr expr=(SQLBinaryOpExpr)aexpr;
+ SQLExpr exprL=expr.getLeft();
+ if (!(exprL instanceof SQLBinaryOpExpr))
+ {
+ //opSQLExpr((SQLBinaryOpExpr)aexpr,o);
+ if (expr.getOperator().getName().equals("=")) {
+ o.put(exprL.toString(), getExpValue(expr.getRight()));
+ }
+ else {
+ String op="";
+ if (expr.getOperator().getName().equals("<")) {
+ op = "$lt";
+ }
+ if (expr.getOperator().getName().equals("<=")) {
+ op = "$lte";
+ }
+ if (expr.getOperator().getName().equals(">")) {
+ op = "$gt";
+ }
+ if (expr.getOperator().getName().equals(">=")) {
+ op = "$gte";
+ }
+ if (expr.getOperator().getName().equals("!=")) {
+ op = "$ne";
+ }
+ if (expr.getOperator().getName().equals("<>")) {
+ op = "$ne";
+ }
+
+ parserDBObject(o,exprL.toString(),op, getExpValue(expr.getRight()));
+ }
+
+ }
+ else {
+ if (expr.getOperator().getName().equals("AND")) {
+ parserWhere(exprL,o);
+ parserWhere(expr.getRight(),o);
+ }
+ else if (expr.getOperator().getName().equals("OR")) {
+ orWhere(exprL,expr.getRight(),o);
+ }
+ else {
+			    throw new RuntimeException("Can't identify the operation of where");
+ }
+ }
+ }
+
+ }
+
+
+ private void orWhere(SQLExpr exprL,SQLExpr exprR ,BasicBSONObject ob){
+ BasicBSONObject xo = new BasicBSONObject();
+ BasicBSONObject yo = new BasicBSONObject();
+ parserWhere(exprL,xo);
+ parserWhere(exprR,yo);
+ ob.put("$or",new Object[]{xo,yo});
+ }
+}
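For reference, the parserWhere/parserDBObject methods above translate SQL comparison operators into MongoDB-style query operators ($lt, $lte, $gt, $gte, $ne, $or). Below is a minimal, dependency-free sketch of that operator mapping, using plain java.util.Map in place of BasicBSONObject; the WhereOperatorMapping class and putPredicate method are illustrative names, not part of this patch.

    import java.util.HashMap;
    import java.util.Map;

    public class WhereOperatorMapping {
        // Translate one "column <op> value" predicate into a query entry,
        // mirroring the operator table used by parserWhere above.
        public static void putPredicate(Map<String, Object> query, String column, String op, Object value) {
            if ("=".equals(op)) {
                query.put(column, value); // equality is stored directly, without a wrapper object
                return;
            }
            Map<String, Object> condition = new HashMap<>();
            switch (op) {
                case "<":  condition.put("$lt",  value); break;
                case "<=": condition.put("$lte", value); break;
                case ">":  condition.put("$gt",  value); break;
                case ">=": condition.put("$gte", value); break;
                case "!=":
                case "<>": condition.put("$ne",  value); break;
                default: throw new IllegalArgumentException("unsupported operator: " + op);
            }
            query.put(column, condition);
        }

        public static void main(String[] args) {
            Map<String, Object> query = new HashMap<>();
            putPredicate(query, "age", ">=", 18);
            putPredicate(query, "name", "=", "mycat");
            System.out.println(query); // e.g. {name=mycat, age={$gte=18}} (map order may vary)
        }
    }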
diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaStatement.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaStatement.java
index ba8641c08..e9f61d88d 100644
--- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaStatement.java
+++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/SequoiaStatement.java
@@ -28,12 +28,15 @@ public SequoiaStatement(SequoiaConnection conn, int type, int concurrency, int h
this._concurrency = concurrency;
this._holdability = holdability;
- if (this._type != 0)
- throw new UnsupportedOperationException("type not supported yet");
- if (this._concurrency != 0)
- throw new UnsupportedOperationException("concurrency not supported yet");
- if (this._holdability != 0)
- throw new UnsupportedOperationException("holdability not supported yet");
+ if (this._type != 0) {
+ throw new UnsupportedOperationException("type not supported yet");
+ }
+ if (this._concurrency != 0) {
+ throw new UnsupportedOperationException("concurrency not supported yet");
+ }
+ if (this._holdability != 0) {
+ throw new UnsupportedOperationException("holdability not supported yet");
+ }
}
@Override
@@ -52,12 +55,12 @@ public boolean isWrapperFor(Class<?> iface) throws SQLException {
public ResultSet executeQuery(String sql) throws SQLException {
SequoiaData mongo= new SequoiaSQLParser(this._conn.getDB(), sql).query();
- if (this._fetchSize > 0) {
-            //set the maximum number of records per network request
- if (mongo.getCursor()!=null) {
- //mongo.getCursor().batchSize(this._fetchSize);
- }
- }
+// if (this._fetchSize > 0) {
+//            //set the maximum number of records per network request
+// if (mongo.getCursor()!=null) {
+// //mongo.getCursor().batchSize(this._fetchSize);
+// }
+// }
/*
if (this._maxRows > 0)
{
diff --git a/src/main/java/io/mycat/backend/jdbc/sequoiadb/StringUtils.java b/src/main/java/io/mycat/backend/jdbc/sequoiadb/StringUtils.java
index 063117d22..d4d3758f7 100644
--- a/src/main/java/io/mycat/backend/jdbc/sequoiadb/StringUtils.java
+++ b/src/main/java/io/mycat/backend/jdbc/sequoiadb/StringUtils.java
@@ -1,16 +1,16 @@
-package io.mycat.backend.jdbc.sequoiadb;
-
-
-public class StringUtils {
-
-
- public static boolean startsWithIgnoreCase(String searchIn, int startAt,
- String searchFor) {
- return searchIn.regionMatches(true, startAt, searchFor, 0, searchFor
- .length());
- }
-
- public static boolean startsWithIgnoreCase(String searchIn, String searchFor) {
- return startsWithIgnoreCase(searchIn, 0, searchFor);
- }
+package io.mycat.backend.jdbc.sequoiadb;
+
+
+public class StringUtils {
+
+
+ public static boolean startsWithIgnoreCase(String searchIn, int startAt,
+ String searchFor) {
+ return searchIn.regionMatches(true, startAt, searchFor, 0, searchFor
+ .length());
+ }
+
+ public static boolean startsWithIgnoreCase(String searchIn, String searchFor) {
+ return startsWithIgnoreCase(searchIn, 0, searchFor);
+ }
}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/server/packet/util/BindValue.java b/src/main/java/io/mycat/backend/mysql/BindValue.java
similarity index 98%
rename from src/main/java/io/mycat/server/packet/util/BindValue.java
rename to src/main/java/io/mycat/backend/mysql/BindValue.java
index 4d2ec6934..b4c352a3c 100644
--- a/src/main/java/io/mycat/server/packet/util/BindValue.java
+++ b/src/main/java/io/mycat/backend/mysql/BindValue.java
@@ -21,7 +21,7 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.packet.util;
+package io.mycat.backend.mysql;
/**
* @author mycat
diff --git a/src/main/java/io/mycat/server/packet/util/BindValueUtil.java b/src/main/java/io/mycat/backend/mysql/BindValueUtil.java
similarity index 92%
rename from src/main/java/io/mycat/server/packet/util/BindValueUtil.java
rename to src/main/java/io/mycat/backend/mysql/BindValueUtil.java
index 51b737aba..bc93a54f1 100644
--- a/src/main/java/io/mycat/server/packet/util/BindValueUtil.java
+++ b/src/main/java/io/mycat/backend/mysql/BindValueUtil.java
@@ -21,13 +21,12 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.packet.util;
-
-import io.mycat.server.Fields;
-import io.mycat.server.packet.MySQLMessage;
+package io.mycat.backend.mysql;
import java.io.UnsupportedEncodingException;
+import io.mycat.config.Fields;
+
/**
* @author mycat
*/
@@ -68,9 +67,9 @@ public static final void read(MySQLMessage mm, BindValue bv, String charset) thr
case Fields.FIELD_TYPE_STRING:
case Fields.FIELD_TYPE_VARCHAR:
bv.value = mm.readStringWithLength(charset);
- if (bv.value == null) {
- bv.isNull = true;
- }
+// if (bv.value == null) {
+// bv.isNull = true;
+// }
break;
case Fields.FIELD_TYPE_DECIMAL:
case Fields.FIELD_TYPE_NEW_DECIMAL:
@@ -79,6 +78,9 @@ public static final void read(MySQLMessage mm, BindValue bv, String charset) thr
bv.isNull = true;
}
break;
+ case Fields.FIELD_TYPE_BLOB:
+ bv.isLongData = true;
+ break;
default:
throw new IllegalArgumentException("bindValue error,unsupported type:" + bv.type);
}
diff --git a/src/main/java/io/mycat/server/packet/util/BufferUtil.java b/src/main/java/io/mycat/backend/mysql/BufferUtil.java
similarity index 99%
rename from src/main/java/io/mycat/server/packet/util/BufferUtil.java
rename to src/main/java/io/mycat/backend/mysql/BufferUtil.java
index 69a456350..920cbffaf 100644
--- a/src/main/java/io/mycat/server/packet/util/BufferUtil.java
+++ b/src/main/java/io/mycat/backend/mysql/BufferUtil.java
@@ -21,7 +21,7 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.packet.util;
+package io.mycat.backend.mysql;
import java.nio.ByteBuffer;
diff --git a/src/main/java/io/mycat/server/packet/util/ByteUtil.java b/src/main/java/io/mycat/backend/mysql/ByteUtil.java
similarity index 97%
rename from src/main/java/io/mycat/server/packet/util/ByteUtil.java
rename to src/main/java/io/mycat/backend/mysql/ByteUtil.java
index db86b7032..48f445db7 100644
--- a/src/main/java/io/mycat/server/packet/util/ByteUtil.java
+++ b/src/main/java/io/mycat/backend/mysql/ByteUtil.java
@@ -21,9 +21,7 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.packet.util;
-
-import io.mycat.server.packet.MySQLMessage;
+package io.mycat.backend.mysql;
/**
* @author mycat
diff --git a/src/main/java/io/mycat/backend/mysql/CharsetUtil.java b/src/main/java/io/mycat/backend/mysql/CharsetUtil.java
new file mode 100644
index 000000000..56df40738
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/CharsetUtil.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.backend.mysql;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileInputStream;
+import java.util.*;
+
+/**
+ * @author mycat
+ */
+public class CharsetUtil {
+ public static final Logger logger = LoggerFactory
+ .getLogger(CharsetUtil.class);
+    private static final Map<Integer, String> INDEX_TO_CHARSET = new HashMap<>();
+    private static final Map<String, Integer> CHARSET_TO_INDEX = new HashMap<>();
+ static {
+
+ // index_to_charset.properties
+ INDEX_TO_CHARSET.put(1,"big5");
+ INDEX_TO_CHARSET.put(8,"latin1");
+ INDEX_TO_CHARSET.put(9,"latin2");
+ INDEX_TO_CHARSET.put(14,"cp1251");
+ INDEX_TO_CHARSET.put(28,"gbk");
+ INDEX_TO_CHARSET.put(24,"gb2312");
+ INDEX_TO_CHARSET.put(33,"utf8");
+ INDEX_TO_CHARSET.put(45,"utf8mb4");
+
+ String filePath = Thread.currentThread().getContextClassLoader()
+ .getResource("").getPath().replaceAll("%20", " ")
+ + "index_to_charset.properties";
+ Properties prop = new Properties();
+ try {
+ prop.load(new FileInputStream(filePath));
+ for (Object index : prop.keySet()){
+ INDEX_TO_CHARSET.put(Integer.parseInt((String) index), prop.getProperty((String) index));
+ }
+ } catch (Exception e) {
+ logger.error("error:",e);
+ }
+
+ // charset --> index
+ for(Integer key : INDEX_TO_CHARSET.keySet()){
+ String charset = INDEX_TO_CHARSET.get(key);
+ if(charset != null && CHARSET_TO_INDEX.get(charset) == null){
+ CHARSET_TO_INDEX.put(charset, key);
+ }
+ }
+
+ CHARSET_TO_INDEX.put("iso-8859-1", 14);
+ CHARSET_TO_INDEX.put("iso_8859_1", 14);
+ CHARSET_TO_INDEX.put("utf-8", 33);
+ }
+
+ public static final String getCharset(int index) {
+ return INDEX_TO_CHARSET.get(index);
+ }
+
+ public static final int getIndex(String charset) {
+ if (charset == null || charset.length() == 0) {
+ return 0;
+ } else {
+ Integer i = CHARSET_TO_INDEX.get(charset.toLowerCase());
+ return (i == null) ? 0 : i;
+ }
+ }
+
+
+
+}
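A short usage sketch for the index/charset mapping above; the CharsetUtilDemo class name is illustrative. Only the hard-coded defaults are assumed here, since loading index_to_charset.properties may fail and is merely logged.

    import io.mycat.backend.mysql.CharsetUtil;

    public class CharsetUtilDemo {
        public static void main(String[] args) {
            System.out.println(CharsetUtil.getCharset(33));      // "utf8" from the built-in defaults
            System.out.println(CharsetUtil.getIndex("UTF8"));    // 33, the lookup is lower-cased
            System.out.println(CharsetUtil.getIndex("unknown")); // 0, the fallback for unregistered charsets
        }
    }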
diff --git a/src/main/java/io/mycat/backend/mysql/DataType.java b/src/main/java/io/mycat/backend/mysql/DataType.java
new file mode 100644
index 000000000..2acc44793
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/DataType.java
@@ -0,0 +1,20 @@
+package io.mycat.backend.mysql;
+
+/**
+ * Defines the data types returned to the client.
+ * @author huangyiming
+ *
+ */
+public enum DataType {
+
+ STRING("String"),DOUBLE("Double"),FLOAT("Float"),DATE("Date"),INT("Int");
+ private String type;
+ private DataType(String type){
+ this.type = type;
+ }
+ public String getType() {
+ return type;
+ }
+
+
+}
diff --git a/src/main/java/io/mycat/backend/mysql/LoadDataUtil.java b/src/main/java/io/mycat/backend/mysql/LoadDataUtil.java
new file mode 100644
index 000000000..bbe9cd738
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/LoadDataUtil.java
@@ -0,0 +1,106 @@
+package io.mycat.backend.mysql;
+
+import java.io.*;
+import java.util.List;
+
+import io.mycat.MycatServer;
+import io.mycat.backend.BackendConnection;
+import io.mycat.net.BackendAIOConnection;
+import io.mycat.net.mysql.BinaryPacket;
+import io.mycat.net.mysql.CommandPacket;
+import io.mycat.net.mysql.MySQLPacket;
+import io.mycat.route.RouteResultsetNode;
+import io.mycat.sqlengine.mpp.LoadData;
+
+/**
+ * Created by nange on 2015/3/31.
+ */
+public class LoadDataUtil
+{
+ public static void requestFileDataResponse(byte[] data, BackendConnection conn)
+ {
+
+ byte packId= data[3];
+ BackendAIOConnection backendAIOConnection= (BackendAIOConnection) conn;
+ RouteResultsetNode rrn= (RouteResultsetNode) conn.getAttachment();
+ LoadData loadData= rrn.getLoadData();
+        List<String> loadDataData = loadData.getData();
+ try
+ {
+ if(loadDataData !=null&&loadDataData.size()>0)
+ {
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ for (int i = 0, loadDataDataSize = loadDataData.size(); i < loadDataDataSize; i++)
+ {
+ String line = loadDataData.get(i);
+
+
+ String s =(i==loadDataDataSize-1)?line: line + loadData.getLineTerminatedBy();
+ byte[] bytes = s.getBytes(loadData.getCharset());
+ bos.write(bytes);
+
+
+ }
+
+ packId= writeToBackConnection(packId,new ByteArrayInputStream(bos.toByteArray()),backendAIOConnection);
+
+ } else
+ {
+                //read the data from the file instead
+ packId= writeToBackConnection(packId,new BufferedInputStream(new FileInputStream(loadData.getFileName())),backendAIOConnection);
+
+ }
+ }catch (IOException e)
+ {
+
+ throw new RuntimeException(e);
+ } finally
+ {
+            //an empty packet must be sent to mark the end of the data
+ byte[] empty = new byte[] { 0, 0, 0,3 };
+ empty[3]=++packId;
+ backendAIOConnection.write(empty);
+ }
+
+
+
+
+ }
+
+ public static byte writeToBackConnection(byte packID,InputStream inputStream,BackendAIOConnection backendAIOConnection) throws IOException
+ {
+ try
+ {
+ int packSize = MycatServer.getInstance().getConfig().getSystem().getBufferPoolChunkSize() - 5;
+ // int packSize = backendAIOConnection.getMaxPacketSize() / 32;
+ // int packSize=65530;
+ byte[] buffer = new byte[packSize];
+ int len = -1;
+
+ while ((len = inputStream.read(buffer)) != -1)
+ {
+ byte[] temp = null;
+ if (len == packSize)
+ {
+ temp = buffer;
+ } else
+ {
+ temp = new byte[len];
+ System.arraycopy(buffer, 0, temp, 0, len);
+ }
+ BinaryPacket packet = new BinaryPacket();
+ packet.packetId = ++packID;
+ packet.data = temp;
+ packet.write(backendAIOConnection);
+ }
+
+ }
+ finally
+ {
+ inputStream.close();
+ }
+
+
+ return packID;
+ }
+}
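The core of writeToBackConnection above is a fixed-size chunking loop over an InputStream with a monotonically increasing one-byte packet id. Below is a self-contained sketch of just that loop; the ChunkedWriter/PacketSink names and the buffer size are illustrative, not MyCat APIs.

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Arrays;

    public class ChunkedWriter {
        /** Receives one packet payload at a time; stands in for BinaryPacket.write(conn). */
        interface PacketSink { void accept(byte packetId, byte[] payload); }

        // Split the stream into packSize chunks with consecutive packet ids and
        // return the last id used, so the caller can send the terminating empty packet.
        static byte writeChunks(byte packetId, InputStream in, int packSize, PacketSink sink) throws IOException {
            try {
                byte[] buffer = new byte[packSize];
                int len;
                while ((len = in.read(buffer)) != -1) {
                    byte[] payload = Arrays.copyOf(buffer, len);
                    sink.accept(++packetId, payload);
                }
            } finally {
                in.close();
            }
            return packetId;
        }

        public static void main(String[] args) throws IOException {
            byte last = writeChunks((byte) 0, new ByteArrayInputStream(new byte[25]), 10,
                    (id, payload) -> System.out.println("packet " + id + ": " + payload.length + " bytes"));
            System.out.println("last packet id = " + last); // three packets of 10, 10 and 5 bytes
        }
    }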
diff --git a/src/main/java/io/mycat/server/packet/MySQLMessage.java b/src/main/java/io/mycat/backend/mysql/MySQLMessage.java
similarity index 99%
rename from src/main/java/io/mycat/server/packet/MySQLMessage.java
rename to src/main/java/io/mycat/backend/mysql/MySQLMessage.java
index 04ee9dbe0..b8f60996c 100644
--- a/src/main/java/io/mycat/server/packet/MySQLMessage.java
+++ b/src/main/java/io/mycat/backend/mysql/MySQLMessage.java
@@ -21,7 +21,7 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.packet;
+package io.mycat.backend.mysql;
import java.io.UnsupportedEncodingException;
import java.math.BigDecimal;
@@ -298,9 +298,9 @@ public String readStringWithLength() {
public String readStringWithLength(String charset) throws UnsupportedEncodingException {
int length = (int) readLength();
- if (length <= 0) {
- return null;
- }
+// if (length <= 0) {
+// return null;
+// }
String s = new String(data, position, length, charset);
position += length;
return s;
diff --git a/src/main/java/io/mycat/server/packet/util/PacketUtil.java b/src/main/java/io/mycat/backend/mysql/PacketUtil.java
similarity index 92%
rename from src/main/java/io/mycat/server/packet/util/PacketUtil.java
rename to src/main/java/io/mycat/backend/mysql/PacketUtil.java
index 56788f4a2..076751ede 100644
--- a/src/main/java/io/mycat/server/packet/util/PacketUtil.java
+++ b/src/main/java/io/mycat/backend/mysql/PacketUtil.java
@@ -21,16 +21,16 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.packet.util;
-
-import io.mycat.server.ErrorCode;
-import io.mycat.server.packet.BinaryPacket;
-import io.mycat.server.packet.ErrorPacket;
-import io.mycat.server.packet.FieldPacket;
-import io.mycat.server.packet.ResultSetHeaderPacket;
+package io.mycat.backend.mysql;
import java.io.UnsupportedEncodingException;
+import io.mycat.config.ErrorCode;
+import io.mycat.net.mysql.BinaryPacket;
+import io.mycat.net.mysql.ErrorPacket;
+import io.mycat.net.mysql.FieldPacket;
+import io.mycat.net.mysql.ResultSetHeaderPacket;
+
/**
* @author mycat
*/
diff --git a/src/main/java/io/mycat/server/packet/util/PreparedStatement.java b/src/main/java/io/mycat/backend/mysql/PreparedStatement.java
similarity index 61%
rename from src/main/java/io/mycat/server/packet/util/PreparedStatement.java
rename to src/main/java/io/mycat/backend/mysql/PreparedStatement.java
index 90aee80b2..78b5db730 100644
--- a/src/main/java/io/mycat/server/packet/util/PreparedStatement.java
+++ b/src/main/java/io/mycat/backend/mysql/PreparedStatement.java
@@ -21,10 +21,15 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.packet.util;
+package io.mycat.backend.mysql;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
/**
- * @author mycat
+ * @author mycat, CrazyPig
*/
public class PreparedStatement {
@@ -33,6 +38,14 @@ public class PreparedStatement {
private int columnsNumber;
private int parametersNumber;
private int[] parametersType;
+ /**
+	 * Holds the byte data sent by COM_STMT_SEND_LONG_DATA commands
+ *
+ * key : param_id
+ * value : byte data
+ *
+ */
+	private Map<Long, ByteArrayOutputStream> longDataMap;
public PreparedStatement(long id, String statement, int columnsNumber, int parametersNumber) {
this.id = id;
@@ -40,6 +53,7 @@ public PreparedStatement(long id, String statement, int columnsNumber, int param
this.columnsNumber = columnsNumber;
this.parametersNumber = parametersNumber;
this.parametersType = new int[parametersNumber];
+		this.longDataMap = new HashMap<>();
}
public long getId() {
@@ -62,4 +76,32 @@ public int[] getParametersType() {
return parametersType;
}
+ public ByteArrayOutputStream getLongData(long paramId) {
+ return longDataMap.get(paramId);
+ }
+
+ /**
+	 * Called by the COM_STMT_RESET command to reset the accumulated data
+ */
+ public void resetLongData() {
+ for(Long paramId : longDataMap.keySet()) {
+ longDataMap.get(paramId).reset();
+ }
+ }
+
+ /**
+	 * Appends data to the specified prepared-statement parameter
+ * @param paramId
+ * @param data
+ * @throws IOException
+ */
+ public void appendLongData(long paramId, byte[] data) throws IOException {
+ if(getLongData(paramId) == null) {
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ out.write(data);
+ longDataMap.put(paramId, out);
+ } else {
+ longDataMap.get(paramId).write(data);
+ }
+ }
}
\ No newline at end of file
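The appendLongData/resetLongData methods above implement the usual accumulation model for COM_STMT_SEND_LONG_DATA: each chunk is appended to a per-parameter buffer until the statement is executed or reset. A stand-alone sketch of that model follows; the LongDataBuffer class and its method names are illustrative, not part of this patch.

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    public class LongDataBuffer {
        private final Map<Long, ByteArrayOutputStream> chunks = new HashMap<>();

        // COM_STMT_SEND_LONG_DATA: append one chunk to the buffer of the given parameter.
        public void append(long paramId, byte[] data) throws IOException {
            chunks.computeIfAbsent(paramId, id -> new ByteArrayOutputStream()).write(data);
        }

        // COM_STMT_EXECUTE: read back everything accumulated for a parameter, or null if none was sent.
        public byte[] get(long paramId) {
            ByteArrayOutputStream out = chunks.get(paramId);
            return out == null ? null : out.toByteArray();
        }

        // COM_STMT_RESET: discard the accumulated data but keep the buffers, as resetLongData() does.
        public void reset() {
            chunks.values().forEach(ByteArrayOutputStream::reset);
        }
    }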
diff --git a/src/main/java/io/mycat/server/packet/util/SecurityUtil.java b/src/main/java/io/mycat/backend/mysql/SecurityUtil.java
similarity index 97%
rename from src/main/java/io/mycat/server/packet/util/SecurityUtil.java
rename to src/main/java/io/mycat/backend/mysql/SecurityUtil.java
index f2dc7a442..ff7cc4dcf 100644
--- a/src/main/java/io/mycat/server/packet/util/SecurityUtil.java
+++ b/src/main/java/io/mycat/backend/mysql/SecurityUtil.java
@@ -21,7 +21,7 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.packet.util;
+package io.mycat.backend.mysql;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
@@ -67,7 +67,7 @@ public static final String scramble323(String pass, String seed) {
chars[i] = (char) b;
}
seed1 = ((seed1 * 3) + seed2) % max;
- seed2 = (seed1 + seed2 + 33) % max;
+// seed2 = (seed1 + seed2 + 33) % max;
d = (double) seed1 / (double) max;
b = (byte) java.lang.Math.floor(d * 31);
for (int i = 0; i < seed.length(); i++) {
diff --git a/src/main/java/io/mycat/server/packet/util/StreamUtil.java b/src/main/java/io/mycat/backend/mysql/StreamUtil.java
similarity index 99%
rename from src/main/java/io/mycat/server/packet/util/StreamUtil.java
rename to src/main/java/io/mycat/backend/mysql/StreamUtil.java
index 9f5ea2543..5cc2fd923 100644
--- a/src/main/java/io/mycat/server/packet/util/StreamUtil.java
+++ b/src/main/java/io/mycat/backend/mysql/StreamUtil.java
@@ -21,7 +21,7 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.packet.util;
+package io.mycat.backend.mysql;
import java.io.EOFException;
import java.io.IOException;
@@ -183,8 +183,9 @@ public static final void writeDouble(OutputStream out, double d) throws IOExcept
public static final long readLength(InputStream in) throws IOException {
int length = in.read();
- if (length < 0)
+ if (length < 0) {
throw new EOFException();
+ }
switch (length) {
case 251:
return NULL_LENGTH;
diff --git a/src/main/java/io/mycat/backend/nio/MySQLBackendConnection.java b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnection.java
similarity index 63%
rename from src/main/java/io/mycat/backend/nio/MySQLBackendConnection.java
rename to src/main/java/io/mycat/backend/mysql/nio/MySQLConnection.java
index dd776e916..bc24785a9 100644
--- a/src/main/java/io/mycat/backend/nio/MySQLBackendConnection.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnection.java
@@ -1,58 +1,65 @@
-package io.mycat.backend.nio;
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.backend.mysql.nio;
+
+import io.mycat.backend.mysql.xa.TxState;
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
import io.mycat.MycatServer;
-import io.mycat.backend.BackendConnection;
-import io.mycat.backend.MySQLDataSource;
+import io.mycat.backend.mysql.CharsetUtil;
+import io.mycat.backend.mysql.SecurityUtil;
+import io.mycat.backend.mysql.nio.handler.ResponseHandler;
+import io.mycat.config.Capabilities;
+import io.mycat.config.Isolations;
+import io.mycat.net.BackendAIOConnection;
+import io.mycat.net.mysql.*;
import io.mycat.route.RouteResultsetNode;
-import io.mycat.server.Capabilities;
-import io.mycat.server.GenalMySQLConnection;
-import io.mycat.server.Isolations;
-import io.mycat.server.MySQLFrontConnection;
-import io.mycat.server.exception.UnknownTxIsolationException;
-import io.mycat.server.executors.ResponseHandler;
-import io.mycat.server.packet.CommandPacket;
-import io.mycat.server.packet.MySQLPacket;
-import io.mycat.server.packet.QuitPacket;
-import io.mycat.server.packet.ResultStatus;
-import io.mycat.server.packet.util.CharsetUtil;
+import io.mycat.server.ServerConnection;
import io.mycat.server.parser.ServerParse;
import io.mycat.util.TimeUtil;
+import io.mycat.util.exception.UnknownTxIsolationException;
import java.io.UnsupportedEncodingException;
-import java.nio.ByteBuffer;
-import java.nio.channels.SocketChannel;
+import java.nio.channels.NetworkChannel;
+import java.security.NoSuchAlgorithmException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
-public class MySQLBackendConnection extends GenalMySQLConnection implements
- BackendConnection {
-
- private static final CommandPacket _READ_UNCOMMITTED = new CommandPacket();
- private static final CommandPacket _READ_COMMITTED = new CommandPacket();
- private static final CommandPacket _REPEATED_READ = new CommandPacket();
- private static final CommandPacket _SERIALIZABLE = new CommandPacket();
- private static final CommandPacket _AUTOCOMMIT_ON = new CommandPacket();
- private static final CommandPacket _AUTOCOMMIT_OFF = new CommandPacket();
- private static final CommandPacket _COMMIT = new CommandPacket();
- private static final CommandPacket _ROLLBACK = new CommandPacket();
+/**
+ * @author mycat
+ */
+public class MySQLConnection extends BackendAIOConnection {
+ private static final Logger LOGGER = LoggerFactory
+ .getLogger(MySQLConnection.class);
private static final long CLIENT_FLAGS = initClientFlags();
+ private volatile long lastTime;
+ private volatile String schema = null;
+ private volatile String oldSchema;
private volatile boolean borrowed = false;
- private volatile long lastTime;
private volatile boolean modifiedSQLExecuted = false;
- private volatile StatusSync statusSync;
- private volatile boolean metaDataSyned = true;
- private volatile int xaStatus = 0;
private volatile int batchCmdCount = 0;
- private MySQLDataSource pool;
- private boolean fromSlaveDB;
- private long threadId;
- private final ResultStatus sqlResultStatus = new ResultStatus();
- private Object attachment;
- private volatile ResponseHandler respHandler;
-
- private final AtomicBoolean isQuit;
-
private static long initClientFlags() {
int flag = 0;
flag |= Capabilities.CLIENT_LONG_PASSWORD;
@@ -60,10 +67,10 @@ private static long initClientFlags() {
flag |= Capabilities.CLIENT_LONG_FLAG;
flag |= Capabilities.CLIENT_CONNECT_WITH_DB;
// flag |= Capabilities.CLIENT_NO_SCHEMA;
- boolean usingCompress = MycatServer.getInstance().getConfig()
- .getSystem().getUseCompression() == 1;
- if (usingCompress) {
- flag |= Capabilities.CLIENT_COMPRESS;
+ boolean usingCompress=MycatServer.getInstance().getConfig().getSystem().getUseCompression()==1 ;
+ if(usingCompress)
+ {
+ flag |= Capabilities.CLIENT_COMPRESS;
}
flag |= Capabilities.CLIENT_ODBC;
flag |= Capabilities.CLIENT_LOCAL_FILES;
@@ -81,6 +88,14 @@ private static long initClientFlags() {
return flag;
}
+ private static final CommandPacket _READ_UNCOMMITTED = new CommandPacket();
+ private static final CommandPacket _READ_COMMITTED = new CommandPacket();
+ private static final CommandPacket _REPEATED_READ = new CommandPacket();
+ private static final CommandPacket _SERIALIZABLE = new CommandPacket();
+ private static final CommandPacket _AUTOCOMMIT_ON = new CommandPacket();
+ private static final CommandPacket _AUTOCOMMIT_OFF = new CommandPacket();
+ private static final CommandPacket _COMMIT = new CommandPacket();
+ private static final CommandPacket _ROLLBACK = new CommandPacket();
static {
_READ_UNCOMMITTED.packetId = 0;
_READ_UNCOMMITTED.command = MySQLPacket.COM_QUERY;
@@ -112,14 +127,33 @@ private static long initClientFlags() {
_ROLLBACK.arg = "rollback".getBytes();
}
- public MySQLBackendConnection(SocketChannel channel, boolean fromSlaveDB) {
+ private MySQLDataSource pool;
+ private boolean fromSlaveDB;
+ private long threadId;
+ private HandshakePacket handshake;
+ private volatile int txIsolation;
+ private volatile boolean autocommit;
+ private long clientFlags;
+ private boolean isAuthenticated;
+ private String user;
+ private String password;
+ private Object attachment;
+ private ResponseHandler respHandler;
+
+ private final AtomicBoolean isQuit;
+ private volatile StatusSync statusSync;
+ private volatile boolean metaDataSyned = true;
+ private volatile int xaStatus = 0;
+
+ public MySQLConnection(NetworkChannel channel, boolean fromSlaveDB) {
super(channel);
this.clientFlags = CLIENT_FLAGS;
this.lastTime = TimeUtil.currentTimeMillis();
this.isQuit = new AtomicBoolean(false);
this.autocommit = true;
this.fromSlaveDB = fromSlaveDB;
-
+		// set to the default value so that each newly initialized connection does not need an extra sync
+ this.txIsolation = MycatServer.getInstance().getConfig().getSystem().getTxIsolation();
}
public int getXaStatus() {
@@ -130,17 +164,28 @@ public void setXaStatus(int xaStatus) {
this.xaStatus = xaStatus;
}
- // public void onConnectFailed(Throwable t) {
- // if (handler instanceof MySQLConnectionHandler) {
- // MySQLConnectionHandler theHandler = (MySQLConnectionHandler) handler;
- // theHandler.connectionError(t);
- // } else {
- // ((MySQLConnectionAuthenticator) handler).connectionError(this, t);
- // }
- // }
+ public void onConnectFailed(Throwable t) {
+ if (handler instanceof MySQLConnectionHandler) {
+ MySQLConnectionHandler theHandler = (MySQLConnectionHandler) handler;
+ theHandler.connectionError(t);
+ } else {
+ ((MySQLConnectionAuthenticator) handler).connectionError(this, t);
+ }
+ }
+
+ public String getSchema() {
+ return this.schema;
+ }
- public ResultStatus getSqlResultStatus() {
- return sqlResultStatus;
+ public void setSchema(String newSchema) {
+ String curSchema = schema;
+ if (curSchema == null) {
+ this.schema = newSchema;
+ this.oldSchema = newSchema;
+ } else {
+ this.oldSchema = curSchema;
+ this.schema = newSchema;
+ }
}
public MySQLDataSource getPool() {
@@ -151,6 +196,26 @@ public void setPool(MySQLDataSource pool) {
this.pool = pool;
}
+ public String getUser() {
+ return user;
+ }
+
+ public void setUser(String user) {
+ this.user = user;
+ }
+
+ public void setPassword(String password) {
+ this.password = password;
+ }
+
+ public HandshakePacket getHandshake() {
+ return handshake;
+ }
+
+ public void setHandshake(HandshakePacket handshake) {
+ this.handshake = handshake;
+ }
+
public long getThreadId() {
return threadId;
}
@@ -159,6 +224,34 @@ public void setThreadId(long threadId) {
this.threadId = threadId;
}
+ public boolean isAuthenticated() {
+ return isAuthenticated;
+ }
+
+ public void setAuthenticated(boolean isAuthenticated) {
+ this.isAuthenticated = isAuthenticated;
+ }
+
+ public String getPassword() {
+ return password;
+ }
+
+ public void authenticate() {
+ AuthPacket packet = new AuthPacket();
+ packet.packetId = 1;
+ packet.clientFlags = clientFlags;
+ packet.maxPacketSize = maxPacketSize;
+ packet.charsetIndex = this.charsetIndex;
+ packet.user = user;
+ try {
+ packet.password = passwd(password, handshake);
+ } catch (NoSuchAlgorithmException e) {
+ throw new RuntimeException(e.getMessage());
+ }
+ packet.database = schema;
+ packet.write(this);
+ }
+
public boolean isAutocommit() {
return autocommit;
}
@@ -220,10 +313,6 @@ private void getAutocommitCommand(StringBuilder sb, boolean autoCommit) {
}
}
- public ResponseHandler getRespHandler() {
- return respHandler;
- }
-
private static class StatusSync {
private final String schema;
private final Integer charsetIndex;
@@ -244,7 +333,7 @@ public StatusSync(boolean xaStarted, String schema,
this.synCmdCount = new AtomicInteger(synCount);
}
- public boolean synAndExecuted(MySQLBackendConnection conn) {
+ public boolean synAndExecuted(MySQLConnection conn) {
int remains = synCmdCount.decrementAndGet();
if (remains == 0) {// syn command finished
this.updateConnectionInfo(conn);
@@ -256,10 +345,9 @@ public boolean synAndExecuted(MySQLBackendConnection conn) {
return false;
}
- private void updateConnectionInfo(MySQLBackendConnection conn)
+ private void updateConnectionInfo(MySQLConnection conn)
{
- conn.xaStatus = (xaStarted == true) ? 1 : 0;
if (schema != null) {
conn.schema = schema;
conn.oldSchema = conn.schema;
@@ -295,12 +383,15 @@ public boolean syncAndExcute() {
}
- public void execute(RouteResultsetNode rrn, MySQLFrontConnection sc,
+ public void execute(RouteResultsetNode rrn, ServerConnection sc,
boolean autocommit) throws UnsupportedEncodingException {
if (!modifiedSQLExecuted && rrn.isModifySQL()) {
modifiedSQLExecuted = true;
}
- String xaTXID = sc.getSession2().getXaTXID();
+ String xaTXID = null;
+ if(sc.getSession2().getXaTXID()!=null){
+ xaTXID = sc.getSession2().getXaTXID()+",'"+getSchema()+"'";
+ }
synAndDoExecute(xaTXID, rrn, sc.getCharsetIndex(), sc.getTxIsolation(),
autocommit);
}
@@ -315,17 +406,24 @@ private void synAndDoExecute(String xaTxID, RouteResultsetNode rrn,
// never executed modify sql,so auto commit
boolean expectAutocommit = !modifiedSQLExecuted || isFromSlaveDB()
|| clientAutoCommit;
- if (expectAutocommit == false && xaTxID != null && xaStatus == 0) {
- clientTxIsoLation = Isolations.SERIALIZABLE;
+ if (expectAutocommit == false && xaTxID != null && xaStatus == TxState.TX_INITIALIZE_STATE) {
+ //clientTxIsoLation = Isolations.SERIALIZABLE;
xaCmd = "XA START " + xaTxID + ';';
-
+ this.xaStatus = TxState.TX_STARTED_STATE;
}
int schemaSyn = conSchema.equals(oldSchema) ? 0 : 1;
- int charsetSyn = (this.charsetIndex == clientCharSetIndex) ? 0 : 1;
+ int charsetSyn = 0;
+ if (this.charsetIndex != clientCharSetIndex) {
+			//need to sync the connection's charset:
+			//set the current connection charset to the client charset,
+			//otherwise the charset used when sending commands to the server will not match.
+ setCharset(CharsetUtil.getCharset(clientCharSetIndex));
+ charsetSyn = 1;
+ }
int txIsoLationSyn = (txIsolation == clientTxIsoLation) ? 0 : 1;
int autoCommitSyn = (conAutoComit == expectAutocommit) ? 0 : 1;
- int synCount = schemaSyn + charsetSyn + txIsoLationSyn + autoCommitSyn;
- if (synCount == 0) {
+ int synCount = schemaSyn + charsetSyn + txIsoLationSyn + autoCommitSyn + (xaCmd!=null?1:0);
+ if (synCount == 0 && this.xaStatus != TxState.TX_STARTED_STATE) {
// not need syn connection
sendQueryCmd(rrn.getStatement());
return;
@@ -334,6 +432,7 @@ private void synAndDoExecute(String xaTxID, RouteResultsetNode rrn,
StringBuilder sb = new StringBuilder();
if (schemaSyn == 1) {
schemaCmd = getChangeSchemaCommand(conSchema);
+ // getChangeSchemaCommand(sb, conSchema);
}
if (charsetSyn == 1) {
@@ -362,7 +461,7 @@ private void synAndDoExecute(String xaTxID, RouteResultsetNode rrn,
schemaCmd.write(this);
}
// and our query sql to multi command at last
- sb.append(rrn.getStatement());
+ sb.append(rrn.getStatement()+";");
// syn and execute others
this.sendQueryCmd(sb.toString());
// waiting syn result...
@@ -402,8 +501,8 @@ public void setLastTime(long lastTime) {
public void quit() {
if (isQuit.compareAndSet(false, true) && !isClosed()) {
if (isAuthenticated) {
- write(QuitPacket.QUIT);
- write(ByteBuffer.allocate(10));
+ write(writeToBuffer(QuitPacket.QUIT, allocate()));
+ write(allocate());
} else {
close("normal");
}
@@ -412,7 +511,7 @@ public void quit() {
@Override
public void close(String reason) {
- if (!isClosed) {
+ if (!isClosed.get()) {
isQuit.set(true);
super.close(reason);
pool.connectionClosed(this);
@@ -471,18 +570,75 @@ public void release() {
pool.releaseChannel(this);
}
- public void setResponseHandler(ResponseHandler queryHandler) {
- respHandler = queryHandler;
+ public boolean setResponseHandler(ResponseHandler queryHandler) {
+ if (handler instanceof MySQLConnectionHandler) {
+ ((MySQLConnectionHandler) handler).setResponseHandler(queryHandler);
+ respHandler = queryHandler;
+ return true;
+ } else if (queryHandler != null) {
+ LOGGER.warn("set not MySQLConnectionHandler "
+ + queryHandler.getClass().getCanonicalName());
+ }
+ return false;
+ }
+
+ /**
+	 * The write queue is empty; more data can be written
+ */
+ public void writeQueueAvailable() {
+ if (respHandler != null) {
+ respHandler.writeQueueAvailable();
+ }
+ }
+
+ /**
+	 * Record SQL execution information
+ */
+ public void recordSql(String host, String schema, String stmt) {
+ // final long now = TimeUtil.currentTimeMillis();
+ // if (now > this.lastTime) {
+ // // long time = now - this.lastTime;
+ // // SQLRecorder sqlRecorder = this.pool.getSqlRecorder();
+ // // if (sqlRecorder.check(time)) {
+ // // SQLRecord recorder = new SQLRecord();
+ // // recorder.host = host;
+ // // recorder.schema = schema;
+ // // recorder.statement = stmt;
+ // // recorder.startTime = lastTime;
+ // // recorder.executeTime = time;
+ // // recorder.dataNode = pool.getName();
+ // // recorder.dataNodeIndex = pool.getIndex();
+ // // sqlRecorder.add(recorder);
+ // // }
+ // }
+ // this.lastTime = now;
+ }
+
+ private static byte[] passwd(String pass, HandshakePacket hs)
+ throws NoSuchAlgorithmException {
+ if (pass == null || pass.length() == 0) {
+ return null;
+ }
+ byte[] passwd = pass.getBytes();
+ int sl1 = hs.seed.length;
+ int sl2 = hs.restOfScrambleBuff.length;
+ byte[] seed = new byte[sl1 + sl2];
+ System.arraycopy(hs.seed, 0, seed, 0, sl1);
+ System.arraycopy(hs.restOfScrambleBuff, 0, seed, sl1, sl2);
+ return SecurityUtil.scramble411(passwd, seed);
}
+ @Override
public boolean isFromSlaveDB() {
return fromSlaveDB;
}
+ @Override
public boolean isBorrowed() {
return borrowed;
}
+ @Override
public void setBorrowed(boolean borrowed) {
this.lastTime = TimeUtil.currentTimeMillis();
this.borrowed = borrowed;
@@ -491,6 +647,7 @@ public void setBorrowed(boolean borrowed) {
@Override
public String toString() {
return "MySQLConnection [id=" + id + ", lastTime=" + lastTime
+ + ", user=" + user
+ ", schema=" + schema + ", old shema=" + oldSchema
+ ", borrowed=" + borrowed + ", fromSlaveDB=" + fromSlaveDB
+ ", threadId=" + threadId + ", charset=" + charset
@@ -501,6 +658,7 @@ public String toString() {
+ ", modifiedSQLExecuted=" + modifiedSQLExecuted + "]";
}
+ @Override
public boolean isModifiedSQLExecuted() {
return modifiedSQLExecuted;
}
diff --git a/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionAuthenticator.java b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionAuthenticator.java
new file mode 100644
index 000000000..be6e13ee8
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionAuthenticator.java
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.backend.mysql.nio;
+
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+
+import io.mycat.MycatServer;
+import io.mycat.backend.mysql.CharsetUtil;
+import io.mycat.backend.mysql.SecurityUtil;
+import io.mycat.backend.mysql.nio.handler.ResponseHandler;
+import io.mycat.config.Capabilities;
+import io.mycat.net.ConnectionException;
+import io.mycat.net.NIOHandler;
+import io.mycat.net.mysql.EOFPacket;
+import io.mycat.net.mysql.ErrorPacket;
+import io.mycat.net.mysql.HandshakePacket;
+import io.mycat.net.mysql.OkPacket;
+import io.mycat.net.mysql.Reply323Packet;
+
+/**
+ * MySQL authentication handler
+ *
+ * @author mycat
+ */
+public class MySQLConnectionAuthenticator implements NIOHandler {
+ private static final Logger LOGGER = LoggerFactory
+ .getLogger(MySQLConnectionAuthenticator.class);
+ private final MySQLConnection source;
+ private final ResponseHandler listener;
+
+ public MySQLConnectionAuthenticator(MySQLConnection source,
+ ResponseHandler listener) {
+ this.source = source;
+ this.listener = listener;
+ }
+
+ public void connectionError(MySQLConnection source, Throwable e) {
+ listener.connectionError(e, source);
+ }
+
+ @Override
+ public void handle(byte[] data) {
+ try {
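+ // data[4] is the first payload byte; it tells OK, ERROR and EOF packets apart from the initial handshake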
+ switch (data[4]) {
+ case OkPacket.FIELD_COUNT:
+ HandshakePacket packet = source.getHandshake();
+ if (packet == null) {
+ processHandShakePacket(data);
+ // send the authentication packet
+ source.authenticate();
+ break;
+ }
+ // handle the authentication result
+ source.setHandler(new MySQLConnectionHandler(source));
+ source.setAuthenticated(true);
+ boolean clientCompress = Capabilities.CLIENT_COMPRESS==(Capabilities.CLIENT_COMPRESS & packet.serverCapabilities);
+ boolean usingCompress= MycatServer.getInstance().getConfig().getSystem().getUseCompression()==1 ;
+ if(clientCompress&&usingCompress)
+ {
+ source.setSupportCompress(true);
+ }
+ if (listener != null) {
+ listener.connectionAcquired(source);
+ }
+ break;
+ case ErrorPacket.FIELD_COUNT:
+ ErrorPacket err = new ErrorPacket();
+ err.read(data);
+ String errMsg = new String(err.message);
+ LOGGER.warn("can't connect to mysql server, errmsg:" + errMsg + " " + source);
+ //source.close(errMsg);
+ throw new ConnectionException(err.errno, errMsg);
+
+ case EOFPacket.FIELD_COUNT:
+ auth323(data[3]);
+ break;
+ default:
+ packet = source.getHandshake();
+ if (packet == null) {
+ processHandShakePacket(data);
+ // send the authentication packet
+ source.authenticate();
+ break;
+ } else {
+ throw new RuntimeException("Unknown Packet!");
+ }
+
+ }
+
+ } catch (RuntimeException e) {
+ if (listener != null) {
+ listener.connectionError(e, source);
+ return;
+ }
+ throw e;
+ }
+ }
+
+ private void processHandShakePacket(byte[] data) {
+ // store the handshake packet
+ HandshakePacket packet= new HandshakePacket();
+ packet.read(data);
+ source.setHandshake(packet);
+ source.setThreadId(packet.threadId);
+
+ // set the character set
+ int charsetIndex = (packet.serverCharsetIndex & 0xff);
+ String charset = CharsetUtil.getCharset(charsetIndex);
+ if (charset != null) {
+ source.setCharset(charset);
+ } else {
+ throw new RuntimeException("Unknown charsetIndex:" + charsetIndex);
+ }
+ }
+
+ private void auth323(byte packetId) {
+ // send the 323 authentication response packet
+ Reply323Packet r323 = new Reply323Packet();
+ r323.packetId = ++packetId;
+ String pass = source.getPassword();
+ if (pass != null && pass.length() > 0) {
+ byte[] seed = source.getHandshake().seed;
+ r323.seed = SecurityUtil.scramble323(pass, new String(seed))
+ .getBytes();
+ }
+ r323.write(source);
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionFactory.java b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionFactory.java
new file mode 100644
index 000000000..1c78e56c3
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionFactory.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.backend.mysql.nio;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.channels.AsynchronousSocketChannel;
+import java.nio.channels.CompletionHandler;
+import java.nio.channels.NetworkChannel;
+
+import io.mycat.MycatServer;
+import io.mycat.backend.mysql.nio.handler.ResponseHandler;
+import io.mycat.config.model.DBHostConfig;
+import io.mycat.net.NIOConnector;
+import io.mycat.net.factory.BackendConnectionFactory;
+
+/**
+ * @author mycat
+ */
+public class MySQLConnectionFactory extends BackendConnectionFactory {
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ public MySQLConnection make(MySQLDataSource pool, ResponseHandler handler,
+ String schema) throws IOException {
+
+ DBHostConfig dsc = pool.getConfig();
+ NetworkChannel channel = openSocketChannel(MycatServer.getInstance()
+ .isAIO());
+
+ MySQLConnection c = new MySQLConnection(channel, pool.isReadNode());
+ MycatServer.getInstance().getConfig().setSocketParams(c, false);
+ c.setHost(dsc.getIp());
+ c.setPort(dsc.getPort());
+ c.setUser(dsc.getUser());
+ c.setPassword(dsc.getPassword());
+ c.setSchema(schema);
+ c.setHandler(new MySQLConnectionAuthenticator(c, handler));
+ c.setPool(pool);
+ c.setIdleTimeout(pool.getConfig().getIdleTimeout());
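+ // AIO channels connect asynchronously via the server's CompletionHandler; NIO channels are queued on the NIOConnector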
+ if (channel instanceof AsynchronousSocketChannel) {
+ ((AsynchronousSocketChannel) channel).connect(
+ new InetSocketAddress(dsc.getIp(), dsc.getPort()), c,
+ (CompletionHandler) MycatServer.getInstance()
+ .getConnector());
+ } else {
+ ((NIOConnector) MycatServer.getInstance().getConnector())
+ .postConnect(c);
+
+ }
+ return c;
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionHandler.java b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionHandler.java
new file mode 100644
index 000000000..eeaa68e04
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/nio/MySQLConnectionHandler.java
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.backend.mysql.nio;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.mysql.ByteUtil;
+import io.mycat.backend.mysql.nio.handler.LoadDataResponseHandler;
+import io.mycat.backend.mysql.nio.handler.ResponseHandler;
+import io.mycat.net.handler.BackendAsyncHandler;
+import io.mycat.net.mysql.EOFPacket;
+import io.mycat.net.mysql.ErrorPacket;
+import io.mycat.net.mysql.OkPacket;
+import io.mycat.net.mysql.RequestFilePacket;
+
+/**
+ * life cycle: from connection establish to close
+ *
+ * @author mycat
+ */
+public class MySQLConnectionHandler extends BackendAsyncHandler {
+ private static final Logger logger = LoggerFactory
+ .getLogger(MySQLConnectionHandler.class);
+ private static final int RESULT_STATUS_INIT = 0;
+ private static final int RESULT_STATUS_HEADER = 1;
+ private static final int RESULT_STATUS_FIELD_EOF = 2;
+
+ private final MySQLConnection source;
+ private volatile int resultStatus;
+ private volatile byte[] header;
+ private volatile List<byte[]> fields;
+
+ /**
+ * life cycle: one SQL execution
+ */
+ private volatile ResponseHandler responseHandler;
+
+ public MySQLConnectionHandler(MySQLConnection source) {
+ this.source = source;
+ this.resultStatus = RESULT_STATUS_INIT;
+ }
+
+ public void connectionError(Throwable e) {
+ if (responseHandler != null) {
+ responseHandler.connectionError(e, source);
+ }
+
+ }
+
+ public MySQLConnection getSource() {
+ return source;
+ }
+
+ @Override
+ public void handle(byte[] data) {
+ offerData(data, source.getProcessor().getExecutor());
+ }
+
+ @Override
+ protected void offerDataError() {
+ resultStatus = RESULT_STATUS_INIT;
+ throw new RuntimeException("offer data error!");
+ }
+
+ @Override
+ protected void handleData(byte[] data) {
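+ // result-set state machine: INIT (OK/ERROR/header) -> HEADER (field packets until EOF) -> FIELD_EOF (row packets until row EOF) -> INIT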
+ switch (resultStatus) {
+ case RESULT_STATUS_INIT:
+ switch (data[4]) {
+ case OkPacket.FIELD_COUNT:
+ handleOkPacket(data);
+ break;
+ case ErrorPacket.FIELD_COUNT:
+ handleErrorPacket(data);
+ break;
+ case RequestFilePacket.FIELD_COUNT:
+ handleRequestPacket(data);
+ break;
+ default:
+ resultStatus = RESULT_STATUS_HEADER;
+ header = data;
+ fields = new ArrayList<byte[]>((int) ByteUtil.readLength(data,
+ 4));
+ }
+ break;
+ case RESULT_STATUS_HEADER:
+ switch (data[4]) {
+ case ErrorPacket.FIELD_COUNT:
+ resultStatus = RESULT_STATUS_INIT;
+ handleErrorPacket(data);
+ break;
+ case EOFPacket.FIELD_COUNT:
+ resultStatus = RESULT_STATUS_FIELD_EOF;
+ handleFieldEofPacket(data);
+ break;
+ default:
+ fields.add(data);
+ }
+ break;
+ case RESULT_STATUS_FIELD_EOF:
+ switch (data[4]) {
+ case ErrorPacket.FIELD_COUNT:
+ resultStatus = RESULT_STATUS_INIT;
+ handleErrorPacket(data);
+ break;
+ case EOFPacket.FIELD_COUNT:
+ resultStatus = RESULT_STATUS_INIT;
+ handleRowEofPacket(data);
+ break;
+ default:
+ handleRowPacket(data);
+ }
+ break;
+ default:
+ throw new RuntimeException("unknown status!");
+ }
+ }
+
+ public void setResponseHandler(ResponseHandler responseHandler) {
+ // logger.info("set response handler "+responseHandler);
+ // if (this.responseHandler != null && responseHandler != null) {
+ // throw new RuntimeException("reset agani!");
+ // }
+ this.responseHandler = responseHandler;
+ }
+
+ /**
+ * Handle an OK packet
+ */
+ private void handleOkPacket(byte[] data) {
+ ResponseHandler respHand = responseHandler;
+ if (respHand != null) {
+ respHand.okResponse(data, source);
+ }
+ }
+
+ /**
+ * Handle an ERROR packet
+ */
+ private void handleErrorPacket(byte[] data) {
+ ResponseHandler respHand = responseHandler;
+ if (respHand != null) {
+ respHand.errorResponse(data, source);
+ } else {
+ closeNoHandler();
+ }
+ }
+
+ /**
+ * Handle a LOAD DATA INFILE request-file packet
+ */
+ private void handleRequestPacket(byte[] data) {
+ ResponseHandler respHand = responseHandler;
+ if (respHand != null && respHand instanceof LoadDataResponseHandler) {
+ ((LoadDataResponseHandler) respHand).requestDataResponse(data,
+ source);
+ } else {
+ closeNoHandler();
+ }
+ }
+
+ /**
+ * Handle the field EOF packet
+ */
+ private void handleFieldEofPacket(byte[] data) {
+ ResponseHandler respHand = responseHandler;
+ if (respHand != null) {
+ respHand.fieldEofResponse(header, fields, data, source);
+ } else {
+ closeNoHandler();
+ }
+ }
+
+ /**
+ * Handle a row data packet
+ */
+ private void handleRowPacket(byte[] data) {
+ ResponseHandler respHand = responseHandler;
+ if (respHand != null) {
+ respHand.rowResponse(data, source);
+ } else {
+ closeNoHandler();
+
+ }
+ }
+
+ private void closeNoHandler() {
+ if (!source.isClosedOrQuit()) {
+ source.close("no handler");
+ logger.warn("no handler bind in this con " + this + " client:"
+ + source);
+ }
+ }
+
+ /**
+ * Handle the row EOF packet
+ */
+ private void handleRowEofPacket(byte[] data) {
+ if (responseHandler != null) {
+ responseHandler.rowEofResponse(data, source);
+ } else {
+ closeNoHandler();
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/backend/mysql/nio/MySQLDataSource.java b/src/main/java/io/mycat/backend/mysql/nio/MySQLDataSource.java
new file mode 100644
index 000000000..d2cbccaba
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/nio/MySQLDataSource.java
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.backend.mysql.nio;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.Socket;
+import java.security.NoSuchAlgorithmException;
+
+import io.mycat.backend.datasource.PhysicalDatasource;
+import io.mycat.backend.heartbeat.DBHeartbeat;
+import io.mycat.backend.heartbeat.MySQLHeartbeat;
+import io.mycat.backend.mysql.SecurityUtil;
+import io.mycat.backend.mysql.nio.handler.ResponseHandler;
+import io.mycat.config.Capabilities;
+import io.mycat.config.model.DBHostConfig;
+import io.mycat.config.model.DataHostConfig;
+import io.mycat.net.mysql.AuthPacket;
+import io.mycat.net.mysql.BinaryPacket;
+import io.mycat.net.mysql.EOFPacket;
+import io.mycat.net.mysql.ErrorPacket;
+import io.mycat.net.mysql.HandshakePacket;
+import io.mycat.net.mysql.OkPacket;
+import io.mycat.net.mysql.QuitPacket;
+import io.mycat.net.mysql.Reply323Packet;
+
+/**
+ * @author mycat
+ */
+public class MySQLDataSource extends PhysicalDatasource {
+
+ private final MySQLConnectionFactory factory;
+
+ public MySQLDataSource(DBHostConfig config, DataHostConfig hostConfig,
+ boolean isReadNode) {
+ super(config, hostConfig, isReadNode);
+ this.factory = new MySQLConnectionFactory();
+
+ }
+
+ @Override
+ public void createNewConnection(ResponseHandler handler,String schema) throws IOException {
+ factory.make(this, handler,schema);
+ }
+
+ private long getClientFlags() {
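+ // capability flags advertised in the auth packet; compression, SSL, LOCAL INFILE and multi-statements are left disabled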
+ int flag = 0;
+ flag |= Capabilities.CLIENT_LONG_PASSWORD;
+ flag |= Capabilities.CLIENT_FOUND_ROWS;
+ flag |= Capabilities.CLIENT_LONG_FLAG;
+ flag |= Capabilities.CLIENT_CONNECT_WITH_DB;
+ // flag |= Capabilities.CLIENT_NO_SCHEMA;
+ // flag |= Capabilities.CLIENT_COMPRESS;
+ flag |= Capabilities.CLIENT_ODBC;
+ // flag |= Capabilities.CLIENT_LOCAL_FILES;
+ flag |= Capabilities.CLIENT_IGNORE_SPACE;
+ flag |= Capabilities.CLIENT_PROTOCOL_41;
+ flag |= Capabilities.CLIENT_INTERACTIVE;
+ // flag |= Capabilities.CLIENT_SSL;
+ flag |= Capabilities.CLIENT_IGNORE_SIGPIPE;
+ flag |= Capabilities.CLIENT_TRANSACTIONS;
+ // flag |= Capabilities.CLIENT_RESERVED;
+ flag |= Capabilities.CLIENT_SECURE_CONNECTION;
+ // client extension
+ // flag |= Capabilities.CLIENT_MULTI_STATEMENTS;
+ // flag |= Capabilities.CLIENT_MULTI_RESULTS;
+ return flag;
+ }
+
+
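+ // mysql_native_password scramble: the 20-byte seed is split across the two handshake fields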
+ private byte[] passwd(String pass, HandshakePacket hs) throws NoSuchAlgorithmException {
+ if (pass == null || pass.length() == 0) {
+ return null;
+ }
+ byte[] passwd = pass.getBytes();
+ int sl1 = hs.seed.length;
+ int sl2 = hs.restOfScrambleBuff.length;
+ byte[] seed = new byte[sl1 + sl2];
+ System.arraycopy(hs.seed, 0, seed, 0, sl1);
+ System.arraycopy(hs.restOfScrambleBuff, 0, seed, sl1, sl2);
+ return SecurityUtil.scramble411(passwd, seed);
+ }
+
+ @Override
+ public boolean testConnection(String schema) throws IOException {
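+ // probe the backend with a minimal blocking handshake over a plain socket, outside the NIO framework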
+
+ boolean isConnected = true;
+
+ Socket socket = null;
+ InputStream in = null;
+ OutputStream out = null;
+ try {
+ socket = new Socket(this.getConfig().getIp(), this.getConfig().getPort());
+ socket.setSoTimeout(1000 * 20);
+ socket.setReceiveBufferSize( 32768 );
+ socket.setSendBufferSize( 32768 );
+ socket.setTcpNoDelay(true);
+ socket.setKeepAlive(true);
+
+ in = new BufferedInputStream(socket.getInputStream(), 32768);
+ out = new BufferedOutputStream( socket.getOutputStream(), 32768 );
+
+ /**
+ * Phase 1: MySQL to client. Send handshake packet.
+ */
+ BinaryPacket bin1 = new BinaryPacket();
+ bin1.read(in);
+
+ HandshakePacket handshake = new HandshakePacket();
+ handshake.read( bin1 );
+
+ /**
+ * Phase 2: client to MySQL. Send auth packet.
+ */
+ AuthPacket authPacket = new AuthPacket();
+ authPacket.packetId = 1;
+ authPacket.clientFlags = getClientFlags();
+ authPacket.maxPacketSize = 1024 * 1024 * 16;
+ authPacket.charsetIndex = handshake.serverCharsetIndex & 0xff;
+ authPacket.user = this.getConfig().getUser();
+ try {
+ authPacket.password = passwd(this.getConfig().getPassword(), handshake);
+ } catch (NoSuchAlgorithmException e) {
+ throw new RuntimeException(e.getMessage());
+ }
+ authPacket.database = schema;
+ authPacket.write(out);
+ out.flush();
+
+ /**
+ * Phase 3: MySQL to client. send OK/ERROR packet.
+ */
+ BinaryPacket bin2 = new BinaryPacket();
+ bin2.read(in);
+ switch (bin2.data[0]) {
+ case OkPacket.FIELD_COUNT:
+ break;
+ case ErrorPacket.FIELD_COUNT:
+ ErrorPacket err = new ErrorPacket();
+ err.read(bin2);
+ isConnected = false;
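+ // note: no break here, so an error response also falls through to the 323 reply below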
+ case EOFPacket.FIELD_COUNT:
+ // send the 323 authentication response packet
+ Reply323Packet r323 = new Reply323Packet();
+ r323.packetId = ++bin2.packetId;
+ String passwd = this.getConfig().getPassword();
+ if (passwd != null && passwd.length() > 0) {
+ r323.seed = SecurityUtil.scramble323(passwd, new String(handshake.seed)).getBytes();
+ }
+ r323.write(out);
+ out.flush();
+ break;
+ }
+
+ } catch (IOException e) {
+ isConnected = false;
+ } finally {
+ try {
+ if (in != null) {
+ in.close();
+ }
+ } catch (IOException e) {}
+
+ try {
+ if (out != null) {
+ out.write(QuitPacket.QUIT);
+ out.flush();
+ out.close();
+ }
+ } catch (IOException e) {}
+
+ try {
+ if (socket != null)
+ socket.close();
+ } catch (IOException e) {}
+ }
+
+ return isConnected;
+ }
+
+ @Override
+ public DBHeartbeat createHeartBeat() {
+ return new MySQLHeartbeat(this);
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/server/executors/CommitNodeHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/CommitNodeHandler.java
similarity index 56%
rename from src/main/java/io/mycat/server/executors/CommitNodeHandler.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/CommitNodeHandler.java
index 97859a4b1..ec58ff973 100644
--- a/src/main/java/io/mycat/server/executors/CommitNodeHandler.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/CommitNodeHandler.java
@@ -21,22 +21,25 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.executors;
+package io.mycat.backend.mysql.nio.handler;
+
+import java.util.List;
+
+import io.mycat.backend.mysql.xa.TxState;
+import io.mycat.config.ErrorCode;
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
import io.mycat.backend.BackendConnection;
-import io.mycat.backend.nio.MySQLBackendConnection;
+import io.mycat.backend.mysql.nio.MySQLConnection;
+import io.mycat.net.mysql.ErrorPacket;
import io.mycat.server.NonBlockingSession;
-import io.mycat.server.packet.ErrorPacket;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
+import io.mycat.server.ServerConnection;
/**
* @author mycat
*/
public class CommitNodeHandler implements ResponseHandler {
- public static final Logger LOGGER = LoggerFactory
+ private static final Logger LOGGER = LoggerFactory
.getLogger(CommitNodeHandler.class);
private final NonBlockingSession session;
@@ -46,19 +49,30 @@ public CommitNodeHandler(NonBlockingSession session) {
public void commit(BackendConnection conn) {
conn.setResponseHandler(CommitNodeHandler.this);
- if (conn instanceof MySQLBackendConnection) {
- MySQLBackendConnection mysqlCon = (MySQLBackendConnection) conn;
- if (mysqlCon.getXaStatus() == 1) {
- String xaTxId = session.getXaTXID();
- String[] cmds = new String[] { "XA END " + xaTxId,
- "XA PREPARE " + xaTxId };
- mysqlCon.execBatchCmd(cmds);
- } else {
- conn.commit();
- }
- } else {
- conn.commit();
+ boolean isClosed=conn.isClosedOrQuit();
+ if(isClosed)
+ {
+ session.getSource().writeErrMessage(ErrorCode.ER_UNKNOWN_ERROR,
+ "receive commit, but found backend con is closed or quit");
+ LOGGER.error(conn + " receive commit, but found backend con is closed or quit");
}
+ if(conn instanceof MySQLConnection)
+ {
+ MySQLConnection mysqlCon = (MySQLConnection) conn;
+ if (mysqlCon.getXaStatus() == 1)
+ {
+ String xaTxId = session.getXaTXID()+",'"+mysqlCon.getSchema()+"'";
+ String[] cmds = new String[]{"XA END " + xaTxId,
+ "XA PREPARE " + xaTxId};
+ mysqlCon.execBatchCmd(cmds);
+ } else
+ {
+ conn.commit();
+ }
+ }else
+ {
+ conn.commit();
+ }
}
@Override
@@ -69,25 +83,43 @@ public void connectionAcquired(BackendConnection conn) {
@Override
public void okResponse(byte[] ok, BackendConnection conn) {
- if (conn instanceof MySQLBackendConnection) {
- MySQLBackendConnection mysqlCon = (MySQLBackendConnection) conn;
- switch (mysqlCon.getXaStatus()) {
- case 1:
- if (mysqlCon.batchCmdFinished()) {
- String xaTxId = session.getXaTXID();
- mysqlCon.execCmd("XA COMMIT " + xaTxId);
- mysqlCon.setXaStatus(2);
+ if(conn instanceof MySQLConnection)
+ {
+ MySQLConnection mysqlCon = (MySQLConnection) conn;
+ switch (mysqlCon.getXaStatus())
+ {
+ case TxState.TX_STARTED_STATE:
+ if (mysqlCon.batchCmdFinished())
+ {
+ String xaTxId = session.getXaTXID()+",'"+mysqlCon.getSchema()+"'";
+ mysqlCon.execCmd("XA COMMIT " + xaTxId);
+ mysqlCon.setXaStatus(TxState.TX_PREPARED_STATE);
+ }
+ return;
+ case TxState.TX_PREPARED_STATE:
+ {
+ mysqlCon.setXaStatus(TxState.TX_INITIALIZE_STATE);
+ break;
}
- return;
- case 2: {
- mysqlCon.setXaStatus(0);
- break;
+ default:
+ // LOGGER.error("Wrong XA status flag!");
}
+
+ /* 1. After the commit, the XA transaction is finished */
+ if(TxState.TX_INITIALIZE_STATE==mysqlCon.getXaStatus()){
+ if(session.getXaTXID()!=null){
+ session.setXATXEnabled(false);
+ }
}
}
+
+ /* 2. preAcStates records the previous autocommit state; if it was true, restore autocommit to true once the transaction ends */
+ if(session.getSource().isPreAcStates()&&!session.getSource().isAutocommit()){
+ session.getSource().setAutocommit(true);
+ }
session.clearResources(false);
- session.getSource().write(ok);
-
+ ServerConnection source = session.getSource();
+ source.write(ok);
}
@Override
@@ -111,8 +143,8 @@ public void rowEofResponse(byte[] eof, BackendConnection conn) {
public void fieldEofResponse(byte[] header, List fields,
byte[] eof, BackendConnection conn) {
LOGGER.error(new StringBuilder().append("unexpected packet for ")
- .append(conn).append(" bound by ").append(session.getSource())
- .append(": field's eof").toString());
+ .append(conn).append(" bound by ").append(session.getSource())
+ .append(": field's eof").toString());
}
@Override
@@ -122,14 +154,21 @@ public void rowResponse(byte[] row, BackendConnection conn) {
.append(": row data packet").toString());
}
+ @Override
+ public void writeQueueAvailable() {
+
+ }
+
@Override
public void connectionError(Throwable e, BackendConnection conn) {
+
}
@Override
public void connectionClose(BackendConnection conn, String reason) {
+
}
-}
\ No newline at end of file
+}
diff --git a/src/main/java/io/mycat/server/executors/ConnectionHeartBeatHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/ConnectionHeartBeatHandler.java
similarity index 95%
rename from src/main/java/io/mycat/server/executors/ConnectionHeartBeatHandler.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/ConnectionHeartBeatHandler.java
index d0ae02d28..583ac885b 100644
--- a/src/main/java/io/mycat/server/executors/ConnectionHeartBeatHandler.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/ConnectionHeartBeatHandler.java
@@ -21,12 +21,7 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.executors;
-
-import io.mycat.backend.BackendConnection;
-import io.mycat.server.packet.ErrorPacket;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+package io.mycat.backend.mysql.nio.handler;
import java.util.Collection;
import java.util.Iterator;
@@ -36,6 +31,11 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.BackendConnection;
+import io.mycat.net.mysql.ErrorPacket;
+
/**
* heartbeat check for mysql connections
*
@@ -43,7 +43,7 @@
*
*/
public class ConnectionHeartBeatHandler implements ResponseHandler {
- public static final Logger LOGGER = LoggerFactory
+ private static final Logger LOGGER = LoggerFactory
.getLogger(ConnectionHeartBeatHandler.class);
protected final ReentrantLock lock = new ReentrantLock();
private final ConcurrentHashMap allCons = new ConcurrentHashMap();
@@ -153,6 +153,11 @@ private void removeFinished(BackendConnection con) {
this.allCons.remove(id);
}
+ @Override
+ public void writeQueueAvailable() {
+
+ }
+
@Override
public void connectionClose(BackendConnection conn, String reason) {
removeFinished(conn);
diff --git a/src/main/java/io/mycat/server/executors/DelegateResponseHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/DelegateResponseHandler.java
similarity index 95%
rename from src/main/java/io/mycat/server/executors/DelegateResponseHandler.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/DelegateResponseHandler.java
index edf2709d2..cedd06407 100644
--- a/src/main/java/io/mycat/server/executors/DelegateResponseHandler.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/DelegateResponseHandler.java
@@ -21,12 +21,12 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.executors;
-
-import io.mycat.backend.BackendConnection;
+package io.mycat.backend.mysql.nio.handler;
import java.util.List;
+import io.mycat.backend.BackendConnection;
+
/**
* @author mycat
*/
@@ -75,7 +75,11 @@ public void rowEofResponse(byte[] eof, BackendConnection conn) {
target.rowEofResponse(eof, conn);
}
-
+ @Override
+ public void writeQueueAvailable() {
+ target.writeQueueAvailable();
+
+ }
@Override
public void connectionClose(BackendConnection conn, String reason) {
diff --git a/src/main/java/io/mycat/server/executors/FetchStoreNodeOfChildTableHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/FetchStoreNodeOfChildTableHandler.java
similarity index 63%
rename from src/main/java/io/mycat/server/executors/FetchStoreNodeOfChildTableHandler.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/FetchStoreNodeOfChildTableHandler.java
index 20f0550eb..548cb22ab 100644
--- a/src/main/java/io/mycat/server/executors/FetchStoreNodeOfChildTableHandler.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/FetchStoreNodeOfChildTableHandler.java
@@ -21,24 +21,26 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.executors;
+package io.mycat.backend.mysql.nio.handler;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
import io.mycat.MycatServer;
import io.mycat.backend.BackendConnection;
-import io.mycat.backend.PhysicalDBNode;
+import io.mycat.backend.datasource.PhysicalDBNode;
import io.mycat.cache.CachePool;
+import io.mycat.config.MycatConfig;
+import io.mycat.net.mysql.ErrorPacket;
+import io.mycat.net.mysql.RowDataPacket;
import io.mycat.route.RouteResultsetNode;
-import io.mycat.server.config.node.MycatConfig;
-import io.mycat.server.packet.ErrorPacket;
-import io.mycat.server.packet.RowDataPacket;
+import io.mycat.server.ServerConnection;
import io.mycat.server.parser.ServerParse;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReentrantLock;
/**
* company where id=(select company_id from customer where id=3); the one which
@@ -55,6 +57,67 @@ public class FetchStoreNodeOfChildTableHandler implements ResponseHandler {
private volatile String dataNode;
private AtomicInteger finished = new AtomicInteger(0);
protected final ReentrantLock lock = new ReentrantLock();
+
+ public String execute(String schema, String sql, List<String> dataNodes, ServerConnection sc) {
+
+ String key = schema + ":" + sql;
+ CachePool cache = MycatServer.getInstance().getCacheService()
+ .getCachePool("ER_SQL2PARENTID");
+ String result = (String) cache.get(key);
+ if (result != null) {
+ return result;
+ }
+ this.sql = sql;
+ int totalCount = dataNodes.size();
+ long startTime = System.currentTimeMillis();
+ long endTime = startTime + 5 * 60 * 1000L;
+ MycatConfig conf = MycatServer.getInstance().getConfig();
+
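+ // query each candidate data node in turn; the first node that returns a row is cached as the parent record's location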
+ LOGGER.debug("find child node with sql:" + sql);
+ for (String dn : dataNodes) {
+ if (dataNode != null) {
+ return dataNode;
+ }
+ PhysicalDBNode mysqlDN = conf.getDataNodes().get(dn);
+ try {
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("execute in datanode " + dn);
+ }
+ RouteResultsetNode node = new RouteResultsetNode(dn, ServerParse.SELECT, sql);
+ node.setRunOnSlave(false); // when resolving the child-table node, prefer running on the master
+
+ /*
+ * fix #1370: prefer a connection the session already holds; otherwise, due to transaction isolation, data updated inside the current transaction may not be visible
+ * Tip: a connection obtained via mysqlDN.getConnection is not the current connection
+ *
+ */
+ BackendConnection conn = sc.getSession2().getTarget(node);
+ if(sc.getSession2().tryExistsCon(conn, node)) {
+ _execute(conn, node, sc);
+ } else {
+ mysqlDN.getConnection(mysqlDN.getDatabase(), sc.isAutocommit(), node, this, node);
+ }
+ } catch (Exception e) {
+ LOGGER.warn("get connection err " + e);
+ }
+ }
+
+ while (dataNode == null && System.currentTimeMillis() < endTime) {
+ try {
+ Thread.sleep(50);
+ } catch (InterruptedException e) {
+ break;
+ }
+ if (dataNode != null || finished.get() >= totalCount) {
+ break;
+ }
+ }
+ if (dataNode != null) {
+ cache.putIfAbsent(key, dataNode);
+ }
+ return dataNode;
+
+ }
public String execute(String schema, String sql, ArrayList dataNodes) {
String key = schema + ":" + sql;
@@ -80,9 +143,14 @@ public String execute(String schema, String sql, ArrayList dataNodes) {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("execute in datanode " + dn);
}
- mysqlDN.getConnection(mysqlDN.getDatabase(), true,
- new RouteResultsetNode(dn, ServerParse.SELECT, sql),
- this, dn);
+ RouteResultsetNode node = new RouteResultsetNode(dn, ServerParse.SELECT, sql);
+ node.setRunOnSlave(false); // when resolving the child-table node, prefer running on the master
+
+ mysqlDN.getConnection(mysqlDN.getDatabase(), true, node, this, node);
+
+// mysqlDN.getConnection(mysqlDN.getDatabase(), true,
+// new RouteResultsetNode(dn, ServerParse.SELECT, sql),
+// this, dn);
} catch (Exception e) {
LOGGER.warn("get connection err " + e);
}
@@ -109,6 +177,15 @@ public String execute(String schema, String sql, ArrayList dataNodes) {
return dataNode;
}
+
+ private void _execute(BackendConnection conn, RouteResultsetNode node, ServerConnection sc) {
+ conn.setResponseHandler(this);
+ try {
+ conn.execute(node, sc, sc.isAutocommit());
+ } catch (IOException e) {
+ connectionError(e, conn);
+ }
+ }
@Override
public void connectionAcquired(BackendConnection conn) {
@@ -156,7 +233,7 @@ public void rowResponse(byte[] row, BackendConnection conn) {
}
if (result == null) {
result = getColumn(row);
- dataNode = (String) conn.getAttachment();
+ dataNode = ((RouteResultsetNode) conn.getAttachment()).getName();
} else {
LOGGER.warn("find multi data nodes for child table store, sql is: "
+ sql);
@@ -184,6 +261,11 @@ private void executeException(BackendConnection c, Throwable e) {
}
+ @Override
+ public void writeQueueAvailable() {
+
+ }
+
@Override
public void connectionClose(BackendConnection conn, String reason) {
diff --git a/src/main/java/io/mycat/server/executors/GetConnectionHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/GetConnectionHandler.java
similarity index 89%
rename from src/main/java/io/mycat/server/executors/GetConnectionHandler.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/GetConnectionHandler.java
index e4d0d68d9..d748f2114 100644
--- a/src/main/java/io/mycat/server/executors/GetConnectionHandler.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/GetConnectionHandler.java
@@ -21,16 +21,16 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.executors;
-
-import io.mycat.backend.BackendConnection;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+package io.mycat.backend.mysql.nio.handler;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicInteger;
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.BackendConnection;
+
/**
* wuzh
*
@@ -52,11 +52,10 @@ public GetConnectionHandler(
this.total = totalNumber;
}
- public String getStatusInfo() {
- return "finished " + finishedCount.get() + " success "
- + successCons.size() + " target count:" + this.total;
+ public String getStatusInfo()
+ {
+ return "finished "+ finishedCount.get()+" success "+successCons.size()+" target count:"+this.total;
}
-
public boolean finished() {
return finishedCount.get() >= total;
}
@@ -72,7 +71,7 @@ public void connectionAcquired(BackendConnection conn) {
@Override
public void connectionError(Throwable e, BackendConnection conn) {
finishedCount.addAndGet(1);
- logger.warn("connect error " + conn + e);
+ logger.warn("connect error " + conn+ e);
conn.release();
}
@@ -104,6 +103,11 @@ public void rowEofResponse(byte[] eof, BackendConnection conn) {
}
+ @Override
+ public void writeQueueAvailable() {
+
+ }
+
@Override
public void connectionClose(BackendConnection conn, String reason) {
diff --git a/src/main/java/io/mycat/server/executors/KillConnectionHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/KillConnectionHandler.java
similarity index 87%
rename from src/main/java/io/mycat/server/executors/KillConnectionHandler.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/KillConnectionHandler.java
index 955078d8f..0fd1dbd21 100644
--- a/src/main/java/io/mycat/server/executors/KillConnectionHandler.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/KillConnectionHandler.java
@@ -21,20 +21,20 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.executors;
-
-import io.mycat.backend.BackendConnection;
-import io.mycat.backend.nio.MySQLBackendConnection;
-import io.mycat.server.NonBlockingSession;
-import io.mycat.server.packet.CommandPacket;
-import io.mycat.server.packet.ErrorPacket;
-import io.mycat.server.packet.MySQLPacket;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+package io.mycat.backend.mysql.nio.handler;
import java.io.UnsupportedEncodingException;
import java.util.List;
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.BackendConnection;
+import io.mycat.backend.mysql.nio.MySQLConnection;
+import io.mycat.net.mysql.CommandPacket;
+import io.mycat.net.mysql.ErrorPacket;
+import io.mycat.net.mysql.MySQLPacket;
+import io.mycat.server.NonBlockingSession;
+
/**
* @author mycat
*/
@@ -42,18 +42,18 @@ public class KillConnectionHandler implements ResponseHandler {
private static final Logger LOGGER = LoggerFactory
.getLogger(KillConnectionHandler.class);
- private final MySQLBackendConnection killee;
+ private final MySQLConnection killee;
private final NonBlockingSession session;
public KillConnectionHandler(BackendConnection killee,
NonBlockingSession session) {
- this.killee = (MySQLBackendConnection) killee;
+ this.killee = (MySQLConnection) killee;
this.session = session;
}
@Override
public void connectionAcquired(BackendConnection conn) {
- MySQLBackendConnection mysqlCon = (MySQLBackendConnection) conn;
+ MySQLConnection mysqlCon = (MySQLConnection) conn;
conn.setResponseHandler(this);
CommandPacket packet = new CommandPacket();
packet.packetId = 0;
@@ -113,6 +113,11 @@ public void fieldEofResponse(byte[] header, List fields,
public void rowResponse(byte[] row, BackendConnection conn) {
}
+ @Override
+ public void writeQueueAvailable() {
+
+ }
+
@Override
public void connectionClose(BackendConnection conn, String reason) {
}
diff --git a/src/main/java/io/mycat/server/executors/LoadDataResponseHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/LoadDataResponseHandler.java
similarity index 85%
rename from src/main/java/io/mycat/server/executors/LoadDataResponseHandler.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/LoadDataResponseHandler.java
index b76bc9f91..a621e9a39 100644
--- a/src/main/java/io/mycat/server/executors/LoadDataResponseHandler.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/LoadDataResponseHandler.java
@@ -1,4 +1,4 @@
-package io.mycat.server.executors;
+package io.mycat.backend.mysql.nio.handler;
import io.mycat.backend.BackendConnection;
diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/LockTablesHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/LockTablesHandler.java
new file mode 100644
index 000000000..cfff92588
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/LockTablesHandler.java
@@ -0,0 +1,135 @@
+package io.mycat.backend.mysql.nio.handler;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.mycat.MycatServer;
+import io.mycat.backend.BackendConnection;
+import io.mycat.backend.datasource.PhysicalDBNode;
+import io.mycat.config.MycatConfig;
+import io.mycat.net.mysql.OkPacket;
+import io.mycat.route.RouteResultset;
+import io.mycat.route.RouteResultsetNode;
+import io.mycat.server.NonBlockingSession;
+
+/**
+ * Handler for LOCK TABLES statements
+ * @author songdabin
+ *
+ */
+public class LockTablesHandler extends MultiNodeHandler {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(LockTablesHandler.class);
+
+ private final RouteResultset rrs;
+ private final ReentrantLock lock;
+ private final boolean autocommit;
+
+ public LockTablesHandler(NonBlockingSession session, RouteResultset rrs) {
+ super(session);
+ this.rrs = rrs;
+ this.autocommit = session.getSource().isAutocommit();
+ this.lock = new ReentrantLock();
+ }
+
+ public void execute() throws Exception {
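+ // send the LOCK TABLES statement to every node of the route result, reusing connections the session already holds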
+ super.reset(this.rrs.getNodes().length);
+ MycatConfig conf = MycatServer.getInstance().getConfig();
+ for (final RouteResultsetNode node : rrs.getNodes()) {
+ BackendConnection conn = session.getTarget(node);
+ if (session.tryExistsCon(conn, node)) {
+ _execute(conn, node);
+ } else {
+ // create new connection
+ PhysicalDBNode dn = conf.getDataNodes().get(node.getName());
+ dn.getConnection(dn.getDatabase(), autocommit, node, this, node);
+ }
+ }
+ }
+
+ private void _execute(BackendConnection conn, RouteResultsetNode node) {
+ if (clearIfSessionClosed(session)) {
+ return;
+ }
+ conn.setResponseHandler(this);
+ try {
+ conn.execute(node, session.getSource(), autocommit);
+ } catch (IOException e) {
+ connectionError(e, conn);
+ }
+ }
+
+ @Override
+ public void connectionAcquired(BackendConnection conn) {
+ final RouteResultsetNode node = (RouteResultsetNode) conn.getAttachment();
+ session.bindConnection(node, conn);
+ _execute(conn, node);
+ }
+
+ @Override
+ public void okResponse(byte[] data, BackendConnection conn) {
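+ // one OK is written back to the client only after every node has acknowledged the lock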
+ boolean executeResponse = conn.syncAndExcute();
+ if (executeResponse) {
+ if (clearIfSessionClosed(session)) {
+ return;
+ }
+ boolean isEndPack = decrementCountBy(1);
+ if (isEndPack) {
+ if (this.isFail() || session.closed()) {
+ tryErrorFinished(true);
+ return;
+ }
+ OkPacket ok = new OkPacket();
+ ok.read(data);
+ lock.lock();
+ try {
+ ok.packetId = ++ packetId;
+ ok.serverStatus = session.getSource().isAutocommit() ? 2:1;
+ } finally {
+ lock.unlock();
+ }
+ ok.write(session.getSource());
+ }
+ }
+ }
+
+ protected String byte2Str(byte[] data) {
+ StringBuilder sb = new StringBuilder();
+ for (byte b : data) {
+ sb.append(Byte.toString(b));
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public void fieldEofResponse(byte[] header, List<byte[]> fields, byte[] eof, BackendConnection conn) {
+ LOGGER.error(new StringBuilder().append("unexpected packet for ")
+ .append(conn).append(" bound by ").append(session.getSource())
+ .append(": field's eof").toString());
+ }
+
+ @Override
+ public void rowResponse(byte[] row, BackendConnection conn) {
+ LOGGER.warn(new StringBuilder().append("unexpected packet for ")
+ .append(conn).append(" bound by ").append(session.getSource())
+ .append(": row data packet").toString());
+ }
+
+ @Override
+ public void rowEofResponse(byte[] eof, BackendConnection conn) {
+ LOGGER.error(new StringBuilder().append("unexpected packet for ")
+ .append(conn).append(" bound by ").append(session.getSource())
+ .append(": row's eof").toString());
+ }
+
+ @Override
+ public void writeQueueAvailable() {
+ // TODO Auto-generated method stub
+
+ }
+
+}
diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/MiddlerQueryResultHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/MiddlerQueryResultHandler.java
new file mode 100644
index 000000000..e42442de1
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/MiddlerQueryResultHandler.java
@@ -0,0 +1,51 @@
+package io.mycat.backend.mysql.nio.handler;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.alibaba.druid.sql.ast.expr.SQLCharExpr;
+
+import io.mycat.backend.mysql.DataType;
+
+/**
+ * Handler for intermediate query results
+ * @author huangyiming
+ *
+ * @param <T>
+ */
+public class MiddlerQueryResultHandler<T> implements MiddlerResultHandler<T> {
+
+ List<SQLCharExpr> reusult = new ArrayList<>();
+ DataType dataType;
+ Class clazz;
+ private SecondHandler secondHandler;
+
+ public MiddlerQueryResultHandler(SecondHandler secondHandler) {
+ this.secondHandler = secondHandler;
+
+
+ }
+ // ensure there is only one constructor entry point
+ private MiddlerQueryResultHandler(){
+
+ }
+
+ @Override
+ public List<SQLCharExpr> getResult() {
+ return reusult;
+ }
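+ // values are kept as SQLCharExpr literals and handed to the SecondHandler when secondEexcute() runs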
+ @Override
+ public void add(T t ) {
+ reusult.add(new SQLCharExpr(t==null?null:t.toString()));
+ }
+
+ @Override
+ public String getDataType() {
+ return dataType.name();
+ }
+
+ @Override
+ public void secondEexcute() {
+ secondHandler.doExecute(getResult());
+ }
+}
diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/MiddlerResultHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/MiddlerResultHandler.java
new file mode 100644
index 000000000..734cec632
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/MiddlerResultHandler.java
@@ -0,0 +1,29 @@
+package io.mycat.backend.mysql.nio.handler;
+
+import java.util.List;
+
+import com.alibaba.druid.sql.ast.expr.SQLCharExpr;
+
+/**
+ * Intermediate result handler
+ * @author huangyiming
+ *
+ * @param <T>
+ */
+public interface MiddlerResultHandler<T> {
+
+
+ public List<SQLCharExpr> getResult();
+
+ public void add(T t );
+
+ public String getDataType();
+
+ public void secondEexcute();
+
+
+
+
+
+
+ }
diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeCoordinator.java b/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeCoordinator.java
new file mode 100644
index 000000000..33e613ea2
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeCoordinator.java
@@ -0,0 +1,263 @@
+package io.mycat.backend.mysql.nio.handler;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.esotericsoftware.minlog.Log;
+import io.mycat.backend.mysql.nio.MySQLConnection;
+import io.mycat.backend.mysql.xa.CoordinatorLogEntry;
+import io.mycat.backend.mysql.xa.ParticipantLogEntry;
+import io.mycat.backend.mysql.xa.TxState;
+import io.mycat.backend.mysql.xa.recovery.Repository;
+import io.mycat.backend.mysql.xa.recovery.impl.FileSystemRepository;
+import io.mycat.backend.mysql.xa.recovery.impl.InMemoryRepository;
+import io.mycat.net.BackendAIOConnection;
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.BackendConnection;
+import io.mycat.route.RouteResultsetNode;
+import io.mycat.server.NonBlockingSession;
+import io.mycat.server.sqlcmd.SQLCtrlCommand;
+
+public class MultiNodeCoordinator implements ResponseHandler {
+ private static final Logger LOGGER = LoggerFactory
+ .getLogger(MultiNodeCoordinator.class);
+ public static final Repository fileRepository = new FileSystemRepository();
+ public static final Repository inMemoryRepository = new InMemoryRepository();
+ private final AtomicInteger runningCount = new AtomicInteger(0);
+ private final AtomicInteger faileCount = new AtomicInteger(0);
+ private volatile int nodeCount;
+ private final NonBlockingSession session;
+ private SQLCtrlCommand cmdHandler;
+ private final AtomicBoolean failed = new AtomicBoolean(false);
+
+ public MultiNodeCoordinator(NonBlockingSession session) {
+ this.session = session;
+ }
+
+ /** Multi-nodes 1pc Commit Handle **/
+ public void executeBatchNodeCmd(SQLCtrlCommand cmdHandler) {
+ this.cmdHandler = cmdHandler;
+ final int initCount = session.getTargetCount();
+ runningCount.set(initCount);
+ nodeCount = initCount;
+ failed.set(false);
+ faileCount.set(0);
+ //recovery nodes log
+ ParticipantLogEntry[] participantLogEntry = new ParticipantLogEntry[initCount];
+ // execute
+ int started = 0;
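+ // connections whose XA transaction is active get "XA END" + "XA PREPARE"; the rest just receive the control command from cmdHandler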
+ for (RouteResultsetNode rrn : session.getTargetKeys()) {
+ if (rrn == null) {
+ LOGGER.error("null is contained in RoutResultsetNodes, source = "
+ + session.getSource());
+ continue;
+ }
+ final BackendConnection conn = session.getTarget(rrn);
+ if (conn != null) {
+ conn.setResponseHandler(this);
+ //process the XA_END XA_PREPARE Command
+ if(conn instanceof MySQLConnection){
+ MySQLConnection mysqlCon = (MySQLConnection) conn;
+ String xaTxId = null;
+ if(session.getXaTXID()!=null){
+ xaTxId = session.getXaTXID() +",'"+ mysqlCon.getSchema()+"'";
+ }
+ if (mysqlCon.getXaStatus() == TxState.TX_STARTED_STATE)
+ {
+ //recovery Log
+ participantLogEntry[started] = new ParticipantLogEntry(xaTxId,conn.getHost(),0,conn.getSchema(),((MySQLConnection) conn).getXaStatus());
+ String[] cmds = new String[]{"XA END " + xaTxId,
+ "XA PREPARE " + xaTxId};
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Start execute the batch cmd : "+ cmds[0] + ";" + cmds[1]+","+
+ "current connection:"+conn.getHost()+":"+conn.getPort());
+ }
+ mysqlCon.execBatchCmd(cmds);
+ } else
+ {
+ //recovery Log
+ participantLogEntry[started] = new ParticipantLogEntry(xaTxId,conn.getHost(),0,conn.getSchema(),((MySQLConnection) conn).getXaStatus());
+ cmdHandler.sendCommand(session, conn);
+ }
+ }else{
+ cmdHandler.sendCommand(session, conn);
+ }
+ ++started;
+ }
+ }
+
+ //xa recovery log
+ if(session.getXaTXID()!=null) {
+ CoordinatorLogEntry coordinatorLogEntry = new CoordinatorLogEntry(session.getXaTXID(), false, participantLogEntry);
+ inMemoryRepository.put(session.getXaTXID(), coordinatorLogEntry);
+ fileRepository.writeCheckpoint(inMemoryRepository.getAllCoordinatorLogEntries());
+ }
+ if (started < nodeCount) {
+ runningCount.set(started);
+ LOGGER.warn("some connection failed to execute "
+ + (nodeCount - started));
+ /**
+ * assumption: only caused by front-end connection close.
+ * Otherwise, packet must be returned to front-end
+ */
+ failed.set(true);
+ }
+ }
+
+ private boolean finished() {
+ int val = runningCount.decrementAndGet();
+ return (val == 0);
+ }
+
+ @Override
+ public void connectionError(Throwable e, BackendConnection conn) {
+ }
+
+ @Override
+ public void connectionAcquired(BackendConnection conn) {
+
+ }
+
+ @Override
+ public void errorResponse(byte[] err, BackendConnection conn) {
+ faileCount.incrementAndGet();
+
+ //replayCommit
+ if(conn instanceof MySQLConnection) {
+ MySQLConnection mysqlCon = (MySQLConnection) conn;
+ String xaTxId = session.getXaTXID();
+ if (xaTxId != null) {
+ xaTxId += ",'"+mysqlCon.getSchema()+"'";
+ String cmd = "XA COMMIT " + xaTxId;
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Replay Commit execute the cmd :" + cmd + ",current host:" +
+ mysqlCon.getHost() + ":" + mysqlCon.getPort());
+ }
+ mysqlCon.execCmd(cmd);
+ }
+ }
+
+ //release connection
+ if (this.cmdHandler.releaseConOnErr()) {
+ session.releaseConnection(conn);
+ } else {
+ session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(),false);
+ }
+ if (this.finished()) {
+ cmdHandler.errorResponse(session, err, this.nodeCount,
+ this.faileCount.get());
+ if (cmdHandler.isAutoClearSessionCons()) {
+ session.clearResources(session.getSource().isTxInterrupted());
+ }
+ }
+
+ }
+
+ @Override
+ public void okResponse(byte[] ok, BackendConnection conn) {
+ //process the XA transaction 2PC commit
+ if(conn instanceof MySQLConnection)
+ {
+ MySQLConnection mysqlCon = (MySQLConnection) conn;
+ switch (mysqlCon.getXaStatus())
+ {
+ case TxState.TX_STARTED_STATE:
+ //if several SQL executions are waiting for okResponse, they arrive here one by one;
+ //wait until all nodes are ready, then send XA COMMIT to every node
+ if (mysqlCon.batchCmdFinished())
+ {
+ String xaTxId = session.getXaTXID();
+ String cmd = "XA COMMIT " + xaTxId +",'"+mysqlCon.getSchema()+"'";
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Start execute the cmd :"+cmd+",current host:"+
+ mysqlCon.getHost()+":"+mysqlCon.getPort());
+ }
+ //recovery log
+ CoordinatorLogEntry coordinatorLogEntry = inMemoryRepository.get(xaTxId);
+ for(int i=0; i fields,
+ byte[] eof, BackendConnection conn) {
+
+ }
+
+ @Override
+ public void rowResponse(byte[] row, BackendConnection conn) {
+
+ }
+
+ @Override
+ public void rowEofResponse(byte[] eof, BackendConnection conn) {
+ }
+
+ @Override
+ public void writeQueueAvailable() {
+
+ }
+
+ @Override
+ public void connectionClose(BackendConnection conn, String reason) {
+
+ }
+
+}
diff --git a/src/main/java/io/mycat/server/executors/MultiNodeHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeHandler.java
similarity index 85%
rename from src/main/java/io/mycat/server/executors/MultiNodeHandler.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeHandler.java
index da5fde7dc..52581ad14 100644
--- a/src/main/java/io/mycat/server/executors/MultiNodeHandler.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeHandler.java
@@ -1,229 +1,238 @@
-/*
- * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software;Designed and Developed mainly by many Chinese
- * opensource volunteers. you can redistribute it and/or modify it under the
- * terms of the GNU General Public License version 2 only, as published by the
- * Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Any questions about this component can be directed to it's project Web address
- * https://code.google.com/p/opencloudb/.
- *
- */
-package io.mycat.server.executors;
-
-import io.mycat.backend.BackendConnection;
-import io.mycat.server.ErrorCode;
-import io.mycat.server.NonBlockingSession;
-import io.mycat.server.packet.ErrorPacket;
-import io.mycat.util.StringUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * @author mycat
- */
-abstract class MultiNodeHandler implements ResponseHandler, Terminatable {
- public static final Logger LOGGER = LoggerFactory
- .getLogger(MultiNodeHandler.class);
- protected final ReentrantLock lock = new ReentrantLock();
- protected final NonBlockingSession session;
- private AtomicBoolean isFailed = new AtomicBoolean(false);
- protected volatile String error;
- protected byte packetId;
- protected final AtomicBoolean errorRepsponsed = new AtomicBoolean(false);
-
- public MultiNodeHandler(NonBlockingSession session) {
- if (session == null) {
- throw new IllegalArgumentException("session is null!");
- }
- this.session = session;
- }
-
- public void setFail(String errMsg) {
- isFailed.set(true);
- error = errMsg;
- }
-
- public boolean isFail() {
- return isFailed.get();
- }
-
- private int nodeCount;
-
- private Runnable terminateCallBack;
-
- @Override
- public void terminate(Runnable terminateCallBack) {
- boolean zeroReached = false;
- lock.lock();
- try {
- if (nodeCount > 0) {
- this.terminateCallBack = terminateCallBack;
- } else {
- zeroReached = true;
- }
- } finally {
- lock.unlock();
- }
- if (zeroReached) {
- terminateCallBack.run();
- }
- }
-
- protected boolean canClose(BackendConnection conn, boolean tryErrorFinish) {
-
- // realse this connection if safe
- session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false);
- boolean allFinished = false;
- if (tryErrorFinish) {
- allFinished = this.decrementCountBy(1);
- this.tryErrorFinished(allFinished);
- }
-
- return allFinished;
- }
-
- protected void decrementCountToZero() {
- Runnable callback;
- lock.lock();
- try {
- nodeCount = 0;
- callback = this.terminateCallBack;
- this.terminateCallBack = null;
- } finally {
- lock.unlock();
- }
- if (callback != null) {
- callback.run();
- }
- }
-
- public void connectionError(Throwable e, BackendConnection conn) {
- boolean canClose = decrementCountBy(1);
- this.tryErrorFinished(canClose);
- }
-
- public void errorResponse(byte[] data, BackendConnection conn) {
- session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false);
- ErrorPacket err = new ErrorPacket();
- err.read(data);
- String errmsg = new String(err.message);
- this.setFail(errmsg);
- LOGGER.warn("error response from " + conn + " err " + errmsg + " code:"
- + err.errno);
-
- this.tryErrorFinished(this.decrementCountBy(1));
- }
-
- public boolean clearIfSessionClosed(NonBlockingSession session) {
- if (session.closed()) {
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("session closed ,clear resources " + session);
- }
-
- session.clearResources(true);
- this.clearResources();
- return true;
- } else {
- return false;
- }
-
- }
-
- protected boolean decrementCountBy(int finished) {
- boolean zeroReached = false;
- Runnable callback = null;
- lock.lock();
- try {
- if (zeroReached = --nodeCount == 0) {
- callback = this.terminateCallBack;
- this.terminateCallBack = null;
- }
- } finally {
- lock.unlock();
- }
- if (zeroReached && callback != null) {
- callback.run();
- }
- return zeroReached;
- }
-
- protected void reset(int initCount) {
- nodeCount = initCount;
- isFailed.set(false);
- error = null;
- packetId = 0;
- }
-
- protected ErrorPacket createErrPkg(String errmgs) {
- ErrorPacket err = new ErrorPacket();
- lock.lock();
- try {
- err.packetId = ++packetId;
- } finally {
- lock.unlock();
- }
- err.errno = ErrorCode.ER_UNKNOWN_ERROR;
- err.message = StringUtil.encode(errmgs, session.getSource()
- .getCharset());
- return err;
- }
-
- protected void tryErrorFinished(boolean allEnd) {
- if (allEnd && !session.closed()) {
- if (errorRepsponsed.compareAndSet(false, true)) {
- createErrPkg(this.error).write(session.getSource());
- }
- // clear session resources,release all
- if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("error all end ,clear session resource ");
- }
- if (session.getSource().isAutocommit()) {
- session.closeAndClearResources(error);
- } else {
- session.getSource().setTxInterrupt(this.error);
- // clear resouces
- clearResources();
- }
-
- }
-
- }
-
- public void connectionClose(BackendConnection conn, String reason) {
- this.setFail("closed connection:" + reason + " con:" + conn);
- boolean finished = false;
- lock.lock();
- try {
- finished = (this.nodeCount == 0);
-
- } finally {
- lock.unlock();
- }
- if (finished == false) {
- finished = this.decrementCountBy(1);
- }
- if (error == null) {
- error = "back connection closed ";
- }
- tryErrorFinished(finished);
- }
-
- public void clearResources() {
- }
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.backend.mysql.nio.handler;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.BackendConnection;
+import io.mycat.config.ErrorCode;
+import io.mycat.net.mysql.ErrorPacket;
+import io.mycat.server.NonBlockingSession;
+import io.mycat.util.StringUtil;
+
+/**
+ * @author mycat
+ */
+abstract class MultiNodeHandler implements ResponseHandler, Terminatable {
+ private static final Logger LOGGER = LoggerFactory
+ .getLogger(MultiNodeHandler.class);
+ protected final ReentrantLock lock = new ReentrantLock();
+ protected final NonBlockingSession session;
+ private AtomicBoolean isFailed = new AtomicBoolean(false);
+ protected volatile String error;
+ protected byte packetId;
+ protected final AtomicBoolean errorRepsponsed = new AtomicBoolean(false);
+
+ public MultiNodeHandler(NonBlockingSession session) {
+ if (session == null) {
+ throw new IllegalArgumentException("session is null!");
+ }
+ this.session = session;
+ }
+
+ public void setFail(String errMsg) {
+ isFailed.set(true);
+ error = errMsg;
+ }
+
+ public boolean isFail() {
+ return isFailed.get();
+ }
+
+ private int nodeCount;
+
+ private Runnable terminateCallBack;
+
+ @Override
+ public void terminate(Runnable terminateCallBack) {
+ boolean zeroReached = false;
+ lock.lock();
+ try {
+ if (nodeCount > 0) {
+ this.terminateCallBack = terminateCallBack;
+ } else {
+ zeroReached = true;
+ }
+ } finally {
+ lock.unlock();
+ }
+ if (zeroReached) {
+ terminateCallBack.run();
+ }
+ }
+
+ protected boolean canClose(BackendConnection conn, boolean tryErrorFinish) {
+
+		// release this connection if safe
+ session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false);
+ boolean allFinished = false;
+ if (tryErrorFinish) {
+ allFinished = this.decrementCountBy(1);
+ this.tryErrorFinished(allFinished);
+ }
+
+ return allFinished;
+ }
+
+ protected void decrementCountToZero() {
+ Runnable callback;
+ lock.lock();
+ try {
+ nodeCount = 0;
+ callback = this.terminateCallBack;
+ this.terminateCallBack = null;
+ } finally {
+ lock.unlock();
+ }
+ if (callback != null) {
+ callback.run();
+ }
+ }
+
+ public void connectionError(Throwable e, BackendConnection conn) {
+ final boolean canClose = decrementCountBy(1);
+		// the error message from Throwable e must be saved via setFail(); otherwise a null
+		// message is responded and clients such as the mysql command line report "Query OK"!!
+ // @author Uncle-pan
+ // @since 2016-03-26
+ if(canClose){
+ setFail("backend connect: "+e);
+ }
+ LOGGER.warn("backend connect", e);
+ this.tryErrorFinished(canClose);
+ }
+
+ public void errorResponse(byte[] data, BackendConnection conn) {
+ session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false);
+ ErrorPacket err = new ErrorPacket();
+ err.read(data);
+
+ String errmsg = new String(err.message);
+ this.setFail(errmsg);
+
+ LOGGER.warn("error response from " + conn + " err " + errmsg + " code:" + err.errno);
+
+ this.tryErrorFinished(this.decrementCountBy(1));
+ }
+
+ public boolean clearIfSessionClosed(NonBlockingSession session) {
+ if (session.closed()) {
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("session closed ,clear resources " + session);
+ }
+
+ session.clearResources(true);
+ this.clearResources();
+ return true;
+ } else {
+ return false;
+ }
+
+ }
+
+ protected boolean decrementCountBy(int finished) {
+ boolean zeroReached = false;
+ Runnable callback = null;
+ lock.lock();
+ try {
+ if (zeroReached = --nodeCount == 0) {
+ callback = this.terminateCallBack;
+ this.terminateCallBack = null;
+ }
+ } finally {
+ lock.unlock();
+ }
+ if (zeroReached && callback != null) {
+ callback.run();
+ }
+ return zeroReached;
+ }
+
+ protected void reset(int initCount) {
+ nodeCount = initCount;
+ isFailed.set(false);
+ error = null;
+ packetId = 0;
+ }
+
+ protected ErrorPacket createErrPkg(String errmgs) {
+ ErrorPacket err = new ErrorPacket();
+ lock.lock();
+ try {
+ err.packetId = ++packetId;
+ } finally {
+ lock.unlock();
+ }
+ err.errno = ErrorCode.ER_UNKNOWN_ERROR;
+ err.message = StringUtil.encode(errmgs, session.getSource().getCharset());
+ return err;
+ }
+
+ protected void tryErrorFinished(boolean allEnd) {
+ if (allEnd && !session.closed()) {
+
+ if (errorRepsponsed.compareAndSet(false, true)) {
+ createErrPkg(this.error).write(session.getSource());
+ }
+ // clear session resources,release all
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("error all end ,clear session resource ");
+ }
+ if (session.getSource().isAutocommit()) {
+ session.closeAndClearResources(error);
+ } else {
+ session.getSource().setTxInterrupt(this.error);
+				// clear resources
+ clearResources();
+ }
+
+ }
+
+ }
+
+ public void connectionClose(BackendConnection conn, String reason) {
+ this.setFail("closed connection:" + reason + " con:" + conn);
+ boolean finished = false;
+ lock.lock();
+ try {
+ finished = (this.nodeCount == 0);
+
+ } finally {
+ lock.unlock();
+ }
+ if (finished == false) {
+ finished = this.decrementCountBy(1);
+ }
+ if (error == null) {
+ error = "back connection closed ";
+ }
+ tryErrorFinished(finished);
+ }
+
+ public void clearResources() {
+ }
}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeQueryHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeQueryHandler.java
new file mode 100644
index 000000000..508ec314b
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/MultiNodeQueryHandler.java
@@ -0,0 +1,872 @@
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.backend.mysql.nio.handler;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.mycat.MycatServer;
+import io.mycat.backend.BackendConnection;
+import io.mycat.backend.datasource.PhysicalDBNode;
+import io.mycat.backend.mysql.LoadDataUtil;
+import io.mycat.cache.LayerCachePool;
+import io.mycat.config.MycatConfig;
+import io.mycat.memory.unsafe.row.UnsafeRow;
+import io.mycat.net.mysql.BinaryRowDataPacket;
+import io.mycat.net.mysql.FieldPacket;
+import io.mycat.net.mysql.OkPacket;
+import io.mycat.net.mysql.ResultSetHeaderPacket;
+import io.mycat.net.mysql.RowDataPacket;
+import io.mycat.route.RouteResultset;
+import io.mycat.route.RouteResultsetNode;
+import io.mycat.server.NonBlockingSession;
+import io.mycat.server.ServerConnection;
+import io.mycat.server.parser.ServerParse;
+import io.mycat.sqlengine.mpp.AbstractDataNodeMerge;
+import io.mycat.sqlengine.mpp.ColMeta;
+import io.mycat.sqlengine.mpp.DataMergeService;
+import io.mycat.sqlengine.mpp.DataNodeMergeManager;
+import io.mycat.sqlengine.mpp.MergeCol;
+import io.mycat.statistic.stat.QueryResult;
+import io.mycat.statistic.stat.QueryResultDispatcher;
+import io.mycat.util.ResultSetUtil;
+
+/**
+ * @author mycat
+ */
+public class MultiNodeQueryHandler extends MultiNodeHandler implements LoadDataResponseHandler {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(MultiNodeQueryHandler.class);
+
+ private final RouteResultset rrs;
+ private final NonBlockingSession session;
+ // private final CommitNodeHandler icHandler;
+ private final AbstractDataNodeMerge dataMergeSvr;
+ private final boolean autocommit;
+	private String primaryKeyTable = null;
+ private int primaryKeyIndex = -1;
+ private int fieldCount = 0;
+ private final ReentrantLock lock;
+ private long affectedRows;
+ private long selectRows;
+ private long insertId;
+ private volatile boolean fieldsReturned;
+ private int okCount;
+ private final boolean isCallProcedure;
+ private long startTime;
+ private long netInBytes;
+ private long netOutBytes;
+ private int execCount = 0;
+
+ private boolean prepared;
+	private List<FieldPacket> fieldPackets = new ArrayList<FieldPacket>();
+ private int isOffHeapuseOffHeapForMerge = 1;
+	//huangyiming add: whether the intermediate result has been fully processed
+ private final AtomicBoolean isMiddleResultDone;
+ /**
+ * Limit N,M
+ */
+ private int limitStart;
+ private int limitSize;
+
+ private int index = 0;
+
+ private int end = 0;
+
+ //huangyiming
+ private byte[] header = null;
+	private List<byte[]> fields = null;
+
+ public MultiNodeQueryHandler(int sqlType, RouteResultset rrs,
+ boolean autocommit, NonBlockingSession session) {
+
+ super(session);
+ this.isMiddleResultDone = new AtomicBoolean(false);
+
+ if (rrs.getNodes() == null) {
+ throw new IllegalArgumentException("routeNode is null!");
+ }
+
+ if (LOGGER.isDebugEnabled()) {
+			LOGGER.debug("execute multinode query " + rrs.getStatement());
+ }
+
+ this.rrs = rrs;
+ isOffHeapuseOffHeapForMerge = MycatServer.getInstance().
+ getConfig().getSystem().getUseOffHeapForMerge();
+ if (ServerParse.SELECT == sqlType && rrs.needMerge()) {
+ /**
+			 * use the off-heap merge implementation
+ */
+ if(isOffHeapuseOffHeapForMerge == 1){
+ dataMergeSvr = new DataNodeMergeManager(this,rrs,isMiddleResultDone);
+ }else {
+ dataMergeSvr = new DataMergeService(this,rrs);
+ }
+ } else {
+ dataMergeSvr = null;
+ }
+
+ isCallProcedure = rrs.isCallStatement();
+ this.autocommit = session.getSource().isAutocommit();
+ this.session = session;
+ this.lock = new ReentrantLock();
+ // this.icHandler = new CommitNodeHandler(session);
+
+ this.limitStart = rrs.getLimitStart();
+ this.limitSize = rrs.getLimitSize();
+ this.end = limitStart + rrs.getLimitSize();
+
+ if (this.limitStart < 0)
+ this.limitStart = 0;
+
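+		// a negative limit size means "no LIMIT": leave the upper bound of the window unbounded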
+ if (rrs.getLimitSize() < 0)
+ end = Integer.MAX_VALUE;
+ if ((dataMergeSvr != null)
+ && LOGGER.isDebugEnabled()) {
+ LOGGER.debug("has data merge logic ");
+ }
+
+ if ( rrs != null && rrs.getStatement() != null) {
+ netInBytes += rrs.getStatement().getBytes().length;
+ }
+ }
+
+ protected void reset(int initCount) {
+ super.reset(initCount);
+ this.okCount = initCount;
+ this.execCount = 0;
+ this.netInBytes = 0;
+ this.netOutBytes = 0;
+ }
+
+ public NonBlockingSession getSession() {
+ return session;
+ }
+
+ public void execute() throws Exception {
+ final ReentrantLock lock = this.lock;
+ lock.lock();
+ try {
+ this.reset(rrs.getNodes().length);
+ this.fieldsReturned = false;
+ this.affectedRows = 0L;
+ this.insertId = 0L;
+ } finally {
+ lock.unlock();
+ }
+ MycatConfig conf = MycatServer.getInstance().getConfig();
+ startTime = System.currentTimeMillis();
+ LOGGER.debug("rrs.getRunOnSlave()-" + rrs.getRunOnSlave());
+ for (final RouteResultsetNode node : rrs.getNodes()) {
+ BackendConnection conn = session.getTarget(node);
+ if (session.tryExistsCon(conn, node)) {
+ LOGGER.debug("node.getRunOnSlave()-" + node.getRunOnSlave());
+				node.setRunOnSlave(rrs.getRunOnSlave());	// apply the master/slave annotation
+ LOGGER.debug("node.getRunOnSlave()-" + node.getRunOnSlave());
+ _execute(conn, node);
+ } else {
+ // create new connection
+ LOGGER.debug("node.getRunOnSlave()1-" + node.getRunOnSlave());
+				node.setRunOnSlave(rrs.getRunOnSlave());	// apply the master/slave annotation
+ LOGGER.debug("node.getRunOnSlave()2-" + node.getRunOnSlave());
+ PhysicalDBNode dn = conf.getDataNodes().get(node.getName());
+ dn.getConnection(dn.getDatabase(), autocommit, node, this, node);
+				// note: this call does more than fetch a connection; once a new connection is acquired,
+				// a chain of callbacks (driven by the `this` argument above) eventually reaches connectionAcquired() on this class.
+				// connectionAcquired() then performs:
+				// session.bindConnection(node, conn);
+				// _execute(conn, node);
+ }
+
+ }
+ }
+
+ private void _execute(BackendConnection conn, RouteResultsetNode node) {
+ if (clearIfSessionClosed(session)) {
+ return;
+ }
+ conn.setResponseHandler(this);
+ try {
+ conn.execute(node, session.getSource(), autocommit);
+ } catch (IOException e) {
+ connectionError(e, conn);
+ }
+ }
+
+ @Override
+ public void connectionAcquired(final BackendConnection conn) {
+ final RouteResultsetNode node = (RouteResultsetNode) conn
+ .getAttachment();
+ session.bindConnection(node, conn);
+ _execute(conn, node);
+ }
+
+ private boolean decrementOkCountBy(int finished) {
+ lock.lock();
+ try {
+ return --okCount == 0;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void okResponse(byte[] data, BackendConnection conn) {
+
+ this.netOutBytes += data.length;
+
+ boolean executeResponse = conn.syncAndExcute();
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("received ok response ,executeResponse:"
+ + executeResponse + " from " + conn);
+ }
+ if (executeResponse) {
+
+ ServerConnection source = session.getSource();
+ OkPacket ok = new OkPacket();
+ ok.read(data);
+			// stored procedure handling
+			boolean isCanClose2Client = (!rrs.isCallStatement()) || (rrs.isCallStatement() && !rrs.getProcedure().isResultSimpleValue());
+ if(!isCallProcedure)
+ {
+ if (clearIfSessionClosed(session))
+ {
+ return;
+ } else if (canClose(conn, false))
+ {
+ return;
+ }
+ }
+ lock.lock();
+ try {
+				// for a global table, affected rows are not accumulated; the count from the last execution is used
+ if (!rrs.isGlobalTable()) {
+ affectedRows += ok.affectedRows;
+ } else {
+ affectedRows = ok.affectedRows;
+ }
+ if (ok.insertId > 0) {
+ insertId = (insertId == 0) ? ok.insertId : Math.min(
+ insertId, ok.insertId);
+ }
+ } finally {
+ lock.unlock();
+ }
+			// stored procedures are special: after the result set's end-of-rows packet, one more OK packet arrives before the call is complete
+ boolean isEndPacket = isCallProcedure ? decrementOkCountBy(1): decrementCountBy(1);
+ if (isEndPacket && isCanClose2Client) {
+
+ if (this.autocommit && !session.getSource().isLocked()) {// clear all connections
+ session.releaseConnections(false);
+ }
+
+ if (this.isFail() || session.closed()) {
+ tryErrorFinished(true);
+ return;
+ }
+
+ lock.lock();
+ try {
+ if (rrs.isLoadData()) {
+ byte lastPackId = source.getLoadDataInfileHandler()
+ .getLastPackId();
+ ok.packetId = ++lastPackId;// OK_PACKET
+ ok.message = ("Records: " + affectedRows + " Deleted: 0 Skipped: 0 Warnings: 0")
+								.getBytes();// this message is only for human consumption on the console
+ source.getLoadDataInfileHandler().clear();
+ } else {
+ ok.packetId = ++packetId;// OK_PACKET
+ }
+
+ ok.affectedRows = affectedRows;
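+					// MySQL protocol server-status flags: 2 = SERVER_STATUS_AUTOCOMMIT, 1 = SERVER_STATUS_IN_TRANS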
+ ok.serverStatus = source.isAutocommit() ? 2 : 1;
+ if (insertId > 0) {
+ ok.insertId = insertId;
+ source.setLastInsertId(insertId);
+ }
+
+ ok.write(source);
+ } catch (Exception e) {
+ handleDataProcessException(e);
+ } finally {
+ lock.unlock();
+ }
+ }
+
+
+ // add by lian
+			// fixes SQL statistics always reporting 0 for write operations
+ execCount++;
+ if (execCount == rrs.getNodes().length) {
+				source.setExecuteSql(null); // improve the show @@connection.sql monitoring command: SQL that has finished executing is no longer shown
+ QueryResult queryResult = new QueryResult(session.getSource().getUser(),
+ rrs.getSqlType(), rrs.getStatement(), selectRows, netInBytes, netOutBytes, startTime, System.currentTimeMillis(),0);
+ QueryResultDispatcher.dispatchQuery( queryResult );
+ }
+ }
+ }
+
+ @Override
+ public void rowEofResponse(final byte[] eof, BackendConnection conn) {
+ if (LOGGER.isDebugEnabled()) {
+			LOGGER.debug("on row end response " + conn);
+ }
+
+ this.netOutBytes += eof.length;
+ MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler();
+
+ if (errorRepsponsed.get()) {
+			// the connection has already been closed, or "txInterrupt" has been set,
+			// in tryErrorFinished(); closing it again here could cause transaction
+			// errors such as a rollback that blocks forever.
+ // @author Uncle-pan
+ // @since 2016-03-25
+ // conn.close(this.error);
+ return;
+ }
+
+ final ServerConnection source = session.getSource();
+ if (!isCallProcedure) {
+ if (clearIfSessionClosed(session)) {
+ return;
+ } else if (canClose(conn, false)) {
+ return;
+ }
+ }
+
+ if (decrementCountBy(1)) {
+ if (!rrs.isCallStatement()||(rrs.isCallStatement()&&rrs.getProcedure().isResultSimpleValue())) {
+ if (this.autocommit && !session.getSource().isLocked()) {// clear all connections
+ session.releaseConnections(false);
+ }
+
+ if (this.isFail() || session.closed()) {
+ tryErrorFinished(true);
+ return;
+ }
+ }
+ if (dataMergeSvr != null) {
+				//huangyiming add: if there is an intermediate step, run the data merge first, then proceed to the next step
+ if(session.getMiddlerResultHandler() !=null ){
+ isMiddleResultDone.set(true);
+ }
+
+ try {
+ dataMergeSvr.outputMergeResult(session, eof);
+ } catch (Exception e) {
+ handleDataProcessException(e);
+ }
+
+ } else {
+ try {
+ lock.lock();
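+					// byte 3 of a MySQL packet is its sequence id; stamp the EOF packet with the next id before forwarding it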
+ eof[3] = ++packetId;
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("last packet id:" + packetId);
+ }
+ if( middlerResultHandler ==null ){
+ //middlerResultHandler.secondEexcute();
+ source.write(eof);
+ }
+ } finally {
+ lock.unlock();
+
+ }
+ }
+ }
+ execCount++;
+ if(middlerResultHandler !=null){
+ if (execCount != rrs.getNodes().length) {
+
+ return;
+ }
+ /*else{
+ middlerResultHandler.secondEexcute();
+ }*/
+ }
+ if (execCount == rrs.getNodes().length) {
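+			// rough estimate of the result size in bytes: buffers still queued for the client times the buffer page size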
+ int resultSize = source.getWriteQueue().size()*MycatServer.getInstance().getConfig().getSystem().getBufferPoolPageSize();
+			source.setExecuteSql(null); // improve the show @@connection.sql monitoring command: SQL that has finished executing is no longer shown
+ //TODO: add by zhuam
+			// dispatch the query result
+ QueryResult queryResult = new QueryResult(session.getSource().getUser(),
+ rrs.getSqlType(), rrs.getStatement(), selectRows, netInBytes, netOutBytes, startTime, System.currentTimeMillis(),resultSize);
+ QueryResultDispatcher.dispatchQuery( queryResult );
+
+
+			// add huangyiming: for an intermediate step, wait until the data merge has completed before assembling the next statement
+ if(middlerResultHandler !=null ){
+ while (!this.isMiddleResultDone.compareAndSet(false, true)) {
+ Thread.yield();
+ }
+ middlerResultHandler.secondEexcute();
+ isMiddleResultDone.set(false);
+ }
+ }
+
+ }
+
+ /**
+	 * Send the merged result set to the MyCAT client.
+ * @param source
+ * @param eof
+	 * @param iter
+ */
+	public void outputMergeResult(final ServerConnection source, final byte[] eof, Iterator<UnsafeRow> iter, AtomicBoolean isMiddleResultDone) {
+
+ try {
+ lock.lock();
+ ByteBuffer buffer = session.getSource().allocate();
+ final RouteResultset rrs = this.dataMergeSvr.getRrs();
+
+ /**
+			 *  compute the start and end positions of the LIMIT clause and send
+			 *  only the correct rows to the MyCAT client
+ */
+ int start = rrs.getLimitStart();
+ int end = start + rrs.getLimitSize();
+ int index = 0;
+
+ if (start < 0)
+ start = 0;
+
+ if (rrs.getLimitSize() < 0)
+ end = Integer.MAX_VALUE;
+
+ if(prepared) {
+ while (iter.hasNext()){
+ UnsafeRow row = iter.next();
+ if(index >= start){
+ row.packetId = ++packetId;
+ BinaryRowDataPacket binRowPacket = new BinaryRowDataPacket();
+ binRowPacket.read(fieldPackets, row);
+ buffer = binRowPacket.write(buffer, source, true);
+ }
+ index++;
+ if(index == end){
+ break;
+ }
+ }
+ } else {
+ while (iter.hasNext()){
+ UnsafeRow row = iter.next();
+ if(index >= start){
+ row.packetId = ++packetId;
+ buffer = row.write(buffer,source,true);
+ }
+ index++;
+ if(index == end){
+ break;
+ }
+ }
+ }
+
+ eof[3] = ++packetId;
+
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("last packet id:" + packetId);
+ }
+			//huangyiming add: cache the intermediate result; isMiddleResultDone guarantees secondExecute only runs after the merge part has completed
+ MiddlerResultHandler middlerResultHandler = source.getSession2().getMiddlerResultHandler();
+ if(null != middlerResultHandler){
+ if(buffer.position() > 0){
+ buffer.flip();
+ byte[] data = new byte[buffer.limit()];
+ buffer.get(data);
+ buffer.clear();
+					// if this operation is only an intermediate step, store the result for later
+ String str = ResultSetUtil.getColumnValAsString(data, fields, 0);
+					// only merge when an aggregate column actually requires it
+ if(rrs.isHasAggrColumn()){
+ middlerResultHandler.getResult().clear();
+ if(str !=null){
+ middlerResultHandler.add(str);
+ }
+ }
+ }
+ isMiddleResultDone.set(false);
+ }else{
+ ByteBuffer byteBuffer = source.writeToBuffer(eof, buffer);
+
+ /**
+				 * actually start writing the data in the write buffer to the channel
+ */
+ session.getSource().write(byteBuffer);
+ }
+
+
+ } catch (Exception e) {
+ e.printStackTrace();
+ handleDataProcessException(e);
+ } finally {
+ lock.unlock();
+ dataMergeSvr.clear();
+ }
+ }
+ public void outputMergeResult(final ServerConnection source,
+			final byte[] eof, List<RowDataPacket> results) {
+ try {
+ lock.lock();
+ ByteBuffer buffer = session.getSource().allocate();
+ final RouteResultset rrs = this.dataMergeSvr.getRrs();
+
+			// handle the LIMIT clause
+ int start = rrs.getLimitStart();
+ int end = start + rrs.getLimitSize();
+
+ if (start < 0) {
+ start = 0;
+ }
+
+ if (rrs.getLimitSize() < 0) {
+ end = results.size();
+ }
+
+//			// for statements that need no sorting, only rrs.getLimitSize() rows are returned
+// if (rrs.getOrderByCols() == null) {
+// end = results.size();
+// start = 0;
+// }
+ if (end > results.size()) {
+ end = results.size();
+ }
+
+// for (int i = start; i < end; i++) {
+// RowDataPacket row = results.get(i);
+// if( prepared ) {
+// BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket();
+// binRowDataPk.read(fieldPackets, row);
+// binRowDataPk.packetId = ++packetId;
+// //binRowDataPk.write(source);
+// buffer = binRowDataPk.write(buffer, session.getSource(), true);
+// } else {
+// row.packetId = ++packetId;
+// buffer = row.write(buffer, source, true);
+// }
+// }
+
+ if(prepared) {
+ for (int i = start; i < end; i++) {
+ RowDataPacket row = results.get(i);
+ BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket();
+ binRowDataPk.read(fieldPackets, row);
+ binRowDataPk.packetId = ++packetId;
+ //binRowDataPk.write(source);
+ buffer = binRowDataPk.write(buffer, session.getSource(), true);
+ }
+ } else {
+ for (int i = start; i < end; i++) {
+ RowDataPacket row = results.get(i);
+ row.packetId = ++packetId;
+ buffer = row.write(buffer, source, true);
+ }
+ }
+
+ eof[3] = ++packetId;
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("last packet id:" + packetId);
+ }
+ source.write(source.writeToBuffer(eof, buffer));
+
+ } catch (Exception e) {
+ handleDataProcessException(e);
+ } finally {
+ lock.unlock();
+ dataMergeSvr.clear();
+ }
+ }
+
+ @Override
+	public void fieldEofResponse(byte[] header, List<byte[]> fields,
+ byte[] eof, BackendConnection conn) {
+
+ //huangyiming add
+ this.header = header;
+ this.fields = fields;
+ MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler();
+ /*if(null !=middlerResultHandler ){
+ return;
+ }*/
+ this.netOutBytes += header.length;
+ this.netOutBytes += eof.length;
+ for (int i = 0, len = fields.size(); i < len; ++i) {
+ byte[] field = fields.get(i);
+ this.netOutBytes += field.length;
+ }
+
+ ServerConnection source = null;
+
+ if (fieldsReturned) {
+ return;
+ }
+ lock.lock();
+ try {
+ if (fieldsReturned) {
+ return;
+ }
+ fieldsReturned = true;
+
+ boolean needMerg = (dataMergeSvr != null)
+ && dataMergeSvr.getRrs().needMerge();
+			Set<String> shouldRemoveAvgField = new HashSet<>();
+			Set<String> shouldRenameAvgField = new HashSet<>();
+ if (needMerg) {
+				Map<String, Integer> mergeColsMap = dataMergeSvr.getRrs()
+ .getMergeCols();
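+				// AVG columns arrive from the shards as a SUM and a COUNT column: drop the helper COUNT field and rename the SUM field back to the AVG alias below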
+ if (mergeColsMap != null) {
+					for (Map.Entry<String, Integer> entry : mergeColsMap
+ .entrySet()) {
+ String key = entry.getKey();
+ int mergeType = entry.getValue();
+ if (MergeCol.MERGE_AVG == mergeType
+ && mergeColsMap.containsKey(key + "SUM")) {
+ shouldRemoveAvgField.add((key + "COUNT")
+ .toUpperCase());
+ shouldRenameAvgField.add((key + "SUM")
+ .toUpperCase());
+ }
+ }
+ }
+
+ }
+
+ source = session.getSource();
+ ByteBuffer buffer = source.allocate();
+ fieldCount = fields.size();
+ if (shouldRemoveAvgField.size() > 0) {
+ ResultSetHeaderPacket packet = new ResultSetHeaderPacket();
+ packet.packetId = ++packetId;
+ packet.fieldCount = fieldCount - shouldRemoveAvgField.size();
+ buffer = packet.write(buffer, source, true);
+ } else {
+
+ header[3] = ++packetId;
+ buffer = source.writeToBuffer(header, buffer);
+ }
+
+ String primaryKey = null;
+ if (rrs.hasPrimaryKeyToCache()) {
+ String[] items = rrs.getPrimaryKeyItems();
+				primaryKeyTable = items[0];
+ primaryKey = items[1];
+ }
+
+			Map<String, ColMeta> columToIndx = new HashMap<String, ColMeta>(
+ fieldCount);
+
+ for (int i = 0, len = fieldCount; i < len; ++i) {
+ boolean shouldSkip = false;
+ byte[] field = fields.get(i);
+ if (needMerg) {
+ FieldPacket fieldPkg = new FieldPacket();
+ fieldPkg.read(field);
+ fieldPackets.add(fieldPkg);
+ String fieldName = new String(fieldPkg.name).toUpperCase();
+ if (columToIndx != null
+ && !columToIndx.containsKey(fieldName)) {
+ if (shouldRemoveAvgField.contains(fieldName)) {
+ shouldSkip = true;
+ fieldPackets.remove(fieldPackets.size() - 1);
+ }
+ if (shouldRenameAvgField.contains(fieldName)) {
+ String newFieldName = fieldName.substring(0,
+ fieldName.length() - 3);
+ fieldPkg.name = newFieldName.getBytes();
+ fieldPkg.packetId = ++packetId;
+ shouldSkip = true;
+							// adjust the AVG field's length and precision: AVG length = SUM length - 14
+ fieldPkg.length = fieldPkg.length - 14;
+							// AVG decimals = SUM decimals + 4
+ fieldPkg.decimals = (byte) (fieldPkg.decimals + 4);
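+							// e.g. a shard-side SUM column with length 33 and 2 decimals is presented as an AVG column with length 19 and 6 decimals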
+ buffer = fieldPkg.write(buffer, source, false);
+
+							// restore the decimals
+ fieldPkg.decimals = (byte) (fieldPkg.decimals - 4);
+ }
+
+ ColMeta colMeta = new ColMeta(i, fieldPkg.type);
+ colMeta.decimals = fieldPkg.decimals;
+ columToIndx.put(fieldName, colMeta);
+ }
+ } else {
+ FieldPacket fieldPkg = new FieldPacket();
+ fieldPkg.read(field);
+ fieldPackets.add(fieldPkg);
+ fieldCount = fields.size();
+ if (primaryKey != null && primaryKeyIndex == -1) {
+ // find primary key index
+ String fieldName = new String(fieldPkg.name);
+ if (primaryKey.equalsIgnoreCase(fieldName)) {
+ primaryKeyIndex = i;
+ }
+ } }
+ if (!shouldSkip) {
+ field[3] = ++packetId;
+ buffer = source.writeToBuffer(field, buffer);
+ }
+ }
+ eof[3] = ++packetId;
+ buffer = source.writeToBuffer(eof, buffer);
+
+ if(null == middlerResultHandler ){
+ //session.getSource().write(row);
+ source.write(buffer);
+ }
+
+ if (dataMergeSvr != null) {
+ dataMergeSvr.onRowMetaData(columToIndx, fieldCount);
+
+ }
+ } catch (Exception e) {
+ handleDataProcessException(e);
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ public void handleDataProcessException(Exception e) {
+ if (!errorRepsponsed.get()) {
+ this.error = e.toString();
+ LOGGER.warn("caught exception ", e);
+ setFail(e.toString());
+ this.tryErrorFinished(true);
+ }
+ }
+
+ @Override
+ public void rowResponse(final byte[] row, final BackendConnection conn) {
+
+ if (errorRepsponsed.get()) {
+			// the connection has already been closed, or "txInterrupt" has been set,
+			// in tryErrorFinished(); closing it again here could cause transaction
+			// errors such as a rollback that blocks forever.
+ // @author Uncle-pan
+ // @since 2016-03-25
+ //conn.close(error);
+ return;
+ }
+
+
+ lock.lock();
+ try {
+
+ this.selectRows++;
+
+ RouteResultsetNode rNode = (RouteResultsetNode) conn.getAttachment();
+ String dataNode = rNode.getName();
+ if (dataMergeSvr != null) {
+				// even though we discard all the remaining rows, the connection cannot be
+				// closed, because it is still needed for transaction control such as rollback or commit.
+				// So the "isClosedByDiscard" variable is unnecessary.
+ // @author Uncle-pan
+ // @since 2016-03-25
+ dataMergeSvr.onNewRecord(dataNode, row);
+
+ MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler();
+ if(null != middlerResultHandler ){
+ if(middlerResultHandler instanceof MiddlerQueryResultHandler){
+ byte[] rv = ResultSetUtil.getColumnVal(row, fields, 0);
+ String rowValue = rv==null? "":new String(rv);
+ middlerResultHandler.add(rowValue);
+ }
+ }
+ } else {
+ row[3] = ++packetId;
+ RowDataPacket rowDataPkg =null;
+ // cache primaryKey-> dataNode
+ if (primaryKeyIndex != -1) {
+ rowDataPkg = new RowDataPacket(fieldCount);
+ rowDataPkg.read(row);
+ String primaryKey = new String(rowDataPkg.fieldValues.get(primaryKeyIndex));
+ LayerCachePool pool = MycatServer.getInstance().getRouterservice().getTableId2DataNodeCache();
+					pool.putIfAbsent(primaryKeyTable, primaryKey, dataNode);
+ }
+ if( prepared ) {
+ if(rowDataPkg==null) {
+ rowDataPkg = new RowDataPacket(fieldCount);
+ rowDataPkg.read(row);
+ }
+ BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket();
+ binRowDataPk.read(fieldPackets, rowDataPkg);
+ binRowDataPk.write(session.getSource());
+ } else {
+ //add huangyiming
+ MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler();
+ if(null == middlerResultHandler ){
+ session.getSource().write(row);
+ }else{
+
+ if(middlerResultHandler instanceof MiddlerQueryResultHandler){
+ String rowValue = ResultSetUtil.getColumnValAsString(row, fields, 0);
+ middlerResultHandler.add(rowValue);
+ }
+
+ }
+ }
+ }
+
+ } catch (Exception e) {
+ handleDataProcessException(e);
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void clearResources() {
+ if (dataMergeSvr != null) {
+ dataMergeSvr.clear();
+ }
+ }
+
+ @Override
+ public void writeQueueAvailable() {
+ }
+
+ @Override
+ public void requestDataResponse(byte[] data, BackendConnection conn) {
+ LoadDataUtil.requestFileDataResponse(data, conn);
+ }
+
+ public boolean isPrepared() {
+ return prepared;
+ }
+
+ public void setPrepared(boolean prepared) {
+ this.prepared = prepared;
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/server/executors/NewConnectionRespHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/NewConnectionRespHandler.java
similarity index 78%
rename from src/main/java/io/mycat/server/executors/NewConnectionRespHandler.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/NewConnectionRespHandler.java
index d5689246a..db5e3fe7a 100644
--- a/src/main/java/io/mycat/server/executors/NewConnectionRespHandler.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/NewConnectionRespHandler.java
@@ -21,65 +21,73 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.executors;
-
-import io.mycat.backend.BackendConnection;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+package io.mycat.backend.mysql.nio.handler;
import java.util.List;
-public class NewConnectionRespHandler implements ResponseHandler {
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.BackendConnection;
+
+public class NewConnectionRespHandler implements ResponseHandler{
private static final Logger LOGGER = LoggerFactory
.getLogger(NewConnectionRespHandler.class);
-
@Override
public void connectionError(Throwable e, BackendConnection conn) {
- LOGGER.warn(conn + " connectionError " + e);
-
+ LOGGER.warn(conn+" connectionError "+e);
+
}
@Override
public void connectionAcquired(BackendConnection conn) {
//
- conn.release();
- LOGGER.info("connectionAcquired " + conn);
-
+ LOGGER.info("connectionAcquired "+conn);
+
+		conn.release(); // release the newly created connection straight back into the pool
+
}
@Override
public void errorResponse(byte[] err, BackendConnection conn) {
LOGGER.warn("caught error resp: " + conn + " " + new String(err));
+ conn.release();
}
@Override
public void okResponse(byte[] ok, BackendConnection conn) {
- LOGGER.info("okResponse: " + conn);
-
+ LOGGER.info("okResponse: " + conn );
+ conn.release();
}
@Override
public void fieldEofResponse(byte[] header, List fields,
byte[] eof, BackendConnection conn) {
- LOGGER.info("fieldEofResponse: " + conn);
-
+ LOGGER.info("fieldEofResponse: " + conn );
+
}
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
- LOGGER.info("rowResponse: " + conn);
-
+ LOGGER.info("rowResponse: " + conn );
+
}
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
- LOGGER.info("rowEofResponse: " + conn);
+ LOGGER.info("rowEofResponse: " + conn );
+ conn.release();
+ }
+ @Override
+ public void writeQueueAvailable() {
+
+
}
@Override
public void connectionClose(BackendConnection conn, String reason) {
-
+
+
}
}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/server/executors/ResponseHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/ResponseHandler.java
similarity index 94%
rename from src/main/java/io/mycat/server/executors/ResponseHandler.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/ResponseHandler.java
index e5b714479..9a113fa78 100644
--- a/src/main/java/io/mycat/server/executors/ResponseHandler.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/ResponseHandler.java
@@ -21,12 +21,12 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.executors;
-
-import io.mycat.backend.BackendConnection;
+package io.mycat.backend.mysql.nio.handler;
import java.util.List;
+import io.mycat.backend.BackendConnection;
+
/**
* @author mycat
* @author mycat
@@ -72,9 +72,16 @@ void fieldEofResponse(byte[] header, List fields, byte[] eof,
*/
void rowEofResponse(byte[] eof, BackendConnection conn);
+ /**
+	 * The write queue has drained; it is safe to write more data.
+ *
+ */
+ void writeQueueAvailable();
+
/**
* on connetion close event
*/
void connectionClose(BackendConnection conn, String reason);
+
}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/server/executors/RollbackNodeHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/RollbackNodeHandler.java
similarity index 72%
rename from src/main/java/io/mycat/server/executors/RollbackNodeHandler.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/RollbackNodeHandler.java
index 923b17f3b..1c6463d77 100644
--- a/src/main/java/io/mycat/server/executors/RollbackNodeHandler.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/RollbackNodeHandler.java
@@ -21,21 +21,23 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.executors;
+package io.mycat.backend.mysql.nio.handler;
+
+import java.util.List;
+
+import io.mycat.backend.mysql.nio.MySQLConnection;
+import io.mycat.config.ErrorCode;
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
import io.mycat.backend.BackendConnection;
import io.mycat.route.RouteResultsetNode;
import io.mycat.server.NonBlockingSession;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
/**
* @author mycat
*/
public class RollbackNodeHandler extends MultiNodeHandler {
- public static final Logger LOGGER = LoggerFactory
+ private static final Logger LOGGER = LoggerFactory
.getLogger(RollbackNodeHandler.class);
public RollbackNodeHandler(NonBlockingSession session) {
@@ -59,15 +61,20 @@ public void rollback() {
int started = 0;
for (final RouteResultsetNode node : session.getTargetKeys()) {
if (node == null) {
- try {
LOGGER.error("null is contained in RoutResultsetNodes, source = "
+ session.getSource());
- } catch (Exception e) {
- }
continue;
}
final BackendConnection conn = session.getTarget(node);
+
if (conn != null) {
+ boolean isClosed=conn.isClosedOrQuit();
+ if(isClosed)
+ {
+ session.getSource().writeErrMessage(ErrorCode.ER_UNKNOWN_ERROR,
+							"received rollback, but the backend connection is closed or has quit");
+					LOGGER.error( conn+" received rollback, but found the backend connection closed or quit");
+ }
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("rollback job run for " + conn);
}
@@ -75,7 +82,18 @@ public void rollback() {
return;
}
conn.setResponseHandler(RollbackNodeHandler.this);
- conn.rollback();
+
+ //support the XA rollback
+ if(session.getXaTXID()!=null && conn instanceof MySQLConnection) {
+ MySQLConnection mysqlCon = (MySQLConnection) conn;
+ String xaTxId = session.getXaTXID() +",'"+ mysqlCon.getSchema()+"'";
+					// known issue when batching commands: the response to the 2nd packet cannot be received
+ mysqlCon.execCmd("XA END " + xaTxId + ";");
+ mysqlCon.execCmd("XA ROLLBACK " + xaTxId + ";");
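+						// XA END closes the branch on this shard, XA ROLLBACK then undoes its work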
+ }else {
+ conn.rollback();
+ }
+
++started;
}
@@ -98,6 +116,16 @@ public void okResponse(byte[] ok, BackendConnection conn) {
if (this.isFail() || session.closed()) {
tryErrorFinished(true);
} else {
+					/* 1. when the transaction ends, the XA transaction ends with it */
+ if(session.getXaTXID()!=null){
+ session.setXATXEnabled(false);
+ }
+
+					/* 2. if preAcStates is true, autocommit must be restored to true once the transaction ends; preAcStates holds the previous autocommit state */
+ if(session.getSource().isPreAcStates()&&!session.getSource().isAutocommit()){
+ session.getSource().setAutocommit(true);
+ }
+
session.getSource().write(ok);
}
}
@@ -130,4 +158,9 @@ public void rowResponse(byte[] row, BackendConnection conn) {
.append(": field's eof").toString());
}
-}
\ No newline at end of file
+ @Override
+ public void writeQueueAvailable() {
+
+ }
+
+}
diff --git a/src/main/java/io/mycat/server/executors/RollbackReleaseHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/RollbackReleaseHandler.java
similarity index 89%
rename from src/main/java/io/mycat/server/executors/RollbackReleaseHandler.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/RollbackReleaseHandler.java
index fe5d21ad6..477412721 100644
--- a/src/main/java/io/mycat/server/executors/RollbackReleaseHandler.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/RollbackReleaseHandler.java
@@ -21,14 +21,14 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.executors;
-
-import io.mycat.backend.BackendConnection;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+package io.mycat.backend.mysql.nio.handler;
import java.util.List;
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.BackendConnection;
+
/**
* @author mycat
*/
@@ -56,7 +56,7 @@ public void errorResponse(byte[] err, BackendConnection conn) {
@Override
public void okResponse(byte[] ok, BackendConnection conn) {
- logger.debug("autocomit is false,but no commit or rollback ,so mycat rollbacked backend conn "+conn);
+	    logger.debug("autocommit is false, but no commit or rollback was issued, so mycat rolled back backend conn "+conn);
conn.release();
}
@@ -74,6 +74,11 @@ public void rowEofResponse(byte[] eof, BackendConnection conn) {
}
+ @Override
+ public void writeQueueAvailable() {
+
+ }
+
@Override
public void connectionClose(BackendConnection conn, String reason) {
diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/SecondHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/SecondHandler.java
new file mode 100644
index 000000000..4aa7b8419
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/SecondHandler.java
@@ -0,0 +1,13 @@
+package io.mycat.backend.mysql.nio.handler;
+
+import java.util.List;
+
+/**
+ * Second-step processing after a query has been decomposed
+ * @author huangyiming
+ *
+ */
+public interface SecondHandler {
+
+ public void doExecute(List params);
+}
diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/SecondQueryHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/SecondQueryHandler.java
new file mode 100644
index 000000000..1dd701fd7
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/SecondQueryHandler.java
@@ -0,0 +1,19 @@
+package io.mycat.backend.mysql.nio.handler;
+
+import java.util.List;
+
+public class SecondQueryHandler implements SecondHandler {
+
+ public MiddlerResultHandler middlerResultHandler;
+ public SecondQueryHandler(MiddlerResultHandler middlerResultHandler){
+ this.middlerResultHandler = middlerResultHandler;
+ }
+
+ @Override
+ public void doExecute(List params) {
+ // TODO Auto-generated method stub
+
+ }
+
+
+}
diff --git a/src/main/java/io/mycat/server/executors/SimpleLogHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/SimpleLogHandler.java
similarity index 80%
rename from src/main/java/io/mycat/server/executors/SimpleLogHandler.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/SimpleLogHandler.java
index ce9dd5d72..5b46714af 100644
--- a/src/main/java/io/mycat/server/executors/SimpleLogHandler.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/SimpleLogHandler.java
@@ -21,28 +21,27 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.executors;
-
-import io.mycat.backend.BackendConnection;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+package io.mycat.backend.mysql.nio.handler;
import java.util.List;
-public class SimpleLogHandler implements ResponseHandler {
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.BackendConnection;
+
+public class SimpleLogHandler implements ResponseHandler{
private static final Logger LOGGER = LoggerFactory
.getLogger(SimpleLogHandler.class);
-
@Override
public void connectionError(Throwable e, BackendConnection conn) {
- LOGGER.warn(conn + " connectionError " + e);
-
+ LOGGER.warn(conn+" connectionError "+e);
+
}
@Override
public void connectionAcquired(BackendConnection conn) {
- LOGGER.info("connectionAcquired " + conn);
-
+ LOGGER.info("connectionAcquired "+conn);
+
}
@Override
@@ -52,32 +51,39 @@ public void errorResponse(byte[] err, BackendConnection conn) {
@Override
public void okResponse(byte[] ok, BackendConnection conn) {
- LOGGER.info("okResponse: " + conn);
-
+ LOGGER.info("okResponse: " + conn );
+
}
@Override
public void fieldEofResponse(byte[] header, List fields,
byte[] eof, BackendConnection conn) {
- LOGGER.info("fieldEofResponse: " + conn);
-
+ LOGGER.info("fieldEofResponse: " + conn );
+
}
@Override
public void rowResponse(byte[] row, BackendConnection conn) {
- LOGGER.info("rowResponse: " + conn);
-
+ LOGGER.info("rowResponse: " + conn );
+
}
@Override
public void rowEofResponse(byte[] eof, BackendConnection conn) {
- LOGGER.info("rowEofResponse: " + conn);
+ LOGGER.info("rowEofResponse: " + conn );
+
+ }
+ @Override
+ public void writeQueueAvailable() {
+
+
}
@Override
public void connectionClose(BackendConnection conn, String reason) {
-
+
+
}
}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/SingleNodeHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/SingleNodeHandler.java
new file mode 100644
index 000000000..fa105fc3e
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/SingleNodeHandler.java
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 2013, OpenCloudDB/MyCAT and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software;Designed and Developed mainly by many Chinese
+ * opensource volunteers. you can redistribute it and/or modify it under the
+ * terms of the GNU General Public License version 2 only, as published by the
+ * Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Any questions about this component can be directed to it's project Web address
+ * https://code.google.com/p/opencloudb/.
+ *
+ */
+package io.mycat.backend.mysql.nio.handler;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import com.google.common.base.Strings;
+
+import io.mycat.MycatServer;
+import io.mycat.backend.BackendConnection;
+import io.mycat.backend.datasource.PhysicalDBNode;
+import io.mycat.backend.mysql.LoadDataUtil;
+import io.mycat.config.ErrorCode;
+import io.mycat.config.MycatConfig;
+import io.mycat.config.model.SchemaConfig;
+import io.mycat.net.mysql.BinaryRowDataPacket;
+import io.mycat.net.mysql.ErrorPacket;
+import io.mycat.net.mysql.FieldPacket;
+import io.mycat.net.mysql.OkPacket;
+import io.mycat.net.mysql.RowDataPacket;
+import io.mycat.route.RouteResultset;
+import io.mycat.route.RouteResultsetNode;
+import io.mycat.server.NonBlockingSession;
+import io.mycat.server.ServerConnection;
+import io.mycat.server.parser.ServerParse;
+import io.mycat.server.parser.ServerParseShow;
+import io.mycat.server.response.ShowFullTables;
+import io.mycat.server.response.ShowTables;
+import io.mycat.statistic.stat.QueryResult;
+import io.mycat.statistic.stat.QueryResultDispatcher;
+import io.mycat.util.ResultSetUtil;
+import io.mycat.util.StringUtil;
+
+import org.slf4j.Logger; import org.slf4j.LoggerFactory;
+/**
+ * @author mycat
+ */
+public class SingleNodeHandler implements ResponseHandler, Terminatable, LoadDataResponseHandler {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(SingleNodeHandler.class);
+
+ private final RouteResultsetNode node;
+ private final RouteResultset rrs;
+ private final NonBlockingSession session;
+
+ // only one thread access at one time no need lock
+ private volatile byte packetId;
+ private volatile ByteBuffer buffer;
+ private volatile boolean isRunning;
+ private Runnable terminateCallBack;
+ private long startTime;
+ private long netInBytes;
+ private long netOutBytes;
+ private long selectRows;
+ private long affectedRows;
+
+ private boolean prepared;
+ private int fieldCount;
+	private List<FieldPacket> fieldPackets = new ArrayList<FieldPacket>();
+
+ private volatile boolean isDefaultNodeShowTable;
+ private volatile boolean isDefaultNodeShowFullTable;
+	private Set<String> shardingTablesSet;
+ private byte[] header = null;
+	private List<byte[]> fields = null;
+ public SingleNodeHandler(RouteResultset rrs, NonBlockingSession session) {
+ this.rrs = rrs;
+ this.node = rrs.getNodes()[0];
+
+ if (node == null) {
+ throw new IllegalArgumentException("routeNode is null!");
+ }
+
+ if (session == null) {
+ throw new IllegalArgumentException("session is null!");
+ }
+
+ this.session = session;
+ ServerConnection source = session.getSource();
+ String schema = source.getSchema();
+ if (schema != null && ServerParse.SHOW == rrs.getSqlType()) {
+ SchemaConfig schemaConfig = MycatServer.getInstance().getConfig().getSchemas().get(schema);
+ int type = ServerParseShow.tableCheck(rrs.getStatement(), 0);
+ isDefaultNodeShowTable = (ServerParseShow.TABLES == type && !Strings.isNullOrEmpty(schemaConfig.getDataNode()));
+ isDefaultNodeShowFullTable = (ServerParseShow.FULLTABLES == type && !Strings.isNullOrEmpty(schemaConfig.getDataNode()));
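+			// when the schema has a default dataNode, SHOW [FULL] TABLES must also reflect the sharded tables defined in schema.xml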
+ if (isDefaultNodeShowTable) {
+ shardingTablesSet = ShowTables.getTableSet(source, rrs.getStatement());
+
+ } else if (isDefaultNodeShowFullTable) {
+ shardingTablesSet = ShowFullTables.getTableSet(source, rrs.getStatement());
+ }
+ }
+
+ if ( rrs != null && rrs.getStatement() != null) {
+ netInBytes += rrs.getStatement().getBytes().length;
+ }
+
+ }
+
+ @Override
+ public void terminate(Runnable callback) {
+ boolean zeroReached = false;
+
+ if (isRunning) {
+ terminateCallBack = callback;
+ } else {
+ zeroReached = true;
+ }
+
+ if (zeroReached) {
+ callback.run();
+ }
+ }
+
+ private void endRunning() {
+ Runnable callback = null;
+ if (isRunning) {
+ isRunning = false;
+ callback = terminateCallBack;
+ terminateCallBack = null;
+ }
+
+ if (callback != null) {
+ callback.run();
+ }
+ }
+
+ private void recycleResources() {
+
+ ByteBuffer buf = buffer;
+ if (buf != null) {
+ session.getSource().recycle(buffer);
+ buffer = null;
+ }
+ }
+
+ public void execute() throws Exception {
+ startTime=System.currentTimeMillis();
+ ServerConnection sc = session.getSource();
+ this.isRunning = true;
+ this.packetId = 0;
+ final BackendConnection conn = session.getTarget(node);
+ LOGGER.debug("rrs.getRunOnSlave() " + rrs.getRunOnSlave());
+		node.setRunOnSlave(rrs.getRunOnSlave());	// apply the master/slave annotation
+ LOGGER.debug("node.getRunOnSlave() " + node.getRunOnSlave());
+
+ if (session.tryExistsCon(conn, node)) {
+ _execute(conn);
+ } else {
+ // create new connection
+
+ MycatConfig conf = MycatServer.getInstance().getConfig();
+
+ LOGGER.debug("node.getRunOnSlave() " + node.getRunOnSlave());
+			node.setRunOnSlave(rrs.getRunOnSlave());	// apply the master/slave annotation
+ LOGGER.debug("node.getRunOnSlave() " + node.getRunOnSlave());
+
+ PhysicalDBNode dn = conf.getDataNodes().get(node.getName());
+ dn.getConnection(dn.getDatabase(), sc.isAutocommit(), node, this, node);
+ }
+
+ }
+
+ @Override
+ public void connectionAcquired(final BackendConnection conn) {
+ session.bindConnection(node, conn);
+ _execute(conn);
+
+ }
+
+ private void _execute(BackendConnection conn) {
+ if (session.closed()) {
+ endRunning();
+ session.clearResources(true);
+ return;
+ }
+ conn.setResponseHandler(this);
+ try {
+ conn.execute(node, session.getSource(), session.getSource()
+ .isAutocommit());
+ } catch (Exception e1) {
+ executeException(conn, e1);
+ return;
+ }
+ }
+
+ private void executeException(BackendConnection c, Exception e) {
+ ErrorPacket err = new ErrorPacket();
+ err.packetId = ++packetId;
+ err.errno = ErrorCode.ERR_FOUND_EXCEPION;
+ err.message = StringUtil.encode(e.toString(), session.getSource().getCharset());
+
+ this.backConnectionErr(err, c);
+ }
+
+ @Override
+ public void connectionError(Throwable e, BackendConnection conn) {
+
+ endRunning();
+ ErrorPacket err = new ErrorPacket();
+ err.packetId = ++packetId;
+ err.errno = ErrorCode.ER_NEW_ABORTING_CONNECTION;
+ err.message = StringUtil.encode(e.getMessage(), session.getSource().getCharset());
+
+ ServerConnection source = session.getSource();
+ source.write(err.write(allocBuffer(), source, true));
+ }
+
+ @Override
+ public void errorResponse(byte[] data, BackendConnection conn) {
+ ErrorPacket err = new ErrorPacket();
+ err.read(data);
+ err.packetId = ++packetId;
+ backConnectionErr(err, conn);
+ }
+
+ private void backConnectionErr(ErrorPacket errPkg, BackendConnection conn) {
+ endRunning();
+
+ ServerConnection source = session.getSource();
+ String errUser = source.getUser();
+ String errHost = source.getHost();
+ int errPort = source.getLocalPort();
+
+ String errmgs = " errno:" + errPkg.errno + " " + new String(errPkg.message);
+ LOGGER.warn("execute sql err :" + errmgs + " con:" + conn
+ + " frontend host:" + errHost + "/" + errPort + "/" + errUser);
+
+ session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false);
+
+ source.setTxInterrupt(errmgs);
+
+ /**
+		 * TODO: fix a bug present in all versions
+		 *
+		 * To reproduce:
+		 * 1. MysqlClient:   SELECT 9223372036854775807 + 1;
+		 * 2. MyCatServer:   ERROR 1690 (22003): BIGINT value is out of range in '(9223372036854775807 + 1)'
+		 * 3. MysqlClient:   ERROR 2013 (HY000): Lost connection to MySQL server during query
+		 *
+		 * After the fix:
+		 * 1. MysqlClient:   SELECT 9223372036854775807 + 1;
+		 * 2. MyCatServer:   ERROR 1690 (22003): BIGINT value is out of range in '(9223372036854775807 + 1)'
+		 * 3. MysqlClient:   ERROR 1690 (22003): BIGINT value is out of range in '(9223372036854775807 + 1)'
+		 *
+		 */
+		// the problem was caused by packetId != 1
+ errPkg.packetId = 1;
+ errPkg.write(source);
+
+ recycleResources();
+ }
+
+
+ /**
+ * insert/update/delete
+ *
+	 * okResponse(): reads the data bytes into an OkPacket and calls ok.write(source) to append the result to the frontend FrontendConnection's write queue (writeQueue);
+	 * the matching NIOSocketWR later takes the ByteBuffer from that queue and actually sends it to the client
+ */
+ @Override
+ public void okResponse(byte[] data, BackendConnection conn) {
+ //
+ this.netOutBytes += data.length;
+
+ boolean executeResponse = conn.syncAndExcute();
+ if (executeResponse) {
+ ServerConnection source = session.getSource();
+ OkPacket ok = new OkPacket();
+ ok.read(data);
+ boolean isCanClose2Client =(!rrs.isCallStatement()) ||(rrs.isCallStatement() &&!rrs.getProcedure().isResultSimpleValue());
+ if (rrs.isLoadData()) {
+ byte lastPackId = source.getLoadDataInfileHandler().getLastPackId();
+ ok.packetId = ++lastPackId;// OK_PACKET
+ source.getLoadDataInfileHandler().clear();
+
+ } else if (isCanClose2Client) {
+ ok.packetId = ++packetId;// OK_PACKET
+ }
+
+
+ if (isCanClose2Client) {
+ session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false);
+ endRunning();
+ }
+ ok.serverStatus = source.isAutocommit() ? 2 : 1;
+ recycleResources();
+
+ if (isCanClose2Client) {
+ source.setLastInsertId(ok.insertId);
+ ok.write(source);
+ }
+
+ this.affectedRows = ok.affectedRows;
+
+ source.setExecuteSql(null);
+ // add by lian
+		// fixes SQL statistics always reporting 0 for write operations
+ QueryResult queryResult = new QueryResult(session.getSource().getUser(),
+ rrs.getSqlType(), rrs.getStatement(), affectedRows, netInBytes, netOutBytes, startTime, System.currentTimeMillis(),0);
+ QueryResultDispatcher.dispatchQuery( queryResult );
+ }
+ }
+
+
+ /**
+ * select
+ *
+	 * triggered when the end-of-rows marker arrives: writes the EOF packet into the buffer, then calls source.write(buffer) to queue the buffer on the frontend connection, where NIOSocketWR will send it to the client
+ */
+ @Override
+ public void rowEofResponse(byte[] eof, BackendConnection conn) {
+
+ this.netOutBytes += eof.length;
+
+ ServerConnection source = session.getSource();
+ conn.recordSql(source.getHost(), source.getSchema(), node.getStatement());
+		// for stored procedure calls the connection must not be released here
+ if (!rrs.isCallStatement()||(rrs.isCallStatement()&&rrs.getProcedure().isResultSimpleValue()))
+ {
+ session.releaseConnectionIfSafe(conn, LOGGER.isDebugEnabled(), false);
+ endRunning();
+ }
+
+ eof[3] = ++packetId;
+ buffer = source.writeToBuffer(eof, allocBuffer());
+ int resultSize = source.getWriteQueue().size()*MycatServer.getInstance().getConfig().getSystem().getBufferPoolPageSize();
+ resultSize=resultSize+buffer.position();
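+ // estimate of the bytes queued for this result: pending buffers in the write queue (queue length * buffer pool page size) plus what is currently held in this buffer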
+ MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler();
+
+ if(middlerResultHandler !=null ){
+ middlerResultHandler.secondEexcute();
+ } else{
+ source.write(buffer);
+ }
+ source.setExecuteSql(null);
+ // TODO: added by zhuam
+ // dispatch the query result
+ QueryResult queryResult = new QueryResult(session.getSource().getUser(),
+ rrs.getSqlType(), rrs.getStatement(), affectedRows, netInBytes, netOutBytes, startTime, System.currentTimeMillis(),resultSize);
+ QueryResultDispatcher.dispatchQuery( queryResult );
+
+ }
+
+ /**
+ * Lazily create the ByteBuffer, only when it is actually needed.
+ *
+ * @return the write buffer
+ */
+ private ByteBuffer allocBuffer() {
+ if (buffer == null) {
+ buffer = session.getSource().allocate();
+ }
+ return buffer;
+ }
+
+ /**
+ * select
+ *
+ * Triggered when the result-set metadata arrives: writes the header and the field metadata into the buffer in order
+ */
+ @Override
+ public void fieldEofResponse(byte[] header, List<byte[]> fields,
+ byte[] eof, BackendConnection conn) {
+ this.header = header;
+ this.fields = fields;
+ MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler();
+ if(null !=middlerResultHandler ){
+ return;
+ }
+ this.netOutBytes += header.length;
+ for (int i = 0, len = fields.size(); i < len; ++i) {
+ byte[] field = fields.get(i);
+ this.netOutBytes += field.length;
+ }
+
+ header[3] = ++packetId;
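+ // byte 3 of a MySQL packet is the sequence id (the first 3 bytes hold the payload length)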
+ ServerConnection source = session.getSource();
+ buffer = source.writeToBuffer(header, allocBuffer());
+ for (int i = 0, len = fields.size(); i < len; ++i) {
+ byte[] field = fields.get(i);
+ field[3] = ++packetId;
+
+ // save the field metadata
+ FieldPacket fieldPk = new FieldPacket();
+ fieldPk.read(field);
+ fieldPackets.add(fieldPk);
+
+ buffer = source.writeToBuffer(field, buffer);
+ }
+
+ fieldCount = fieldPackets.size();
+
+ eof[3] = ++packetId;
+ buffer = source.writeToBuffer(eof, buffer);
+
+ if (isDefaultNodeShowTable) {
+
+ for (String name : shardingTablesSet) {
+ RowDataPacket row = new RowDataPacket(1);
+ row.add(StringUtil.encode(name.toLowerCase(), source.getCharset()));
+ row.packetId = ++packetId;
+ buffer = row.write(buffer, source, true);
+ }
+
+ } else if (isDefaultNodeShowFullTable) {
+
+ for (String name : shardingTablesSet) {
+ RowDataPacket row = new RowDataPacket(1);
+ row.add(StringUtil.encode(name.toLowerCase(), source.getCharset()));
+ row.add(StringUtil.encode("BASE TABLE", source.getCharset()));
+ row.packetId = ++packetId;
+ buffer = row.write(buffer, source, true);
+ }
+ }
+ }
+
+ /**
+ * select
+ *
+ * Triggered for each row of the result set: writes the row data into the buffer
+ */
+ @Override
+ public void rowResponse(byte[] row, BackendConnection conn) {
+
+ this.netOutBytes += row.length;
+ this.selectRows++;
+
+ if (isDefaultNodeShowTable || isDefaultNodeShowFullTable) {
+ RowDataPacket rowDataPacket = new RowDataPacket(1);
+ rowDataPacket.read(row);
+ String table = StringUtil.decode(rowDataPacket.fieldValues.get(0), session.getSource().getCharset());
+ if (shardingTablesSet.contains(table.toUpperCase())) {
+ return;
+ }
+ }
+ row[3] = ++packetId;
+
+ if ( prepared ) {
+ RowDataPacket rowDataPk = new RowDataPacket(fieldCount);
+ rowDataPk.read(row);
+ BinaryRowDataPacket binRowDataPk = new BinaryRowDataPacket();
+ binRowDataPk.read(fieldPackets, rowDataPk);
+ binRowDataPk.packetId = rowDataPk.packetId;
+// binRowDataPk.write(session.getSource());
+ /*
+ * [fix bug] : the packet must not be written directly to the frontend connection here,
+ * because the buffer filled in fieldEofResponse() has not been flushed out yet;
+ * instead the packet data has to be appended to the buffer in order (flushing whenever the buffer is full), and the buffer is written out afterwards
+ */
+ buffer = binRowDataPk.write(buffer, session.getSource(), true);
+ } else {
+
+ MiddlerResultHandler middlerResultHandler = session.getMiddlerResultHandler();
+ if(null ==middlerResultHandler ){
+ buffer = session.getSource().writeToBuffer(row, allocBuffer());
+ }else{
+ if(middlerResultHandler instanceof MiddlerQueryResultHandler){
+ byte[] rv = ResultSetUtil.getColumnVal(row, fields, 0);
+ String rowValue = rv==null?"":new String(rv);
+ middlerResultHandler.add(rowValue);
+ }
+ }
+
+ }
+
+ }
+
+ @Override
+ public void writeQueueAvailable() {
+
+ }
+
+ @Override
+ public void connectionClose(BackendConnection conn, String reason) {
+ ErrorPacket err = new ErrorPacket();
+ err.packetId = ++packetId;
+ err.errno = ErrorCode.ER_ERROR_ON_CLOSE;
+ err.message = StringUtil.encode(reason, session.getSource()
+ .getCharset());
+ this.backConnectionErr(err, conn);
+
+ }
+
+ public void clearResources() {
+
+ }
+
+ @Override
+ public void requestDataResponse(byte[] data, BackendConnection conn) {
+ LoadDataUtil.requestFileDataResponse(data, conn);
+ }
+
+ public boolean isPrepared() {
+ return prepared;
+ }
+
+ public void setPrepared(boolean prepared) {
+ this.prepared = prepared;
+ }
+
+ @Override
+ public String toString() {
+ return "SingleNodeHandler [node=" + node + ", packetId=" + packetId + "]";
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/io/mycat/server/executors/Terminatable.java b/src/main/java/io/mycat/backend/mysql/nio/handler/Terminatable.java
similarity index 96%
rename from src/main/java/io/mycat/server/executors/Terminatable.java
rename to src/main/java/io/mycat/backend/mysql/nio/handler/Terminatable.java
index 8085885ce..87188ce46 100644
--- a/src/main/java/io/mycat/server/executors/Terminatable.java
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/Terminatable.java
@@ -21,7 +21,7 @@
* https://code.google.com/p/opencloudb/.
*
*/
-package io.mycat.server.executors;
+package io.mycat.backend.mysql.nio.handler;
/**
* @author mycat
diff --git a/src/main/java/io/mycat/backend/mysql/nio/handler/UnLockTablesHandler.java b/src/main/java/io/mycat/backend/mysql/nio/handler/UnLockTablesHandler.java
new file mode 100644
index 000000000..24cbfbc66
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/nio/handler/UnLockTablesHandler.java
@@ -0,0 +1,138 @@
+package io.mycat.backend.mysql.nio.handler;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.mycat.backend.BackendConnection;
+import io.mycat.net.mysql.OkPacket;
+import io.mycat.route.RouteResultsetNode;
+import io.mycat.server.NonBlockingSession;
+import io.mycat.server.parser.ServerParse;
+
+/**
+ * Handler for the "unlock tables" statement
+ * @author songdabin
+ *
+ */
+public class UnLockTablesHandler extends MultiNodeHandler implements ResponseHandler {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(UnLockTablesHandler.class);
+
+ private final NonBlockingSession session;
+ private final boolean autocommit;
+ private final String srcStatement;
+
+ public UnLockTablesHandler(NonBlockingSession session, boolean autocommit, String sql) {
+ super(session);
+ this.session = session;
+ this.autocommit = autocommit;
+ this.srcStatement = sql;
+ }
+
+ public void execute() {
+ Map<RouteResultsetNode, BackendConnection> lockedConns = session.getTargetMap();
+ Set<RouteResultsetNode> dnSet = lockedConns.keySet();
+ this.reset(lockedConns.size());
+ // the client sent "unlock tables" directly without a prior "lock tables", so there is no bound backend connection; reply with an OK packet right away
+ if (lockedConns.size() == 0) {
+ LOGGER.warn("find no locked backend connection!"+session.getSource());
+ OkPacket ok = new OkPacket();
+ ok.packetId = ++ packetId;
+ ok.packetLength = 7; // the OK packet returned for the "unlock tables" command has a payload length of 7
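+ // 7 payload bytes: 0x00 header, affected rows (1), last insert id (1), status flags (2), warnings (2)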
+ ok.serverStatus = session.getSource().isAutocommit() ? 2:1;
+ ok.write(session.getSource());
+ return;
+ }
+ for (RouteResultsetNode dataNode : dnSet) {
+ RouteResultsetNode node = new RouteResultsetNode(dataNode.getName(), ServerParse.UNLOCK, srcStatement);
+ BackendConnection conn = lockedConns.get(dataNode);
+ if (clearIfSessionClosed(session)) {
+ return;
+ }
+ conn.setResponseHandler(this);
+ try {
+ conn.execute(node, session.getSource(), autocommit);
+ } catch (Exception e) {
+ connectionError(e, conn);
+ }
+ }
+ }
+
+ @Override
+ public void connectionError(Throwable e, BackendConnection conn) {
+ super.connectionError(e, conn);
+ }
+
+ @Override
+ public void connectionAcquired(BackendConnection conn) {
+ LOGGER.error("unexpected invocation: connectionAcquired from unlock tables");
+ }
+
+ @Override
+ public void errorResponse(byte[] err, BackendConnection conn) {
+ super.errorResponse(err, conn);
+ }
+
+ @Override
+ public void okResponse(byte[] data, BackendConnection conn) {
+ boolean executeResponse = conn.syncAndExcute();
+ if (executeResponse) {
+ boolean isEndPack = decrementCountBy(1);
+ session.releaseConnection(conn);
+ if (isEndPack) {
+ if (this.isFail() || session.closed()) {
+ tryErrorFinished(true);
+ return;
+ }
+ OkPacket ok = new OkPacket();
+ ok.read(data);
+ lock.lock();
+ try {
+ ok.packetId = ++ packetId;
+ ok.serverStatus = session.getSource().isAutocommit() ? 2:1;
+ } finally {
+ lock.unlock();
+ }
+ ok.write(session.getSource());
+ }
+ }
+ }
+
+ @Override
+ public void fieldEofResponse(byte[] header, List<byte[]> fields, byte[] eof, BackendConnection conn) {
+ LOGGER.error(new StringBuilder().append("unexpected packet for ")
+ .append(conn).append(" bound by ").append(session.getSource())
+ .append(": field's eof").toString());
+ }
+
+ @Override
+ public void rowResponse(byte[] row, BackendConnection conn) {
+ LOGGER.warn(new StringBuilder().append("unexpected packet for ")
+ .append(conn).append(" bound by ").append(session.getSource())
+ .append(": row data packet").toString());
+ }
+
+ @Override
+ public void rowEofResponse(byte[] eof, BackendConnection conn) {
+ LOGGER.error(new StringBuilder().append("unexpected packet for ")
+ .append(conn).append(" bound by ").append(session.getSource())
+ .append(": row's eof").toString());
+ }
+
+ @Override
+ public void writeQueueAvailable() {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void connectionClose(BackendConnection conn, String reason) {
+ // TODO Auto-generated method stub
+
+ }
+
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/CoordinatorLogEntry.java b/src/main/java/io/mycat/backend/mysql/xa/CoordinatorLogEntry.java
new file mode 100644
index 000000000..62c76c7a8
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/CoordinatorLogEntry.java
@@ -0,0 +1,41 @@
+package io.mycat.backend.mysql.xa;
+
+import java.io.Serializable;
+
+/**
+ * Created by zhangchao on 2016/10/17.
+ */
+public class CoordinatorLogEntry implements Serializable {
+
+ private static final long serialVersionUID = -919666492191340531L;
+
+ public final String id;
+
+// public final boolean wasCommitted;
+
+ public final ParticipantLogEntry[] participants;
+
+
+ public CoordinatorLogEntry(String coordinatorId,
+ ParticipantLogEntry[] participantDetails) {
+ this(coordinatorId, false, participantDetails, null);
+ }
+
+ public CoordinatorLogEntry(String coordinatorId, boolean wasCommitted,
+ ParticipantLogEntry[] participants) {
+ this.id = coordinatorId;
+// this.wasCommitted = wasCommitted;
+ this.participants = participants;
+ }
+
+ public CoordinatorLogEntry(String coordinatorId, boolean wasCommitted,
+ ParticipantLogEntry[] participants, String superiorCoordinatorId) {
+ this.id = coordinatorId;
+// this.wasCommitted = wasCommitted;
+ this.participants = participants;
+ }
+
+
+
+
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/Deserializer.java b/src/main/java/io/mycat/backend/mysql/xa/Deserializer.java
new file mode 100644
index 000000000..812f511f9
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/Deserializer.java
@@ -0,0 +1,108 @@
+package io.mycat.backend.mysql.xa;
+
+import io.mycat.backend.mysql.xa.recovery.DeserialisationException;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Created by zhangchao on 2016/10/17.
+ */
+public class Deserializer {
+
+ private static final String JSON_ARRAY_END = "]";
+
+ private static final String JSON_ARRAY_START = "[";
+
+ private static final String OBJECT_START= "{";
+
+ private static final String OBJECT_END= "}";
+
+ List<String> tokenize(String content) {
+ List<String> result = new ArrayList<String>();
+ int endObject = content.indexOf(OBJECT_END);
+ while(endObject >0){
+ String object = content.substring(0,endObject+1);
+ result.add(object);
+ content = content.substring(endObject+1);
+ endObject = content.indexOf(OBJECT_END);
+ }
+ return result;
+ }
+
+ String extractArrayPart(String content) {
+ if(!content.contains(JSON_ARRAY_START) && !content.contains(JSON_ARRAY_END)) {
+ //no array...
+ return "";
+ }
+ //else
+ int start=content.indexOf(JSON_ARRAY_START);
+ int end=content.indexOf(JSON_ARRAY_END);
+
+ return content.substring(start+1, end);
+ }
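+ /*
+ * A serialized entry, as produced by Serializer.toJSON, looks roughly like this (illustrative values only):
+ * {"id":"someXid","participants":[{"uri":"someUri","state":"2","expires":1476700000000,"resourceName":"someDataNode"}]}
+ */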
+ public CoordinatorLogEntry fromJSON(String coordinatorLogEntryStr) throws DeserialisationException {
+ try {
+ String jsonContent = coordinatorLogEntryStr.trim();
+ validateJSONContent(jsonContent);
+ Map<String, String> header = extractHeader(jsonContent);
+ String coordinatorId = header.get("id");
+ String arrayContent = extractArrayPart(jsonContent);
+ List<String> elements = tokenize(arrayContent);
+
+ ParticipantLogEntry[] participantLogEntries = new ParticipantLogEntry[elements.size()];
+
+ for (int i = 0; i < participantLogEntries.length; i++) {
+ participantLogEntries[i]=recreateParticipantLogEntry(coordinatorId,elements.get(i));
+ }
+
+
+ CoordinatorLogEntry actual = new CoordinatorLogEntry(header.get("id"),Boolean.valueOf(header.get("wasCommitted")), participantLogEntries,header.get("superiorCoordinatorId"));
+ return actual;
+ } catch (Exception unexpectedEOF) {
+ throw new DeserialisationException(coordinatorLogEntryStr);
+ }
+ }
+
+ private void validateJSONContent(String coordinatorLogEntryStr)
+ throws DeserialisationException {
+ if (!coordinatorLogEntryStr.startsWith(OBJECT_START)){
+ throw new DeserialisationException(coordinatorLogEntryStr);
+ }
+ if (!coordinatorLogEntryStr.endsWith(OBJECT_END)){
+ throw new DeserialisationException(coordinatorLogEntryStr);
+ }
+ }
+
+ private Map<String, String> extractHeader(String coordinatorLogEntryStr) {
+ Map<String, String> header = new HashMap<String, String>(2);
+ String[] attributes = coordinatorLogEntryStr.split(",");
+ for (String attribute : attributes) {
+ String[] pair = attribute.split(":");
+ header.put(pair[0].replaceAll("\\{", "").replace("\"", ""), pair[1].replace("\"", ""));
+ }
+ return header;
+ }
+
+ ParticipantLogEntry recreateParticipantLogEntry(String coordinatorId,
+ String participantLogEntry) {
+ participantLogEntry = participantLogEntry.replaceAll("\\{", "").replaceAll("\\}", "");
+
+ Map<String, String> content = new HashMap<String, String>(5);
+ String[] attributes = participantLogEntry.split(",");
+ for (String attribute : attributes) {
+ String[] pair = attribute.split(":");
+ if(pair.length>1){
+ content.put(pair[0].replace("\"", ""), pair[1].replace("\"", ""));
+ }
+
+ }
+
+ ParticipantLogEntry actual = new ParticipantLogEntry(coordinatorId,
+ content.get("uri"), Long.valueOf(content.get("expires")), content.get("resourceName"), Integer.parseInt(content.get("state")));
+ return actual;
+ }
+
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/LogFileLock.java b/src/main/java/io/mycat/backend/mysql/xa/LogFileLock.java
new file mode 100644
index 000000000..c625f4a59
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/LogFileLock.java
@@ -0,0 +1,77 @@
+package io.mycat.backend.mysql.xa;
+
+import io.mycat.backend.mysql.xa.recovery.LogException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.channels.FileLock;
+import java.nio.channels.OverlappingFileLockException;
+
+/**
+ * Created by zhangchao on 2016/10/17.
+ */
+public class LogFileLock {
+ public static final Logger logger = LoggerFactory
+ .getLogger(LogFileLock.class);
+ private static final String FILE_SEPARATOR = String.valueOf(File.separatorChar);
+ private File lockfileToPreventDoubleStartup_;
+ private FileOutputStream lockfilestream_ = null;
+ private FileLock lock_ = null;
+
+ private String dir;
+
+ private String fileName;
+
+ public LogFileLock(String dir, String fileName) {
+ if(!dir.endsWith(FILE_SEPARATOR)) {
+ dir += FILE_SEPARATOR;
+ }
+ this.dir = dir;
+ this.fileName = fileName;
+ }
+
+ public void acquireLock() throws LogException {
+ try {
+ File parent = new File(dir);
+ if(!parent.exists()) {
+ parent.mkdir();
+ }
+ lockfileToPreventDoubleStartup_ = new File(dir, fileName + ".lck");
+ lockfilestream_ = new FileOutputStream(lockfileToPreventDoubleStartup_);
+ lock_ = lockfilestream_.getChannel().tryLock();
+ lockfileToPreventDoubleStartup_.deleteOnExit();
+ } catch (OverlappingFileLockException failedToGetLock) {
+ // happens on windows
+ lock_ = null;
+ } catch (IOException failedToGetLock) {
+ // happens on windows
+ lock_ = null;
+ }
+ if (lock_ == null) {
+ logger.error("ERROR: the specified log seems to be in use already: " + fileName + " in " + dir + ". Make sure that no other instance is running, or kill any pending process if needed.");
+ throw new LogException("Log already in use? " + fileName + " in "+ dir);
+ }
+ }
+
+ public void releaseLock() {
+ try {
+ if (lock_ != null) {
+ lock_.release();
+ }
+ if (lockfilestream_ != null)
+ lockfilestream_.close();
+ } catch (IOException e) {
+ logger.warn("Error releasing file lock: " + e.getMessage());
+ } finally {
+ lock_ = null;
+ }
+
+ if (lockfileToPreventDoubleStartup_ != null) {
+ lockfileToPreventDoubleStartup_.delete();
+ lockfileToPreventDoubleStartup_ = null;
+ }
+ }
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/ParticipantLogEntry.java b/src/main/java/io/mycat/backend/mysql/xa/ParticipantLogEntry.java
new file mode 100644
index 000000000..54bd77bd5
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/ParticipantLogEntry.java
@@ -0,0 +1,76 @@
+package io.mycat.backend.mysql.xa;
+
+import java.io.Serializable;
+
+/**
+ * Created by zhangchao on 2016/10/17.
+ */
+public class ParticipantLogEntry implements Serializable {
+
+ private static final long serialVersionUID = 1728296701394899871L;
+
+ /**
+ * The ID of the global transaction as known by the transaction core.
+ */
+
+ public String coordinatorId;
+
+ /**
+ * Identifies the participant within the global transaction.
+ */
+
+ public String uri;
+
+ /**
+ * When does this participant expire (expressed in millis since Jan 1, 1970)?
+ */
+
+ public long expires;
+
+ /**
+ * Best-known state of the participant.
+ */
+ public int txState;
+
+ /**
+ * For diagnostic purposes, null if not relevant.
+ */
+ public String resourceName;
+
+ public ParticipantLogEntry(String coordinatorId, String uri,
+ long expires, String resourceName, int txState) {
+ this.coordinatorId = coordinatorId;
+ this.uri = uri;
+ this.expires = expires;
+ this.resourceName = resourceName;
+ this.txState = txState;
+ }
+
+
+
+ @Override
+ public boolean equals(Object other) {
+ boolean ret = false;
+ if (other instanceof ParticipantLogEntry) {
+ ParticipantLogEntry o = (ParticipantLogEntry) other;
+ if (o.coordinatorId.equals(coordinatorId) && o.uri.equals(uri)) ret = true;
+ }
+ return ret;
+ }
+
+ @Override
+ public int hashCode() {
+ return coordinatorId.hashCode();
+ }
+
+
+
+ @Override
+ public String toString() {
+ return "ParticipantLogEntry [id=" + coordinatorId
+ + ", uri=" + uri + ", expires=" + expires
+ + ", state=" + txState + ", resourceName=" + resourceName + "]";
+ }
+
+
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/Serializer.java b/src/main/java/io/mycat/backend/mysql/xa/Serializer.java
new file mode 100644
index 000000000..c16fad701
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/Serializer.java
@@ -0,0 +1,54 @@
+package io.mycat.backend.mysql.xa;
+
+/**
+ * Created by zhangchao on 2016/10/17.
+ */
+public class Serializer {
+ private static final String PROPERTY_SEPARATOR = ",";
+ private static final String QUOTE = "\"";
+ private static final String END_ARRAY = "]";
+ private static final String START_ARRAY = "[";
+ private static final String START_OBJECT = "{";
+ private static final String END_OBJECT = "}";
+ private static final String LINE_SEPARATOR = System.getProperty("line.separator");
+
+ public String toJSON(CoordinatorLogEntry coordinatorLogEntry) {
+ StringBuilder strBuilder = new StringBuilder(600);
+ strBuilder.append(START_OBJECT);
+ strBuilder.append(QUOTE).append("id").append(QUOTE).append(":").append(QUOTE).append(coordinatorLogEntry.id).append(QUOTE);
+ strBuilder.append(PROPERTY_SEPARATOR);
+ //strBuilder.append(QUOTE).append("wasCommitted").append(QUOTE).append(":").append(coordinatorLogEntry.wasCommitted);
+ //strBuilder.append(PROPERTY_SEPARATOR);
+
+ String prefix = "";
+ if(coordinatorLogEntry.participants.length>0){
+ strBuilder.append(QUOTE).append("participants").append(QUOTE);
+ strBuilder.append(":");
+ strBuilder.append(START_ARRAY);
+
+ for(ParticipantLogEntry participantLogEntry :coordinatorLogEntry.participants){
+ if(participantLogEntry==null){continue;}
+ strBuilder.append(prefix);
+ prefix = PROPERTY_SEPARATOR;
+ strBuilder.append(START_OBJECT);
+ strBuilder.append(QUOTE).append("uri").append(QUOTE).append(":").append(QUOTE).append(participantLogEntry.uri).append(QUOTE);
+ strBuilder.append(PROPERTY_SEPARATOR);
+ strBuilder.append(QUOTE).append("state").append(QUOTE).append(":").append(QUOTE).append(participantLogEntry.txState).append(QUOTE);
+ strBuilder.append(PROPERTY_SEPARATOR);
+ strBuilder.append(QUOTE).append("expires").append(QUOTE).append(":").append(participantLogEntry.expires);
+ if (participantLogEntry.resourceName!=null) {
+ strBuilder.append(PROPERTY_SEPARATOR);
+ strBuilder.append(QUOTE).append("resourceName").append(QUOTE).append(":").append(QUOTE).append(participantLogEntry.resourceName).append(QUOTE);
+ }
+ strBuilder.append(END_OBJECT);
+ }
+// for (ParticipantLogEntry participantLogEntry : coordinatorLogEntry.participants) {
+//
+// }
+ strBuilder.append(END_ARRAY);
+ }
+ strBuilder.append(END_OBJECT);
+ strBuilder.append(LINE_SEPARATOR);
+ return strBuilder.toString();
+ }
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/TxState.java b/src/main/java/io/mycat/backend/mysql/xa/TxState.java
new file mode 100644
index 000000000..fa84e048e
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/TxState.java
@@ -0,0 +1,17 @@
+package io.mycat.backend.mysql.xa;
+
+/**
+ * Created by zhangchao on 2016/10/13.
+ */
+public class TxState {
+ /** XA INIT STATUS **/
+ public static final int TX_INITIALIZE_STATE = 0;
+ /** XA STARTED STATUS **/
+ public static final int TX_STARTED_STATE = 1;
+ /** XA is prepared **/
+ public static final int TX_PREPARED_STATE = 2;
+ /** XA is committed **/
+ public static final int TX_COMMITED_STATE = 3;
+ /** XA is rolled back **/
+ public static final int TX_ROLLBACKED_STATE = 4;
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/VersionedFile.java b/src/main/java/io/mycat/backend/mysql/xa/VersionedFile.java
new file mode 100644
index 000000000..efd4414e0
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/VersionedFile.java
@@ -0,0 +1,231 @@
+package io.mycat.backend.mysql.xa;
+
+import java.io.*;
+import java.nio.channels.FileChannel;
+
+/**
+ * Created by zhangchao on 2016/10/17.
+ */
+public class VersionedFile {
+
+ private static final String FILE_SEPARATOR = String.valueOf(File.separatorChar);
+ private String baseDir;
+ private String suffix;
+ private String baseName;
+
+ //state attributes below
+
+ private long version;
+ private FileInputStream inputStream;
+
+ private RandomAccessFile randomAccessFile;
+
+
+ /**
+ * Creates a new instance based on the given name parameters.
+ * The actual complete name(s) of the physical file(s) will be based on a version number
+ * inserted in between, to identify versions.
+ *
+ * @param baseDir The base folder.
+ * @param baseName The base name of the file path/name.
+ * @param suffix The suffix to append to the complete file name.
+ */
+ public VersionedFile ( String baseDir , String baseName , String suffix )
+ {
+
+ if(!baseDir.endsWith(FILE_SEPARATOR)) {
+ baseDir += FILE_SEPARATOR;
+ }
+ this.baseDir = baseDir;
+ this.suffix = suffix;
+ this.baseName = baseName;
+ resetVersion();
+ }
+
+ private void resetVersion()
+ {
+ this.version = extractLastValidVersionNumberFromFileNames();
+ }
+
+ private long extractLastValidVersionNumberFromFileNames() {
+ long version = -1;
+ File cd = new File ( getBaseDir() );
+ String[] names = cd.list ( new FilenameFilter() {
+ public boolean accept ( File dir , String name )
+ {
+ return (name.startsWith ( getBaseName() ) && name
+ .endsWith ( getSuffix() ));
+ }
+ } );
+ if ( names!= null ) {
+ for ( int i = 0; i < names.length; i++ ) {
+ long sfx = extractVersion ( names[i] );
+ if ( version < 0 || sfx < version )
+ version = sfx;
+ }
+ }
+
+ return version;
+ }
+
+ private long extractVersion ( String name )
+ {
+ long ret = 0;
+ int lastpos = name.lastIndexOf ( '.' );
+ int startpos = getBaseName().length ();
+ String suffix = name.substring ( startpos, lastpos );
+ try {
+
+ ret = Long.valueOf( suffix );
+ } catch ( NumberFormatException e ) {
+ IllegalArgumentException err = new IllegalArgumentException ( "Error extracting version from file: " + name+" in " + getBaseDir() );
+ err.initCause ( e );
+ throw err;
+ }
+ return ret;
+ }
+
+ private String getBackupVersionFileName()
+ {
+ return getBaseUrl() + (version - 1) + getSuffix();
+ }
+
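+ // e.g. with baseDir "/conf/", baseName "tmlog" and suffix ".log" (illustrative values), version 3 resolves to "/conf/tmlog3.log"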
+ public String getCurrentVersionFileName()
+ {
+ return getBaseUrl() + version + getSuffix();
+ }
+
+ public String getBaseUrl()
+ {
+ return baseDir + baseName;
+ }
+
+ public String getBaseDir()
+ {
+ return this.baseDir;
+ }
+
+ public String getBaseName()
+ {
+ return this.baseName;
+ }
+
+ public String getSuffix()
+ {
+ return this.suffix;
+ }
+
+ /**
+ * Opens the last valid version for reading.
+ *
+ * @return A stream to read the last valid contents
+ * of the file: either the backup version (if present)
+ * or the current (and only) version if no backup is found.
+ *
+ * @throws IllegalStateException If a newer version was opened for writing.
+ * @throws FileNotFoundException If no last version was found.
+ */
+ public FileInputStream openLastValidVersionForReading()
+ throws IllegalStateException, FileNotFoundException
+ {
+ if ( randomAccessFile != null ) throw new IllegalStateException ( "Already started writing." );
+ inputStream = new FileInputStream ( getCurrentVersionFileName() );
+ return inputStream;
+ }
+
+ /**
+ * Opens a new version for writing to. Note that
+ * this new version is tentative and cannot be read
+ * by {@link #openLastValidVersionForReading()} until
+ * {@link #discardBackupVersion()} is called.
+ *
+ * @return A stream for writing to.
+ * @throws IllegalStateException If called more than once
+ * without a close in between.
+ * @throws IOException If the file cannot be opened for writing.
+ */
+ public FileOutputStream openNewVersionForWriting() throws IOException
+ {
+ openNewVersionForNioWriting();
+ return new FileOutputStream(randomAccessFile.getFD());
+ }
+
+ /**
+ * Opens a new version for writing to. Note that
+ * this new version is tentative and cannot be read
+ * by {@link #openLastValidVersionForReading()} until
+ * {@link #discardBackupVersion()} is called.
+ *
+ * @return A channel for writing to.
+ *
+ * @throws IllegalStateException If called more than once
+ * without a close in between.
+ * @throws FileNotFoundException If the file cannot be opened for writing.
+ */
+ public FileChannel openNewVersionForNioWriting() throws FileNotFoundException
+ {
+ if ( randomAccessFile != null ) throw new IllegalStateException ( "Already writing a new version." );
+ //version++;
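+ // note: with the version increment above disabled, the "new version" is written to the current version's file name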
+ randomAccessFile = new RandomAccessFile(getCurrentVersionFileName(), "rw");
+ return randomAccessFile.getChannel();
+ }
+ /**
+ * Discards the backup version (if any).
+ * After calling this method, the newer version
+ * produced after calling {@link #openNewVersionForWriting()}
+ * becomes valid for reading next time when
+ * {@link #openLastValidVersionForReading()} is called.
+ *
+ * Note: it is the caller's responsibility to make sure that
+ * all new data has been flushed to disk before calling this method!
+ *
+ * @throws IllegalStateException If {@link #openNewVersionForWriting()} has not been called yet.
+ * @throws IOException If the previous version exists but could not be deleted.
+ */
+ public void discardBackupVersion() throws IllegalStateException, IOException
+ {
+ if ( randomAccessFile == null ) throw new IllegalStateException ( "No new version yet!" );
+ String fileName = getBackupVersionFileName();
+
+ File temp = new File ( fileName );
+ if ( temp.exists() && !temp.delete() ) throw new IOException ( "Failed to delete backup version: " + fileName );
+
+ }
+
+ /**
+ * Closes any open resources and resets the file for reading again.
+ * @throws IOException If the output stream could not be closed.
+ */
+
+ public void close() throws IOException
+ {
+ resetVersion();
+ if ( inputStream != null ) {
+ try {
+ inputStream.close();
+ } catch (IOException e) {
+ // ignored: according to the JDK javadoc, closing an input stream
+ // that is already closed has no effect, so this should not happen here
+ } finally {
+ inputStream = null;
+ }
+ }
+ if ( randomAccessFile != null ) {
+ try {
+ if ( randomAccessFile.getFD().valid() ) randomAccessFile.close();
+ } finally {
+ randomAccessFile = null;
+ }
+ }
+ }
+
+ public long getSize()
+ {
+ long res = -1;
+ File f = new File ( getCurrentVersionFileName() );
+ res = f.length();
+ return res;
+ }
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/XARollbackCallback.java b/src/main/java/io/mycat/backend/mysql/xa/XARollbackCallback.java
new file mode 100644
index 000000000..fcca9d7c7
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/XARollbackCallback.java
@@ -0,0 +1,23 @@
+package io.mycat.backend.mysql.xa;
+
+import io.mycat.sqlengine.SQLQueryResult;
+import io.mycat.sqlengine.SQLQueryResultListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+
+/**
+ * Created by zhangchao on 2016/10/18.
+ */
+public class XARollbackCallback implements SQLQueryResultListener<SQLQueryResult<Map<String, String>>> {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(XARollbackCallback.class);
+
+ public void onResult(SQLQueryResult<Map<String, String>> result) {
+
+ LOGGER.debug("[CALLBACK][XA ROLLBACK] when Mycat start");
+
+
+ }
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/DeserialisationException.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/DeserialisationException.java
new file mode 100644
index 000000000..d0c614d80
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/DeserialisationException.java
@@ -0,0 +1,12 @@
+package io.mycat.backend.mysql.xa.recovery;
+
+/**
+ * Created by zhangchao on 2016/10/17.
+ */
+public class DeserialisationException extends Exception{
+ private static final long serialVersionUID = -3835526236269555460L;
+
+ public DeserialisationException(String content) {
+ super(content);
+ }
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/LogException.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/LogException.java
new file mode 100644
index 000000000..5feb8355c
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/LogException.java
@@ -0,0 +1,20 @@
+package io.mycat.backend.mysql.xa.recovery;
+
+/**
+ * Created by zhangchao on 2016/10/13.
+ */
+public class LogException extends Exception{
+ private static final long serialVersionUID = 3259337218182873867L;
+
+ public LogException() {
+ super();
+ }
+
+ public LogException(String message) {
+ super(message);
+ }
+
+ public LogException(Throwable cause) {
+ super(cause);
+ }
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/LogReadException.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/LogReadException.java
new file mode 100644
index 000000000..28f6f2b42
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/LogReadException.java
@@ -0,0 +1,22 @@
+package io.mycat.backend.mysql.xa.recovery;
+
+/**
+ * Created by zhangchao on 2016/10/17.
+ */
+public class LogReadException extends LogException{
+
+ private static final long serialVersionUID = -4835268355879075429L;
+
+ public LogReadException() {
+ super();
+ }
+
+ public LogReadException(Throwable cause) {
+ super(cause);
+ }
+
+ public LogReadException(String message) {
+ super(message);
+ }
+
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/LogWriteException.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/LogWriteException.java
new file mode 100644
index 000000000..1c9d284d4
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/LogWriteException.java
@@ -0,0 +1,17 @@
+package io.mycat.backend.mysql.xa.recovery;
+
+/**
+ * Created by zhangchao on 2016/10/17.
+ */
+public class LogWriteException extends LogException{
+
+ private static final long serialVersionUID = 5648208124041649641L;
+
+ public LogWriteException() {
+ super();
+ }
+ public LogWriteException(Throwable cause) {
+ super(cause);
+ }
+
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/Repository.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/Repository.java
new file mode 100644
index 000000000..d6a50ff9e
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/Repository.java
@@ -0,0 +1,26 @@
+package io.mycat.backend.mysql.xa.recovery;
+
+import io.mycat.backend.mysql.xa.CoordinatorLogEntry;
+
+import java.util.Collection;
+
+/**
+ * Created by zhangchao on 2016/10/13.
+ */
+public interface Repository {
+
+ void init() ;
+
+ void put(String id, CoordinatorLogEntry coordinatorLogEntry);
+
+ CoordinatorLogEntry get(String coordinatorId);
+
+ Collection<CoordinatorLogEntry> findAllCommittingCoordinatorLogEntries();
+
+ Collection<CoordinatorLogEntry> getAllCoordinatorLogEntries();
+
+ void writeCheckpoint(Collection<CoordinatorLogEntry> checkpointContent);
+
+ void close();
+
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/impl/FileSystemRepository.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/impl/FileSystemRepository.java
new file mode 100644
index 000000000..9e2febdb0
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/impl/FileSystemRepository.java
@@ -0,0 +1,233 @@
+package io.mycat.backend.mysql.xa.recovery.impl;
+
+import io.mycat.MycatServer;
+import io.mycat.backend.mysql.xa.*;
+import io.mycat.backend.mysql.xa.recovery.*;
+import io.mycat.config.MycatConfig;
+import io.mycat.config.model.SystemConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.*;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Created by zhangchao on 2016/10/13.
+ */
+public class FileSystemRepository implements Repository{
+ public static final Logger logger = LoggerFactory
+ .getLogger(FileSystemRepository.class);
+ private VersionedFile file;
+ private FileChannel rwChannel = null;
+
+ public FileSystemRepository() {
+ init();
+ }
+
+ @Override
+ public void init(){
+// ConfigProperties configProperties = Configuration.getConfigProperties();
+// String baseDir = configProperties.getLogBaseDir();
+// String baseName = configProperties.getLogBaseName();
+ MycatConfig mycatconfig = MycatServer.getInstance().getConfig();
+ SystemConfig systemConfig = mycatconfig.getSystem();
+
+ String baseDir =systemConfig.getXARecoveryLogBaseDir();
+ String baseName = systemConfig.getXARecoveryLogBaseName();
+
+ logger.debug("baseDir " + baseDir);
+ logger.debug("baseName " + baseName);
+
+ // make sure the base dir exists (create it if necessary)
+ createBaseDir(baseDir);
+
+ file = new VersionedFile(baseDir, baseName, ".log");
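+ // the recovery log file is <baseDir>/<baseName><version>.log, e.g. "/conf/tmlog1.log" (illustrative path)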
+
+ }
+
+ private Serializer serializer = new Serializer();
+
+ @Override
+ public void put(String id, CoordinatorLogEntry coordinatorLogEntry) {
+
+ try {
+ initChannelIfNecessary();
+ write(coordinatorLogEntry, true);
+ } catch (IOException e) {
+ logger.error(e.getMessage(),e);
+ }
+ }
+
+ private synchronized void initChannelIfNecessary()
+ throws FileNotFoundException {
+ if (rwChannel == null) {
+ rwChannel = file.openNewVersionForNioWriting();
+ }
+ }
+
+ private void write(CoordinatorLogEntry coordinatorLogEntry,
+ boolean flushImmediately) throws IOException {
+ String str = serializer.toJSON(coordinatorLogEntry);
+ byte[] buffer = str.getBytes();
+ ByteBuffer buff = ByteBuffer.wrap(buffer);
+ writeToFile(buff, flushImmediately);
+ }
+
+ private synchronized void writeToFile(ByteBuffer buff, boolean force)
+ throws IOException {
+ rwChannel.write(buff);
+ rwChannel.force(force);
+ }
+
+ @Override
+ public CoordinatorLogEntry get(String coordinatorId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Collection<CoordinatorLogEntry> findAllCommittingCoordinatorLogEntries() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Collection<CoordinatorLogEntry> getAllCoordinatorLogEntries() {
+ FileInputStream fis = null;
+ try {
+ fis = file.openLastValidVersionForReading();
+ } catch (FileNotFoundException firstStart) {
+ // the file could not be opened for reading;
+ // merely return the default empty vector
+ }
+ if (fis != null) {
+ return readFromInputStream(fis);
+ }
+ //else
+ return Collections.emptyList();
+ }
+
+ public static Collection<CoordinatorLogEntry> readFromInputStream(
+ InputStream in) {
+ Map<String, CoordinatorLogEntry> coordinatorLogEntries = new HashMap<String, CoordinatorLogEntry>();
+ BufferedReader br = null;
+ try {
+ InputStreamReader isr = new InputStreamReader(in);
+ br = new BufferedReader(isr);
+ coordinatorLogEntries = readContent(br);
+ } catch (Exception e) {
+ logger.error("Error in recover", e);
+ } finally {
+ closeSilently(br);
+ }
+ return coordinatorLogEntries.values();
+ }
+
+ static Map<String, CoordinatorLogEntry> readContent(BufferedReader br)
+ throws IOException {
+
+ Map<String, CoordinatorLogEntry> coordinatorLogEntries = new HashMap<String, CoordinatorLogEntry>();
+ try {
+ String line;
+ while ((line = br.readLine()) != null) {
+ CoordinatorLogEntry coordinatorLogEntry = deserialize(line);
+ coordinatorLogEntries.put(coordinatorLogEntry.id,
+ coordinatorLogEntry);
+ }
+
+ } catch (EOFException unexpectedEOF) {
+ logger.info(
+ "Unexpected EOF - logfile not closed properly last time?",
+ unexpectedEOF);
+ // merely return what was read so far...
+ } catch (StreamCorruptedException unexpectedEOF) {
+ logger.info(
+ "Unexpected EOF - logfile not closed properly last time?",
+ unexpectedEOF);
+ // merely return what was read so far...
+ } catch (ObjectStreamException unexpectedEOF) {
+ logger.info(
+ "Unexpected EOF - logfile not closed properly last time?",
+ unexpectedEOF);
+ // merely return what was read so far...
+ } catch (DeserialisationException unexpectedEOF) {
+ logger.info("Unexpected EOF - logfile not closed properly last time? "
+ + unexpectedEOF);
+ }
+ return coordinatorLogEntries;
+ }
+
+ private static void closeSilently(BufferedReader fis) {
+ try {
+ if (fis != null)
+ fis.close();
+ } catch (IOException io) {
+ logger.warn("Fail to close logfile after reading - ignoring");
+ }
+ }
+
+ private static Deserializer deserializer = new Deserializer();
+
+ private static CoordinatorLogEntry deserialize(String line)
+ throws DeserialisationException {
+ return deserializer.fromJSON(line);
+ }
+
+ @Override
+ public void close() {
+ try {
+ closeOutput();
+ } catch (Exception e) {
+ logger.warn("Error closing file - ignoring", e);
+ }
+
+ }
+
+ protected void closeOutput() throws IllegalStateException {
+ try {
+ if (file != null) {
+ file.close();
+ }
+ } catch (IOException e) {
+ throw new IllegalStateException("Error closing previous output", e);
+ }
+ }
+
+ @Override
+ public synchronized void writeCheckpoint(
+ Collection<CoordinatorLogEntry> checkpointContent)
+ {
+
+ try {
+ closeOutput();
+
+ rwChannel = file.openNewVersionForNioWriting();
+ for (CoordinatorLogEntry coordinatorLogEntry : checkpointContent) {
+ write(coordinatorLogEntry, false);
+ }
+ rwChannel.force(false);
+ file.discardBackupVersion();
+ } catch (FileNotFoundException firstStart) {
+ // the file could not be opened for reading;
+ // merely return the default empty vector
+ } catch (Exception e) {
+ logger.error("Failed to write checkpoint", e);
+ }
+
+ }
+
+ /**
+ * create the log base dir
+ * @param baseDir
+ */
+ public void createBaseDir(String baseDir){
+ File baseDirFolder = new File (baseDir);
+ if (!baseDirFolder.exists()){
+ baseDirFolder.mkdirs();
+ }
+ }
+
+}
diff --git a/src/main/java/io/mycat/backend/mysql/xa/recovery/impl/InMemoryRepository.java b/src/main/java/io/mycat/backend/mysql/xa/recovery/impl/InMemoryRepository.java
new file mode 100644
index 000000000..937a14482
--- /dev/null
+++ b/src/main/java/io/mycat/backend/mysql/xa/recovery/impl/InMemoryRepository.java
@@ -0,0 +1,76 @@
+package io.mycat.backend.mysql.xa.recovery.impl;
+
+import io.mycat.backend.mysql.xa.CoordinatorLogEntry;
+import io.mycat.backend.mysql.xa.TxState;
+import io.mycat.backend.mysql.xa.recovery.Repository;
+
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Created by zhangchao on 2016/10/18.
+ */
+public class InMemoryRepository implements Repository {
+
+ private Map<String, CoordinatorLogEntry> storage = new ConcurrentHashMap<String, CoordinatorLogEntry>();
+
+
+ private boolean closed = true;
+ @Override
+ public void init() {
+ closed=false;
+ }
+
+ @Override
+ public synchronized void put(String id, CoordinatorLogEntry coordinatorLogEntry) {
+ storage.put(id, coordinatorLogEntry);
+ }
+
+ @Override
+ public synchronized CoordinatorLogEntry get(String coordinatorId) {
+ return storage.get(coordinatorId);
+ }
+
+ @Override
+ public synchronized Collection<CoordinatorLogEntry> findAllCommittingCoordinatorLogEntries() {
+// Set res = new HashSet();
+// Collection allCoordinatorLogEntry = storage.values();
+// for (CoordinatorLogEntry coordinatorLogEntry : allCoordinatorLogEntry) {
+// if(coordinatorLogEntry.getResultingState() == TxState.TX_PREPARED_STATE){
+// res.add(coordinatorLogEntry);
+// }
+// }
+// return res;
+ return null;
+ }
+
+ @Override
+ public void close() {
+ storage.clear();
+ closed=true;
+ }
+
+ @Override
+ public Collection<CoordinatorLogEntry> getAllCoordinatorLogEntries() {
+ return storage.values();
+ }
+
+ @Override
+ public void writeCheckpoint(
+ Collection<CoordinatorLogEntry> checkpointContent) {
+ storage.clear();
+ for (CoordinatorLogEntry coordinatorLogEntry : checkpointContent) {
+ storage.put(coordinatorLogEntry.id, coordinatorLogEntry);
+ }
+
+ }
+
+
+
+ public boolean isClosed() {
+ return closed;
+ }
+}
diff --git a/src/main/java/io/mycat/backend/nio/MySQLBackendConnectionFactory.java b/src/main/java/io/mycat/backend/nio/MySQLBackendConnectionFactory.java
deleted file mode 100644
index 0a477bd3e..000000000
--- a/src/main/java/io/mycat/backend/nio/MySQLBackendConnectionFactory.java
+++ /dev/null
@@ -1,37 +0,0 @@
-package io.mycat.backend.nio;
-
-import io.mycat.backend.MySQLDataSource;
-import io.mycat.net.NetSystem;
-import io.mycat.server.config.node.DBHostConfig;
-import io.mycat.server.executors.ResponseHandler;
-
-import java.io.IOException;
-import java.nio.channels.SocketChannel;
-
-public class MySQLBackendConnectionFactory {
- private final MySQLBackendConnectionHandler nioHandler = new MySQLBackendConnectionHandler();
-
- public MySQLBackendConnection make(MySQLDataSource pool,
- ResponseHandler handler, String schema) throws IOException {
-
- DBHostConfig dsc = pool.getConfig();
- SocketChannel channel = SocketChannel.open();
- channel.configureBlocking(false);
-
- MySQLBackendConnection c = new MySQLBackendConnection(channel,
- pool.isReadNode());
- NetSystem.getInstance().setSocketParams(c, false);
- // 设置NIOHandler
- c.setHandler(nioHandler);
- c.setHost(dsc.getIp());
- c.setPort(dsc.getPort());
- c.setUser(dsc.getUser());
- c.setPassword(dsc.getPassword());
- c.setSchema(schema);
- c.setPool(pool);
- c.setResponseHandler(handler);
- c.setIdleTimeout(pool.getConfig().getIdleTimeout());
- NetSystem.getInstance().getConnector().postConnect(c);
- return c;
- }
-}
diff --git a/src/main/java/io/mycat/backend/nio/MySQLBackendConnectionHandler.java b/src/main/java/io/mycat/backend/nio/MySQLBackendConnectionHandler.java
deleted file mode 100644
index 08e9bf855..000000000
--- a/src/main/java/io/mycat/backend/nio/MySQLBackendConnectionHandler.java
+++ /dev/null
@@ -1,333 +0,0 @@
-package io.mycat.backend.nio;
-
-import io.mycat.MycatServer;
-import io.mycat.net.Connection;
-import io.mycat.net.ConnectionException;
-import io.mycat.net.NIOHandler;
-import io.mycat.server.Capabilities;
-import io.mycat.server.executors.LoadDataResponseHandler;
-import io.mycat.server.executors.ResponseHandler;
-import io.mycat.server.packet.*;
-import io.mycat.server.packet.util.ByteUtil;
-import io.mycat.server.packet.util.CharsetUtil;
-import io.mycat.server.packet.util.SecurityUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-
-public class MySQLBackendConnectionHandler implements
- NIOHandler