Diffstat (limited to 'vendor/github.com/RoaringBitmap')
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/AUTHORS | 10
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS | 12
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/LICENSE | 202
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/LICENSE-2.0.txt | 202
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/Makefile | 121
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/README.md | 246
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/arraycontainer.go | 960
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go | 134
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go | 982
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go | 415
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/ctz.go | 11
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/ctz_compat.go | 71
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/fastaggregation.go | 215
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/manyiterator.go | 23
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/parallel.go | 613
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/popcnt.go | 11
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/popcnt_amd64.s | 103
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/popcnt_asm.go | 67
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/popcnt_compat.go | 17
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/popcnt_generic.go | 23
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/popcnt_slices.go | 41
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/priorityqueue.go | 101
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/rle.go | 1667
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/rle16.go | 1747
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/rle16_gen.go | 1126
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/rle_gen.go | 1118
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/rlecommon.go | 163
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/rlei.go | 695
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/roaring.go | 1345
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/roaringarray.go | 893
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go | 529
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/serialization.go | 83
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/serialization_generic.go | 118
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go | 113
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/serializationfuzz.go | 21
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/setutil.go | 609
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/shortiterator.go | 21
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/smat.go | 383
-rw-r--r--  vendor/github.com/RoaringBitmap/roaring/util.go | 315
39 files changed, 15526 insertions(+), 0 deletions(-)
diff --git a/vendor/github.com/RoaringBitmap/roaring/AUTHORS b/vendor/github.com/RoaringBitmap/roaring/AUTHORS
new file mode 100644
index 0000000000..08c074047f
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/AUTHORS
@@ -0,0 +1,10 @@
+# This is the official list of roaring authors for copyright purposes.
+
+Todd Gruben (@tgruben),
+Daniel Lemire (@lemire),
+Elliot Murphy (@statik),
+Bob Potter (@bpot),
+Tyson Maly (@tvmaly),
+Will Glynn (@willglynn),
+Brent Pedersen (@brentp),
+Maciej Biłas (@maciej)
diff --git a/vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS b/vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS
new file mode 100644
index 0000000000..70b4735dad
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS
@@ -0,0 +1,12 @@
+# This is the official list of roaring contributors
+
+Todd Gruben (@tgruben),
+Daniel Lemire (@lemire),
+Elliot Murphy (@statik),
+Bob Potter (@bpot),
+Tyson Maly (@tvmaly),
+Will Glynn (@willglynn),
+Brent Pedersen (@brentp),
+Jason E. Aten (@glycerine),
+Vali Malinoiu (@0x4139),
+Forud Ghafouri (@fzerorubigd)
\ No newline at end of file
diff --git a/vendor/github.com/RoaringBitmap/roaring/LICENSE b/vendor/github.com/RoaringBitmap/roaring/LICENSE
new file mode 100644
index 0000000000..aff5f9999b
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016 by the authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/RoaringBitmap/roaring/LICENSE-2.0.txt b/vendor/github.com/RoaringBitmap/roaring/LICENSE-2.0.txt
new file mode 100644
index 0000000000..aff5f9999b
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/LICENSE-2.0.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016 by the authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/RoaringBitmap/roaring/Makefile b/vendor/github.com/RoaringBitmap/roaring/Makefile
new file mode 100644
index 0000000000..d5259b4c9e
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/Makefile
@@ -0,0 +1,121 @@
+.PHONY: help all test format fmtcheck vet lint qa deps clean nuke rle backrle ser fetch-real-roaring-datasets
+
+
+
+
+
+
+
+
+# Display general help about this command
+help:
+ @echo ""
+ @echo "The following commands are available:"
+ @echo ""
+ @echo " make qa : Run all the tests"
+ @echo " make test : Run the unit tests"
+ @echo ""
+ @echo " make format : Format the source code"
+ @echo " make fmtcheck : Check if the source code has been formatted"
+ @echo " make vet : Check for suspicious constructs"
+ @echo " make lint : Check for style errors"
+ @echo ""
+ @echo " make deps : Get the dependencies"
+ @echo " make clean : Remove any build artifact"
+ @echo " make nuke : Deletes any intermediate file"
+ @echo ""
+	@echo "    make fuzz-smat   : Fuzz testing with smat"
+	@echo "    make fuzz-stream : Fuzz testing with stream deserialization"
+	@echo "    make fuzz-buffer : Fuzz testing with buffer deserialization"
+ @echo ""
+
+# Alias for help target
+all: help
+
+# Run the unit tests
+test:
+ go test
+ go test -race -run TestConcurrent*
+# Format the source code
+format:
+ @find ./ -type f -name "*.go" -exec gofmt -w {} \;
+
+# Check if the source code has been formatted
+fmtcheck:
+ @mkdir -p target
+ @find ./ -type f -name "*.go" -exec gofmt -d {} \; | tee target/format.diff
+ @test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; }
+
+# Check for suspicious constructs
+vet:
+ GOPATH=$(GOPATH) go vet ./...
+
+# Check for style errors
+lint:
+ GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint ./...
+
+
+
+
+
+# Alias to run all quality-assurance checks
+qa: fmtcheck test vet lint
+
+# --- INSTALL ---
+
+# Get the dependencies
+deps:
+ GOPATH=$(GOPATH) go get github.com/smartystreets/goconvey/convey
+ GOPATH=$(GOPATH) go get github.com/willf/bitset
+ GOPATH=$(GOPATH) go get github.com/golang/lint/golint
+ GOPATH=$(GOPATH) go get github.com/mschoch/smat
+ GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz
+ GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz-build
+ GOPATH=$(GOPATH) go get github.com/glycerine/go-unsnap-stream
+ GOPATH=$(GOPATH) go get github.com/philhofer/fwd
+ GOPATH=$(GOPATH) go get github.com/jtolds/gls
+
+fuzz-smat:
+ go test -tags=gofuzz -run=TestGenerateSmatCorpus
+ go-fuzz-build -func FuzzSmat github.com/RoaringBitmap/roaring
+ go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
+
+
+fuzz-stream:
+ go-fuzz-build -func FuzzSerializationStream github.com/RoaringBitmap/roaring
+ go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
+
+
+fuzz-buffer:
+ go-fuzz-build -func FuzzSerializationBuffer github.com/RoaringBitmap/roaring
+ go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
+
+# Remove any build artifact
+clean:
+ GOPATH=$(GOPATH) go clean ./...
+
+# Deletes any intermediate file
+nuke:
+ rm -rf ./target
+ GOPATH=$(GOPATH) go clean -i ./...
+
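+# Generate the 16-bit RLE sources from the 32-bit ones by textual rewrite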
+rle:
+ cp rle.go rle16.go
+ perl -pi -e 's/32/16/g' rle16.go
+ cp rle_test.go rle16_test.go
+ perl -pi -e 's/32/16/g' rle16_test.go
+
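+# Regenerate rle.go from rle16.go; the final s/2032/2016/ pass restores
+# "2016" strings mangled by the 16 -> 32 rewrite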
+backrle:
+ cp rle16.go rle.go
+ perl -pi -e 's/16/32/g' rle.go
+ perl -pi -e 's/2032/2016/g' rle.go
+
+ser: rle
+ go generate
+
+cover:
+ go test -coverprofile=coverage.out
+ go tool cover -html=coverage.out
+
+fetch-real-roaring-datasets:
+ # pull github.com/RoaringBitmap/real-roaring-datasets -> testdata/real-roaring-datasets
+ git submodule init
+ git submodule update
diff --git a/vendor/github.com/RoaringBitmap/roaring/README.md b/vendor/github.com/RoaringBitmap/roaring/README.md
new file mode 100644
index 0000000000..2c096ce8e6
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/README.md
@@ -0,0 +1,246 @@
+roaring [![Build Status](https://travis-ci.org/RoaringBitmap/roaring.png)](https://travis-ci.org/RoaringBitmap/roaring) [![Coverage Status](https://coveralls.io/repos/github/RoaringBitmap/roaring/badge.svg?branch=master)](https://coveralls.io/github/RoaringBitmap/roaring?branch=master) [![GoDoc](https://godoc.org/github.com/RoaringBitmap/roaring?status.svg)](https://godoc.org/github.com/RoaringBitmap/roaring) [![Go Report Card](https://goreportcard.com/badge/RoaringBitmap/roaring)](https://goreportcard.com/report/github.com/RoaringBitmap/roaring)
+=============
+
+This is a Go version of the Roaring bitmap data structure.
+
+
+
+Roaring bitmaps are used by several major systems such as [Apache Lucene][lucene] and derivative systems such as [Solr][solr] and
+[Elasticsearch][elasticsearch], [Metamarkets' Druid][druid], [LinkedIn Pinot][pinot], [Netflix Atlas][atlas], [Apache Spark][spark], [OpenSearchServer][opensearchserver], [Cloud Torrent][cloudtorrent], [Whoosh][whoosh], [Pilosa][pilosa], [Microsoft Visual Studio Team Services (VSTS)][vsts], and eBay's [Apache Kylin][kylin].
+
+[lucene]: https://lucene.apache.org/
+[solr]: https://lucene.apache.org/solr/
+[elasticsearch]: https://www.elastic.co/products/elasticsearch
+[druid]: http://druid.io/
+[spark]: https://spark.apache.org/
+[opensearchserver]: http://www.opensearchserver.com
+[cloudtorrent]: https://github.com/jpillora/cloud-torrent
+[whoosh]: https://bitbucket.org/mchaput/whoosh/wiki/Home
+[pilosa]: https://www.pilosa.com/
+[kylin]: http://kylin.apache.org/
+[pinot]: http://github.com/linkedin/pinot/wiki
+[vsts]: https://www.visualstudio.com/team-services/
+[atlas]: https://github.com/Netflix/atlas
+
+Roaring bitmaps have been found to work well in many important applications:
+
+> Use Roaring for bitmap compression whenever possible. Do not use other bitmap compression methods ([Wang et al., SIGMOD 2017](http://db.ucsd.edu/wp-content/uploads/2017/03/sidm338-wangA.pdf))
+
+
+The ``roaring`` Go library is used by
+* [Cloud Torrent](https://github.com/jpillora/cloud-torrent): a self-hosted remote torrent client
+* [runv](https://github.com/hyperhq/runv): a hypervisor-based runtime for the Open Containers Initiative
+* [InfluxDB](https://www.influxdata.com)
+* [Pilosa](https://www.pilosa.com/)
+* [Bleve](http://www.blevesearch.com)
+
+This library is used in production in several systems; it is also part of the [Awesome Go collection](https://awesome-go.com).
+
+
+There are also [Java](https://github.com/RoaringBitmap/RoaringBitmap) and [C/C++](https://github.com/RoaringBitmap/CRoaring) versions. The Java, C, C++ and Go versions are binary compatible: e.g., you can save bitmaps
+from a Java program and load them back in Go, and vice versa. We have a [format specification](https://github.com/RoaringBitmap/RoaringFormatSpec).
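+
+As a minimal illustration of this interoperability, assuming a file
+`bitmap.bin` produced by the Java or C library following the shared
+format, one could load it in Go as follows (a sketch, not part of this
+repository):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/RoaringBitmap/roaring"
+)
+
+func main() {
+	// bitmap.bin is a hypothetical file serialized by another language.
+	f, err := os.Open("bitmap.bin")
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+	rb := roaring.New()
+	if _, err := rb.ReadFrom(f); err != nil {
+		panic(err)
+	}
+	fmt.Println("cardinality:", rb.GetCardinality())
+}
+```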
+
+
+This code is licensed under Apache License, Version 2.0 (ASL2.0).
+
+Copyright 2016-... by the authors.
+
+
+### References
+
+- Daniel Lemire, Owen Kaser, Nathan Kurz, Luca Deri, Chris O'Hara, François Saint-Jacques, Gregory Ssi-Yan-Kai, Roaring Bitmaps: Implementation of an Optimized Software Library, Software: Practice and Experience 48 (4), 2018 [arXiv:1709.07821](https://arxiv.org/abs/1709.07821)
+- Samy Chambi, Daniel Lemire, Owen Kaser, Robert Godin,
+Better bitmap performance with Roaring bitmaps,
+Software: Practice and Experience 46 (5), 2016.
+http://arxiv.org/abs/1402.6407 (this paper used data from http://lemire.me/data/realroaring2014.html)
+- Daniel Lemire, Gregory Ssi-Yan-Kai, Owen Kaser, Consistently faster and smaller compressed bitmaps with Roaring, Software: Practice and Experience 46 (11), 2016. http://arxiv.org/abs/1603.06549
+
+
+### Dependencies
+
+Dependencies are fetched automatically by giving the `-t` flag to `go get`.
+
+They include:
+ - github.com/smartystreets/goconvey/convey
+ - github.com/willf/bitset
+ - github.com/mschoch/smat
+ - github.com/glycerine/go-unsnap-stream
+ - github.com/philhofer/fwd
+ - github.com/jtolds/gls
+
+Note that the smat library requires Go 1.6 or later.
+
+#### Installation
+
+ - go get -t github.com/RoaringBitmap/roaring
+
+
+### Example
+
+Here is a simplified but complete example:
+
+```go
+package main
+
+import (
+    "bytes"
+    "fmt"
+
+    "github.com/RoaringBitmap/roaring"
+)
+
+
+func main() {
+ // example inspired by https://github.com/fzandona/goroar
+ fmt.Println("==roaring==")
+ rb1 := roaring.BitmapOf(1, 2, 3, 4, 5, 100, 1000)
+ fmt.Println(rb1.String())
+
+ rb2 := roaring.BitmapOf(3, 4, 1000)
+ fmt.Println(rb2.String())
+
+ rb3 := roaring.New()
+ fmt.Println(rb3.String())
+
+ fmt.Println("Cardinality: ", rb1.GetCardinality())
+
+ fmt.Println("Contains 3? ", rb1.Contains(3))
+
+ rb1.And(rb2)
+
+ rb3.Add(1)
+ rb3.Add(5)
+
+ rb3.Or(rb1)
+
+ // computes union of the three bitmaps in parallel using 4 workers
+ roaring.ParOr(4, rb1, rb2, rb3)
+ // computes intersection of the three bitmaps in parallel using 4 workers
+ roaring.ParAnd(4, rb1, rb2, rb3)
+
+
+ // prints 1, 3, 4, 5, 1000
+ i := rb3.Iterator()
+ for i.HasNext() {
+ fmt.Println(i.Next())
+ }
+ fmt.Println()
+
+ // next we include an example of serialization
+ buf := new(bytes.Buffer)
+ rb1.WriteTo(buf) // we omit error handling
+    newrb := roaring.New()
+ newrb.ReadFrom(buf)
+ if rb1.Equals(newrb) {
+ fmt.Println("I wrote the content to a byte stream and read it back.")
+ }
+}
+```
+
+If you wish to use serialization and handle errors, you might want to
+consider the following code sample:
+
+```go
+    rb := BitmapOf(1, 2, 3, 4, 5, 100, 1000)
+    buf := new(bytes.Buffer)
+    _, err := rb.WriteTo(buf)
+    if err != nil {
+        t.Errorf("Failed writing")
+    }
+    newrb := New()
+    _, err = newrb.ReadFrom(buf)
+    if err != nil {
+        t.Errorf("Failed reading")
+    }
+    if !rb.Equals(newrb) {
+        t.Errorf("Cannot retrieve serialized version")
+    }
+```
+
+Given N integers in [0,x), the serialized size in bytes of
+a Roaring bitmap should never exceed this bound:
+
+`` 8 + 9 * ((long)x+65535)/65536 + 2 * N ``
+
+That is, given a fixed overhead for the universe size (x), Roaring
+bitmaps never use more than 2 bytes per integer. You can call
+``BoundSerializedSizeInBytes`` for a more precise estimate.
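+
+For instance, a quick back-of-the-envelope check of this bound (an
+illustration, not part of the library) for N = 1000 integers drawn from
+[0, 1<<20):
+
+```go
+package main
+
+import "fmt"
+
+func main() {
+	x := uint64(1) << 20 // universe size
+	n := uint64(1000)    // number of stored integers
+	bound := 8 + 9*((x+65535)/65536) + 2*n
+	fmt.Println(bound) // 2152, i.e. at most about 2.15 bytes per integer
+}
+```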
+
+
+### Documentation
+
+Current documentation is available at http://godoc.org/github.com/RoaringBitmap/roaring
+
+### Goroutine safety
+
+In general, it is not safe to access the same bitmap from
+different goroutines: bitmaps are left unsynchronized for
+performance. Should you want to access a Bitmap from more than one
+goroutine, you should provide synchronization. Typically this is done
+by using channels to pass the *Bitmap around (in Go style, so there is
+only ever one owner), or by using `sync.Mutex` to serialize operations
+on Bitmaps.
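+
+For example, a minimal mutex-guarded wrapper might look like the
+following sketch (an illustration, not an API provided by this library):
+
+```go
+package example
+
+import (
+	"sync"
+
+	"github.com/RoaringBitmap/roaring"
+)
+
+// lockedBitmap is a hypothetical wrapper that serializes all access to
+// a shared Bitmap behind a mutex.
+type lockedBitmap struct {
+	mu sync.Mutex
+	rb *roaring.Bitmap
+}
+
+func (l *lockedBitmap) Add(x uint32) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.rb.Add(x)
+}
+
+func (l *lockedBitmap) Contains(x uint32) bool {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	return l.rb.Contains(x)
+}
+```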
+
+### Coverage
+
+We test our software. For a report on our test coverage, see
+
+https://coveralls.io/github/RoaringBitmap/roaring?branch=master
+
+### Benchmark
+
+Type
+
+ go test -bench Benchmark -run -
+
+To run benchmarks on [Real Roaring Datasets](https://github.com/RoaringBitmap/real-roaring-datasets)
+run the following:
+
+```sh
+go get github.com/RoaringBitmap/real-roaring-datasets
+BENCH_REAL_DATA=1 go test -bench BenchmarkRealData -run -
+```
+
+### Iterative use
+
+You can use roaring with gore:
+
+- go get -u github.com/motemen/gore
+- Make sure that ``$GOPATH/bin`` is in your ``$PATH``.
+- go get github.com/RoaringBitmap/roaring
+
+```
+$ gore
+gore version 0.2.6 :help for help
+gore> :import github.com/RoaringBitmap/roaring
+gore> x:=roaring.New()
+gore> x.Add(1)
+gore> x.String()
+"{1}"
+```
+
+
+### Fuzz testing
+
+You can help us test the library further with fuzz testing:
+
+ go get github.com/dvyukov/go-fuzz/go-fuzz
+ go get github.com/dvyukov/go-fuzz/go-fuzz-build
+ go test -tags=gofuzz -run=TestGenerateSmatCorpus
+ go-fuzz-build github.com/RoaringBitmap/roaring
+ go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
+
+Let it run, and if the number of crashers is greater than 0, check out
+the reports in the workdir, where you should find the panic goroutine
+stack traces.
+
+### Alternative in Go
+
+There is a Go version wrapping the C/C++ implementation: https://github.com/RoaringBitmap/gocroaring
+
+For an alternative implementation in Go, see https://github.com/fzandona/goroar.
+The two versions were written independently.
+
+
+### Mailing list/discussion group
+
+https://groups.google.com/forum/#!forum/roaring-bitmaps
diff --git a/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go b/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go
new file mode 100644
index 0000000000..c395868210
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go
@@ -0,0 +1,960 @@
+package roaring
+
+import (
+ "fmt"
+)
+
+//go:generate msgp -unexported
+
+type arrayContainer struct {
+ content []uint16
+}
+
+func (ac *arrayContainer) String() string {
+ s := "{"
+ for it := ac.getShortIterator(); it.hasNext(); {
+ s += fmt.Sprintf("%v, ", it.next())
+ }
+ return s + "}"
+}
+
+func (ac *arrayContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) {
+ for k := 0; k < len(ac.content); k++ {
+ x[k+i] = uint32(ac.content[k]) | mask
+ }
+}
+
+func (ac *arrayContainer) getShortIterator() shortIterable {
+ return &shortIterator{ac.content, 0}
+}
+
+func (ac *arrayContainer) getManyIterator() manyIterable {
+ return &manyIterator{ac.content, 0}
+}
+
+func (ac *arrayContainer) minimum() uint16 {
+ return ac.content[0] // assume not empty
+}
+
+func (ac *arrayContainer) maximum() uint16 {
+ return ac.content[len(ac.content)-1] // assume not empty
+}
+
+func (ac *arrayContainer) getSizeInBytes() int {
+ return ac.getCardinality() * 2
+}
+
+func (ac *arrayContainer) serializedSizeInBytes() int {
+ return ac.getCardinality() * 2
+}
+
+func arrayContainerSizeInBytes(card int) int {
+ return card * 2
+}
+
+// add the values in the range [firstOfRange,endx)
+func (ac *arrayContainer) iaddRange(firstOfRange, endx int) container {
+ if firstOfRange >= endx {
+ return ac
+ }
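+	// binarySearch returns the index of the value when it is present and
+	// -(insertionPoint)-1 when it is absent, so -result-1 recovers the
+	// slot where the value would be inserted to keep content sorted.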
+ indexstart := binarySearch(ac.content, uint16(firstOfRange))
+ if indexstart < 0 {
+ indexstart = -indexstart - 1
+ }
+ indexend := binarySearch(ac.content, uint16(endx-1))
+ if indexend < 0 {
+ indexend = -indexend - 1
+ } else {
+ indexend++
+ }
+ rangelength := endx - firstOfRange
+ newcardinality := indexstart + (ac.getCardinality() - indexend) + rangelength
+ if newcardinality > arrayDefaultMaxSize {
+ a := ac.toBitmapContainer()
+ return a.iaddRange(firstOfRange, endx)
+ }
+ if cap(ac.content) < newcardinality {
+ tmp := make([]uint16, newcardinality, newcardinality)
+ copy(tmp[:indexstart], ac.content[:indexstart])
+ copy(tmp[indexstart+rangelength:], ac.content[indexend:])
+
+ ac.content = tmp
+ } else {
+ ac.content = ac.content[:newcardinality]
+ copy(ac.content[indexstart+rangelength:], ac.content[indexend:])
+
+ }
+ for k := 0; k < rangelength; k++ {
+ ac.content[k+indexstart] = uint16(firstOfRange + k)
+ }
+ return ac
+}
+
+// remove the values in the range [firstOfRange,endx)
+func (ac *arrayContainer) iremoveRange(firstOfRange, endx int) container {
+ if firstOfRange >= endx {
+ return ac
+ }
+ indexstart := binarySearch(ac.content, uint16(firstOfRange))
+ if indexstart < 0 {
+ indexstart = -indexstart - 1
+ }
+ indexend := binarySearch(ac.content, uint16(endx-1))
+ if indexend < 0 {
+ indexend = -indexend - 1
+ } else {
+ indexend++
+ }
+ rangelength := indexend - indexstart
+ answer := ac
+ copy(answer.content[indexstart:], ac.content[indexstart+rangelength:])
+ answer.content = answer.content[:ac.getCardinality()-rangelength]
+ return answer
+}
+
+// flip the values in the range [firstOfRange,endx)
+func (ac *arrayContainer) not(firstOfRange, endx int) container {
+	if firstOfRange >= endx {
+		return ac.clone()
+ }
+	return ac.notClose(firstOfRange, endx-1) // flip the inclusive range [firstOfRange,endx-1]
+}
+
+// flip the values in the range [firstOfRange,lastOfRange]
+func (ac *arrayContainer) notClose(firstOfRange, lastOfRange int) container {
+ if firstOfRange > lastOfRange { // unlike add and remove, not uses an inclusive range [firstOfRange,lastOfRange]
+ return ac.clone()
+ }
+
+	// determine the span of array indices to be affected
+	startIndex := binarySearch(ac.content, uint16(firstOfRange))
+	if startIndex < 0 {
+		startIndex = -startIndex - 1
+	}
+	lastIndex := binarySearch(ac.content, uint16(lastOfRange))
+ if lastIndex < 0 {
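+		// lastIndex is now -(insertionPoint)-1, so -lastIndex-2 is the
+		// index of the largest value strictly below lastOfRange.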
+ lastIndex = -lastIndex - 2
+ }
+ currentValuesInRange := lastIndex - startIndex + 1
+ spanToBeFlipped := lastOfRange - firstOfRange + 1
+ newValuesInRange := spanToBeFlipped - currentValuesInRange
+ cardinalityChange := newValuesInRange - currentValuesInRange
+ newCardinality := len(ac.content) + cardinalityChange
+	if newCardinality > arrayDefaultMaxSize {
+		// the flipped result no longer fits in an array container
+ return ac.toBitmapContainer().not(firstOfRange, lastOfRange+1)
+ }
+ answer := newArrayContainer()
+	answer.content = make([]uint16, newCardinality)
+
+ copy(answer.content, ac.content[:startIndex])
+ outPos := startIndex
+ inPos := startIndex
+ valInRange := firstOfRange
+ for ; valInRange <= lastOfRange && inPos <= lastIndex; valInRange++ {
+ if uint16(valInRange) != ac.content[inPos] {
+ answer.content[outPos] = uint16(valInRange)
+ outPos++
+ } else {
+ inPos++
+ }
+ }
+
+ for ; valInRange <= lastOfRange; valInRange++ {
+ answer.content[outPos] = uint16(valInRange)
+ outPos++
+ }
+
+ for i := lastIndex + 1; i < len(ac.content); i++ {
+ answer.content[outPos] = ac.content[i]
+ outPos++
+ }
+ answer.content = answer.content[:newCardinality]
+ return answer
+
+}
+
+func (ac *arrayContainer) equals(o container) bool {
+
+ srb, ok := o.(*arrayContainer)
+ if ok {
+ // Check if the containers are the same object.
+ if ac == srb {
+ return true
+ }
+
+ if len(srb.content) != len(ac.content) {
+ return false
+ }
+
+ for i, v := range ac.content {
+ if v != srb.content[i] {
+ return false
+ }
+ }
+ return true
+ }
+
+ // use generic comparison
+ bCard := o.getCardinality()
+ aCard := ac.getCardinality()
+ if bCard != aCard {
+ return false
+ }
+
+ ait := ac.getShortIterator()
+ bit := o.getShortIterator()
+ for ait.hasNext() {
+ if bit.next() != ait.next() {
+ return false
+ }
+ }
+ return true
+}
+
+func (ac *arrayContainer) toBitmapContainer() *bitmapContainer {
+ bc := newBitmapContainer()
+ bc.loadData(ac)
+	return bc
+}
+
+func (ac *arrayContainer) iadd(x uint16) (wasNew bool) {
+ // Special case adding to the end of the container.
+ l := len(ac.content)
+ if l > 0 && l < arrayDefaultMaxSize && ac.content[l-1] < x {
+ ac.content = append(ac.content, x)
+ return true
+ }
+
+ loc := binarySearch(ac.content, x)
+
+ if loc < 0 {
+ s := ac.content
+ i := -loc - 1
+ s = append(s, 0)
+ copy(s[i+1:], s[i:])
+ s[i] = x
+ ac.content = s
+ return true
+ }
+ return false
+}
+
+func (ac *arrayContainer) iaddReturnMinimized(x uint16) container {
+ // Special case adding to the end of the container.
+ l := len(ac.content)
+ if l > 0 && l < arrayDefaultMaxSize && ac.content[l-1] < x {
+ ac.content = append(ac.content, x)
+ return ac
+ }
+
+ loc := binarySearch(ac.content, x)
+
+ if loc < 0 {
+ if len(ac.content) >= arrayDefaultMaxSize {
+ a := ac.toBitmapContainer()
+ a.iadd(x)
+ return a
+ }
+ s := ac.content
+ i := -loc - 1
+ s = append(s, 0)
+ copy(s[i+1:], s[i:])
+ s[i] = x
+ ac.content = s
+ }
+ return ac
+}
+
+// iremoveReturnMinimized is allowed to change the return type to minimize storage.
+func (ac *arrayContainer) iremoveReturnMinimized(x uint16) container {
+ ac.iremove(x)
+ return ac
+}
+
+func (ac *arrayContainer) iremove(x uint16) bool {
+ loc := binarySearch(ac.content, x)
+ if loc >= 0 {
+ s := ac.content
+ s = append(s[:loc], s[loc+1:]...)
+ ac.content = s
+ return true
+ }
+ return false
+}
+
+func (ac *arrayContainer) remove(x uint16) container {
+ out := &arrayContainer{make([]uint16, len(ac.content))}
+ copy(out.content, ac.content[:])
+
+ loc := binarySearch(out.content, x)
+ if loc >= 0 {
+ s := out.content
+ s = append(s[:loc], s[loc+1:]...)
+ out.content = s
+ }
+ return out
+}
+
+func (ac *arrayContainer) or(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return ac.orArray(x)
+ case *bitmapContainer:
+ return x.orArray(ac)
+ case *runContainer16:
+ if x.isFull() {
+ return x.clone()
+ }
+ return x.orArray(ac)
+ }
+ panic("unsupported container type")
+}
+
+func (ac *arrayContainer) orCardinality(a container) int {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return ac.orArrayCardinality(x)
+ case *bitmapContainer:
+ return x.orArrayCardinality(ac)
+ case *runContainer16:
+ return x.orArrayCardinality(ac)
+ }
+ panic("unsupported container type")
+}
+
+func (ac *arrayContainer) ior(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return ac.iorArray(x)
+ case *bitmapContainer:
+		return x.orArray(ac)
+ //return ac.iorBitmap(x) // note: this does not make sense
+ case *runContainer16:
+ if x.isFull() {
+ return x.clone()
+ }
+ return ac.iorRun16(x)
+ }
+ panic("unsupported container type")
+}
+
+func (ac *arrayContainer) iorArray(value2 *arrayContainer) container {
+ value1 := ac
+ len1 := value1.getCardinality()
+ len2 := value2.getCardinality()
+ maxPossibleCardinality := len1 + len2
+ if maxPossibleCardinality > arrayDefaultMaxSize { // it could be a bitmap!
+ bc := newBitmapContainer()
+ for k := 0; k < len(value2.content); k++ {
+ v := value2.content[k]
+ i := uint(v) >> 6
+ mask := uint64(1) << (v % 64)
+ bc.bitmap[i] |= mask
+ }
+ for k := 0; k < len(ac.content); k++ {
+ v := ac.content[k]
+ i := uint(v) >> 6
+ mask := uint64(1) << (v % 64)
+ bc.bitmap[i] |= mask
+ }
+ bc.cardinality = int(popcntSlice(bc.bitmap))
+ if bc.cardinality <= arrayDefaultMaxSize {
+ return bc.toArrayContainer()
+ }
+ return bc
+ }
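+	// make room by shifting the existing len1 values to the tail of the
+	// slice, then merge the two sorted runs back into the front; union2by2's
+	// write position never overtakes its read position in the tail.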
+ if maxPossibleCardinality > cap(value1.content) {
+ newcontent := make([]uint16, 0, maxPossibleCardinality)
+ copy(newcontent[len2:maxPossibleCardinality], ac.content[0:len1])
+ ac.content = newcontent
+ } else {
+ copy(ac.content[len2:maxPossibleCardinality], ac.content[0:len1])
+ }
+ nl := union2by2(value1.content[len2:maxPossibleCardinality], value2.content, ac.content)
+ ac.content = ac.content[:nl] // reslice to match actual used capacity
+ return ac
+}
+
+// Note: such code does not make practical sense, except for lazy evaluations
+func (ac *arrayContainer) iorBitmap(bc2 *bitmapContainer) container {
+ bc1 := ac.toBitmapContainer()
+ bc1.iorBitmap(bc2)
+ *ac = *newArrayContainerFromBitmap(bc1)
+ return ac
+}
+
+func (ac *arrayContainer) iorRun16(rc *runContainer16) container {
+ bc1 := ac.toBitmapContainer()
+ bc2 := rc.toBitmapContainer()
+ bc1.iorBitmap(bc2)
+ *ac = *newArrayContainerFromBitmap(bc1)
+ return ac
+}
+
+func (ac *arrayContainer) lazyIOR(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return ac.lazyIorArray(x)
+ case *bitmapContainer:
+ return ac.lazyIorBitmap(x)
+ case *runContainer16:
+ if x.isFull() {
+ return x.clone()
+ }
+ return ac.lazyIorRun16(x)
+
+ }
+ panic("unsupported container type")
+}
+
+func (ac *arrayContainer) lazyIorArray(ac2 *arrayContainer) container {
+ // TODO actually make this lazy
+ return ac.iorArray(ac2)
+}
+
+func (ac *arrayContainer) lazyIorBitmap(bc *bitmapContainer) container {
+ // TODO actually make this lazy
+ return ac.iorBitmap(bc)
+}
+
+func (ac *arrayContainer) lazyIorRun16(rc *runContainer16) container {
+ // TODO actually make this lazy
+ return ac.iorRun16(rc)
+}
+
+func (ac *arrayContainer) lazyOR(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return ac.lazyorArray(x)
+ case *bitmapContainer:
+ return a.lazyOR(ac)
+ case *runContainer16:
+ if x.isFull() {
+ return x.clone()
+ }
+ return x.orArray(ac)
+ }
+ panic("unsupported container type")
+}
+
+func (ac *arrayContainer) orArray(value2 *arrayContainer) container {
+ value1 := ac
+ maxPossibleCardinality := value1.getCardinality() + value2.getCardinality()
+ if maxPossibleCardinality > arrayDefaultMaxSize { // it could be a bitmap!
+ bc := newBitmapContainer()
+ for k := 0; k < len(value2.content); k++ {
+ v := value2.content[k]
+ i := uint(v) >> 6
+ mask := uint64(1) << (v % 64)
+ bc.bitmap[i] |= mask
+ }
+ for k := 0; k < len(ac.content); k++ {
+ v := ac.content[k]
+ i := uint(v) >> 6
+ mask := uint64(1) << (v % 64)
+ bc.bitmap[i] |= mask
+ }
+ bc.cardinality = int(popcntSlice(bc.bitmap))
+ if bc.cardinality <= arrayDefaultMaxSize {
+ return bc.toArrayContainer()
+ }
+ return bc
+ }
+ answer := newArrayContainerCapacity(maxPossibleCardinality)
+ nl := union2by2(value1.content, value2.content, answer.content)
+ answer.content = answer.content[:nl] // reslice to match actual used capacity
+ return answer
+}
+
+func (ac *arrayContainer) orArrayCardinality(value2 *arrayContainer) int {
+ return union2by2Cardinality(ac.content, value2.content)
+}
+
+func (ac *arrayContainer) lazyorArray(value2 *arrayContainer) container {
+ value1 := ac
+ maxPossibleCardinality := value1.getCardinality() + value2.getCardinality()
+	if maxPossibleCardinality > arrayLazyLowerBound { // it could be a bitmap!
+ bc := newBitmapContainer()
+ for k := 0; k < len(value2.content); k++ {
+ v := value2.content[k]
+ i := uint(v) >> 6
+ mask := uint64(1) << (v % 64)
+ bc.bitmap[i] |= mask
+ }
+ for k := 0; k < len(ac.content); k++ {
+ v := ac.content[k]
+ i := uint(v) >> 6
+ mask := uint64(1) << (v % 64)
+ bc.bitmap[i] |= mask
+ }
+ bc.cardinality = invalidCardinality
+ return bc
+ }
+ answer := newArrayContainerCapacity(maxPossibleCardinality)
+ nl := union2by2(value1.content, value2.content, answer.content)
+ answer.content = answer.content[:nl] // reslice to match actual used capacity
+ return answer
+}
+
+func (ac *arrayContainer) and(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return ac.andArray(x)
+ case *bitmapContainer:
+ return x.and(ac)
+ case *runContainer16:
+ if x.isFull() {
+ return ac.clone()
+ }
+ return x.andArray(ac)
+ }
+ panic("unsupported container type")
+}
+
+func (ac *arrayContainer) andCardinality(a container) int {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return ac.andArrayCardinality(x)
+ case *bitmapContainer:
+ return x.andCardinality(ac)
+ case *runContainer16:
+ return x.andArrayCardinality(ac)
+ }
+ panic("unsupported container type")
+}
+
+func (ac *arrayContainer) intersects(a container) bool {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return ac.intersectsArray(x)
+ case *bitmapContainer:
+ return x.intersects(ac)
+ case *runContainer16:
+ return x.intersects(ac)
+ }
+ panic("unsupported container type")
+}
+
+func (ac *arrayContainer) iand(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return ac.iandArray(x)
+ case *bitmapContainer:
+ return ac.iandBitmap(x)
+ case *runContainer16:
+ if x.isFull() {
+ return ac.clone()
+ }
+ return x.andArray(ac)
+ }
+ panic("unsupported container type")
+}
+
+func (ac *arrayContainer) iandBitmap(bc *bitmapContainer) container {
+ pos := 0
+ c := ac.getCardinality()
+ for k := 0; k < c; k++ {
+ // branchless
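+		// copy v unconditionally, then advance pos only when bc contains v
+		// (bitValue yields 0 or 1), compacting kept values without a branch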
+ v := ac.content[k]
+ ac.content[pos] = v
+ pos += int(bc.bitValue(v))
+ }
+ ac.content = ac.content[:pos]
+ return ac
+
+}
+
+func (ac *arrayContainer) xor(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return ac.xorArray(x)
+ case *bitmapContainer:
+ return a.xor(ac)
+ case *runContainer16:
+ return x.xorArray(ac)
+ }
+ panic("unsupported container type")
+}
+
+func (ac *arrayContainer) xorArray(value2 *arrayContainer) container {
+ value1 := ac
+ totalCardinality := value1.getCardinality() + value2.getCardinality()
+ if totalCardinality > arrayDefaultMaxSize { // it could be a bitmap!
+ bc := newBitmapContainer()
+ for k := 0; k < len(value2.content); k++ {
+ v := value2.content[k]
+ i := uint(v) >> 6
+ bc.bitmap[i] ^= (uint64(1) << (v % 64))
+ }
+ for k := 0; k < len(ac.content); k++ {
+ v := ac.content[k]
+ i := uint(v) >> 6
+ bc.bitmap[i] ^= (uint64(1) << (v % 64))
+ }
+ bc.computeCardinality()
+ if bc.cardinality <= arrayDefaultMaxSize {
+ return bc.toArrayContainer()
+ }
+ return bc
+ }
+ desiredCapacity := totalCardinality
+ answer := newArrayContainerCapacity(desiredCapacity)
+ length := exclusiveUnion2by2(value1.content, value2.content, answer.content)
+ answer.content = answer.content[:length]
+ return answer
+
+}
+
+func (ac *arrayContainer) andNot(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return ac.andNotArray(x)
+ case *bitmapContainer:
+ return ac.andNotBitmap(x)
+ case *runContainer16:
+ return ac.andNotRun16(x)
+ }
+ panic("unsupported container type")
+}
+
+func (ac *arrayContainer) andNotRun16(rc *runContainer16) container {
+ acb := ac.toBitmapContainer()
+ rcb := rc.toBitmapContainer()
+ return acb.andNotBitmap(rcb)
+}
+
+func (ac *arrayContainer) iandNot(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return ac.iandNotArray(x)
+ case *bitmapContainer:
+ return ac.iandNotBitmap(x)
+ case *runContainer16:
+ return ac.iandNotRun16(x)
+ }
+ panic("unsupported container type")
+}
+
+func (ac *arrayContainer) iandNotRun16(rc *runContainer16) container {
+ rcb := rc.toBitmapContainer()
+ acb := ac.toBitmapContainer()
+ acb.iandNotBitmapSurely(rcb)
+ *ac = *(acb.toArrayContainer())
+ return ac
+}
+
+func (ac *arrayContainer) andNotArray(value2 *arrayContainer) container {
+ value1 := ac
+ desiredcapacity := value1.getCardinality()
+ answer := newArrayContainerCapacity(desiredcapacity)
+ length := difference(value1.content, value2.content, answer.content)
+ answer.content = answer.content[:length]
+ return answer
+}
+
+func (ac *arrayContainer) iandNotArray(value2 *arrayContainer) container {
+ length := difference(ac.content, value2.content, ac.content)
+ ac.content = ac.content[:length]
+ return ac
+}
+
+func (ac *arrayContainer) andNotBitmap(value2 *bitmapContainer) container {
+ desiredcapacity := ac.getCardinality()
+ answer := newArrayContainerCapacity(desiredcapacity)
+ answer.content = answer.content[:desiredcapacity]
+ pos := 0
+ for _, v := range ac.content {
+ answer.content[pos] = v
+ pos += 1 - int(value2.bitValue(v))
+ }
+ answer.content = answer.content[:pos]
+ return answer
+}
+
+func (ac *arrayContainer) andBitmap(value2 *bitmapContainer) container {
+ desiredcapacity := ac.getCardinality()
+ answer := newArrayContainerCapacity(desiredcapacity)
+ answer.content = answer.content[:desiredcapacity]
+ pos := 0
+ for _, v := range ac.content {
+ answer.content[pos] = v
+ pos += int(value2.bitValue(v))
+ }
+ answer.content = answer.content[:pos]
+ return answer
+}
+
+func (ac *arrayContainer) iandNotBitmap(value2 *bitmapContainer) container {
+ pos := 0
+ for _, v := range ac.content {
+ ac.content[pos] = v
+ pos += 1 - int(value2.bitValue(v))
+ }
+ ac.content = ac.content[:pos]
+ return ac
+}
+
+func copyOf(array []uint16, size int) []uint16 {
+ result := make([]uint16, size)
+ for i, x := range array {
+ if i == size {
+ break
+ }
+ result[i] = x
+ }
+ return result
+}
+
+// flip the values in the range [firstOfRange,endx)
+func (ac *arrayContainer) inot(firstOfRange, endx int) container {
+ if firstOfRange >= endx {
+ return ac
+ }
+ return ac.inotClose(firstOfRange, endx-1) // flip everything in the inclusive range [firstOfRange,endx-1]
+}
+
+// flip the values in the range [firstOfRange,lastOfRange]
+func (ac *arrayContainer) inotClose(firstOfRange, lastOfRange int) container {
+ //p("ac.inotClose() starting")
+ if firstOfRange > lastOfRange { // unlike add and remove, not() uses an inclusive range [firstOfRange,lastOfRange]
+ return ac
+ }
+ // determine the span of array indices to be affected
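+ // binarySearch returns -(insertionPoint)-1 when the value is absent,
+ // so negative results are decoded back into insertion indices below.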
+ startIndex := binarySearch(ac.content, uint16(firstOfRange))
+ if startIndex < 0 {
+ startIndex = -startIndex - 1
+ }
+ lastIndex := binarySearch(ac.content, uint16(lastOfRange))
+ if lastIndex < 0 {
+ lastIndex = -lastIndex - 1 - 1
+ }
+ currentValuesInRange := lastIndex - startIndex + 1
+ spanToBeFlipped := lastOfRange - firstOfRange + 1
+
+ newValuesInRange := spanToBeFlipped - currentValuesInRange
+ buffer := make([]uint16, newValuesInRange)
+ cardinalityChange := newValuesInRange - currentValuesInRange
+ newCardinality := len(ac.content) + cardinalityChange
+ if cardinalityChange > 0 {
+ if newCardinality > len(ac.content) {
+ if newCardinality > arrayDefaultMaxSize {
+ //p("ac.inotClose() converting to bitmap and doing inot there")
+ bcRet := ac.toBitmapContainer()
+ bcRet.inot(firstOfRange, lastOfRange+1)
+ *ac = *bcRet.toArrayContainer()
+ return bcRet
+ }
+ ac.content = copyOf(ac.content, newCardinality)
+ }
+ base := lastIndex + 1
+ copy(ac.content[lastIndex+1+cardinalityChange:], ac.content[base:base+len(ac.content)-1-lastIndex])
+ ac.negateRange(buffer, startIndex, lastIndex, firstOfRange, lastOfRange+1)
+ } else { // no expansion needed
+ ac.negateRange(buffer, startIndex, lastIndex, firstOfRange, lastOfRange+1)
+ if cardinalityChange < 0 {
+
+ for i := startIndex + newValuesInRange; i < newCardinality; i++ {
+ ac.content[i] = ac.content[i-cardinalityChange]
+ }
+ }
+ }
+ ac.content = ac.content[:newCardinality]
+ //p("bottom of ac.inotClose(): returning ac")
+ return ac
+}
+
+func (ac *arrayContainer) negateRange(buffer []uint16, startIndex, lastIndex, startRange, lastRange int) {
+ // compute the negation into buffer
+ outPos := 0
+ inPos := startIndex // ac.content[inPos] is always >= valInRange
+ // until inPos is exhausted; n.b. it can start out exhausted.
+
+ valInRange := startRange
+ for ; valInRange < lastRange && inPos <= lastIndex; valInRange++ {
+ if uint16(valInRange) != ac.content[inPos] {
+ buffer[outPos] = uint16(valInRange)
+ outPos++
+ } else {
+ inPos++
+ }
+ }
+
+ // if there are extra items (greater than the biggest
+ // pre-existing one in range), buffer them
+ for ; valInRange < lastRange; valInRange++ {
+ buffer[outPos] = uint16(valInRange)
+ outPos++
+ }
+
+ if outPos != len(buffer) {
+ panic("negateRange: internal bug")
+ }
+
+ for i, item := range buffer {
+ ac.content[i+startIndex] = item
+ }
+}
+
+func (ac *arrayContainer) isFull() bool {
+ return false
+}
+
+func (ac *arrayContainer) andArray(value2 *arrayContainer) container {
+ desiredcapacity := minOfInt(ac.getCardinality(), value2.getCardinality())
+ answer := newArrayContainerCapacity(desiredcapacity)
+ length := intersection2by2(
+ ac.content,
+ value2.content,
+ answer.content)
+ answer.content = answer.content[:length]
+ return answer
+}
+
+func (ac *arrayContainer) andArrayCardinality(value2 *arrayContainer) int {
+ return intersection2by2Cardinality(
+ ac.content,
+ value2.content)
+}
+
+func (ac *arrayContainer) intersectsArray(value2 *arrayContainer) bool {
+ return intersects2by2(
+ ac.content,
+ value2.content)
+}
+
+func (ac *arrayContainer) iandArray(value2 *arrayContainer) container {
+ length := intersection2by2(
+ ac.content,
+ value2.content,
+ ac.content)
+ ac.content = ac.content[:length]
+ return ac
+}
+
+func (ac *arrayContainer) getCardinality() int {
+ return len(ac.content)
+}
+
+func (ac *arrayContainer) rank(x uint16) int {
+ answer := binarySearch(ac.content, x)
+ if answer >= 0 {
+ return answer + 1
+ }
+ return -answer - 1
+}
+
+func (ac *arrayContainer) selectInt(x uint16) int {
+ return int(ac.content[x])
+}
+
+func (ac *arrayContainer) clone() container {
+ ptr := arrayContainer{make([]uint16, len(ac.content))}
+ copy(ptr.content, ac.content[:])
+ return &ptr
+}
+
+func (ac *arrayContainer) contains(x uint16) bool {
+ return binarySearch(ac.content, x) >= 0
+}
+
+func (ac *arrayContainer) loadData(bitmapContainer *bitmapContainer) {
+ ac.content = make([]uint16, bitmapContainer.cardinality, bitmapContainer.cardinality)
+ bitmapContainer.fillArray(ac.content)
+}
+func newArrayContainer() *arrayContainer {
+ p := new(arrayContainer)
+ return p
+}
+
+func newArrayContainerFromBitmap(bc *bitmapContainer) *arrayContainer {
+ ac := &arrayContainer{}
+ ac.loadData(bc)
+ return ac
+}
+
+func newArrayContainerCapacity(size int) *arrayContainer {
+ p := new(arrayContainer)
+ p.content = make([]uint16, 0, size)
+ return p
+}
+
+func newArrayContainerSize(size int) *arrayContainer {
+ p := new(arrayContainer)
+ p.content = make([]uint16, size, size)
+ return p
+}
+
+func newArrayContainerRange(firstOfRun, lastOfRun int) *arrayContainer {
+ valuesInRange := lastOfRun - firstOfRun + 1
+ this := newArrayContainerCapacity(valuesInRange)
+ for i := 0; i < valuesInRange; i++ {
+ this.content = append(this.content, uint16(firstOfRun+i))
+ }
+ return this
+}
+
+func (ac *arrayContainer) numberOfRuns() (nr int) {
+ n := len(ac.content)
+ var runlen uint16
+ var cur, prev uint16
+
+ switch n {
+ case 0:
+ return 0
+ case 1:
+ return 1
+ default:
+ for i := 1; i < n; i++ {
+ prev = ac.content[i-1]
+ cur = ac.content[i]
+
+ if cur == prev+1 {
+ runlen++
+ } else {
+ if cur < prev {
+ panic("then fundamental arrayContainer assumption of sorted ac.content was broken")
+ }
+ if cur == prev {
+ panic("then fundamental arrayContainer assumption of deduplicated content was broken")
+ } else {
+ nr++
+ runlen = 0
+ }
+ }
+ }
+ nr++
+ }
+ return
+}
+
+// convert to run or array *if needed*
+func (ac *arrayContainer) toEfficientContainer() container {
+
+ numRuns := ac.numberOfRuns()
+
+ sizeAsRunContainer := runContainer16SerializedSizeInBytes(numRuns)
+ sizeAsBitmapContainer := bitmapContainerSizeInBytes()
+ card := ac.getCardinality()
+ sizeAsArrayContainer := arrayContainerSizeInBytes(card)
+
+ if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) {
+ return newRunContainer16FromArray(ac)
+ }
+ if card <= arrayDefaultMaxSize {
+ return ac
+ }
+ return ac.toBitmapContainer()
+}
+
+func (ac *arrayContainer) containerType() contype {
+ return arrayContype
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go b/vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go
new file mode 100644
index 0000000000..cba6e53e30
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/arraycontainer_gen.go
@@ -0,0 +1,134 @@
+package roaring
+
+// NOTE: THIS FILE WAS PRODUCED BY THE
+// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
+// DO NOT EDIT
+
+import "github.com/tinylib/msgp/msgp"
+
+// DecodeMsg implements msgp.Decodable
+func (z *arrayContainer) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zbzg uint32
+ zbzg, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zbzg > 0 {
+ zbzg--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "content":
+ var zbai uint32
+ zbai, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap(z.content) >= int(zbai) {
+ z.content = (z.content)[:zbai]
+ } else {
+ z.content = make([]uint16, zbai)
+ }
+ for zxvk := range z.content {
+ z.content[zxvk], err = dc.ReadUint16()
+ if err != nil {
+ return
+ }
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *arrayContainer) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 1
+ // write "content"
+ err = en.Append(0x81, 0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteArrayHeader(uint32(len(z.content)))
+ if err != nil {
+ return
+ }
+ for zxvk := range z.content {
+ err = en.WriteUint16(z.content[zxvk])
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *arrayContainer) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 1
+ // string "content"
+ o = append(o, 0x81, 0xa7, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74)
+ o = msgp.AppendArrayHeader(o, uint32(len(z.content)))
+ for zxvk := range z.content {
+ o = msgp.AppendUint16(o, z.content[zxvk])
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *arrayContainer) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zcmr uint32
+ zcmr, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zcmr > 0 {
+ zcmr--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "content":
+ var zajw uint32
+ zajw, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap(z.content) >= int(zajw) {
+ z.content = (z.content)[:zajw]
+ } else {
+ z.content = make([]uint16, zajw)
+ }
+ for zxvk := range z.content {
+ z.content[zxvk], bts, err = msgp.ReadUint16Bytes(bts)
+ if err != nil {
+ return
+ }
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *arrayContainer) Msgsize() (s int) {
+ s = 1 + 8 + msgp.ArrayHeaderSize + (len(z.content) * (msgp.Uint16Size))
+ return
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go
new file mode 100644
index 0000000000..5e58b31f2b
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go
@@ -0,0 +1,982 @@
+package roaring
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+//go:generate msgp -unexported
+
+type bitmapContainer struct {
+ cardinality int
+ bitmap []uint64
+}
+
+func (bc bitmapContainer) String() string {
+ var s string
+ for it := bc.getShortIterator(); it.hasNext(); {
+ s += fmt.Sprintf("%v, ", it.next())
+ }
+ return s
+}
+
+func newBitmapContainer() *bitmapContainer {
+ p := new(bitmapContainer)
+ size := (1 << 16) / 64
+ p.bitmap = make([]uint64, size, size)
+ return p
+}
+
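+// newBitmapContainerwithRange builds a container holding every value in
+// [firstOfRun,lastOfRun]: fully covered words are filled with ones, and
+// xor masks clear the stray bits before firstOfRun and after lastOfRun
+// in the two boundary words.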
+func newBitmapContainerwithRange(firstOfRun, lastOfRun int) *bitmapContainer {
+ bc := newBitmapContainer()
+ bc.cardinality = lastOfRun - firstOfRun + 1
+ if bc.cardinality == maxCapacity {
+ fill(bc.bitmap, uint64(0xffffffffffffffff))
+ } else {
+ firstWord := firstOfRun / 64
+ lastWord := lastOfRun / 64
+ zeroPrefixLength := uint64(firstOfRun & 63)
+ zeroSuffixLength := uint64(63 - (lastOfRun & 63))
+
+ fillRange(bc.bitmap, firstWord, lastWord+1, uint64(0xffffffffffffffff))
+ bc.bitmap[firstWord] ^= ((uint64(1) << zeroPrefixLength) - 1)
+ blockOfOnes := (uint64(1) << zeroSuffixLength) - 1
+ maskOnLeft := blockOfOnes << (uint64(64) - zeroSuffixLength)
+ bc.bitmap[lastWord] ^= maskOnLeft
+ }
+ return bc
+}
+
+func (bc *bitmapContainer) minimum() uint16 {
+ for i := 0; i < len(bc.bitmap); i++ {
+ w := bc.bitmap[i]
+ if w != 0 {
+ r := countTrailingZeros(w)
+ return uint16(r + i*64)
+ }
+ }
+ return MaxUint16
+}
+
+// clz counts the number of leading zero bits in i; i must be non-zero.
+func clz(i uint64) int {
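+ // Binary search for the highest set bit: each step tests whether the
+ // upper half of the remaining window is empty and, if so, adds its
+ // width to n and shifts the lower half up; the final subtraction
+ // corrects for the top bit itself.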
+ n := 1
+ x := uint32(i >> 32)
+ if x == 0 {
+ n += 32
+ x = uint32(i)
+ }
+ if x>>16 == 0 {
+ n += 16
+ x = x << 16
+ }
+ if x>>24 == 0 {
+ n += 8
+ x = x << 8
+ }
+ if x>>28 == 0 {
+ n += 4
+ x = x << 4
+ }
+ if x>>30 == 0 {
+ n += 2
+ x = x << 2
+ }
+ return n - int(x>>31)
+}
+
+func (bc *bitmapContainer) maximum() uint16 {
+ for i := len(bc.bitmap); i > 0; i-- {
+ w := bc.bitmap[i-1]
+ if w != 0 {
+ r := clz(w)
+ return uint16((i-1)*64 + 63 - r)
+ }
+ }
+ return uint16(0)
+}
+
+type bitmapContainerShortIterator struct {
+ ptr *bitmapContainer
+ i int
+}
+
+func (bcsi *bitmapContainerShortIterator) next() uint16 {
+ j := bcsi.i
+ bcsi.i = bcsi.ptr.NextSetBit(bcsi.i + 1)
+ return uint16(j)
+}
+func (bcsi *bitmapContainerShortIterator) hasNext() bool {
+ return bcsi.i >= 0
+}
+
+func newBitmapContainerShortIterator(a *bitmapContainer) *bitmapContainerShortIterator {
+ return &bitmapContainerShortIterator{a, a.NextSetBit(0)}
+}
+
+func (bc *bitmapContainer) getShortIterator() shortIterable {
+ return newBitmapContainerShortIterator(bc)
+}
+
+type bitmapContainerManyIterator struct {
+ ptr *bitmapContainer
+ base int
+ bitset uint64
+}
+
+func (bcmi *bitmapContainerManyIterator) nextMany(hs uint32, buf []uint32) int {
+ n := 0
+ base := bcmi.base
+ bitset := bcmi.bitset
+
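+ // bitset & -bitset isolates the lowest set bit t of the current word;
+ // popcount(t-1) is then the index of that bit within the word.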
+ for n < len(buf) {
+ if bitset == 0 {
+ base += 1
+ if base >= len(bcmi.ptr.bitmap) {
+ bcmi.base = base
+ bcmi.bitset = bitset
+ return n
+ }
+ bitset = bcmi.ptr.bitmap[base]
+ continue
+ }
+ t := bitset & -bitset
+ buf[n] = uint32(((base * 64) + int(popcount(t-1)))) | hs
+ n = n + 1
+ bitset ^= t
+ }
+
+ bcmi.base = base
+ bcmi.bitset = bitset
+ return n
+}
+
+func newBitmapContainerManyIterator(a *bitmapContainer) *bitmapContainerManyIterator {
+ return &bitmapContainerManyIterator{a, -1, 0}
+}
+
+func (bc *bitmapContainer) getManyIterator() manyIterable {
+ return newBitmapContainerManyIterator(bc)
+}
+
+func (bc *bitmapContainer) getSizeInBytes() int {
+ return len(bc.bitmap) * 8 // + bcBaseBytes
+}
+
+func (bc *bitmapContainer) serializedSizeInBytes() int {
+ // Do not return bc.Msgsize() here: that would break GetSerializedSizeInBytes.
+ return len(bc.bitmap) * 8
+}
+
+const bcBaseBytes = int(unsafe.Sizeof(bitmapContainer{}))
+
+// The size of a bitmapContainer does not depend on its cardinality: the bitmap is always fully allocated.
+func bitmapContainerSizeInBytes() int {
+ return bcBaseBytes + (1<<16)/8
+}
+
+func bitmapEquals(a, b []uint64) bool {
+ if len(a) != len(b) {
+ //p("bitmaps differ on length. len(a)=%v; len(b)=%v", len(a), len(b))
+ return false
+ }
+ for i, v := range a {
+ if v != b[i] {
+ //p("bitmaps differ on element i=%v", i)
+ return false
+ }
+ }
+ //p("bitmapEquals returning true")
+ return true
+}
+
+func (bc *bitmapContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) {
+ // TODO: should be written as optimized assembly
+ pos := i
+ base := mask
+ for k := 0; k < len(bc.bitmap); k++ {
+ bitset := bc.bitmap[k]
+ for bitset != 0 {
+ t := bitset & -bitset
+ x[pos] = base + uint32(popcount(t-1))
+ pos++
+ bitset ^= t
+ }
+ base += 64
+ }
+}
+
+func (bc *bitmapContainer) equals(o container) bool {
+ srb, ok := o.(*bitmapContainer)
+ if ok {
+ //p("bitmapContainers.equals: both are bitmapContainers")
+ if srb.cardinality != bc.cardinality {
+ //p("bitmapContainers.equals: card differs: %v vs %v", srb.cardinality, bc.cardinality)
+ return false
+ }
+ return bitmapEquals(bc.bitmap, srb.bitmap)
+ }
+
+ // use generic comparison
+ if bc.getCardinality() != o.getCardinality() {
+ return false
+ }
+ ait := o.getShortIterator()
+ bit := bc.getShortIterator()
+
+ for ait.hasNext() {
+ if bit.next() != ait.next() {
+ return false
+ }
+ }
+ return true
+}
+
+func (bc *bitmapContainer) iaddReturnMinimized(i uint16) container {
+ bc.iadd(i)
+ if bc.isFull() {
+ return newRunContainer16Range(0, MaxUint16)
+ }
+ return bc
+}
+
+func (bc *bitmapContainer) iadd(i uint16) bool {
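+ // Branchless insert: previous ^ newb is nonzero exactly when the bit
+ // was newly set, so shifting that difference back down yields the 0/1
+ // cardinality delta without a conditional.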
+ x := int(i)
+ previous := bc.bitmap[x/64]
+ mask := uint64(1) << (uint(x) % 64)
+ newb := previous | mask
+ bc.bitmap[x/64] = newb
+ bc.cardinality += int((previous ^ newb) >> (uint(x) % 64))
+ return newb != previous
+}
+
+func (bc *bitmapContainer) iremoveReturnMinimized(i uint16) container {
+ if bc.iremove(i) {
+ if bc.cardinality == arrayDefaultMaxSize {
+ return bc.toArrayContainer()
+ }
+ }
+ return bc
+}
+
+// iremove returns true if i was found.
+func (bc *bitmapContainer) iremove(i uint16) bool {
+ /* branchless code
+ w := bc.bitmap[i>>6]
+ mask := uint64(1) << (i % 64)
+ neww := w &^ mask
+ bc.cardinality -= int((w ^ neww) >> (i % 64))
+ bc.bitmap[i>>6] = neww */
+ if bc.contains(i) {
+ bc.cardinality--
+ bc.bitmap[i/64] &^= (uint64(1) << (i % 64))
+ return true
+ }
+ return false
+}
+
+func (bc *bitmapContainer) isFull() bool {
+ return bc.cardinality == int(MaxUint16)+1
+}
+
+func (bc *bitmapContainer) getCardinality() int {
+ return bc.cardinality
+}
+
+func (bc *bitmapContainer) clone() container {
+ ptr := bitmapContainer{bc.cardinality, make([]uint64, len(bc.bitmap))}
+ copy(ptr.bitmap, bc.bitmap[:])
+ return &ptr
+}
+
+// add all values in range [firstOfRange,lastOfRange)
+func (bc *bitmapContainer) iaddRange(firstOfRange, lastOfRange int) container {
+ bc.cardinality += setBitmapRangeAndCardinalityChange(bc.bitmap, firstOfRange, lastOfRange)
+ return bc
+}
+
+// remove all values in range [firstOfRange,lastOfRange)
+func (bc *bitmapContainer) iremoveRange(firstOfRange, lastOfRange int) container {
+ bc.cardinality += resetBitmapRangeAndCardinalityChange(bc.bitmap, firstOfRange, lastOfRange)
+ if bc.getCardinality() <= arrayDefaultMaxSize {
+ return bc.toArrayContainer()
+ }
+ return bc
+}
+
+// flip all values in range [firstOfRange,endx)
+func (bc *bitmapContainer) inot(firstOfRange, endx int) container {
+ p("bc.inot() called with [%v, %v)", firstOfRange, endx)
+ if endx-firstOfRange == maxCapacity {
+ //p("endx-firstOfRange == maxCapacity")
+ flipBitmapRange(bc.bitmap, firstOfRange, endx)
+ bc.cardinality = maxCapacity - bc.cardinality
+ //p("bc.cardinality is now %v", bc.cardinality)
+ } else if endx-firstOfRange > maxCapacity/2 {
+ //p("endx-firstOfRange > maxCapacity/2")
+ flipBitmapRange(bc.bitmap, firstOfRange, endx)
+ bc.computeCardinality()
+ } else {
+ bc.cardinality += flipBitmapRangeAndCardinalityChange(bc.bitmap, firstOfRange, endx)
+ }
+ if bc.getCardinality() <= arrayDefaultMaxSize {
+ return bc.toArrayContainer()
+ }
+ return bc
+}
+
+// flip all values in range [firstOfRange,endx)
+func (bc *bitmapContainer) not(firstOfRange, endx int) container {
+ answer := bc.clone()
+ return answer.inot(firstOfRange, endx)
+}
+
+func (bc *bitmapContainer) or(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return bc.orArray(x)
+ case *bitmapContainer:
+ return bc.orBitmap(x)
+ case *runContainer16:
+ if x.isFull() {
+ return x.clone()
+ }
+ return x.orBitmapContainer(bc)
+ }
+ panic("unsupported container type")
+}
+
+func (bc *bitmapContainer) orCardinality(a container) int {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return bc.orArrayCardinality(x)
+ case *bitmapContainer:
+ return bc.orBitmapCardinality(x)
+ case *runContainer16:
+ return x.orBitmapContainerCardinality(bc)
+ }
+ panic("unsupported container type")
+}
+
+func (bc *bitmapContainer) ior(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return bc.iorArray(x)
+ case *bitmapContainer:
+ return bc.iorBitmap(x)
+ case *runContainer16:
+ if x.isFull() {
+ return x.clone()
+ }
+ for i := range x.iv {
+ bc.iaddRange(int(x.iv[i].start), int(x.iv[i].last())+1)
+ }
+ if bc.isFull() {
+ return newRunContainer16Range(0, MaxUint16)
+ }
+ //bc.computeCardinality()
+ return bc
+ }
+ panic(fmt.Errorf("unsupported container type %T", a))
+}
+
+func (bc *bitmapContainer) lazyIOR(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return bc.lazyIORArray(x)
+ case *bitmapContainer:
+ return bc.lazyIORBitmap(x)
+ case *runContainer16:
+ if x.isFull() {
+ return x.clone()
+ }
+
+ // Manually inlined setBitmapRange function
+ bitmap := bc.bitmap
+ for _, iv := range x.iv {
+ start := int(iv.start)
+ end := int(iv.last()) + 1
+ if start >= end {
+ continue
+ }
+ firstword := start / 64
+ endword := (end - 1) / 64
+ if firstword == endword {
+ bitmap[firstword] |= (^uint64(0) << uint(start%64)) & (^uint64(0) >> (uint(-end) % 64))
+ continue
+ }
+ bitmap[firstword] |= ^uint64(0) << uint(start%64)
+ for i := firstword + 1; i < endword; i++ {
+ bitmap[i] = ^uint64(0)
+ }
+ bitmap[endword] |= ^uint64(0) >> (uint(-end) % 64)
+ }
+ bc.cardinality = invalidCardinality
+ return bc
+ }
+ panic("unsupported container type")
+}
+
+func (bc *bitmapContainer) lazyOR(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return bc.lazyORArray(x)
+ case *bitmapContainer:
+ return bc.lazyORBitmap(x)
+ case *runContainer16:
+ if x.isFull() {
+ return x.clone()
+ }
+ // TODO: implement lazy OR
+ return x.orBitmapContainer(bc)
+ }
+ panic("unsupported container type")
+}
+
+func (bc *bitmapContainer) orArray(value2 *arrayContainer) container {
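+ // The or never clears bits, so aft >= bef and bef-aft wraps to a value
+ // with the top bit set exactly when a new bit was added; shifting by 63
+ // turns that into a branchless 0/1 cardinality increment.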
+ answer := bc.clone().(*bitmapContainer)
+ c := value2.getCardinality()
+ for k := 0; k < c; k++ {
+ v := value2.content[k]
+ i := uint(v) >> 6
+ bef := answer.bitmap[i]
+ aft := bef | (uint64(1) << (v % 64))
+ answer.bitmap[i] = aft
+ answer.cardinality += int((bef - aft) >> 63)
+ }
+ return answer
+}
+
+func (bc *bitmapContainer) orArrayCardinality(value2 *arrayContainer) int {
+ answer := 0
+ c := value2.getCardinality()
+ for k := 0; k < c; k++ {
+ // branchless:
+ v := value2.content[k]
+ i := uint(v) >> 6
+ bef := bc.bitmap[i]
+ aft := bef | (uint64(1) << (v % 64))
+ answer += int((bef - aft) >> 63)
+ }
+ return answer
+}
+
+func (bc *bitmapContainer) orBitmap(value2 *bitmapContainer) container {
+ answer := newBitmapContainer()
+ for k := 0; k < len(answer.bitmap); k++ {
+ answer.bitmap[k] = bc.bitmap[k] | value2.bitmap[k]
+ }
+ answer.computeCardinality()
+ if answer.isFull() {
+ return newRunContainer16Range(0, MaxUint16)
+ }
+ return answer
+}
+
+func (bc *bitmapContainer) orBitmapCardinality(value2 *bitmapContainer) int {
+ return int(popcntOrSlice(bc.bitmap, value2.bitmap))
+}
+
+func (bc *bitmapContainer) andBitmapCardinality(value2 *bitmapContainer) int {
+ return int(popcntAndSlice(bc.bitmap, value2.bitmap))
+}
+
+func (bc *bitmapContainer) computeCardinality() {
+ bc.cardinality = int(popcntSlice(bc.bitmap))
+}
+
+func (bc *bitmapContainer) iorArray(ac *arrayContainer) container {
+ for k := range ac.content {
+ vc := ac.content[k]
+ i := uint(vc) >> 6
+ bef := bc.bitmap[i]
+ aft := bef | (uint64(1) << (vc % 64))
+ bc.bitmap[i] = aft
+ bc.cardinality += int((bef - aft) >> 63)
+ }
+ if bc.isFull() {
+ return newRunContainer16Range(0, MaxUint16)
+ }
+ return bc
+}
+
+func (bc *bitmapContainer) iorBitmap(value2 *bitmapContainer) container {
+ answer := bc
+ answer.cardinality = 0
+ for k := 0; k < len(answer.bitmap); k++ {
+ answer.bitmap[k] = bc.bitmap[k] | value2.bitmap[k]
+ }
+ answer.computeCardinality()
+ if bc.isFull() {
+ return newRunContainer16Range(0, MaxUint16)
+ }
+ return answer
+}
+
+func (bc *bitmapContainer) lazyIORArray(value2 *arrayContainer) container {
+ answer := bc
+ c := value2.getCardinality()
+ for k := 0; k < c; k++ {
+ vc := value2.content[k]
+ i := uint(vc) >> 6
+ answer.bitmap[i] = answer.bitmap[i] | (uint64(1) << (vc % 64))
+ }
+ answer.cardinality = invalidCardinality
+ return answer
+}
+
+func (bc *bitmapContainer) lazyORArray(value2 *arrayContainer) container {
+ answer := bc.clone().(*bitmapContainer)
+ return answer.lazyIORArray(value2)
+}
+
+func (bc *bitmapContainer) lazyIORBitmap(value2 *bitmapContainer) container {
+ answer := bc
+ for k := 0; k < len(answer.bitmap); k++ {
+ answer.bitmap[k] = bc.bitmap[k] | value2.bitmap[k]
+ }
+ bc.cardinality = invalidCardinality
+ return answer
+}
+
+func (bc *bitmapContainer) lazyORBitmap(value2 *bitmapContainer) container {
+ answer := bc.clone().(*bitmapContainer)
+ return answer.lazyIORBitmap(value2)
+}
+
+func (bc *bitmapContainer) xor(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return bc.xorArray(x)
+ case *bitmapContainer:
+ return bc.xorBitmap(x)
+ case *runContainer16:
+ return x.xorBitmap(bc)
+ }
+ panic("unsupported container type")
+}
+
+func (bc *bitmapContainer) xorArray(value2 *arrayContainer) container {
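+ // Toggling a bit adds 1 to the cardinality when it was clear and
+ // subtracts 1 when it was set, hence the 1 - 2*bit update below.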
+ answer := bc.clone().(*bitmapContainer)
+ c := value2.getCardinality()
+ for k := 0; k < c; k++ {
+ vc := value2.content[k]
+ index := uint(vc) >> 6
+ abi := answer.bitmap[index]
+ mask := uint64(1) << (vc % 64)
+ answer.cardinality += 1 - 2*int((abi&mask)>>(vc%64))
+ answer.bitmap[index] = abi ^ mask
+ }
+ if answer.cardinality <= arrayDefaultMaxSize {
+ return answer.toArrayContainer()
+ }
+ return answer
+}
+
+func (bc *bitmapContainer) rank(x uint16) int {
+ // TODO: rewrite in assembly
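+ // rank(x) counts the set bits at positions <= x: whole words below x
+ // are popcounted directly, and the word containing x is shifted left
+ // so that only its relevant low bits remain.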
+ leftover := (uint(x) + 1) & 63
+ if leftover == 0 {
+ return int(popcntSlice(bc.bitmap[:(uint(x)+1)/64]))
+ }
+ return int(popcntSlice(bc.bitmap[:(uint(x)+1)/64]) + popcount(bc.bitmap[(uint(x)+1)/64]<<(64-leftover)))
+}
+
+func (bc *bitmapContainer) selectInt(x uint16) int {
+ remaining := x
+ for k := 0; k < len(bc.bitmap); k++ {
+ w := popcount(bc.bitmap[k])
+ if uint16(w) > remaining {
+ return k*64 + selectBitPosition(bc.bitmap[k], int(remaining))
+ }
+ remaining -= uint16(w)
+ }
+ return -1
+}
+
+func (bc *bitmapContainer) xorBitmap(value2 *bitmapContainer) container {
+ newCardinality := int(popcntXorSlice(bc.bitmap, value2.bitmap))
+
+ if newCardinality > arrayDefaultMaxSize {
+ answer := newBitmapContainer()
+ for k := 0; k < len(answer.bitmap); k++ {
+ answer.bitmap[k] = bc.bitmap[k] ^ value2.bitmap[k]
+ }
+ answer.cardinality = newCardinality
+ if answer.isFull() {
+ return newRunContainer16Range(0, MaxUint16)
+ }
+ return answer
+ }
+ ac := newArrayContainerSize(newCardinality)
+ fillArrayXOR(ac.content, bc.bitmap, value2.bitmap)
+ ac.content = ac.content[:newCardinality]
+ return ac
+}
+
+func (bc *bitmapContainer) and(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return bc.andArray(x)
+ case *bitmapContainer:
+ return bc.andBitmap(x)
+ case *runContainer16:
+ if x.isFull() {
+ return bc.clone()
+ }
+ return x.andBitmapContainer(bc)
+ }
+ panic("unsupported container type")
+}
+
+func (bc *bitmapContainer) andCardinality(a container) int {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return bc.andArrayCardinality(x)
+ case *bitmapContainer:
+ return bc.andBitmapCardinality(x)
+ case *runContainer16:
+ return x.andBitmapContainerCardinality(bc)
+ }
+ panic("unsupported container type")
+}
+
+func (bc *bitmapContainer) intersects(a container) bool {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return bc.intersectsArray(x)
+ case *bitmapContainer:
+ return bc.intersectsBitmap(x)
+ case *runContainer16:
+ return x.intersects(bc)
+ }
+ panic("unsupported container type")
+}
+
+func (bc *bitmapContainer) iand(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return bc.iandArray(x)
+ case *bitmapContainer:
+ return bc.iandBitmap(x)
+ case *runContainer16:
+ if x.isFull() {
+ return bc.clone()
+ }
+ return bc.iandRun16(x)
+ }
+ panic("unsupported container type")
+}
+
+func (bc *bitmapContainer) iandRun16(rc *runContainer16) container {
+ rcb := newBitmapContainerFromRun(rc)
+ return bc.iandBitmap(rcb)
+}
+
+func (bc *bitmapContainer) iandArray(ac *arrayContainer) container {
+ acb := ac.toBitmapContainer()
+ return bc.iandBitmap(acb)
+}
+
+func (bc *bitmapContainer) andArray(value2 *arrayContainer) *arrayContainer {
+ answer := newArrayContainerCapacity(len(value2.content))
+ answer.content = answer.content[:cap(answer.content)]
+ c := value2.getCardinality()
+ pos := 0
+ for k := 0; k < c; k++ {
+ v := value2.content[k]
+ answer.content[pos] = v
+ pos += int(bc.bitValue(v))
+ }
+ answer.content = answer.content[:pos]
+ return answer
+}
+
+func (bc *bitmapContainer) andArrayCardinality(value2 *arrayContainer) int {
+ c := value2.getCardinality()
+ pos := 0
+ for k := 0; k < c; k++ {
+ v := value2.content[k]
+ pos += int(bc.bitValue(v))
+ }
+ return pos
+}
+
+func (bc *bitmapContainer) getCardinalityInRange(start, end uint) int {
+ if start >= end {
+ return 0
+ }
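+ // Mask off the partial words at the two ends of [start,end) and
+ // popcount the fully covered words in between.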
+ firstword := start / 64
+ endword := (end - 1) / 64
+ const allones = ^uint64(0)
+ if firstword == endword {
+ return int(popcount(bc.bitmap[firstword] & ((allones << (start % 64)) & (allones >> ((64 - end) & 63)))))
+ }
+ answer := popcount(bc.bitmap[firstword] & (allones << (start % 64)))
+ answer += popcntSlice(bc.bitmap[firstword+1 : endword])
+ answer += popcount(bc.bitmap[endword] & (allones >> ((64 - end) & 63)))
+ return int(answer)
+}
+
+func (bc *bitmapContainer) andBitmap(value2 *bitmapContainer) container {
+ newcardinality := int(popcntAndSlice(bc.bitmap, value2.bitmap))
+ if newcardinality > arrayDefaultMaxSize {
+ answer := newBitmapContainer()
+ for k := 0; k < len(answer.bitmap); k++ {
+ answer.bitmap[k] = bc.bitmap[k] & value2.bitmap[k]
+ }
+ answer.cardinality = newcardinality
+ return answer
+ }
+ ac := newArrayContainerSize(newcardinality)
+ fillArrayAND(ac.content, bc.bitmap, value2.bitmap)
+ ac.content = ac.content[:newcardinality] // defensive reslice: keep len(content) equal to the cardinality
+ return ac
+}
+
+func (bc *bitmapContainer) intersectsArray(value2 *arrayContainer) bool {
+ c := value2.getCardinality()
+ for k := 0; k < c; k++ {
+ v := value2.content[k]
+ if bc.contains(v) {
+ return true
+ }
+ }
+ return false
+}
+
+func (bc *bitmapContainer) intersectsBitmap(value2 *bitmapContainer) bool {
+ for k := 0; k < len(bc.bitmap); k++ {
+ if (bc.bitmap[k] & value2.bitmap[k]) != 0 {
+ return true
+ }
+ }
+ return false
+}
+
+func (bc *bitmapContainer) iandBitmap(value2 *bitmapContainer) container {
+ newcardinality := int(popcntAndSlice(bc.bitmap, value2.bitmap))
+ for k := 0; k < len(bc.bitmap); k++ {
+ bc.bitmap[k] = bc.bitmap[k] & value2.bitmap[k]
+ }
+ bc.cardinality = newcardinality
+
+ if newcardinality <= arrayDefaultMaxSize {
+ return newArrayContainerFromBitmap(bc)
+ }
+ return bc
+}
+
+func (bc *bitmapContainer) andNot(a container) container {
+ switch x := a.(type) {
+ case *arrayContainer:
+ return bc.andNotArray(x)
+ case *bitmapContainer:
+ return bc.andNotBitmap(x)
+ case *runContainer16:
+ return bc.andNotRun16(x)
+ }
+ panic("unsupported container type")
+}
+
+func (bc *bitmapContainer) andNotRun16(rc *runContainer16) container {
+ rcb := rc.toBitmapContainer()
+ return bc.andNotBitmap(rcb)
+}
+
+func (bc *bitmapContainer) iandNot(a container) container {
+ //p("bitmapContainer.iandNot() starting")
+
+ switch x := a.(type) {
+ case *arrayContainer:
+ return bc.iandNotArray(x)
+ case *bitmapContainer:
+ return bc.iandNotBitmapSurely(x)
+ case *runContainer16:
+ return bc.iandNotRun16(x)
+ }
+ panic("unsupported container type")
+}
+
+func (bc *bitmapContainer) iandNotArray(ac *arrayContainer) container {
+ acb := ac.toBitmapContainer()
+ return bc.iandNotBitmapSurely(acb)
+}
+
+func (bc *bitmapContainer) iandNotRun16(rc *runContainer16) container {
+ rcb := rc.toBitmapContainer()
+ return bc.iandNotBitmapSurely(rcb)
+}
+
+func (bc *bitmapContainer) andNotArray(value2 *arrayContainer) container {
+ answer := bc.clone().(*bitmapContainer)
+ c := value2.getCardinality()
+ for k := 0; k < c; k++ {
+ vc := value2.content[k]
+ i := uint(vc) >> 6
+ oldv := answer.bitmap[i]
+ newv := oldv &^ (uint64(1) << (vc % 64))
+ answer.bitmap[i] = newv
+ answer.cardinality -= int((oldv ^ newv) >> (vc % 64))
+ }
+ if answer.cardinality <= arrayDefaultMaxSize {
+ return answer.toArrayContainer()
+ }
+ return answer
+}
+
+func (bc *bitmapContainer) andNotBitmap(value2 *bitmapContainer) container {
+ newCardinality := int(popcntMaskSlice(bc.bitmap, value2.bitmap))
+ if newCardinality > arrayDefaultMaxSize {
+ answer := newBitmapContainer()
+ for k := 0; k < len(answer.bitmap); k++ {
+ answer.bitmap[k] = bc.bitmap[k] &^ value2.bitmap[k]
+ }
+ answer.cardinality = newCardinality
+ return answer
+ }
+ ac := newArrayContainerSize(newCardinality)
+ fillArrayANDNOT(ac.content, bc.bitmap, value2.bitmap)
+ return ac
+}
+
+func (bc *bitmapContainer) iandNotBitmapSurely(value2 *bitmapContainer) *bitmapContainer {
+ newCardinality := int(popcntMaskSlice(bc.bitmap, value2.bitmap))
+ for k := 0; k < len(bc.bitmap); k++ {
+ bc.bitmap[k] = bc.bitmap[k] &^ value2.bitmap[k]
+ }
+ bc.cardinality = newCardinality
+ return bc
+}
+
+func (bc *bitmapContainer) contains(i uint16) bool { // tests whether bit i is set
+ x := uint(i)
+ w := bc.bitmap[x>>6]
+ mask := uint64(1) << (x & 63)
+ return (w & mask) != 0
+}
+
+func (bc *bitmapContainer) bitValue(i uint16) uint64 {
+ x := uint(i)
+ w := bc.bitmap[x>>6]
+ return (w >> (x & 63)) & 1
+}
+
+func (bc *bitmapContainer) loadData(arrayContainer *arrayContainer) {
+ bc.cardinality = arrayContainer.getCardinality()
+ c := arrayContainer.getCardinality()
+ for k := 0; k < c; k++ {
+ x := arrayContainer.content[k]
+ i := int(x) / 64
+ bc.bitmap[i] |= (uint64(1) << uint(x%64))
+ }
+}
+
+func (bc *bitmapContainer) toArrayContainer() *arrayContainer {
+ ac := &arrayContainer{}
+ ac.loadData(bc)
+ return ac
+}
+
+func (bc *bitmapContainer) fillArray(container []uint16) {
+ //TODO: rewrite in assembly
+ pos := 0
+ base := 0
+ for k := 0; k < len(bc.bitmap); k++ {
+ bitset := bc.bitmap[k]
+ for bitset != 0 {
+ t := bitset & -bitset
+ container[pos] = uint16((base + int(popcount(t-1))))
+ pos = pos + 1
+ bitset ^= t
+ }
+ base += 64
+ }
+}
+
+func (bc *bitmapContainer) NextSetBit(i int) int {
+ x := i / 64
+ if x >= len(bc.bitmap) {
+ return -1
+ }
+ w := bc.bitmap[x]
+ w = w >> uint(i%64)
+ if w != 0 {
+ return i + countTrailingZeros(w)
+ }
+ x++
+ for ; x < len(bc.bitmap); x++ {
+ if bc.bitmap[x] != 0 {
+ return (x * 64) + countTrailingZeros(bc.bitmap[x])
+ }
+ }
+ return -1
+}
+
+// numberOfRuns mirrors the Java implementation:
+// https://github.com/RoaringBitmap/RoaringBitmap/blob/master/src/main/java/org/roaringbitmap/BitmapContainer.java#L875-L892
+func (bc *bitmapContainer) numberOfRuns() int {
+ if bc.cardinality == 0 {
+ return 0
+ }
+
+ var numRuns uint64
+ nextWord := bc.bitmap[0]
+
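+ // (^word) & (word<<1) has a bit set at each 1->0 transition, i.e. just
+ // past the end of a run inside the word; (word>>63) &^ nextWord adds a
+ // run that ends exactly at the word boundary.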
+ for i := 0; i < len(bc.bitmap)-1; i++ {
+ word := nextWord
+ nextWord = bc.bitmap[i+1]
+ numRuns += popcount((^word)&(word<<1)) + ((word >> 63) &^ nextWord)
+ }
+
+ word := nextWord
+ numRuns += popcount((^word) & (word << 1))
+ if (word & 0x8000000000000000) != 0 {
+ numRuns++
+ }
+
+ return int(numRuns)
+}
+
+// convert to run or array *if needed*
+func (bc *bitmapContainer) toEfficientContainer() container {
+
+ numRuns := bc.numberOfRuns()
+
+ sizeAsRunContainer := runContainer16SerializedSizeInBytes(numRuns)
+ sizeAsBitmapContainer := bitmapContainerSizeInBytes()
+ card := bc.getCardinality()
+ sizeAsArrayContainer := arrayContainerSizeInBytes(card)
+
+ if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) {
+ return newRunContainer16FromBitmapContainer(bc)
+ }
+ if card <= arrayDefaultMaxSize {
+ return bc.toArrayContainer()
+ }
+ return bc
+}
+
+func newBitmapContainerFromRun(rc *runContainer16) *bitmapContainer {
+
+ if len(rc.iv) == 1 {
+ return newBitmapContainerwithRange(int(rc.iv[0].start), int(rc.iv[0].last()))
+ }
+
+ bc := newBitmapContainer()
+ for i := range rc.iv {
+ setBitmapRange(bc.bitmap, int(rc.iv[i].start), int(rc.iv[i].last())+1)
+ bc.cardinality += int(rc.iv[i].last()) + 1 - int(rc.iv[i].start)
+ }
+ //bc.computeCardinality()
+ return bc
+}
+
+func (bc *bitmapContainer) containerType() contype {
+ return bitmapContype
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go
new file mode 100644
index 0000000000..f6c053e650
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer_gen.go
@@ -0,0 +1,415 @@
+package roaring
+
+// NOTE: THIS FILE WAS PRODUCED BY THE
+// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
+// DO NOT EDIT
+
+import "github.com/tinylib/msgp/msgp"
+
+// DecodeMsg implements msgp.Decodable
+func (z *bitmapContainer) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zbzg uint32
+ zbzg, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zbzg > 0 {
+ zbzg--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "cardinality":
+ z.cardinality, err = dc.ReadInt()
+ if err != nil {
+ return
+ }
+ case "bitmap":
+ var zbai uint32
+ zbai, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap(z.bitmap) >= int(zbai) {
+ z.bitmap = (z.bitmap)[:zbai]
+ } else {
+ z.bitmap = make([]uint64, zbai)
+ }
+ for zxvk := range z.bitmap {
+ z.bitmap[zxvk], err = dc.ReadUint64()
+ if err != nil {
+ return
+ }
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *bitmapContainer) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 2
+ // write "cardinality"
+ err = en.Append(0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79)
+ if err != nil {
+ return err
+ }
+ err = en.WriteInt(z.cardinality)
+ if err != nil {
+ return
+ }
+ // write "bitmap"
+ err = en.Append(0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70)
+ if err != nil {
+ return err
+ }
+ err = en.WriteArrayHeader(uint32(len(z.bitmap)))
+ if err != nil {
+ return
+ }
+ for zxvk := range z.bitmap {
+ err = en.WriteUint64(z.bitmap[zxvk])
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *bitmapContainer) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "cardinality"
+ o = append(o, 0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79)
+ o = msgp.AppendInt(o, z.cardinality)
+ // string "bitmap"
+ o = append(o, 0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70)
+ o = msgp.AppendArrayHeader(o, uint32(len(z.bitmap)))
+ for zxvk := range z.bitmap {
+ o = msgp.AppendUint64(o, z.bitmap[zxvk])
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *bitmapContainer) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zcmr uint32
+ zcmr, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zcmr > 0 {
+ zcmr--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "cardinality":
+ z.cardinality, bts, err = msgp.ReadIntBytes(bts)
+ if err != nil {
+ return
+ }
+ case "bitmap":
+ var zajw uint32
+ zajw, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap(z.bitmap) >= int(zajw) {
+ z.bitmap = (z.bitmap)[:zajw]
+ } else {
+ z.bitmap = make([]uint64, zajw)
+ }
+ for zxvk := range z.bitmap {
+ z.bitmap[zxvk], bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ return
+ }
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *bitmapContainer) Msgsize() (s int) {
+ s = 1 + 12 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.bitmap) * (msgp.Uint64Size))
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *bitmapContainerShortIterator) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zhct uint32
+ zhct, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zhct > 0 {
+ zhct--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "ptr":
+ if dc.IsNil() {
+ err = dc.ReadNil()
+ if err != nil {
+ return
+ }
+ z.ptr = nil
+ } else {
+ if z.ptr == nil {
+ z.ptr = new(bitmapContainer)
+ }
+ var zcua uint32
+ zcua, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zcua > 0 {
+ zcua--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "cardinality":
+ z.ptr.cardinality, err = dc.ReadInt()
+ if err != nil {
+ return
+ }
+ case "bitmap":
+ var zxhx uint32
+ zxhx, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap(z.ptr.bitmap) >= int(zxhx) {
+ z.ptr.bitmap = (z.ptr.bitmap)[:zxhx]
+ } else {
+ z.ptr.bitmap = make([]uint64, zxhx)
+ }
+ for zwht := range z.ptr.bitmap {
+ z.ptr.bitmap[zwht], err = dc.ReadUint64()
+ if err != nil {
+ return
+ }
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "i":
+ z.i, err = dc.ReadInt()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *bitmapContainerShortIterator) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 2
+ // write "ptr"
+ err = en.Append(0x82, 0xa3, 0x70, 0x74, 0x72)
+ if err != nil {
+ return err
+ }
+ if z.ptr == nil {
+ err = en.WriteNil()
+ if err != nil {
+ return
+ }
+ } else {
+ // map header, size 2
+ // write "cardinality"
+ err = en.Append(0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79)
+ if err != nil {
+ return err
+ }
+ err = en.WriteInt(z.ptr.cardinality)
+ if err != nil {
+ return
+ }
+ // write "bitmap"
+ err = en.Append(0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70)
+ if err != nil {
+ return err
+ }
+ err = en.WriteArrayHeader(uint32(len(z.ptr.bitmap)))
+ if err != nil {
+ return
+ }
+ for zwht := range z.ptr.bitmap {
+ err = en.WriteUint64(z.ptr.bitmap[zwht])
+ if err != nil {
+ return
+ }
+ }
+ }
+ // write "i"
+ err = en.Append(0xa1, 0x69)
+ if err != nil {
+ return err
+ }
+ err = en.WriteInt(z.i)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *bitmapContainerShortIterator) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "ptr"
+ o = append(o, 0x82, 0xa3, 0x70, 0x74, 0x72)
+ if z.ptr == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ // map header, size 2
+ // string "cardinality"
+ o = append(o, 0x82, 0xab, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79)
+ o = msgp.AppendInt(o, z.ptr.cardinality)
+ // string "bitmap"
+ o = append(o, 0xa6, 0x62, 0x69, 0x74, 0x6d, 0x61, 0x70)
+ o = msgp.AppendArrayHeader(o, uint32(len(z.ptr.bitmap)))
+ for zwht := range z.ptr.bitmap {
+ o = msgp.AppendUint64(o, z.ptr.bitmap[zwht])
+ }
+ }
+ // string "i"
+ o = append(o, 0xa1, 0x69)
+ o = msgp.AppendInt(o, z.i)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *bitmapContainerShortIterator) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zlqf uint32
+ zlqf, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zlqf > 0 {
+ zlqf--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "ptr":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ z.ptr = nil
+ } else {
+ if z.ptr == nil {
+ z.ptr = new(bitmapContainer)
+ }
+ var zdaf uint32
+ zdaf, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zdaf > 0 {
+ zdaf--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "cardinality":
+ z.ptr.cardinality, bts, err = msgp.ReadIntBytes(bts)
+ if err != nil {
+ return
+ }
+ case "bitmap":
+ var zpks uint32
+ zpks, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap(z.ptr.bitmap) >= int(zpks) {
+ z.ptr.bitmap = (z.ptr.bitmap)[:zpks]
+ } else {
+ z.ptr.bitmap = make([]uint64, zpks)
+ }
+ for zwht := range z.ptr.bitmap {
+ z.ptr.bitmap[zwht], bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ return
+ }
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "i":
+ z.i, bts, err = msgp.ReadIntBytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *bitmapContainerShortIterator) Msgsize() (s int) {
+ s = 1 + 4
+ if z.ptr == nil {
+ s += msgp.NilSize
+ } else {
+ s += 1 + 12 + msgp.IntSize + 7 + msgp.ArrayHeaderSize + (len(z.ptr.bitmap) * (msgp.Uint64Size))
+ }
+ s += 2 + msgp.IntSize
+ return
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/ctz.go b/vendor/github.com/RoaringBitmap/roaring/ctz.go
new file mode 100644
index 0000000000..e399dddebd
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/ctz.go
@@ -0,0 +1,11 @@
+// +build go1.9
+// "go1.9", from Go version 1.9 onward
+// See https://golang.org/pkg/go/build/#hdr-Build_Constraints
+
+package roaring
+
+import "math/bits"
+
+func countTrailingZeros(x uint64) int {
+ return bits.TrailingZeros64(x)
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/ctz_compat.go b/vendor/github.com/RoaringBitmap/roaring/ctz_compat.go
new file mode 100644
index 0000000000..80220e6bee
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/ctz_compat.go
@@ -0,0 +1,71 @@
+// +build !go1.9
+
+package roaring
+
+// Reuse of portions of go/src/math/big standard lib code
+// under this license:
+/*
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+const deBruijn32 = 0x077CB531
+
+var deBruijn32Lookup = []byte{
+ 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
+ 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9,
+}
+
+const deBruijn64 = 0x03f79d71b4ca8b09
+
+var deBruijn64Lookup = []byte{
+ 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
+ 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
+ 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
+ 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
+}
+
+// trailingZeroBits returns the number of consecutive least significant zero
+// bits of x.
+func countTrailingZeros(x uint64) int {
+ // x & -x leaves only the right-most bit set in the word. Let k be the
+ // index of that bit. Since only a single bit is set, the value is two
+ // to the power of k. Multiplying by a power of two is equivalent to
+ // left shifting, in this case by k bits. The de Bruijn constant is
+ // such that all six bit, consecutive substrings are distinct.
+ // Therefore, if we have a left shifted version of this constant we can
+ // find by how many bits it was shifted by looking at which six bit
+ // substring ended up at the top of the word.
+ // (Knuth, volume 4, section 7.3.1)
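+ // Illustrative example: for x = 8 the lowest set bit has index 3, so
+ // x&-x == 8 and the multiplication shifts the constant left by 3; the
+ // top six bits then index a table entry whose value is 3.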
+ if x == 0 {
+ // We have to special-case 0; the formula
+ // below doesn't work for 0.
+ return 64
+ }
+ return int(deBruijn64Lookup[((x&-x)*(deBruijn64))>>58])
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go b/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go
new file mode 100644
index 0000000000..762e500ed8
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/fastaggregation.go
@@ -0,0 +1,215 @@
+package roaring
+
+import (
+ "container/heap"
+)
+
+// lazyOR computes a union whose container cardinalities may be left invalid; the result requires repairAfterLazy before use.
+func lazyOR(x1, x2 *Bitmap) *Bitmap {
+ answer := NewBitmap()
+ pos1 := 0
+ pos2 := 0
+ length1 := x1.highlowcontainer.size()
+ length2 := x2.highlowcontainer.size()
+main:
+ for (pos1 < length1) && (pos2 < length2) {
+ s1 := x1.highlowcontainer.getKeyAtIndex(pos1)
+ s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
+
+ for {
+ if s1 < s2 {
+ answer.highlowcontainer.appendCopy(x1.highlowcontainer, pos1)
+ pos1++
+ if pos1 == length1 {
+ break main
+ }
+ s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
+ } else if s1 > s2 {
+ answer.highlowcontainer.appendCopy(x2.highlowcontainer, pos2)
+ pos2++
+ if pos2 == length2 {
+ break main
+ }
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ } else {
+ c1 := x1.highlowcontainer.getContainerAtIndex(pos1)
+ switch t := c1.(type) {
+ case *arrayContainer:
+ c1 = t.toBitmapContainer()
+ case *runContainer16:
+ if !t.isFull() {
+ c1 = t.toBitmapContainer()
+ }
+ }
+
+ answer.highlowcontainer.appendContainer(s1, c1.lazyOR(x2.highlowcontainer.getContainerAtIndex(pos2)), false)
+ pos1++
+ pos2++
+ if (pos1 == length1) || (pos2 == length2) {
+ break main
+ }
+ s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ }
+ }
+ }
+ if pos1 == length1 {
+ answer.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2)
+ } else if pos2 == length2 {
+ answer.highlowcontainer.appendCopyMany(x1.highlowcontainer, pos1, length1)
+ }
+ return answer
+}
+
+// lazyOR is the in-place variant: x1 absorbs x2 and must be repaired with repairAfterLazy before use.
+func (x1 *Bitmap) lazyOR(x2 *Bitmap) *Bitmap {
+ pos1 := 0
+ pos2 := 0
+ length1 := x1.highlowcontainer.size()
+ length2 := x2.highlowcontainer.size()
+main:
+ for (pos1 < length1) && (pos2 < length2) {
+ s1 := x1.highlowcontainer.getKeyAtIndex(pos1)
+ s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
+
+ for {
+ if s1 < s2 {
+ pos1++
+ if pos1 == length1 {
+ break main
+ }
+ s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
+ } else if s1 > s2 {
+ x1.highlowcontainer.insertNewKeyValueAt(pos1, s2, x2.highlowcontainer.getContainerAtIndex(pos2).clone())
+ pos2++
+ pos1++
+ length1++
+ if pos2 == length2 {
+ break main
+ }
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ } else {
+ c1 := x1.highlowcontainer.getContainerAtIndex(pos1)
+ switch t := c1.(type) {
+ case *arrayContainer:
+ c1 = t.toBitmapContainer()
+ case *runContainer16:
+ if !t.isFull() {
+ c1 = t.toBitmapContainer()
+ }
+ case *bitmapContainer:
+ c1 = x1.highlowcontainer.getWritableContainerAtIndex(pos1)
+ }
+
+ x1.highlowcontainer.containers[pos1] = c1.lazyIOR(x2.highlowcontainer.getContainerAtIndex(pos2))
+ x1.highlowcontainer.needCopyOnWrite[pos1] = false
+ pos1++
+ pos2++
+ if (pos1 == length1) || (pos2 == length2) {
+ break main
+ }
+ s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ }
+ }
+ }
+ if pos1 == length1 {
+ x1.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2)
+ }
+ return x1
+}
+
+// repairAfterLazy restores invariants after lazy aggregation: it recomputes any invalidated cardinalities and renormalizes containers to their most compact form.
+func (x1 *Bitmap) repairAfterLazy() {
+ for pos := 0; pos < x1.highlowcontainer.size(); pos++ {
+ c := x1.highlowcontainer.getContainerAtIndex(pos)
+ switch c.(type) {
+ case *bitmapContainer:
+ if c.(*bitmapContainer).cardinality == invalidCardinality {
+ c = x1.highlowcontainer.getWritableContainerAtIndex(pos)
+ c.(*bitmapContainer).computeCardinality()
+ if c.(*bitmapContainer).getCardinality() <= arrayDefaultMaxSize {
+ x1.highlowcontainer.setContainerAtIndex(pos, c.(*bitmapContainer).toArrayContainer())
+ } else if c.(*bitmapContainer).isFull() {
+ x1.highlowcontainer.setContainerAtIndex(pos, newRunContainer16Range(0, MaxUint16))
+ }
+ }
+ }
+ }
+}
+
+// FastAnd computes the intersection between many bitmaps quickly.
+// Compared to the And function, it can take many bitmaps as input, thus saving the trouble
+// of manually calling "And" many times.
+func FastAnd(bitmaps ...*Bitmap) *Bitmap {
+ if len(bitmaps) == 0 {
+ return NewBitmap()
+ } else if len(bitmaps) == 1 {
+ return bitmaps[0].Clone()
+ }
+ answer := And(bitmaps[0], bitmaps[1])
+ for _, bm := range bitmaps[2:] {
+ answer.And(bm)
+ }
+ return answer
+}
+
+// FastOr computes the union of many bitmaps in a single call, saving the
+// trouble of calling Or repeatedly; it might also be faster.
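+// Illustrative usage (assuming the package is imported as roaring):
+//
+//   combined := roaring.FastOr(bm1, bm2, bm3)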
+func FastOr(bitmaps ...*Bitmap) *Bitmap {
+ if len(bitmaps) == 0 {
+ return NewBitmap()
+ } else if len(bitmaps) == 1 {
+ return bitmaps[0].Clone()
+ }
+ answer := lazyOR(bitmaps[0], bitmaps[1])
+ for _, bm := range bitmaps[2:] {
+ answer = answer.lazyOR(bm)
+ }
+ // here is where repairAfterLazy is called.
+ answer.repairAfterLazy()
+ return answer
+}
+
+// HeapOr computes the union between many bitmaps quickly using a heap.
+// It might be faster than calling Or repeatedly.
+func HeapOr(bitmaps ...*Bitmap) *Bitmap {
+ if len(bitmaps) == 0 {
+ return NewBitmap()
+ }
+ // TODO: for better speed, we could do the operation lazily, see Java implementation
+ pq := make(priorityQueue, len(bitmaps))
+ for i, bm := range bitmaps {
+ pq[i] = &item{bm, i}
+ }
+ heap.Init(&pq)
+
+ for pq.Len() > 1 {
+ x1 := heap.Pop(&pq).(*item)
+ x2 := heap.Pop(&pq).(*item)
+ heap.Push(&pq, &item{Or(x1.value, x2.value), 0})
+ }
+ return heap.Pop(&pq).(*item).value
+}
+
+// HeapXor computes the symmetric difference between many bitmaps quickly (as opposed to calling Xor repeatedly).
+// Internally, this function uses a heap.
+// It might be faster than calling Xor repeatedly.
+func HeapXor(bitmaps ...*Bitmap) *Bitmap {
+ if len(bitmaps) == 0 {
+ return NewBitmap()
+ }
+
+ pq := make(priorityQueue, len(bitmaps))
+ for i, bm := range bitmaps {
+ pq[i] = &item{bm, i}
+ }
+ heap.Init(&pq)
+
+ for pq.Len() > 1 {
+ x1 := heap.Pop(&pq).(*item)
+ x2 := heap.Pop(&pq).(*item)
+ heap.Push(&pq, &item{Xor(x1.value, x2.value), 0})
+ }
+ return heap.Pop(&pq).(*item).value
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/manyiterator.go b/vendor/github.com/RoaringBitmap/roaring/manyiterator.go
new file mode 100644
index 0000000000..b4f630a7b4
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/manyiterator.go
@@ -0,0 +1,23 @@
+package roaring
+
+type manyIterable interface {
+ nextMany(hs uint32, buf []uint32) int
+}
+
+type manyIterator struct {
+ slice []uint16
+ loc int
+}
+
+func (si *manyIterator) nextMany(hs uint32, buf []uint32) int {
+ n := 0
+ l := si.loc
+ s := si.slice
+ for n < len(buf) && l < len(s) {
+ buf[n] = uint32(s[l]) | hs
+ l++
+ n++
+ }
+ si.loc = l
+ return n
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/parallel.go b/vendor/github.com/RoaringBitmap/roaring/parallel.go
new file mode 100644
index 0000000000..09f94fe83c
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/parallel.go
@@ -0,0 +1,613 @@
+package roaring
+
+import (
+ "container/heap"
+ "fmt"
+ "runtime"
+ "sync"
+)
+
+var defaultWorkerCount = runtime.NumCPU()
+
+type bitmapContainerKey struct {
+ key uint16
+ idx int
+ bitmap *Bitmap
+}
+
+type multipleContainers struct {
+ key uint16
+ containers []container
+ idx int
+}
+
+type keyedContainer struct {
+ key uint16
+ container container
+ idx int
+}
+
+type bitmapContainerHeap []bitmapContainerKey
+
+func (h bitmapContainerHeap) Len() int { return len(h) }
+func (h bitmapContainerHeap) Less(i, j int) bool { return h[i].key < h[j].key }
+func (h bitmapContainerHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
+
+func (h *bitmapContainerHeap) Push(x interface{}) {
+ // Push and Pop use pointer receivers because they modify the slice's length,
+ // not just its contents.
+ *h = append(*h, x.(bitmapContainerKey))
+}
+
+func (h *bitmapContainerHeap) Pop() interface{} {
+ old := *h
+ n := len(old)
+ x := old[n-1]
+ *h = old[0 : n-1]
+ return x
+}
+
+func (h bitmapContainerHeap) Peek() bitmapContainerKey {
+ return h[0]
+}
+
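+// popIncrementing returns the smallest key on the heap with its
+// container, then advances that bitmap's cursor to its next container
+// (re-fixing the heap) or pops the entry once the bitmap is exhausted.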
+func (h *bitmapContainerHeap) popIncrementing() (key uint16, container container) {
+ k := h.Peek()
+ key = k.key
+ container = k.bitmap.highlowcontainer.containers[k.idx]
+
+ newIdx := k.idx + 1
+ if newIdx < k.bitmap.highlowcontainer.size() {
+ k = bitmapContainerKey{
+ k.bitmap.highlowcontainer.keys[newIdx],
+ newIdx,
+ k.bitmap,
+ }
+ (*h)[0] = k
+ heap.Fix(h, 0)
+ } else {
+ heap.Pop(h)
+ }
+
+ return
+}
+
+func (h *bitmapContainerHeap) Next(containers []container) multipleContainers {
+ if h.Len() == 0 {
+ return multipleContainers{}
+ }
+
+ key, container := h.popIncrementing()
+ containers = append(containers, container)
+
+ for h.Len() > 0 && key == h.Peek().key {
+ _, container = h.popIncrementing()
+ containers = append(containers, container)
+ }
+
+ return multipleContainers{
+ key,
+ containers,
+ -1,
+ }
+}
+
+func newBitmapContainerHeap(bitmaps ...*Bitmap) bitmapContainerHeap {
+ // Initialize heap
+ var h bitmapContainerHeap = make([]bitmapContainerKey, 0, len(bitmaps))
+ for _, bitmap := range bitmaps {
+ if !bitmap.IsEmpty() {
+ key := bitmapContainerKey{
+ bitmap.highlowcontainer.keys[0],
+ 0,
+ bitmap,
+ }
+ h = append(h, key)
+ }
+ }
+
+ heap.Init(&h)
+
+ return h
+}
+
+func repairAfterLazy(c container) container {
+ switch t := c.(type) {
+ case *bitmapContainer:
+ if t.cardinality == invalidCardinality {
+ t.computeCardinality()
+ }
+
+ if t.getCardinality() <= arrayDefaultMaxSize {
+ return t.toArrayContainer()
+ } else if c.(*bitmapContainer).isFull() {
+ return newRunContainer16Range(0, MaxUint16)
+ }
+ }
+
+ return c
+}
+
+func toBitmapContainer(c container) container {
+ switch t := c.(type) {
+ case *arrayContainer:
+ return t.toBitmapContainer()
+ case *runContainer16:
+ if !t.isFull() {
+ return t.toBitmapContainer()
+ }
+ }
+ return c
+}
+
+func appenderRoutine(bitmapChan chan<- *Bitmap, resultChan <-chan keyedContainer, expectedKeysChan <-chan int) {
+ expectedKeys := -1
+ appendedKeys := 0
+ keys := make([]uint16, 0)
+ containers := make([]container, 0)
+ for appendedKeys != expectedKeys {
+ select {
+ case item := <-resultChan:
+ if len(keys) <= item.idx {
+ keys = append(keys, make([]uint16, item.idx-len(keys)+1)...)
+ containers = append(containers, make([]container, item.idx-len(containers)+1)...)
+ }
+ keys[item.idx] = item.key
+ containers[item.idx] = item.container
+
+ appendedKeys++
+ case msg := <-expectedKeysChan:
+ expectedKeys = msg
+ }
+ }
+ answer := &Bitmap{
+ roaringArray{
+ make([]uint16, 0, expectedKeys),
+ make([]container, 0, expectedKeys),
+ make([]bool, 0, expectedKeys),
+ false,
+ nil,
+ },
+ }
+ for i := range keys {
+ if containers[i] != nil { // in case a resulting container was empty, see ParAnd function
+ answer.highlowcontainer.appendContainer(keys[i], containers[i], false)
+ }
+ }
+
+ bitmapChan <- answer
+}
+
+// ParHeapOr computes the union (OR) of all provided bitmaps in parallel,
+// where the parameter "parallelism" determines how many workers are to be used
+// (if it is set to 0, a default number of workers is chosen)
+// ParHeapOr uses a heap to compute the union. In rare cases it might be faster than ParOr.
+func ParHeapOr(parallelism int, bitmaps ...*Bitmap) *Bitmap {
+
+ bitmapCount := len(bitmaps)
+ if bitmapCount == 0 {
+ return NewBitmap()
+ } else if bitmapCount == 1 {
+ return bitmaps[0].Clone()
+ }
+
+ if parallelism == 0 {
+ parallelism = defaultWorkerCount
+ }
+
+ h := newBitmapContainerHeap(bitmaps...)
+
+ bitmapChan := make(chan *Bitmap)
+ inputChan := make(chan multipleContainers, 128)
+ resultChan := make(chan keyedContainer, 32)
+ expectedKeysChan := make(chan int)
+
+ pool := sync.Pool{
+ New: func() interface{} {
+ return make([]container, 0, len(bitmaps))
+ },
+ }
+
+ orFunc := func() {
+ // Assumes only structs with >=2 containers are passed
+ for input := range inputChan {
+ c := toBitmapContainer(input.containers[0]).lazyOR(input.containers[1])
+ for _, next := range input.containers[2:] {
+ c = c.lazyIOR(next)
+ }
+ c = repairAfterLazy(c)
+ kx := keyedContainer{
+ input.key,
+ c,
+ input.idx,
+ }
+ resultChan <- kx
+ pool.Put(input.containers[:0])
+ }
+ }
+
+ go appenderRoutine(bitmapChan, resultChan, expectedKeysChan)
+
+ for i := 0; i < parallelism; i++ {
+ go orFunc()
+ }
+
+ idx := 0
+ for h.Len() > 0 {
+ ck := h.Next(pool.Get().([]container))
+ if len(ck.containers) == 1 {
+ resultChan <- keyedContainer{
+ ck.key,
+ ck.containers[0],
+ idx,
+ }
+ pool.Put(ck.containers[:0])
+ } else {
+ ck.idx = idx
+ inputChan <- ck
+ }
+ idx++
+ }
+ expectedKeysChan <- idx
+
+ bitmap := <-bitmapChan
+
+ close(inputChan)
+ close(resultChan)
+ close(expectedKeysChan)
+
+ return bitmap
+}
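
// Editorial sketch, not part of the patch: calling ParHeapOr. Passing
// parallelism == 0 selects defaultWorkerCount (runtime.NumCPU()) workers.
//
//	res := roaring.ParHeapOr(0, bm1, bm2, bm3) // default worker count
//	res = roaring.ParHeapOr(4, bm1, bm2, bm3)  // exactly 4 workers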
+
+// ParAnd computes the intersection (AND) of all provided bitmaps in parallel,
+// where the parameter "parallelism" determines how many workers are to be used
+// (if it is set to 0, a default number of workers is chosen)
+func ParAnd(parallelism int, bitmaps ...*Bitmap) *Bitmap {
+ bitmapCount := len(bitmaps)
+ if bitmapCount == 0 {
+ return NewBitmap()
+ } else if bitmapCount == 1 {
+ return bitmaps[0].Clone()
+ }
+
+ if parallelism == 0 {
+ parallelism = defaultWorkerCount
+ }
+
+ h := newBitmapContainerHeap(bitmaps...)
+
+ bitmapChan := make(chan *Bitmap)
+ inputChan := make(chan multipleContainers, 128)
+ resultChan := make(chan keyedContainer, 32)
+ expectedKeysChan := make(chan int)
+
+ andFunc := func() {
+ // Assumes only structs with >=2 containers are passed
+ for input := range inputChan {
+ c := input.containers[0].and(input.containers[1])
+ for _, next := range input.containers[2:] {
+ if c.getCardinality() == 0 {
+ break
+ }
+ c = c.iand(next)
+ }
+
+ // Send a nil explicitly if the result of the intersection is an empty container
+ if c.getCardinality() == 0 {
+ c = nil
+ }
+
+ kx := keyedContainer{
+ input.key,
+ c,
+ input.idx,
+ }
+ resultChan <- kx
+ }
+ }
+
+ go appenderRoutine(bitmapChan, resultChan, expectedKeysChan)
+
+ for i := 0; i < parallelism; i++ {
+ go andFunc()
+ }
+
+ idx := 0
+ for h.Len() > 0 {
+ ck := h.Next(make([]container, 0, 4))
+ if len(ck.containers) == bitmapCount {
+ ck.idx = idx
+ inputChan <- ck
+ idx++
+ }
+ }
+ expectedKeysChan <- idx
+
+ bitmap := <-bitmapChan
+
+ close(inputChan)
+ close(resultChan)
+ close(expectedKeysChan)
+
+ return bitmap
+}
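
// Editorial sketch, not part of the patch: note how ParAnd prunes work.
// Only keys present in every input are dispatched (the
// len(ck.containers) == bitmapCount test above), and a container whose
// intersection comes out empty is reported as nil so that appenderRoutine
// can skip it when assembling the answer.
//
//	res := roaring.ParAnd(0, bm1, bm2) // 0 => default worker count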
+
+// ParOr computes the union (OR) of all provided bitmaps in parallel,
+// where the parameter "parallelism" determines how many workers are to be used
+// (if it is set to 0, a default number of workers is chosen)
+func ParOr(parallelism int, bitmaps ...*Bitmap) *Bitmap {
+ var lKey uint16 = MaxUint16
+ var hKey uint16 = 0
+
+ bitmapsFiltered := bitmaps[:0]
+ for _, b := range bitmaps {
+ if !b.IsEmpty() {
+ bitmapsFiltered = append(bitmapsFiltered, b)
+ }
+ }
+ bitmaps = bitmapsFiltered
+
+ for _, b := range bitmaps {
+ lKey = minOfUint16(lKey, b.highlowcontainer.keys[0])
+ hKey = maxOfUint16(hKey, b.highlowcontainer.keys[b.highlowcontainer.size()-1])
+ }
+
+ if lKey == MaxUint16 && hKey == 0 {
+ return New()
+ } else if len(bitmaps) == 1 {
+ return bitmaps[0]
+ }
+
+ keyRange := hKey - lKey + 1
+ if keyRange == 1 {
+		// revert to FastOr: with a single key in range there is
+		// no container-level aggregation parallelism to exploit
+ return FastOr(bitmaps...)
+ }
+
+ if parallelism == 0 {
+ parallelism = defaultWorkerCount
+ }
+
+ var chunkSize int
+ var chunkCount int
+ if parallelism*4 > int(keyRange) {
+ chunkSize = 1
+ chunkCount = int(keyRange)
+ } else {
+ chunkCount = parallelism * 4
+ chunkSize = (int(keyRange) + chunkCount - 1) / chunkCount
+ }
+
+ if chunkCount*chunkSize < int(keyRange) {
+ // it's fine to panic to indicate an implementation error
+ panic(fmt.Sprintf("invariant check failed: chunkCount * chunkSize < keyRange, %d * %d < %d", chunkCount, chunkSize, keyRange))
+ }
+
+ chunks := make([]*roaringArray, chunkCount)
+
+ chunkSpecChan := make(chan parChunkSpec, minOfInt(maxOfInt(64, 2*parallelism), int(chunkCount)))
+ chunkChan := make(chan parChunk, minOfInt(32, int(chunkCount)))
+
+ orFunc := func() {
+ for spec := range chunkSpecChan {
+ ra := lazyOrOnRange(&bitmaps[0].highlowcontainer, &bitmaps[1].highlowcontainer, spec.start, spec.end)
+ for _, b := range bitmaps[2:] {
+ ra = lazyIOrOnRange(ra, &b.highlowcontainer, spec.start, spec.end)
+ }
+
+ for i, c := range ra.containers {
+ ra.containers[i] = repairAfterLazy(c)
+ }
+
+ chunkChan <- parChunk{ra, spec.idx}
+ }
+ }
+
+ for i := 0; i < parallelism; i++ {
+ go orFunc()
+ }
+
+ go func() {
+ for i := 0; i < chunkCount; i++ {
+ spec := parChunkSpec{
+ start: uint16(int(lKey) + i*chunkSize),
+ end: uint16(minOfInt(int(lKey)+(i+1)*chunkSize-1, int(hKey))),
+ idx: int(i),
+ }
+ chunkSpecChan <- spec
+ }
+ }()
+
+ chunksRemaining := chunkCount
+ for chunk := range chunkChan {
+ chunks[chunk.idx] = chunk.ra
+ chunksRemaining--
+ if chunksRemaining == 0 {
+ break
+ }
+ }
+ close(chunkChan)
+ close(chunkSpecChan)
+
+ containerCount := 0
+ for _, chunk := range chunks {
+ containerCount += chunk.size()
+ }
+
+ result := Bitmap{
+ roaringArray{
+ containers: make([]container, containerCount),
+ keys: make([]uint16, containerCount),
+ needCopyOnWrite: make([]bool, containerCount),
+ },
+ }
+
+ resultOffset := 0
+ for _, chunk := range chunks {
+ copy(result.highlowcontainer.containers[resultOffset:], chunk.containers)
+ copy(result.highlowcontainer.keys[resultOffset:], chunk.keys)
+ copy(result.highlowcontainer.needCopyOnWrite[resultOffset:], chunk.needCopyOnWrite)
+ resultOffset += chunk.size()
+ }
+
+ return &result
+}
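
// Editorial sketch, not part of the patch: the chunking arithmetic in
// ParOr, worked through. With lKey = 0 and hKey = 99 (keyRange = 100) and
// parallelism = 8: parallelism*4 = 32 <= 100, so chunkCount = 32 and
// chunkSize = (100 + 32 - 1) / 32 = 4. Then chunkCount*chunkSize =
// 128 >= 100, satisfying the invariant guarded by the panic above.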
+
+type parChunkSpec struct {
+ start uint16
+ end uint16
+ idx int
+}
+
+type parChunk struct {
+ ra *roaringArray
+ idx int
+}
+
+func (c parChunk) size() int {
+ return c.ra.size()
+}
+
+func parNaiveStartAt(ra *roaringArray, start uint16, last uint16) int {
+ for idx, key := range ra.keys {
+ if key >= start && key <= last {
+ return idx
+ } else if key > last {
+ break
+ }
+ }
+ return ra.size()
+}
+
+func lazyOrOnRange(ra1, ra2 *roaringArray, start, last uint16) *roaringArray {
+ answer := newRoaringArray()
+ length1 := ra1.size()
+ length2 := ra2.size()
+
+ idx1 := parNaiveStartAt(ra1, start, last)
+ idx2 := parNaiveStartAt(ra2, start, last)
+
+ var key1 uint16
+ var key2 uint16
+ if idx1 < length1 && idx2 < length2 {
+ key1 = ra1.getKeyAtIndex(idx1)
+ key2 = ra2.getKeyAtIndex(idx2)
+
+ for key1 <= last && key2 <= last {
+
+ if key1 < key2 {
+ answer.appendCopy(*ra1, idx1)
+ idx1++
+ if idx1 == length1 {
+ break
+ }
+ key1 = ra1.getKeyAtIndex(idx1)
+ } else if key1 > key2 {
+ answer.appendCopy(*ra2, idx2)
+ idx2++
+ if idx2 == length2 {
+ break
+ }
+ key2 = ra2.getKeyAtIndex(idx2)
+ } else {
+ c1 := ra1.getFastContainerAtIndex(idx1, false)
+
+ answer.appendContainer(key1, c1.lazyOR(ra2.getContainerAtIndex(idx2)), false)
+ idx1++
+ idx2++
+ if idx1 == length1 || idx2 == length2 {
+ break
+ }
+
+ key1 = ra1.getKeyAtIndex(idx1)
+ key2 = ra2.getKeyAtIndex(idx2)
+ }
+ }
+ }
+
+ if idx2 < length2 {
+ key2 = ra2.getKeyAtIndex(idx2)
+ for key2 <= last {
+ answer.appendCopy(*ra2, idx2)
+ idx2++
+ if idx2 == length2 {
+ break
+ }
+ key2 = ra2.getKeyAtIndex(idx2)
+ }
+ }
+
+ if idx1 < length1 {
+ key1 = ra1.getKeyAtIndex(idx1)
+ for key1 <= last {
+ answer.appendCopy(*ra1, idx1)
+ idx1++
+ if idx1 == length1 {
+ break
+ }
+ key1 = ra1.getKeyAtIndex(idx1)
+ }
+ }
+ return answer
+}
+
+func lazyIOrOnRange(ra1, ra2 *roaringArray, start, last uint16) *roaringArray {
+ length1 := ra1.size()
+ length2 := ra2.size()
+
+ idx1 := 0
+ idx2 := parNaiveStartAt(ra2, start, last)
+
+ var key1 uint16
+ var key2 uint16
+ if idx1 < length1 && idx2 < length2 {
+ key1 = ra1.getKeyAtIndex(idx1)
+ key2 = ra2.getKeyAtIndex(idx2)
+
+ for key1 <= last && key2 <= last {
+ if key1 < key2 {
+ idx1++
+ if idx1 >= length1 {
+ break
+ }
+ key1 = ra1.getKeyAtIndex(idx1)
+ } else if key1 > key2 {
+ ra1.insertNewKeyValueAt(idx1, key2, ra2.getContainerAtIndex(idx2))
+ ra1.needCopyOnWrite[idx1] = true
+ idx2++
+ idx1++
+ length1++
+ if idx2 >= length2 {
+ break
+ }
+ key2 = ra2.getKeyAtIndex(idx2)
+ } else {
+ c1 := ra1.getFastContainerAtIndex(idx1, true)
+
+ ra1.containers[idx1] = c1.lazyIOR(ra2.getContainerAtIndex(idx2))
+ ra1.needCopyOnWrite[idx1] = false
+ idx1++
+ idx2++
+ if idx1 >= length1 || idx2 >= length2 {
+ break
+ }
+
+ key1 = ra1.getKeyAtIndex(idx1)
+ key2 = ra2.getKeyAtIndex(idx2)
+ }
+ }
+ }
+ if idx2 < length2 {
+ key2 = ra2.getKeyAtIndex(idx2)
+ for key2 <= last {
+ ra1.appendCopy(*ra2, idx2)
+ idx2++
+ if idx2 >= length2 {
+ break
+ }
+ key2 = ra2.getKeyAtIndex(idx2)
+ }
+ }
+ return ra1
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/popcnt.go b/vendor/github.com/RoaringBitmap/roaring/popcnt.go
new file mode 100644
index 0000000000..9d99508ce0
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/popcnt.go
@@ -0,0 +1,11 @@
+// +build go1.9
+// "go1.9", from Go version 1.9 onward
+// See https://golang.org/pkg/go/build/#hdr-Build_Constraints
+
+package roaring
+
+import "math/bits"
+
+func popcount(x uint64) uint64 {
+ return uint64(bits.OnesCount64(x))
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/popcnt_amd64.s b/vendor/github.com/RoaringBitmap/roaring/popcnt_amd64.s
new file mode 100644
index 0000000000..1f13fa2eca
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/popcnt_amd64.s
@@ -0,0 +1,103 @@
+// +build amd64,!appengine,!go1.9
+
+TEXT ·hasAsm(SB),4,$0-1
+MOVQ $1, AX
+CPUID
+SHRQ $23, CX
+ANDQ $1, CX
+MOVB CX, ret+0(FP)
+RET
+
+#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2
+
+TEXT ·popcntSliceAsm(SB),4,$0-32
+XORQ AX, AX
+MOVQ s+0(FP), SI
+MOVQ s_len+8(FP), CX
+TESTQ CX, CX
+JZ popcntSliceEnd
+popcntSliceLoop:
+BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX
+ADDQ DX, AX
+ADDQ $8, SI
+LOOP popcntSliceLoop
+popcntSliceEnd:
+MOVQ AX, ret+24(FP)
+RET
+
+TEXT ·popcntMaskSliceAsm(SB),4,$0-56
+XORQ AX, AX
+MOVQ s+0(FP), SI
+MOVQ s_len+8(FP), CX
+TESTQ CX, CX
+JZ popcntMaskSliceEnd
+MOVQ m+24(FP), DI
+popcntMaskSliceLoop:
+MOVQ (DI), DX
+NOTQ DX
+ANDQ (SI), DX
+POPCNTQ_DX_DX
+ADDQ DX, AX
+ADDQ $8, SI
+ADDQ $8, DI
+LOOP popcntMaskSliceLoop
+popcntMaskSliceEnd:
+MOVQ AX, ret+48(FP)
+RET
+
+TEXT ·popcntAndSliceAsm(SB),4,$0-56
+XORQ AX, AX
+MOVQ s+0(FP), SI
+MOVQ s_len+8(FP), CX
+TESTQ CX, CX
+JZ popcntAndSliceEnd
+MOVQ m+24(FP), DI
+popcntAndSliceLoop:
+MOVQ (DI), DX
+ANDQ (SI), DX
+POPCNTQ_DX_DX
+ADDQ DX, AX
+ADDQ $8, SI
+ADDQ $8, DI
+LOOP popcntAndSliceLoop
+popcntAndSliceEnd:
+MOVQ AX, ret+48(FP)
+RET
+
+TEXT ·popcntOrSliceAsm(SB),4,$0-56
+XORQ AX, AX
+MOVQ s+0(FP), SI
+MOVQ s_len+8(FP), CX
+TESTQ CX, CX
+JZ popcntOrSliceEnd
+MOVQ m+24(FP), DI
+popcntOrSliceLoop:
+MOVQ (DI), DX
+ORQ (SI), DX
+POPCNTQ_DX_DX
+ADDQ DX, AX
+ADDQ $8, SI
+ADDQ $8, DI
+LOOP popcntOrSliceLoop
+popcntOrSliceEnd:
+MOVQ AX, ret+48(FP)
+RET
+
+TEXT ·popcntXorSliceAsm(SB),4,$0-56
+XORQ AX, AX
+MOVQ s+0(FP), SI
+MOVQ s_len+8(FP), CX
+TESTQ CX, CX
+JZ popcntXorSliceEnd
+MOVQ m+24(FP), DI
+popcntXorSliceLoop:
+MOVQ (DI), DX
+XORQ (SI), DX
+POPCNTQ_DX_DX
+ADDQ DX, AX
+ADDQ $8, SI
+ADDQ $8, DI
+LOOP popcntXorSliceLoop
+popcntXorSliceEnd:
+MOVQ AX, ret+48(FP)
+RET
diff --git a/vendor/github.com/RoaringBitmap/roaring/popcnt_asm.go b/vendor/github.com/RoaringBitmap/roaring/popcnt_asm.go
new file mode 100644
index 0000000000..882d7f4ecf
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/popcnt_asm.go
@@ -0,0 +1,67 @@
+// +build amd64,!appengine,!go1.9
+
+package roaring
+
+// *** the following functions are defined in popcnt_amd64.s
+
+//go:noescape
+
+func hasAsm() bool
+
+// useAsm is a flag used to select the Go or assembly implementation of the popcnt function
+var useAsm = hasAsm()
+
+//go:noescape
+
+func popcntSliceAsm(s []uint64) uint64
+
+//go:noescape
+
+func popcntMaskSliceAsm(s, m []uint64) uint64
+
+//go:noescape
+
+func popcntAndSliceAsm(s, m []uint64) uint64
+
+//go:noescape
+
+func popcntOrSliceAsm(s, m []uint64) uint64
+
+//go:noescape
+
+func popcntXorSliceAsm(s, m []uint64) uint64
+
+func popcntSlice(s []uint64) uint64 {
+ if useAsm {
+ return popcntSliceAsm(s)
+ }
+ return popcntSliceGo(s)
+}
+
+func popcntMaskSlice(s, m []uint64) uint64 {
+ if useAsm {
+ return popcntMaskSliceAsm(s, m)
+ }
+ return popcntMaskSliceGo(s, m)
+}
+
+func popcntAndSlice(s, m []uint64) uint64 {
+ if useAsm {
+ return popcntAndSliceAsm(s, m)
+ }
+ return popcntAndSliceGo(s, m)
+}
+
+func popcntOrSlice(s, m []uint64) uint64 {
+ if useAsm {
+ return popcntOrSliceAsm(s, m)
+ }
+ return popcntOrSliceGo(s, m)
+}
+
+func popcntXorSlice(s, m []uint64) uint64 {
+ if useAsm {
+ return popcntXorSliceAsm(s, m)
+ }
+ return popcntXorSliceGo(s, m)
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/popcnt_compat.go b/vendor/github.com/RoaringBitmap/roaring/popcnt_compat.go
new file mode 100644
index 0000000000..7ae82d4c83
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/popcnt_compat.go
@@ -0,0 +1,17 @@
+// +build !go1.9
+
+package roaring
+
+// bit population count, take from
+// https://code.google.com/p/go/issues/detail?id=4988#c11
+// credit: https://code.google.com/u/arnehormann/
+// credit: https://play.golang.org/p/U7SogJ7psJ
+// credit: http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+func popcount(x uint64) uint64 {
+ x -= (x >> 1) & 0x5555555555555555
+ x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
+ x += x >> 4
+ x &= 0x0f0f0f0f0f0f0f0f
+ x *= 0x0101010101010101
+ return x >> 56
+}
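
// Editorial sketch, not part of the patch: the SWAR popcount above works
// in three steps: pairwise bit sums (the 0x5555... mask), carry-free 4-bit
// and byte sums (the 0x3333... and 0x0f0f... masks), then a multiply by
// 0x0101010101010101 that accumulates every byte's count into the top
// byte, which >> 56 extracts. Quick check: popcount(0xF0F0) == 8.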
diff --git a/vendor/github.com/RoaringBitmap/roaring/popcnt_generic.go b/vendor/github.com/RoaringBitmap/roaring/popcnt_generic.go
new file mode 100644
index 0000000000..edf2083f19
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/popcnt_generic.go
@@ -0,0 +1,23 @@
+// +build !amd64 appengine go1.9
+
+package roaring
+
+func popcntSlice(s []uint64) uint64 {
+ return popcntSliceGo(s)
+}
+
+func popcntMaskSlice(s, m []uint64) uint64 {
+ return popcntMaskSliceGo(s, m)
+}
+
+func popcntAndSlice(s, m []uint64) uint64 {
+ return popcntAndSliceGo(s, m)
+}
+
+func popcntOrSlice(s, m []uint64) uint64 {
+ return popcntOrSliceGo(s, m)
+}
+
+func popcntXorSlice(s, m []uint64) uint64 {
+ return popcntXorSliceGo(s, m)
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/popcnt_slices.go b/vendor/github.com/RoaringBitmap/roaring/popcnt_slices.go
new file mode 100644
index 0000000000..d27c5f383d
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/popcnt_slices.go
@@ -0,0 +1,41 @@
+package roaring
+
+func popcntSliceGo(s []uint64) uint64 {
+ cnt := uint64(0)
+ for _, x := range s {
+ cnt += popcount(x)
+ }
+ return cnt
+}
+
+func popcntMaskSliceGo(s, m []uint64) uint64 {
+ cnt := uint64(0)
+ for i := range s {
+ cnt += popcount(s[i] &^ m[i])
+ }
+ return cnt
+}
+
+func popcntAndSliceGo(s, m []uint64) uint64 {
+ cnt := uint64(0)
+ for i := range s {
+ cnt += popcount(s[i] & m[i])
+ }
+ return cnt
+}
+
+func popcntOrSliceGo(s, m []uint64) uint64 {
+ cnt := uint64(0)
+ for i := range s {
+ cnt += popcount(s[i] | m[i])
+ }
+ return cnt
+}
+
+func popcntXorSliceGo(s, m []uint64) uint64 {
+ cnt := uint64(0)
+ for i := range s {
+ cnt += popcount(s[i] ^ m[i])
+ }
+ return cnt
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/priorityqueue.go b/vendor/github.com/RoaringBitmap/roaring/priorityqueue.go
new file mode 100644
index 0000000000..9259a68163
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/priorityqueue.go
@@ -0,0 +1,101 @@
+package roaring
+
+import "container/heap"
+
+/////////////
+// The priorityQueue is used to keep Bitmaps sorted.
+////////////
+
+type item struct {
+ value *Bitmap
+ index int
+}
+
+type priorityQueue []*item
+
+func (pq priorityQueue) Len() int { return len(pq) }
+
+func (pq priorityQueue) Less(i, j int) bool {
+ return pq[i].value.GetSizeInBytes() < pq[j].value.GetSizeInBytes()
+}
+
+func (pq priorityQueue) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+ pq[i].index = i
+ pq[j].index = j
+}
+
+func (pq *priorityQueue) Push(x interface{}) {
+ n := len(*pq)
+ item := x.(*item)
+ item.index = n
+ *pq = append(*pq, item)
+}
+
+func (pq *priorityQueue) Pop() interface{} {
+ old := *pq
+ n := len(old)
+ item := old[n-1]
+ item.index = -1 // for safety
+ *pq = old[0 : n-1]
+ return item
+}
+
+func (pq *priorityQueue) update(item *item, value *Bitmap) {
+ item.value = value
+ heap.Fix(pq, item.index)
+}
+
+/////////////
+// The containerPriorityQueue is used to keep the containers of various Bitmaps sorted.
+////////////
+
+type containeritem struct {
+ value *Bitmap
+ keyindex int
+ index int
+}
+
+type containerPriorityQueue []*containeritem
+
+func (pq containerPriorityQueue) Len() int { return len(pq) }
+
+func (pq containerPriorityQueue) Less(i, j int) bool {
+ k1 := pq[i].value.highlowcontainer.getKeyAtIndex(pq[i].keyindex)
+ k2 := pq[j].value.highlowcontainer.getKeyAtIndex(pq[j].keyindex)
+ if k1 != k2 {
+ return k1 < k2
+ }
+ c1 := pq[i].value.highlowcontainer.getContainerAtIndex(pq[i].keyindex)
+ c2 := pq[j].value.highlowcontainer.getContainerAtIndex(pq[j].keyindex)
+
+ return c1.getCardinality() > c2.getCardinality()
+}
+
+func (pq containerPriorityQueue) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+ pq[i].index = i
+ pq[j].index = j
+}
+
+func (pq *containerPriorityQueue) Push(x interface{}) {
+ n := len(*pq)
+ item := x.(*containeritem)
+ item.index = n
+ *pq = append(*pq, item)
+}
+
+func (pq *containerPriorityQueue) Pop() interface{} {
+ old := *pq
+ n := len(old)
+ item := old[n-1]
+ item.index = -1 // for safety
+ *pq = old[0 : n-1]
+ return item
+}
+
+//func (pq *containerPriorityQueue) update(item *containeritem, value *Bitmap, keyindex int) {
+// item.value = value
+// item.keyindex = keyindex
+// heap.Fix(pq, item.index)
+//}
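
// Editorial sketch, not part of the patch: in-package use of the
// size-ordered queue, mirroring what HeapOr does in fastaggregation.go:
//
//	pq := make(priorityQueue, 0)
//	heap.Init(&pq)
//	heap.Push(&pq, &item{value: bm})        // bm is some *Bitmap
//	smallest := heap.Pop(&pq).(*item).value // fewest bytes first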
diff --git a/vendor/github.com/RoaringBitmap/roaring/rle.go b/vendor/github.com/RoaringBitmap/roaring/rle.go
new file mode 100644
index 0000000000..8f3d4edd68
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/rle.go
@@ -0,0 +1,1667 @@
+package roaring
+
+//
+// Copyright (c) 2016 by the roaring authors.
+// Licensed under the Apache License, Version 2.0.
+//
+// We derive a few lines of code from the sort.Search
+// function in the golang standard library. That function
+// is Copyright 2009 The Go Authors, and licensed
+// under the following BSD-style license.
+/*
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+import (
+ "fmt"
+ "sort"
+ "unsafe"
+)
+
+//go:generate msgp -unexported
+
+// runContainer32 does run-length encoding of sets of
+// uint32 integers.
+type runContainer32 struct {
+ iv []interval32
+ card int64
+
+ // avoid allocation during search
+ myOpts searchOptions `msg:"-"`
+}
+
+// interval32 is the internal to runContainer32
+// structure that maintains the individual [Start, last]
+// closed intervals.
+type interval32 struct {
+ start uint32
+ last uint32
+}
+
+// runlen returns the count of integers in the interval.
+func (iv interval32) runlen() int64 {
+ return 1 + int64(iv.last) - int64(iv.start)
+}
+
+// String produces a human viewable string of the contents.
+func (iv interval32) String() string {
+ return fmt.Sprintf("[%d, %d]", iv.start, iv.last)
+}
+
+func ivalString32(iv []interval32) string {
+ var s string
+ var j int
+ var p interval32
+ for j, p = range iv {
+ s += fmt.Sprintf("%v:[%d, %d], ", j, p.start, p.last)
+ }
+ return s
+}
+
+// String produces a human viewable string of the contents.
+func (rc *runContainer32) String() string {
+ if len(rc.iv) == 0 {
+ return "runContainer32{}"
+ }
+ is := ivalString32(rc.iv)
+ return `runContainer32{` + is + `}`
+}
+
+// uint32Slice is a sort.Sort convenience method
+type uint32Slice []uint32
+
+// Len returns the length of p.
+func (p uint32Slice) Len() int { return len(p) }
+
+// Less returns p[i] < p[j]
+func (p uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
+
+// Swap swaps elements i and j.
+func (p uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+//msgp:ignore addHelper
+
+// addHelper32 helps build a runContainer32.
+type addHelper32 struct {
+ runstart uint32
+ runlen uint32
+ actuallyAdded uint32
+ m []interval32
+ rc *runContainer32
+}
+
+func (ah *addHelper32) storeIval(runstart, runlen uint32) {
+ mi := interval32{start: runstart, last: runstart + runlen}
+ ah.m = append(ah.m, mi)
+}
+
+func (ah *addHelper32) add(cur, prev uint32, i int) {
+ if cur == prev+1 {
+ ah.runlen++
+ ah.actuallyAdded++
+ } else {
+ if cur < prev {
+ panic(fmt.Sprintf("newRunContainer32FromVals sees "+
+ "unsorted vals; vals[%v]=cur=%v < prev=%v. Sort your vals"+
+ " before calling us with alreadySorted == true.", i, cur, prev))
+ }
+ if cur == prev {
+ // ignore duplicates
+ } else {
+ ah.actuallyAdded++
+ ah.storeIval(ah.runstart, ah.runlen)
+ ah.runstart = cur
+ ah.runlen = 0
+ }
+ }
+}
+
+// newRunContainer32Range makes a new container made of just the specified closed interval [rangestart,rangelast]
+func newRunContainer32Range(rangestart uint32, rangelast uint32) *runContainer32 {
+ rc := &runContainer32{}
+ rc.iv = append(rc.iv, interval32{start: rangestart, last: rangelast})
+ return rc
+}
+
+// newRunContainer32FromVals makes a new container from vals.
+//
+// For efficiency, vals should be sorted in ascending order.
+// Ideally vals should not contain duplicates, but we detect and
+// ignore them. If vals is already sorted in ascending order, then
+// pass alreadySorted = true. Otherwise, for !alreadySorted,
+// we will sort vals before creating a runContainer32 of them.
+// We sort the original vals, so this will change what the
+// caller sees in vals as a side effect.
+func newRunContainer32FromVals(alreadySorted bool, vals ...uint32) *runContainer32 {
+ // keep this in sync with newRunContainer32FromArray below
+
+ rc := &runContainer32{}
+ ah := addHelper32{rc: rc}
+
+ if !alreadySorted {
+ sort.Sort(uint32Slice(vals))
+ }
+ n := len(vals)
+ var cur, prev uint32
+ switch {
+ case n == 0:
+ // nothing more
+ case n == 1:
+ ah.m = append(ah.m, interval32{start: vals[0], last: vals[0]})
+ ah.actuallyAdded++
+ default:
+ ah.runstart = vals[0]
+ ah.actuallyAdded++
+ for i := 1; i < n; i++ {
+ prev = vals[i-1]
+ cur = vals[i]
+ ah.add(cur, prev, i)
+ }
+ ah.storeIval(ah.runstart, ah.runlen)
+ }
+ rc.iv = ah.m
+ rc.card = int64(ah.actuallyAdded)
+ return rc
+}
+
+// newRunContainer32FromBitmapContainer makes a new run container from bc,
+// somewhat efficiently. For reference, see the Java
+// https://github.com/RoaringBitmap/RoaringBitmap/blob/master/src/main/java/org/roaringbitmap/RunContainer.java#L145-L192
+func newRunContainer32FromBitmapContainer(bc *bitmapContainer) *runContainer32 {
+
+ rc := &runContainer32{}
+ nbrRuns := bc.numberOfRuns()
+ if nbrRuns == 0 {
+ return rc
+ }
+ rc.iv = make([]interval32, nbrRuns)
+
+ longCtr := 0 // index of current long in bitmap
+ curWord := bc.bitmap[0] // its value
+ runCount := 0
+ for {
+ // potentially multiword advance to first 1 bit
+ for curWord == 0 && longCtr < len(bc.bitmap)-1 {
+ longCtr++
+ curWord = bc.bitmap[longCtr]
+ }
+
+ if curWord == 0 {
+ // wrap up, no more runs
+ return rc
+ }
+ localRunStart := countTrailingZeros(curWord)
+ runStart := localRunStart + 64*longCtr
+ // stuff 1s into number's LSBs
+ curWordWith1s := curWord | (curWord - 1)
+
+ // find the next 0, potentially in a later word
+ runEnd := 0
+ for curWordWith1s == maxWord && longCtr < len(bc.bitmap)-1 {
+ longCtr++
+ curWordWith1s = bc.bitmap[longCtr]
+ }
+
+ if curWordWith1s == maxWord {
+ // a final unterminated run of 1s
+ runEnd = wordSizeInBits + longCtr*64
+ rc.iv[runCount].start = uint32(runStart)
+ rc.iv[runCount].last = uint32(runEnd) - 1
+ return rc
+ }
+ localRunEnd := countTrailingZeros(^curWordWith1s)
+ runEnd = localRunEnd + longCtr*64
+ rc.iv[runCount].start = uint32(runStart)
+ rc.iv[runCount].last = uint32(runEnd) - 1
+ runCount++
+ // now, zero out everything right of runEnd.
+ curWord = curWordWith1s & (curWordWith1s + 1)
+ // We've lathered and rinsed, so repeat...
+ }
+
+}
+
+//
+// newRunContainer32FromArray populates a new
+// runContainer32 from the contents of arr.
+//
+func newRunContainer32FromArray(arr *arrayContainer) *runContainer32 {
+ // keep this in sync with newRunContainer32FromVals above
+
+ rc := &runContainer32{}
+ ah := addHelper32{rc: rc}
+
+ n := arr.getCardinality()
+ var cur, prev uint32
+ switch {
+ case n == 0:
+ // nothing more
+ case n == 1:
+ ah.m = append(ah.m, interval32{start: uint32(arr.content[0]), last: uint32(arr.content[0])})
+ ah.actuallyAdded++
+ default:
+ ah.runstart = uint32(arr.content[0])
+ ah.actuallyAdded++
+ for i := 1; i < n; i++ {
+ prev = uint32(arr.content[i-1])
+ cur = uint32(arr.content[i])
+ ah.add(cur, prev, i)
+ }
+ ah.storeIval(ah.runstart, ah.runlen)
+ }
+ rc.iv = ah.m
+ rc.card = int64(ah.actuallyAdded)
+ return rc
+}
+
+// set adds the integers in vals to the set. Vals
+// must be sorted in increasing order; if not, you should set
+// alreadySorted to false, and we will sort them in place for you.
+// (Be aware of this side effect -- it will affect the caller's
+// view of vals.)
+//
+// If you have a small number of additions to an already
+// big runContainer32, calling Add() may be faster.
+func (rc *runContainer32) set(alreadySorted bool, vals ...uint32) {
+
+ rc2 := newRunContainer32FromVals(alreadySorted, vals...)
+ un := rc.union(rc2)
+ rc.iv = un.iv
+ rc.card = 0
+}
+
+// canMerge returns true if the intervals
+// a and b either overlap or they are
+// contiguous and so can be merged into
+// a single interval.
+func canMerge32(a, b interval32) bool {
+ if int64(a.last)+1 < int64(b.start) {
+ return false
+ }
+ return int64(b.last)+1 >= int64(a.start)
+}
+
+// haveOverlap32 differs from canMerge32 in that
+// it tells you if the intersection of a
+// and b would contain an element (otherwise
+// it would be the empty set, and we return
+// false).
+func haveOverlap32(a, b interval32) bool {
+ if int64(a.last)+1 <= int64(b.start) {
+ return false
+ }
+ return int64(b.last)+1 > int64(a.start)
+}
+
+// mergeInterval32s joins a and b into a
+// new interval, and panics if it cannot.
+func mergeInterval32s(a, b interval32) (res interval32) {
+ if !canMerge32(a, b) {
+ panic(fmt.Sprintf("cannot merge %#v and %#v", a, b))
+ }
+ if b.start < a.start {
+ res.start = b.start
+ } else {
+ res.start = a.start
+ }
+ if b.last > a.last {
+ res.last = b.last
+ } else {
+ res.last = a.last
+ }
+ return
+}
+
+// intersectInterval32s returns the intersection
+// of a and b. The isEmpty flag will be true if
+// a and b were disjoint.
+func intersectInterval32s(a, b interval32) (res interval32, isEmpty bool) {
+ if !haveOverlap32(a, b) {
+ isEmpty = true
+ return
+ }
+ if b.start > a.start {
+ res.start = b.start
+ } else {
+ res.start = a.start
+ }
+ if b.last < a.last {
+ res.last = b.last
+ } else {
+ res.last = a.last
+ }
+ return
+}
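
// Editorial sketch, not part of the patch: canMerge32 accepts adjacency
// while haveOverlap32 does not. For a = [0,4] and b = [5,9]:
//
//	canMerge32(a, b)    == true  // contiguous; union is the single run [0,9]
//	haveOverlap32(a, b) == false // the intersection is empty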
+
+// union merges two runContainer32s, producing
+// a new runContainer32 with the union of rc and b.
+func (rc *runContainer32) union(b *runContainer32) *runContainer32 {
+
+ // rc is also known as 'a' here, but golint insisted we
+ // call it rc for consistency with the rest of the methods.
+
+ var m []interval32
+
+ alim := int64(len(rc.iv))
+ blim := int64(len(b.iv))
+
+ var na int64 // next from a
+ var nb int64 // next from b
+
+ // merged holds the current merge output, which might
+ // get additional merges before being appended to m.
+ var merged interval32
+ var mergedUsed bool // is merged being used at the moment?
+
+ var cura interval32 // currently considering this interval32 from a
+ var curb interval32 // currently considering this interval32 from b
+
+ pass := 0
+ for na < alim && nb < blim {
+ pass++
+ cura = rc.iv[na]
+ curb = b.iv[nb]
+
+ if mergedUsed {
+ mergedUpdated := false
+ if canMerge32(cura, merged) {
+ merged = mergeInterval32s(cura, merged)
+ na = rc.indexOfIntervalAtOrAfter(int64(merged.last)+1, na+1)
+ mergedUpdated = true
+ }
+ if canMerge32(curb, merged) {
+ merged = mergeInterval32s(curb, merged)
+ nb = b.indexOfIntervalAtOrAfter(int64(merged.last)+1, nb+1)
+ mergedUpdated = true
+ }
+ if !mergedUpdated {
+ // we know that merged is disjoint from cura and curb
+ m = append(m, merged)
+ mergedUsed = false
+ }
+ continue
+
+ } else {
+ // !mergedUsed
+ if !canMerge32(cura, curb) {
+ if cura.start < curb.start {
+ m = append(m, cura)
+ na++
+ } else {
+ m = append(m, curb)
+ nb++
+ }
+ } else {
+ merged = mergeInterval32s(cura, curb)
+ mergedUsed = true
+ na = rc.indexOfIntervalAtOrAfter(int64(merged.last)+1, na+1)
+ nb = b.indexOfIntervalAtOrAfter(int64(merged.last)+1, nb+1)
+ }
+ }
+ }
+ var aDone, bDone bool
+ if na >= alim {
+ aDone = true
+ }
+ if nb >= blim {
+ bDone = true
+ }
+ // finish by merging anything remaining into merged we can:
+ if mergedUsed {
+ if !aDone {
+ aAdds:
+ for na < alim {
+ cura = rc.iv[na]
+ if canMerge32(cura, merged) {
+ merged = mergeInterval32s(cura, merged)
+ na = rc.indexOfIntervalAtOrAfter(int64(merged.last)+1, na+1)
+ } else {
+ break aAdds
+ }
+ }
+
+ }
+
+ if !bDone {
+ bAdds:
+ for nb < blim {
+ curb = b.iv[nb]
+ if canMerge32(curb, merged) {
+ merged = mergeInterval32s(curb, merged)
+ nb = b.indexOfIntervalAtOrAfter(int64(merged.last)+1, nb+1)
+ } else {
+ break bAdds
+ }
+ }
+
+ }
+
+ m = append(m, merged)
+ }
+ if na < alim {
+ m = append(m, rc.iv[na:]...)
+ }
+ if nb < blim {
+ m = append(m, b.iv[nb:]...)
+ }
+
+ res := &runContainer32{iv: m}
+ return res
+}
+
+// unionCardinality returns the cardinality of the union of the two runContainer32s, rc and b.
+func (rc *runContainer32) unionCardinality(b *runContainer32) uint64 {
+
+ // rc is also known as 'a' here, but golint insisted we
+ // call it rc for consistency with the rest of the methods.
+ answer := uint64(0)
+
+ alim := int64(len(rc.iv))
+ blim := int64(len(b.iv))
+
+ var na int64 // next from a
+ var nb int64 // next from b
+
+ // merged holds the current merge output, which might
+ // get additional merges before being appended to m.
+ var merged interval32
+ var mergedUsed bool // is merged being used at the moment?
+
+ var cura interval32 // currently considering this interval32 from a
+ var curb interval32 // currently considering this interval32 from b
+
+ pass := 0
+ for na < alim && nb < blim {
+ pass++
+ cura = rc.iv[na]
+ curb = b.iv[nb]
+
+ if mergedUsed {
+ mergedUpdated := false
+ if canMerge32(cura, merged) {
+ merged = mergeInterval32s(cura, merged)
+ na = rc.indexOfIntervalAtOrAfter(int64(merged.last)+1, na+1)
+ mergedUpdated = true
+ }
+ if canMerge32(curb, merged) {
+ merged = mergeInterval32s(curb, merged)
+ nb = b.indexOfIntervalAtOrAfter(int64(merged.last)+1, nb+1)
+ mergedUpdated = true
+ }
+ if !mergedUpdated {
+ // we know that merged is disjoint from cura and curb
+ //m = append(m, merged)
+ answer += uint64(merged.last) - uint64(merged.start) + 1
+ mergedUsed = false
+ }
+ continue
+
+ } else {
+ // !mergedUsed
+ if !canMerge32(cura, curb) {
+ if cura.start < curb.start {
+ answer += uint64(cura.last) - uint64(cura.start) + 1
+ //m = append(m, cura)
+ na++
+ } else {
+ answer += uint64(curb.last) - uint64(curb.start) + 1
+ //m = append(m, curb)
+ nb++
+ }
+ } else {
+ merged = mergeInterval32s(cura, curb)
+ mergedUsed = true
+ na = rc.indexOfIntervalAtOrAfter(int64(merged.last)+1, na+1)
+ nb = b.indexOfIntervalAtOrAfter(int64(merged.last)+1, nb+1)
+ }
+ }
+ }
+ var aDone, bDone bool
+ if na >= alim {
+ aDone = true
+ }
+ if nb >= blim {
+ bDone = true
+ }
+ // finish by merging anything remaining into merged we can:
+ if mergedUsed {
+ if !aDone {
+ aAdds:
+ for na < alim {
+ cura = rc.iv[na]
+ if canMerge32(cura, merged) {
+ merged = mergeInterval32s(cura, merged)
+ na = rc.indexOfIntervalAtOrAfter(int64(merged.last)+1, na+1)
+ } else {
+ break aAdds
+ }
+ }
+
+ }
+
+ if !bDone {
+ bAdds:
+ for nb < blim {
+ curb = b.iv[nb]
+ if canMerge32(curb, merged) {
+ merged = mergeInterval32s(curb, merged)
+ nb = b.indexOfIntervalAtOrAfter(int64(merged.last)+1, nb+1)
+ } else {
+ break bAdds
+ }
+ }
+
+ }
+
+ //m = append(m, merged)
+ answer += uint64(merged.last) - uint64(merged.start) + 1
+ }
+ for _, r := range rc.iv[na:] {
+ answer += uint64(r.last) - uint64(r.start) + 1
+ }
+ for _, r := range b.iv[nb:] {
+ answer += uint64(r.last) - uint64(r.start) + 1
+ }
+ return answer
+}
+
+// indexOfIntervalAtOrAfter is a helper for union.
+func (rc *runContainer32) indexOfIntervalAtOrAfter(key int64, startIndex int64) int64 {
+ rc.myOpts.startIndex = startIndex
+ rc.myOpts.endxIndex = 0
+
+ w, already, _ := rc.search(key, &rc.myOpts)
+ if already {
+ return w
+ }
+ return w + 1
+}
+
+// intersect returns a new runContainer32 holding the
+// intersection of rc (also known as 'a') and b.
+func (rc *runContainer32) intersect(b *runContainer32) *runContainer32 {
+
+ a := rc
+ numa := int64(len(a.iv))
+ numb := int64(len(b.iv))
+ res := &runContainer32{}
+ if numa == 0 || numb == 0 {
+ return res
+ }
+
+ if numa == 1 && numb == 1 {
+ if !haveOverlap32(a.iv[0], b.iv[0]) {
+ return res
+ }
+ }
+
+ var output []interval32
+
+ var acuri int64
+ var bcuri int64
+
+ astart := int64(a.iv[acuri].start)
+ bstart := int64(b.iv[bcuri].start)
+
+ var intersection interval32
+ var leftoverstart int64
+ var isOverlap, isLeftoverA, isLeftoverB bool
+ var done bool
+ pass := 0
+toploop:
+ for acuri < numa && bcuri < numb {
+ pass++
+
+ isOverlap, isLeftoverA, isLeftoverB, leftoverstart, intersection = intersectWithLeftover32(astart, int64(a.iv[acuri].last), bstart, int64(b.iv[bcuri].last))
+
+ if !isOverlap {
+ switch {
+ case astart < bstart:
+ acuri, done = a.findNextIntervalThatIntersectsStartingFrom(acuri+1, bstart)
+ if done {
+ break toploop
+ }
+ astart = int64(a.iv[acuri].start)
+
+ case astart > bstart:
+ bcuri, done = b.findNextIntervalThatIntersectsStartingFrom(bcuri+1, astart)
+ if done {
+ break toploop
+ }
+ bstart = int64(b.iv[bcuri].start)
+
+ //default:
+ // panic("impossible that astart == bstart, since !isOverlap")
+ }
+
+ } else {
+ // isOverlap
+ output = append(output, intersection)
+ switch {
+ case isLeftoverA:
+ // note that we change astart without advancing acuri,
+ // since we need to capture any 2ndary intersections with a.iv[acuri]
+ astart = leftoverstart
+ bcuri++
+ if bcuri >= numb {
+ break toploop
+ }
+ bstart = int64(b.iv[bcuri].start)
+ case isLeftoverB:
+ // note that we change bstart without advancing bcuri,
+ // since we need to capture any 2ndary intersections with b.iv[bcuri]
+ bstart = leftoverstart
+ acuri++
+ if acuri >= numa {
+ break toploop
+ }
+ astart = int64(a.iv[acuri].start)
+ default:
+ // neither had leftover, both completely consumed
+ // optionally, assert for sanity:
+ //if a.iv[acuri].endx != b.iv[bcuri].endx {
+ // panic("huh? should only be possible that endx agree now!")
+ //}
+
+ // advance to next a interval
+ acuri++
+ if acuri >= numa {
+ break toploop
+ }
+ astart = int64(a.iv[acuri].start)
+
+ // advance to next b interval
+ bcuri++
+ if bcuri >= numb {
+ break toploop
+ }
+ bstart = int64(b.iv[bcuri].start)
+ }
+ }
+ } // end for toploop
+
+ if len(output) == 0 {
+ return res
+ }
+
+ res.iv = output
+ return res
+}
+
+// intersectCardinality returns the cardinality of the
+// intersection of rc (also known as 'a') and b.
+func (rc *runContainer32) intersectCardinality(b *runContainer32) int64 {
+ answer := int64(0)
+
+ a := rc
+ numa := int64(len(a.iv))
+ numb := int64(len(b.iv))
+ if numa == 0 || numb == 0 {
+ return 0
+ }
+
+ if numa == 1 && numb == 1 {
+ if !haveOverlap32(a.iv[0], b.iv[0]) {
+ return 0
+ }
+ }
+
+ var acuri int64
+ var bcuri int64
+
+ astart := int64(a.iv[acuri].start)
+ bstart := int64(b.iv[bcuri].start)
+
+ var intersection interval32
+ var leftoverstart int64
+ var isOverlap, isLeftoverA, isLeftoverB bool
+ var done bool
+ pass := 0
+toploop:
+ for acuri < numa && bcuri < numb {
+ pass++
+
+ isOverlap, isLeftoverA, isLeftoverB, leftoverstart, intersection = intersectWithLeftover32(astart, int64(a.iv[acuri].last), bstart, int64(b.iv[bcuri].last))
+
+ if !isOverlap {
+ switch {
+ case astart < bstart:
+ acuri, done = a.findNextIntervalThatIntersectsStartingFrom(acuri+1, bstart)
+ if done {
+ break toploop
+ }
+ astart = int64(a.iv[acuri].start)
+
+ case astart > bstart:
+ bcuri, done = b.findNextIntervalThatIntersectsStartingFrom(bcuri+1, astart)
+ if done {
+ break toploop
+ }
+ bstart = int64(b.iv[bcuri].start)
+
+ //default:
+ // panic("impossible that astart == bstart, since !isOverlap")
+ }
+
+ } else {
+ // isOverlap
+ answer += int64(intersection.last) - int64(intersection.start) + 1
+ switch {
+ case isLeftoverA:
+ // note that we change astart without advancing acuri,
+ // since we need to capture any 2ndary intersections with a.iv[acuri]
+ astart = leftoverstart
+ bcuri++
+ if bcuri >= numb {
+ break toploop
+ }
+ bstart = int64(b.iv[bcuri].start)
+ case isLeftoverB:
+ // note that we change bstart without advancing bcuri,
+ // since we need to capture any 2ndary intersections with b.iv[bcuri]
+ bstart = leftoverstart
+ acuri++
+ if acuri >= numa {
+ break toploop
+ }
+ astart = int64(a.iv[acuri].start)
+ default:
+ // neither had leftover, both completely consumed
+ // optionally, assert for sanity:
+ //if a.iv[acuri].endx != b.iv[bcuri].endx {
+ // panic("huh? should only be possible that endx agree now!")
+ //}
+
+ // advance to next a interval
+ acuri++
+ if acuri >= numa {
+ break toploop
+ }
+ astart = int64(a.iv[acuri].start)
+
+ // advance to next b interval
+ bcuri++
+ if bcuri >= numb {
+ break toploop
+ }
+ bstart = int64(b.iv[bcuri].start)
+ }
+ }
+ } // end for toploop
+
+ return answer
+}
+
+// contains returns true if key is in the container.
+func (rc *runContainer32) contains(key uint32) bool {
+ _, in, _ := rc.search(int64(key), nil)
+ return in
+}
+
+// numIntervals returns the count of intervals in the container.
+func (rc *runContainer32) numIntervals() int {
+ return len(rc.iv)
+}
+
+// search returns alreadyPresent to indicate if the
+// key is already in one of our interval32s.
+//
+// If key is alreadyPresent, then whichInterval32 tells
+// you where.
+//
+// If key is not already present, then whichInterval32 is
+// set as follows:
+//
+// a) whichInterval32 == len(rc.iv)-1 if key is beyond our
+// last interval32 in rc.iv;
+//
+// b) whichInterval32 == -1 if key is before our first
+// interval32 in rc.iv;
+//
+// c) whichInterval32 is set to the maximum index of rc.iv
+// which comes strictly before the key;
+// so rc.iv[whichInterval32].last < key,
+// and if whichInterval32+1 exists, then key < rc.iv[whichInterval32+1].start
+// (Note that whichInterval32+1 won't exist when
+// whichInterval32 is the last interval.)
+//
+// runContainer32.search always returns whichInterval32 < len(rc.iv).
+//
+// If not nil, opts can be used to further restrict
+// the search space.
+//
+func (rc *runContainer32) search(key int64, opts *searchOptions) (whichInterval32 int64, alreadyPresent bool, numCompares int) {
+ n := int64(len(rc.iv))
+ if n == 0 {
+ return -1, false, 0
+ }
+
+ startIndex := int64(0)
+ endxIndex := n
+ if opts != nil {
+ startIndex = opts.startIndex
+
+ // let endxIndex == 0 mean no effect
+ if opts.endxIndex > 0 {
+ endxIndex = opts.endxIndex
+ }
+ }
+
+ // sort.Search returns the smallest index i
+ // in [0, n) at which f(i) is true, assuming that on the range [0, n),
+ // f(i) == true implies f(i+1) == true.
+ // If there is no such index, Search returns n.
+
+ // For correctness, this began as verbatim snippet from
+ // sort.Search in the Go standard lib.
+ // We inline our comparison function for speed, and
+ // annotate with numCompares
+ // to observe and test that extra bounds are utilized.
+ i, j := startIndex, endxIndex
+ for i < j {
+ h := i + (j-i)/2 // avoid overflow when computing h as the bisector
+ // i <= h < j
+ numCompares++
+ if !(key < int64(rc.iv[h].start)) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ below := i
+ // end std lib snippet.
+
+ // The above is a simple in-lining and annotation of:
+ /* below := sort.Search(n,
+ func(i int) bool {
+ return key < rc.iv[i].start
+ })
+ */
+ whichInterval32 = below - 1
+
+ if below == n {
+ // all falses => key is >= start of all interval32s
+ // ... so does it belong to the last interval32?
+ if key < int64(rc.iv[n-1].last)+1 {
+ // yes, it belongs to the last interval32
+ alreadyPresent = true
+ return
+ }
+ // no, it is beyond the last interval32.
+	// leave alreadyPresent = false
+ return
+ }
+
+ // INVAR: key is below rc.iv[below]
+ if below == 0 {
+		// key is before the first interval32.
+ // leave alreadyPresent = false
+ return
+ }
+
+ // INVAR: key is >= rc.iv[below-1].start and
+ // key is < rc.iv[below].start
+
+ // is key in below-1 interval32?
+ if key >= int64(rc.iv[below-1].start) && key < int64(rc.iv[below-1].last)+1 {
+ // yes, it is. key is in below-1 interval32.
+ alreadyPresent = true
+ return
+ }
+
+ // INVAR: key >= rc.iv[below-1].endx && key < rc.iv[below].start
+ // leave alreadyPresent = false
+ return
+}
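
// Editorial sketch, not part of the patch: the search conventions above,
// illustrated for rc.iv = [{10,20}, {30,40}]:
//
//	search(15) -> (0, true)   // inside iv[0]
//	search(25) -> (0, false)  // after iv[0], before iv[1]
//	search(5)  -> (-1, false) // before the first interval
//	search(50) -> (1, false)  // beyond the last interval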
+
+// cardinality returns the count of the integers stored in the
+// runContainer32.
+func (rc *runContainer32) cardinality() int64 {
+ if len(rc.iv) == 0 {
+ rc.card = 0
+ return 0
+ }
+ if rc.card > 0 {
+ return rc.card // already cached
+ }
+ // have to compute it
+ var n int64
+ for _, p := range rc.iv {
+ n += p.runlen()
+ }
+ rc.card = n // cache it
+ return n
+}
+
+// AsSlice decompresses the contents into a []uint32 slice.
+func (rc *runContainer32) AsSlice() []uint32 {
+ s := make([]uint32, rc.cardinality())
+ j := 0
+ for _, p := range rc.iv {
+ for i := p.start; i <= p.last; i++ {
+ s[j] = i
+ j++
+ }
+ }
+ return s
+}
+
+// newRunContainer32 creates an empty run container.
+func newRunContainer32() *runContainer32 {
+ return &runContainer32{}
+}
+
+// newRunContainer32CopyIv creates a run container, initializing
+// with a copy of the supplied iv slice.
+//
+func newRunContainer32CopyIv(iv []interval32) *runContainer32 {
+ rc := &runContainer32{
+ iv: make([]interval32, len(iv)),
+ }
+ copy(rc.iv, iv)
+ return rc
+}
+
+func (rc *runContainer32) Clone() *runContainer32 {
+ rc2 := newRunContainer32CopyIv(rc.iv)
+ return rc2
+}
+
+// newRunContainer32TakeOwnership returns a new runContainer32
+// backed by the provided iv slice, which we will
+// assume exclusive control over from now on.
+//
+func newRunContainer32TakeOwnership(iv []interval32) *runContainer32 {
+ rc := &runContainer32{
+ iv: iv,
+ }
+ return rc
+}
+
+const baseRc32Size = int(unsafe.Sizeof(runContainer32{}))
+const perIntervalRc32Size = int(unsafe.Sizeof(interval32{}))
+
+const baseDiskRc32Size = int(unsafe.Sizeof(uint32(0)))
+
+// see also runContainer32SerializedSizeInBytes(numRuns int) int
+
+// getSizeInBytes returns the number of bytes of memory
+// required by this runContainer32.
+func (rc *runContainer32) getSizeInBytes() int {
+ return perIntervalRc32Size*len(rc.iv) + baseRc32Size
+}
+
+// runContainer32SerializedSizeInBytes returns the number of bytes of disk
+// required to hold numRuns in a runContainer32.
+func runContainer32SerializedSizeInBytes(numRuns int) int {
+ return perIntervalRc32Size*numRuns + baseDiskRc32Size
+}
+
+// Add adds a single value k to the set.
+func (rc *runContainer32) Add(k uint32) (wasNew bool) {
+	// TODO comment from the Java RunContainer.java:
+ // it might be better and simpler to do return
+ // toBitmapOrArrayContainer(getCardinality()).add(k)
+ // but note that some unit tests use this method to build up test
+ // runcontainers without calling runOptimize
+
+ k64 := int64(k)
+
+ index, present, _ := rc.search(k64, nil)
+ if present {
+ return // already there
+ }
+ wasNew = true
+
+ // increment card if it is cached already
+ if rc.card > 0 {
+ rc.card++
+ }
+ n := int64(len(rc.iv))
+ if index == -1 {
+ // we may need to extend the first run
+ if n > 0 {
+ if rc.iv[0].start == k+1 {
+ rc.iv[0].start = k
+ return
+ }
+ }
+ // nope, k stands alone, starting the new first interval32.
+ rc.iv = append([]interval32{{start: k, last: k}}, rc.iv...)
+ return
+ }
+
+ // are we off the end? handle both index == n and index == n-1:
+ if index >= n-1 {
+ if int64(rc.iv[n-1].last)+1 == k64 {
+ rc.iv[n-1].last++
+ return
+ }
+ rc.iv = append(rc.iv, interval32{start: k, last: k})
+ return
+ }
+
+ // INVAR: index and index+1 both exist, and k goes between them.
+ //
+ // Now: add k into the middle,
+ // possibly fusing with index or index+1 interval32
+ // and possibly resulting in fusing of two interval32s
+ // that had a one integer gap.
+
+ left := index
+ right := index + 1
+
+ // are we fusing left and right by adding k?
+ if int64(rc.iv[left].last)+1 == k64 && int64(rc.iv[right].start) == k64+1 {
+ // fuse into left
+ rc.iv[left].last = rc.iv[right].last
+ // remove redundant right
+ rc.iv = append(rc.iv[:left+1], rc.iv[right+1:]...)
+ return
+ }
+
+ // are we an addition to left?
+ if int64(rc.iv[left].last)+1 == k64 {
+ // yes
+ rc.iv[left].last++
+ return
+ }
+
+ // are we an addition to right?
+ if int64(rc.iv[right].start) == k64+1 {
+ // yes
+ rc.iv[right].start = k
+ return
+ }
+
+ // k makes a standalone new interval32, inserted in the middle
+ tail := append([]interval32{{start: k, last: k}}, rc.iv[right:]...)
+ rc.iv = append(rc.iv[:left+1], tail...)
+ return
+}
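
// Editorial sketch, not part of the patch: how Add fuses runs. Starting
// from iv = [{1,3}, {5,7}], Add(4) bridges the one-integer gap and leaves
// the single run [{1,7}]; a later Add(9) appends a standalone run {9,9}.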
+
+//msgp:ignore runIterator
+
+// runIterator32 advice: you must call Next() at least once
+// before calling Cur(); and you should call HasNext()
+// before calling Next() to ensure there are contents.
+type runIterator32 struct {
+ rc *runContainer32
+ curIndex int64
+ curPosInIndex uint32
+ curSeq int64
+}
+
+// newRunIterator32 returns a new run iterator over the contents of rc.
+func (rc *runContainer32) newRunIterator32() *runIterator32 {
+ return &runIterator32{rc: rc, curIndex: -1}
+}
+
+// HasNext returns false if calling Next will panic. It
+// returns true when there is at least one more value
+// available in the iteration sequence.
+func (ri *runIterator32) hasNext() bool {
+ if len(ri.rc.iv) == 0 {
+ return false
+ }
+ if ri.curIndex == -1 {
+ return true
+ }
+ return ri.curSeq+1 < ri.rc.cardinality()
+}
+
+// cur returns the current value pointed to by the iterator.
+func (ri *runIterator32) cur() uint32 {
+ return ri.rc.iv[ri.curIndex].start + ri.curPosInIndex
+}
+
+// Next returns the next value in the iteration sequence.
+func (ri *runIterator32) next() uint32 {
+ if !ri.hasNext() {
+ panic("no Next available")
+ }
+ if ri.curIndex >= int64(len(ri.rc.iv)) {
+ panic("runIterator.Next() going beyond what is available")
+ }
+ if ri.curIndex == -1 {
+ // first time is special
+ ri.curIndex = 0
+ } else {
+ ri.curPosInIndex++
+ if int64(ri.rc.iv[ri.curIndex].start)+int64(ri.curPosInIndex) == int64(ri.rc.iv[ri.curIndex].last)+1 {
+ ri.curPosInIndex = 0
+ ri.curIndex++
+ }
+ ri.curSeq++
+ }
+ return ri.cur()
+}
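
// Editorial sketch, not part of the patch: the documented iteration
// protocol in code form:
//
//	it := rc.newRunIterator32()
//	for it.hasNext() {
//		v := it.next()
//		// use v
//	}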
+
+// remove removes the element that the iterator
+// is on from the run container. You can use
+// Cur if you want to double check what is about
+// to be deleted.
+func (ri *runIterator32) remove() uint32 {
+ n := ri.rc.cardinality()
+ if n == 0 {
+ panic("runIterator.Remove called on empty runContainer32")
+ }
+ cur := ri.cur()
+
+ ri.rc.deleteAt(&ri.curIndex, &ri.curPosInIndex, &ri.curSeq)
+ return cur
+}
+
+// remove removes key from the container.
+func (rc *runContainer32) removeKey(key uint32) (wasPresent bool) {
+
+ var index int64
+ var curSeq int64
+ index, wasPresent, _ = rc.search(int64(key), nil)
+ if !wasPresent {
+ return // already removed, nothing to do.
+ }
+ pos := key - rc.iv[index].start
+ rc.deleteAt(&index, &pos, &curSeq)
+ return
+}
+
+// internal helper functions
+
+func (rc *runContainer32) deleteAt(curIndex *int64, curPosInIndex *uint32, curSeq *int64) {
+ rc.card--
+ (*curSeq)--
+ ci := *curIndex
+ pos := *curPosInIndex
+
+ // are we first, last, or in the middle of our interval32?
+ switch {
+ case pos == 0:
+ if int64(rc.iv[ci].start) == int64(rc.iv[ci].last) {
+ // our interval disappears
+ rc.iv = append(rc.iv[:ci], rc.iv[ci+1:]...)
+ // curIndex stays the same, since the delete did
+ // the advance for us.
+ *curPosInIndex = 0
+ } else {
+ rc.iv[ci].start++ // no longer overflowable
+ }
+ case int64(pos) == rc.iv[ci].runlen()-1:
+ // last
+ rc.iv[ci].last--
+		// our interval32 cannot disappear here, else we would have hit the pos == 0 case above.
+ (*curPosInIndex)--
+ // if we leave *curIndex alone, then Next() will work properly even after the delete.
+ default:
+ //middle
+ // split into two, adding an interval32
+ new0 := interval32{
+ start: rc.iv[ci].start,
+ last: rc.iv[ci].start + *curPosInIndex - 1}
+
+ new1start := int64(rc.iv[ci].start) + int64(*curPosInIndex) + 1
+ if new1start > int64(MaxUint32) {
+ panic("overflow?!?!")
+ }
+ new1 := interval32{
+ start: uint32(new1start),
+ last: rc.iv[ci].last}
+ tail := append([]interval32{new0, new1}, rc.iv[ci+1:]...)
+ rc.iv = append(rc.iv[:ci], tail...)
+ // update curIndex and curPosInIndex
+ (*curIndex)++
+ *curPosInIndex = 0
+ }
+
+}
+
+func have4Overlap32(astart, alast, bstart, blast int64) bool {
+ if alast+1 <= bstart {
+ return false
+ }
+ return blast+1 > astart
+}
+
+func intersectWithLeftover32(astart, alast, bstart, blast int64) (isOverlap, isLeftoverA, isLeftoverB bool, leftoverstart int64, intersection interval32) {
+ if !have4Overlap32(astart, alast, bstart, blast) {
+ return
+ }
+ isOverlap = true
+
+ // do the intersection:
+ if bstart > astart {
+ intersection.start = uint32(bstart)
+ } else {
+ intersection.start = uint32(astart)
+ }
+ switch {
+ case blast < alast:
+ isLeftoverA = true
+ leftoverstart = blast + 1
+ intersection.last = uint32(blast)
+ case alast < blast:
+ isLeftoverB = true
+ leftoverstart = alast + 1
+ intersection.last = uint32(alast)
+ default:
+ // alast == blast
+ intersection.last = uint32(alast)
+ }
+
+ return
+}
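
// Editorial sketch, not part of the patch: intersectWithLeftover32 on
// a = [0,10], b = [5,15] returns intersection [5,10] with
// isLeftoverB == true and leftoverstart == 11; that is, b still has
// [11,15] left to try against a's next interval.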
+
+func (rc *runContainer32) findNextIntervalThatIntersectsStartingFrom(startIndex int64, key int64) (index int64, done bool) {
+
+ rc.myOpts.startIndex = startIndex
+ rc.myOpts.endxIndex = 0
+
+ w, _, _ := rc.search(key, &rc.myOpts)
+ // rc.search always returns w < len(rc.iv)
+ if w < startIndex {
+ // not found and comes before lower bound startIndex,
+ // so just use the lower bound.
+ if startIndex == int64(len(rc.iv)) {
+ // also this bump up means that we are done
+ return startIndex, true
+ }
+ return startIndex, false
+ }
+
+ return w, false
+}
+
+func sliceToString32(m []interval32) string {
+ s := ""
+ for i := range m {
+ s += fmt.Sprintf("%v: %s, ", i, m[i])
+ }
+ return s
+}
+
+// selectInt32 returns the j-th value in the container.
+// We panic if j is out of bounds.
+func (rc *runContainer32) selectInt32(j uint32) int {
+ n := rc.cardinality()
+ if int64(j) > n {
+ panic(fmt.Sprintf("Cannot select %v since Cardinality is %v", j, n))
+ }
+
+ var offset int64
+ for k := range rc.iv {
+ nextOffset := offset + rc.iv[k].runlen() + 1
+ if nextOffset > int64(j) {
+ return int(int64(rc.iv[k].start) + (int64(j) - offset))
+ }
+ offset = nextOffset
+ }
+ panic(fmt.Sprintf("Cannot select %v since Cardinality is %v", j, n))
+}
+
+// helper for invert
+func (rc *runContainer32) invertlastInterval(origin uint32, lastIdx int) []interval32 {
+ cur := rc.iv[lastIdx]
+ if cur.last == MaxUint32 {
+ if cur.start == origin {
+ return nil // empty container
+ }
+ return []interval32{{start: origin, last: cur.start - 1}}
+ }
+ if cur.start == origin {
+ return []interval32{{start: cur.last + 1, last: MaxUint32}}
+ }
+ // invert splits
+ return []interval32{
+ {start: origin, last: cur.start - 1},
+ {start: cur.last + 1, last: MaxUint32},
+ }
+}
+
+// invert returns a new container (not inplace), that is
+// the inversion of rc. For each bit b in rc, the
+// returned value has !b
+func (rc *runContainer32) invert() *runContainer32 {
+ ni := len(rc.iv)
+ var m []interval32
+ switch ni {
+ case 0:
+ return &runContainer32{iv: []interval32{{0, MaxUint32}}}
+ case 1:
+ return &runContainer32{iv: rc.invertlastInterval(0, 0)}
+ }
+ var invstart int64
+ ult := ni - 1
+ for i, cur := range rc.iv {
+ if i == ult {
+			// invertlastInterval will add both intervals (b) and (c) in
+ // diagram below.
+ m = append(m, rc.invertlastInterval(uint32(invstart), i)...)
+ break
+ }
+ // INVAR: i and cur are not the last interval, there is a next at i+1
+ //
+ // ........[cur.start, cur.last] ...... [next.start, next.last]....
+ // ^ ^ ^
+ // (a) (b) (c)
+ //
+ // Now: we add interval (a); but if (a) is empty, for cur.start==0, we skip it.
+ if cur.start > 0 {
+ m = append(m, interval32{start: uint32(invstart), last: cur.start - 1})
+ }
+ invstart = int64(cur.last + 1)
+ }
+ return &runContainer32{iv: m}
+}
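+
+// For example (illustrative only): inverting {[2, 4]} yields
+// {[0, 1], [5, MaxUint32]}, and inverting {[0, MaxUint32]} yields the
+// empty container.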
+
+func (iv interval32) equal(b interval32) bool {
+ if iv.start == b.start {
+ return iv.last == b.last
+ }
+ return false
+}
+
+func (iv interval32) isSuperSetOf(b interval32) bool {
+ return iv.start <= b.start && b.last <= iv.last
+}
+
+func (iv interval32) subtractInterval(del interval32) (left []interval32, delcount int64) {
+ isect, isEmpty := intersectInterval32s(iv, del)
+
+ if isEmpty {
+ return nil, 0
+ }
+ if del.isSuperSetOf(iv) {
+ return nil, iv.runlen()
+ }
+
+ switch {
+ case isect.start > iv.start && isect.last < iv.last:
+ new0 := interval32{start: iv.start, last: isect.start - 1}
+ new1 := interval32{start: isect.last + 1, last: iv.last}
+ return []interval32{new0, new1}, isect.runlen()
+ case isect.start == iv.start:
+ return []interval32{{start: isect.last + 1, last: iv.last}}, isect.runlen()
+ default:
+ return []interval32{{start: iv.start, last: isect.start - 1}}, isect.runlen()
+ }
+}
+
+func (rc *runContainer32) isubtract(del interval32) {
+ origiv := make([]interval32, len(rc.iv))
+ copy(origiv, rc.iv)
+ n := int64(len(rc.iv))
+ if n == 0 {
+ return // already done.
+ }
+
+ _, isEmpty := intersectInterval32s(
+ interval32{
+ start: rc.iv[0].start,
+ last: rc.iv[n-1].last,
+ }, del)
+ if isEmpty {
+ return // done
+ }
+ // INVAR there is some intersection between rc and del
+ istart, startAlready, _ := rc.search(int64(del.start), nil)
+ ilast, lastAlready, _ := rc.search(int64(del.last), nil)
+ rc.card = -1
+ if istart == -1 {
+ if ilast == n-1 && !lastAlready {
+ rc.iv = nil
+ return
+ }
+ }
+ // some intervals will remain
+ switch {
+ case startAlready && lastAlready:
+ res0, _ := rc.iv[istart].subtractInterval(del)
+
+ // would overwrite values in iv b/c res0 can have len 2. so
+ // write to origiv instead.
+ lost := 1 + ilast - istart
+ changeSize := int64(len(res0)) - lost
+ newSize := int64(len(rc.iv)) + changeSize
+
+ // rc.iv = append(pre, caboose...)
+ // return
+
+ if ilast != istart {
+ res1, _ := rc.iv[ilast].subtractInterval(del)
+ res0 = append(res0, res1...)
+ changeSize = int64(len(res0)) - lost
+ newSize = int64(len(rc.iv)) + changeSize
+ }
+ switch {
+ case changeSize < 0:
+ // shrink
+ copy(rc.iv[istart+int64(len(res0)):], rc.iv[ilast+1:])
+ copy(rc.iv[istart:istart+int64(len(res0))], res0)
+ rc.iv = rc.iv[:newSize]
+ return
+ case changeSize == 0:
+ // stay the same
+ copy(rc.iv[istart:istart+int64(len(res0))], res0)
+ return
+ default:
+ // changeSize > 0 is only possible when ilast == istart.
+ // Hence we now know: changeSize == 1 and len(res0) == 2
+ rc.iv = append(rc.iv, interval32{})
+ // len(rc.iv) is correct now, no need to rc.iv = rc.iv[:newSize]
+
+ // copy the tail into place
+ copy(rc.iv[ilast+2:], rc.iv[ilast+1:])
+ // copy the new item(s) into place
+ copy(rc.iv[istart:istart+2], res0)
+ return
+ }
+
+ case !startAlready && !lastAlready:
+ // we get to discard whole intervals
+
+ // from the search() definition:
+
+ // if del.start is not present, then istart is
+ // set as follows:
+ //
+ // a) istart == n-1 if del.start is beyond our
+ // last interval32 in rc.iv;
+ //
+ // b) istart == -1 if del.start is before our first
+ // interval32 in rc.iv;
+ //
+ // c) istart is set to the minimum index of rc.iv
+ // which comes strictly before the del.start;
+ // so del.start > rc.iv[istart].last,
+	//    and if istart+1 exists, then del.start < rc.iv[istart+1].start
+
+ // if del.last is not present, then ilast is
+ // set as follows:
+ //
+ // a) ilast == n-1 if del.last is beyond our
+ // last interval32 in rc.iv;
+ //
+ // b) ilast == -1 if del.last is before our first
+ // interval32 in rc.iv;
+ //
+ // c) ilast is set to the minimum index of rc.iv
+ // which comes strictly before the del.last;
+ // so del.last > rc.iv[ilast].last,
+ // and if ilast+1 exists, then del.last < rc.iv[ilast+1].start
+
+ // INVAR: istart >= 0
+ pre := rc.iv[:istart+1]
+ if ilast == n-1 {
+ rc.iv = pre
+ return
+ }
+ // INVAR: ilast < n-1
+ lost := ilast - istart
+ changeSize := -lost
+ newSize := int64(len(rc.iv)) + changeSize
+ if changeSize != 0 {
+ copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:])
+ }
+ rc.iv = rc.iv[:newSize]
+ return
+
+ case startAlready && !lastAlready:
+ // we can only shrink or stay the same size
+ // i.e. we either eliminate the whole interval,
+ // or just cut off the right side.
+ res0, _ := rc.iv[istart].subtractInterval(del)
+ if len(res0) > 0 {
+			// len(res0) must be 1
+ rc.iv[istart] = res0[0]
+ }
+ lost := 1 + (ilast - istart)
+ changeSize := int64(len(res0)) - lost
+ newSize := int64(len(rc.iv)) + changeSize
+ if changeSize != 0 {
+ copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:])
+ }
+ rc.iv = rc.iv[:newSize]
+ return
+
+ case !startAlready && lastAlready:
+ // we can only shrink or stay the same size
+ res1, _ := rc.iv[ilast].subtractInterval(del)
+ lost := ilast - istart
+ changeSize := int64(len(res1)) - lost
+ newSize := int64(len(rc.iv)) + changeSize
+ if changeSize != 0 {
+ // move the tail first to make room for res1
+ copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:])
+ }
+ copy(rc.iv[istart+1:], res1)
+ rc.iv = rc.iv[:newSize]
+ return
+ }
+}
+
+// compute rc minus b, and return the result as a new value (not inplace).
+// port of run_container_andnot from CRoaring...
+// https://github.com/RoaringBitmap/CRoaring/blob/master/src/containers/run.c#L435-L496
+func (rc *runContainer32) AndNotRunContainer32(b *runContainer32) *runContainer32 {
+
+ if len(b.iv) == 0 || len(rc.iv) == 0 {
+ return rc
+ }
+
+ dst := newRunContainer32()
+ apos := 0
+ bpos := 0
+
+ a := rc
+
+ astart := a.iv[apos].start
+ alast := a.iv[apos].last
+ bstart := b.iv[bpos].start
+ blast := b.iv[bpos].last
+
+ alen := len(a.iv)
+ blen := len(b.iv)
+
+ for apos < alen && bpos < blen {
+ switch {
+ case alast < bstart:
+ // output the first run
+ dst.iv = append(dst.iv, interval32{start: astart, last: alast})
+ apos++
+ if apos < alen {
+ astart = a.iv[apos].start
+ alast = a.iv[apos].last
+ }
+ case blast < astart:
+			// advance past the second run
+ bpos++
+ if bpos < blen {
+ bstart = b.iv[bpos].start
+ blast = b.iv[bpos].last
+ }
+ default:
+ // a: [ ]
+ // b: [ ]
+ // alast >= bstart
+ // blast >= astart
+ if astart < bstart {
+ dst.iv = append(dst.iv, interval32{start: astart, last: bstart - 1})
+ }
+ if alast > blast {
+ astart = blast + 1
+ } else {
+ apos++
+ if apos < alen {
+ astart = a.iv[apos].start
+ alast = a.iv[apos].last
+ }
+ }
+ }
+ }
+ if apos < alen {
+ dst.iv = append(dst.iv, interval32{start: astart, last: alast})
+ apos++
+ if apos < alen {
+ dst.iv = append(dst.iv, a.iv[apos:]...)
+ }
+ }
+
+ return dst
+}
+
+func (rc *runContainer32) numberOfRuns() (nr int) {
+ return len(rc.iv)
+}
+
+func (rc *runContainer32) containerType() contype {
+ return run32Contype
+}
+
+func (rc *runContainer32) equals32(srb *runContainer32) bool {
+ //p("both rc32")
+ // Check if the containers are the same object.
+ if rc == srb {
+ //p("same object")
+ return true
+ }
+
+ if len(srb.iv) != len(rc.iv) {
+ //p("iv len differ")
+ return false
+ }
+
+ for i, v := range rc.iv {
+ if v != srb.iv[i] {
+ //p("differ at iv i=%v, srb.iv[i]=%v, rc.iv[i]=%v", i, srb.iv[i], rc.iv[i])
+ return false
+ }
+ }
+ //p("all intervals same, returning true")
+ return true
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/rle16.go b/vendor/github.com/RoaringBitmap/roaring/rle16.go
new file mode 100644
index 0000000000..951af65f3f
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/rle16.go
@@ -0,0 +1,1747 @@
+package roaring
+
+//
+// Copyright (c) 2016 by the roaring authors.
+// Licensed under the Apache License, Version 2.0.
+//
+// We derive a few lines of code from the sort.Search
+// function in the golang standard library. That function
+// is Copyright 2009 The Go Authors, and licensed
+// under the following BSD-style license.
+/*
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+import (
+ "fmt"
+ "sort"
+ "unsafe"
+)
+
+//go:generate msgp -unexported
+
+// runContainer16 does run-length encoding of sets of
+// uint16 integers.
+type runContainer16 struct {
+ iv []interval16
+ card int64
+
+ // avoid allocation during search
+ myOpts searchOptions `msg:"-"`
+}
+
+// interval16 is the structure, internal to runContainer16,
+// that maintains the individual [start, last]
+// closed intervals.
+type interval16 struct {
+ start uint16
+	length uint16 // length of the run minus 1; last() == start + length
+}
+
+func newInterval16Range(start, last uint16) interval16 {
+ if last < start {
+ panic(fmt.Sprintf("last (%d) cannot be smaller than start (%d)", last, start))
+ }
+
+ return interval16{
+ start,
+ last - start,
+ }
+}
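+
+// For example, newInterval16Range(3, 7) yields interval16{start: 3,
+// length: 4}: the closed interval [3, 7], with last() == 7 and
+// runlen() == 5. Storing the length minus one lets a single run span
+// the whole [0, MaxUint16] domain without overflowing a uint16.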
+
+// runlen returns the count of integers in the interval.
+func (iv interval16) runlen() int64 {
+ return int64(iv.length) + 1
+}
+
+func (iv interval16) last() uint16 {
+ return iv.start + iv.length
+}
+
+// String produces a human viewable string of the contents.
+func (iv interval16) String() string {
+	return fmt.Sprintf("[%d, %d]", iv.start, iv.last())
+}
+
+func ivalString16(iv []interval16) string {
+ var s string
+ var j int
+ var p interval16
+ for j, p = range iv {
+ s += fmt.Sprintf("%v:[%d, %d], ", j, p.start, p.last())
+ }
+ return s
+}
+
+// String produces a human viewable string of the contents.
+func (rc *runContainer16) String() string {
+ if len(rc.iv) == 0 {
+ return "runContainer16{}"
+ }
+ is := ivalString16(rc.iv)
+ return `runContainer16{` + is + `}`
+}
+
+// uint16Slice is a convenience type implementing sort.Interface.
+type uint16Slice []uint16
+
+// Len returns the length of p.
+func (p uint16Slice) Len() int { return len(p) }
+
+// Less returns p[i] < p[j]
+func (p uint16Slice) Less(i, j int) bool { return p[i] < p[j] }
+
+// Swap swaps elements i and j.
+func (p uint16Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+//msgp:ignore addHelper
+
+// addHelper16 helps build a runContainer16.
+type addHelper16 struct {
+ runstart uint16
+ runlen uint16
+ actuallyAdded uint16
+ m []interval16
+ rc *runContainer16
+}
+
+func (ah *addHelper16) storeIval(runstart, runlen uint16) {
+ mi := interval16{start: runstart, length: runlen}
+ ah.m = append(ah.m, mi)
+}
+
+func (ah *addHelper16) add(cur, prev uint16, i int) {
+ if cur == prev+1 {
+ ah.runlen++
+ ah.actuallyAdded++
+ } else {
+ if cur < prev {
+ panic(fmt.Sprintf("newRunContainer16FromVals sees "+
+ "unsorted vals; vals[%v]=cur=%v < prev=%v. Sort your vals"+
+ " before calling us with alreadySorted == true.", i, cur, prev))
+ }
+ if cur == prev {
+ // ignore duplicates
+ } else {
+ ah.actuallyAdded++
+ ah.storeIval(ah.runstart, ah.runlen)
+ ah.runstart = cur
+ ah.runlen = 0
+ }
+ }
+}
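+
+// For illustration (hypothetical input): feeding the sorted values
+// 1, 2, 3, 7, 8 through add accumulates the run [1, 3], stores it
+// when the gap before 7 is seen, then starts a new run at 7; the
+// caller is responsible for storing the final pending run [7, 8]
+// (see newRunContainer16FromVals below).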
+
+// newRunContainer16Range makes a new container holding just the specified closed interval [rangestart, rangelast].
+func newRunContainer16Range(rangestart uint16, rangelast uint16) *runContainer16 {
+ rc := &runContainer16{}
+ rc.iv = append(rc.iv, newInterval16Range(rangestart, rangelast))
+ return rc
+}
+
+// newRunContainer16FromVals makes a new container from vals.
+//
+// For efficiency, vals should be sorted in ascending order.
+// Ideally vals should not contain duplicates, but we detect and
+// ignore them. If vals is already sorted in ascending order, then
+// pass alreadySorted = true. Otherwise, for !alreadySorted,
+// we will sort vals before creating a runContainer16 of them.
+// We sort the original vals, so this will change what the
+// caller sees in vals as a side effect.
+func newRunContainer16FromVals(alreadySorted bool, vals ...uint16) *runContainer16 {
+ // keep this in sync with newRunContainer16FromArray below
+
+ rc := &runContainer16{}
+ ah := addHelper16{rc: rc}
+
+ if !alreadySorted {
+ sort.Sort(uint16Slice(vals))
+ }
+ n := len(vals)
+ var cur, prev uint16
+ switch {
+ case n == 0:
+ // nothing more
+ case n == 1:
+ ah.m = append(ah.m, newInterval16Range(vals[0], vals[0]))
+ ah.actuallyAdded++
+ default:
+ ah.runstart = vals[0]
+ ah.actuallyAdded++
+ for i := 1; i < n; i++ {
+ prev = vals[i-1]
+ cur = vals[i]
+ ah.add(cur, prev, i)
+ }
+ ah.storeIval(ah.runstart, ah.runlen)
+ }
+ rc.iv = ah.m
+ rc.card = int64(ah.actuallyAdded)
+ return rc
+}
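+
+// A minimal usage sketch (hypothetical values):
+//
+//	rc := newRunContainer16FromVals(true, 1, 2, 3, 7, 8)
+//	// rc.iv is now {[1, 3], [7, 8]} and rc.cardinality() == 5.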
+
+// newRunContainer16FromBitmapContainer makes a new run container from bc,
+// somewhat efficiently. For reference, see the Java
+// https://github.com/RoaringBitmap/RoaringBitmap/blob/master/src/main/java/org/roaringbitmap/RunContainer.java#L145-L192
+func newRunContainer16FromBitmapContainer(bc *bitmapContainer) *runContainer16 {
+
+ rc := &runContainer16{}
+ nbrRuns := bc.numberOfRuns()
+ if nbrRuns == 0 {
+ return rc
+ }
+ rc.iv = make([]interval16, nbrRuns)
+
+ longCtr := 0 // index of current long in bitmap
+ curWord := bc.bitmap[0] // its value
+ runCount := 0
+ for {
+ // potentially multiword advance to first 1 bit
+ for curWord == 0 && longCtr < len(bc.bitmap)-1 {
+ longCtr++
+ curWord = bc.bitmap[longCtr]
+ }
+
+ if curWord == 0 {
+ // wrap up, no more runs
+ return rc
+ }
+ localRunStart := countTrailingZeros(curWord)
+ runStart := localRunStart + 64*longCtr
+ // stuff 1s into number's LSBs
+ curWordWith1s := curWord | (curWord - 1)
+
+ // find the next 0, potentially in a later word
+ runEnd := 0
+ for curWordWith1s == maxWord && longCtr < len(bc.bitmap)-1 {
+ longCtr++
+ curWordWith1s = bc.bitmap[longCtr]
+ }
+
+ if curWordWith1s == maxWord {
+ // a final unterminated run of 1s
+ runEnd = wordSizeInBits + longCtr*64
+ rc.iv[runCount].start = uint16(runStart)
+ rc.iv[runCount].length = uint16(runEnd) - uint16(runStart) - 1
+ return rc
+ }
+ localRunEnd := countTrailingZeros(^curWordWith1s)
+ runEnd = localRunEnd + longCtr*64
+ rc.iv[runCount].start = uint16(runStart)
+ rc.iv[runCount].length = uint16(runEnd) - 1 - uint16(runStart)
+ runCount++
+ // now, zero out everything right of runEnd.
+ curWord = curWordWith1s & (curWordWith1s + 1)
+ // We've lathered and rinsed, so repeat...
+ }
+
+}
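+
+// The two bit tricks above, traced on an illustrative word
+// curWord = 0b11011000 (bits 3, 4, 6, 7 set):
+//
+//	curWord | (curWord - 1)   // 0b11011111: fill 1s below the lowest 1
+//	countTrailingZeros(^w1s)  // 5: the next 0 closes the run [3, 4]
+//	w1s & (w1s + 1)           // 0b11000000: clear the finished run
+//
+// where w1s abbreviates curWordWith1s.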
+
+//
+// newRunContainer16FromArray populates a new
+// runContainer16 from the contents of arr.
+//
+func newRunContainer16FromArray(arr *arrayContainer) *runContainer16 {
+ // keep this in sync with newRunContainer16FromVals above
+
+ rc := &runContainer16{}
+ ah := addHelper16{rc: rc}
+
+ n := arr.getCardinality()
+ var cur, prev uint16
+ switch {
+ case n == 0:
+ // nothing more
+ case n == 1:
+ ah.m = append(ah.m, newInterval16Range(arr.content[0], arr.content[0]))
+ ah.actuallyAdded++
+ default:
+ ah.runstart = arr.content[0]
+ ah.actuallyAdded++
+ for i := 1; i < n; i++ {
+ prev = arr.content[i-1]
+ cur = arr.content[i]
+ ah.add(cur, prev, i)
+ }
+ ah.storeIval(ah.runstart, ah.runlen)
+ }
+ rc.iv = ah.m
+ rc.card = int64(ah.actuallyAdded)
+ return rc
+}
+
+// set adds the integers in vals to the set. Vals
+// must be sorted in increasing order; if not, you should set
+// alreadySorted to false, and we will sort them in place for you.
+// (Be aware of this side effect -- it will affect the caller's
+// view of vals.)
+//
+// If you have a small number of additions to an already
+// big runContainer16, calling Add() may be faster.
+func (rc *runContainer16) set(alreadySorted bool, vals ...uint16) {
+
+ rc2 := newRunContainer16FromVals(alreadySorted, vals...)
+ un := rc.union(rc2)
+ rc.iv = un.iv
+ rc.card = 0
+}
+
+// canMerge16 returns true iff the intervals
+// a and b either overlap or they are
+// contiguous and so can be merged into
+// a single interval.
+func canMerge16(a, b interval16) bool {
+ if int64(a.last())+1 < int64(b.start) {
+ return false
+ }
+ return int64(b.last())+1 >= int64(a.start)
+}
+
+// haveOverlap16 differs from canMerge16 in that
+// it tells you if the intersection of a
+// and b would contain an element (otherwise
+// it would be the empty set, and we return
+// false).
+func haveOverlap16(a, b interval16) bool {
+ if int64(a.last())+1 <= int64(b.start) {
+ return false
+ }
+ return int64(b.last())+1 > int64(a.start)
+}
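+
+// For example, [1, 3] and [4, 6] are contiguous: canMerge16 reports
+// true (they fuse into [1, 6]), while haveOverlap16 reports false,
+// since they share no element.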
+
+// mergeInterval16s joins a and b into a
+// new interval, and panics if it cannot.
+func mergeInterval16s(a, b interval16) (res interval16) {
+ if !canMerge16(a, b) {
+ panic(fmt.Sprintf("cannot merge %#v and %#v", a, b))
+ }
+
+ if b.start < a.start {
+ res.start = b.start
+ } else {
+ res.start = a.start
+ }
+
+ if b.last() > a.last() {
+ res.length = b.last() - res.start
+ } else {
+ res.length = a.last() - res.start
+ }
+
+ return
+}
+
+// intersectInterval16s returns the intersection
+// of a and b. The isEmpty flag will be true if
+// a and b were disjoint.
+func intersectInterval16s(a, b interval16) (res interval16, isEmpty bool) {
+ if !haveOverlap16(a, b) {
+ isEmpty = true
+ return
+ }
+ if b.start > a.start {
+ res.start = b.start
+ } else {
+ res.start = a.start
+ }
+
+ bEnd := b.last()
+ aEnd := a.last()
+ var resEnd uint16
+
+ if bEnd < aEnd {
+ resEnd = bEnd
+ } else {
+ resEnd = aEnd
+ }
+ res.length = resEnd - res.start
+ return
+}
+
+// union merges two runContainer16s, producing
+// a new runContainer16 with the union of rc and b.
+func (rc *runContainer16) union(b *runContainer16) *runContainer16 {
+
+ // rc is also known as 'a' here, but golint insisted we
+ // call it rc for consistency with the rest of the methods.
+
+ var m []interval16
+
+ alim := int64(len(rc.iv))
+ blim := int64(len(b.iv))
+
+ var na int64 // next from a
+ var nb int64 // next from b
+
+ // merged holds the current merge output, which might
+ // get additional merges before being appended to m.
+ var merged interval16
+ var mergedUsed bool // is merged being used at the moment?
+
+ var cura interval16 // currently considering this interval16 from a
+ var curb interval16 // currently considering this interval16 from b
+
+ pass := 0
+ for na < alim && nb < blim {
+ pass++
+ cura = rc.iv[na]
+ curb = b.iv[nb]
+
+ if mergedUsed {
+ mergedUpdated := false
+ if canMerge16(cura, merged) {
+ merged = mergeInterval16s(cura, merged)
+ na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1)
+ mergedUpdated = true
+ }
+ if canMerge16(curb, merged) {
+ merged = mergeInterval16s(curb, merged)
+ nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1)
+ mergedUpdated = true
+ }
+ if !mergedUpdated {
+ // we know that merged is disjoint from cura and curb
+ m = append(m, merged)
+ mergedUsed = false
+ }
+ continue
+
+ } else {
+ // !mergedUsed
+ if !canMerge16(cura, curb) {
+ if cura.start < curb.start {
+ m = append(m, cura)
+ na++
+ } else {
+ m = append(m, curb)
+ nb++
+ }
+ } else {
+ merged = mergeInterval16s(cura, curb)
+ mergedUsed = true
+ na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1)
+ nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1)
+ }
+ }
+ }
+ var aDone, bDone bool
+ if na >= alim {
+ aDone = true
+ }
+ if nb >= blim {
+ bDone = true
+ }
+ // finish by merging anything remaining into merged we can:
+ if mergedUsed {
+ if !aDone {
+ aAdds:
+ for na < alim {
+ cura = rc.iv[na]
+ if canMerge16(cura, merged) {
+ merged = mergeInterval16s(cura, merged)
+ na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1)
+ } else {
+ break aAdds
+ }
+ }
+
+ }
+
+ if !bDone {
+ bAdds:
+ for nb < blim {
+ curb = b.iv[nb]
+ if canMerge16(curb, merged) {
+ merged = mergeInterval16s(curb, merged)
+ nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1)
+ } else {
+ break bAdds
+ }
+ }
+
+ }
+
+ m = append(m, merged)
+ }
+ if na < alim {
+ m = append(m, rc.iv[na:]...)
+ }
+ if nb < blim {
+ m = append(m, b.iv[nb:]...)
+ }
+
+ res := &runContainer16{iv: m}
+ return res
+}
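+
+// A minimal sketch of union (hypothetical values):
+//
+//	a := newRunContainer16FromVals(true, 1, 2, 3)
+//	b := newRunContainer16FromVals(true, 3, 4, 10)
+//	u := a.union(b)
+//	// u.iv is {[1, 4], [10, 10]}; a and b are left unchanged.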
+
+// unionCardinality returns the cardinality of the merger of two runContainer16s, the union of rc and b.
+func (rc *runContainer16) unionCardinality(b *runContainer16) uint64 {
+
+ // rc is also known as 'a' here, but golint insisted we
+ // call it rc for consistency with the rest of the methods.
+ answer := uint64(0)
+
+ alim := int64(len(rc.iv))
+ blim := int64(len(b.iv))
+
+ var na int64 // next from a
+ var nb int64 // next from b
+
+ // merged holds the current merge output, which might
+ // get additional merges before being appended to m.
+ var merged interval16
+ var mergedUsed bool // is merged being used at the moment?
+
+ var cura interval16 // currently considering this interval16 from a
+ var curb interval16 // currently considering this interval16 from b
+
+ pass := 0
+ for na < alim && nb < blim {
+ pass++
+ cura = rc.iv[na]
+ curb = b.iv[nb]
+
+ if mergedUsed {
+ mergedUpdated := false
+ if canMerge16(cura, merged) {
+ merged = mergeInterval16s(cura, merged)
+ na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1)
+ mergedUpdated = true
+ }
+ if canMerge16(curb, merged) {
+ merged = mergeInterval16s(curb, merged)
+ nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1)
+ mergedUpdated = true
+ }
+ if !mergedUpdated {
+ // we know that merged is disjoint from cura and curb
+ //m = append(m, merged)
+ answer += uint64(merged.last()) - uint64(merged.start) + 1
+ mergedUsed = false
+ }
+ continue
+
+ } else {
+ // !mergedUsed
+ if !canMerge16(cura, curb) {
+ if cura.start < curb.start {
+ answer += uint64(cura.last()) - uint64(cura.start) + 1
+ //m = append(m, cura)
+ na++
+ } else {
+ answer += uint64(curb.last()) - uint64(curb.start) + 1
+ //m = append(m, curb)
+ nb++
+ }
+ } else {
+ merged = mergeInterval16s(cura, curb)
+ mergedUsed = true
+ na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1)
+ nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1)
+ }
+ }
+ }
+ var aDone, bDone bool
+ if na >= alim {
+ aDone = true
+ }
+ if nb >= blim {
+ bDone = true
+ }
+ // finish by merging anything remaining into merged we can:
+ if mergedUsed {
+ if !aDone {
+ aAdds:
+ for na < alim {
+ cura = rc.iv[na]
+ if canMerge16(cura, merged) {
+ merged = mergeInterval16s(cura, merged)
+ na = rc.indexOfIntervalAtOrAfter(int64(merged.last())+1, na+1)
+ } else {
+ break aAdds
+ }
+ }
+
+ }
+
+ if !bDone {
+ bAdds:
+ for nb < blim {
+ curb = b.iv[nb]
+ if canMerge16(curb, merged) {
+ merged = mergeInterval16s(curb, merged)
+ nb = b.indexOfIntervalAtOrAfter(int64(merged.last())+1, nb+1)
+ } else {
+ break bAdds
+ }
+ }
+
+ }
+
+ //m = append(m, merged)
+ answer += uint64(merged.last()) - uint64(merged.start) + 1
+ }
+ for _, r := range rc.iv[na:] {
+ answer += uint64(r.last()) - uint64(r.start) + 1
+ }
+ for _, r := range b.iv[nb:] {
+ answer += uint64(r.last()) - uint64(r.start) + 1
+ }
+ return answer
+}
+
+// indexOfIntervalAtOrAfter is a helper for union.
+func (rc *runContainer16) indexOfIntervalAtOrAfter(key int64, startIndex int64) int64 {
+ rc.myOpts.startIndex = startIndex
+ rc.myOpts.endxIndex = 0
+
+ w, already, _ := rc.search(key, &rc.myOpts)
+ if already {
+ return w
+ }
+ return w + 1
+}
+
+// intersect returns a new runContainer16 holding the
+// intersection of rc (also known as 'a') and b.
+func (rc *runContainer16) intersect(b *runContainer16) *runContainer16 {
+
+ a := rc
+ numa := int64(len(a.iv))
+ numb := int64(len(b.iv))
+ res := &runContainer16{}
+ if numa == 0 || numb == 0 {
+ return res
+ }
+
+ if numa == 1 && numb == 1 {
+ if !haveOverlap16(a.iv[0], b.iv[0]) {
+ return res
+ }
+ }
+
+ var output []interval16
+
+ var acuri int64
+ var bcuri int64
+
+ astart := int64(a.iv[acuri].start)
+ bstart := int64(b.iv[bcuri].start)
+
+ var intersection interval16
+ var leftoverstart int64
+ var isOverlap, isLeftoverA, isLeftoverB bool
+ var done bool
+toploop:
+ for acuri < numa && bcuri < numb {
+
+ isOverlap, isLeftoverA, isLeftoverB, leftoverstart, intersection =
+ intersectWithLeftover16(astart, int64(a.iv[acuri].last()), bstart, int64(b.iv[bcuri].last()))
+
+ if !isOverlap {
+ switch {
+ case astart < bstart:
+ acuri, done = a.findNextIntervalThatIntersectsStartingFrom(acuri+1, bstart)
+ if done {
+ break toploop
+ }
+ astart = int64(a.iv[acuri].start)
+
+ case astart > bstart:
+ bcuri, done = b.findNextIntervalThatIntersectsStartingFrom(bcuri+1, astart)
+ if done {
+ break toploop
+ }
+ bstart = int64(b.iv[bcuri].start)
+
+ //default:
+ // panic("impossible that astart == bstart, since !isOverlap")
+ }
+
+ } else {
+ // isOverlap
+ output = append(output, intersection)
+ switch {
+ case isLeftoverA:
+ // note that we change astart without advancing acuri,
+ // since we need to capture any 2ndary intersections with a.iv[acuri]
+ astart = leftoverstart
+ bcuri++
+ if bcuri >= numb {
+ break toploop
+ }
+ bstart = int64(b.iv[bcuri].start)
+ case isLeftoverB:
+ // note that we change bstart without advancing bcuri,
+ // since we need to capture any 2ndary intersections with b.iv[bcuri]
+ bstart = leftoverstart
+ acuri++
+ if acuri >= numa {
+ break toploop
+ }
+ astart = int64(a.iv[acuri].start)
+ default:
+ // neither had leftover, both completely consumed
+ // optionally, assert for sanity:
+ //if a.iv[acuri].endx != b.iv[bcuri].endx {
+ // panic("huh? should only be possible that endx agree now!")
+ //}
+
+ // advance to next a interval
+ acuri++
+ if acuri >= numa {
+ break toploop
+ }
+ astart = int64(a.iv[acuri].start)
+
+ // advance to next b interval
+ bcuri++
+ if bcuri >= numb {
+ break toploop
+ }
+ bstart = int64(b.iv[bcuri].start)
+ }
+ }
+ } // end for toploop
+
+ if len(output) == 0 {
+ return res
+ }
+
+ res.iv = output
+ return res
+}
+
+// intersectCardinality returns the cardinality of the
+// intersection of rc (also known as 'a') and b.
+func (rc *runContainer16) intersectCardinality(b *runContainer16) int64 {
+ answer := int64(0)
+
+ a := rc
+ numa := int64(len(a.iv))
+ numb := int64(len(b.iv))
+ if numa == 0 || numb == 0 {
+ return 0
+ }
+
+ if numa == 1 && numb == 1 {
+ if !haveOverlap16(a.iv[0], b.iv[0]) {
+ return 0
+ }
+ }
+
+ var acuri int64
+ var bcuri int64
+
+ astart := int64(a.iv[acuri].start)
+ bstart := int64(b.iv[bcuri].start)
+
+ var intersection interval16
+ var leftoverstart int64
+ var isOverlap, isLeftoverA, isLeftoverB bool
+ var done bool
+ pass := 0
+toploop:
+ for acuri < numa && bcuri < numb {
+ pass++
+
+ isOverlap, isLeftoverA, isLeftoverB, leftoverstart, intersection =
+ intersectWithLeftover16(astart, int64(a.iv[acuri].last()), bstart, int64(b.iv[bcuri].last()))
+
+ if !isOverlap {
+ switch {
+ case astart < bstart:
+ acuri, done = a.findNextIntervalThatIntersectsStartingFrom(acuri+1, bstart)
+ if done {
+ break toploop
+ }
+ astart = int64(a.iv[acuri].start)
+
+ case astart > bstart:
+ bcuri, done = b.findNextIntervalThatIntersectsStartingFrom(bcuri+1, astart)
+ if done {
+ break toploop
+ }
+ bstart = int64(b.iv[bcuri].start)
+
+ //default:
+ // panic("impossible that astart == bstart, since !isOverlap")
+ }
+
+ } else {
+ // isOverlap
+ answer += int64(intersection.last()) - int64(intersection.start) + 1
+ switch {
+ case isLeftoverA:
+ // note that we change astart without advancing acuri,
+ // since we need to capture any 2ndary intersections with a.iv[acuri]
+ astart = leftoverstart
+ bcuri++
+ if bcuri >= numb {
+ break toploop
+ }
+ bstart = int64(b.iv[bcuri].start)
+ case isLeftoverB:
+ // note that we change bstart without advancing bcuri,
+ // since we need to capture any 2ndary intersections with b.iv[bcuri]
+ bstart = leftoverstart
+ acuri++
+ if acuri >= numa {
+ break toploop
+ }
+ astart = int64(a.iv[acuri].start)
+ default:
+ // neither had leftover, both completely consumed
+ // optionally, assert for sanity:
+ //if a.iv[acuri].endx != b.iv[bcuri].endx {
+ // panic("huh? should only be possible that endx agree now!")
+ //}
+
+ // advance to next a interval
+ acuri++
+ if acuri >= numa {
+ break toploop
+ }
+ astart = int64(a.iv[acuri].start)
+
+ // advance to next b interval
+ bcuri++
+ if bcuri >= numb {
+ break toploop
+ }
+ bstart = int64(b.iv[bcuri].start)
+ }
+ }
+ } // end for toploop
+
+ return answer
+}
+
+// contains returns true iff key is in the container.
+func (rc *runContainer16) contains(key uint16) bool {
+ _, in, _ := rc.search(int64(key), nil)
+ return in
+}
+
+// numIntervals returns the count of intervals in the container.
+func (rc *runContainer16) numIntervals() int {
+ return len(rc.iv)
+}
+
+// search returns alreadyPresent to indicate if the
+// key is already in one of our interval16s.
+//
+// If key is alreadyPresent, then whichInterval16 tells
+// you where.
+//
+// If key is not already present, then whichInterval16 is
+// set as follows:
+//
+// a) whichInterval16 == len(rc.iv)-1 if key is beyond our
+// last interval16 in rc.iv;
+//
+// b) whichInterval16 == -1 if key is before our first
+// interval16 in rc.iv;
+//
+// c) whichInterval16 is set to the minimum index of rc.iv
+// which comes strictly before the key;
+// so rc.iv[whichInterval16].last < key,
+// and if whichInterval16+1 exists, then key < rc.iv[whichInterval16+1].start
+// (Note that whichInterval16+1 won't exist when
+// whichInterval16 is the last interval.)
+//
+// runContainer16.search always returns whichInterval16 < len(rc.iv).
+//
+// If not nil, opts can be used to further restrict
+// the search space.
+//
+func (rc *runContainer16) search(key int64, opts *searchOptions) (whichInterval16 int64, alreadyPresent bool, numCompares int) {
+ n := int64(len(rc.iv))
+ if n == 0 {
+ return -1, false, 0
+ }
+
+ startIndex := int64(0)
+ endxIndex := n
+ if opts != nil {
+ startIndex = opts.startIndex
+
+ // let endxIndex == 0 mean no effect
+ if opts.endxIndex > 0 {
+ endxIndex = opts.endxIndex
+ }
+ }
+
+ // sort.Search returns the smallest index i
+ // in [0, n) at which f(i) is true, assuming that on the range [0, n),
+ // f(i) == true implies f(i+1) == true.
+ // If there is no such index, Search returns n.
+
+ // For correctness, this began as verbatim snippet from
+ // sort.Search in the Go standard lib.
+ // We inline our comparison function for speed, and
+ // annotate with numCompares
+ // to observe and test that extra bounds are utilized.
+ i, j := startIndex, endxIndex
+ for i < j {
+ h := i + (j-i)/2 // avoid overflow when computing h as the bisector
+ // i <= h < j
+ numCompares++
+ if !(key < int64(rc.iv[h].start)) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ below := i
+ // end std lib snippet.
+
+ // The above is a simple in-lining and annotation of:
+ /* below := sort.Search(n,
+ func(i int) bool {
+ return key < rc.iv[i].start
+ })
+ */
+ whichInterval16 = below - 1
+
+ if below == n {
+ // all falses => key is >= start of all interval16s
+ // ... so does it belong to the last interval16?
+ if key < int64(rc.iv[n-1].last())+1 {
+ // yes, it belongs to the last interval16
+ alreadyPresent = true
+ return
+ }
+ // no, it is beyond the last interval16.
+		// leave alreadyPresent = false
+ return
+ }
+
+ // INVAR: key is below rc.iv[below]
+ if below == 0 {
+		// key is before the first interval16.
+ // leave alreadyPresent = false
+ return
+ }
+
+ // INVAR: key is >= rc.iv[below-1].start and
+ // key is < rc.iv[below].start
+
+ // is key in below-1 interval16?
+ if key >= int64(rc.iv[below-1].start) && key < int64(rc.iv[below-1].last())+1 {
+ // yes, it is. key is in below-1 interval16.
+ alreadyPresent = true
+ return
+ }
+
+	// INVAR: key > rc.iv[below-1].last() && key < rc.iv[below].start
+ // leave alreadyPresent = false
+ return
+}
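+
+// For illustration, with rc.iv = {[10, 12], [20, 22]} (hypothetical),
+// search returns (whichInterval16, alreadyPresent) as follows:
+//
+//	rc.search(11, nil) => (0, true)   // inside the first interval16
+//	rc.search(15, nil) => (0, false)  // in the gap after interval16 0
+//	rc.search(5, nil)  => (-1, false) // before the first interval16
+//	rc.search(30, nil) => (1, false)  // beyond the last interval16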
+
+// cardinality returns the count of the integers stored in the
+// runContainer16.
+func (rc *runContainer16) cardinality() int64 {
+ if len(rc.iv) == 0 {
+ rc.card = 0
+ return 0
+ }
+ if rc.card > 0 {
+ return rc.card // already cached
+ }
+ // have to compute it
+ var n int64
+ for _, p := range rc.iv {
+ n += p.runlen()
+ }
+ rc.card = n // cache it
+ return n
+}
+
+// AsSlice decompresses the contents into a []uint16 slice.
+func (rc *runContainer16) AsSlice() []uint16 {
+ s := make([]uint16, rc.cardinality())
+ j := 0
+ for _, p := range rc.iv {
+ for i := p.start; i <= p.last(); i++ {
+ s[j] = i
+ j++
+ }
+ }
+ return s
+}
+
+// newRunContainer16 creates an empty run container.
+func newRunContainer16() *runContainer16 {
+ return &runContainer16{}
+}
+
+// newRunContainer16CopyIv creates a run container, initializing
+// with a copy of the supplied iv slice.
+//
+func newRunContainer16CopyIv(iv []interval16) *runContainer16 {
+ rc := &runContainer16{
+ iv: make([]interval16, len(iv)),
+ }
+ copy(rc.iv, iv)
+ return rc
+}
+
+func (rc *runContainer16) Clone() *runContainer16 {
+ rc2 := newRunContainer16CopyIv(rc.iv)
+ return rc2
+}
+
+// newRunContainer16TakeOwnership returns a new runContainer16
+// backed by the provided iv slice, which we will
+// assume exclusive control over from now on.
+//
+func newRunContainer16TakeOwnership(iv []interval16) *runContainer16 {
+ rc := &runContainer16{
+ iv: iv,
+ }
+ return rc
+}
+
+const baseRc16Size = int(unsafe.Sizeof(runContainer16{}))
+const perIntervalRc16Size = int(unsafe.Sizeof(interval16{}))
+
+const baseDiskRc16Size = int(unsafe.Sizeof(uint16(0)))
+
+// see also runContainer16SerializedSizeInBytes(numRuns int) int
+
+// getSizeInBytes returns the number of bytes of memory
+// required by this runContainer16.
+func (rc *runContainer16) getSizeInBytes() int {
+ return perIntervalRc16Size*len(rc.iv) + baseRc16Size
+}
+
+// runContainer16SerializedSizeInBytes returns the number of bytes of disk
+// required to hold numRuns in a runContainer16.
+func runContainer16SerializedSizeInBytes(numRuns int) int {
+ return perIntervalRc16Size*numRuns + baseDiskRc16Size
+}
+
+// Add adds a single value k to the set.
+func (rc *runContainer16) Add(k uint16) (wasNew bool) {
+ // TODO comment from runContainer16.java:
+ // it might be better and simpler to do return
+ // toBitmapOrArrayContainer(getCardinality()).add(k)
+ // but note that some unit tests use this method to build up test
+ // runcontainers without calling runOptimize
+
+ k64 := int64(k)
+
+ index, present, _ := rc.search(k64, nil)
+ if present {
+ return // already there
+ }
+ wasNew = true
+
+ // increment card if it is cached already
+ if rc.card > 0 {
+ rc.card++
+ }
+ n := int64(len(rc.iv))
+ if index == -1 {
+ // we may need to extend the first run
+ if n > 0 {
+ if rc.iv[0].start == k+1 {
+ rc.iv[0].start = k
+ rc.iv[0].length++
+ return
+ }
+ }
+ // nope, k stands alone, starting the new first interval16.
+ rc.iv = append([]interval16{newInterval16Range(k, k)}, rc.iv...)
+ return
+ }
+
+	// are we off the end? handle index == n-1:
+ if index >= n-1 {
+ if int64(rc.iv[n-1].last())+1 == k64 {
+ rc.iv[n-1].length++
+ return
+ }
+ rc.iv = append(rc.iv, newInterval16Range(k, k))
+ return
+ }
+
+ // INVAR: index and index+1 both exist, and k goes between them.
+ //
+ // Now: add k into the middle,
+ // possibly fusing with index or index+1 interval16
+ // and possibly resulting in fusing of two interval16s
+ // that had a one integer gap.
+
+ left := index
+ right := index + 1
+
+ // are we fusing left and right by adding k?
+ if int64(rc.iv[left].last())+1 == k64 && int64(rc.iv[right].start) == k64+1 {
+ // fuse into left
+ rc.iv[left].length = rc.iv[right].last() - rc.iv[left].start
+ // remove redundant right
+ rc.iv = append(rc.iv[:left+1], rc.iv[right+1:]...)
+ return
+ }
+
+ // are we an addition to left?
+ if int64(rc.iv[left].last())+1 == k64 {
+ // yes
+ rc.iv[left].length++
+ return
+ }
+
+ // are we an addition to right?
+ if int64(rc.iv[right].start) == k64+1 {
+ // yes
+ rc.iv[right].start = k
+ rc.iv[right].length++
+ return
+ }
+
+ // k makes a standalone new interval16, inserted in the middle
+ tail := append([]interval16{newInterval16Range(k, k)}, rc.iv[right:]...)
+ rc.iv = append(rc.iv[:left+1], tail...)
+ return
+}
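+
+// For example (hypothetical state): if rc.iv is {[3, 4], [6, 9]},
+// then rc.Add(5) fuses the two runs into the single interval [3, 9],
+// while rc.Add(11) appends the standalone run [11, 11].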
+
+//msgp:ignore runIterator
+
+// runIterator16 advice: you must call next() at least once
+// before calling cur(); and you should call hasNext()
+// before calling next() to ensure there are contents.
+type runIterator16 struct {
+ rc *runContainer16
+ curIndex int64
+ curPosInIndex uint16
+ curSeq int64
+}
+
+// newRunIterator16 returns a new iterator over the contents of rc.
+func (rc *runContainer16) newRunIterator16() *runIterator16 {
+ return &runIterator16{rc: rc, curIndex: -1}
+}
+
+// hasNext returns false if calling next would panic. It
+// returns true when there is at least one more value
+// available in the iteration sequence.
+func (ri *runIterator16) hasNext() bool {
+ if len(ri.rc.iv) == 0 {
+ return false
+ }
+ if ri.curIndex == -1 {
+ return true
+ }
+ return ri.curSeq+1 < ri.rc.cardinality()
+}
+
+// cur returns the current value pointed to by the iterator.
+func (ri *runIterator16) cur() uint16 {
+ return ri.rc.iv[ri.curIndex].start + ri.curPosInIndex
+}
+
+// next returns the next value in the iteration sequence.
+func (ri *runIterator16) next() uint16 {
+ if !ri.hasNext() {
+ panic("no Next available")
+ }
+ if ri.curIndex >= int64(len(ri.rc.iv)) {
+ panic("runIterator.Next() going beyond what is available")
+ }
+ if ri.curIndex == -1 {
+ // first time is special
+ ri.curIndex = 0
+ } else {
+ ri.curPosInIndex++
+ if int64(ri.rc.iv[ri.curIndex].start)+int64(ri.curPosInIndex) == int64(ri.rc.iv[ri.curIndex].last())+1 {
+ ri.curPosInIndex = 0
+ ri.curIndex++
+ }
+ ri.curSeq++
+ }
+ return ri.cur()
+}
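+
+// A minimal iteration sketch (hypothetical container rc):
+//
+//	it := rc.newRunIterator16()
+//	for it.hasNext() {
+//		v := it.next()
+//		_ = v // consume v
+//	}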
+
+// remove removes the element that the iterator
+// is on from the run container. You can use
+// cur() if you want to double-check what is about
+// to be deleted.
+func (ri *runIterator16) remove() uint16 {
+ n := ri.rc.cardinality()
+ if n == 0 {
+ panic("runIterator.Remove called on empty runContainer16")
+ }
+ cur := ri.cur()
+
+ ri.rc.deleteAt(&ri.curIndex, &ri.curPosInIndex, &ri.curSeq)
+ return cur
+}
+
+type manyRunIterator16 struct {
+ rc *runContainer16
+ curIndex int64
+ curPosInIndex uint16
+ curSeq int64
+}
+
+func (rc *runContainer16) newManyRunIterator16() *manyRunIterator16 {
+ return &manyRunIterator16{rc: rc, curIndex: -1}
+}
+
+func (ri *manyRunIterator16) hasNext() bool {
+ if len(ri.rc.iv) == 0 {
+ return false
+ }
+ if ri.curIndex == -1 {
+ return true
+ }
+ return ri.curSeq+1 < ri.rc.cardinality()
+}
+
+// hs are the high bits to include to avoid needing to reiterate over the buffer in nextMany.
+func (ri *manyRunIterator16) nextMany(hs uint32, buf []uint32) int {
+ n := 0
+ if !ri.hasNext() {
+ return n
+ }
+ // start and end are inclusive
+ for n < len(buf) {
+ if ri.curIndex == -1 || int(ri.rc.iv[ri.curIndex].length-ri.curPosInIndex) <= 0 {
+ ri.curPosInIndex = 0
+ ri.curIndex++
+ if ri.curIndex == int64(len(ri.rc.iv)) {
+ break
+ }
+ buf[n] = uint32(ri.rc.iv[ri.curIndex].start) | hs
+ if ri.curIndex != 0 {
+ ri.curSeq += 1
+ }
+ n += 1
+			// not strictly necessary due to the len(buf)-n min check, but saves some work
+ continue
+ }
+ // add as many as you can from this seq
+ moreVals := minOfInt(int(ri.rc.iv[ri.curIndex].length-ri.curPosInIndex), len(buf)-n)
+
+ base := uint32(ri.rc.iv[ri.curIndex].start+ri.curPosInIndex+1) | hs
+
+ // allows BCE
+ buf2 := buf[n : n+moreVals]
+ for i := range buf2 {
+ buf2[i] = base + uint32(i)
+ }
+
+ // update values
+ ri.curPosInIndex += uint16(moreVals) //moreVals always fits in uint16
+ ri.curSeq += int64(moreVals)
+ n += moreVals
+ }
+ return n
+}
+
+// removeKey removes key from the container.
+func (rc *runContainer16) removeKey(key uint16) (wasPresent bool) {
+
+ var index int64
+ var curSeq int64
+ index, wasPresent, _ = rc.search(int64(key), nil)
+ if !wasPresent {
+ return // already removed, nothing to do.
+ }
+ pos := key - rc.iv[index].start
+ rc.deleteAt(&index, &pos, &curSeq)
+ return
+}
+
+// internal helper functions
+
+func (rc *runContainer16) deleteAt(curIndex *int64, curPosInIndex *uint16, curSeq *int64) {
+ rc.card--
+ *curSeq--
+ ci := *curIndex
+ pos := *curPosInIndex
+
+ // are we first, last, or in the middle of our interval16?
+ switch {
+ case pos == 0:
+ if int64(rc.iv[ci].length) == 0 {
+ // our interval disappears
+ rc.iv = append(rc.iv[:ci], rc.iv[ci+1:]...)
+ // curIndex stays the same, since the delete did
+ // the advance for us.
+ *curPosInIndex = 0
+ } else {
+ rc.iv[ci].start++ // no longer overflowable
+ rc.iv[ci].length--
+ }
+ case pos == rc.iv[ci].length:
+		// we are deleting the last element of this interval16
+		rc.iv[ci].length--
+		// our interval16 cannot disappear here, else we would have hit the pos == 0 case above.
+ *curPosInIndex--
+ // if we leave *curIndex alone, then Next() will work properly even after the delete.
+ default:
+ //middle
+ // split into two, adding an interval16
+ new0 := newInterval16Range(rc.iv[ci].start, rc.iv[ci].start+*curPosInIndex-1)
+
+ new1start := int64(rc.iv[ci].start+*curPosInIndex) + 1
+ if new1start > int64(MaxUint16) {
+ panic("overflow?!?!")
+ }
+ new1 := newInterval16Range(uint16(new1start), rc.iv[ci].last())
+ tail := append([]interval16{new0, new1}, rc.iv[ci+1:]...)
+ rc.iv = append(rc.iv[:ci], tail...)
+ // update curIndex and curPosInIndex
+ *curIndex++
+ *curPosInIndex = 0
+ }
+
+}
+
+func have4Overlap16(astart, alast, bstart, blast int64) bool {
+ if alast+1 <= bstart {
+ return false
+ }
+ return blast+1 > astart
+}
+
+func intersectWithLeftover16(astart, alast, bstart, blast int64) (isOverlap, isLeftoverA, isLeftoverB bool, leftoverstart int64, intersection interval16) {
+ if !have4Overlap16(astart, alast, bstart, blast) {
+ return
+ }
+ isOverlap = true
+
+ // do the intersection:
+ if bstart > astart {
+ intersection.start = uint16(bstart)
+ } else {
+ intersection.start = uint16(astart)
+ }
+
+ switch {
+ case blast < alast:
+ isLeftoverA = true
+ leftoverstart = blast + 1
+ intersection.length = uint16(blast) - intersection.start
+ case alast < blast:
+ isLeftoverB = true
+ leftoverstart = alast + 1
+ intersection.length = uint16(alast) - intersection.start
+ default:
+ // alast == blast
+ intersection.length = uint16(alast) - intersection.start
+ }
+
+ return
+}
+
+func (rc *runContainer16) findNextIntervalThatIntersectsStartingFrom(startIndex int64, key int64) (index int64, done bool) {
+
+ rc.myOpts.startIndex = startIndex
+ rc.myOpts.endxIndex = 0
+
+ w, _, _ := rc.search(key, &rc.myOpts)
+ // rc.search always returns w < len(rc.iv)
+ if w < startIndex {
+ // not found and comes before lower bound startIndex,
+ // so just use the lower bound.
+ if startIndex == int64(len(rc.iv)) {
+			// startIndex is already past the end of rc.iv, so we are done.
+ return startIndex, true
+ }
+ return startIndex, false
+ }
+
+ return w, false
+}
+
+func sliceToString16(m []interval16) string {
+ s := ""
+ for i := range m {
+ s += fmt.Sprintf("%v: %s, ", i, m[i])
+ }
+ return s
+}
+
+// selectInt16 returns the j-th value in the container.
+// We panic if j is out of bounds.
+func (rc *runContainer16) selectInt16(j uint16) int {
+ n := rc.cardinality()
+	if int64(j) >= n {
+ panic(fmt.Sprintf("Cannot select %v since Cardinality is %v", j, n))
+ }
+
+ var offset int64
+ for k := range rc.iv {
+		nextOffset := offset + rc.iv[k].runlen()
+ if nextOffset > int64(j) {
+ return int(int64(rc.iv[k].start) + (int64(j) - offset))
+ }
+ offset = nextOffset
+ }
+ panic(fmt.Sprintf("Cannot select %v since Cardinality is %v", j, n))
+}
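+
+// For example (hypothetical state): with rc.iv = {[10, 12]},
+// rc.selectInt16(0) == 10 and rc.selectInt16(2) == 12, while
+// rc.selectInt16(3) panics because only three values are present.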
+
+// helper for invert
+func (rc *runContainer16) invertlastInterval(origin uint16, lastIdx int) []interval16 {
+ cur := rc.iv[lastIdx]
+ if cur.last() == MaxUint16 {
+ if cur.start == origin {
+ return nil // empty container
+ }
+ return []interval16{newInterval16Range(origin, cur.start-1)}
+ }
+ if cur.start == origin {
+ return []interval16{newInterval16Range(cur.last()+1, MaxUint16)}
+ }
+ // invert splits
+ return []interval16{
+ newInterval16Range(origin, cur.start-1),
+ newInterval16Range(cur.last()+1, MaxUint16),
+ }
+}
+
+// invert returns a new container (not inplace) that is
+// the inversion of rc: for each bit b in rc, the
+// returned value has !b.
+func (rc *runContainer16) invert() *runContainer16 {
+ ni := len(rc.iv)
+ var m []interval16
+ switch ni {
+ case 0:
+ return &runContainer16{iv: []interval16{newInterval16Range(0, MaxUint16)}}
+ case 1:
+ return &runContainer16{iv: rc.invertlastInterval(0, 0)}
+ }
+ var invstart int64
+ ult := ni - 1
+ for i, cur := range rc.iv {
+ if i == ult {
+			// invertlastInterval will add both intervals (b) and (c) in
+ // diagram below.
+ m = append(m, rc.invertlastInterval(uint16(invstart), i)...)
+ break
+ }
+ // INVAR: i and cur are not the last interval, there is a next at i+1
+ //
+ // ........[cur.start, cur.last] ...... [next.start, next.last]....
+ // ^ ^ ^
+ // (a) (b) (c)
+ //
+ // Now: we add interval (a); but if (a) is empty, for cur.start==0, we skip it.
+ if cur.start > 0 {
+ m = append(m, newInterval16Range(uint16(invstart), cur.start-1))
+ }
+ invstart = int64(cur.last() + 1)
+ }
+ return &runContainer16{iv: m}
+}
+
+func (iv interval16) equal(b interval16) bool {
+ return iv.start == b.start && iv.length == b.length
+}
+
+func (iv interval16) isSuperSetOf(b interval16) bool {
+ return iv.start <= b.start && b.last() <= iv.last()
+}
+
+func (iv interval16) subtractInterval(del interval16) (left []interval16, delcount int64) {
+ isect, isEmpty := intersectInterval16s(iv, del)
+
+ if isEmpty {
+ return nil, 0
+ }
+ if del.isSuperSetOf(iv) {
+ return nil, iv.runlen()
+ }
+
+ switch {
+ case isect.start > iv.start && isect.last() < iv.last():
+ new0 := newInterval16Range(iv.start, isect.start-1)
+ new1 := newInterval16Range(isect.last()+1, iv.last())
+ return []interval16{new0, new1}, isect.runlen()
+ case isect.start == iv.start:
+ return []interval16{newInterval16Range(isect.last()+1, iv.last())}, isect.runlen()
+ default:
+ return []interval16{newInterval16Range(iv.start, isect.start-1)}, isect.runlen()
+ }
+}
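+
+// For example: subtracting del = [12, 15] from iv = [10, 20] splits
+// the interval into {[10, 11], [16, 20]} with delcount 4, while
+// subtracting del = [0, 30] deletes it entirely, returning (nil, 11).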
+
+func (rc *runContainer16) isubtract(del interval16) {
+ origiv := make([]interval16, len(rc.iv))
+ copy(origiv, rc.iv)
+ n := int64(len(rc.iv))
+ if n == 0 {
+ return // already done.
+ }
+
+ _, isEmpty := intersectInterval16s(newInterval16Range(rc.iv[0].start, rc.iv[n-1].last()), del)
+ if isEmpty {
+ return // done
+ }
+
+ // INVAR there is some intersection between rc and del
+ istart, startAlready, _ := rc.search(int64(del.start), nil)
+ ilast, lastAlready, _ := rc.search(int64(del.last()), nil)
+ rc.card = -1
+ if istart == -1 {
+ if ilast == n-1 && !lastAlready {
+ rc.iv = nil
+ return
+ }
+ }
+ // some intervals will remain
+ switch {
+ case startAlready && lastAlready:
+ res0, _ := rc.iv[istart].subtractInterval(del)
+
+ // would overwrite values in iv b/c res0 can have len 2. so
+ // write to origiv instead.
+ lost := 1 + ilast - istart
+ changeSize := int64(len(res0)) - lost
+ newSize := int64(len(rc.iv)) + changeSize
+
+ // rc.iv = append(pre, caboose...)
+ // return
+
+ if ilast != istart {
+ res1, _ := rc.iv[ilast].subtractInterval(del)
+ res0 = append(res0, res1...)
+ changeSize = int64(len(res0)) - lost
+ newSize = int64(len(rc.iv)) + changeSize
+ }
+ switch {
+ case changeSize < 0:
+ // shrink
+ copy(rc.iv[istart+int64(len(res0)):], rc.iv[ilast+1:])
+ copy(rc.iv[istart:istart+int64(len(res0))], res0)
+ rc.iv = rc.iv[:newSize]
+ return
+ case changeSize == 0:
+ // stay the same
+ copy(rc.iv[istart:istart+int64(len(res0))], res0)
+ return
+ default:
+ // changeSize > 0 is only possible when ilast == istart.
+ // Hence we now know: changeSize == 1 and len(res0) == 2
+ rc.iv = append(rc.iv, interval16{})
+ // len(rc.iv) is correct now, no need to rc.iv = rc.iv[:newSize]
+
+ // copy the tail into place
+ copy(rc.iv[ilast+2:], rc.iv[ilast+1:])
+ // copy the new item(s) into place
+ copy(rc.iv[istart:istart+2], res0)
+ return
+ }
+
+ case !startAlready && !lastAlready:
+ // we get to discard whole intervals
+
+ // from the search() definition:
+
+ // if del.start is not present, then istart is
+ // set as follows:
+ //
+ // a) istart == n-1 if del.start is beyond our
+ // last interval16 in rc.iv;
+ //
+ // b) istart == -1 if del.start is before our first
+ // interval16 in rc.iv;
+ //
+ // c) istart is set to the minimum index of rc.iv
+ // which comes strictly before the del.start;
+ // so del.start > rc.iv[istart].last,
+	//    and if istart+1 exists, then del.start < rc.iv[istart+1].start
+
+ // if del.last is not present, then ilast is
+ // set as follows:
+ //
+ // a) ilast == n-1 if del.last is beyond our
+ // last interval16 in rc.iv;
+ //
+ // b) ilast == -1 if del.last is before our first
+ // interval16 in rc.iv;
+ //
+ // c) ilast is set to the minimum index of rc.iv
+ // which comes strictly before the del.last;
+ // so del.last > rc.iv[ilast].last,
+ // and if ilast+1 exists, then del.last < rc.iv[ilast+1].start
+
+ // INVAR: istart >= 0
+ pre := rc.iv[:istart+1]
+ if ilast == n-1 {
+ rc.iv = pre
+ return
+ }
+ // INVAR: ilast < n-1
+ lost := ilast - istart
+ changeSize := -lost
+ newSize := int64(len(rc.iv)) + changeSize
+ if changeSize != 0 {
+ copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:])
+ }
+ rc.iv = rc.iv[:newSize]
+ return
+
+ case startAlready && !lastAlready:
+ // we can only shrink or stay the same size
+ // i.e. we either eliminate the whole interval,
+ // or just cut off the right side.
+ res0, _ := rc.iv[istart].subtractInterval(del)
+ if len(res0) > 0 {
+			// len(res0) must be 1
+ rc.iv[istart] = res0[0]
+ }
+ lost := 1 + (ilast - istart)
+ changeSize := int64(len(res0)) - lost
+ newSize := int64(len(rc.iv)) + changeSize
+ if changeSize != 0 {
+ copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:])
+ }
+ rc.iv = rc.iv[:newSize]
+ return
+
+ case !startAlready && lastAlready:
+ // we can only shrink or stay the same size
+ res1, _ := rc.iv[ilast].subtractInterval(del)
+ lost := ilast - istart
+ changeSize := int64(len(res1)) - lost
+ newSize := int64(len(rc.iv)) + changeSize
+ if changeSize != 0 {
+ // move the tail first to make room for res1
+ copy(rc.iv[ilast+1+changeSize:], rc.iv[ilast+1:])
+ }
+ copy(rc.iv[istart+1:], res1)
+ rc.iv = rc.iv[:newSize]
+ return
+ }
+}
+
+// compute rc minus b, and return the result as a new value (not inplace).
+// port of run_container_andnot from CRoaring...
+// https://github.com/RoaringBitmap/CRoaring/blob/master/src/containers/run.c#L435-L496
+func (rc *runContainer16) AndNotRunContainer16(b *runContainer16) *runContainer16 {
+
+ if len(b.iv) == 0 || len(rc.iv) == 0 {
+ return rc
+ }
+
+ dst := newRunContainer16()
+ apos := 0
+ bpos := 0
+
+ a := rc
+
+ astart := a.iv[apos].start
+ alast := a.iv[apos].last()
+ bstart := b.iv[bpos].start
+ blast := b.iv[bpos].last()
+
+ alen := len(a.iv)
+ blen := len(b.iv)
+
+ for apos < alen && bpos < blen {
+ switch {
+ case alast < bstart:
+ // output the first run
+ dst.iv = append(dst.iv, newInterval16Range(astart, alast))
+ apos++
+ if apos < alen {
+ astart = a.iv[apos].start
+ alast = a.iv[apos].last()
+ }
+ case blast < astart:
+			// advance past the second run
+ bpos++
+ if bpos < blen {
+ bstart = b.iv[bpos].start
+ blast = b.iv[bpos].last()
+ }
+ default:
+ // a: [ ]
+ // b: [ ]
+ // alast >= bstart
+ // blast >= astart
+ if astart < bstart {
+ dst.iv = append(dst.iv, newInterval16Range(astart, bstart-1))
+ }
+ if alast > blast {
+ astart = blast + 1
+ } else {
+ apos++
+ if apos < alen {
+ astart = a.iv[apos].start
+ alast = a.iv[apos].last()
+ }
+ }
+ }
+ }
+ if apos < alen {
+ dst.iv = append(dst.iv, newInterval16Range(astart, alast))
+ apos++
+ if apos < alen {
+ dst.iv = append(dst.iv, a.iv[apos:]...)
+ }
+ }
+
+ return dst
+}
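+
+// For example (hypothetical values): with rc = {[0, 5], [10, 15]}
+// and b = {[3, 12]}, rc.AndNotRunContainer16(b) returns
+// {[0, 2], [13, 15]}.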
+
+func (rc *runContainer16) numberOfRuns() (nr int) {
+ return len(rc.iv)
+}
+
+func (rc *runContainer16) containerType() contype {
+ return run16Contype
+}
+
+func (rc *runContainer16) equals16(srb *runContainer16) bool {
+ //p("both rc16")
+ // Check if the containers are the same object.
+ if rc == srb {
+ //p("same object")
+ return true
+ }
+
+ if len(srb.iv) != len(rc.iv) {
+ //p("iv len differ")
+ return false
+ }
+
+ for i, v := range rc.iv {
+ if v != srb.iv[i] {
+ //p("differ at iv i=%v, srb.iv[i]=%v, rc.iv[i]=%v", i, srb.iv[i], rc.iv[i])
+ return false
+ }
+ }
+ //p("all intervals same, returning true")
+ return true
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/rle16_gen.go b/vendor/github.com/RoaringBitmap/roaring/rle16_gen.go
new file mode 100644
index 0000000000..05bf4463f1
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/rle16_gen.go
@@ -0,0 +1,1126 @@
+package roaring
+
+// NOTE: THIS FILE WAS PRODUCED BY THE
+// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
+// DO NOT EDIT
+
+import "github.com/tinylib/msgp/msgp"
+
+// DecodeMsg implements msgp.Decodable
+func (z *addHelper16) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zbai uint32
+ zbai, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zbai > 0 {
+ zbai--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "runstart":
+ z.runstart, err = dc.ReadUint16()
+ if err != nil {
+ return
+ }
+ case "runlen":
+ z.runlen, err = dc.ReadUint16()
+ if err != nil {
+ return
+ }
+ case "actuallyAdded":
+ z.actuallyAdded, err = dc.ReadUint16()
+ if err != nil {
+ return
+ }
+ case "m":
+ var zcmr uint32
+ zcmr, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap(z.m) >= int(zcmr) {
+ z.m = (z.m)[:zcmr]
+ } else {
+ z.m = make([]interval16, zcmr)
+ }
+ for zxvk := range z.m {
+ var zajw uint32
+ zajw, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zajw > 0 {
+ zajw--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.m[zxvk].start, err = dc.ReadUint16()
+ if err != nil {
+ return
+ }
+ case "last":
+ z.m[zxvk].length, err = dc.ReadUint16()
+ z.m[zxvk].length -= z.m[zxvk].start
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "rc":
+ if dc.IsNil() {
+ err = dc.ReadNil()
+ if err != nil {
+ return
+ }
+ z.rc = nil
+ } else {
+ if z.rc == nil {
+ z.rc = new(runContainer16)
+ }
+ var zwht uint32
+ zwht, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zwht > 0 {
+ zwht--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "iv":
+ var zhct uint32
+ zhct, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap(z.rc.iv) >= int(zhct) {
+ z.rc.iv = (z.rc.iv)[:zhct]
+ } else {
+ z.rc.iv = make([]interval16, zhct)
+ }
+ for zbzg := range z.rc.iv {
+ var zcua uint32
+ zcua, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zcua > 0 {
+ zcua--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.rc.iv[zbzg].start, err = dc.ReadUint16()
+ if err != nil {
+ return
+ }
+ case "last":
+ z.rc.iv[zbzg].length, err = dc.ReadUint16()
+ z.rc.iv[zbzg].length -= z.rc.iv[zbzg].start
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "card":
+ z.rc.card, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *addHelper16) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 5
+ // write "runstart"
+ err = en.Append(0x85, 0xa8, 0x72, 0x75, 0x6e, 0x73, 0x74, 0x61, 0x72, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint16(z.runstart)
+ if err != nil {
+ return
+ }
+ // write "runlen"
+ err = en.Append(0xa6, 0x72, 0x75, 0x6e, 0x6c, 0x65, 0x6e)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint16(z.runlen)
+ if err != nil {
+ return
+ }
+ // write "actuallyAdded"
+ err = en.Append(0xad, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x6c, 0x79, 0x41, 0x64, 0x64, 0x65, 0x64)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint16(z.actuallyAdded)
+ if err != nil {
+ return
+ }
+ // write "m"
+ err = en.Append(0xa1, 0x6d)
+ if err != nil {
+ return err
+ }
+ err = en.WriteArrayHeader(uint32(len(z.m)))
+ if err != nil {
+ return
+ }
+ for zxvk := range z.m {
+ // map header, size 2
+ // write "start"
+ err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint16(z.m[zxvk].start)
+ if err != nil {
+ return
+ }
+ // write "last"
+ err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint16(z.m[zxvk].last())
+ if err != nil {
+ return
+ }
+ }
+ // write "rc"
+ err = en.Append(0xa2, 0x72, 0x63)
+ if err != nil {
+ return err
+ }
+ if z.rc == nil {
+ err = en.WriteNil()
+ if err != nil {
+ return
+ }
+ } else {
+ // map header, size 2
+ // write "iv"
+ err = en.Append(0x82, 0xa2, 0x69, 0x76)
+ if err != nil {
+ return err
+ }
+ err = en.WriteArrayHeader(uint32(len(z.rc.iv)))
+ if err != nil {
+ return
+ }
+ for zbzg := range z.rc.iv {
+ // map header, size 2
+ // write "start"
+ err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint16(z.rc.iv[zbzg].start)
+ if err != nil {
+ return
+ }
+ // write "last"
+ err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint16(z.rc.iv[zbzg].last())
+ if err != nil {
+ return
+ }
+ }
+ // write "card"
+ err = en.Append(0xa4, 0x63, 0x61, 0x72, 0x64)
+ if err != nil {
+ return err
+ }
+ err = en.WriteInt64(z.rc.card)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *addHelper16) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 5
+ // string "runstart"
+ o = append(o, 0x85, 0xa8, 0x72, 0x75, 0x6e, 0x73, 0x74, 0x61, 0x72, 0x74)
+ o = msgp.AppendUint16(o, z.runstart)
+ // string "runlen"
+ o = append(o, 0xa6, 0x72, 0x75, 0x6e, 0x6c, 0x65, 0x6e)
+ o = msgp.AppendUint16(o, z.runlen)
+ // string "actuallyAdded"
+ o = append(o, 0xad, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x6c, 0x79, 0x41, 0x64, 0x64, 0x65, 0x64)
+ o = msgp.AppendUint16(o, z.actuallyAdded)
+ // string "m"
+ o = append(o, 0xa1, 0x6d)
+ o = msgp.AppendArrayHeader(o, uint32(len(z.m)))
+ for zxvk := range z.m {
+ // map header, size 2
+ // string "start"
+ o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ o = msgp.AppendUint16(o, z.m[zxvk].start)
+ // string "last"
+ o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74)
+ o = msgp.AppendUint16(o, z.m[zxvk].last())
+ }
+ // string "rc"
+ o = append(o, 0xa2, 0x72, 0x63)
+ if z.rc == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ // map header, size 2
+ // string "iv"
+ o = append(o, 0x82, 0xa2, 0x69, 0x76)
+ o = msgp.AppendArrayHeader(o, uint32(len(z.rc.iv)))
+ for zbzg := range z.rc.iv {
+ // map header, size 2
+ // string "start"
+ o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ o = msgp.AppendUint16(o, z.rc.iv[zbzg].start)
+ // string "last"
+ o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74)
+ o = msgp.AppendUint16(o, z.rc.iv[zbzg].last())
+ }
+ // string "card"
+ o = append(o, 0xa4, 0x63, 0x61, 0x72, 0x64)
+ o = msgp.AppendInt64(o, z.rc.card)
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *addHelper16) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zxhx uint32
+ zxhx, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zxhx > 0 {
+ zxhx--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "runstart":
+ z.runstart, bts, err = msgp.ReadUint16Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "runlen":
+ z.runlen, bts, err = msgp.ReadUint16Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "actuallyAdded":
+ z.actuallyAdded, bts, err = msgp.ReadUint16Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "m":
+ var zlqf uint32
+ zlqf, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap(z.m) >= int(zlqf) {
+ z.m = (z.m)[:zlqf]
+ } else {
+ z.m = make([]interval16, zlqf)
+ }
+ for zxvk := range z.m {
+ var zdaf uint32
+ zdaf, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zdaf > 0 {
+ zdaf--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.m[zxvk].start, bts, err = msgp.ReadUint16Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "last":
+ z.m[zxvk].length, bts, err = msgp.ReadUint16Bytes(bts)
+ z.m[zxvk].length -= z.m[zxvk].start
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "rc":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ z.rc = nil
+ } else {
+ if z.rc == nil {
+ z.rc = new(runContainer16)
+ }
+ var zpks uint32
+ zpks, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zpks > 0 {
+ zpks--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "iv":
+ var zjfb uint32
+ zjfb, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap(z.rc.iv) >= int(zjfb) {
+ z.rc.iv = (z.rc.iv)[:zjfb]
+ } else {
+ z.rc.iv = make([]interval16, zjfb)
+ }
+ for zbzg := range z.rc.iv {
+ var zcxo uint32
+ zcxo, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zcxo > 0 {
+ zcxo--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.rc.iv[zbzg].start, bts, err = msgp.ReadUint16Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "last":
+ z.rc.iv[zbzg].length, bts, err = msgp.ReadUint16Bytes(bts)
+ z.rc.iv[zbzg].length -= z.rc.iv[zbzg].start
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "card":
+ z.rc.card, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *addHelper16) Msgsize() (s int) {
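+ // Editor's note on the constants below: each term pairs a msgp
+ // fixstr header byte with the field-name length, e.g.
+ // 9 = 1+len("runstart"), 7 = 1+len("runlen"),
+ // 14 = 1+len("actuallyAdded"); the per-interval 12 covers the
+ // fixmap header plus the "start" and "last" keys.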
+ s = 1 + 9 + msgp.Uint16Size + 7 + msgp.Uint16Size + 14 + msgp.Uint16Size + 2 + msgp.ArrayHeaderSize + (len(z.m) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 3
+ if z.rc == nil {
+ s += msgp.NilSize
+ } else {
+ s += 1 + 3 + msgp.ArrayHeaderSize + (len(z.rc.iv) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 5 + msgp.Int64Size
+ }
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *interval16) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zeff uint32
+ zeff, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zeff > 0 {
+ zeff--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.start, err = dc.ReadUint16()
+ if err != nil {
+ return
+ }
+ case "last":
+ z.length, err = dc.ReadUint16()
+ z.length -= z.start
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z interval16) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 2
+ // write "start"
+ err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint16(z.start)
+ if err != nil {
+ return
+ }
+ // write "last"
+ err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint16(z.last())
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z interval16) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "start"
+ o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ o = msgp.AppendUint16(o, z.start)
+ // string "last"
+ o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74)
+ o = msgp.AppendUint16(o, z.last())
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *interval16) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zrsw uint32
+ zrsw, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zrsw > 0 {
+ zrsw--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.start, bts, err = msgp.ReadUint16Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "last":
+ z.length, bts, err = msgp.ReadUint16Bytes(bts)
+ z.length -= z.start
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z interval16) Msgsize() (s int) {
+ s = 1 + 6 + msgp.Uint16Size + 5 + msgp.Uint16Size
+ return
+}
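+
+// Editor's sketch (not generated code): interval16 keeps {start,
+// length} in memory but serializes {start, last}; the decoders above
+// recover length via the "length -= start" step. A hypothetical
+// illustration, assuming newInterval16Range(start, last) from
+// elsewhere in this package:
+func exampleInterval16Wire() (start, last uint16) {
+ iv := newInterval16Range(10, 14) // start=10, last=14 => length=4
+ return iv.start, iv.last() // the two values MarshalMsg writes
+}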
+
+// DecodeMsg implements msgp.Decodable
+func (z *runContainer16) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zdnj uint32
+ zdnj, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zdnj > 0 {
+ zdnj--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "iv":
+ var zobc uint32
+ zobc, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap(z.iv) >= int(zobc) {
+ z.iv = (z.iv)[:zobc]
+ } else {
+ z.iv = make([]interval16, zobc)
+ }
+ for zxpk := range z.iv {
+ var zsnv uint32
+ zsnv, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zsnv > 0 {
+ zsnv--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.iv[zxpk].start, err = dc.ReadUint16()
+ if err != nil {
+ return
+ }
+ case "last":
+ z.iv[zxpk].length, err = dc.ReadUint16()
+ z.iv[zxpk].length -= z.iv[zxpk].start
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "card":
+ z.card, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *runContainer16) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 2
+ // write "iv"
+ err = en.Append(0x82, 0xa2, 0x69, 0x76)
+ if err != nil {
+ return err
+ }
+ err = en.WriteArrayHeader(uint32(len(z.iv)))
+ if err != nil {
+ return
+ }
+ for zxpk := range z.iv {
+ // map header, size 2
+ // write "start"
+ err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint16(z.iv[zxpk].start)
+ if err != nil {
+ return
+ }
+ // write "last"
+ err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint16(z.iv[zxpk].last())
+ if err != nil {
+ return
+ }
+ }
+ // write "card"
+ err = en.Append(0xa4, 0x63, 0x61, 0x72, 0x64)
+ if err != nil {
+ return err
+ }
+ err = en.WriteInt64(z.card)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *runContainer16) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "iv"
+ o = append(o, 0x82, 0xa2, 0x69, 0x76)
+ o = msgp.AppendArrayHeader(o, uint32(len(z.iv)))
+ for zxpk := range z.iv {
+ // map header, size 2
+ // string "start"
+ o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ o = msgp.AppendUint16(o, z.iv[zxpk].start)
+ // string "last"
+ o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74)
+ o = msgp.AppendUint16(o, z.iv[zxpk].last())
+ }
+ // string "card"
+ o = append(o, 0xa4, 0x63, 0x61, 0x72, 0x64)
+ o = msgp.AppendInt64(o, z.card)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *runContainer16) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zkgt uint32
+ zkgt, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zkgt > 0 {
+ zkgt--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "iv":
+ var zema uint32
+ zema, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap(z.iv) >= int(zema) {
+ z.iv = (z.iv)[:zema]
+ } else {
+ z.iv = make([]interval16, zema)
+ }
+ for zxpk := range z.iv {
+ var zpez uint32
+ zpez, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zpez > 0 {
+ zpez--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.iv[zxpk].start, bts, err = msgp.ReadUint16Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "last":
+ z.iv[zxpk].length, bts, err = msgp.ReadUint16Bytes(bts)
+ z.iv[zxpk].length -= z.iv[zxpk].start
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "card":
+ z.card, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *runContainer16) Msgsize() (s int) {
+ s = 1 + 3 + msgp.ArrayHeaderSize + (len(z.iv) * (12 + msgp.Uint16Size + msgp.Uint16Size)) + 5 + msgp.Int64Size
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *runIterator16) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zqke uint32
+ zqke, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zqke > 0 {
+ zqke--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "rc":
+ if dc.IsNil() {
+ err = dc.ReadNil()
+ if err != nil {
+ return
+ }
+ z.rc = nil
+ } else {
+ if z.rc == nil {
+ z.rc = new(runContainer16)
+ }
+ err = z.rc.DecodeMsg(dc)
+ if err != nil {
+ return
+ }
+ }
+ case "curIndex":
+ z.curIndex, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ case "curPosInIndex":
+ z.curPosInIndex, err = dc.ReadUint16()
+ if err != nil {
+ return
+ }
+ case "curSeq":
+ z.curSeq, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *runIterator16) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 4
+ // write "rc"
+ err = en.Append(0x84, 0xa2, 0x72, 0x63)
+ if err != nil {
+ return err
+ }
+ if z.rc == nil {
+ err = en.WriteNil()
+ if err != nil {
+ return
+ }
+ } else {
+ err = z.rc.EncodeMsg(en)
+ if err != nil {
+ return
+ }
+ }
+ // write "curIndex"
+ err = en.Append(0xa8, 0x63, 0x75, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78)
+ if err != nil {
+ return err
+ }
+ err = en.WriteInt64(z.curIndex)
+ if err != nil {
+ return
+ }
+ // write "curPosInIndex"
+ err = en.Append(0xad, 0x63, 0x75, 0x72, 0x50, 0x6f, 0x73, 0x49, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint16(z.curPosInIndex)
+ if err != nil {
+ return
+ }
+ // write "curSeq"
+ err = en.Append(0xa6, 0x63, 0x75, 0x72, 0x53, 0x65, 0x71)
+ if err != nil {
+ return err
+ }
+ err = en.WriteInt64(z.curSeq)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *runIterator16) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 4
+ // string "rc"
+ o = append(o, 0x84, 0xa2, 0x72, 0x63)
+ if z.rc == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o, err = z.rc.MarshalMsg(o)
+ if err != nil {
+ return
+ }
+ }
+ // string "curIndex"
+ o = append(o, 0xa8, 0x63, 0x75, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78)
+ o = msgp.AppendInt64(o, z.curIndex)
+ // string "curPosInIndex"
+ o = append(o, 0xad, 0x63, 0x75, 0x72, 0x50, 0x6f, 0x73, 0x49, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78)
+ o = msgp.AppendUint16(o, z.curPosInIndex)
+ // string "curSeq"
+ o = append(o, 0xa6, 0x63, 0x75, 0x72, 0x53, 0x65, 0x71)
+ o = msgp.AppendInt64(o, z.curSeq)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *runIterator16) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zqyh uint32
+ zqyh, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zqyh > 0 {
+ zqyh--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "rc":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ z.rc = nil
+ } else {
+ if z.rc == nil {
+ z.rc = new(runContainer16)
+ }
+ bts, err = z.rc.UnmarshalMsg(bts)
+ if err != nil {
+ return
+ }
+ }
+ case "curIndex":
+ z.curIndex, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "curPosInIndex":
+ z.curPosInIndex, bts, err = msgp.ReadUint16Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "curSeq":
+ z.curSeq, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *runIterator16) Msgsize() (s int) {
+ s = 1 + 3
+ if z.rc == nil {
+ s += msgp.NilSize
+ } else {
+ s += z.rc.Msgsize()
+ }
+ s += 9 + msgp.Int64Size + 14 + msgp.Uint16Size + 7 + msgp.Int64Size
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *uint16Slice) DecodeMsg(dc *msgp.Reader) (err error) {
+ var zjpj uint32
+ zjpj, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap((*z)) >= int(zjpj) {
+ (*z) = (*z)[:zjpj]
+ } else {
+ (*z) = make(uint16Slice, zjpj)
+ }
+ for zywj := range *z {
+ (*z)[zywj], err = dc.ReadUint16()
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z uint16Slice) EncodeMsg(en *msgp.Writer) (err error) {
+ err = en.WriteArrayHeader(uint32(len(z)))
+ if err != nil {
+ return
+ }
+ for zzpf := range z {
+ err = en.WriteUint16(z[zzpf])
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z uint16Slice) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendArrayHeader(o, uint32(len(z)))
+ for zzpf := range z {
+ o = msgp.AppendUint16(o, z[zzpf])
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *uint16Slice) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var zgmo uint32
+ zgmo, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap((*z)) >= int(zgmo) {
+ (*z) = (*z)[:zgmo]
+ } else {
+ (*z) = make(uint16Slice, zgmo)
+ }
+ for zrfe := range *z {
+ (*z)[zrfe], bts, err = msgp.ReadUint16Bytes(bts)
+ if err != nil {
+ return
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z uint16Slice) Msgsize() (s int) {
+ s = msgp.ArrayHeaderSize + (len(z) * (msgp.Uint16Size))
+ return
+}
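+
+// Editor's sketch (not generated code): a minimal MessagePack
+// round-trip for runContainer16 through the MarshalMsg/UnmarshalMsg
+// methods above; newRunContainer16TakeOwnership and
+// newInterval16Range are in-package helpers used elsewhere in this
+// vendored package.
+func exampleRunContainer16RoundTrip() error {
+ rc := newRunContainer16TakeOwnership([]interval16{
+ newInterval16Range(3, 9), // one run covering [3, 9]
+ })
+ buf, err := rc.MarshalMsg(nil)
+ if err != nil {
+ return err
+ }
+ var back runContainer16
+ if _, err = back.UnmarshalMsg(buf); err != nil {
+ return err
+ }
+ // back.iv now mirrors rc.iv.
+ return nil
+}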
diff --git a/vendor/github.com/RoaringBitmap/roaring/rle_gen.go b/vendor/github.com/RoaringBitmap/roaring/rle_gen.go
new file mode 100644
index 0000000000..bc9da75f3a
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/rle_gen.go
@@ -0,0 +1,1118 @@
+package roaring
+
+// NOTE: THIS FILE WAS PRODUCED BY THE
+// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
+// DO NOT EDIT
+
+import "github.com/tinylib/msgp/msgp"
+
+// DecodeMsg implements msgp.Decodable
+func (z *addHelper32) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zbai uint32
+ zbai, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zbai > 0 {
+ zbai--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "runstart":
+ z.runstart, err = dc.ReadUint32()
+ if err != nil {
+ return
+ }
+ case "runlen":
+ z.runlen, err = dc.ReadUint32()
+ if err != nil {
+ return
+ }
+ case "actuallyAdded":
+ z.actuallyAdded, err = dc.ReadUint32()
+ if err != nil {
+ return
+ }
+ case "m":
+ var zcmr uint32
+ zcmr, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap(z.m) >= int(zcmr) {
+ z.m = (z.m)[:zcmr]
+ } else {
+ z.m = make([]interval32, zcmr)
+ }
+ for zxvk := range z.m {
+ var zajw uint32
+ zajw, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zajw > 0 {
+ zajw--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.m[zxvk].start, err = dc.ReadUint32()
+ if err != nil {
+ return
+ }
+ case "last":
+ z.m[zxvk].last, err = dc.ReadUint32()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "rc":
+ if dc.IsNil() {
+ err = dc.ReadNil()
+ if err != nil {
+ return
+ }
+ z.rc = nil
+ } else {
+ if z.rc == nil {
+ z.rc = new(runContainer32)
+ }
+ var zwht uint32
+ zwht, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zwht > 0 {
+ zwht--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "iv":
+ var zhct uint32
+ zhct, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap(z.rc.iv) >= int(zhct) {
+ z.rc.iv = (z.rc.iv)[:zhct]
+ } else {
+ z.rc.iv = make([]interval32, zhct)
+ }
+ for zbzg := range z.rc.iv {
+ var zcua uint32
+ zcua, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zcua > 0 {
+ zcua--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.rc.iv[zbzg].start, err = dc.ReadUint32()
+ if err != nil {
+ return
+ }
+ case "last":
+ z.rc.iv[zbzg].last, err = dc.ReadUint32()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "card":
+ z.rc.card, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *addHelper32) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 5
+ // write "runstart"
+ err = en.Append(0x85, 0xa8, 0x72, 0x75, 0x6e, 0x73, 0x74, 0x61, 0x72, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint32(z.runstart)
+ if err != nil {
+ return
+ }
+ // write "runlen"
+ err = en.Append(0xa6, 0x72, 0x75, 0x6e, 0x6c, 0x65, 0x6e)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint32(z.runlen)
+ if err != nil {
+ return
+ }
+ // write "actuallyAdded"
+ err = en.Append(0xad, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x6c, 0x79, 0x41, 0x64, 0x64, 0x65, 0x64)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint32(z.actuallyAdded)
+ if err != nil {
+ return
+ }
+ // write "m"
+ err = en.Append(0xa1, 0x6d)
+ if err != nil {
+ return err
+ }
+ err = en.WriteArrayHeader(uint32(len(z.m)))
+ if err != nil {
+ return
+ }
+ for zxvk := range z.m {
+ // map header, size 2
+ // write "start"
+ err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint32(z.m[zxvk].start)
+ if err != nil {
+ return
+ }
+ // write "last"
+ err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint32(z.m[zxvk].last)
+ if err != nil {
+ return
+ }
+ }
+ // write "rc"
+ err = en.Append(0xa2, 0x72, 0x63)
+ if err != nil {
+ return err
+ }
+ if z.rc == nil {
+ err = en.WriteNil()
+ if err != nil {
+ return
+ }
+ } else {
+ // map header, size 2
+ // write "iv"
+ err = en.Append(0x82, 0xa2, 0x69, 0x76)
+ if err != nil {
+ return err
+ }
+ err = en.WriteArrayHeader(uint32(len(z.rc.iv)))
+ if err != nil {
+ return
+ }
+ for zbzg := range z.rc.iv {
+ // map header, size 2
+ // write "start"
+ err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint32(z.rc.iv[zbzg].start)
+ if err != nil {
+ return
+ }
+ // write "last"
+ err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint32(z.rc.iv[zbzg].last)
+ if err != nil {
+ return
+ }
+ }
+ // write "card"
+ err = en.Append(0xa4, 0x63, 0x61, 0x72, 0x64)
+ if err != nil {
+ return err
+ }
+ err = en.WriteInt64(z.rc.card)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *addHelper32) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 5
+ // string "runstart"
+ o = append(o, 0x85, 0xa8, 0x72, 0x75, 0x6e, 0x73, 0x74, 0x61, 0x72, 0x74)
+ o = msgp.AppendUint32(o, z.runstart)
+ // string "runlen"
+ o = append(o, 0xa6, 0x72, 0x75, 0x6e, 0x6c, 0x65, 0x6e)
+ o = msgp.AppendUint32(o, z.runlen)
+ // string "actuallyAdded"
+ o = append(o, 0xad, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x6c, 0x79, 0x41, 0x64, 0x64, 0x65, 0x64)
+ o = msgp.AppendUint32(o, z.actuallyAdded)
+ // string "m"
+ o = append(o, 0xa1, 0x6d)
+ o = msgp.AppendArrayHeader(o, uint32(len(z.m)))
+ for zxvk := range z.m {
+ // map header, size 2
+ // string "start"
+ o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ o = msgp.AppendUint32(o, z.m[zxvk].start)
+ // string "last"
+ o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74)
+ o = msgp.AppendUint32(o, z.m[zxvk].last)
+ }
+ // string "rc"
+ o = append(o, 0xa2, 0x72, 0x63)
+ if z.rc == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ // map header, size 2
+ // string "iv"
+ o = append(o, 0x82, 0xa2, 0x69, 0x76)
+ o = msgp.AppendArrayHeader(o, uint32(len(z.rc.iv)))
+ for zbzg := range z.rc.iv {
+ // map header, size 2
+ // string "start"
+ o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ o = msgp.AppendUint32(o, z.rc.iv[zbzg].start)
+ // string "last"
+ o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74)
+ o = msgp.AppendUint32(o, z.rc.iv[zbzg].last)
+ }
+ // string "card"
+ o = append(o, 0xa4, 0x63, 0x61, 0x72, 0x64)
+ o = msgp.AppendInt64(o, z.rc.card)
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *addHelper32) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zxhx uint32
+ zxhx, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zxhx > 0 {
+ zxhx--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "runstart":
+ z.runstart, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "runlen":
+ z.runlen, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "actuallyAdded":
+ z.actuallyAdded, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "m":
+ var zlqf uint32
+ zlqf, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap(z.m) >= int(zlqf) {
+ z.m = (z.m)[:zlqf]
+ } else {
+ z.m = make([]interval32, zlqf)
+ }
+ for zxvk := range z.m {
+ var zdaf uint32
+ zdaf, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zdaf > 0 {
+ zdaf--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.m[zxvk].start, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "last":
+ z.m[zxvk].last, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "rc":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ z.rc = nil
+ } else {
+ if z.rc == nil {
+ z.rc = new(runContainer32)
+ }
+ var zpks uint32
+ zpks, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zpks > 0 {
+ zpks--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "iv":
+ var zjfb uint32
+ zjfb, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap(z.rc.iv) >= int(zjfb) {
+ z.rc.iv = (z.rc.iv)[:zjfb]
+ } else {
+ z.rc.iv = make([]interval32, zjfb)
+ }
+ for zbzg := range z.rc.iv {
+ var zcxo uint32
+ zcxo, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zcxo > 0 {
+ zcxo--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.rc.iv[zbzg].start, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "last":
+ z.rc.iv[zbzg].last, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "card":
+ z.rc.card, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *addHelper32) Msgsize() (s int) {
+ s = 1 + 9 + msgp.Uint32Size + 7 + msgp.Uint32Size + 14 + msgp.Uint32Size + 2 + msgp.ArrayHeaderSize + (len(z.m) * (12 + msgp.Uint32Size + msgp.Uint32Size)) + 3
+ if z.rc == nil {
+ s += msgp.NilSize
+ } else {
+ s += 1 + 3 + msgp.ArrayHeaderSize + (len(z.rc.iv) * (12 + msgp.Uint32Size + msgp.Uint32Size)) + 5 + msgp.Int64Size
+ }
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *interval32) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zeff uint32
+ zeff, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zeff > 0 {
+ zeff--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.start, err = dc.ReadUint32()
+ if err != nil {
+ return
+ }
+ case "last":
+ z.last, err = dc.ReadUint32()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z interval32) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 2
+ // write "start"
+ err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint32(z.start)
+ if err != nil {
+ return
+ }
+ // write "last"
+ err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint32(z.last)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z interval32) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "start"
+ o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ o = msgp.AppendUint32(o, z.start)
+ // string "last"
+ o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74)
+ o = msgp.AppendUint32(o, z.last)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *interval32) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zrsw uint32
+ zrsw, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zrsw > 0 {
+ zrsw--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.start, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "last":
+ z.last, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z interval32) Msgsize() (s int) {
+ s = 1 + 6 + msgp.Uint32Size + 5 + msgp.Uint32Size
+ return
+}
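+
+// Editor's note (not generated code): unlike interval16, which stores
+// a length and derives last(), interval32 stores last directly, so no
+// "length -= start" adjustment appears in the decoders above. A tiny
+// hypothetical illustration:
+func exampleInterval32Width() uint32 {
+ iv := interval32{start: 100, last: 109}
+ return iv.last - iv.start + 1 // the run covers 10 values
+}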
+
+// DecodeMsg implements msgp.Decodable
+func (z *runContainer32) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zdnj uint32
+ zdnj, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zdnj > 0 {
+ zdnj--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "iv":
+ var zobc uint32
+ zobc, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap(z.iv) >= int(zobc) {
+ z.iv = (z.iv)[:zobc]
+ } else {
+ z.iv = make([]interval32, zobc)
+ }
+ for zxpk := range z.iv {
+ var zsnv uint32
+ zsnv, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zsnv > 0 {
+ zsnv--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.iv[zxpk].start, err = dc.ReadUint32()
+ if err != nil {
+ return
+ }
+ case "last":
+ z.iv[zxpk].last, err = dc.ReadUint32()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "card":
+ z.card, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *runContainer32) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 2
+ // write "iv"
+ err = en.Append(0x82, 0xa2, 0x69, 0x76)
+ if err != nil {
+ return err
+ }
+ err = en.WriteArrayHeader(uint32(len(z.iv)))
+ if err != nil {
+ return
+ }
+ for zxpk := range z.iv {
+ // map header, size 2
+ // write "start"
+ err = en.Append(0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint32(z.iv[zxpk].start)
+ if err != nil {
+ return
+ }
+ // write "last"
+ err = en.Append(0xa4, 0x6c, 0x61, 0x73, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint32(z.iv[zxpk].last)
+ if err != nil {
+ return
+ }
+ }
+ // write "card"
+ err = en.Append(0xa4, 0x63, 0x61, 0x72, 0x64)
+ if err != nil {
+ return err
+ }
+ err = en.WriteInt64(z.card)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *runContainer32) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "iv"
+ o = append(o, 0x82, 0xa2, 0x69, 0x76)
+ o = msgp.AppendArrayHeader(o, uint32(len(z.iv)))
+ for zxpk := range z.iv {
+ // map header, size 2
+ // string "start"
+ o = append(o, 0x82, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ o = msgp.AppendUint32(o, z.iv[zxpk].start)
+ // string "last"
+ o = append(o, 0xa4, 0x6c, 0x61, 0x73, 0x74)
+ o = msgp.AppendUint32(o, z.iv[zxpk].last)
+ }
+ // string "card"
+ o = append(o, 0xa4, 0x63, 0x61, 0x72, 0x64)
+ o = msgp.AppendInt64(o, z.card)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *runContainer32) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zkgt uint32
+ zkgt, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zkgt > 0 {
+ zkgt--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "iv":
+ var zema uint32
+ zema, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap(z.iv) >= int(zema) {
+ z.iv = (z.iv)[:zema]
+ } else {
+ z.iv = make([]interval32, zema)
+ }
+ for zxpk := range z.iv {
+ var zpez uint32
+ zpez, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zpez > 0 {
+ zpez--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "start":
+ z.iv[zxpk].start, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "last":
+ z.iv[zxpk].last, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ case "card":
+ z.card, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *runContainer32) Msgsize() (s int) {
+ s = 1 + 3 + msgp.ArrayHeaderSize + (len(z.iv) * (12 + msgp.Uint32Size + msgp.Uint32Size)) + 5 + msgp.Int64Size
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *runIterator32) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zqke uint32
+ zqke, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zqke > 0 {
+ zqke--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "rc":
+ if dc.IsNil() {
+ err = dc.ReadNil()
+ if err != nil {
+ return
+ }
+ z.rc = nil
+ } else {
+ if z.rc == nil {
+ z.rc = new(runContainer32)
+ }
+ err = z.rc.DecodeMsg(dc)
+ if err != nil {
+ return
+ }
+ }
+ case "curIndex":
+ z.curIndex, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ case "curPosInIndex":
+ z.curPosInIndex, err = dc.ReadUint32()
+ if err != nil {
+ return
+ }
+ case "curSeq":
+ z.curSeq, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *runIterator32) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 4
+ // write "rc"
+ err = en.Append(0x84, 0xa2, 0x72, 0x63)
+ if err != nil {
+ return err
+ }
+ if z.rc == nil {
+ err = en.WriteNil()
+ if err != nil {
+ return
+ }
+ } else {
+ err = z.rc.EncodeMsg(en)
+ if err != nil {
+ return
+ }
+ }
+ // write "curIndex"
+ err = en.Append(0xa8, 0x63, 0x75, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78)
+ if err != nil {
+ return err
+ }
+ err = en.WriteInt64(z.curIndex)
+ if err != nil {
+ return
+ }
+ // write "curPosInIndex"
+ err = en.Append(0xad, 0x63, 0x75, 0x72, 0x50, 0x6f, 0x73, 0x49, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint32(z.curPosInIndex)
+ if err != nil {
+ return
+ }
+ // write "curSeq"
+ err = en.Append(0xa6, 0x63, 0x75, 0x72, 0x53, 0x65, 0x71)
+ if err != nil {
+ return err
+ }
+ err = en.WriteInt64(z.curSeq)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *runIterator32) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 4
+ // string "rc"
+ o = append(o, 0x84, 0xa2, 0x72, 0x63)
+ if z.rc == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o, err = z.rc.MarshalMsg(o)
+ if err != nil {
+ return
+ }
+ }
+ // string "curIndex"
+ o = append(o, 0xa8, 0x63, 0x75, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x78)
+ o = msgp.AppendInt64(o, z.curIndex)
+ // string "curPosInIndex"
+ o = append(o, 0xad, 0x63, 0x75, 0x72, 0x50, 0x6f, 0x73, 0x49, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78)
+ o = msgp.AppendUint32(o, z.curPosInIndex)
+ // string "curSeq"
+ o = append(o, 0xa6, 0x63, 0x75, 0x72, 0x53, 0x65, 0x71)
+ o = msgp.AppendInt64(o, z.curSeq)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *runIterator32) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zqyh uint32
+ zqyh, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zqyh > 0 {
+ zqyh--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "rc":
+ if msgp.IsNil(bts) {
+ bts, err = msgp.ReadNilBytes(bts)
+ if err != nil {
+ return
+ }
+ z.rc = nil
+ } else {
+ if z.rc == nil {
+ z.rc = new(runContainer32)
+ }
+ bts, err = z.rc.UnmarshalMsg(bts)
+ if err != nil {
+ return
+ }
+ }
+ case "curIndex":
+ z.curIndex, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "curPosInIndex":
+ z.curPosInIndex, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ return
+ }
+ case "curSeq":
+ z.curSeq, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *runIterator32) Msgsize() (s int) {
+ s = 1 + 3
+ if z.rc == nil {
+ s += msgp.NilSize
+ } else {
+ s += z.rc.Msgsize()
+ }
+ s += 9 + msgp.Int64Size + 14 + msgp.Uint32Size + 7 + msgp.Int64Size
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *uint32Slice) DecodeMsg(dc *msgp.Reader) (err error) {
+ var zjpj uint32
+ zjpj, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap((*z)) >= int(zjpj) {
+ (*z) = (*z)[:zjpj]
+ } else {
+ (*z) = make(uint32Slice, zjpj)
+ }
+ for zywj := range *z {
+ (*z)[zywj], err = dc.ReadUint32()
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z uint32Slice) EncodeMsg(en *msgp.Writer) (err error) {
+ err = en.WriteArrayHeader(uint32(len(z)))
+ if err != nil {
+ return
+ }
+ for zzpf := range z {
+ err = en.WriteUint32(z[zzpf])
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z uint32Slice) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendArrayHeader(o, uint32(len(z)))
+ for zzpf := range z {
+ o = msgp.AppendUint32(o, z[zzpf])
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *uint32Slice) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var zgmo uint32
+ zgmo, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap((*z)) >= int(zgmo) {
+ (*z) = (*z)[:zgmo]
+ } else {
+ (*z) = make(uint32Slice, zgmo)
+ }
+ for zrfe := range *z {
+ (*z)[zrfe], bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ return
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z uint32Slice) Msgsize() (s int) {
+ s = msgp.ArrayHeaderSize + (len(z) * (msgp.Uint32Size))
+ return
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/rlecommon.go b/vendor/github.com/RoaringBitmap/roaring/rlecommon.go
new file mode 100644
index 0000000000..133636787a
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/rlecommon.go
@@ -0,0 +1,163 @@
+package roaring
+
+import (
+ "fmt"
+)
+
+// common to rle32.go and rle16.go
+
+// rleVerbose controls whether the diagnostic prints from p()
+// show up. The tests set this based on testing.Verbose().
+var rleVerbose bool
+
+// p is a shorthand for fmt.Printf that adds a leading and a
+// trailing newline; it makes it easy to drop in diagnostic
+// print statements.
+func p(format string, args ...interface{}) {
+ if rleVerbose {
+ fmt.Printf("\n"+format+"\n", args...)
+ }
+}
+
+// MaxUint32 is the largest uint32 value.
+const MaxUint32 = 4294967295
+
+// MaxUint16 is the largest 16-bit unsigned integer.
+// This is the largest value an interval16 can store.
+const MaxUint16 = 65535
+
+// searchOptions allows us to accelerate runContainer32.search with
+// prior knowledge of (mostly lower) bounds. This is used by Union
+// and Intersect.
+type searchOptions struct {
+ // start here instead of at 0
+ startIndex int64
+
+ // upper bound instead of len(rc.iv);
+ // endxIndex == 0 means "ignore the bound" and behaves
+ // like endxIndex == n == len(rc.iv), which is also
+ // the natural default for search() when opt == nil.
+ endxIndex int64
+}
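+
+// Editor's sketch (hypothetical helper, not part of the library):
+// building a searchOptions that restricts a scan to iv[from:upto);
+// passing upto == 0 means "no upper bound", per the endxIndex
+// convention documented above.
+func boundedSearchOpts(from, upto int64) *searchOptions {
+ return &searchOptions{startIndex: from, endxIndex: upto}
+}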
+
+// And finds the intersection of rc and b.
+func (rc *runContainer32) And(b *Bitmap) *Bitmap {
+ out := NewBitmap()
+ for _, p := range rc.iv {
+ for i := p.start; i <= p.last; i++ {
+ if b.Contains(i) {
+ out.Add(i)
+ }
+ }
+ }
+ return out
+}
+
+// Xor returns the exclusive-or of rc and b.
+func (rc *runContainer32) Xor(b *Bitmap) *Bitmap {
+ out := b.Clone()
+ for _, p := range rc.iv {
+ for v := p.start; v <= p.last; v++ {
+ if out.Contains(v) {
+ out.RemoveRange(uint64(v), uint64(v+1))
+ } else {
+ out.Add(v)
+ }
+ }
+ }
+ return out
+}
+
+// Or returns the union of rc and b.
+func (rc *runContainer32) Or(b *Bitmap) *Bitmap {
+ out := b.Clone()
+ for _, p := range rc.iv {
+ for v := p.start; v <= p.last; v++ {
+ out.Add(v)
+ }
+ }
+ return out
+}
+
+// trial is used in the randomized testing of runContainers
+type trial struct {
+ n int
+ percentFill float64
+ ntrial int
+
+ // only used in the union
+ // and subtract tests
+ percentDelete float64
+
+ // only in the 067 randomized-operations test;
+ // we make numRandomOpsPass+1 passes
+ numRandomOpsPass int
+
+ // srang allows control of the sampling range;
+ // only recent tests respect this.
+ srang *interval16
+}
+
+// And finds the intersection of rc and b.
+func (rc *runContainer16) And(b *Bitmap) *Bitmap {
+ out := NewBitmap()
+ for _, p := range rc.iv {
+ plast := p.last()
+ for i := p.start; i <= plast; i++ {
+ if b.Contains(uint32(i)) {
+ out.Add(uint32(i))
+ }
+ }
+ }
+ return out
+}
+
+// Xor returns the exclusive-or of rc and b.
+func (rc *runContainer16) Xor(b *Bitmap) *Bitmap {
+ out := b.Clone()
+ for _, p := range rc.iv {
+ plast := p.last()
+ for v := p.start; v <= plast; v++ {
+ w := uint32(v)
+ if out.Contains(w) {
+ out.RemoveRange(uint64(w), uint64(w+1))
+ } else {
+ out.Add(w)
+ }
+ }
+ }
+ return out
+}
+
+// Or returns the union of rc and b.
+func (rc *runContainer16) Or(b *Bitmap) *Bitmap {
+ out := b.Clone()
+ for _, p := range rc.iv {
+ plast := p.last()
+ for v := p.start; v <= plast; v++ {
+ out.Add(uint32(v))
+ }
+ }
+ return out
+}
+
+//func (rc *runContainer32) and(container) container {
+// panic("TODO. not yet implemented")
+//}
+
+// serializedSizeInBytes returns the number of bytes of memory
+// required by this runContainer16, in the Roaring format
+// specified at https://github.com/RoaringBitmap/RoaringFormatSpec/
+func (rc *runContainer16) serializedSizeInBytes() int {
+ // the run count takes one uint16, then each run
+ // needs two more uint16s (start and length)
+ return 2 + len(rc.iv)*4
+}
+
+// serializedSizeInBytes returns the number of bytes of memory
+// required by this runContainer32.
+func (rc *runContainer32) serializedSizeInBytes() int {
+ return 4 + len(rc.iv)*8
+}
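+
+// Editor's worked example (hypothetical, for illustration): with
+// three runs, a runContainer16 serializes to 2 + 3*4 = 14 bytes and
+// a runContainer32 to 4 + 3*8 = 28 bytes.
+func exampleSerializedSizes() (n16, n32 int) {
+ rc16 := &runContainer16{iv: make([]interval16, 3)}
+ rc32 := &runContainer32{iv: make([]interval32, 3)}
+ return rc16.serializedSizeInBytes(), rc32.serializedSizeInBytes() // 14, 28
+}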
diff --git a/vendor/github.com/RoaringBitmap/roaring/rlei.go b/vendor/github.com/RoaringBitmap/roaring/rlei.go
new file mode 100644
index 0000000000..a15a017e47
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/rlei.go
@@ -0,0 +1,695 @@
+package roaring
+
+///////////////////////////////////////////////////
+//
+// container interface methods for runContainer16
+//
+///////////////////////////////////////////////////
+
+import (
+ "fmt"
+)
+
+// compile-time verification that we meet the container interface
+var _ container = &runContainer16{}
+
+func (rc *runContainer16) clone() container {
+ return newRunContainer16CopyIv(rc.iv)
+}
+
+func (rc *runContainer16) minimum() uint16 {
+ return rc.iv[0].start // assume not empty
+}
+
+func (rc *runContainer16) maximum() uint16 {
+ return rc.iv[len(rc.iv)-1].last() // assume not empty
+}
+
+func (rc *runContainer16) isFull() bool {
+ return (len(rc.iv) == 1) && ((rc.iv[0].start == 0) && (rc.iv[0].last() == MaxUint16))
+}
+
+func (rc *runContainer16) and(a container) container {
+ if rc.isFull() {
+ return a.clone()
+ }
+ switch c := a.(type) {
+ case *runContainer16:
+ return rc.intersect(c)
+ case *arrayContainer:
+ return rc.andArray(c)
+ case *bitmapContainer:
+ return rc.andBitmapContainer(c)
+ }
+ panic("unsupported container type")
+}
+
+func (rc *runContainer16) andCardinality(a container) int {
+ switch c := a.(type) {
+ case *runContainer16:
+ return int(rc.intersectCardinality(c))
+ case *arrayContainer:
+ return rc.andArrayCardinality(c)
+ case *bitmapContainer:
+ return rc.andBitmapContainerCardinality(c)
+ }
+ panic("unsupported container type")
+}
+
+// andBitmapContainer finds the intersection of rc and b.
+func (rc *runContainer16) andBitmapContainer(bc *bitmapContainer) container {
+ bc2 := newBitmapContainerFromRun(rc)
+ return bc2.andBitmap(bc)
+}
+
+func (rc *runContainer16) andArrayCardinality(ac *arrayContainer) int {
+ pos := 0
+ answer := 0
+ maxpos := ac.getCardinality()
+ if maxpos == 0 {
+ return 0 // won't happen in actual code
+ }
+ v := ac.content[pos]
+mainloop:
+ for _, p := range rc.iv {
+ for v < p.start {
+ pos++
+ if pos == maxpos {
+ break mainloop
+ }
+ v = ac.content[pos]
+ }
+ for v <= p.last() {
+ answer++
+ pos++
+ if pos == maxpos {
+ break mainloop
+ }
+ v = ac.content[pos]
+ }
+ }
+ return answer
+}
+
+func (rc *runContainer16) iand(a container) container {
+ if rc.isFull() {
+ return a.clone()
+ }
+ switch c := a.(type) {
+ case *runContainer16:
+ return rc.inplaceIntersect(c)
+ case *arrayContainer:
+ return rc.andArray(c)
+ case *bitmapContainer:
+ return rc.iandBitmapContainer(c)
+ }
+ panic("unsupported container type")
+}
+
+func (rc *runContainer16) inplaceIntersect(rc2 *runContainer16) container {
+ // TODO: optimize by doing less allocation, possibly?
+
+ // sect will be new
+ sect := rc.intersect(rc2)
+ *rc = *sect
+ return rc
+}
+
+func (rc *runContainer16) iandBitmapContainer(bc *bitmapContainer) container {
+ isect := rc.andBitmapContainer(bc)
+ *rc = *newRunContainer16FromContainer(isect)
+ return rc
+}
+
+func (rc *runContainer16) andArray(ac *arrayContainer) container {
+ if len(rc.iv) == 0 {
+ return newArrayContainer()
+ }
+
+ acCardinality := ac.getCardinality()
+ c := newArrayContainerCapacity(acCardinality)
+
+ for rlePos, arrayPos := 0, 0; arrayPos < acCardinality; {
+ iv := rc.iv[rlePos]
+ arrayVal := ac.content[arrayPos]
+
+ for iv.last() < arrayVal {
+ rlePos++
+ if rlePos == len(rc.iv) {
+ return c
+ }
+ iv = rc.iv[rlePos]
+ }
+
+ if iv.start > arrayVal {
+ arrayPos = advanceUntil(ac.content, arrayPos, len(ac.content), iv.start)
+ } else {
+ c.content = append(c.content, arrayVal)
+ arrayPos++
+ }
+ }
+ return c
+}
+
+func (rc *runContainer16) andNot(a container) container {
+ switch c := a.(type) {
+ case *arrayContainer:
+ return rc.andNotArray(c)
+ case *bitmapContainer:
+ return rc.andNotBitmap(c)
+ case *runContainer16:
+ return rc.andNotRunContainer16(c)
+ }
+ panic("unsupported container type")
+}
+
+func (rc *runContainer16) fillLeastSignificant16bits(x []uint32, i int, mask uint32) {
+ k := 0
+ var val int64
+ for _, p := range rc.iv {
+ n := p.runlen()
+ for j := int64(0); j < n; j++ {
+ val = int64(p.start) + j
+ x[k+i] = uint32(val) | mask
+ k++
+ }
+ }
+}
+
+func (rc *runContainer16) getShortIterator() shortIterable {
+ return rc.newRunIterator16()
+}
+
+func (rc *runContainer16) getManyIterator() manyIterable {
+ return rc.newManyRunIterator16()
+}
+
+// add the values in the range [firstOfRange, endx). endx
+// is still able to express 2^16 because it is an int, not a uint16.
+func (rc *runContainer16) iaddRange(firstOfRange, endx int) container {
+
+ if firstOfRange >= endx {
+ panic(fmt.Sprintf("invalid %v = endx >= firstOfRange", endx))
+ }
+ addme := newRunContainer16TakeOwnership([]interval16{
+ {
+ start: uint16(firstOfRange),
+ length: uint16(endx - 1 - firstOfRange),
+ },
+ })
+ *rc = *rc.union(addme)
+ return rc
+}
+
+// remove the values in the range [firstOfRange,endx)
+func (rc *runContainer16) iremoveRange(firstOfRange, endx int) container {
+ if firstOfRange >= endx {
+ panic(fmt.Sprintf("request to iremove empty set [%v, %v),"+
+ " nothing to do.", firstOfRange, endx))
+ //return rc
+ }
+ x := newInterval16Range(uint16(firstOfRange), uint16(endx-1))
+ rc.isubtract(x)
+ return rc
+}
+
+// not flips the values in the range [firstOfRange,endx)
+func (rc *runContainer16) not(firstOfRange, endx int) container {
+ if firstOfRange >= endx {
+ panic(fmt.Sprintf("invalid %v = endx >= firstOfRange = %v", endx, firstOfRange))
+ }
+
+ return rc.Not(firstOfRange, endx)
+}
+
+// Not flips the values in the range [firstOfRange,endx).
+// This is not inplace. Only the returned value has the flipped bits.
+//
+// Currently implemented as (!A intersect B) union (A minus B),
+// where A is rc, and B is the supplied [firstOfRange, endx) interval.
+//
+// TODO(time optimization): convert this to a single pass
+// algorithm by copying AndNotRunContainer16() and modifying it.
+// The current routine is correct but makes two more passes
+// through the arrays than strictly necessary. Measure both
+// ways, though; this may not matter.
+//
+func (rc *runContainer16) Not(firstOfRange, endx int) *runContainer16 {
+
+ if firstOfRange >= endx {
+ panic(fmt.Sprintf("invalid %v = endx >= firstOfRange == %v", endx, firstOfRange))
+ }
+
+
+ a := rc
+ // algo:
+ // (!A intersect B) union (A minus B)
+
+ nota := a.invert()
+
+ bs := []interval16{newInterval16Range(uint16(firstOfRange), uint16(endx-1))}
+ b := newRunContainer16TakeOwnership(bs)
+
+ notAintersectB := nota.intersect(b)
+
+ aMinusB := a.AndNotRunContainer16(b)
+
+ rc2 := notAintersectB.union(aMinusB)
+ return rc2
+}
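+
+// Editor's worked example of the identity above (hypothetical, not
+// library code): for A covering {1, 3, 4, 7} and B = [2, 5), we get
+// (!A intersect B) = {2} and (A minus B) = {1, 7}, so Not yields
+// {1, 2, 7}.
+func exampleNot() *runContainer16 {
+ a := newRunContainer16TakeOwnership([]interval16{
+ newInterval16Range(1, 1),
+ newInterval16Range(3, 4),
+ newInterval16Range(7, 7),
+ })
+ return a.Not(2, 5) // covers {1, 2, 7}
+}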
+
+// equals is now logical equals; it does not require the
+// same underlying container type.
+func (rc *runContainer16) equals(o container) bool {
+ srb, ok := o.(*runContainer16)
+
+ if ok {
+ // Check if the containers are the same object.
+ if rc == srb {
+ return true
+ }
+
+ if len(srb.iv) != len(rc.iv) {
+ return false
+ }
+
+ for i, v := range rc.iv {
+ if v != srb.iv[i] {
+ return false
+ }
+ }
+ return true
+ }
+
+ // use generic comparison
+ if o.getCardinality() != rc.getCardinality() {
+ return false
+ }
+ rit := rc.getShortIterator()
+ bit := o.getShortIterator()
+
+ //k := 0
+ for rit.hasNext() {
+ if bit.next() != rit.next() {
+ return false
+ }
+ //k++
+ }
+ return true
+}
+
+func (rc *runContainer16) iaddReturnMinimized(x uint16) container {
+ rc.Add(x)
+ return rc
+}
+
+func (rc *runContainer16) iadd(x uint16) (wasNew bool) {
+ return rc.Add(x)
+}
+
+func (rc *runContainer16) iremoveReturnMinimized(x uint16) container {
+ rc.removeKey(x)
+ return rc
+}
+
+func (rc *runContainer16) iremove(x uint16) bool {
+ return rc.removeKey(x)
+}
+
+func (rc *runContainer16) or(a container) container {
+ if rc.isFull() {
+ return rc.clone()
+ }
+ switch c := a.(type) {
+ case *runContainer16:
+ return rc.union(c)
+ case *arrayContainer:
+ return rc.orArray(c)
+ case *bitmapContainer:
+ return rc.orBitmapContainer(c)
+ }
+ panic("unsupported container type")
+}
+
+func (rc *runContainer16) orCardinality(a container) int {
+ switch c := a.(type) {
+ case *runContainer16:
+ return int(rc.unionCardinality(c))
+ case *arrayContainer:
+ return rc.orArrayCardinality(c)
+ case *bitmapContainer:
+ return rc.orBitmapContainerCardinality(c)
+ }
+ panic("unsupported container type")
+}
+
+// orBitmapContainer finds the union of rc and bc.
+func (rc *runContainer16) orBitmapContainer(bc *bitmapContainer) container {
+ bc2 := newBitmapContainerFromRun(rc)
+ return bc2.iorBitmap(bc)
+}
+
+func (rc *runContainer16) andBitmapContainerCardinality(bc *bitmapContainer) int {
+ answer := 0
+ for i := range rc.iv {
+ answer += bc.getCardinalityInRange(uint(rc.iv[i].start), uint(rc.iv[i].last())+1)
+ }
+ //bc.computeCardinality()
+ return answer
+}
+
+func (rc *runContainer16) orBitmapContainerCardinality(bc *bitmapContainer) int {
+ return rc.getCardinality() + bc.getCardinality() - rc.andBitmapContainerCardinality(bc)
+}
+
+// orArray finds the union of rc and ac.
+func (rc *runContainer16) orArray(ac *arrayContainer) container {
+ bc1 := newBitmapContainerFromRun(rc)
+ bc2 := ac.toBitmapContainer()
+ return bc1.orBitmap(bc2)
+}
+
+// orArray finds the union of rc and ac.
+func (rc *runContainer16) orArrayCardinality(ac *arrayContainer) int {
+ return ac.getCardinality() + rc.getCardinality() - rc.andArrayCardinality(ac)
+}
+
+func (rc *runContainer16) ior(a container) container {
+ if rc.isFull() {
+ return rc
+ }
+ switch c := a.(type) {
+ case *runContainer16:
+ return rc.inplaceUnion(c)
+ case *arrayContainer:
+ return rc.iorArray(c)
+ case *bitmapContainer:
+ return rc.iorBitmapContainer(c)
+ }
+ panic("unsupported container type")
+}
+
+func (rc *runContainer16) inplaceUnion(rc2 *runContainer16) container {
+ p("rc.inplaceUnion with len(rc2.iv)=%v", len(rc2.iv))
+ for _, p := range rc2.iv {
+ last := int64(p.last())
+ for i := int64(p.start); i <= last; i++ {
+ rc.Add(uint16(i))
+ }
+ }
+ return rc
+}
+
+func (rc *runContainer16) iorBitmapContainer(bc *bitmapContainer) container {
+
+ it := bc.getShortIterator()
+ for it.hasNext() {
+ rc.Add(it.next())
+ }
+ return rc
+}
+
+func (rc *runContainer16) iorArray(ac *arrayContainer) container {
+ it := ac.getShortIterator()
+ for it.hasNext() {
+ rc.Add(it.next())
+ }
+ return rc
+}
+
+// lazyIOR is described (not yet implemented) in
+// this nice note from @lemire on
+// https://github.com/RoaringBitmap/roaring/pull/70#issuecomment-263613737
+//
+// Description of lazyOR and lazyIOR from @lemire:
+//
+// Lazy functions are optional and can simply be
+// wrappers around non-lazy functions.
+//
+// The idea of "laziness" is as follows. It is
+// inspired by the concept of lazy evaluation
+// you might be familiar with (functional programming
+// and all that). So a roaring bitmap is
+// such that all its containers are, in some
+// sense, chosen to use as little memory as
+// possible. This is nice. Also, all bitsets
+// are "cardinality aware" so that you can do
+// fast rank/select queries, or query the
+// cardinality of the whole bitmap... very fast,
+// without latency.
+//
+// However, imagine that you are aggregating 100
+// bitmaps together. So you OR the first two, then OR
+// that with the third one and so forth. Clearly,
+// intermediate bitmaps don't need to be as
+// compressed as possible, right? They can be
+// in a "dirty state". You only need the end
+// result to be in a nice state... which you
+// can achieve by calling repairAfterLazy at the end.
+//
+// The Java/C code does something special for
+// the in-place lazy OR runs. The idea is that
+// instead of taking two run containers and
+// generating a new one, we actually try to
+// do the computation in-place through a
+// technique invented by @gssiyankai (pinging him!).
+// What you do is you check whether the host
+// run container has lots of extra capacity.
+// If it does, you move its data at the end of
+// the backing array, and then you write
+// the answer at the beginning. What this
+// trick does is minimize memory allocations.
+//
+func (rc *runContainer16) lazyIOR(a container) container {
+ // not lazy at the moment
+ // TODO: make it lazy
+ return rc.ior(a)
+
+ /*
+ switch c := a.(type) {
+ case *arrayContainer:
+ return rc.lazyIorArray(c)
+ case *bitmapContainer:
+ return rc.lazyIorBitmap(c)
+ case *runContainer16:
+ return rc.lazyIorRun16(c)
+ }
+ panic("unsupported container type")
+ */
+}
+
+// lazyOR is described above in lazyIOR.
+func (rc *runContainer16) lazyOR(a container) container {
+
+ // not lazy at the moment
+ // TODO: make it lazy
+ return rc.or(a)
+
+ /*
+ switch c := a.(type) {
+ case *arrayContainer:
+ return rc.lazyOrArray(c)
+ case *bitmapContainer:
+ return rc.lazyOrBitmap(c)
+ case *runContainer16:
+ return rc.lazyOrRunContainer16(c)
+ }
+ panic("unsupported container type")
+ */
+}
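+
+// Illustrative sketch (a hypothetical helper, not part of the upstream
+// API): the aggregation pattern from the note above lazyIOR. Each step
+// unions one more bitmap into an accumulator; a true lazy scheme would
+// leave intermediate containers "dirty" and run a single repair pass at
+// the end, but since lazyIOR currently falls through to ior, the plain
+// in-place Or gives the same result.
+func aggregationSketch(bms []*Bitmap) *Bitmap {
+	if len(bms) == 0 {
+		return NewBitmap()
+	}
+	acc := bms[0].Clone() // keep the inputs intact
+	for _, b := range bms[1:] {
+		acc.Or(b) // in-place union into the accumulator
+	}
+	return acc
+}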
+
+func (rc *runContainer16) intersects(a container) bool {
+ // TODO: optimize by doing inplace/less allocation, possibly?
+ isect := rc.and(a)
+ return isect.getCardinality() > 0
+}
+
+func (rc *runContainer16) xor(a container) container {
+ switch c := a.(type) {
+ case *arrayContainer:
+ return rc.xorArray(c)
+ case *bitmapContainer:
+ return rc.xorBitmap(c)
+ case *runContainer16:
+ return rc.xorRunContainer16(c)
+ }
+ panic("unsupported container type")
+}
+
+func (rc *runContainer16) iandNot(a container) container {
+ switch c := a.(type) {
+ case *arrayContainer:
+ return rc.iandNotArray(c)
+ case *bitmapContainer:
+ return rc.iandNotBitmap(c)
+ case *runContainer16:
+ return rc.iandNotRunContainer16(c)
+ }
+ panic("unsupported container type")
+}
+
+// flip the values in the range [firstOfRange,endx)
+func (rc *runContainer16) inot(firstOfRange, endx int) container {
+ if firstOfRange >= endx {
+		panic(fmt.Sprintf("invalid range: firstOfRange=%v >= endx=%v", firstOfRange, endx))
+ }
+ // TODO: minimize copies, do it all inplace; not() makes a copy.
+ rc = rc.Not(firstOfRange, endx)
+ return rc
+}
+
+func (rc *runContainer16) getCardinality() int {
+ return int(rc.cardinality())
+}
+
+func (rc *runContainer16) rank(x uint16) int {
+ n := int64(len(rc.iv))
+ xx := int64(x)
+ w, already, _ := rc.search(xx, nil)
+ if w < 0 {
+ return 0
+ }
+ if !already && w == n-1 {
+ return rc.getCardinality()
+ }
+ var rnk int64
+ if !already {
+ for i := int64(0); i <= w; i++ {
+ rnk += rc.iv[i].runlen()
+ }
+ return int(rnk)
+ }
+ for i := int64(0); i < w; i++ {
+ rnk += rc.iv[i].runlen()
+ }
+ rnk += int64(x-rc.iv[w].start) + 1
+ return int(rnk)
+}
+
+func (rc *runContainer16) selectInt(x uint16) int {
+ return rc.selectInt16(x)
+}
+
+func (rc *runContainer16) andNotRunContainer16(b *runContainer16) container {
+ return rc.AndNotRunContainer16(b)
+}
+
+func (rc *runContainer16) andNotArray(ac *arrayContainer) container {
+ rcb := rc.toBitmapContainer()
+ acb := ac.toBitmapContainer()
+ return rcb.andNotBitmap(acb)
+}
+
+func (rc *runContainer16) andNotBitmap(bc *bitmapContainer) container {
+ rcb := rc.toBitmapContainer()
+ return rcb.andNotBitmap(bc)
+}
+
+func (rc *runContainer16) toBitmapContainer() *bitmapContainer {
+ p("run16 toBitmap starting; rc has %v ranges", len(rc.iv))
+ bc := newBitmapContainer()
+ for i := range rc.iv {
+ bc.iaddRange(int(rc.iv[i].start), int(rc.iv[i].last())+1)
+ }
+ bc.computeCardinality()
+ return bc
+}
+
+func (rc *runContainer16) iandNotRunContainer16(x2 *runContainer16) container {
+ rcb := rc.toBitmapContainer()
+ x2b := x2.toBitmapContainer()
+ rcb.iandNotBitmapSurely(x2b)
+ // TODO: check size and optimize the return value
+ // TODO: is inplace modification really required? If not, elide the copy.
+ rc2 := newRunContainer16FromBitmapContainer(rcb)
+ *rc = *rc2
+ return rc
+}
+
+func (rc *runContainer16) iandNotArray(ac *arrayContainer) container {
+ rcb := rc.toBitmapContainer()
+ acb := ac.toBitmapContainer()
+ rcb.iandNotBitmapSurely(acb)
+ // TODO: check size and optimize the return value
+ // TODO: is inplace modification really required? If not, elide the copy.
+ rc2 := newRunContainer16FromBitmapContainer(rcb)
+ *rc = *rc2
+ return rc
+}
+
+func (rc *runContainer16) iandNotBitmap(bc *bitmapContainer) container {
+ rcb := rc.toBitmapContainer()
+ rcb.iandNotBitmapSurely(bc)
+ // TODO: check size and optimize the return value
+ // TODO: is inplace modification really required? If not, elide the copy.
+ rc2 := newRunContainer16FromBitmapContainer(rcb)
+ *rc = *rc2
+ return rc
+}
+
+func (rc *runContainer16) xorRunContainer16(x2 *runContainer16) container {
+ rcb := rc.toBitmapContainer()
+ x2b := x2.toBitmapContainer()
+ return rcb.xorBitmap(x2b)
+}
+
+func (rc *runContainer16) xorArray(ac *arrayContainer) container {
+ rcb := rc.toBitmapContainer()
+ acb := ac.toBitmapContainer()
+ return rcb.xorBitmap(acb)
+}
+
+func (rc *runContainer16) xorBitmap(bc *bitmapContainer) container {
+ rcb := rc.toBitmapContainer()
+ return rcb.xorBitmap(bc)
+}
+
+// convert to bitmap or array *if needed*
+func (rc *runContainer16) toEfficientContainer() container {
+
+ // runContainer16SerializedSizeInBytes(numRuns)
+ sizeAsRunContainer := rc.getSizeInBytes()
+ sizeAsBitmapContainer := bitmapContainerSizeInBytes()
+ card := int(rc.cardinality())
+ sizeAsArrayContainer := arrayContainerSizeInBytes(card)
+ if sizeAsRunContainer <= minOfInt(sizeAsBitmapContainer, sizeAsArrayContainer) {
+ return rc
+ }
+ if card <= arrayDefaultMaxSize {
+ return rc.toArrayContainer()
+ }
+ bc := newBitmapContainerFromRun(rc)
+ return bc
+}
+
+func (rc *runContainer16) toArrayContainer() *arrayContainer {
+ ac := newArrayContainer()
+ for i := range rc.iv {
+ ac.iaddRange(int(rc.iv[i].start), int(rc.iv[i].last())+1)
+ }
+ return ac
+}
+
+func newRunContainer16FromContainer(c container) *runContainer16 {
+
+ switch x := c.(type) {
+ case *runContainer16:
+ return x.Clone()
+ case *arrayContainer:
+ return newRunContainer16FromArray(x)
+ case *bitmapContainer:
+ return newRunContainer16FromBitmapContainer(x)
+ }
+ panic("unsupported container type")
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/roaring.go b/vendor/github.com/RoaringBitmap/roaring/roaring.go
new file mode 100644
index 0000000000..5045a41933
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/roaring.go
@@ -0,0 +1,1345 @@
+// Package roaring is an implementation of Roaring Bitmaps in Go.
+// They provide fast compressed bitmap data structures (also called bitsets).
+// They are ideally suited to represent sets of integers over
+// relatively small ranges.
+// See http://roaringbitmap.org for details.
+package roaring
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+// Bitmap represents a compressed bitmap where you can add integers.
+type Bitmap struct {
+ highlowcontainer roaringArray
+}
+
+// ToBase64 serializes a bitmap as Base64
+func (rb *Bitmap) ToBase64() (string, error) {
+ buf := new(bytes.Buffer)
+ _, err := rb.WriteTo(buf)
+ return base64.StdEncoding.EncodeToString(buf.Bytes()), err
+}
+
+// FromBase64 deserializes a bitmap from Base64
+func (rb *Bitmap) FromBase64(str string) (int64, error) {
+ data, err := base64.StdEncoding.DecodeString(str)
+ if err != nil {
+ return 0, err
+ }
+ buf := bytes.NewBuffer(data)
+
+ return rb.ReadFrom(buf)
+}
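+
+// Illustrative sketch (a hypothetical helper, not part of the API):
+// round-trips a bitmap through its Base64 form using the two methods
+// above.
+func base64RoundTripSketch(rb *Bitmap) (*Bitmap, error) {
+	s, err := rb.ToBase64()
+	if err != nil {
+		return nil, err
+	}
+	out := NewBitmap()
+	if _, err := out.FromBase64(s); err != nil {
+		return nil, err
+	}
+	return out, nil // out.Equals(rb) should now hold
+}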
+
+// WriteTo writes a serialized version of this bitmap to stream.
+// The format is compatible with other RoaringBitmap
+// implementations (Java, C) and is documented here:
+// https://github.com/RoaringBitmap/RoaringFormatSpec
+func (rb *Bitmap) WriteTo(stream io.Writer) (int64, error) {
+ return rb.highlowcontainer.writeTo(stream)
+}
+
+// ToBytes returns an array of bytes corresponding to what is written
+// when calling WriteTo
+func (rb *Bitmap) ToBytes() ([]byte, error) {
+ return rb.highlowcontainer.toBytes()
+}
+
+// WriteToMsgpack writes a msgpack2/snappy-streaming compressed serialized
+// version of this bitmap to stream. The format is not
+// compatible with the WriteTo() format, and is
+// experimental: it may produce smaller on disk
+// footprint and/or be faster to read, depending
+// on your content. Currently only the Go roaring
+// implementation supports this format.
+func (rb *Bitmap) WriteToMsgpack(stream io.Writer) (int64, error) {
+ return 0, rb.highlowcontainer.writeToMsgpack(stream)
+}
+
+// ReadFrom reads a serialized version of this bitmap from stream.
+// The format is compatible with other RoaringBitmap
+// implementations (Java, C) and is documented here:
+// https://github.com/RoaringBitmap/RoaringFormatSpec
+func (rb *Bitmap) ReadFrom(stream io.Reader) (int64, error) {
+ return rb.highlowcontainer.readFrom(stream)
+}
+
+// FromBuffer creates a bitmap from its serialized version stored in buffer
+//
+// The format specification is available here:
+// https://github.com/RoaringBitmap/RoaringFormatSpec
+//
+// The provided byte array (buf) is expected to be a constant.
+// The function makes a best-effort attempt not to copy data.
+// You should take care not to modify buf, as doing so will
+// likely result in unexpected program behavior.
+//
+// Resulting bitmaps are effectively immutable in the following sense:
+// a copy-on-write marker is used so that when you modify the resulting
+// bitmap, copies of selected data (containers) are made.
+// You should *not* change the copy-on-write status of the resulting
+// bitmaps (SetCopyOnWrite).
+//
+func (rb *Bitmap) FromBuffer(buf []byte) (int64, error) {
+ return rb.highlowcontainer.fromBuffer(buf)
+}
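+
+// Illustrative sketch (a hypothetical helper): builds a read-mostly view
+// over serialized bytes. Per the contract above, buf must not be
+// modified afterwards; mutating the bitmap copies the affected
+// containers.
+func fromBytesSketch(buf []byte) (*Bitmap, error) {
+	rb := NewBitmap()
+	if _, err := rb.FromBuffer(buf); err != nil {
+		return nil, err
+	}
+	return rb, nil // rb aliases buf where possible
+}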
+
+// RunOptimize attempts to further compress the runs of consecutive values found in the bitmap
+func (rb *Bitmap) RunOptimize() {
+ rb.highlowcontainer.runOptimize()
+}
+
+// HasRunCompression returns true if the bitmap benefits from run compression
+func (rb *Bitmap) HasRunCompression() bool {
+ return rb.highlowcontainer.hasRunCompression()
+}
+
+// ReadFromMsgpack reads a msgpack2/snappy-streaming serialized
+// version of this bitmap from stream. The expected format
+// is that written by the WriteToMsgpack()
+// call; see the additional notes there.
+func (rb *Bitmap) ReadFromMsgpack(stream io.Reader) (int64, error) {
+ return 0, rb.highlowcontainer.readFromMsgpack(stream)
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface for the bitmap
+func (rb *Bitmap) MarshalBinary() ([]byte, error) {
+ var buf bytes.Buffer
+ writer := bufio.NewWriter(&buf)
+ _, err := rb.WriteTo(writer)
+ if err != nil {
+ return nil, err
+ }
+ err = writer.Flush()
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface for the bitmap
+func (rb *Bitmap) UnmarshalBinary(data []byte) error {
+ var buf bytes.Buffer
+ _, err := buf.Write(data)
+ if err != nil {
+ return err
+ }
+ reader := bufio.NewReader(&buf)
+ _, err = rb.ReadFrom(reader)
+ return err
+}
+
+// NewBitmap creates a new empty Bitmap (see also New)
+func NewBitmap() *Bitmap {
+ return &Bitmap{}
+}
+
+// New creates a new empty Bitmap (same as NewBitmap)
+func New() *Bitmap {
+ return &Bitmap{}
+}
+
+// Clear resets the Bitmap to be logically empty, but may retain
+// some memory allocations that can speed up future operations
+func (rb *Bitmap) Clear() {
+ rb.highlowcontainer.clear()
+}
+
+// ToArray creates a new slice containing all of the integers stored in the Bitmap in sorted order
+func (rb *Bitmap) ToArray() []uint32 {
+ array := make([]uint32, rb.GetCardinality())
+ pos := 0
+ pos2 := 0
+
+ for pos < rb.highlowcontainer.size() {
+ hs := uint32(rb.highlowcontainer.getKeyAtIndex(pos)) << 16
+ c := rb.highlowcontainer.getContainerAtIndex(pos)
+ pos++
+ c.fillLeastSignificant16bits(array, pos2, hs)
+ pos2 += c.getCardinality()
+ }
+ return array
+}
+
+// GetSizeInBytes estimates the memory usage of the Bitmap. Note that this
+// might differ slightly from the number of bytes required for persistent storage
+func (rb *Bitmap) GetSizeInBytes() uint64 {
+ size := uint64(8)
+ for _, c := range rb.highlowcontainer.containers {
+ size += uint64(2) + uint64(c.getSizeInBytes())
+ }
+ return size
+}
+
+// GetSerializedSizeInBytes computes the serialized size in bytes
+// of the Bitmap. It should correspond to the
+// number of bytes written when invoking WriteTo. You can expect
+// that this function is much cheaper computationally than WriteTo.
+func (rb *Bitmap) GetSerializedSizeInBytes() uint64 {
+ return rb.highlowcontainer.serializedSizeInBytes()
+}
+
+// BoundSerializedSizeInBytes returns an upper bound on the serialized size in bytes
+// assuming that one wants to store "cardinality" integers in [0, universe_size)
+func BoundSerializedSizeInBytes(cardinality uint64, universeSize uint64) uint64 {
+ contnbr := (universeSize + uint64(65535)) / uint64(65536)
+ if contnbr > cardinality {
+ contnbr = cardinality
+ // we can't have more containers than we have values
+ }
+ headermax := 8*contnbr + 4
+ if 4 > (contnbr+7)/8 {
+ headermax += 4
+ } else {
+ headermax += (contnbr + 7) / 8
+ }
+ valsarray := uint64(arrayContainerSizeInBytes(int(cardinality)))
+ valsbitmap := contnbr * uint64(bitmapContainerSizeInBytes())
+ valsbest := valsarray
+ if valsbest > valsbitmap {
+ valsbest = valsbitmap
+ }
+ return valsbest + headermax
+}
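+
+// Illustrative sketch (a hypothetical helper): compares the a-priori
+// bound against the actual serialized size of a concrete bitmap. For a
+// universeSize that truly bounds the stored values, actual should not
+// exceed bound.
+func boundVsActualSketch(rb *Bitmap, universeSize uint64) (bound, actual uint64) {
+	bound = BoundSerializedSizeInBytes(rb.GetCardinality(), universeSize)
+	actual = rb.GetSerializedSizeInBytes()
+	return bound, actual
+}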
+
+// IntIterable allows you to iterate over the values in a Bitmap
+type IntIterable interface {
+ HasNext() bool
+ Next() uint32
+}
+
+type intIterator struct {
+ pos int
+ hs uint32
+ iter shortIterable
+ highlowcontainer *roaringArray
+}
+
+// HasNext returns true if there are more integers to iterate over
+func (ii *intIterator) HasNext() bool {
+ return ii.pos < ii.highlowcontainer.size()
+}
+
+func (ii *intIterator) init() {
+ if ii.highlowcontainer.size() > ii.pos {
+ ii.iter = ii.highlowcontainer.getContainerAtIndex(ii.pos).getShortIterator()
+ ii.hs = uint32(ii.highlowcontainer.getKeyAtIndex(ii.pos)) << 16
+ }
+}
+
+// Next returns the next integer
+func (ii *intIterator) Next() uint32 {
+ x := uint32(ii.iter.next()) | ii.hs
+ if !ii.iter.hasNext() {
+ ii.pos = ii.pos + 1
+ ii.init()
+ }
+ return x
+}
+
+func newIntIterator(a *Bitmap) *intIterator {
+ p := new(intIterator)
+ p.pos = 0
+ p.highlowcontainer = &a.highlowcontainer
+ p.init()
+ return p
+}
+
+// ManyIntIterable allows you to iterate over the values in a Bitmap
+type ManyIntIterable interface {
+	// NextMany fills the given buffer with values and returns how many values were written
+ NextMany([]uint32) int
+}
+
+type manyIntIterator struct {
+ pos int
+ hs uint32
+ iter manyIterable
+ highlowcontainer *roaringArray
+}
+
+func (ii *manyIntIterator) init() {
+ if ii.highlowcontainer.size() > ii.pos {
+ ii.iter = ii.highlowcontainer.getContainerAtIndex(ii.pos).getManyIterator()
+ ii.hs = uint32(ii.highlowcontainer.getKeyAtIndex(ii.pos)) << 16
+ } else {
+ ii.iter = nil
+ }
+}
+
+func (ii *manyIntIterator) NextMany(buf []uint32) int {
+ n := 0
+ for n < len(buf) {
+ if ii.iter == nil {
+ break
+ }
+ moreN := ii.iter.nextMany(ii.hs, buf[n:])
+ n += moreN
+ if moreN == 0 {
+ ii.pos = ii.pos + 1
+ ii.init()
+ }
+ }
+
+ return n
+}
+
+func newManyIntIterator(a *Bitmap) *manyIntIterator {
+ p := new(manyIntIterator)
+ p.pos = 0
+ p.highlowcontainer = &a.highlowcontainer
+ p.init()
+ return p
+}
+
+// String creates a string representation of the Bitmap
+func (rb *Bitmap) String() string {
+ // inspired by https://github.com/fzandona/goroar/
+ var buffer bytes.Buffer
+ start := []byte("{")
+ buffer.Write(start)
+ i := rb.Iterator()
+ counter := 0
+ if i.HasNext() {
+ counter = counter + 1
+ buffer.WriteString(strconv.FormatInt(int64(i.Next()), 10))
+ }
+ for i.HasNext() {
+ buffer.WriteString(",")
+ counter = counter + 1
+ // to avoid exhausting the memory
+ if counter > 0x40000 {
+ buffer.WriteString("...")
+ break
+ }
+ buffer.WriteString(strconv.FormatInt(int64(i.Next()), 10))
+ }
+ buffer.WriteString("}")
+ return buffer.String()
+}
+
+// Iterator creates a new IntIterable to iterate over the integers contained in the bitmap, in sorted order
+func (rb *Bitmap) Iterator() IntIterable {
+ return newIntIterator(rb)
+}
+
+// ManyIterator creates a new ManyIntIterable to iterate over the integers contained in the bitmap, in sorted order
+func (rb *Bitmap) ManyIterator() ManyIntIterable {
+ return newManyIntIterator(rb)
+}
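+
+// Illustrative sketch (a hypothetical helper): the two iteration styles.
+// Iterator yields one value per call; ManyIterator fills a caller-owned
+// buffer, which amortizes per-value overhead on large bitmaps.
+func iterationSketch(rb *Bitmap) int {
+	count := 0
+	it := rb.Iterator()
+	for it.HasNext() {
+		_ = it.Next() // values arrive in sorted order
+		count++
+	}
+	buf := make([]uint32, 1024)
+	many := rb.ManyIterator()
+	for n := many.NextMany(buf); n > 0; n = many.NextMany(buf) {
+		_ = buf[:n] // the next batch of sorted values
+	}
+	return count
+}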
+
+// Clone creates a copy of the Bitmap
+func (rb *Bitmap) Clone() *Bitmap {
+ ptr := new(Bitmap)
+ ptr.highlowcontainer = *rb.highlowcontainer.clone()
+ return ptr
+}
+
+// Minimum returns the smallest value stored in this roaring bitmap; it assumes that the bitmap is not empty
+func (rb *Bitmap) Minimum() uint32 {
+ return uint32(rb.highlowcontainer.containers[0].minimum()) | (uint32(rb.highlowcontainer.keys[0]) << 16)
+}
+
+// Maximum returns the largest value stored in this roaring bitmap; it assumes that the bitmap is not empty
+func (rb *Bitmap) Maximum() uint32 {
+ lastindex := len(rb.highlowcontainer.containers) - 1
+ return uint32(rb.highlowcontainer.containers[lastindex].maximum()) | (uint32(rb.highlowcontainer.keys[lastindex]) << 16)
+}
+
+// Contains returns true if the integer is contained in the bitmap
+func (rb *Bitmap) Contains(x uint32) bool {
+ hb := highbits(x)
+ c := rb.highlowcontainer.getContainer(hb)
+ return c != nil && c.contains(lowbits(x))
+}
+
+// ContainsInt returns true if the integer is contained in the bitmap (this is a convenience method; the parameter is cast to uint32 and Contains is called)
+func (rb *Bitmap) ContainsInt(x int) bool {
+ return rb.Contains(uint32(x))
+}
+
+// Equals returns true if the two bitmaps contain the same integers
+func (rb *Bitmap) Equals(o interface{}) bool {
+ srb, ok := o.(*Bitmap)
+ if ok {
+ return srb.highlowcontainer.equals(rb.highlowcontainer)
+ }
+ return false
+}
+
+// Add the integer x to the bitmap
+func (rb *Bitmap) Add(x uint32) {
+ hb := highbits(x)
+ ra := &rb.highlowcontainer
+ i := ra.getIndex(hb)
+ if i >= 0 {
+ var c container
+ c = ra.getWritableContainerAtIndex(i).iaddReturnMinimized(lowbits(x))
+ rb.highlowcontainer.setContainerAtIndex(i, c)
+ } else {
+ newac := newArrayContainer()
+ rb.highlowcontainer.insertNewKeyValueAt(-i-1, hb, newac.iaddReturnMinimized(lowbits(x)))
+ }
+}
+
+// add the integer x to the bitmap, return the container and its index
+func (rb *Bitmap) addwithptr(x uint32) (int, container) {
+ hb := highbits(x)
+ ra := &rb.highlowcontainer
+ i := ra.getIndex(hb)
+ var c container
+ if i >= 0 {
+ c = ra.getWritableContainerAtIndex(i).iaddReturnMinimized(lowbits(x))
+ rb.highlowcontainer.setContainerAtIndex(i, c)
+ return i, c
+ }
+ newac := newArrayContainer()
+ c = newac.iaddReturnMinimized(lowbits(x))
+ rb.highlowcontainer.insertNewKeyValueAt(-i-1, hb, c)
+ return -i - 1, c
+}
+
+// CheckedAdd adds the integer x to the bitmap and return true if it was added (false if the integer was already present)
+func (rb *Bitmap) CheckedAdd(x uint32) bool {
+ // TODO: add unit tests for this method
+ hb := highbits(x)
+ i := rb.highlowcontainer.getIndex(hb)
+ if i >= 0 {
+ C := rb.highlowcontainer.getWritableContainerAtIndex(i)
+ oldcard := C.getCardinality()
+ C = C.iaddReturnMinimized(lowbits(x))
+ rb.highlowcontainer.setContainerAtIndex(i, C)
+ return C.getCardinality() > oldcard
+ }
+ newac := newArrayContainer()
+ rb.highlowcontainer.insertNewKeyValueAt(-i-1, hb, newac.iaddReturnMinimized(lowbits(x)))
+ return true
+}
+
+// AddInt adds the integer x to the bitmap (convenience method: the parameter is cast to uint32 and we call Add)
+func (rb *Bitmap) AddInt(x int) {
+ rb.Add(uint32(x))
+}
+
+// Remove the integer x from the bitmap
+func (rb *Bitmap) Remove(x uint32) {
+ hb := highbits(x)
+ i := rb.highlowcontainer.getIndex(hb)
+ if i >= 0 {
+ c := rb.highlowcontainer.getWritableContainerAtIndex(i).iremoveReturnMinimized(lowbits(x))
+ rb.highlowcontainer.setContainerAtIndex(i, c)
+ if rb.highlowcontainer.getContainerAtIndex(i).getCardinality() == 0 {
+ rb.highlowcontainer.removeAtIndex(i)
+ }
+ }
+}
+
+// CheckedRemove removes the integer x from the bitmap and returns true if the integer was effectively removed (and false if the integer was not present)
+func (rb *Bitmap) CheckedRemove(x uint32) bool {
+ // TODO: add unit tests for this method
+ hb := highbits(x)
+ i := rb.highlowcontainer.getIndex(hb)
+ if i >= 0 {
+ C := rb.highlowcontainer.getWritableContainerAtIndex(i)
+ oldcard := C.getCardinality()
+ C = C.iremoveReturnMinimized(lowbits(x))
+ rb.highlowcontainer.setContainerAtIndex(i, C)
+ if rb.highlowcontainer.getContainerAtIndex(i).getCardinality() == 0 {
+ rb.highlowcontainer.removeAtIndex(i)
+ return true
+ }
+ return C.getCardinality() < oldcard
+ }
+ return false
+}
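+
+// Illustrative sketch (a hypothetical helper): CheckedAdd and
+// CheckedRemove report whether the bitmap actually changed.
+func checkedOpsSketch() {
+	rb := NewBitmap()
+	_ = rb.CheckedAdd(10)    // true: 10 was absent
+	_ = rb.CheckedAdd(10)    // false: 10 is already present
+	_ = rb.CheckedRemove(10) // true: 10 was removed
+	_ = rb.CheckedRemove(10) // false: nothing to remove
+}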
+
+// IsEmpty returns true if the Bitmap is empty (it is faster than doing (GetCardinality() == 0))
+func (rb *Bitmap) IsEmpty() bool {
+ return rb.highlowcontainer.size() == 0
+}
+
+// GetCardinality returns the number of integers contained in the bitmap
+func (rb *Bitmap) GetCardinality() uint64 {
+ size := uint64(0)
+ for _, c := range rb.highlowcontainer.containers {
+ size += uint64(c.getCardinality())
+ }
+ return size
+}
+
+// Rank returns the number of integers that are smaller or equal to x (Rank(infinity) would be GetCardinality())
+func (rb *Bitmap) Rank(x uint32) uint64 {
+ size := uint64(0)
+ for i := 0; i < rb.highlowcontainer.size(); i++ {
+ key := rb.highlowcontainer.getKeyAtIndex(i)
+ if key > highbits(x) {
+ return size
+ }
+ if key < highbits(x) {
+ size += uint64(rb.highlowcontainer.getContainerAtIndex(i).getCardinality())
+ } else {
+ return size + uint64(rb.highlowcontainer.getContainerAtIndex(i).rank(lowbits(x)))
+ }
+ }
+ return size
+}
+
+// Select returns the xth integer in the bitmap (0-indexed)
+func (rb *Bitmap) Select(x uint32) (uint32, error) {
+ if rb.GetCardinality() <= uint64(x) {
+ return 0, fmt.Errorf("can't find %dth integer in a bitmap with only %d items", x, rb.GetCardinality())
+ }
+
+ remaining := x
+ for i := 0; i < rb.highlowcontainer.size(); i++ {
+ c := rb.highlowcontainer.getContainerAtIndex(i)
+ if remaining >= uint32(c.getCardinality()) {
+ remaining -= uint32(c.getCardinality())
+ } else {
+ key := rb.highlowcontainer.getKeyAtIndex(i)
+ return uint32(key)<<16 + uint32(c.selectInt(uint16(remaining))), nil
+ }
+ }
+ return 0, fmt.Errorf("can't find %dth integer in a bitmap with only %d items", x, rb.GetCardinality())
+}
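+
+// Illustrative sketch (a hypothetical helper): Rank and Select are
+// near-inverses. For any x contained in the bitmap,
+// Select(Rank(x)-1) == x, since Rank counts values <= x and Select is
+// 0-indexed.
+func rankSelectSketch(rb *Bitmap, x uint32) bool {
+	if !rb.Contains(x) {
+		return false
+	}
+	r := rb.Rank(x) // >= 1 because x is present
+	v, err := rb.Select(uint32(r - 1))
+	return err == nil && v == x
+}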
+
+// And computes the intersection between two bitmaps and stores the result in the current bitmap
+func (rb *Bitmap) And(x2 *Bitmap) {
+ pos1 := 0
+ pos2 := 0
+ intersectionsize := 0
+ length1 := rb.highlowcontainer.size()
+ length2 := x2.highlowcontainer.size()
+
+main:
+ for {
+ if pos1 < length1 && pos2 < length2 {
+ s1 := rb.highlowcontainer.getKeyAtIndex(pos1)
+ s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
+ for {
+ if s1 == s2 {
+ c1 := rb.highlowcontainer.getWritableContainerAtIndex(pos1)
+ c2 := x2.highlowcontainer.getContainerAtIndex(pos2)
+ diff := c1.iand(c2)
+ if diff.getCardinality() > 0 {
+ rb.highlowcontainer.replaceKeyAndContainerAtIndex(intersectionsize, s1, diff, false)
+ intersectionsize++
+ }
+ pos1++
+ pos2++
+ if (pos1 == length1) || (pos2 == length2) {
+ break main
+ }
+ s1 = rb.highlowcontainer.getKeyAtIndex(pos1)
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ } else if s1 < s2 {
+ pos1 = rb.highlowcontainer.advanceUntil(s2, pos1)
+ if pos1 == length1 {
+ break main
+ }
+ s1 = rb.highlowcontainer.getKeyAtIndex(pos1)
+ } else { //s1 > s2
+ pos2 = x2.highlowcontainer.advanceUntil(s1, pos2)
+ if pos2 == length2 {
+ break main
+ }
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ }
+ }
+ } else {
+ break
+ }
+ }
+ rb.highlowcontainer.resize(intersectionsize)
+}
+
+// OrCardinality returns the cardinality of the union between two bitmaps; the bitmaps are not modified
+func (rb *Bitmap) OrCardinality(x2 *Bitmap) uint64 {
+ pos1 := 0
+ pos2 := 0
+ length1 := rb.highlowcontainer.size()
+ length2 := x2.highlowcontainer.size()
+ answer := uint64(0)
+main:
+ for {
+ if (pos1 < length1) && (pos2 < length2) {
+ s1 := rb.highlowcontainer.getKeyAtIndex(pos1)
+ s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
+
+ for {
+ if s1 < s2 {
+ answer += uint64(rb.highlowcontainer.getContainerAtIndex(pos1).getCardinality())
+ pos1++
+ if pos1 == length1 {
+ break main
+ }
+ s1 = rb.highlowcontainer.getKeyAtIndex(pos1)
+ } else if s1 > s2 {
+ answer += uint64(x2.highlowcontainer.getContainerAtIndex(pos2).getCardinality())
+ pos2++
+ if pos2 == length2 {
+ break main
+ }
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ } else {
+ // TODO: could be faster if we did not have to materialize the container
+ answer += uint64(rb.highlowcontainer.getContainerAtIndex(pos1).or(x2.highlowcontainer.getContainerAtIndex(pos2)).getCardinality())
+ pos1++
+ pos2++
+ if (pos1 == length1) || (pos2 == length2) {
+ break main
+ }
+ s1 = rb.highlowcontainer.getKeyAtIndex(pos1)
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ }
+ }
+ } else {
+ break
+ }
+ }
+ for ; pos1 < length1; pos1++ {
+ answer += uint64(rb.highlowcontainer.getContainerAtIndex(pos1).getCardinality())
+ }
+ for ; pos2 < length2; pos2++ {
+ answer += uint64(x2.highlowcontainer.getContainerAtIndex(pos2).getCardinality())
+ }
+ return answer
+}
+
+// AndCardinality returns the cardinality of the intersection between two bitmaps; the bitmaps are not modified
+func (rb *Bitmap) AndCardinality(x2 *Bitmap) uint64 {
+ pos1 := 0
+ pos2 := 0
+ answer := uint64(0)
+ length1 := rb.highlowcontainer.size()
+ length2 := x2.highlowcontainer.size()
+
+main:
+ for {
+ if pos1 < length1 && pos2 < length2 {
+ s1 := rb.highlowcontainer.getKeyAtIndex(pos1)
+ s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
+ for {
+ if s1 == s2 {
+ c1 := rb.highlowcontainer.getContainerAtIndex(pos1)
+ c2 := x2.highlowcontainer.getContainerAtIndex(pos2)
+ answer += uint64(c1.andCardinality(c2))
+ pos1++
+ pos2++
+ if (pos1 == length1) || (pos2 == length2) {
+ break main
+ }
+ s1 = rb.highlowcontainer.getKeyAtIndex(pos1)
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ } else if s1 < s2 {
+ pos1 = rb.highlowcontainer.advanceUntil(s2, pos1)
+ if pos1 == length1 {
+ break main
+ }
+ s1 = rb.highlowcontainer.getKeyAtIndex(pos1)
+ } else { //s1 > s2
+ pos2 = x2.highlowcontainer.advanceUntil(s1, pos2)
+ if pos2 == length2 {
+ break main
+ }
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ }
+ }
+ } else {
+ break
+ }
+ }
+ return answer
+}
+
+// Intersects checks whether the two bitmaps intersect; the bitmaps are not modified
+func (rb *Bitmap) Intersects(x2 *Bitmap) bool {
+ pos1 := 0
+ pos2 := 0
+ length1 := rb.highlowcontainer.size()
+ length2 := x2.highlowcontainer.size()
+
+main:
+ for {
+ if pos1 < length1 && pos2 < length2 {
+ s1 := rb.highlowcontainer.getKeyAtIndex(pos1)
+ s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
+ for {
+ if s1 == s2 {
+ c1 := rb.highlowcontainer.getContainerAtIndex(pos1)
+ c2 := x2.highlowcontainer.getContainerAtIndex(pos2)
+ if c1.intersects(c2) {
+ return true
+ }
+ pos1++
+ pos2++
+ if (pos1 == length1) || (pos2 == length2) {
+ break main
+ }
+ s1 = rb.highlowcontainer.getKeyAtIndex(pos1)
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ } else if s1 < s2 {
+ pos1 = rb.highlowcontainer.advanceUntil(s2, pos1)
+ if pos1 == length1 {
+ break main
+ }
+ s1 = rb.highlowcontainer.getKeyAtIndex(pos1)
+ } else { //s1 > s2
+ pos2 = x2.highlowcontainer.advanceUntil(s1, pos2)
+ if pos2 == length2 {
+ break main
+ }
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ }
+ }
+ } else {
+ break
+ }
+ }
+ return false
+}
+
+// Xor computes the symmetric difference between two bitmaps and stores the result in the current bitmap
+func (rb *Bitmap) Xor(x2 *Bitmap) {
+ pos1 := 0
+ pos2 := 0
+ length1 := rb.highlowcontainer.size()
+ length2 := x2.highlowcontainer.size()
+ for {
+ if (pos1 < length1) && (pos2 < length2) {
+ s1 := rb.highlowcontainer.getKeyAtIndex(pos1)
+ s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
+ if s1 < s2 {
+ pos1 = rb.highlowcontainer.advanceUntil(s2, pos1)
+ if pos1 == length1 {
+ break
+ }
+ } else if s1 > s2 {
+ c := x2.highlowcontainer.getWritableContainerAtIndex(pos2)
+ rb.highlowcontainer.insertNewKeyValueAt(pos1, x2.highlowcontainer.getKeyAtIndex(pos2), c)
+ length1++
+ pos1++
+ pos2++
+ } else {
+			// TODO: could be computed in-place for reduced memory usage
+ c := rb.highlowcontainer.getContainerAtIndex(pos1).xor(x2.highlowcontainer.getContainerAtIndex(pos2))
+ if c.getCardinality() > 0 {
+ rb.highlowcontainer.setContainerAtIndex(pos1, c)
+ pos1++
+ } else {
+ rb.highlowcontainer.removeAtIndex(pos1)
+ length1--
+ }
+ pos2++
+ }
+ } else {
+ break
+ }
+ }
+ if pos1 == length1 {
+ rb.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2)
+ }
+}
+
+// Or computes the union between two bitmaps and stores the result in the current bitmap
+func (rb *Bitmap) Or(x2 *Bitmap) {
+ pos1 := 0
+ pos2 := 0
+ length1 := rb.highlowcontainer.size()
+ length2 := x2.highlowcontainer.size()
+main:
+ for (pos1 < length1) && (pos2 < length2) {
+ s1 := rb.highlowcontainer.getKeyAtIndex(pos1)
+ s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
+
+ for {
+ if s1 < s2 {
+ pos1++
+ if pos1 == length1 {
+ break main
+ }
+ s1 = rb.highlowcontainer.getKeyAtIndex(pos1)
+ } else if s1 > s2 {
+ rb.highlowcontainer.insertNewKeyValueAt(pos1, s2, x2.highlowcontainer.getContainerAtIndex(pos2).clone())
+ pos1++
+ length1++
+ pos2++
+ if pos2 == length2 {
+ break main
+ }
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ } else {
+ rb.highlowcontainer.replaceKeyAndContainerAtIndex(pos1, s1, rb.highlowcontainer.getWritableContainerAtIndex(pos1).ior(x2.highlowcontainer.getContainerAtIndex(pos2)), false)
+ pos1++
+ pos2++
+ if (pos1 == length1) || (pos2 == length2) {
+ break main
+ }
+ s1 = rb.highlowcontainer.getKeyAtIndex(pos1)
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ }
+ }
+ }
+ if pos1 == length1 {
+ rb.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2)
+ }
+}
+
+/*func (rb *Bitmap) Or(x2 *Bitmap) {
+ results := Or(rb, x2) // Todo: could be computed in-place for reduced memory usage
+ rb.highlowcontainer = results.highlowcontainer
+}*/
+
+// AndNot computes the difference between two bitmaps and stores the result in the current bitmap
+func (rb *Bitmap) AndNot(x2 *Bitmap) {
+ pos1 := 0
+ pos2 := 0
+ intersectionsize := 0
+ length1 := rb.highlowcontainer.size()
+ length2 := x2.highlowcontainer.size()
+
+main:
+ for {
+ if pos1 < length1 && pos2 < length2 {
+ s1 := rb.highlowcontainer.getKeyAtIndex(pos1)
+ s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
+ for {
+ if s1 == s2 {
+ c1 := rb.highlowcontainer.getWritableContainerAtIndex(pos1)
+ c2 := x2.highlowcontainer.getContainerAtIndex(pos2)
+ diff := c1.iandNot(c2)
+ if diff.getCardinality() > 0 {
+ rb.highlowcontainer.replaceKeyAndContainerAtIndex(intersectionsize, s1, diff, false)
+ intersectionsize++
+ }
+ pos1++
+ pos2++
+ if (pos1 == length1) || (pos2 == length2) {
+ break main
+ }
+ s1 = rb.highlowcontainer.getKeyAtIndex(pos1)
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ } else if s1 < s2 {
+ c1 := rb.highlowcontainer.getContainerAtIndex(pos1)
+ mustCopyOnWrite := rb.highlowcontainer.needsCopyOnWrite(pos1)
+ rb.highlowcontainer.replaceKeyAndContainerAtIndex(intersectionsize, s1, c1, mustCopyOnWrite)
+ intersectionsize++
+ pos1++
+ if pos1 == length1 {
+ break main
+ }
+ s1 = rb.highlowcontainer.getKeyAtIndex(pos1)
+ } else { //s1 > s2
+ pos2 = x2.highlowcontainer.advanceUntil(s1, pos2)
+ if pos2 == length2 {
+ break main
+ }
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ }
+ }
+ } else {
+ break
+ }
+ }
+	// TODO: implement as a copy
+ for pos1 < length1 {
+ c1 := rb.highlowcontainer.getContainerAtIndex(pos1)
+ s1 := rb.highlowcontainer.getKeyAtIndex(pos1)
+ mustCopyOnWrite := rb.highlowcontainer.needsCopyOnWrite(pos1)
+ rb.highlowcontainer.replaceKeyAndContainerAtIndex(intersectionsize, s1, c1, mustCopyOnWrite)
+ intersectionsize++
+ pos1++
+ }
+ rb.highlowcontainer.resize(intersectionsize)
+}
+
+// Or computes the union between two bitmaps and returns the result
+func Or(x1, x2 *Bitmap) *Bitmap {
+ answer := NewBitmap()
+ pos1 := 0
+ pos2 := 0
+ length1 := x1.highlowcontainer.size()
+ length2 := x2.highlowcontainer.size()
+main:
+ for (pos1 < length1) && (pos2 < length2) {
+ s1 := x1.highlowcontainer.getKeyAtIndex(pos1)
+ s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
+
+ for {
+ if s1 < s2 {
+ answer.highlowcontainer.appendCopy(x1.highlowcontainer, pos1)
+ pos1++
+ if pos1 == length1 {
+ break main
+ }
+ s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
+ } else if s1 > s2 {
+ answer.highlowcontainer.appendCopy(x2.highlowcontainer, pos2)
+ pos2++
+ if pos2 == length2 {
+ break main
+ }
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ } else {
+ answer.highlowcontainer.appendContainer(s1, x1.highlowcontainer.getContainerAtIndex(pos1).or(x2.highlowcontainer.getContainerAtIndex(pos2)), false)
+ pos1++
+ pos2++
+ if (pos1 == length1) || (pos2 == length2) {
+ break main
+ }
+ s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ }
+ }
+ }
+ if pos1 == length1 {
+ answer.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2)
+ } else if pos2 == length2 {
+ answer.highlowcontainer.appendCopyMany(x1.highlowcontainer, pos1, length1)
+ }
+ return answer
+}
+
+// And computes the intersection between two bitmaps and returns the result
+func And(x1, x2 *Bitmap) *Bitmap {
+ answer := NewBitmap()
+ pos1 := 0
+ pos2 := 0
+ length1 := x1.highlowcontainer.size()
+ length2 := x2.highlowcontainer.size()
+main:
+ for pos1 < length1 && pos2 < length2 {
+ s1 := x1.highlowcontainer.getKeyAtIndex(pos1)
+ s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
+ for {
+ if s1 == s2 {
+ C := x1.highlowcontainer.getContainerAtIndex(pos1)
+ C = C.and(x2.highlowcontainer.getContainerAtIndex(pos2))
+
+ if C.getCardinality() > 0 {
+ answer.highlowcontainer.appendContainer(s1, C, false)
+ }
+ pos1++
+ pos2++
+ if (pos1 == length1) || (pos2 == length2) {
+ break main
+ }
+ s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ } else if s1 < s2 {
+ pos1 = x1.highlowcontainer.advanceUntil(s2, pos1)
+ if pos1 == length1 {
+ break main
+ }
+ s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
+ } else { // s1 > s2
+ pos2 = x2.highlowcontainer.advanceUntil(s1, pos2)
+ if pos2 == length2 {
+ break main
+ }
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ }
+ }
+ }
+ return answer
+}
+
+// Xor computes the symmetric difference between two bitmaps and returns the result
+func Xor(x1, x2 *Bitmap) *Bitmap {
+ answer := NewBitmap()
+ pos1 := 0
+ pos2 := 0
+ length1 := x1.highlowcontainer.size()
+ length2 := x2.highlowcontainer.size()
+ for {
+ if (pos1 < length1) && (pos2 < length2) {
+ s1 := x1.highlowcontainer.getKeyAtIndex(pos1)
+ s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
+ if s1 < s2 {
+ answer.highlowcontainer.appendCopy(x1.highlowcontainer, pos1)
+ pos1++
+ } else if s1 > s2 {
+ answer.highlowcontainer.appendCopy(x2.highlowcontainer, pos2)
+ pos2++
+ } else {
+ c := x1.highlowcontainer.getContainerAtIndex(pos1).xor(x2.highlowcontainer.getContainerAtIndex(pos2))
+ if c.getCardinality() > 0 {
+ answer.highlowcontainer.appendContainer(s1, c, false)
+ }
+ pos1++
+ pos2++
+ }
+ } else {
+ break
+ }
+ }
+ if pos1 == length1 {
+ answer.highlowcontainer.appendCopyMany(x2.highlowcontainer, pos2, length2)
+ } else if pos2 == length2 {
+ answer.highlowcontainer.appendCopyMany(x1.highlowcontainer, pos1, length1)
+ }
+ return answer
+}
+
+// AndNot computes the difference between two bitmaps and returns the result
+func AndNot(x1, x2 *Bitmap) *Bitmap {
+ answer := NewBitmap()
+ pos1 := 0
+ pos2 := 0
+ length1 := x1.highlowcontainer.size()
+ length2 := x2.highlowcontainer.size()
+
+main:
+ for {
+ if pos1 < length1 && pos2 < length2 {
+ s1 := x1.highlowcontainer.getKeyAtIndex(pos1)
+ s2 := x2.highlowcontainer.getKeyAtIndex(pos2)
+ for {
+ if s1 < s2 {
+ answer.highlowcontainer.appendCopy(x1.highlowcontainer, pos1)
+ pos1++
+ if pos1 == length1 {
+ break main
+ }
+ s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
+ } else if s1 == s2 {
+ c1 := x1.highlowcontainer.getContainerAtIndex(pos1)
+ c2 := x2.highlowcontainer.getContainerAtIndex(pos2)
+ diff := c1.andNot(c2)
+ if diff.getCardinality() > 0 {
+ answer.highlowcontainer.appendContainer(s1, diff, false)
+ }
+ pos1++
+ pos2++
+ if (pos1 == length1) || (pos2 == length2) {
+ break main
+ }
+ s1 = x1.highlowcontainer.getKeyAtIndex(pos1)
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ } else { //s1 > s2
+ pos2 = x2.highlowcontainer.advanceUntil(s1, pos2)
+ if pos2 == length2 {
+ break main
+ }
+ s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
+ }
+ }
+ } else {
+ break
+ }
+ }
+ if pos2 == length2 {
+ answer.highlowcontainer.appendCopyMany(x1.highlowcontainer, pos1, length1)
+ }
+ return answer
+}
+
+// AddMany adds all of the values in dat
+func (rb *Bitmap) AddMany(dat []uint32) {
+ if len(dat) == 0 {
+ return
+ }
+ prev := dat[0]
+ idx, c := rb.addwithptr(prev)
+ for _, i := range dat[1:] {
+ if highbits(prev) == highbits(i) {
+ c = c.iaddReturnMinimized(lowbits(i))
+ rb.highlowcontainer.setContainerAtIndex(idx, c)
+ } else {
+ idx, c = rb.addwithptr(i)
+ }
+ prev = i
+ }
+}
+
+// BitmapOf generates a new bitmap filled with the specified integers
+func BitmapOf(dat ...uint32) *Bitmap {
+ ans := NewBitmap()
+ ans.AddMany(dat)
+ return ans
+}
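+
+// Illustrative sketch (a hypothetical helper): the allocating
+// two-operand forms (And, Or, Xor, AndNot) leave their inputs untouched,
+// unlike the in-place receiver methods of the same names.
+func setAlgebraSketch() {
+	a := BitmapOf(1, 2, 3, 4)
+	b := BitmapOf(3, 4, 5)
+	_ = And(a, b)    // {3, 4}
+	_ = Or(a, b)     // {1, 2, 3, 4, 5}
+	_ = Xor(a, b)    // {1, 2, 5}
+	_ = AndNot(a, b) // {1, 2}
+	// a and b are unchanged; a.And(b) would instead mutate a
+}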
+
+// Flip negates the bits in the given range (i.e., [rangeStart,rangeEnd)): any integer present in this range and in the bitmap is removed,
+// and any integer present in the range and not in the bitmap is added.
+// The function uses 64-bit parameters even though a Bitmap stores 32-bit values because it is allowed and meaningful to use [0,uint64(0x100000000)) as a range
+// while uint64(0x100000000) cannot be represented as a 32-bit value.
+func (rb *Bitmap) Flip(rangeStart, rangeEnd uint64) {
+
+ if rangeEnd > MaxUint32+1 {
+ panic("rangeEnd > MaxUint32+1")
+ }
+ if rangeStart > MaxUint32+1 {
+ panic("rangeStart > MaxUint32+1")
+ }
+
+ if rangeStart >= rangeEnd {
+ return
+ }
+
+ hbStart := highbits(uint32(rangeStart))
+ lbStart := lowbits(uint32(rangeStart))
+ hbLast := highbits(uint32(rangeEnd - 1))
+ lbLast := lowbits(uint32(rangeEnd - 1))
+
+ var max uint32 = maxLowBit
+ for hb := hbStart; hb <= hbLast; hb++ {
+ var containerStart uint32
+ if hb == hbStart {
+ containerStart = uint32(lbStart)
+ }
+ containerLast := max
+ if hb == hbLast {
+ containerLast = uint32(lbLast)
+ }
+
+ i := rb.highlowcontainer.getIndex(hb)
+
+ if i >= 0 {
+ c := rb.highlowcontainer.getWritableContainerAtIndex(i).inot(int(containerStart), int(containerLast)+1)
+ if c.getCardinality() > 0 {
+ rb.highlowcontainer.setContainerAtIndex(i, c)
+ } else {
+ rb.highlowcontainer.removeAtIndex(i)
+ }
+ } else { // *think* the range of ones must never be
+ // empty.
+ rb.highlowcontainer.insertNewKeyValueAt(-i-1, hb, rangeOfOnes(int(containerStart), int(containerLast)))
+ }
+ }
+}
+
+// FlipInt calls Flip after casting the parameters (convenience method)
+func (rb *Bitmap) FlipInt(rangeStart, rangeEnd int) {
+ rb.Flip(uint64(rangeStart), uint64(rangeEnd))
+}
+
+// AddRange adds the integers in [rangeStart, rangeEnd) to the bitmap.
+// The function uses 64-bit parameters even though a Bitmap stores 32-bit values because it is allowed and meaningful to use [0,uint64(0x100000000)) as a range
+// while uint64(0x100000000) cannot be represented as a 32-bit value.
+func (rb *Bitmap) AddRange(rangeStart, rangeEnd uint64) {
+ if rangeStart >= rangeEnd {
+ return
+ }
+ if rangeEnd-1 > MaxUint32 {
+ panic("rangeEnd-1 > MaxUint32")
+ }
+ hbStart := uint32(highbits(uint32(rangeStart)))
+ lbStart := uint32(lowbits(uint32(rangeStart)))
+ hbLast := uint32(highbits(uint32(rangeEnd - 1)))
+ lbLast := uint32(lowbits(uint32(rangeEnd - 1)))
+
+ var max uint32 = maxLowBit
+ for hb := uint16(hbStart); hb <= uint16(hbLast); hb++ {
+ containerStart := uint32(0)
+ if hb == uint16(hbStart) {
+ containerStart = lbStart
+ }
+ containerLast := max
+ if hb == uint16(hbLast) {
+ containerLast = lbLast
+ }
+
+ i := rb.highlowcontainer.getIndex(hb)
+
+ if i >= 0 {
+ c := rb.highlowcontainer.getWritableContainerAtIndex(i).iaddRange(int(containerStart), int(containerLast)+1)
+ rb.highlowcontainer.setContainerAtIndex(i, c)
+ } else { // *think* the range of ones must never be
+ // empty.
+ rb.highlowcontainer.insertNewKeyValueAt(-i-1, hb, rangeOfOnes(int(containerStart), int(containerLast)))
+ }
+ }
+}
+
+// RemoveRange removes the integers in [rangeStart, rangeEnd) from the bitmap.
+// The function uses 64-bit parameters even though a Bitmap stores 32-bit values because it is allowed and meaningful to use [0,uint64(0x100000000)) as a range
+// while uint64(0x100000000) cannot be represented as a 32-bit value.
+func (rb *Bitmap) RemoveRange(rangeStart, rangeEnd uint64) {
+ if rangeStart >= rangeEnd {
+ return
+ }
+ if rangeEnd-1 > MaxUint32 {
+ // logically, we should assume that the user wants to
+ // remove all values from rangeStart to infinity
+ // see https://github.com/RoaringBitmap/roaring/issues/141
+ rangeEnd = uint64(0x100000000)
+ }
+ hbStart := uint32(highbits(uint32(rangeStart)))
+ lbStart := uint32(lowbits(uint32(rangeStart)))
+ hbLast := uint32(highbits(uint32(rangeEnd - 1)))
+ lbLast := uint32(lowbits(uint32(rangeEnd - 1)))
+
+ var max uint32 = maxLowBit
+
+ if hbStart == hbLast {
+ i := rb.highlowcontainer.getIndex(uint16(hbStart))
+ if i < 0 {
+ return
+ }
+ c := rb.highlowcontainer.getWritableContainerAtIndex(i).iremoveRange(int(lbStart), int(lbLast+1))
+ if c.getCardinality() > 0 {
+ rb.highlowcontainer.setContainerAtIndex(i, c)
+ } else {
+ rb.highlowcontainer.removeAtIndex(i)
+ }
+ return
+ }
+ ifirst := rb.highlowcontainer.getIndex(uint16(hbStart))
+ ilast := rb.highlowcontainer.getIndex(uint16(hbLast))
+
+ if ifirst >= 0 {
+ if lbStart != 0 {
+ c := rb.highlowcontainer.getWritableContainerAtIndex(ifirst).iremoveRange(int(lbStart), int(max+1))
+ if c.getCardinality() > 0 {
+ rb.highlowcontainer.setContainerAtIndex(ifirst, c)
+ ifirst++
+ }
+ }
+ } else {
+ ifirst = -ifirst - 1
+ }
+ if ilast >= 0 {
+ if lbLast != max {
+ c := rb.highlowcontainer.getWritableContainerAtIndex(ilast).iremoveRange(int(0), int(lbLast+1))
+ if c.getCardinality() > 0 {
+ rb.highlowcontainer.setContainerAtIndex(ilast, c)
+ } else {
+ ilast++
+ }
+ } else {
+ ilast++
+ }
+ } else {
+ ilast = -ilast - 1
+ }
+ rb.highlowcontainer.removeIndexRange(ifirst, ilast)
+}
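+
+// Illustrative sketch (a hypothetical helper): the range operations take
+// 64-bit half-open ranges so that the full 32-bit universe, i.e.
+// [0, 0x100000000), remains expressible.
+func rangeOpsSketch() {
+	rb := NewBitmap()
+	rb.AddRange(100, 200)    // adds 100..199
+	rb.RemoveRange(150, 200) // leaves 100..149
+	rb.Flip(0, 100)          // adds 0..99, so rb now holds 0..149
+	_ = rb.GetCardinality()  // 150
+}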
+
+// Flip negates the bits in the given range (i.e., [rangeStart,rangeEnd)): any integer present in this range and in the bitmap is removed,
+// and any integer present in the range and not in the bitmap is added. A new bitmap is returned, leaving
+// the current bitmap unchanged.
+// The function uses 64-bit parameters even though a Bitmap stores 32-bit values because it is allowed and meaningful to use [0,uint64(0x100000000)) as a range
+// while uint64(0x100000000) cannot be represented as a 32-bit value.
+func Flip(bm *Bitmap, rangeStart, rangeEnd uint64) *Bitmap {
+ if rangeStart >= rangeEnd {
+ return bm.Clone()
+ }
+
+ if rangeStart > MaxUint32 {
+ panic("rangeStart > MaxUint32")
+ }
+ if rangeEnd-1 > MaxUint32 {
+ panic("rangeEnd-1 > MaxUint32")
+ }
+
+ answer := NewBitmap()
+ hbStart := highbits(uint32(rangeStart))
+ lbStart := lowbits(uint32(rangeStart))
+ hbLast := highbits(uint32(rangeEnd - 1))
+ lbLast := lowbits(uint32(rangeEnd - 1))
+
+ // copy the containers before the active area
+ answer.highlowcontainer.appendCopiesUntil(bm.highlowcontainer, hbStart)
+
+ var max uint32 = maxLowBit
+ for hb := hbStart; hb <= hbLast; hb++ {
+ var containerStart uint32
+ if hb == hbStart {
+ containerStart = uint32(lbStart)
+ }
+ containerLast := max
+ if hb == hbLast {
+ containerLast = uint32(lbLast)
+ }
+
+ i := bm.highlowcontainer.getIndex(hb)
+ j := answer.highlowcontainer.getIndex(hb)
+
+ if i >= 0 {
+ c := bm.highlowcontainer.getContainerAtIndex(i).not(int(containerStart), int(containerLast)+1)
+ if c.getCardinality() > 0 {
+ answer.highlowcontainer.insertNewKeyValueAt(-j-1, hb, c)
+ }
+
+ } else { // *think* the range of ones must never be
+ // empty.
+ answer.highlowcontainer.insertNewKeyValueAt(-j-1, hb,
+ rangeOfOnes(int(containerStart), int(containerLast)))
+ }
+ }
+ // copy the containers after the active area.
+ answer.highlowcontainer.appendCopiesAfter(bm.highlowcontainer, hbLast)
+
+ return answer
+}
+
+// SetCopyOnWrite sets this bitmap to use copy-on-write so that copies are fast and memory conscious
+// if the parameter is true; otherwise, we keep the default behavior, where hard copies are made
+// (copy-on-write requires extra care in a threaded context).
+// Calling SetCopyOnWrite(true) on a bitmap created with FromBuffer is unsafe.
+func (rb *Bitmap) SetCopyOnWrite(val bool) {
+ rb.highlowcontainer.copyOnWrite = val
+}
+
+// GetCopyOnWrite gets this bitmap's copy-on-write property
+func (rb *Bitmap) GetCopyOnWrite() (val bool) {
+ return rb.highlowcontainer.copyOnWrite
+}
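+
+// Illustrative sketch (a hypothetical helper): with copy-on-write
+// enabled, Clone is cheap; containers are only duplicated once one side
+// mutates.
+func copyOnWriteSketch() {
+	rb := BitmapOf(1, 2, 3)
+	rb.SetCopyOnWrite(true)
+	cp := rb.Clone() // shares containers with rb for now
+	cp.Add(4)        // copies only the affected container
+	_ = rb.GetCopyOnWrite()
+}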
+
+// FlipInt calls Flip after casting the parameters (convenience method)
+func FlipInt(bm *Bitmap, rangeStart, rangeEnd int) *Bitmap {
+ return Flip(bm, uint64(rangeStart), uint64(rangeEnd))
+}
+
+// Statistics provides details on the container types in use.
+type Statistics struct {
+ Cardinality uint64
+ Containers uint64
+
+ ArrayContainers uint64
+ ArrayContainerBytes uint64
+ ArrayContainerValues uint64
+
+ BitmapContainers uint64
+ BitmapContainerBytes uint64
+ BitmapContainerValues uint64
+
+ RunContainers uint64
+ RunContainerBytes uint64
+ RunContainerValues uint64
+}
+
+// Stats returns details on container type usage in a Statistics struct.
+func (rb *Bitmap) Stats() Statistics {
+ stats := Statistics{}
+ stats.Containers = uint64(len(rb.highlowcontainer.containers))
+ for _, c := range rb.highlowcontainer.containers {
+ stats.Cardinality += uint64(c.getCardinality())
+
+ switch c.(type) {
+ case *arrayContainer:
+ stats.ArrayContainers++
+ stats.ArrayContainerBytes += uint64(c.getSizeInBytes())
+ stats.ArrayContainerValues += uint64(c.getCardinality())
+ case *bitmapContainer:
+ stats.BitmapContainers++
+ stats.BitmapContainerBytes += uint64(c.getSizeInBytes())
+ stats.BitmapContainerValues += uint64(c.getCardinality())
+ case *runContainer16:
+ stats.RunContainers++
+ stats.RunContainerBytes += uint64(c.getSizeInBytes())
+ stats.RunContainerValues += uint64(c.getCardinality())
+ }
+ }
+ return stats
+}
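+
+// Illustrative sketch (a hypothetical helper): Stats before and after
+// RunOptimize. A long run of consecutive values is typically stored as
+// array or bitmap containers at insert time and converted to a run
+// container by RunOptimize.
+func statsSketch() (before, after Statistics) {
+	rb := NewBitmap()
+	vals := make([]uint32, 10000)
+	for i := range vals {
+		vals[i] = uint32(i) // one long consecutive run
+	}
+	rb.AddMany(vals)
+	before = rb.Stats()
+	rb.RunOptimize()
+	after = rb.Stats() // after.RunContainers should now be nonzero
+	return before, after
+}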
diff --git a/vendor/github.com/RoaringBitmap/roaring/roaringarray.go b/vendor/github.com/RoaringBitmap/roaring/roaringarray.go
new file mode 100644
index 0000000000..d9659159d6
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/roaringarray.go
@@ -0,0 +1,893 @@
+package roaring
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ snappy "github.com/glycerine/go-unsnap-stream"
+ "github.com/tinylib/msgp/msgp"
+)
+
+//go:generate msgp -unexported
+
+type container interface {
+ clone() container
+ and(container) container
+ andCardinality(container) int
+ iand(container) container // i stands for inplace
+ andNot(container) container
+ iandNot(container) container // i stands for inplace
+ getCardinality() int
+ // rank returns the number of integers that are
+ // smaller or equal to x. rank(infinity) would be getCardinality().
+ rank(uint16) int
+
+ iadd(x uint16) bool // inplace, returns true if x was new.
+ iaddReturnMinimized(uint16) container // may change return type to minimize storage.
+
+ //addRange(start, final int) container // range is [firstOfRange,lastOfRange) (unused)
+ iaddRange(start, endx int) container // i stands for inplace, range is [firstOfRange,endx)
+
+ iremove(x uint16) bool // inplace, returns true if x was present.
+ iremoveReturnMinimized(uint16) container // may change return type to minimize storage.
+
+ not(start, final int) container // range is [firstOfRange,lastOfRange)
+ inot(firstOfRange, endx int) container // i stands for inplace, range is [firstOfRange,endx)
+ xor(r container) container
+ getShortIterator() shortIterable
+ getManyIterator() manyIterable
+ contains(i uint16) bool
+ maximum() uint16
+ minimum() uint16
+
+ // equals is now logical equals; it does not require the
+ // same underlying container types, but compares across
+ // any of the implementations.
+ equals(r container) bool
+
+ fillLeastSignificant16bits(array []uint32, i int, mask uint32)
+ or(r container) container
+ orCardinality(r container) int
+ isFull() bool
+ ior(r container) container // i stands for inplace
+ intersects(r container) bool // whether the two containers intersect
+ lazyOR(r container) container
+ lazyIOR(r container) container
+ getSizeInBytes() int
+ //removeRange(start, final int) container // range is [firstOfRange,lastOfRange) (unused)
+ iremoveRange(start, final int) container // i stands for inplace, range is [firstOfRange,lastOfRange)
+ selectInt(x uint16) int // selectInt returns the xth integer in the container
+ serializedSizeInBytes() int
+ readFrom(io.Reader) (int, error)
+ writeTo(io.Writer) (int, error)
+
+ numberOfRuns() int
+ toEfficientContainer() container
+ String() string
+ containerType() contype
+}
+
+type contype uint8
+
+const (
+ bitmapContype contype = iota
+ arrayContype
+ run16Contype
+ run32Contype
+)
+
+// careful: range is [firstOfRange,lastOfRange]
+func rangeOfOnes(start, last int) container {
+ if start > MaxUint16 {
+ panic("rangeOfOnes called with start > MaxUint16")
+ }
+ if last > MaxUint16 {
+ panic("rangeOfOnes called with last > MaxUint16")
+ }
+ if start < 0 {
+ panic("rangeOfOnes called with start < 0")
+ }
+ if last < 0 {
+ panic("rangeOfOnes called with last < 0")
+ }
+ return newRunContainer16Range(uint16(start), uint16(last))
+}
+
+type roaringArray struct {
+ keys []uint16
+ containers []container `msg:"-"` // don't try to serialize directly.
+ needCopyOnWrite []bool
+ copyOnWrite bool
+
+ // conserz is used at serialization time
+ // to serialize containers. Otherwise empty.
+ conserz []containerSerz
+}
+
+// containerSerz facilitates serializing container (tricky to
+// serialize because it is an interface) by providing a
+// light wrapper with a type identifier.
+type containerSerz struct {
+ t contype `msg:"t"` // type
+ r msgp.Raw `msg:"r"` // Raw msgpack of the actual container type
+}
+
+func newRoaringArray() *roaringArray {
+ return &roaringArray{}
+}
+
+// runOptimize compresses the element containers to minimize space consumed.
+// Q: how does this interact with copyOnWrite and needCopyOnWrite?
+// A: since we aren't changing the logical content, just the representation,
+// we don't bother to check the needCopyOnWrite bits. We replace
+// (possibly all) elements of ra.containers in-place with space
+// optimized versions.
+func (ra *roaringArray) runOptimize() {
+ for i := range ra.containers {
+ ra.containers[i] = ra.containers[i].toEfficientContainer()
+ }
+}
+
+func (ra *roaringArray) appendContainer(key uint16, value container, mustCopyOnWrite bool) {
+ ra.keys = append(ra.keys, key)
+ ra.containers = append(ra.containers, value)
+ ra.needCopyOnWrite = append(ra.needCopyOnWrite, mustCopyOnWrite)
+}
+
+func (ra *roaringArray) appendWithoutCopy(sa roaringArray, startingindex int) {
+ mustCopyOnWrite := sa.needCopyOnWrite[startingindex]
+ ra.appendContainer(sa.keys[startingindex], sa.containers[startingindex], mustCopyOnWrite)
+}
+
+func (ra *roaringArray) appendCopy(sa roaringArray, startingindex int) {
+	// copy-on-write only if both sides request it, or if we already have a lightweight copy
+ copyonwrite := (ra.copyOnWrite && sa.copyOnWrite) || sa.needsCopyOnWrite(startingindex)
+ if !copyonwrite {
+ // since there is no copy-on-write, we need to clone the container (this is important)
+ ra.appendContainer(sa.keys[startingindex], sa.containers[startingindex].clone(), copyonwrite)
+ } else {
+ ra.appendContainer(sa.keys[startingindex], sa.containers[startingindex], copyonwrite)
+ if !sa.needsCopyOnWrite(startingindex) {
+ sa.setNeedsCopyOnWrite(startingindex)
+ }
+ }
+}
+
+func (ra *roaringArray) appendWithoutCopyMany(sa roaringArray, startingindex, end int) {
+ for i := startingindex; i < end; i++ {
+ ra.appendWithoutCopy(sa, i)
+ }
+}
+
+func (ra *roaringArray) appendCopyMany(sa roaringArray, startingindex, end int) {
+ for i := startingindex; i < end; i++ {
+ ra.appendCopy(sa, i)
+ }
+}
+
+func (ra *roaringArray) appendCopiesUntil(sa roaringArray, stoppingKey uint16) {
+	// copy-on-write only if both sides request it, or if we already have a lightweight copy
+ copyonwrite := ra.copyOnWrite && sa.copyOnWrite
+
+ for i := 0; i < sa.size(); i++ {
+ if sa.keys[i] >= stoppingKey {
+ break
+ }
+ thiscopyonewrite := copyonwrite || sa.needsCopyOnWrite(i)
+ if thiscopyonewrite {
+ ra.appendContainer(sa.keys[i], sa.containers[i], thiscopyonewrite)
+ if !sa.needsCopyOnWrite(i) {
+ sa.setNeedsCopyOnWrite(i)
+ }
+
+ } else {
+ // since there is no copy-on-write, we need to clone the container (this is important)
+ ra.appendContainer(sa.keys[i], sa.containers[i].clone(), thiscopyonewrite)
+
+ }
+ }
+}
+
+func (ra *roaringArray) appendCopiesAfter(sa roaringArray, beforeStart uint16) {
+	// copy-on-write only if both sides request it, or if we already have a lightweight copy
+ copyonwrite := ra.copyOnWrite && sa.copyOnWrite
+
+ startLocation := sa.getIndex(beforeStart)
+ if startLocation >= 0 {
+ startLocation++
+ } else {
+ startLocation = -startLocation - 1
+ }
+
+ for i := startLocation; i < sa.size(); i++ {
+ thiscopyonewrite := copyonwrite || sa.needsCopyOnWrite(i)
+ if thiscopyonewrite {
+ ra.appendContainer(sa.keys[i], sa.containers[i], thiscopyonewrite)
+ if !sa.needsCopyOnWrite(i) {
+ sa.setNeedsCopyOnWrite(i)
+ }
+ } else {
+ // since there is no copy-on-write, we need to clone the container (this is important)
+ ra.appendContainer(sa.keys[i], sa.containers[i].clone(), thiscopyonewrite)
+
+ }
+ }
+}
+
+func (ra *roaringArray) removeIndexRange(begin, end int) {
+ if end <= begin {
+ return
+ }
+
+ r := end - begin
+
+ copy(ra.keys[begin:], ra.keys[end:])
+ copy(ra.containers[begin:], ra.containers[end:])
+ copy(ra.needCopyOnWrite[begin:], ra.needCopyOnWrite[end:])
+
+ ra.resize(len(ra.keys) - r)
+}
+
+func (ra *roaringArray) resize(newsize int) {
+ for k := newsize; k < len(ra.containers); k++ {
+ ra.containers[k] = nil
+ }
+
+ ra.keys = ra.keys[:newsize]
+ ra.containers = ra.containers[:newsize]
+ ra.needCopyOnWrite = ra.needCopyOnWrite[:newsize]
+}
+
+func (ra *roaringArray) clear() {
+ ra.resize(0)
+ ra.copyOnWrite = false
+ ra.conserz = nil
+}
+
+func (ra *roaringArray) clone() *roaringArray {
+
+ sa := roaringArray{}
+ sa.copyOnWrite = ra.copyOnWrite
+
+ // this is where copyOnWrite is used.
+ if ra.copyOnWrite {
+ sa.keys = make([]uint16, len(ra.keys))
+ copy(sa.keys, ra.keys)
+ sa.containers = make([]container, len(ra.containers))
+ copy(sa.containers, ra.containers)
+ sa.needCopyOnWrite = make([]bool, len(ra.needCopyOnWrite))
+
+ ra.markAllAsNeedingCopyOnWrite()
+ sa.markAllAsNeedingCopyOnWrite()
+
+		// the container objects are shared between ra and sa; both sides are marked as needing copy-on-write
+ } else {
+ // make a full copy
+
+ sa.keys = make([]uint16, len(ra.keys))
+ copy(sa.keys, ra.keys)
+
+ sa.containers = make([]container, len(ra.containers))
+ for i := range sa.containers {
+ sa.containers[i] = ra.containers[i].clone()
+ }
+
+ sa.needCopyOnWrite = make([]bool, len(ra.needCopyOnWrite))
+ }
+ return &sa
+}
+
+// unused function:
+//func (ra *roaringArray) containsKey(x uint16) bool {
+// return (ra.binarySearch(0, int64(len(ra.keys)), x) >= 0)
+//}
+
+func (ra *roaringArray) getContainer(x uint16) container {
+ i := ra.binarySearch(0, int64(len(ra.keys)), x)
+ if i < 0 {
+ return nil
+ }
+ return ra.containers[i]
+}
+
+func (ra *roaringArray) getContainerAtIndex(i int) container {
+ return ra.containers[i]
+}
+
+func (ra *roaringArray) getFastContainerAtIndex(i int, needsWriteable bool) container {
+ c := ra.getContainerAtIndex(i)
+ switch t := c.(type) {
+ case *arrayContainer:
+ c = t.toBitmapContainer()
+ case *runContainer16:
+ if !t.isFull() {
+ c = t.toBitmapContainer()
+ }
+ case *bitmapContainer:
+ if needsWriteable && ra.needCopyOnWrite[i] {
+ c = ra.containers[i].clone()
+ }
+ }
+ return c
+}
+
+func (ra *roaringArray) getWritableContainerAtIndex(i int) container {
+ if ra.needCopyOnWrite[i] {
+ ra.containers[i] = ra.containers[i].clone()
+ ra.needCopyOnWrite[i] = false
+ }
+ return ra.containers[i]
+}
+
+func (ra *roaringArray) getIndex(x uint16) int {
+ // before the binary search, we optimize for frequent cases
+ size := len(ra.keys)
+ if (size == 0) || (ra.keys[size-1] == x) {
+ return size - 1
+ }
+ return ra.binarySearch(0, int64(size), x)
+}
+
+func (ra *roaringArray) getKeyAtIndex(i int) uint16 {
+ return ra.keys[i]
+}
+
+func (ra *roaringArray) insertNewKeyValueAt(i int, key uint16, value container) {
+ ra.keys = append(ra.keys, 0)
+ ra.containers = append(ra.containers, nil)
+
+ copy(ra.keys[i+1:], ra.keys[i:])
+ copy(ra.containers[i+1:], ra.containers[i:])
+
+ ra.keys[i] = key
+ ra.containers[i] = value
+
+ ra.needCopyOnWrite = append(ra.needCopyOnWrite, false)
+ copy(ra.needCopyOnWrite[i+1:], ra.needCopyOnWrite[i:])
+ ra.needCopyOnWrite[i] = false
+}
+
+func (ra *roaringArray) remove(key uint16) bool {
+ i := ra.binarySearch(0, int64(len(ra.keys)), key)
+ if i >= 0 { // if the key exists
+ ra.removeAtIndex(i)
+ return true
+ }
+ return false
+}
+
+func (ra *roaringArray) removeAtIndex(i int) {
+ copy(ra.keys[i:], ra.keys[i+1:])
+ copy(ra.containers[i:], ra.containers[i+1:])
+
+ copy(ra.needCopyOnWrite[i:], ra.needCopyOnWrite[i+1:])
+
+ ra.resize(len(ra.keys) - 1)
+}
+
+func (ra *roaringArray) setContainerAtIndex(i int, c container) {
+ ra.containers[i] = c
+}
+
+func (ra *roaringArray) replaceKeyAndContainerAtIndex(i int, key uint16, c container, mustCopyOnWrite bool) {
+ ra.keys[i] = key
+ ra.containers[i] = c
+ ra.needCopyOnWrite[i] = mustCopyOnWrite
+}
+
+func (ra *roaringArray) size() int {
+ return len(ra.keys)
+}
+
+func (ra *roaringArray) binarySearch(begin, end int64, ikey uint16) int {
+ low := begin
+ high := end - 1
+ for low+16 <= high {
+ middleIndex := low + (high-low)/2 // avoid overflow
+ middleValue := ra.keys[middleIndex]
+
+ if middleValue < ikey {
+ low = middleIndex + 1
+ } else if middleValue > ikey {
+ high = middleIndex - 1
+ } else {
+ return int(middleIndex)
+ }
+ }
+ for ; low <= high; low++ {
+ val := ra.keys[low]
+ if val >= ikey {
+ if val == ikey {
+ return int(low)
+ }
+ break
+ }
+ }
+ return -int(low + 1)
+}
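+
+// Usage sketch for the return convention above (illustrative, hedged):
+// as in Java's Arrays.binarySearch, a miss returns -(insertionPoint)-1,
+// so the insertion point can be recovered with -i - 1:
+//
+//	i := ra.binarySearch(0, int64(len(ra.keys)), key)
+//	if i < 0 {
+//		ra.insertNewKeyValueAt(-i-1, key, newArrayContainer())
+//	}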
+
+func (ra *roaringArray) equals(o interface{}) bool {
+ srb, ok := o.(roaringArray)
+ if ok {
+
+ if srb.size() != ra.size() {
+ return false
+ }
+ for i, k := range ra.keys {
+ if k != srb.keys[i] {
+ return false
+ }
+ }
+
+ for i, c := range ra.containers {
+ if !c.equals(srb.containers[i]) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func (ra *roaringArray) headerSize() uint64 {
+ size := uint64(len(ra.keys))
+ if ra.hasRunCompression() {
+ if size < noOffsetThreshold { // for small bitmaps with runs, the offsets are omitted
+ return 4 + (size+7)/8 + 4*size
+ }
+ // the cookie takes only 4 bytes in total because the container count is packed into it
+ return 4 + (size+7)/8 + 8*size
+ }
+ return 4 + 4 + 8*size
+}
+
+// should be dirt cheap: the size is computed without serializing anything
+func (ra *roaringArray) serializedSizeInBytes() uint64 {
+ answer := ra.headerSize()
+ for _, c := range ra.containers {
+ answer += uint64(c.serializedSizeInBytes())
+ }
+ return answer
+}
+
+//
+// spec: https://github.com/RoaringBitmap/RoaringFormatSpec
+//
+func (ra *roaringArray) toBytes() ([]byte, error) {
+ stream := &bytes.Buffer{}
+ hasRun := ra.hasRunCompression()
+ isRunSizeInBytes := 0
+ cookieSize := 8
+ if hasRun {
+ cookieSize = 4
+ isRunSizeInBytes = (len(ra.keys) + 7) / 8
+ }
+ descriptiveHeaderSize := 4 * len(ra.keys)
+ preambleSize := cookieSize + isRunSizeInBytes + descriptiveHeaderSize
+
+ buf := make([]byte, preambleSize+4*len(ra.keys))
+
+ nw := 0
+
+ if hasRun {
+ binary.LittleEndian.PutUint16(buf[0:], uint16(serialCookie))
+ nw += 2
+ binary.LittleEndian.PutUint16(buf[2:], uint16(len(ra.keys)-1))
+ nw += 2
+
+ // compute isRun bitmap
+ var ir []byte
+
+ isRun := newBitmapContainer()
+ for i, c := range ra.containers {
+ switch c.(type) {
+ case *runContainer16:
+ isRun.iadd(uint16(i))
+ }
+ }
+ // convert to little endian
+ ir = isRun.asLittleEndianByteSlice()[:isRunSizeInBytes]
+ nw += copy(buf[nw:], ir)
+ } else {
+ binary.LittleEndian.PutUint32(buf[0:], uint32(serialCookieNoRunContainer))
+ nw += 4
+ binary.LittleEndian.PutUint32(buf[4:], uint32(len(ra.keys)))
+ nw += 4
+ }
+
+ // descriptive header
+ for i, key := range ra.keys {
+ binary.LittleEndian.PutUint16(buf[nw:], key)
+ nw += 2
+ c := ra.containers[i]
+ binary.LittleEndian.PutUint16(buf[nw:], uint16(c.getCardinality()-1))
+ nw += 2
+ }
+
+ startOffset := int64(preambleSize + 4*len(ra.keys))
+ if !hasRun || (len(ra.keys) >= noOffsetThreshold) {
+ // offset header
+ for _, c := range ra.containers {
+ binary.LittleEndian.PutUint32(buf[nw:], uint32(startOffset))
+ nw += 4
+ switch rc := c.(type) {
+ case *runContainer16:
+ startOffset += 2 + int64(len(rc.iv))*4
+ default:
+ startOffset += int64(getSizeInBytesFromCardinality(c.getCardinality()))
+ }
+ }
+ }
+
+ _, err := stream.Write(buf[:nw])
+ if err != nil {
+ return nil, err
+ }
+ for _, c := range ra.containers {
+ _, err := c.writeTo(stream)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return stream.Bytes(), nil
+}
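+
+// For orientation, the byte layout emitted above (derived from this
+// function and the linked spec; field sizes in bytes):
+//
+//	with runs:    [cookie:2][numContainers-1:2][isRun bitmap:(n+7)/8]
+//	              [key:2, card-1:2]*n  [offsets:4*n, only if n >= noOffsetThreshold]
+//	              [container payloads...]
+//	without runs: [cookie:4][numContainers:4][key:2, card-1:2]*n
+//	              [offsets:4*n][container payloads...]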
+
+//
+// spec: https://github.com/RoaringBitmap/RoaringFormatSpec
+//
+func (ra *roaringArray) writeTo(out io.Writer) (int64, error) {
+ by, err := ra.toBytes()
+ if err != nil {
+ return 0, err
+ }
+ n, err := out.Write(by)
+ if err == nil && n < len(by) {
+ err = io.ErrShortWrite
+ }
+ return int64(n), err
+}
+
+func (ra *roaringArray) fromBuffer(buf []byte) (int64, error) {
+ pos := 0
+ if len(buf) < 8 {
+ return 0, fmt.Errorf("buffer too small, expecting at least 8 bytes, was %d", len(buf))
+ }
+
+ cookie := binary.LittleEndian.Uint32(buf)
+ pos += 4
+ var size uint32 // number of containers
+ haveRunContainers := false
+ var isRunBitmap []byte
+
+ // cookie header
+ if cookie&0x0000FFFF == serialCookie {
+ haveRunContainers = true
+ size = uint32(uint16(cookie>>16) + 1) // number of containers
+
+ // create is-run-container bitmap
+ isRunBitmapSize := (int(size) + 7) / 8
+ if pos+isRunBitmapSize > len(buf) {
+ return 0, fmt.Errorf("malformed bitmap, is-run bitmap overruns buffer at %d", pos+isRunBitmapSize)
+ }
+
+ isRunBitmap = buf[pos : pos+isRunBitmapSize]
+ pos += isRunBitmapSize
+ } else if cookie == serialCookieNoRunContainer {
+ size = binary.LittleEndian.Uint32(buf[pos:])
+ pos += 4
+ } else {
+ return 0, fmt.Errorf("error in roaringArray.readFrom: did not find expected serialCookie in header")
+ }
+ if size > (1 << 16) {
+ return 0, fmt.Errorf("It is logically impossible to have more than (1<<16) containers.")
+ }
+ // descriptive header
+ // keycard is a flat slice of {key, cardinality} pairs
+ if pos+2*2*int(size) > len(buf) {
+ return 0, fmt.Errorf("malfomred bitmap, key-cardinality slice overruns buffer at %d", pos+2*2*int(size))
+ }
+ keycard := byteSliceAsUint16Slice(buf[pos : pos+2*2*int(size)])
+ pos += 2 * 2 * int(size)
+
+ if !haveRunContainers || size >= noOffsetThreshold {
+ pos += 4 * int(size)
+ }
+
+ // Allocate slices upfront as number of containers is known
+ if cap(ra.containers) >= int(size) {
+ ra.containers = ra.containers[:size]
+ } else {
+ ra.containers = make([]container, size)
+ }
+ if cap(ra.keys) >= int(size) {
+ ra.keys = ra.keys[:size]
+ } else {
+ ra.keys = make([]uint16, size)
+ }
+ if cap(ra.needCopyOnWrite) >= int(size) {
+ ra.needCopyOnWrite = ra.needCopyOnWrite[:size]
+ } else {
+ ra.needCopyOnWrite = make([]bool, size)
+ }
+
+ for i := uint32(0); i < size; i++ {
+ key := uint16(keycard[2*i])
+ card := int(keycard[2*i+1]) + 1
+ ra.keys[i] = key
+ ra.needCopyOnWrite[i] = true
+
+ if haveRunContainers && isRunBitmap[i/8]&(1<<(i%8)) != 0 {
+ // run container
+ nr := binary.LittleEndian.Uint16(buf[pos:])
+ pos += 2
+ if pos+int(nr)*4 > len(buf) {
+ return 0, fmt.Errorf("malformed bitmap, a run container overruns buffer at %d:%d", pos, pos+int(nr)*4)
+ }
+ nb := runContainer16{
+ iv: byteSliceAsInterval16Slice(buf[pos : pos+int(nr)*4]),
+ card: int64(card),
+ }
+ pos += int(nr) * 4
+ ra.containers[i] = &nb
+ } else if card > arrayDefaultMaxSize {
+ // bitmap container
+ nb := bitmapContainer{
+ cardinality: card,
+ bitmap: byteSliceAsUint64Slice(buf[pos : pos+arrayDefaultMaxSize*2]),
+ }
+ pos += arrayDefaultMaxSize * 2
+ ra.containers[i] = &nb
+ } else {
+ // array container
+ nb := arrayContainer{
+ byteSliceAsUint16Slice(buf[pos : pos+card*2]),
+ }
+ pos += card * 2
+ ra.containers[i] = &nb
+ }
+ }
+
+ return int64(pos), nil
+}
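+
+// Caveat, inferred from the code above: on the unsafe little-endian
+// build, byteSliceAsUint16Slice/byteSliceAsUint64Slice alias buf rather
+// than copying (the generic fallback copies), and every container is
+// flagged needCopyOnWrite. Callers should therefore keep buf alive and
+// unmodified for as long as the resulting bitmap is in use.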
+
+func (ra *roaringArray) readFrom(stream io.Reader) (int64, error) {
+ pos := 0
+ var cookie uint32
+ err := binary.Read(stream, binary.LittleEndian, &cookie)
+ if err != nil {
+ return 0, fmt.Errorf("error in roaringArray.readFrom: could not read initial cookie: %s", err)
+ }
+ pos += 4
+ var size uint32
+ haveRunContainers := false
+ var isRun *bitmapContainer
+ if cookie&0x0000FFFF == serialCookie {
+ haveRunContainers = true
+ size = uint32(uint16(cookie>>16) + 1)
+ bytesToRead := (int(size) + 7) / 8
+ numwords := (bytesToRead + 7) / 8
+ by := make([]byte, bytesToRead, numwords*8)
+ nr, err := io.ReadFull(stream, by)
+ if err != nil {
+ return 8 + int64(nr), fmt.Errorf("error in readFrom: could not read the "+
+ "runContainer bit flags of length %v bytes: %v", bytesToRead, err)
+ }
+ pos += bytesToRead
+ by = by[:cap(by)]
+ isRun = newBitmapContainer()
+ for i := 0; i < numwords; i++ {
+ isRun.bitmap[i] = binary.LittleEndian.Uint64(by)
+ by = by[8:]
+ }
+ } else if cookie == serialCookieNoRunContainer {
+ err = binary.Read(stream, binary.LittleEndian, &size)
+ if err != nil {
+ return 0, fmt.Errorf("error in roaringArray.readFrom: when reading size, got: %s", err)
+ }
+ pos += 4
+ } else {
+ return 0, fmt.Errorf("error in roaringArray.readFrom: did not find expected serialCookie in header")
+ }
+ if size > (1 << 16) {
+ return 0, fmt.Errorf("It is logically impossible to have more than (1<<16) containers.")
+ }
+ // descriptive header
+ keycard := make([]uint16, 2*size, 2*size)
+ err = binary.Read(stream, binary.LittleEndian, keycard)
+ if err != nil {
+ return 0, err
+ }
+ pos += 2 * 2 * int(size)
+ // offset header
+ if !haveRunContainers || size >= noOffsetThreshold {
+ // the offsets are only useful for random access; we read sequentially, so discard them
+ if _, err := io.CopyN(ioutil.Discard, stream, 4*int64(size)); err != nil {
+ return 0, err
+ }
+ pos += 4 * int(size)
+ }
+ for i := uint32(0); i < size; i++ {
+ key := int(keycard[2*i])
+ card := int(keycard[2*i+1]) + 1
+ if haveRunContainers && isRun.contains(uint16(i)) {
+ nb := newRunContainer16()
+ nr, err := nb.readFrom(stream)
+ if err != nil {
+ return 0, err
+ }
+ pos += nr
+ ra.appendContainer(uint16(key), nb, false)
+ } else if card > arrayDefaultMaxSize {
+ nb := newBitmapContainer()
+ nr, err := nb.readFrom(stream)
+ if err != nil {
+ return 0, err
+ }
+ nb.cardinality = card
+ pos += nr
+ ra.appendContainer(keycard[2*i], nb, false)
+ } else {
+ nb := newArrayContainerSize(card)
+ nr, err := nb.readFrom(stream)
+ if err != nil {
+ return 0, err
+ }
+ pos += nr
+ ra.appendContainer(keycard[2*i], nb, false)
+ }
+ }
+ return int64(pos), nil
+}
+
+func (ra *roaringArray) hasRunCompression() bool {
+ for _, c := range ra.containers {
+ switch c.(type) {
+ case *runContainer16:
+ return true
+ }
+ }
+ return false
+}
+
+func (ra *roaringArray) writeToMsgpack(stream io.Writer) error {
+
+ ra.conserz = make([]containerSerz, len(ra.containers))
+ for i, v := range ra.containers {
+ switch cn := v.(type) {
+ case *bitmapContainer:
+ bts, err := cn.MarshalMsg(nil)
+ if err != nil {
+ return err
+ }
+ ra.conserz[i].t = bitmapContype
+ ra.conserz[i].r = bts
+ case *arrayContainer:
+ bts, err := cn.MarshalMsg(nil)
+ if err != nil {
+ return err
+ }
+ ra.conserz[i].t = arrayContype
+ ra.conserz[i].r = bts
+ case *runContainer16:
+ bts, err := cn.MarshalMsg(nil)
+ if err != nil {
+ return err
+ }
+ ra.conserz[i].t = run16Contype
+ ra.conserz[i].r = bts
+ default:
+ panic(fmt.Errorf("Unrecognized container implementation: %T", cn))
+ }
+ }
+ w := snappy.NewWriter(stream)
+ err := msgp.Encode(w, ra)
+ ra.conserz = nil
+ return err
+}
+
+func (ra *roaringArray) readFromMsgpack(stream io.Reader) error {
+ r := snappy.NewReader(stream)
+ err := msgp.Decode(r, ra)
+ if err != nil {
+ return err
+ }
+
+ if len(ra.containers) != len(ra.keys) {
+ ra.containers = make([]container, len(ra.keys))
+ }
+
+ for i, v := range ra.conserz {
+ switch v.t {
+ case bitmapContype:
+ c := &bitmapContainer{}
+ _, err = c.UnmarshalMsg(v.r)
+ if err != nil {
+ return err
+ }
+ ra.containers[i] = c
+ case arrayContype:
+ c := &arrayContainer{}
+ _, err = c.UnmarshalMsg(v.r)
+ if err != nil {
+ return err
+ }
+ ra.containers[i] = c
+ case run16Contype:
+ c := &runContainer16{}
+ _, err = c.UnmarshalMsg(v.r)
+ if err != nil {
+ return err
+ }
+ ra.containers[i] = c
+ default:
+ return fmt.Errorf("unrecognized contype serialization code: '%v'", v.t)
+ }
+ }
+ ra.conserz = nil
+ return nil
+}
+
+func (ra *roaringArray) advanceUntil(min uint16, pos int) int {
+ lower := pos + 1
+
+ if lower >= len(ra.keys) || ra.keys[lower] >= min {
+ return lower
+ }
+
+ spansize := 1
+
+ for lower+spansize < len(ra.keys) && ra.keys[lower+spansize] < min {
+ spansize *= 2
+ }
+ var upper int
+ if lower+spansize < len(ra.keys) {
+ upper = lower + spansize
+ } else {
+ upper = len(ra.keys) - 1
+ }
+
+ if ra.keys[upper] == min {
+ return upper
+ }
+
+ if ra.keys[upper] < min {
+ // the array contains no item >= min
+ return len(ra.keys)
+ }
+
+ // we know that the next-smallest span was too small
+ lower += (spansize >> 1)
+
+ mid := 0
+ for lower+1 != upper {
+ mid = (lower + upper) >> 1
+ if ra.keys[mid] == min {
+ return mid
+ } else if ra.keys[mid] < min {
+ lower = mid
+ } else {
+ upper = mid
+ }
+ }
+ return upper
+}
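+
+// advanceUntil above is an exponential ("galloping") search: the span is
+// doubled until it overshoots min, then a binary search runs inside the
+// last doubled span. The cost is O(log d) in the distance d actually
+// advanced, rather than O(log n) from scratch, which helps callers that
+// sweep forward through the keys with increasing min values.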
+
+func (ra *roaringArray) markAllAsNeedingCopyOnWrite() {
+ for i := range ra.needCopyOnWrite {
+ ra.needCopyOnWrite[i] = true
+ }
+}
+
+func (ra *roaringArray) needsCopyOnWrite(i int) bool {
+ return ra.needCopyOnWrite[i]
+}
+
+func (ra *roaringArray) setNeedsCopyOnWrite(i int) {
+ ra.needCopyOnWrite[i] = true
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go b/vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go
new file mode 100644
index 0000000000..99fb0f6972
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/roaringarray_gen.go
@@ -0,0 +1,529 @@
+package roaring
+
+// NOTE: THIS FILE WAS PRODUCED BY THE
+// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
+// DO NOT EDIT
+
+import (
+ "github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *containerSerz) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zxvk uint32
+ zxvk, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zxvk > 0 {
+ zxvk--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "t":
+ {
+ var zbzg uint8
+ zbzg, err = dc.ReadUint8()
+ z.t = contype(zbzg)
+ }
+ if err != nil {
+ return
+ }
+ case "r":
+ err = z.r.DecodeMsg(dc)
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *containerSerz) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 2
+ // write "t"
+ err = en.Append(0x82, 0xa1, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint8(uint8(z.t))
+ if err != nil {
+ return
+ }
+ // write "r"
+ err = en.Append(0xa1, 0x72)
+ if err != nil {
+ return err
+ }
+ err = z.r.EncodeMsg(en)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *containerSerz) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "t"
+ o = append(o, 0x82, 0xa1, 0x74)
+ o = msgp.AppendUint8(o, uint8(z.t))
+ // string "r"
+ o = append(o, 0xa1, 0x72)
+ o, err = z.r.MarshalMsg(o)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *containerSerz) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zbai uint32
+ zbai, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zbai > 0 {
+ zbai--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "t":
+ {
+ var zcmr uint8
+ zcmr, bts, err = msgp.ReadUint8Bytes(bts)
+ z.t = contype(zcmr)
+ }
+ if err != nil {
+ return
+ }
+ case "r":
+ bts, err = z.r.UnmarshalMsg(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *containerSerz) Msgsize() (s int) {
+ s = 1 + 2 + msgp.Uint8Size + 2 + z.r.Msgsize()
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *contype) DecodeMsg(dc *msgp.Reader) (err error) {
+ {
+ var zajw uint8
+ zajw, err = dc.ReadUint8()
+ (*z) = contype(zajw)
+ }
+ if err != nil {
+ return
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z contype) EncodeMsg(en *msgp.Writer) (err error) {
+ err = en.WriteUint8(uint8(z))
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z contype) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ o = msgp.AppendUint8(o, uint8(z))
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *contype) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ {
+ var zwht uint8
+ zwht, bts, err = msgp.ReadUint8Bytes(bts)
+ (*z) = contype(zwht)
+ }
+ if err != nil {
+ return
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z contype) Msgsize() (s int) {
+ s = msgp.Uint8Size
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *roaringArray) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zlqf uint32
+ zlqf, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zlqf > 0 {
+ zlqf--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "keys":
+ var zdaf uint32
+ zdaf, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap(z.keys) >= int(zdaf) {
+ z.keys = (z.keys)[:zdaf]
+ } else {
+ z.keys = make([]uint16, zdaf)
+ }
+ for zhct := range z.keys {
+ z.keys[zhct], err = dc.ReadUint16()
+ if err != nil {
+ return
+ }
+ }
+ case "needCopyOnWrite":
+ var zpks uint32
+ zpks, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap(z.needCopyOnWrite) >= int(zpks) {
+ z.needCopyOnWrite = (z.needCopyOnWrite)[:zpks]
+ } else {
+ z.needCopyOnWrite = make([]bool, zpks)
+ }
+ for zcua := range z.needCopyOnWrite {
+ z.needCopyOnWrite[zcua], err = dc.ReadBool()
+ if err != nil {
+ return
+ }
+ }
+ case "copyOnWrite":
+ z.copyOnWrite, err = dc.ReadBool()
+ if err != nil {
+ return
+ }
+ case "conserz":
+ var zjfb uint32
+ zjfb, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap(z.conserz) >= int(zjfb) {
+ z.conserz = (z.conserz)[:zjfb]
+ } else {
+ z.conserz = make([]containerSerz, zjfb)
+ }
+ for zxhx := range z.conserz {
+ var zcxo uint32
+ zcxo, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zcxo > 0 {
+ zcxo--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "t":
+ {
+ var zeff uint8
+ zeff, err = dc.ReadUint8()
+ z.conserz[zxhx].t = contype(zeff)
+ }
+ if err != nil {
+ return
+ }
+ case "r":
+ err = z.conserz[zxhx].r.DecodeMsg(dc)
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *roaringArray) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 4
+ // write "keys"
+ err = en.Append(0x84, 0xa4, 0x6b, 0x65, 0x79, 0x73)
+ if err != nil {
+ return err
+ }
+ err = en.WriteArrayHeader(uint32(len(z.keys)))
+ if err != nil {
+ return
+ }
+ for zhct := range z.keys {
+ err = en.WriteUint16(z.keys[zhct])
+ if err != nil {
+ return
+ }
+ }
+ // write "needCopyOnWrite"
+ err = en.Append(0xaf, 0x6e, 0x65, 0x65, 0x64, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65)
+ if err != nil {
+ return err
+ }
+ err = en.WriteArrayHeader(uint32(len(z.needCopyOnWrite)))
+ if err != nil {
+ return
+ }
+ for zcua := range z.needCopyOnWrite {
+ err = en.WriteBool(z.needCopyOnWrite[zcua])
+ if err != nil {
+ return
+ }
+ }
+ // write "copyOnWrite"
+ err = en.Append(0xab, 0x63, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65)
+ if err != nil {
+ return err
+ }
+ err = en.WriteBool(z.copyOnWrite)
+ if err != nil {
+ return
+ }
+ // write "conserz"
+ err = en.Append(0xa7, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x72, 0x7a)
+ if err != nil {
+ return err
+ }
+ err = en.WriteArrayHeader(uint32(len(z.conserz)))
+ if err != nil {
+ return
+ }
+ for zxhx := range z.conserz {
+ // map header, size 2
+ // write "t"
+ err = en.Append(0x82, 0xa1, 0x74)
+ if err != nil {
+ return err
+ }
+ err = en.WriteUint8(uint8(z.conserz[zxhx].t))
+ if err != nil {
+ return
+ }
+ // write "r"
+ err = en.Append(0xa1, 0x72)
+ if err != nil {
+ return err
+ }
+ err = z.conserz[zxhx].r.EncodeMsg(en)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *roaringArray) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 4
+ // string "keys"
+ o = append(o, 0x84, 0xa4, 0x6b, 0x65, 0x79, 0x73)
+ o = msgp.AppendArrayHeader(o, uint32(len(z.keys)))
+ for zhct := range z.keys {
+ o = msgp.AppendUint16(o, z.keys[zhct])
+ }
+ // string "needCopyOnWrite"
+ o = append(o, 0xaf, 0x6e, 0x65, 0x65, 0x64, 0x43, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65)
+ o = msgp.AppendArrayHeader(o, uint32(len(z.needCopyOnWrite)))
+ for zcua := range z.needCopyOnWrite {
+ o = msgp.AppendBool(o, z.needCopyOnWrite[zcua])
+ }
+ // string "copyOnWrite"
+ o = append(o, 0xab, 0x63, 0x6f, 0x70, 0x79, 0x4f, 0x6e, 0x57, 0x72, 0x69, 0x74, 0x65)
+ o = msgp.AppendBool(o, z.copyOnWrite)
+ // string "conserz"
+ o = append(o, 0xa7, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x72, 0x7a)
+ o = msgp.AppendArrayHeader(o, uint32(len(z.conserz)))
+ for zxhx := range z.conserz {
+ // map header, size 2
+ // string "t"
+ o = append(o, 0x82, 0xa1, 0x74)
+ o = msgp.AppendUint8(o, uint8(z.conserz[zxhx].t))
+ // string "r"
+ o = append(o, 0xa1, 0x72)
+ o, err = z.conserz[zxhx].r.MarshalMsg(o)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *roaringArray) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zrsw uint32
+ zrsw, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zrsw > 0 {
+ zrsw--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "keys":
+ var zxpk uint32
+ zxpk, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap(z.keys) >= int(zxpk) {
+ z.keys = (z.keys)[:zxpk]
+ } else {
+ z.keys = make([]uint16, zxpk)
+ }
+ for zhct := range z.keys {
+ z.keys[zhct], bts, err = msgp.ReadUint16Bytes(bts)
+ if err != nil {
+ return
+ }
+ }
+ case "needCopyOnWrite":
+ var zdnj uint32
+ zdnj, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap(z.needCopyOnWrite) >= int(zdnj) {
+ z.needCopyOnWrite = (z.needCopyOnWrite)[:zdnj]
+ } else {
+ z.needCopyOnWrite = make([]bool, zdnj)
+ }
+ for zcua := range z.needCopyOnWrite {
+ z.needCopyOnWrite[zcua], bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ return
+ }
+ }
+ case "copyOnWrite":
+ z.copyOnWrite, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ return
+ }
+ case "conserz":
+ var zobc uint32
+ zobc, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if cap(z.conserz) >= int(zobc) {
+ z.conserz = (z.conserz)[:zobc]
+ } else {
+ z.conserz = make([]containerSerz, zobc)
+ }
+ for zxhx := range z.conserz {
+ var zsnv uint32
+ zsnv, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zsnv > 0 {
+ zsnv--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "t":
+ {
+ var zkgt uint8
+ zkgt, bts, err = msgp.ReadUint8Bytes(bts)
+ z.conserz[zxhx].t = contype(zkgt)
+ }
+ if err != nil {
+ return
+ }
+ case "r":
+ bts, err = z.conserz[zxhx].r.UnmarshalMsg(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *roaringArray) Msgsize() (s int) {
+ s = 1 + 5 + msgp.ArrayHeaderSize + (len(z.keys) * (msgp.Uint16Size)) + 16 + msgp.ArrayHeaderSize + (len(z.needCopyOnWrite) * (msgp.BoolSize)) + 12 + msgp.BoolSize + 8 + msgp.ArrayHeaderSize
+ for zxhx := range z.conserz {
+ s += 1 + 2 + msgp.Uint8Size + 2 + z.conserz[zxhx].r.Msgsize()
+ }
+ return
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization.go b/vendor/github.com/RoaringBitmap/roaring/serialization.go
new file mode 100644
index 0000000000..59c39a6630
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/serialization.go
@@ -0,0 +1,83 @@
+package roaring
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/tinylib/msgp/msgp"
+)
+
+// writeTo for runContainer16 follows the
+// spec: https://github.com/RoaringBitmap/RoaringFormatSpec
+func (b *runContainer16) writeTo(stream io.Writer) (int, error) {
+ buf := make([]byte, 2+4*len(b.iv))
+ binary.LittleEndian.PutUint16(buf[0:], uint16(len(b.iv)))
+ for i, v := range b.iv {
+ binary.LittleEndian.PutUint16(buf[2+i*4:], v.start)
+ binary.LittleEndian.PutUint16(buf[2+2+i*4:], v.length)
+ }
+ return stream.Write(buf)
+}
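+
+// Worked example for the encoding above (illustrative, little-endian):
+// a container holding the single run [4,7] has iv = [{start: 4, length: 3}]
+// and serializes to six bytes:
+//
+//	01 00  04 00  03 00   // numRuns=1, start=4, length=3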
+
+func (b *runContainer32) writeToMsgpack(stream io.Writer) (int, error) {
+ bts, err := b.MarshalMsg(nil)
+ if err != nil {
+ return 0, err
+ }
+ return stream.Write(bts)
+}
+
+func (b *runContainer16) writeToMsgpack(stream io.Writer) (int, error) {
+ bts, err := b.MarshalMsg(nil)
+ if err != nil {
+ return 0, err
+ }
+ return stream.Write(bts)
+}
+
+func (b *runContainer32) readFromMsgpack(stream io.Reader) (int, error) {
+ err := msgp.Decode(stream, b)
+ return 0, err
+}
+
+func (b *runContainer16) readFromMsgpack(stream io.Reader) (int, error) {
+ err := msgp.Decode(stream, b)
+ return 0, err
+}
+
+var errCorruptedStream = errors.New("insufficient/odd number of stored bytes, corrupted stream detected")
+
+func (b *runContainer16) readFrom(stream io.Reader) (int, error) {
+ b.iv = b.iv[:0]
+ b.card = 0
+ var numRuns uint16
+ err := binary.Read(stream, binary.LittleEndian, &numRuns)
+ if err != nil {
+ return 0, err
+ }
+ nr := int(numRuns)
+ encRun := make([]uint16, 2*nr)
+ by := make([]byte, 4*nr)
+ err = binary.Read(stream, binary.LittleEndian, &by)
+ if err != nil {
+ return 0, err
+ }
+ for i := range encRun {
+ if len(by) < 2 {
+ return 0, errCorruptedStream
+ }
+ encRun[i] = binary.LittleEndian.Uint16(by)
+ by = by[2:]
+ }
+ for i := 0; i < nr; i++ {
+ if i > 0 && b.iv[i-1].last() >= encRun[i*2] {
+ return 0, fmt.Errorf("error: stored runContainer had runs that were not in sorted order!! (b.iv[i-1=%v].last = %v >= encRun[i=%v] = %v)", i-1, b.iv[i-1].last(), i, encRun[i*2])
+ }
+ b.iv = append(b.iv, interval16{start: encRun[i*2], length: encRun[i*2+1]})
+ b.card += int64(encRun[i*2+1]) + 1
+ }
+ return 0, err
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization_generic.go b/vendor/github.com/RoaringBitmap/roaring/serialization_generic.go
new file mode 100644
index 0000000000..7fcef7691b
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/serialization_generic.go
@@ -0,0 +1,118 @@
+// +build !amd64,!386 appengine
+
+package roaring
+
+import (
+ "encoding/binary"
+ "io"
+)
+
+func (b *arrayContainer) writeTo(stream io.Writer) (int, error) {
+ buf := make([]byte, 2*len(b.content))
+ for i, v := range b.content {
+ base := i * 2
+ buf[base] = byte(v)
+ buf[base+1] = byte(v >> 8)
+ }
+ return stream.Write(buf)
+}
+
+func (b *arrayContainer) readFrom(stream io.Reader) (int, error) {
+ err := binary.Read(stream, binary.LittleEndian, b.content)
+ if err != nil {
+ return 0, err
+ }
+ return 2 * len(b.content), nil
+}
+
+func (b *bitmapContainer) writeTo(stream io.Writer) (int, error) {
+ // Write set
+ buf := make([]byte, 8*len(b.bitmap))
+ for i, v := range b.bitmap {
+ base := i * 8
+ buf[base] = byte(v)
+ buf[base+1] = byte(v >> 8)
+ buf[base+2] = byte(v >> 16)
+ buf[base+3] = byte(v >> 24)
+ buf[base+4] = byte(v >> 32)
+ buf[base+5] = byte(v >> 40)
+ buf[base+6] = byte(v >> 48)
+ buf[base+7] = byte(v >> 56)
+ }
+ return stream.Write(buf)
+}
+
+func (b *bitmapContainer) readFrom(stream io.Reader) (int, error) {
+ err := binary.Read(stream, binary.LittleEndian, b.bitmap)
+ if err != nil {
+ return 0, err
+ }
+ b.computeCardinality()
+ return 8 * len(b.bitmap), nil
+}
+
+func (bc *bitmapContainer) asLittleEndianByteSlice() []byte {
+ by := make([]byte, len(bc.bitmap)*8)
+ for i := range bc.bitmap {
+ binary.LittleEndian.PutUint64(by[i*8:], bc.bitmap[i])
+ }
+ return by
+}
+
+func uint64SliceAsByteSlice(slice []uint64) []byte {
+ by := make([]byte, len(slice)*8)
+
+ for i, v := range slice {
+ binary.LittleEndian.PutUint64(by[i*8:], v)
+ }
+
+ return by
+}
+
+func byteSliceAsUint16Slice(slice []byte) []uint16 {
+ if len(slice)%2 != 0 {
+ panic("Slice size should be divisible by 2")
+ }
+
+ b := make([]uint16, len(slice)/2)
+
+ for i := range b {
+ b[i] = binary.LittleEndian.Uint16(slice[2*i:])
+ }
+
+ return b
+}
+
+func byteSliceAsUint64Slice(slice []byte) []uint64 {
+ if len(slice)%8 != 0 {
+ panic("Slice size should be divisible by 8")
+ }
+
+ b := make([]uint64, len(slice)/8)
+
+ for i := range b {
+ b[i] = binary.LittleEndian.Uint64(slice[8*i:])
+ }
+
+ return b
+}
+
+// byteSliceAsInterval16Slice converts a byte slice to an interval16 slice.
+// The function assumes the byte buffer holds run container data
+// encoded according to the Roaring Format Spec.
+func byteSliceAsInterval16Slice(byteSlice []byte) []interval16 {
+ if len(byteSlice)%4 != 0 {
+ panic("Slice size should be divisible by 4")
+ }
+
+ intervalSlice := make([]interval16, len(byteSlice)/4)
+
+ for i := range intervalSlice {
+ intervalSlice[i] = interval16{
+ start: binary.LittleEndian.Uint16(byteSlice[i*4:]),
+ length: binary.LittleEndian.Uint16(byteSlice[i*4+2:]),
+ }
+ }
+
+ return intervalSlice
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go b/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go
new file mode 100644
index 0000000000..c1d3ad3046
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go
@@ -0,0 +1,113 @@
+// +build 386 amd64,!appengine
+
+package roaring
+
+import (
+ "io"
+ "reflect"
+ "unsafe"
+)
+
+func (ac *arrayContainer) writeTo(stream io.Writer) (int, error) {
+ buf := uint16SliceAsByteSlice(ac.content)
+ return stream.Write(buf)
+}
+
+func (bc *bitmapContainer) writeTo(stream io.Writer) (int, error) {
+ buf := uint64SliceAsByteSlice(bc.bitmap)
+ return stream.Write(buf)
+}
+
+// readFrom reads an arrayContainer from stream.
+// PRE-REQUISITE: you must size the arrayContainer correctly (allocate ac.content)
+// *before* you call readFrom; the size cannot be inferred from the stream
+// at this point.
+func (ac *arrayContainer) readFrom(stream io.Reader) (int, error) {
+ buf := uint16SliceAsByteSlice(ac.content)
+ return io.ReadFull(stream, buf)
+}
+
+func (bc *bitmapContainer) readFrom(stream io.Reader) (int, error) {
+ buf := uint64SliceAsByteSlice(bc.bitmap)
+ n, err := io.ReadFull(stream, buf)
+ bc.computeCardinality()
+ return n, err
+}
+
+func uint64SliceAsByteSlice(slice []uint64) []byte {
+ // make a new slice header
+ header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
+
+ // update its capacity and length
+ header.Len *= 8
+ header.Cap *= 8
+
+ // return it
+ return *(*[]byte)(unsafe.Pointer(&header))
+}
+
+func uint16SliceAsByteSlice(slice []uint16) []byte {
+ // make a new slice header
+ header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
+
+ // update its capacity and length
+ header.Len *= 2
+ header.Cap *= 2
+
+ // return it
+ return *(*[]byte)(unsafe.Pointer(&header))
+}
+
+func (bc *bitmapContainer) asLittleEndianByteSlice() []byte {
+ return uint64SliceAsByteSlice(bc.bitmap)
+}
+
+// Deserialization code follows
+
+func byteSliceAsUint16Slice(slice []byte) []uint16 {
+ if len(slice)%2 != 0 {
+ panic("Slice size should be divisible by 2")
+ }
+
+ // make a new slice header
+ header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
+
+ // update its capacity and length
+ header.Len /= 2
+ header.Cap /= 2
+
+ // return it
+ return *(*[]uint16)(unsafe.Pointer(&header))
+}
+
+func byteSliceAsUint64Slice(slice []byte) []uint64 {
+ if len(slice)%8 != 0 {
+ panic("Slice size should be divisible by 8")
+ }
+
+ // make a new slice header
+ header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
+
+ // update its capacity and length
+ header.Len /= 8
+ header.Cap /= 8
+
+ // return it
+ return *(*[]uint64)(unsafe.Pointer(&header))
+}
+
+func byteSliceAsInterval16Slice(slice []byte) []interval16 {
+ if len(slice)%4 != 0 {
+ panic("Slice size should be divisible by 4")
+ }
+
+ // make a new slice header
+ header := *(*reflect.SliceHeader)(unsafe.Pointer(&slice))
+
+ // update its capacity and length
+ header.Len /= 4
+ header.Cap /= 4
+
+ // return it
+ return *(*[]interval16)(unsafe.Pointer(&header))
+}
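+
+// A note on the reflect.SliceHeader casts in this file (an observation,
+// not upstream documentation): each returned slice aliases the input's
+// backing array, so the caller must keep the input reachable for the
+// lifetime of the output and must not expect the two views to be
+// independent; the divisibility panics guard the element-size math.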
diff --git a/vendor/github.com/RoaringBitmap/roaring/serializationfuzz.go b/vendor/github.com/RoaringBitmap/roaring/serializationfuzz.go
new file mode 100644
index 0000000000..5eaa22202c
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/serializationfuzz.go
@@ -0,0 +1,21 @@
+// +build gofuzz
+
+package roaring
+
+import "bytes"
+
+func FuzzSerializationStream(data []byte) int {
+ newrb := NewBitmap()
+ if _, err := newrb.ReadFrom(bytes.NewReader(data)); err != nil {
+ return 0
+ }
+ return 1
+}
+
+func FuzzSerializationBuffer(data []byte) int {
+ newrb := NewBitmap()
+ if _, err := newrb.FromBuffer(data); err != nil {
+ return 0
+ }
+ return 1
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/setutil.go b/vendor/github.com/RoaringBitmap/roaring/setutil.go
new file mode 100644
index 0000000000..3e8c01dd1f
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/setutil.go
@@ -0,0 +1,609 @@
+package roaring
+
+func equal(a, b []uint16) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func difference(set1 []uint16, set2 []uint16, buffer []uint16) int {
+ if len(set2) == 0 {
+ for k := 0; k < len(set1); k++ {
+ buffer[k] = set1[k]
+ }
+ return len(set1)
+ }
+ if len(set1) == 0 {
+ return 0
+ }
+ pos := 0
+ k1 := 0
+ k2 := 0
+ buffer = buffer[:cap(buffer)]
+ s1 := set1[k1]
+ s2 := set2[k2]
+ for {
+ if s1 < s2 {
+ buffer[pos] = s1
+ pos++
+ k1++
+ if k1 >= len(set1) {
+ break
+ }
+ s1 = set1[k1]
+ } else if s1 == s2 {
+ k1++
+ k2++
+ if k1 >= len(set1) {
+ break
+ }
+ s1 = set1[k1]
+ if k2 >= len(set2) {
+ for ; k1 < len(set1); k1++ {
+ buffer[pos] = set1[k1]
+ pos++
+ }
+ break
+ }
+ s2 = set2[k2]
+ } else { // if (val1>val2)
+ k2++
+ if k2 >= len(set2) {
+ for ; k1 < len(set1); k1++ {
+ buffer[pos] = set1[k1]
+ pos++
+ }
+ break
+ }
+ s2 = set2[k2]
+ }
+ }
+ return pos
+}
+
+func exclusiveUnion2by2(set1 []uint16, set2 []uint16, buffer []uint16) int {
+ if len(set2) == 0 {
+ buffer = buffer[:len(set1)]
+ copy(buffer, set1[:])
+ return len(set1)
+ }
+ if len(set1) == 0 {
+ buffer = buffer[:len(set2)]
+ copy(buffer, set2[:])
+ return len(set2)
+ }
+ pos := 0
+ k1 := 0
+ k2 := 0
+ s1 := set1[k1]
+ s2 := set2[k2]
+ buffer = buffer[:cap(buffer)]
+ for {
+ if s1 < s2 {
+ buffer[pos] = s1
+ pos++
+ k1++
+ if k1 >= len(set1) {
+ for ; k2 < len(set2); k2++ {
+ buffer[pos] = set2[k2]
+ pos++
+ }
+ break
+ }
+ s1 = set1[k1]
+ } else if s1 == s2 {
+ k1++
+ k2++
+ if k1 >= len(set1) {
+ for ; k2 < len(set2); k2++ {
+ buffer[pos] = set2[k2]
+ pos++
+ }
+ break
+ }
+ if k2 >= len(set2) {
+ for ; k1 < len(set1); k1++ {
+ buffer[pos] = set1[k1]
+ pos++
+ }
+ break
+ }
+ s1 = set1[k1]
+ s2 = set2[k2]
+ } else { // if (val1>val2)
+ buffer[pos] = s2
+ pos++
+ k2++
+ if k2 >= len(set2) {
+ for ; k1 < len(set1); k1++ {
+ buffer[pos] = set1[k1]
+ pos++
+ }
+ break
+ }
+ s2 = set2[k2]
+ }
+ }
+ return pos
+}
+
+func union2by2(set1 []uint16, set2 []uint16, buffer []uint16) int {
+ pos := 0
+ k1 := 0
+ k2 := 0
+ if len(set2) == 0 {
+ buffer = buffer[:len(set1)]
+ copy(buffer, set1[:])
+ return len(set1)
+ }
+ if len(set1) == 0 {
+ buffer = buffer[:len(set2)]
+ copy(buffer, set2[:])
+ return len(set2)
+ }
+ s1 := set1[k1]
+ s2 := set2[k2]
+ buffer = buffer[:cap(buffer)]
+ for {
+ if s1 < s2 {
+ buffer[pos] = s1
+ pos++
+ k1++
+ if k1 >= len(set1) {
+ copy(buffer[pos:], set2[k2:])
+ pos += len(set2) - k2
+ break
+ }
+ s1 = set1[k1]
+ } else if s1 == s2 {
+ buffer[pos] = s1
+ pos++
+ k1++
+ k2++
+ if k1 >= len(set1) {
+ copy(buffer[pos:], set2[k2:])
+ pos += len(set2) - k2
+ break
+ }
+ if k2 >= len(set2) {
+ copy(buffer[pos:], set1[k1:])
+ pos += len(set1) - k1
+ break
+ }
+ s1 = set1[k1]
+ s2 = set2[k2]
+ } else { // if (set1[k1]>set2[k2])
+ buffer[pos] = s2
+ pos++
+ k2++
+ if k2 >= len(set2) {
+ copy(buffer[pos:], set1[k1:])
+ pos += len(set1) - k1
+ break
+ }
+ s2 = set2[k2]
+ }
+ }
+ return pos
+}
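+
+// Worked example (illustrative): union2by2([1,3,5], [3,4], buf) merges
+// the two sorted inputs in one pass, writing 1,3,4,5 into buf and
+// returning 4. Equal heads are emitted once; exclusiveUnion2by2 is the
+// same walk but drops them, yielding the symmetric difference.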
+
+func union2by2Cardinality(set1 []uint16, set2 []uint16) int {
+ pos := 0
+ k1 := 0
+ k2 := 0
+ if len(set2) == 0 {
+ return len(set1)
+ }
+ if len(set1) == 0 {
+ return len(set2)
+ }
+ s1 := set1[k1]
+ s2 := set2[k2]
+ for {
+ if s1 < s2 {
+ pos++
+ k1++
+ if k1 >= len(set1) {
+ pos += len(set2) - k2
+ break
+ }
+ s1 = set1[k1]
+ } else if s1 == s2 {
+ pos++
+ k1++
+ k2++
+ if k1 >= len(set1) {
+ pos += len(set2) - k2
+ break
+ }
+ if k2 >= len(set2) {
+ pos += len(set1) - k1
+ break
+ }
+ s1 = set1[k1]
+ s2 = set2[k2]
+ } else { // if (set1[k1]>set2[k2])
+ pos++
+ k2++
+ if k2 >= len(set2) {
+ pos += len(set1) - k1
+ break
+ }
+ s2 = set2[k2]
+ }
+ }
+ return pos
+}
+
+func intersection2by2(
+ set1 []uint16,
+ set2 []uint16,
+ buffer []uint16) int {
+
+ if len(set1)*64 < len(set2) {
+ return onesidedgallopingintersect2by2(set1, set2, buffer)
+ } else if len(set2)*64 < len(set1) {
+ return onesidedgallopingintersect2by2(set2, set1, buffer)
+ } else {
+ return localintersect2by2(set1, set2, buffer)
+ }
+}
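+
+// Design note on the 64x threshold above (inferred, not documented
+// upstream): galloping over the large set costs roughly
+// |small| * log|large| comparisons versus |small| + |large| for the
+// linear merge, so it only pays off once the size ratio is large.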
+
+func intersection2by2Cardinality(
+ set1 []uint16,
+ set2 []uint16) int {
+
+ if len(set1)*64 < len(set2) {
+ return onesidedgallopingintersect2by2Cardinality(set1, set2)
+ } else if len(set2)*64 < len(set1) {
+ return onesidedgallopingintersect2by2Cardinality(set2, set1)
+ } else {
+ return localintersect2by2Cardinality(set1, set2)
+ }
+}
+
+func intersects2by2(
+ set1 []uint16,
+ set2 []uint16) bool {
+ // could be optimized if one set is much larger than the other one
+ if len(set1) == 0 || len(set2) == 0 {
+ return false
+ }
+ k1 := 0
+ k2 := 0
+ s1 := set1[k1]
+ s2 := set2[k2]
+mainwhile:
+ for {
+
+ if s2 < s1 {
+ for {
+ k2++
+ if k2 == len(set2) {
+ break mainwhile
+ }
+ s2 = set2[k2]
+ if s2 >= s1 {
+ break
+ }
+ }
+ }
+ if s1 < s2 {
+ for {
+ k1++
+ if k1 == len(set1) {
+ break mainwhile
+ }
+ s1 = set1[k1]
+ if s1 >= s2 {
+ break
+ }
+ }
+
+ } else {
+ // (set2[k2] == set1[k1])
+ return true
+ }
+ }
+ return false
+}
+
+func localintersect2by2(
+ set1 []uint16,
+ set2 []uint16,
+ buffer []uint16) int {
+
+ if len(set1) == 0 || len(set2) == 0 {
+ return 0
+ }
+ k1 := 0
+ k2 := 0
+ pos := 0
+ buffer = buffer[:cap(buffer)]
+ s1 := set1[k1]
+ s2 := set2[k2]
+mainwhile:
+ for {
+ if s2 < s1 {
+ for {
+ k2++
+ if k2 == len(set2) {
+ break mainwhile
+ }
+ s2 = set2[k2]
+ if s2 >= s1 {
+ break
+ }
+ }
+ }
+ if s1 < s2 {
+ for {
+ k1++
+ if k1 == len(set1) {
+ break mainwhile
+ }
+ s1 = set1[k1]
+ if s1 >= s2 {
+ break
+ }
+ }
+
+ } else {
+ // (set2[k2] == set1[k1])
+ buffer[pos] = s1
+ pos++
+ k1++
+ if k1 == len(set1) {
+ break
+ }
+ s1 = set1[k1]
+ k2++
+ if k2 == len(set2) {
+ break
+ }
+ s2 = set2[k2]
+ }
+ }
+ return pos
+}
+
+func localintersect2by2Cardinality(
+ set1 []uint16,
+ set2 []uint16) int {
+
+ if len(set1) == 0 || len(set2) == 0 {
+ return 0
+ }
+ k1 := 0
+ k2 := 0
+ pos := 0
+ s1 := set1[k1]
+ s2 := set2[k2]
+mainwhile:
+ for {
+ if s2 < s1 {
+ for {
+ k2++
+ if k2 == len(set2) {
+ break mainwhile
+ }
+ s2 = set2[k2]
+ if s2 >= s1 {
+ break
+ }
+ }
+ }
+ if s1 < s2 {
+ for {
+ k1++
+ if k1 == len(set1) {
+ break mainwhile
+ }
+ s1 = set1[k1]
+ if s1 >= s2 {
+ break
+ }
+ }
+
+ } else {
+ // (set2[k2] == set1[k1])
+ pos++
+ k1++
+ if k1 == len(set1) {
+ break
+ }
+ s1 = set1[k1]
+ k2++
+ if k2 == len(set2) {
+ break
+ }
+ s2 = set2[k2]
+ }
+ }
+ return pos
+}
+
+func advanceUntil(
+ array []uint16,
+ pos int,
+ length int,
+ min uint16) int {
+ lower := pos + 1
+
+ if lower >= length || array[lower] >= min {
+ return lower
+ }
+
+ spansize := 1
+
+ for lower+spansize < length && array[lower+spansize] < min {
+ spansize *= 2
+ }
+ var upper int
+ if lower+spansize < length {
+ upper = lower + spansize
+ } else {
+ upper = length - 1
+ }
+
+ if array[upper] == min {
+ return upper
+ }
+
+ if array[upper] < min {
+ // the array contains no item >= min
+ return length
+ }
+
+ // we know that the next-smallest span was too small
+ lower += (spansize >> 1)
+
+ mid := 0
+ for lower+1 != upper {
+ mid = (lower + upper) >> 1
+ if array[mid] == min {
+ return mid
+ } else if array[mid] < min {
+ lower = mid
+ } else {
+ upper = mid
+ }
+ }
+ return upper
+}
+
+func onesidedgallopingintersect2by2(
+ smallset []uint16,
+ largeset []uint16,
+ buffer []uint16) int {
+
+ if len(smallset) == 0 {
+ return 0
+ }
+ buffer = buffer[:cap(buffer)]
+ k1 := 0
+ k2 := 0
+ pos := 0
+ s1 := largeset[k1]
+ s2 := smallset[k2]
+mainwhile:
+
+ for {
+ if s1 < s2 {
+ k1 = advanceUntil(largeset, k1, len(largeset), s2)
+ if k1 == len(largeset) {
+ break mainwhile
+ }
+ s1 = largeset[k1]
+ }
+ if s2 < s1 {
+ k2++
+ if k2 == len(smallset) {
+ break mainwhile
+ }
+ s2 = smallset[k2]
+ } else {
+
+ buffer[pos] = s2
+ pos++
+ k2++
+ if k2 == len(smallset) {
+ break
+ }
+ s2 = smallset[k2]
+ k1 = advanceUntil(largeset, k1, len(largeset), s2)
+ if k1 == len(largeset) {
+ break mainwhile
+ }
+ s1 = largeset[k1]
+ }
+
+ }
+ return pos
+}
+
+func onesidedgallopingintersect2by2Cardinality(
+ smallset []uint16,
+ largeset []uint16) int {
+
+ if len(smallset) == 0 {
+ return 0
+ }
+ k1 := 0
+ k2 := 0
+ pos := 0
+ s1 := largeset[k1]
+ s2 := smallset[k2]
+mainwhile:
+
+ for {
+ if s1 < s2 {
+ k1 = advanceUntil(largeset, k1, len(largeset), s2)
+ if k1 == len(largeset) {
+ break mainwhile
+ }
+ s1 = largeset[k1]
+ }
+ if s2 < s1 {
+ k2++
+ if k2 == len(smallset) {
+ break mainwhile
+ }
+ s2 = smallset[k2]
+ } else {
+
+ pos++
+ k2++
+ if k2 == len(smallset) {
+ break
+ }
+ s2 = smallset[k2]
+ k1 = advanceUntil(largeset, k1, len(largeset), s2)
+ if k1 == len(largeset) {
+ break mainwhile
+ }
+ s1 = largeset[k1]
+ }
+
+ }
+ return pos
+}
+
+func binarySearch(array []uint16, ikey uint16) int {
+ low := 0
+ high := len(array) - 1
+ for low+16 <= high {
+ middleIndex := int(uint32(low+high) >> 1)
+ middleValue := array[middleIndex]
+ if middleValue < ikey {
+ low = middleIndex + 1
+ } else if middleValue > ikey {
+ high = middleIndex - 1
+ } else {
+ return middleIndex
+ }
+ }
+ for ; low <= high; low++ {
+ val := array[low]
+ if val >= ikey {
+ if val == ikey {
+ return low
+ }
+ break
+ }
+ }
+ return -(low + 1)
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/shortiterator.go b/vendor/github.com/RoaringBitmap/roaring/shortiterator.go
new file mode 100644
index 0000000000..ef0acbd1ca
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/shortiterator.go
@@ -0,0 +1,21 @@
+package roaring
+
+type shortIterable interface {
+ hasNext() bool
+ next() uint16
+}
+
+type shortIterator struct {
+ slice []uint16
+ loc int
+}
+
+func (si *shortIterator) hasNext() bool {
+ return si.loc < len(si.slice)
+}
+
+func (si *shortIterator) next() uint16 {
+ a := si.slice[si.loc]
+ si.loc++
+ return a
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/smat.go b/vendor/github.com/RoaringBitmap/roaring/smat.go
new file mode 100644
index 0000000000..9da4756349
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/smat.go
@@ -0,0 +1,383 @@
+// +build gofuzz
+
+/*
+# Instructions for smat testing for roaring
+
+[smat](https://github.com/mschoch/smat) is a framework that provides
+state machine assisted fuzz testing.
+
+To run the smat tests for roaring...
+
+## Prerequisites
+
+ $ go get github.com/dvyukov/go-fuzz/go-fuzz
+ $ go get github.com/dvyukov/go-fuzz/go-fuzz-build
+
+## Steps
+
+1. Generate initial smat corpus:
+```
+ go test -tags=gofuzz -run=TestGenerateSmatCorpus
+```
+
+2. Build go-fuzz test program with instrumentation:
+```
+ go-fuzz-build -func FuzzSmat github.com/RoaringBitmap/roaring
+```
+
+3. Run go-fuzz:
+```
+ go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200
+```
+
+You should see output like...
+```
+2016/09/16 13:58:35 slaves: 8, corpus: 1 (3s ago), crashers: 0, restarts: 1/0, execs: 0 (0/sec), cover: 0, uptime: 3s
+2016/09/16 13:58:38 slaves: 8, corpus: 1 (6s ago), crashers: 0, restarts: 1/0, execs: 0 (0/sec), cover: 0, uptime: 6s
+2016/09/16 13:58:41 slaves: 8, corpus: 1 (9s ago), crashers: 0, restarts: 1/44, execs: 44 (5/sec), cover: 0, uptime: 9s
+2016/09/16 13:58:44 slaves: 8, corpus: 1 (12s ago), crashers: 0, restarts: 1/45, execs: 45 (4/sec), cover: 0, uptime: 12s
+2016/09/16 13:58:47 slaves: 8, corpus: 1 (15s ago), crashers: 0, restarts: 1/46, execs: 46 (3/sec), cover: 0, uptime: 15s
+2016/09/16 13:58:50 slaves: 8, corpus: 1 (18s ago), crashers: 0, restarts: 1/47, execs: 47 (3/sec), cover: 0, uptime: 18s
+2016/09/16 13:58:53 slaves: 8, corpus: 1 (21s ago), crashers: 0, restarts: 1/63, execs: 63 (3/sec), cover: 0, uptime: 21s
+2016/09/16 13:58:56 slaves: 8, corpus: 1 (24s ago), crashers: 0, restarts: 1/65, execs: 65 (3/sec), cover: 0, uptime: 24s
+2016/09/16 13:58:59 slaves: 8, corpus: 1 (27s ago), crashers: 0, restarts: 1/66, execs: 66 (2/sec), cover: 0, uptime: 27s
+2016/09/16 13:59:02 slaves: 8, corpus: 1 (30s ago), crashers: 0, restarts: 1/67, execs: 67 (2/sec), cover: 0, uptime: 30s
+2016/09/16 13:59:05 slaves: 8, corpus: 1 (33s ago), crashers: 0, restarts: 1/83, execs: 83 (3/sec), cover: 0, uptime: 33s
+2016/09/16 13:59:08 slaves: 8, corpus: 1 (36s ago), crashers: 0, restarts: 1/84, execs: 84 (2/sec), cover: 0, uptime: 36s
+2016/09/16 13:59:11 slaves: 8, corpus: 2 (0s ago), crashers: 0, restarts: 1/85, execs: 85 (2/sec), cover: 0, uptime: 39s
+2016/09/16 13:59:14 slaves: 8, corpus: 17 (2s ago), crashers: 0, restarts: 1/86, execs: 86 (2/sec), cover: 480, uptime: 42s
+2016/09/16 13:59:17 slaves: 8, corpus: 17 (5s ago), crashers: 0, restarts: 1/66, execs: 132 (3/sec), cover: 487, uptime: 45s
+2016/09/16 13:59:20 slaves: 8, corpus: 17 (8s ago), crashers: 0, restarts: 1/440, execs: 2645 (55/sec), cover: 487, uptime: 48s
+
+```
+
+Let it run, and if the # of crashers is > 0, check out the reports in
+the workdir where you should be able to find the panic goroutine stack
+traces.
+*/
+
+package roaring
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/mschoch/smat"
+ "github.com/willf/bitset"
+)
+
+// fuzz test using state machine driven by byte stream.
+func FuzzSmat(data []byte) int {
+ return smat.Fuzz(&smatContext{}, smat.ActionID('S'), smat.ActionID('T'),
+ smatActionMap, data)
+}
+
+var smatDebug = false
+
+func smatLog(prefix, format string, args ...interface{}) {
+ if smatDebug {
+ fmt.Print(prefix)
+ fmt.Printf(format, args...)
+ }
+}
+
+type smatContext struct {
+ pairs []*smatPair
+
+ // Two registers, x & y.
+ x int
+ y int
+
+ actions int
+}
+
+type smatPair struct {
+ bm *Bitmap
+ bs *bitset.BitSet
+}
+
+// ------------------------------------------------------------------
+
+var smatActionMap = smat.ActionMap{
+ smat.ActionID('X'): smatAction("x++", smatWrap(func(c *smatContext) { c.x++ })),
+ smat.ActionID('x'): smatAction("x--", smatWrap(func(c *smatContext) { c.x-- })),
+ smat.ActionID('Y'): smatAction("y++", smatWrap(func(c *smatContext) { c.y++ })),
+ smat.ActionID('y'): smatAction("y--", smatWrap(func(c *smatContext) { c.y-- })),
+ smat.ActionID('*'): smatAction("x*y", smatWrap(func(c *smatContext) { c.x = c.x * c.y })),
+ smat.ActionID('<'): smatAction("x<<", smatWrap(func(c *smatContext) { c.x = c.x << 1 })),
+
+ smat.ActionID('^'): smatAction("swap", smatWrap(func(c *smatContext) { c.x, c.y = c.y, c.x })),
+
+ smat.ActionID('['): smatAction(" pushPair", smatWrap(smatPushPair)),
+ smat.ActionID(']'): smatAction(" popPair", smatWrap(smatPopPair)),
+
+ smat.ActionID('B'): smatAction(" setBit", smatWrap(smatSetBit)),
+ smat.ActionID('b'): smatAction(" removeBit", smatWrap(smatRemoveBit)),
+
+ smat.ActionID('o'): smatAction(" or", smatWrap(smatOr)),
+ smat.ActionID('a'): smatAction(" and", smatWrap(smatAnd)),
+
+ smat.ActionID('#'): smatAction(" cardinality", smatWrap(smatCardinality)),
+
+ smat.ActionID('O'): smatAction(" orCardinality", smatWrap(smatOrCardinality)),
+ smat.ActionID('A'): smatAction(" andCardinality", smatWrap(smatAndCardinality)),
+
+ smat.ActionID('c'): smatAction(" clear", smatWrap(smatClear)),
+ smat.ActionID('r'): smatAction(" runOptimize", smatWrap(smatRunOptimize)),
+
+ smat.ActionID('e'): smatAction(" isEmpty", smatWrap(smatIsEmpty)),
+
+ smat.ActionID('i'): smatAction(" intersects", smatWrap(smatIntersects)),
+
+ smat.ActionID('f'): smatAction(" flip", smatWrap(smatFlip)),
+
+ smat.ActionID('-'): smatAction(" difference", smatWrap(smatDifference)),
+}
+
+var smatRunningPercentActions []smat.PercentAction
+
+func init() {
+ var ids []int
+ for actionId := range smatActionMap {
+ ids = append(ids, int(actionId))
+ }
+ sort.Ints(ids)
+
+ pct := 100 / len(smatActionMap)
+ for _, actionId := range ids {
+ smatRunningPercentActions = append(smatRunningPercentActions,
+ smat.PercentAction{pct, smat.ActionID(actionId)})
+ }
+
+ smatActionMap[smat.ActionID('S')] = smatAction("SETUP", smatSetupFunc)
+ smatActionMap[smat.ActionID('T')] = smatAction("TEARDOWN", smatTeardownFunc)
+}
+
+// We only have one smat state: running.
+func smatRunning(next byte) smat.ActionID {
+ return smat.PercentExecute(next, smatRunningPercentActions...)
+}
+
+func smatAction(name string, f func(ctx smat.Context) (smat.State, error)) func(smat.Context) (smat.State, error) {
+ return func(ctx smat.Context) (smat.State, error) {
+ c := ctx.(*smatContext)
+ c.actions++
+
+ smatLog(" ", "%s\n", name)
+
+ return f(ctx)
+ }
+}
+
+// Creates an smat action func based on a simple callback.
+func smatWrap(cb func(c *smatContext)) func(smat.Context) (next smat.State, err error) {
+ return func(ctx smat.Context) (next smat.State, err error) {
+ c := ctx.(*smatContext)
+ cb(c)
+ return smatRunning, nil
+ }
+}
+
+// Invokes a callback function with the input v bounded to len(c.pairs).
+func (c *smatContext) withPair(v int, cb func(*smatPair)) {
+ if len(c.pairs) > 0 {
+ if v < 0 {
+ v = -v
+ }
+ v = v % len(c.pairs)
+ cb(c.pairs[v])
+ }
+}
+
+// ------------------------------------------------------------------
+
+func smatSetupFunc(ctx smat.Context) (next smat.State, err error) {
+ return smatRunning, nil
+}
+
+func smatTeardownFunc(ctx smat.Context) (next smat.State, err error) {
+ return nil, err
+}
+
+// ------------------------------------------------------------------
+
+func smatPushPair(c *smatContext) {
+ c.pairs = append(c.pairs, &smatPair{
+ bm: NewBitmap(),
+ bs: bitset.New(100),
+ })
+}
+
+func smatPopPair(c *smatContext) {
+ if len(c.pairs) > 0 {
+ c.pairs = c.pairs[0 : len(c.pairs)-1]
+ }
+}
+
+func smatSetBit(c *smatContext) {
+ c.withPair(c.x, func(p *smatPair) {
+ y := uint32(c.y)
+ p.bm.AddInt(int(y))
+ p.bs.Set(uint(y))
+ p.checkEquals()
+ })
+}
+
+func smatRemoveBit(c *smatContext) {
+ c.withPair(c.x, func(p *smatPair) {
+ y := uint32(c.y)
+ p.bm.Remove(y)
+ p.bs.Clear(uint(y))
+ p.checkEquals()
+ })
+}
+
+func smatAnd(c *smatContext) {
+ c.withPair(c.x, func(px *smatPair) {
+ c.withPair(c.y, func(py *smatPair) {
+ px.bm.And(py.bm)
+ px.bs = px.bs.Intersection(py.bs)
+ px.checkEquals()
+ py.checkEquals()
+ })
+ })
+}
+
+func smatOr(c *smatContext) {
+ c.withPair(c.x, func(px *smatPair) {
+ c.withPair(c.y, func(py *smatPair) {
+ px.bm.Or(py.bm)
+ px.bs = px.bs.Union(py.bs)
+ px.checkEquals()
+ py.checkEquals()
+ })
+ })
+}
+
+func smatAndCardinality(c *smatContext) {
+ c.withPair(c.x, func(px *smatPair) {
+ c.withPair(c.y, func(py *smatPair) {
+ c0 := px.bm.AndCardinality(py.bm)
+ c1 := px.bs.IntersectionCardinality(py.bs)
+ if c0 != uint64(c1) {
+ panic("expected same add cardinality")
+ }
+ px.checkEquals()
+ py.checkEquals()
+ })
+ })
+}
+
+func smatOrCardinality(c *smatContext) {
+ c.withPair(c.x, func(px *smatPair) {
+ c.withPair(c.y, func(py *smatPair) {
+ c0 := px.bm.OrCardinality(py.bm)
+ c1 := px.bs.UnionCardinality(py.bs)
+ if c0 != uint64(c1) {
+ panic("expected same or cardinality")
+ }
+ px.checkEquals()
+ py.checkEquals()
+ })
+ })
+}
+
+func smatRunOptimize(c *smatContext) {
+ c.withPair(c.x, func(px *smatPair) {
+ px.bm.RunOptimize()
+ px.checkEquals()
+ })
+}
+
+func smatClear(c *smatContext) {
+ c.withPair(c.x, func(px *smatPair) {
+ px.bm.Clear()
+ px.bs = px.bs.ClearAll()
+ px.checkEquals()
+ })
+}
+
+func smatCardinality(c *smatContext) {
+ c.withPair(c.x, func(px *smatPair) {
+ c0 := px.bm.GetCardinality()
+ c1 := px.bs.Count()
+ if c0 != uint64(c1) {
+ panic("expected same cardinality")
+ }
+ })
+}
+
+func smatIsEmpty(c *smatContext) {
+ c.withPair(c.x, func(px *smatPair) {
+ c0 := px.bm.IsEmpty()
+ c1 := px.bs.None()
+ if c0 != c1 {
+ panic("expected same is empty")
+ }
+ })
+}
+
+func smatIntersects(c *smatContext) {
+ c.withPair(c.x, func(px *smatPair) {
+ c.withPair(c.y, func(py *smatPair) {
+ v0 := px.bm.Intersects(py.bm)
+ v1 := px.bs.IntersectionCardinality(py.bs) > 0
+ if v0 != v1 {
+ panic("intersects not equal")
+ }
+
+ px.checkEquals()
+ py.checkEquals()
+ })
+ })
+}
+
+func smatFlip(c *smatContext) {
+ c.withPair(c.x, func(p *smatPair) {
+ y := uint32(c.y)
+ p.bm.Flip(uint64(y), uint64(y)+1)
+ p.bs = p.bs.Flip(uint(y))
+ p.checkEquals()
+ })
+}
+
+func smatDifference(c *smatContext) {
+ c.withPair(c.x, func(px *smatPair) {
+ c.withPair(c.y, func(py *smatPair) {
+ px.bm.AndNot(py.bm)
+ px.bs = px.bs.Difference(py.bs)
+ px.checkEquals()
+ py.checkEquals()
+ })
+ })
+}
+
+func (p *smatPair) checkEquals() {
+ if !p.equalsBitSet(p.bs, p.bm) {
+ panic("bitset mismatch")
+ }
+}
+
+func (p *smatPair) equalsBitSet(a *bitset.BitSet, b *Bitmap) bool {
+ for i, e := a.NextSet(0); e; i, e = a.NextSet(i + 1) {
+ if !b.ContainsInt(int(i)) {
+ fmt.Printf("in a bitset, not b bitmap, i: %d\n", i)
+ fmt.Printf(" a bitset: %s\n b bitmap: %s\n",
+ a.String(), b.String())
+ return false
+ }
+ }
+
+ i := b.Iterator()
+ for i.HasNext() {
+ v := i.Next()
+ if !a.Test(uint(v)) {
+ fmt.Printf("in b bitmap, not a bitset, v: %d\n", v)
+ fmt.Printf(" a bitset: %s\n b bitmap: %s\n",
+ a.String(), b.String())
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/RoaringBitmap/roaring/util.go b/vendor/github.com/RoaringBitmap/roaring/util.go
new file mode 100644
index 0000000000..d212660d58
--- /dev/null
+++ b/vendor/github.com/RoaringBitmap/roaring/util.go
@@ -0,0 +1,315 @@
+package roaring
+
+import (
+ "math/rand"
+ "sort"
+)
+
+const (
+ arrayDefaultMaxSize = 4096 // containers with 4096 or fewer integers should be array containers.
+ arrayLazyLowerBound = 1024
+ maxCapacity = 1 << 16
+ serialCookieNoRunContainer = 12346 // only arrays and bitmaps
+ invalidCardinality = -1
+ serialCookie = 12347 // runs, arrays, and bitmaps
+ noOffsetThreshold = 4
+
+ // Compute wordSizeInBytes, the size of a word in bytes.
+ _m = ^uint64(0)
+ _logS = _m>>8&1 + _m>>16&1 + _m>>32&1
+ wordSizeInBytes = 1 << _logS
+
+ // other constants used in ctz_generic.go
+ wordSizeInBits = wordSizeInBytes << 3 // word size in bits
+)
+
+const maxWord = 1<<wordSizeInBits - 1
+
+// doesn't apply to runContainers
+func getSizeInBytesFromCardinality(card int) int {
+ if card > arrayDefaultMaxSize {
+ // bitmapContainer
+ return maxCapacity / 8
+ }
+ // arrayContainer
+ return 2 * card
+}
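+
+// Editorial note, not upstream text: at the array/bitmap crossover the two
+// representations cost the same. A worked example with the constants above
+// (2 bytes per array entry, fixed maxCapacity/8-byte bitmap):
+//
+// getSizeInBytesFromCardinality(4096) == 8192 // array: 2 * 4096
+// getSizeInBytesFromCardinality(4097) == 8192 // bitmap: 65536 / 8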
+
+func fill(arr []uint64, val uint64) {
+ for i := range arr {
+ arr[i] = val
+ }
+}
+func fillRange(arr []uint64, start, end int, val uint64) {
+ for i := start; i < end; i++ {
+ arr[i] = val
+ }
+}
+
+func fillArrayAND(container []uint16, bitmap1, bitmap2 []uint64) {
+ if len(bitmap1) != len(bitmap2) {
+ panic("array lengths don't match")
+ }
+ // TODO: rewrite in assembly
+ pos := 0
+ for k := range bitmap1 {
+ bitset := bitmap1[k] & bitmap2[k]
+ for bitset != 0 {
+ t := bitset & -bitset
+ container[pos] = uint16(k*64 + int(popcount(t-1)))
+ pos++
+ bitset ^= t
+ }
+ }
+}
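+
+// Editorial sketch, not in the upstream source: the inner loop above peels
+// off one set bit per iteration. t := bitset & -bitset isolates the lowest
+// set bit, and popcount(t-1) gives its zero-based position, since t-1 sets
+// exactly the bits below it. A minimal standalone version of the idiom
+// (the helper name forEachSetBit is hypothetical):
+func forEachSetBit(word uint64, emit func(pos int)) {
+ for word != 0 {
+ t := word & -word // isolate the lowest set bit
+ emit(int(popcount(t - 1))) // its position within the word
+ word ^= t // clear it and continue
+ }
+}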
+
+func fillArrayANDNOT(container []uint16, bitmap1, bitmap2 []uint64) {
+ if len(bitmap1) != len(bitmap2) {
+ panic("array lengths don't match")
+ }
+ // TODO: rewrite in assembly
+ pos := 0
+ for k := range bitmap1 {
+ bitset := bitmap1[k] &^ bitmap2[k]
+ for bitset != 0 {
+ t := bitset & -bitset
+ container[pos] = uint16(k*64 + int(popcount(t-1)))
+ pos++
+ bitset ^= t
+ }
+ }
+}
+
+func fillArrayXOR(container []uint16, bitmap1, bitmap2 []uint64) {
+ if len(bitmap1) != len(bitmap2) {
+ panic("array lengths don't match")
+ }
+ // TODO: rewrite in assembly
+ pos := 0
+ for k := range bitmap1 {
+ bitset := bitmap1[k] ^ bitmap2[k]
+ for bitset != 0 {
+ t := bitset & -bitset
+ container[pos] = uint16(k*64 + int(popcount(t-1)))
+ pos++
+ bitset ^= t
+ }
+ }
+}
+
+func highbits(x uint32) uint16 {
+ return uint16(x >> 16)
+}
+func lowbits(x uint32) uint16 {
+ return uint16(x & 0xFFFF)
+}
+
+const maxLowBit = 0xFFFF
+
+func flipBitmapRange(bitmap []uint64, start int, end int) {
+ if start >= end {
+ return
+ }
+ firstword := start / 64
+ endword := (end - 1) / 64
+ bitmap[firstword] ^= ^(^uint64(0) << uint(start%64))
+ for i := firstword; i < endword; i++ {
+ //p("flipBitmapRange on i=%v", i)
+ bitmap[i] = ^bitmap[i]
+ }
+ bitmap[endword] ^= ^uint64(0) >> (uint(-end) % 64)
+}
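+
+// Editorial note on the closing mask above, not upstream text: on two's
+// complement targets uint(-end)%64 equals (64-end%64)%64, so the mask
+// ^uint64(0) >> (uint(-end) % 64) keeps exactly the low end%64 bits of the
+// last word (all 64 bits when end is a multiple of 64). For example, with
+// end == 70 the shift is 58 and the mask covers bits 0..5 of bitmap[1].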
+
+func resetBitmapRange(bitmap []uint64, start int, end int) {
+ if start >= end {
+ return
+ }
+ firstword := start / 64
+ endword := (end - 1) / 64
+ if firstword == endword {
+ bitmap[firstword] &= ^((^uint64(0) << uint(start%64)) & (^uint64(0) >> (uint(-end) % 64)))
+ return
+ }
+ bitmap[firstword] &= ^(^uint64(0) << uint(start%64))
+ for i := firstword + 1; i < endword; i++ {
+ bitmap[i] = 0
+ }
+ bitmap[endword] &= ^(^uint64(0) >> (uint(-end) % 64))
+}
+
+func setBitmapRange(bitmap []uint64, start int, end int) {
+ if start >= end {
+ return
+ }
+ firstword := start / 64
+ endword := (end - 1) / 64
+ if firstword == endword {
+ bitmap[firstword] |= (^uint64(0) << uint(start%64)) & (^uint64(0) >> (uint(-end) % 64))
+ return
+ }
+ bitmap[firstword] |= ^uint64(0) << uint(start%64)
+ for i := firstword + 1; i < endword; i++ {
+ bitmap[i] = ^uint64(0)
+ }
+ bitmap[endword] |= ^uint64(0) >> (uint(-end) % 64)
+}
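+
+// Worked example (editorial, not upstream): setBitmapRange(b, 3, 130) sets
+// bits 3..63 of b[0], all 64 bits of b[1], and bits 0..1 of b[2], using the
+// same first-word/middle-words/last-word split as the two functions above.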
+
+func flipBitmapRangeAndCardinalityChange(bitmap []uint64, start int, end int) int {
+ before := wordCardinalityForBitmapRange(bitmap, start, end)
+ flipBitmapRange(bitmap, start, end)
+ after := wordCardinalityForBitmapRange(bitmap, start, end)
+ return int(after - before)
+}
+
+func resetBitmapRangeAndCardinalityChange(bitmap []uint64, start int, end int) int {
+ before := wordCardinalityForBitmapRange(bitmap, start, end)
+ resetBitmapRange(bitmap, start, end)
+ after := wordCardinalityForBitmapRange(bitmap, start, end)
+ return int(after - before)
+}
+
+func setBitmapRangeAndCardinalityChange(bitmap []uint64, start int, end int) int {
+ before := wordCardinalityForBitmapRange(bitmap, start, end)
+ setBitmapRange(bitmap, start, end)
+ after := wordCardinalityForBitmapRange(bitmap, start, end)
+ return int(after - before)
+}
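+
+// Editorial note, not upstream text: wordCardinalityForBitmapRange below
+// popcounts whole words, so it also counts bits outside [start, end). Those
+// bits are untouched by the mutation, so they cancel in the after-before
+// difference, and the returned delta is exactly the number of bits that
+// changed inside the range.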
+
+func wordCardinalityForBitmapRange(bitmap []uint64, start int, end int) uint64 {
+ answer := uint64(0)
+ if start >= end {
+ return answer
+ }
+ firstword := start / 64
+ endword := (end - 1) / 64
+ for i := firstword; i <= endword; i++ {
+ answer += popcount(bitmap[i])
+ }
+ return answer
+}
+
+func selectBitPosition(w uint64, j int) int {
+ seen := 0
+
+ // Narrow the 64-bit word to the 32-bit half containing the j-th set bit
+ part := w & 0xFFFFFFFF
+ n := popcount(part)
+ if n <= uint64(j) {
+ part = w >> 32
+ seen += 32
+ j -= int(n)
+ }
+ w = part
+
+ // Narrow to the 16-bit quarter
+ part = w & 0xFFFF
+ n = popcount(part)
+ if n <= uint64(j) {
+ part = w >> 16
+ seen += 16
+ j -= int(n)
+ }
+ w = part
+
+ // Narrow to the final byte
+ part = w & 0xFF
+ n = popcount(part)
+ if n <= uint64(j) {
+ part = w >> 8
+ seen += 8
+ j -= int(n)
+ }
+ w = part
+
+ // Scan the final byte bit by bit
+ var counter uint
+ for counter = 0; counter < 8; counter++ {
+ j -= int((w >> counter) & 1)
+ if j < 0 {
+ break
+ }
+ }
+ return seen + int(counter)
+}
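+
+// Illustrative usage, not upstream text: selectBitPosition returns the
+// position of the j-th (zero-based) set bit by repeatedly halving the word
+// and popcounting. For w == 0x16 (binary 10110, bits 1, 2 and 4 set):
+//
+// selectBitPosition(0x16, 0) == 1
+// selectBitPosition(0x16, 2) == 4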
+
+func panicOn(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+type ph struct {
+ orig int
+ rand int
+}
+
+type pha []ph
+
+func (p pha) Len() int { return len(p) }
+func (p pha) Less(i, j int) bool { return p[i].rand < p[j].rand }
+func (p pha) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func getRandomPermutation(n int) []int {
+ r := make([]ph, n)
+ for i := 0; i < n; i++ {
+ r[i].orig = i
+ r[i].rand = rand.Intn(1 << 29)
+ }
+ sort.Sort(pha(r))
+ m := make([]int, n)
+ for i := range m {
+ m[i] = r[i].orig
+ }
+ return m
+}
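+
+// Editorial aside, not upstream text: getRandomPermutation shuffles by
+// sorting random keys, which costs O(n log n); assuming the standard
+// math/rand package, rand.Perm(n) yields a uniform permutation of 0..n-1
+// in O(n) and could replace the ph/pha machinery above.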
+
+func minOfInt(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func maxOfInt(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func maxOfUint16(a, b uint16) uint16 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func minOfUint16(a, b uint16) uint16 {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func maxInt(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func maxUint16(a, b uint16) uint16 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func minUint16(a, b uint16) uint16 {
+ if a < b {
+ return a
+ }
+ return b
+}