You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

merge.go 22KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856
  1. // Copyright (c) 2017 Couchbase, Inc.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. package zap
  15. import (
  16. "bufio"
  17. "bytes"
  18. "encoding/binary"
  19. "fmt"
  20. "math"
  21. "os"
  22. "sort"
  23. "github.com/RoaringBitmap/roaring"
  24. seg "github.com/blevesearch/scorch_segment_api/v2"
  25. "github.com/blevesearch/vellum"
  26. "github.com/golang/snappy"
  27. )
  28. var DefaultFileMergerBufferSize = 1024 * 1024
  29. const docDropped = math.MaxUint64 // sentinel docNum to represent a deleted doc
  30. // Merge takes a slice of segments and bit masks describing which
  31. // documents may be dropped, and creates a new segment containing the
  32. // remaining data. This new segment is built at the specified path.
  33. func (*ZapPlugin) Merge(segments []seg.Segment, drops []*roaring.Bitmap, path string,
  34. closeCh chan struct{}, s seg.StatsReporter) (
  35. [][]uint64, uint64, error) {
  36. segmentBases := make([]*SegmentBase, len(segments))
  37. for segmenti, segment := range segments {
  38. switch segmentx := segment.(type) {
  39. case *Segment:
  40. segmentBases[segmenti] = &segmentx.SegmentBase
  41. case *SegmentBase:
  42. segmentBases[segmenti] = segmentx
  43. default:
  44. panic(fmt.Sprintf("oops, unexpected segment type: %T", segment))
  45. }
  46. }
  47. return mergeSegmentBases(segmentBases, drops, path, defaultChunkFactor, closeCh, s)
  48. }
  49. func mergeSegmentBases(segmentBases []*SegmentBase, drops []*roaring.Bitmap, path string,
  50. chunkFactor uint32, closeCh chan struct{}, s seg.StatsReporter) (
  51. [][]uint64, uint64, error) {
  52. flag := os.O_RDWR | os.O_CREATE
  53. f, err := os.OpenFile(path, flag, 0600)
  54. if err != nil {
  55. return nil, 0, err
  56. }
  57. cleanup := func() {
  58. _ = f.Close()
  59. _ = os.Remove(path)
  60. }
  61. // buffer the output
  62. br := bufio.NewWriterSize(f, DefaultFileMergerBufferSize)
  63. // wrap it for counting (tracking offsets)
  64. cr := NewCountHashWriterWithStatsReporter(br, s)
  65. newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, _, _, _, err :=
  66. MergeToWriter(segmentBases, drops, chunkFactor, cr, closeCh)
  67. if err != nil {
  68. cleanup()
  69. return nil, 0, err
  70. }
  71. err = persistFooter(numDocs, storedIndexOffset, fieldsIndexOffset,
  72. docValueOffset, chunkFactor, cr.Sum32(), cr)
  73. if err != nil {
  74. cleanup()
  75. return nil, 0, err
  76. }
  77. err = br.Flush()
  78. if err != nil {
  79. cleanup()
  80. return nil, 0, err
  81. }
  82. err = f.Sync()
  83. if err != nil {
  84. cleanup()
  85. return nil, 0, err
  86. }
  87. err = f.Close()
  88. if err != nil {
  89. cleanup()
  90. return nil, 0, err
  91. }
  92. return newDocNums, uint64(cr.Count()), nil
  93. }
  94. func MergeToWriter(segments []*SegmentBase, drops []*roaring.Bitmap,
  95. chunkFactor uint32, cr *CountHashWriter, closeCh chan struct{}) (
  96. newDocNums [][]uint64,
  97. numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset uint64,
  98. dictLocs []uint64, fieldsInv []string, fieldsMap map[string]uint16,
  99. err error) {
  100. docValueOffset = uint64(fieldNotUninverted)
  101. var fieldsSame bool
  102. fieldsSame, fieldsInv = mergeFields(segments)
  103. fieldsMap = mapFields(fieldsInv)
  104. numDocs = computeNewDocCount(segments, drops)
  105. if isClosed(closeCh) {
  106. return nil, 0, 0, 0, 0, nil, nil, nil, seg.ErrClosed
  107. }
  108. if numDocs > 0 {
  109. storedIndexOffset, newDocNums, err = mergeStoredAndRemap(segments, drops,
  110. fieldsMap, fieldsInv, fieldsSame, numDocs, cr, closeCh)
  111. if err != nil {
  112. return nil, 0, 0, 0, 0, nil, nil, nil, err
  113. }
  114. dictLocs, docValueOffset, err = persistMergedRest(segments, drops,
  115. fieldsInv, fieldsMap, fieldsSame,
  116. newDocNums, numDocs, chunkFactor, cr, closeCh)
  117. if err != nil {
  118. return nil, 0, 0, 0, 0, nil, nil, nil, err
  119. }
  120. } else {
  121. dictLocs = make([]uint64, len(fieldsInv))
  122. }
  123. fieldsIndexOffset, err = persistFields(fieldsInv, cr, dictLocs)
  124. if err != nil {
  125. return nil, 0, 0, 0, 0, nil, nil, nil, err
  126. }
  127. return newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, dictLocs, fieldsInv, fieldsMap, nil
  128. }
  129. // mapFields takes the fieldsInv list and returns a map of fieldName
  130. // to fieldID+1
  131. func mapFields(fields []string) map[string]uint16 {
  132. rv := make(map[string]uint16, len(fields))
  133. for i, fieldName := range fields {
  134. rv[fieldName] = uint16(i) + 1
  135. }
  136. return rv
  137. }
  138. // computeNewDocCount determines how many documents will be in the newly
  139. // merged segment when obsoleted docs are dropped
  140. func computeNewDocCount(segments []*SegmentBase, drops []*roaring.Bitmap) uint64 {
  141. var newDocCount uint64
  142. for segI, segment := range segments {
  143. newDocCount += segment.numDocs
  144. if drops[segI] != nil {
  145. newDocCount -= drops[segI].GetCardinality()
  146. }
  147. }
  148. return newDocCount
  149. }
// persistMergedRest merges and writes, field by field, the term
// dictionaries (vellum FSTs), postings (roaring bitmap + freq/norm +
// loc chunks), and doc value sections of all input segments, remapping
// old doc numbers through newDocNumsIn.  It returns the per-field
// dictionary offsets and the offset of the fieldDvLocs index, or
// seg.ErrClosed if closeCh fires mid-merge.
func persistMergedRest(segments []*SegmentBase, dropsIn []*roaring.Bitmap,
	fieldsInv []string, fieldsMap map[string]uint16, fieldsSame bool,
	newDocNumsIn [][]uint64, newSegDocCount uint64, chunkFactor uint32,
	w *CountHashWriter, closeCh chan struct{}) ([]uint64, uint64, error) {
	// scratch buffers reused across every field/term to avoid churn
	var bufMaxVarintLen64 []byte = make([]byte, binary.MaxVarintLen64)
	var bufLoc []uint64

	var postings *PostingsList
	var postItr *PostingsIterator

	rv := make([]uint64, len(fieldsInv))
	fieldDvLocsStart := make([]uint64, len(fieldsInv))
	fieldDvLocsEnd := make([]uint64, len(fieldsInv))

	tfEncoder := newChunkedIntCoder(uint64(chunkFactor), newSegDocCount-1)
	locEncoder := newChunkedIntCoder(uint64(chunkFactor), newSegDocCount-1)

	// one vellum builder, Reset() and reused for each field
	var vellumBuf bytes.Buffer
	newVellum, err := vellum.New(&vellumBuf, nil)
	if err != nil {
		return nil, 0, err
	}

	// accumulates the remapped doc numbers for the current term
	newRoaring := roaring.NewBitmap()

	// for each field
	for fieldID, fieldName := range fieldsInv {
		// collect FST iterators from all active segments for this field
		var newDocNums [][]uint64
		var drops []*roaring.Bitmap
		var dicts []*Dictionary
		var itrs []vellum.Iterator
		var segmentsInFocus []*SegmentBase
		for segmentI, segment := range segments {
			// check for the closure in meantime
			if isClosed(closeCh) {
				return nil, 0, seg.ErrClosed
			}
			dict, err2 := segment.dictionary(fieldName)
			if err2 != nil {
				return nil, 0, err2
			}
			if dict != nil && dict.fst != nil {
				itr, err2 := dict.fst.Iterator(nil, nil)
				if err2 != nil && err2 != vellum.ErrIteratorDone {
					return nil, 0, err2
				}
				if itr != nil {
					// the parallel slices below are indexed by itrI,
					// the position within this field's active set
					newDocNums = append(newDocNums, newDocNumsIn[segmentI])
					if dropsIn[segmentI] != nil && !dropsIn[segmentI].IsEmpty() {
						drops = append(drops, dropsIn[segmentI])
					} else {
						drops = append(drops, nil)
					}
					dicts = append(dicts, dict)
					itrs = append(itrs, itr)
					segmentsInFocus = append(segmentsInFocus, segment)
				}
			}
		}

		var prevTerm []byte

		newRoaring.Clear()

		// bookkeeping for the 1-hit encoding decision below
		var lastDocNum, lastFreq, lastNorm uint64

		// determines whether to use "1-hit" encoding optimization
		// when a term appears in only 1 doc, with no loc info,
		// has freq of 1, and the docNum fits into 31-bits
		use1HitEncoding := func(termCardinality uint64) (bool, uint64, uint64) {
			if termCardinality == uint64(1) && locEncoder.FinalSize() <= 0 {
				docNum := uint64(newRoaring.Minimum())
				if under32Bits(docNum) && docNum == lastDocNum && lastFreq == 1 {
					return true, docNum, lastNorm
				}
			}
			return false, 0, 0
		}

		// finishTerm flushes the accumulated postings for one term,
		// inserts it into the new FST, and resets the accumulators
		finishTerm := func(term []byte) error {
			tfEncoder.Close()
			locEncoder.Close()

			postingsOffset, err := writePostings(newRoaring,
				tfEncoder, locEncoder, use1HitEncoding, w, bufMaxVarintLen64)
			if err != nil {
				return err
			}

			if postingsOffset > 0 {
				err = newVellum.Insert(term, postingsOffset)
				if err != nil {
					return err
				}
			}

			newRoaring.Clear()

			tfEncoder.Reset()
			locEncoder.Reset()

			lastDocNum = 0
			lastFreq = 0
			lastNorm = 0

			return nil
		}

		// the enumerator yields terms in sorted order across all
		// segment iterators, so equal terms arrive adjacently
		enumerator, err := newEnumerator(itrs)

		for err == nil {
			term, itrI, postingsOffset := enumerator.Current()

			if !bytes.Equal(prevTerm, term) {
				// check for the closure in meantime
				if isClosed(closeCh) {
					return nil, 0, seg.ErrClosed
				}

				// if the term changed, write out the info collected
				// for the previous term
				err = finishTerm(prevTerm)
				if err != nil {
					return nil, 0, err
				}
			}

			postings, err = dicts[itrI].postingsListFromOffset(
				postingsOffset, drops[itrI], postings)
			if err != nil {
				return nil, 0, err
			}

			postItr = postings.iterator(true, true, true, postItr)

			if fieldsSame {
				// can optimize by copying freq/norm/loc bytes directly
				lastDocNum, lastFreq, lastNorm, err = mergeTermFreqNormLocsByCopying(
					term, postItr, newDocNums[itrI], newRoaring,
					tfEncoder, locEncoder)
			} else {
				lastDocNum, lastFreq, lastNorm, bufLoc, err = mergeTermFreqNormLocs(
					fieldsMap, term, postItr, newDocNums[itrI], newRoaring,
					tfEncoder, locEncoder, bufLoc)
			}
			if err != nil {
				return nil, 0, err
			}

			prevTerm = prevTerm[:0] // copy to prevTerm in case Next() reuses term mem
			prevTerm = append(prevTerm, term...)

			err = enumerator.Next()
		}
		// ErrIteratorDone is the expected loop-exit condition
		if err != vellum.ErrIteratorDone {
			return nil, 0, err
		}

		// flush the final term of this field
		err = finishTerm(prevTerm)
		if err != nil {
			return nil, 0, err
		}

		dictOffset := uint64(w.Count())

		err = newVellum.Close()
		if err != nil {
			return nil, 0, err
		}
		vellumData := vellumBuf.Bytes()

		// write out the length of the vellum data
		n := binary.PutUvarint(bufMaxVarintLen64, uint64(len(vellumData)))
		_, err = w.Write(bufMaxVarintLen64[:n])
		if err != nil {
			return nil, 0, err
		}

		// write this vellum to disk
		_, err = w.Write(vellumData)
		if err != nil {
			return nil, 0, err
		}

		rv[fieldID] = dictOffset

		// get the field doc value offset (start)
		fieldDvLocsStart[fieldID] = uint64(w.Count())

		// update the field doc values
		fdvEncoder := newChunkedContentCoder(uint64(chunkFactor), newSegDocCount-1, w, true)

		fdvReadersAvailable := false
		var dvIterClone *docValueReader
		for segmentI, segment := range segmentsInFocus {
			// check for the closure in meantime
			if isClosed(closeCh) {
				return nil, 0, seg.ErrClosed
			}
			// fieldsMap values are fieldID+1 (see mapFields)
			fieldIDPlus1 := uint16(segment.fieldsMap[fieldName])
			if dvIter, exists := segment.fieldDvReaders[fieldIDPlus1-1]; exists &&
				dvIter != nil {
				fdvReadersAvailable = true
				dvIterClone = dvIter.cloneInto(dvIterClone)
				err = dvIterClone.iterateAllDocValues(segment, func(docNum uint64, terms []byte) error {
					// skip doc values of dropped docs
					if newDocNums[segmentI][docNum] == docDropped {
						return nil
					}
					err := fdvEncoder.Add(newDocNums[segmentI][docNum], terms)
					if err != nil {
						return err
					}
					return nil
				})
				if err != nil {
					return nil, 0, err
				}
			}
		}

		if fdvReadersAvailable {
			err = fdvEncoder.Close()
			if err != nil {
				return nil, 0, err
			}

			// persist the doc value details for this field
			_, err = fdvEncoder.Write()
			if err != nil {
				return nil, 0, err
			}

			// get the field doc value offset (end)
			fieldDvLocsEnd[fieldID] = uint64(w.Count())
		} else {
			// mark this field as having no doc values
			fieldDvLocsStart[fieldID] = fieldNotUninverted
			fieldDvLocsEnd[fieldID] = fieldNotUninverted
		}

		// reset vellum buffer and vellum builder
		vellumBuf.Reset()
		err = newVellum.Reset(&vellumBuf)
		if err != nil {
			return nil, 0, err
		}
	}

	// finally, write the per-field start/end doc value offsets index
	fieldDvLocsOffset := uint64(w.Count())

	buf := bufMaxVarintLen64
	for i := 0; i < len(fieldDvLocsStart); i++ {
		n := binary.PutUvarint(buf, fieldDvLocsStart[i])
		_, err := w.Write(buf[:n])
		if err != nil {
			return nil, 0, err
		}
		n = binary.PutUvarint(buf, fieldDvLocsEnd[i])
		_, err = w.Write(buf[:n])
		if err != nil {
			return nil, 0, err
		}
	}

	return rv, fieldDvLocsOffset, nil
}
// mergeTermFreqNormLocs walks one segment's postings for the current
// term, remaps each hit's doc number via newDocNums, and re-encodes
// its freq/norm and location data into the tf/loc encoders (needed
// when field IDs differ across segments).  It returns the last
// docNum/freq/norm seen — consumed by the caller's 1-hit encoding
// check — plus the possibly-grown bufLoc scratch slice for reuse.
func mergeTermFreqNormLocs(fieldsMap map[string]uint16, term []byte, postItr *PostingsIterator,
	newDocNums []uint64, newRoaring *roaring.Bitmap,
	tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder, bufLoc []uint64) (
	lastDocNum uint64, lastFreq uint64, lastNorm uint64, bufLocOut []uint64, err error) {
	next, err := postItr.Next()
	for next != nil && err == nil {
		hitNewDocNum := newDocNums[next.Number()]
		// dropped docs were filtered out upstream; seeing one is a bug
		if hitNewDocNum == docDropped {
			return 0, 0, 0, nil, fmt.Errorf("see hit with dropped docNum")
		}

		newRoaring.Add(uint32(hitNewDocNum))

		nextFreq := next.Frequency()
		// norm is stored as the raw bits of a float32
		nextNorm := uint64(math.Float32bits(float32(next.Norm())))

		locs := next.Locations()

		err = tfEncoder.Add(hitNewDocNum,
			encodeFreqHasLocs(nextFreq, len(locs) > 0), nextNorm)
		if err != nil {
			return 0, 0, 0, nil, err
		}

		if len(locs) > 0 {
			// first emit the total byte size of this doc's loc data
			numBytesLocs := 0
			for _, loc := range locs {
				ap := loc.ArrayPositions()
				numBytesLocs += totalUvarintBytes(uint64(fieldsMap[loc.Field()]-1),
					loc.Pos(), loc.Start(), loc.End(), uint64(len(ap)), ap)
			}

			err = locEncoder.Add(hitNewDocNum, uint64(numBytesLocs))
			if err != nil {
				return 0, 0, 0, nil, err
			}

			for _, loc := range locs {
				ap := loc.ArrayPositions()
				// grow the scratch slice if 5 fixed args + array
				// positions won't fit in the current capacity
				if cap(bufLoc) < 5+len(ap) {
					bufLoc = make([]uint64, 0, 5+len(ap))
				}
				// reslice within capacity; contents are fully
				// overwritten before use
				args := bufLoc[0:5]
				// fieldsMap values are fieldID+1 (see mapFields)
				args[0] = uint64(fieldsMap[loc.Field()] - 1)
				args[1] = loc.Pos()
				args[2] = loc.Start()
				args[3] = loc.End()
				args[4] = uint64(len(ap))
				args = append(args, ap...)
				err = locEncoder.Add(hitNewDocNum, args...)
				if err != nil {
					return 0, 0, 0, nil, err
				}
			}
		}

		lastDocNum = hitNewDocNum
		lastFreq = nextFreq
		lastNorm = nextNorm

		next, err = postItr.Next()
	}

	// err from the final Next() is propagated as-is to the caller
	return lastDocNum, lastFreq, lastNorm, bufLoc, err
}
// mergeTermFreqNormLocsByCopying is the fast path used when the field
// mapping is identical across segments (fieldsSame): the already
// encoded freq/norm and loc bytes can be copied verbatim into the
// encoders, with only the doc number remapped — no decode/re-encode.
// NOTE(review): the term parameter is unused here; it appears to be
// kept only to mirror mergeTermFreqNormLocs' call shape — confirm
// before removing.
func mergeTermFreqNormLocsByCopying(term []byte, postItr *PostingsIterator,
	newDocNums []uint64, newRoaring *roaring.Bitmap,
	tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder) (
	lastDocNum uint64, lastFreq uint64, lastNorm uint64, err error) {
	nextDocNum, nextFreq, nextNorm, nextFreqNormBytes, nextLocBytes, err :=
		postItr.nextBytes()
	// empty nextFreqNormBytes signals iterator exhaustion
	for err == nil && len(nextFreqNormBytes) > 0 {
		hitNewDocNum := newDocNums[nextDocNum]
		// dropped docs were filtered out upstream; seeing one is a bug
		if hitNewDocNum == docDropped {
			return 0, 0, 0, fmt.Errorf("see hit with dropped doc num")
		}

		newRoaring.Add(uint32(hitNewDocNum))
		err = tfEncoder.AddBytes(hitNewDocNum, nextFreqNormBytes)
		if err != nil {
			return 0, 0, 0, err
		}

		if len(nextLocBytes) > 0 {
			err = locEncoder.AddBytes(hitNewDocNum, nextLocBytes)
			if err != nil {
				return 0, 0, 0, err
			}
		}

		// remember the last hit for the caller's 1-hit encoding check
		lastDocNum = hitNewDocNum
		lastFreq = nextFreq
		lastNorm = nextNorm

		nextDocNum, nextFreq, nextNorm, nextFreqNormBytes, nextLocBytes, err =
			postItr.nextBytes()
	}

	return lastDocNum, lastFreq, lastNorm, err
}
  459. func writePostings(postings *roaring.Bitmap, tfEncoder, locEncoder *chunkedIntCoder,
  460. use1HitEncoding func(uint64) (bool, uint64, uint64),
  461. w *CountHashWriter, bufMaxVarintLen64 []byte) (
  462. offset uint64, err error) {
  463. termCardinality := postings.GetCardinality()
  464. if termCardinality <= 0 {
  465. return 0, nil
  466. }
  467. if use1HitEncoding != nil {
  468. encodeAs1Hit, docNum1Hit, normBits1Hit := use1HitEncoding(termCardinality)
  469. if encodeAs1Hit {
  470. return FSTValEncode1Hit(docNum1Hit, normBits1Hit), nil
  471. }
  472. }
  473. tfOffset := uint64(w.Count())
  474. _, err = tfEncoder.Write(w)
  475. if err != nil {
  476. return 0, err
  477. }
  478. locOffset := uint64(w.Count())
  479. _, err = locEncoder.Write(w)
  480. if err != nil {
  481. return 0, err
  482. }
  483. postingsOffset := uint64(w.Count())
  484. n := binary.PutUvarint(bufMaxVarintLen64, tfOffset)
  485. _, err = w.Write(bufMaxVarintLen64[:n])
  486. if err != nil {
  487. return 0, err
  488. }
  489. n = binary.PutUvarint(bufMaxVarintLen64, locOffset)
  490. _, err = w.Write(bufMaxVarintLen64[:n])
  491. if err != nil {
  492. return 0, err
  493. }
  494. _, err = writeRoaringWithLen(postings, w, bufMaxVarintLen64)
  495. if err != nil {
  496. return 0, err
  497. }
  498. return postingsOffset, nil
  499. }
  500. type varintEncoder func(uint64) (int, error)
// mergeStoredAndRemap writes out the stored-field records for every
// surviving doc across the input segments, and builds, per segment,
// the mapping from old doc number to new doc number (docDropped marks
// deleted docs).  It returns the offset of the stored-doc index and
// those per-segment remapping tables, or seg.ErrClosed if closeCh
// fires mid-merge.
func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap,
	fieldsMap map[string]uint16, fieldsInv []string, fieldsSame bool, newSegDocCount uint64,
	w *CountHashWriter, closeCh chan struct{}) (uint64, [][]uint64, error) {
	var rv [][]uint64 // The remapped or newDocNums for each segment.

	var newDocNum uint64

	var curr int
	var data, compressed []byte
	var metaBuf bytes.Buffer
	varBuf := make([]byte, binary.MaxVarintLen64)
	// metaEncode appends one uvarint to the per-doc meta buffer
	metaEncode := func(val uint64) (int, error) {
		wb := binary.PutUvarint(varBuf, val)
		return metaBuf.Write(varBuf[:wb])
	}

	// per-field scratch (values, types, array positions) reused for
	// every doc to avoid per-doc allocations
	vals := make([][][]byte, len(fieldsInv))
	typs := make([][]byte, len(fieldsInv))
	poss := make([][][]uint64, len(fieldsInv))

	var posBuf []uint64

	docNumOffsets := make([]uint64, newSegDocCount)

	vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx)
	defer visitDocumentCtxPool.Put(vdc)

	// for each segment
	for segI, segment := range segments {
		// check for the closure in meantime
		if isClosed(closeCh) {
			return 0, nil, seg.ErrClosed
		}

		segNewDocNums := make([]uint64, segment.numDocs)

		dropsI := drops[segI]

		// optimize when the field mapping is the same across all
		// segments and there are no deletions, via byte-copying
		// of stored docs bytes directly to the writer
		if fieldsSame && (dropsI == nil || dropsI.GetCardinality() == 0) {
			err := segment.copyStoredDocs(newDocNum, docNumOffsets, w)
			if err != nil {
				return 0, nil, err
			}

			// identity-with-offset remapping for the whole segment
			for i := uint64(0); i < segment.numDocs; i++ {
				segNewDocNums[i] = newDocNum
				newDocNum++
			}
			rv = append(rv, segNewDocNums)

			continue
		}

		// for each doc num
		for docNum := uint64(0); docNum < segment.numDocs; docNum++ {
			// TODO: roaring's API limits docNums to 32-bits?
			if dropsI != nil && dropsI.Contains(uint32(docNum)) {
				segNewDocNums[docNum] = docDropped
				continue
			}

			segNewDocNums[docNum] = newDocNum

			curr = 0
			metaBuf.Reset()
			data = data[:0]

			posTemp := posBuf

			// collect all the data
			for i := 0; i < len(fieldsInv); i++ {
				vals[i] = vals[i][:0]
				typs[i] = typs[i][:0]
				poss[i] = poss[i][:0]
			}
			err := segment.visitStoredFields(vdc, docNum, func(field string, typ byte, value []byte, pos []uint64) bool {
				fieldID := int(fieldsMap[field]) - 1
				vals[fieldID] = append(vals[fieldID], value)
				typs[fieldID] = append(typs[fieldID], typ)

				// copy array positions to preserve them beyond the scope of this callback
				var curPos []uint64
				if len(pos) > 0 {
					if cap(posTemp) < len(pos) {
						posBuf = make([]uint64, len(pos)*len(fieldsInv))
						posTemp = posBuf
					}
					curPos = posTemp[0:len(pos)]
					copy(curPos, pos)
					posTemp = posTemp[len(pos):]
				}
				poss[fieldID] = append(poss[fieldID], curPos)

				return true
			})
			if err != nil {
				return 0, nil, err
			}

			// _id field special case optimizes ExternalID() lookups
			// (field 0 is "_id" — see mergeFields; NOTE(review): this
			// indexes [0] unguarded, so it relies on every doc having
			// a stored _id value — confirm upstream guarantee)
			idFieldVal := vals[uint16(0)][0]
			_, err = metaEncode(uint64(len(idFieldVal)))
			if err != nil {
				return 0, nil, err
			}

			// now walk the non-"_id" fields in order
			for fieldID := 1; fieldID < len(fieldsInv); fieldID++ {
				storedFieldValues := vals[fieldID]

				stf := typs[fieldID]
				spf := poss[fieldID]

				var err2 error
				curr, data, err2 = persistStoredFieldValues(fieldID,
					storedFieldValues, stf, spf, curr, metaEncode, data)
				if err2 != nil {
					return 0, nil, err2
				}
			}

			metaBytes := metaBuf.Bytes()

			compressed = snappy.Encode(compressed[:cap(compressed)], data)

			// record where we're about to start writing
			docNumOffsets[newDocNum] = uint64(w.Count())

			// write out the meta len and compressed data len
			_, err = writeUvarints(w,
				uint64(len(metaBytes)),
				uint64(len(idFieldVal)+len(compressed)))
			if err != nil {
				return 0, nil, err
			}
			// now write the meta
			_, err = w.Write(metaBytes)
			if err != nil {
				return 0, nil, err
			}
			// now write the _id field val (counted as part of the 'compressed' data)
			_, err = w.Write(idFieldVal)
			if err != nil {
				return 0, nil, err
			}
			// now write the compressed data
			_, err = w.Write(compressed)
			if err != nil {
				return 0, nil, err
			}

			newDocNum++
		}

		rv = append(rv, segNewDocNums)
	}

	// return value is the start of the stored index
	storedIndexOffset := uint64(w.Count())

	// now write out the stored doc index
	for _, docNumOffset := range docNumOffsets {
		err := binary.Write(w, binary.BigEndian, docNumOffset)
		if err != nil {
			return 0, nil, err
		}
	}

	return storedIndexOffset, rv, nil
}
// copyStoredDocs writes out a segment's stored doc info, optimized by
// using a single Write() call for the entire set of bytes.  The
// newDocNumOffsets is filled with the new offsets for each doc.
func (s *SegmentBase) copyStoredDocs(newDocNum uint64, newDocNumOffsets []uint64,
	w *CountHashWriter) error {
	if s.numDocs <= 0 {
		return nil
	}

	// bracket the contiguous stored-doc region using the first and
	// last docs' offsets
	indexOffset0, storedOffset0, _, _, _ :=
		s.getDocStoredOffsets(0) // the segment's first doc

	indexOffsetN, storedOffsetN, readN, metaLenN, dataLenN :=
		s.getDocStoredOffsets(s.numDocs - 1) // the segment's last doc

	// where this region will begin in the merged output
	storedOffset0New := uint64(w.Count())

	// one bulk write of everything between the first doc's start and
	// the end of the last doc's record
	storedBytes := s.mem[storedOffset0 : storedOffsetN+readN+metaLenN+dataLenN]
	_, err := w.Write(storedBytes)
	if err != nil {
		return err
	}

	// remap the storedOffset's for the docs into new offsets relative
	// to storedOffset0New, filling the given docNumOffsetsOut array
	for indexOffset := indexOffset0; indexOffset <= indexOffsetN; indexOffset += 8 {
		// each index entry is a big-endian uint64 stored offset
		storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8])
		storedOffsetNew := storedOffset - storedOffset0 + storedOffset0New
		newDocNumOffsets[newDocNum] = storedOffsetNew
		newDocNum += 1
	}

	return nil
}
  670. // mergeFields builds a unified list of fields used across all the
  671. // input segments, and computes whether the fields are the same across
  672. // segments (which depends on fields to be sorted in the same way
  673. // across segments)
  674. func mergeFields(segments []*SegmentBase) (bool, []string) {
  675. fieldsSame := true
  676. var segment0Fields []string
  677. if len(segments) > 0 {
  678. segment0Fields = segments[0].Fields()
  679. }
  680. fieldsExist := map[string]struct{}{}
  681. for _, segment := range segments {
  682. fields := segment.Fields()
  683. for fieldi, field := range fields {
  684. fieldsExist[field] = struct{}{}
  685. if len(segment0Fields) != len(fields) || segment0Fields[fieldi] != field {
  686. fieldsSame = false
  687. }
  688. }
  689. }
  690. rv := make([]string, 0, len(fieldsExist))
  691. // ensure _id stays first
  692. rv = append(rv, "_id")
  693. for k := range fieldsExist {
  694. if k != "_id" {
  695. rv = append(rv, k)
  696. }
  697. }
  698. sort.Strings(rv[1:]) // leave _id as first
  699. return fieldsSame, rv
  700. }
  701. func isClosed(closeCh chan struct{}) bool {
  702. select {
  703. case <-closeCh:
  704. return true
  705. default:
  706. return false
  707. }
  708. }