// Copyright © 2015 Pierre Neidhardt <ambrevar at gmail dot com>
// Use of this file is governed by the license that can be found in LICENSE.

/*
A filesystem hierarchy synchronizer

Rename files in TARGET so that identical files found in SOURCE and TARGET have
the same relative path.

The main goal of the program is to make folder synchronization faster by
sparing big file transfers when a simple rename suffices. It complements other
synchronization programs that lack this capability.

See http://ambrevar.bitbucket.org/hsync and 'hsync -h' for more details.

Usage:

	hsync [OPTIONS] SOURCE TARGET

For usage options, see:

	hsync -h

Implementation details

We store the file entries in the following structure:

	entries := map[partialHash struct{size int64, pos int64, hash string}]fileMatch struct{
		sourceID *fileID{path string, h hash.Hash},
		targetID *fileID{path string, h hash.Hash}
	}

This 'entries' map indexes the possible file matches by content ('partialHash').
A file match references the paths that will be used to rename the file in TARGET
from 'oldpath' to 'newpath'. Note that 'newpath' is given by 'sourceID.path',
and 'oldpath' by 'targetID.path'.

The algorithm is centered around one main optimization: rolling checksums. We
assume that two files match if they have the same partial hash.

The initial partial hash is just the size with an empty hash. This speeds up the
process since it saves an open/close of the file: a simple 'stat' suffices.
Files are not read unless a rolling checksum is required. As a consequence,
unreadable files with a unique size will be stored in 'entries', while
unreadable conflicting files will be discarded. Note that this makes it
possible to rename files that cannot be read.
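
For instance (mirroring 'newFileEntry' below), a freshly stat'ed 1234-byte file
enters 'entries' under the key

	partialHash{size: 1234, pos: 0, hash: ""}

before any of its content has been read.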

One checksum roll increments 'pos' and updates the hash by hashing the next
BLOCKSIZE bytes of the file. BLOCKSIZE is set to a value that is commonly
believed to be optimal in most cases. The truly optimal value would be the
blocksize of the device where the file resides, but querying it for every file
would be more complex and memory consuming.

We choose md5 (128 bits) as the checksum algorithm. Adler32, CRC-32 and CRC-64
are only marginally faster while suffering from more collisions. This choice
should be backed up with a proper benchmark.

A conflict arises when two files in either SOURCE or TARGET have the same
partial hash. We solve the conflict by updating the partial hashes until they
differ. If the partial hashes cannot be updated any further (i.e. we reached
end-of-file), it means that the files are duplicates.

Notes:

- Partial hashes of conflicting files will be complete at the same roll since
the files have the same size.

- When a partial hash is complete, we have the following relation (see the
example after these notes):

	(pos-1)*BLOCKSIZE < filesize <= pos*BLOCKSIZE

- There is only one possible conflicting file at a time.
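
For instance, with BLOCKSIZE = 4096, the partial hash of a 10000-byte file is
complete at pos = 3, since 2*4096 = 8192 < 10000 <= 3*4096 = 12288.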

A file match may be erroneous if the partial hash is not complete. The most
obvious case is when two different files are the only ones of size N in SOURCE
and TARGET. This downside is a consequence of the design choice, i.e. the focus
on speed. Erroneous matches can be corrected in the preview file. If we wanted
no ambiguity, we would have to compute full hashes, which would take
approximately as much time as copying the files from SOURCE to TARGET, as a
regular synchronization tool would do.

We store the digest 'hash.Hash' together with the file path so that we can
update a partial hash later on.

Process:

1. We walk SOURCE completely. Only regular files are processed. The 'sourceID'
are stored. If two entries conflict (they have the same partial hash), we
update the partial hashes until they do not conflict anymore. If the conflict
is not resolvable, i.e. the partial hash is complete and the files are
identical, we drop both files from 'entries'.

Future files can have the same partial hash that led to a former conflict. To
distinguish their content from former conflicts when adding a new file, we must
compute the partial hash up to the 'pos' of the last conflict (the number of
checksum rolls). To keep track of this 'pos' when there is a conflict, we mark
all computed partial hashes with dummy values. When the next entry is added, we
have to compute its partial hash until it no longer matches a dummy value in
'entries'.
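
For instance, if SOURCE holds two 5000-byte files 'a' and 'b' whose first
blocks differ, the walk leaves

	entries[{5000, 0, ""}] = fileMatch{}        // dummy
	entries[{5000, 1, hash(a)}] = fileMatch{sourceID: &idA}
	entries[{5000, 1, hash(b)}] = fileMatch{sourceID: &idB}

where 'hash(x)' stands for the md5 digest of the first block of 'x'. Any
further 5000-byte file must then roll its checksum once before it can be
matched or added.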

Duplicates are not processed but display a warning. Usually the user does not
want duplicates, so she is better off fixing them before proceeding with the
renames. Handling duplicates properly would add a lot of complexity.

2. We walk TARGET completely. We skip all dummies as in the SOURCE walk.
We need to analyze SOURCE completely before we can check for matches.

- If there are only dummy entries, there was an unsolvable conflict in SOURCE.
We drop the file.

- If we end on a non-empty entry with an 'unsolvable' targetID, an unsolvable
conflict with target files happened earlier for this partial hash. This is only
possible at end-of-file. We drop the file.

- If we end on an empty entry, there is no match with SOURCE and we drop the
file.

- If we end on a non-empty entry without a previous match, we store the match.

- Else we end on a non-empty entry with one match already present. This is a
conflict. We solve the conflict as in the SOURCE walk, except that we need to
update the partial hashes of three files: the SOURCE file, the first TARGET
match and the new TARGET match.

3. We generate the 'renameOps' and 'reverseOps' maps. They map 'oldpath' to
'newpath' and 'newpath' to 'oldpath' respectively. We drop entries where
'oldpath==newpath' to spare a lot of noise.

Note that file names are not used to compute a match since they could be
identical while the content differs.
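
The preview printed to standard output is the JSON encoding of 'renameOps',
mapping 'oldpath' to 'newpath'. A one-entry preview could look like this
(hypothetical paths):

	{
		"2015/img0042.jpg": "holidays/2015/beach.jpg"
	}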

4. We proceed with the renames. Chains and cycles may occur.

- Example of a chain of renames: a->b, b->c, c->d.

- Example of a cycle of renames: a->b, b->c, c->a.

TARGET must be fully analyzed before proceeding with the renames so that we can
detect chains.

We always traverse a chain until we reach its end, then rename the elements
while going backward to its beginning. The beginning can be before the entry
point; 'reverseOps' is used for going backward.

When a cycle is detected, we break it by renaming one file to a temporary name.
Then we process everything as for a chain. Finally we rename the temporary file
to its original newpath.
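
For instance, breaking the cycle a->b, b->c, c->a starting from 'a' yields the
rename sequence: c->tmp, b->c, a->b, tmp->a.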
*/
package main

import (
	"crypto/md5"
	"encoding/json"
	"flag"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

const (
	APPLICATION = "hsync"
	VERSION     = "1.1"
	COPYRIGHT   = "Copyright (C) 2015 Pierre Neidhardt"
	BLOCKSIZE   = 4096
	SEPARATOR   = string(os.PathSeparator)
)

var usage = `Filesystem hierarchy synchronizer

Rename files in TARGET so that identical files found in SOURCE and TARGET have
the same relative path.

The main goal of the program is to make folder synchronization faster by
sparing big file transfers when a simple rename suffices. It complements other
synchronization programs that lack this capability.

By default, files are not renamed and a preview is printed to standard output.
False positives can happen, e.g. when two different files are the only files of
their size in SOURCE and TARGET. Use the preview to spot false positives and
make sure all files get renamed properly.

You can redirect the preview to a file. If you run the program using this
preview file as SOURCE, the analysis will be skipped. This is useful if you want
to tweak the result of the analysis.

Notes:
- Duplicate files in either folder are skipped.
- Only regular files are processed. In particular, empty folders and symbolic
links are ignored.
`

// We attach a hash digest to the path so that we can update partial hashes with
// the rolling-checksum function.
type fileID struct {
	path string
	h    hash.Hash
}

var unsolvable = fileID{path: SEPARATOR}

// A fileMatch stores two fileIDs with matching content. A match can be partial,
// and further processing can disprove it.
// - If 'sourceID==nil', this is a dummy match. It means that a file of the same
// size with a longer partialHash has been processed.
// - If 'targetID==nil', a match is yet to be found.
// - If 'targetID==&unsolvable', several TARGET files conflict together for this
// SOURCE file; the entry should be skipped.
type fileMatch struct {
	sourceID *fileID
	targetID *fileID
}

// partialHash is used as a key to identify the file content.
// The 'size' field should always be set, while the 'pos' and 'hash' fields
// are computed only when required. No hash has been computed when 'pos==0'.
type partialHash struct {
	size int64
	pos  int64
	hash string
}

// rollingChecksum returns io.EOF on the last roll.
// The caller need not open `file`; it must however close it. This manual
// management avoids opening and closing the file repeatedly.
func rollingChecksum(fid *fileID, key *partialHash, file **os.File) (err error) {
	if *file == nil {
		*file, err = os.Open(fid.path)
		if err != nil {
			return
		}
	}

	buf := [BLOCKSIZE]byte{}
	n, err := (*file).ReadAt(buf[:], key.pos*BLOCKSIZE)
	if err != nil && err != io.EOF {
		return
	}
	fid.h.Write(buf[:n])
	key.pos++
	key.hash = string(fid.h.Sum(nil))
	return
}
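
// An illustrative sketch (assumed usage, not code from this file): rolling a
// checksum to end-of-file computes the complete hash of a file.
//
//	id, key := newFileEntry(path, size) // placeholder path and size
//	var f *os.File
//	var err error
//	for err == nil {
//		err = rollingChecksum(&id, &key, &f)
//	}
//	// err is io.EOF on success; key.hash now covers the whole file.
//	if f != nil {
//		f.Close()
//	}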

func newFileEntry(path string, size int64) (fileID, partialHash) {
	return fileID{path: path, h: md5.New()}, partialHash{size: size}
}

func visitSource(root string, entries map[partialHash]fileMatch) {
	// Change folder to 'root' so that 'root' does not get stored in fileID.path.
	oldroot, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	err = os.Chdir(root)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Chdir(oldroot)

	visitor := func(input string, info os.FileInfo, ignored error) error {
		if !info.Mode().IsRegular() {
			return nil
		}

		inputID, inputKey := newFileEntry(input, info.Size())

		var err error
		var inputFile, conflictFile *os.File
		defer func() {
			if inputFile != nil {
				inputFile.Close()
			}
		}()
		defer func() {
			if conflictFile != nil {
				conflictFile.Close()
			}
		}()

		// Skip dummy matches.
		v, ok := entries[inputKey]
		for ok && v.sourceID == nil && err != io.EOF {
			err = rollingChecksum(&inputID, &inputKey, &inputFile)
			if err != nil && err != io.EOF {
				log.Println(err)
				return nil
			}
			v, ok = entries[inputKey]
		}

		if ok && v.sourceID == nil {
			log.Printf("Source duplicate: %v (%x)\n", inputID.path, inputKey.hash)
			return nil
		} else if !ok {
			entries[inputKey] = fileMatch{sourceID: &inputID}
			return nil
		}

		// Else there is a conflict.
		conflictKey := inputKey
		conflictID := entries[inputKey].sourceID

		for inputKey == conflictKey && err == nil {
			// Set dummy value to mark the key as visited for future files.
			entries[inputKey] = fileMatch{}

			err = rollingChecksum(&inputID, &inputKey, &inputFile)
			if err != nil && err != io.EOF {
				// Read error. Drop input.
				log.Println(err)
				return nil
			}

			err = rollingChecksum(conflictID, &conflictKey, &conflictFile)
			if err != nil && err != io.EOF {
				// Read error. We will replace conflict with input.
				log.Println(err)
				break
			}
		}

		if inputKey == conflictKey && err == io.EOF {
			entries[inputKey] = fileMatch{}
			log.Printf("Source duplicate: %v (%x)\n", inputID.path, inputKey.hash)
			log.Printf("Source duplicate: %v (%x)\n", conflictID.path, conflictKey.hash)
		} else {
			// Resolved conflict.
			entries[inputKey] = fileMatch{sourceID: &inputID}
			if err == nil || err == io.EOF {
				// Re-add conflicting file except on read error.
				entries[conflictKey] = fileMatch{sourceID: conflictID}
			}
		}

		return nil
	}

	// Since we do not stop on read errors while walking, the returned error is
	// always nil.
	filepath.Walk(".", visitor)
}

// See comments in visitSource.
func visitTarget(root, sourceRoot string, entries map[partialHash]fileMatch) {
	oldroot, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	err = os.Chdir(root)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Chdir(oldroot)

	visitor := func(input string, info os.FileInfo, ignored error) error {
		if !info.Mode().IsRegular() {
			return nil
		}

		inputID, inputKey := newFileEntry(input, info.Size())

		var err error
		var inputFile, conflictFile, sourceFile *os.File
		defer func() {
			if inputFile != nil {
				inputFile.Close()
			}
		}()
		defer func() {
			if conflictFile != nil {
				conflictFile.Close()
			}
		}()
		defer func() {
			if sourceFile != nil {
				sourceFile.Close()
			}
		}()

		// Skip dummy matches.
		v, ok := entries[inputKey]
		for ok && v.sourceID == nil && err != io.EOF {
			err = rollingChecksum(&inputID, &inputKey, &inputFile)
			if err != nil && err != io.EOF {
				log.Println(err)
				return nil
			}
			v, ok = entries[inputKey]
		}

		if ok && v.sourceID == nil {
			log.Printf("Target duplicate match: %v (%x)\n", inputID.path, inputKey.hash)
			return nil
		} else if ok && v.targetID != nil && v.targetID == &unsolvable {
			// An unresolved conflict happened previously.
			log.Printf("Target duplicate: %v (%x), source match: %v\n", inputID.path, inputKey.hash, v.sourceID.path)
			return nil
		} else if !ok {
			// No matching file in source.
			return nil
		} else if v.targetID == nil {
			// First match.
			entries[inputKey] = fileMatch{sourceID: entries[inputKey].sourceID, targetID: &inputID}
			return nil
		}

		// Else there is a conflict.
		sourceKey := inputKey
		sourceID := entries[inputKey].sourceID
		conflictKey := inputKey
		conflictID := entries[inputKey].targetID

		for inputKey == conflictKey && inputKey == sourceKey && err == nil {
			// Set dummy value to mark the key as visited for future files.
			entries[inputKey] = fileMatch{}

			// Since we change folders, we don't have to store the root in
			// fileID, nor do we have to compute sourceRoot's realpath to open
			// the file from this point.
			os.Chdir(oldroot)
			os.Chdir(sourceRoot)
			err = rollingChecksum(sourceID, &sourceKey, &sourceFile)
			os.Chdir(oldroot)
			os.Chdir(root)
			if err != nil && err != io.EOF {
				// Read error. Drop all entries.
				log.Println(err)
				return nil
			}

			err = rollingChecksum(&inputID, &inputKey, &inputFile)
			inputErr := err
			if err != nil && err != io.EOF {
				// Read error. Drop input.
				log.Println(err)
				// We don't break now as there is still a chance that the
				// conflicting file matches the source.
			}

			err = rollingChecksum(conflictID, &conflictKey, &conflictFile)
			if err != nil && err != io.EOF {
				// Read error. We will replace conflict with input if the
				// latter has been read correctly.
				log.Println(err)
				break
			}

			if inputErr != nil && inputErr != io.EOF {
				break
			}
		}

		if inputKey == sourceKey && inputKey == conflictKey && err == io.EOF {
			log.Printf("Target duplicate: %v (%x), source match: %v\n", inputID.path, inputKey.hash, v.sourceID.path)
			log.Printf("Target duplicate: %v (%x), source match: %v\n", conflictID.path, conflictKey.hash, v.sourceID.path)
			// We mark the source file with an unresolved conflict for future
			// target files.
			entries[sourceKey] = fileMatch{sourceID: sourceID, targetID: &unsolvable}
		} else if inputKey == sourceKey && inputKey != conflictKey {
			// Resolution: drop conflicting entry.
			entries[sourceKey] = fileMatch{sourceID: sourceID, targetID: &inputID}
		} else if conflictKey == sourceKey && conflictKey != inputKey {
			// Resolution: drop input entry.
			entries[sourceKey] = fileMatch{sourceID: sourceID, targetID: conflictID}
		} else if conflictKey != sourceKey && inputKey != sourceKey {
			// Resolution: drop both entries.
			entries[sourceKey] = fileMatch{sourceID: sourceID}
		}
		// Else we drop all entries.

		return nil
	}

	filepath.Walk(".", visitor)
}

// Rename files as specified in renameOps.
// Chains and cycles may occur. See the implementation details.
func processRenames(root string, renameOps, reverseOps map[string]string, clobber bool) {
	// Change folder since the renames are made relative to 'root'.
	oldroot, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	err = os.Chdir(root)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Chdir(oldroot)

	for oldpath, newpath := range renameOps {
		if oldpath == newpath {
			continue
		}

		cycleMarker := oldpath

		// Go forward to the end of the chain or the cycle.
		for newpath != cycleMarker {
			_, ok := renameOps[newpath]
			if !ok {
				break
			}
			oldpath = newpath
			newpath = renameOps[newpath]
		}

		// Break the cycle if any.
		tmp := ""
		if cycleMarker == newpath {
			f, err := ioutil.TempFile(".", APPLICATION)
			if err != nil {
				log.Fatal(err)
			}
			newpath = f.Name()
			tmp = newpath
			f.Close()
			delete(reverseOps, cycleMarker)
		}

		// Process the chain of renames. Renaming can still fail, in which case
		// we output the error and go on with the chain.
		for oldpath != "" {
			err = os.MkdirAll(filepath.Dir(newpath), 0777)
			if err != nil {
				log.Println(err)
			} else {
				var err error
				if clobber {
					err = os.Rename(oldpath, newpath)
					if err != nil {
						log.Println(err)
					}
				} else {
					err = os.Link(oldpath, newpath)
					if err != nil {
						log.Println(err)
					} else {
						err = os.Remove(oldpath)
						if err != nil {
							log.Println(err)
						}
					}
				}
				if err == nil {
					log.Printf("Rename '%v' -> '%v'", oldpath, newpath)
				}
			}

			// Go backward.
			delete(renameOps, oldpath)
			newpath = oldpath
			oldpath = reverseOps[oldpath]
		}

		// Restore the cycle if need be.
		if tmp != "" {
			err = os.Rename(tmp, cycleMarker)
			if err != nil {
				log.Println(err)
			} else {
				log.Printf("Rename '%v' -> '%v'", tmp, cycleMarker)
			}
		}
	}
}

func init() {
	log.SetFlags(0)
}

func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %v SOURCE TARGET\n\n", os.Args[0])
		fmt.Fprintln(os.Stderr, usage)
		fmt.Fprintln(os.Stderr, "Options:")
		flag.PrintDefaults()
	}
	var flagClobber = flag.Bool("f", false, "Overwrite existing files in TARGET.")
	var flagProcess = flag.Bool("p", false, "Rename the files in TARGET.")
	var flagVersion = flag.Bool("v", false, "Print version and exit.")
	flag.Parse()

	if *flagVersion {
		fmt.Println(APPLICATION, VERSION, COPYRIGHT)
		return
	}
	if flag.Arg(0) == "" || flag.Arg(1) == "" {
		flag.Usage()
		return
	}

	renameOps := make(map[string]string)
	reverseOps := make(map[string]string)

	s, err := os.Stat(flag.Arg(0))
	if err != nil {
		log.Fatal(err)
	}
	if s.IsDir() {
		entries := make(map[partialHash]fileMatch)
		log.Printf(":: Analyzing '%v'", flag.Arg(0))
		visitSource(flag.Arg(0), entries)
		log.Printf(":: Analyzing '%v'", flag.Arg(1))
		visitTarget(flag.Arg(1), flag.Arg(0), entries)

		for _, v := range entries {
			if v.targetID != nil && v.targetID != &unsolvable && v.targetID.path != v.sourceID.path {
				renameOps[v.targetID.path] = v.sourceID.path
				reverseOps[v.sourceID.path] = v.targetID.path
			}
		}
	} else {
		// SOURCE is a preview file: load the renames from it.
		buf, err := ioutil.ReadFile(flag.Arg(0))
		if err != nil {
			log.Fatal(err)
		}
		err = json.Unmarshal(buf, &renameOps)
		if err != nil {
			log.Fatal(err)
		}

		for oldpath, newpath := range renameOps {
			if oldpath == newpath {
				delete(renameOps, oldpath)
				continue
			}
			_, err := os.Stat(flag.Arg(1) + SEPARATOR + oldpath)
			if err != nil && os.IsNotExist(err) {
				// Remove non-existing entries.
				delete(renameOps, oldpath)
				continue
			}
			reverseOps[newpath] = oldpath
		}
	}

	if *flagProcess {
		log.Println(":: Processing renames")
		processRenames(flag.Arg(1), renameOps, reverseOps, *flagClobber)
	} else {
		log.Println(":: Previewing renames")
		// There should be no error.
		buf, _ := json.MarshalIndent(renameOps, "", "\t")
		os.Stdout.Write(buf)
		fmt.Println()
	}
}