@@ -177,6 +177,17 @@ def view(i):
     # The help thing
     center("--- for comment commands list type 'help' --- ")

+    # List of commands for the autocomplete feature.
+    complete([
+        "help",
+        "read",
+        "channel",
+        "comments",
+        "reply",
+        "delete",
+        "edit"
+        ])
+
     # let's implement commands
     while True:
         c = input(typing_dots())
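The complete() call above comes from the project's own helpers. As a rough idea of what such a helper can look like, here is a minimal sketch built on Python's readline module; the name complete and this implementation are assumptions for illustration, not the actual flbry code:

# Illustrative sketch only; the real flbry helper may be implemented differently.
import readline

def complete(words):
    def completer(text, state):
        # Return the state-th command that starts with what was typed so far.
        matches = [w for w in words if w.startswith(text)]
        return matches[state] if state < len(matches) else None
    readline.set_completer(completer)
    readline.parse_and_bind("tab: complete")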
@@ -204,6 +215,19 @@ def view(i):
         elif c.startswith("reply"):
             c = c + ' '
             post(i["claim_id"], c[c.find(" "):], i["comment_id"])
+        elif c == "delete":
+            out = check_output(["flbry/lbrynet", "comment", "abandon", i["comment_id"]])
+            out = json.loads(out)
+            try:
+                if out["abandoned"] == True:
+                    center("Comment deleted!", "bdgr")
+                    break
+            except:
+                if out["message"].startswith("Couldn't find channel with channel_id"):
+                    center("You can't delete a comment you didn't post", "bdrd")
+        elif c.startswith("edit"):
+            c = c + ' '
+            update(i, c[c.find(" "):])


 def post(claim_id, args, parent_id=""):
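A pattern that repeats throughout these changes is "run flbry/lbrynet with check_output, then json.loads the result". Purely as a suggestion, and not something that exists in the patch, that repetition could be factored out like this:

# Hypothetical helper, not part of the patch: run an lbrynet subcommand
# and decode the JSON it prints.
import json
from subprocess import check_output

def lbrynet(*args):
    out = check_output(["flbry/lbrynet", *args])
    return json.loads(out)

# e.g. out = lbrynet("comment", "abandon", comment_id)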
@@ -240,6 +264,7 @@ def post(claim_id, args, parent_id=""):
             os.system(a+" /tmp/fastlbrycommentwriter.txt")

             center("Press Enter when the file is ready and saved.")
+            input()

             text = open("/tmp/fastlbrycommentwriter.txt", "r")
             text = text.read()
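The added input() is what actually pauses the flow: os.system() returns as soon as the editor command does, and GUI editors that detach from the terminal can return before the user has finished editing, so the program waits for an explicit Enter before reading the temp file back. A compact sketch of the same round trip, with the editor command and the temp path as stand-ins rather than the patch's exact code:

# Sketch of the external-editor round trip used in post() (editor/path are stand-ins).
import subprocess

def edit_text(initial_text, editor="nano", path="/tmp/fastlbrycommentwriter.txt"):
    with open(path, "w") as f:
        f.write(initial_text)
    subprocess.call([editor, path])  # blocks until the editor process exits
    input("Press Enter when the file is ready and saved. ")
    with open(path) as f:
        return f.read()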
@@ -271,6 +296,288 @@ def post(claim_id, args, parent_id=""):

     out = json.loads(out)
     if "message" in out:
-        print(" "+clr["bbrd"]+clr["bold"]+wdth(out["message"], 89)+clr["norm"])
+        center("ERROR! "+out["message"], "bdrf")
+    else:
+        center("Comment is sent.", "bdgr")
+
+def inbox(opt=10):
+
+    # This function returns the latest comments from the latest
+    # publications. Similar to an email inbox, but with a limitation.
+
+    # There is no system in the SDK to implement a history of comments
+    # seamlessly. So I need to cache a large file of comments, or
+    # do something clever. I think there will be a few options.
+
+    # You noticed the opt=10 preset at the top. It's the default value.
+    # Basically the user might type one of 4 things.
+
+    # inbox
+    # inbox 40 (or any number whatsoever)
+    # inbox all
+    # inbox cashed
+
+    # Each one runs a slightly different algorithm to get the inbox
+    # comments.
+
+    # inbox
+
+    # This will use the predefined 10 and read the comments of the last
+    # 10 publications. It will combine them with the pre-cached ones
+    # for the user to view. As you may imagine, giving it a number, as
+    # in:
+
+    # inbox 40
+    # inbox 2
+    # inbox 50
+
+    # will load that number of publications, update the cache with them
+    # and then present it to the user.
+
+    # inbox all
+
+    # This one will take the longest, but might be useful for some users.
+    # It will go through all publications and cache the comments from
+    # all of them.
+
+    # inbox cashed
+
+    # This one is the fastest of them. It will only read the cache file
+    # and present it to the user. Useful, for instance, when you want to
+    # quickly go back to the inbox without loading anything at all.
+
+
+    try:
+        opt = int(opt)
+        reached = opt
+        goal = opt
+    except:
+        goal = 0
+        if opt == "all":
+            reached = True
+        else:
+            reached = False
+
+    # Updating the cache file (inbox.json)
+    page = 0
+    items_total = 0
+    current_item = 0
+
+    try:
+        with open('inbox.json') as json_file:
+            comments_cache = json.load(json_file)
+    except:
+        comments_cache = []
+
+    checked_publications = []
+
+    while reached > 0:
+
+        if type(reached) == int:
+            reached = reached - 50
+
+        page = page + 1
+        page_size = 50
+
+        # Getting data about publications.
+
+        if page != 1:
+            out = check_output(["flbry/lbrynet",
+                                "stream", "list",
+                                '--page='+str(page),
+                                '--page_size='+str(page_size),
+                                "--no_totals"])
+        else:
+            out = check_output(["flbry/lbrynet",
+                                "stream", "list",
+                                '--page='+str(page),
+                                '--page_size='+str(page_size)])
+
+        # Now we want to parse the JSON.
+        items = []
+        try:
+            out = json.loads(out)
+            items = out["items"]
+        except:
+            break
+
+        if not items:
+            break
+
+        if page == 1:
+            # Getting totals to calculate the progress bar.
+            if reached is True:
+                items_total = out["total_items"]
+            else:
+                try:
+                    items_total = int(opt)
+                except:
+                    items_total = 0
+
+        # Going through the publications in the items list.
+
+        for publication in items:
+
+            # Skip duplicate publications (like when you edited
+            # a publication).
+            if publication["name"] in checked_publications:
+                continue
+            checked_publications.append(publication["name"])
+
+            current_item = current_item + 1
+
+            # If above the requested amount.
+            if current_item > items_total:
+                break
+
+            # Draw the progress bar.
+            progress_bar(current_item, items_total, publication["name"])
+
+            # Let's now get all the comments.
+            claim_id = publication["claim_id"]
+
+            comment_page = 0
+
+            while True:
+
+                comment_page = comment_page + 1
+
+                cout = check_output(["flbry/lbrynet",
+                                     "comment", "list", '--claim_id='+claim_id,
+                                     '--page='+str(comment_page),
+                                     '--page_size='+str(50),
+                                     '--include_replies'])
+
+                try:
+                    cout = json.loads(cout)
+                except:
+                    break
+
+                if "items" not in cout:
+                    break
+                for i in cout["items"]:
+
+                    # I want to add a few things into the comment data.
+                    i["publication_url"] = publication["permanent_url"]
+                    i["publication_name"] = publication["name"]
+                    try:
+                        i["publication_title"] = publication["value"]["title"]
+                    except:
+                        i["publication_title"] = publication["name"]
+
+                    if i not in comments_cache:
+                        comments_cache.append(i)
+
+    print()
+
+    # Let's sort the comments based on the time they were sent.
+    comments_cache = sorted(comments_cache, key=lambda k: k['timestamp'], reverse=True)
+
+    with open('inbox.json', 'w') as fp:
+        json.dump(comments_cache, fp, indent=4)
+
+    # Now that we have the comments cached and ready, I can start
+    # actually showing them.
+
+    w, h = tsize()
+    page_size = (h-5)
+    page = 0
+
+    while True:
+
+        d = {"categories":["Tip LBC", "Comments", "Publication", "Channel", "Preview"],
+             "size":[1,1,4,2,4],
+             "data":[]}
+
+        items = []
+
+        for n, i in enumerate(comments_cache):
+
+            startfrom = int(page * page_size)
+            endat = int(startfrom + page_size)
+
+            if n in range(startfrom, endat):
+
+                items.append(i)
+
+                preview = "---!Failed Loading comment---"
+                support = 0
+                replies = 0
+                where = "[some publication]"
+                bywho = "@Anonymous"
+
+                try:
+                    comment = i["comment"]
+                    preview = comment.replace("\n", " ")
+                    where = i["publication_title"]
+                    support = i["support_amount"]
+                    bywho = i["channel_name"]
+                    replies = i["replies"]
+
+                except:
+                    pass
+
+                d["data"].append([support, replies, where, bywho, preview])
+        table(d)
+
+        # Tell the user that they might want to load more.
+        center("---type 'more' to load more---")
+
+
+        # Making sure that we stop every time a new page is reached.
+        c = input(typing_dots())
+        if c == "more":
+            page = page + 1
+            continue
+
+        try:
+            c = int(c)
+        except:
+            return
+
+        view(items[c])
+        c = input(typing_dots())
+
+def update(i, args):
+    comment = i["comment"]
+
+    if len(args) > 1:
+        a = args.split()[0]
+
+        try:
+            text = open(a, "r")
+            text = text.read()
+        except:
+            text = open("/tmp/fastlbrycommentwriter.txt", "w")
+            text.write(comment)
+            text.close()
+
+            import os
+            os.system(a+" /tmp/fastlbrycommentwriter.txt")
+
+            center("Press Enter when the file is ready and saved.")
+            input()
+
+            text = open("/tmp/fastlbrycommentwriter.txt", "r")
+            text = text.read()
     else:
-        print(" "+clr["bdgr"]+clr["bold"]+wdth("Comment is sent.", 89)+clr["norm"])
+        print("Comment: "+comment)
+        text = input("Edited comment: ")
+
+    out = check_output(["flbry/lbrynet",
+                        "comment", "update", "--comment_id="+i["comment_id"],
+                        "--comment="+text])
+    out = json.loads(out)
+    try:
+        if out["message"].startswith("Couldn't find channel with channel_id"):
+            center("You can't edit a comment that isn't yours", "bdrd")
+    except:
+        center("Comment edited!", "bdgr")
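And for readers who want to see what ends up in inbox.json: each cached entry is the comment object returned by lbrynet plus the three publication_* fields added above. The field names below are the ones the code actually reads; the values are made-up placeholders, not real data:

# Example of one cached inbox entry (placeholder values, real field names).
example_entry = {
    "comment": "Nice video!",
    "comment_id": "abc123",
    "claim_id": "def456",
    "channel_name": "@SomeViewer",
    "support_amount": 0,
    "replies": 2,
    "timestamp": 1628000000,
    "publication_url": "lbry://@SomeChannel#a/some-video#b",
    "publication_name": "some-video",
    "publication_title": "Some Video",
}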