diff options
| author | auric <auric7@protonmail.com> | 2025-09-08 21:27:55 -0500 |
|---|---|---|
| committer | auric <auric7@protonmail.com> | 2025-09-08 21:27:55 -0500 |
| commit | de07b49b3249da05605f8c802b991a8588ab63b3 (patch) | |
| tree | 295d6003f08dfce1d1b779892ce9c0be97d426a3 /archive/st/patch/copyurl.c | |
| parent | 04cfeeb799b4ee6ac990e5d6e1b5302251133d77 (diff) | |
| parent | e61da07522a060da98fa3a56db3d0360469b26cf (diff) | |
Resolved conflict, adopting new file layout
Diffstat (limited to 'archive/st/patch/copyurl.c')
| -rw-r--r-- | archive/st/patch/copyurl.c | 180 |
1 files changed, 180 insertions, 0 deletions
diff --git a/archive/st/patch/copyurl.c b/archive/st/patch/copyurl.c new file mode 100644 index 0000000..90d96db --- /dev/null +++ b/archive/st/patch/copyurl.c @@ -0,0 +1,180 @@
#if COPYURL_HIGHLIGHT_SELECTED_URLS_PATCH
/*
** Paint cells [start, end) of terminal row `row` with the given
** foreground/background colors. Used to highlight the matched URL and to
** restore default colors over a previous highlight.
*/
void
tsetcolor( int row, int start, int end, uint32_t fg, uint32_t bg )
{
	int i = start;
	for( ; i < end; ++i )
	{
		term.line[row][i].fg = fg;
		term.line[row][i].bg = bg;
	}
}

/*
** Return a pointer to the last (right-most) position in `str` that begins
** with any of the `len` prefixes in `find`, or NULL when none matches.
*/
char *
findlastany(char *str, const char** find, size_t len)
{
	char* found = NULL;
	size_t i = 0;	/* size_t: avoid signed/unsigned comparison with `len` */
	size_t n = strlen(str);

	/* guard empty input: `str + n - 1` would otherwise compute an
	 * out-of-bounds pointer (undefined behavior) when n == 0 */
	if (n == 0)
		return NULL;

	for(found = str + n - 1; found >= str; --found) {
		for(i = 0; i < len; i++) {
			if(strncmp(found, find[i], strlen(find[i])) == 0) {
				return found;
			}
		}
	}

	return NULL;
}

/*
** Select and copy the previous url on screen (do nothing if there's no url).
**
** FIXME: doesn't handle urls that span multiple lines; will need to add support
** for multiline "getsel()" first
*/
void
copyurl(const Arg *arg) {
	/* () and [] can appear in urls, but excluding them here will reduce false
	 * positives when figuring out where a given url ends.
	 */
	static char URLCHARS[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
		"abcdefghijklmnopqrstuvwxyz"
		"0123456789-._~:/?#@!$&'*+,;=%";

	static const char* URLSTRINGS[] = {"http://", "https://"};

	/* remove highlighting from previous selection if any */
	if(sel.ob.x >= 0 && sel.oe.x >= 0)
		tsetcolor(sel.nb.y, sel.ob.x, sel.oe.x + 1, defaultfg, defaultbg);

	int i = 0,
	    row = 0,	/* row of current URL */
	    col = 0,	/* column of current URL start */
	    colend = 0,	/* column of last occurrence */
	    passes = 0;	/* how many rows have been scanned */

	/* linestr holds one screen row as a NUL-terminated char string;
	 * Rune-sized elements deliberately over-allocate so the buffer can
	 * never be too small for term.col + terminator */
	char *linestr = calloc(term.col+1, sizeof(Rune));
	char *c = NULL,
	     *match = NULL;

	/* allocation failure: nothing sensible to copy, bail out quietly */
	if (!linestr)
		return;

	/* resume scanning just above the previous match, else from the bottom */
	row = (sel.ob.x >= 0 && sel.nb.y > 0) ? sel.nb.y : term.bot;
	LIMIT(row, term.top, term.bot);

	colend = (sel.ob.x >= 0 && sel.nb.y > 0) ? sel.nb.x : term.col;
	LIMIT(colend, 0, term.col);

	/*
	** Scan from (term.bot,term.col) to (0,0) and find
	** next occurrance of a URL
	*/
	while (passes < term.bot + 2) {
		/* Read in each column of every row until
		** we hit previous occurrence of URL
		*/
		for (col = 0, i = 0; col < colend; ++col,++i) {
			/* truncating Rune to char: only ASCII URL chars matter */
			linestr[i] = term.line[row][col].u;
		}
		linestr[term.col] = '\0';

		if ((match = findlastany(linestr, URLSTRINGS,
				sizeof(URLSTRINGS)/sizeof(URLSTRINGS[0]))))
			break;

		/* wrap from the top row back to the bottom */
		if (--row < term.top)
			row = term.bot;

		colend = term.col;
		passes++;
	}

	if (match) {
		/* must happen before trim */
		selclear();
		sel.ob.x = strlen(linestr) - strlen(match);

		/* trim the rest of the line from the url match */
		for (c = match; *c != '\0'; ++c)
			if (!strchr(URLCHARS, *c)) {
				*c = '\0';
				break;
			}

		/* highlight selection by inverting terminal colors */
		tsetcolor(row, sel.ob.x, sel.ob.x + strlen( match ), defaultbg, defaultfg);

		/* select and copy */
		sel.mode = 1;
		sel.type = SEL_REGULAR;
		sel.oe.x = sel.ob.x + strlen(match)-1;
		sel.ob.y = sel.oe.y = row;
		selnormalize();
		tsetdirt(sel.nb.y, sel.ne.y);
		xsetsel(getsel());
		xclipcopy();
	}

	free(linestr);
}
#else
/* select and copy the previous url on screen (do nothing if there's no url).
 * known bug: doesn't handle urls that span multiple lines (wontfix), depends on multiline "getsel()"
 * known bug: only finds first url on line (mightfix)
 */
void
copyurl(const Arg *arg) {
	/* () and [] can appear in urls, but excluding them here will reduce false
	 * positives when figuring out where a given url ends.
	 */
	static char URLCHARS[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
		"abcdefghijklmnopqrstuvwxyz"
		"0123456789-._~:/?#@!$&'*+,;=%";

	int i, row, startrow;
	/* one screen row as chars; Rune-sized elements over-allocate safely */
	char *linestr = calloc(term.col+1, sizeof(Rune));
	char *c, *match = NULL;

	/* allocation failure: nothing sensible to copy, bail out quietly */
	if (!linestr)
		return;

	/* start just above the previous selection, else at the bottom row */
	row = (sel.ob.x >= 0 && sel.nb.y > 0) ? sel.nb.y-1 : term.bot;
	LIMIT(row, term.top, term.bot);
	startrow = row;

	/* find the start of the last url before selection; scan upward,
	 * wrapping at the top, and stop once we are back at startrow */
	do {
		for (i = 0; i < term.col; ++i) {
			/* truncating Rune to char: only ASCII URL chars matter */
			linestr[i] = term.line[row][i].u;
		}
		linestr[term.col] = '\0';
		if ((match = strstr(linestr, "http://"))
				|| (match = strstr(linestr, "https://")))
			break;
		if (--row < term.top)
			row = term.bot;
	} while (row != startrow);

	if (match) {
		/* must happen before trim */
		selclear();
		sel.ob.x = strlen(linestr) - strlen(match);

		/* trim the rest of the line from the url match */
		for (c = match; *c != '\0'; ++c)
			if (!strchr(URLCHARS, *c)) {
				*c = '\0';
				break;
			}

		/* select and copy */
		sel.mode = 1;
		sel.type = SEL_REGULAR;
		sel.oe.x = sel.ob.x + strlen(match)-1;
		sel.ob.y = sel.oe.y = row;
		selnormalize();
		tsetdirt(sel.nb.y, sel.ne.y);
		xsetsel(getsel());
		xclipcopy();
	}

	free(linestr);
}
#endif // COPYURL_HIGHLIGHT_SELECTED_URLS_PATCH
\ No newline at end of file |
