Showing 25 changed files with 659 additions and 130 deletions
... | ... | @@ -17,7 +17,7 @@ endef |
17 | 17 | |
18 | 18 | start: |
19 | 19 | systemfd --no-pid -s 0.0.0.0:3000 -- \ |
20 | - cargo watch -i static/ -s "PROFILE=$(PROFILE) make run" | |
20 | + cargo watch -i static/ -i var/ -s "PROFILE=$(PROFILE) make run" | |
21 | 21 | |
22 | 22 | wasm: |
23 | 23 | $(call msg,BUILD WASM UI) |
... | ... | @@ -35,6 +35,22 @@ run: build wasm |
35 | 35 | release: |
36 | 36 | docker build -t artshop -f build/Dockerfile . |
37 | 37 | |
38 | +devdb: | |
39 | + docker network create mariadb-dev-network | |
40 | + docker run --detach --network mariadb-dev-network --name mariadb-dev \ | |
41 | + -p 3306:3306 \ | |
42 | + --env MARIADB_USER=artshop \ | |
43 | + --env MARIADB_PASSWORD=123456 \ | |
44 | + --env MARIADB_ROOT_PASSWORD=123456 mariadb:latest | |
45 | + | |
46 | +enterdb: | |
47 | + docker exec -it mariadb-dev mysql -D artshop -u artshop -p | |
48 | +# docker run -it --network mariadb-dev-network --rm mariadb:latest \ | |
49 | +# mysql -h mariadb-dev -u artshop -p | |
50 | + | |
51 | +rootdb: | |
52 | + docker exec -it mariadb-dev mysql -p | |
53 | + | |
38 | 54 | clean: |
39 | 55 | cargo clean |
40 | 56 | rm -Rf ./static/ui | ... | ... |
artshop.toml
0 → 100644
... | ... | @@ -20,3 +20,15 @@ pub struct MarkdownDiffJson { |
20 | 20 | pub id: i32, |
21 | 21 | pub date_created: String, |
22 | 22 | } |
23 | + | |
24 | +#[derive(Clone, Debug, Serialize, Deserialize)] | |
25 | +pub struct ImageJson { | |
26 | + pub upload_uuid :Option<Vec<u8>>, | |
27 | + pub uuid :Option<Vec<u8>>, | |
28 | + pub size :i32, | |
29 | + pub dim_x :Option<i32>, | |
30 | + pub dim_y :Option<i32>, | |
31 | + pub mime_type :String, | |
32 | + pub date_created :String, | |
33 | + pub date_updated :String | |
34 | +} | ... | ... |
... | ... | @@ -59,3 +59,8 @@ web::block already for write actions. |
59 | 59 | Simple explanation on technical terms synchronous, asynchronous, concurrent |
60 | 60 | and parallel: |
61 | 61 | [synchronous vs. asynchronous vs. concurrent vs. parallel](https://medium.com/plain-and-simple/synchronous-vs-asynchronous-vs-concurrent-vs-parallel-4342bfb8b9f2, 'synchronous vs. asynchronous vs. concurrent vs. parallel') |
62 | + | |
63 | +# Docker mariadb preparation | |
64 | + | |
65 | +    CREATE DATABASE artshop CHARACTER SET = 'utf8mb3' COLLATE = 'utf8mb3_general_ci'; |
66 | +    GRANT ALL PRIVILEGES ON artshop.* TO 'artshop'@'%'; | ... | ...
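As a quick sanity check that the container from `make devdb` and the statements above line up, a one-off diesel connection test can be run against the dev database. This is only a sketch; the connection URL is an assumption derived from the Makefile credentials (user artshop, password 123456 on 127.0.0.1:3306) and is not defined anywhere in this change set.

```rust
// Hypothetical connectivity check -- the URL is an assumption based on `make devdb`,
// not something defined in this repository.
use diesel::prelude::*;
use diesel::mysql::MysqlConnection;

fn main() {
    let url = "mysql://artshop:123456@127.0.0.1:3306/artshop";
    let connection = MysqlConnection::establish(url)
        .expect("could not connect to the mariadb-dev container");

    // A trivial round trip confirms that CREATE DATABASE and GRANT took effect.
    diesel::sql_query("SELECT 1")
        .execute(&connection)
        .expect("SELECT 1 failed");

    println!("artshop database is reachable");
}
```

The same style of URL is presumably what DATABASE_URL, or database.url in artshop.toml, has to contain, since the server builds its pool from a MysqlConnection manager.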
1 | 1 | -- Your SQL goes here |
2 | -CREATE TABLE "users" ( | |
3 | - id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, | |
2 | +CREATE TABLE users ( | |
3 | + id INTEGER PRIMARY KEY AUTO_INCREMENT NOT NULL, | |
4 | 4 | name TEXT NOT NULL, |
5 | 5 | address TEXT NOT NULL, |
6 | 6 | date_created TEXT NOT NULL |
7 | 7 | ); |
8 | 8 | |
9 | -INSERT INTO | |
10 | - "users"(name, address, date_created) | |
9 | +INSERT INTO users | |
10 | + (name, address, date_created) | |
11 | 11 | VALUES |
12 | - ("John", "123 Av Q", "Today"); | |
12 | + ('John', '123 Av Q', 'Today'); | ... | ... |
1 | -CREATE TABLE "markdowns" ( | |
2 | - id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, | |
1 | +CREATE TABLE markdowns ( | |
2 | + id INTEGER PRIMARY KEY AUTO_INCREMENT NOT NULL, | |
3 | 3 | name VARCHAR(256) NOT NULL, |
4 | 4 | content TEXT NOT NULL, |
5 | 5 | number_of_versions INTEGER NOT NULL DEFAULT (1), |
... | ... | @@ -13,7 +13,7 @@ CREATE TABLE "markdowns" ( |
13 | 13 | -- The date_created here should be set to the value of |
14 | 14 | -- markdown.date_updated when the patch was created. This diff_id |
15 | 15 | -- is always current last max diff_id for given markdown_id plus 1. |
16 | -CREATE TABLE "markdown_diffs" ( | |
16 | +CREATE TABLE markdown_diffs ( | |
17 | 17 | markdown_id INTEGER NOT NULL, |
18 | 18 | diff_id INTEGER NOT NULL, |
19 | 19 | diff BLOB NOT NULL, |
... | ... | @@ -21,84 +21,120 @@ CREATE TABLE "markdown_diffs" ( |
21 | 21 | PRIMARY KEY (markdown_id, diff_id) |
22 | 22 | ); |
23 | 23 | |
24 | -INSERT INTO | |
25 | - "markdowns"(name, content, date_created, date_updated) | |
24 | +INSERT INTO markdowns | |
25 | + (name, content, date_created, date_updated) | |
26 | 26 | VALUES |
27 | - ( "md-example" | |
28 | - , "# Ein sehr schöner Titel | |
27 | + ( 'md-example' | |
28 | +  , "# Markdown Cheatsheet |
29 | 29 | |
30 | -## Ein sinnloser Text | |
30 | +## Überschriften | |
31 | +--- | |
31 | 32 | |
32 | -Hier kommt ganz viel Text der irgendwie auch was machen soll, aber Zeilen | |
33 | -sollen auch im <pre> Eingabefeld automatisch umbrechen. | |
33 | +# # <H1> | |
34 | +## ## <H2> | |
35 | +### ### <H3> | |
36 | +#### #### <H4> | |
37 | +##### ##### <H5> | |
38 | +###### ###### <H6> | |
34 | 39 | |
35 | -Ein neuner Paragraph beginnt nach einer Leerzeile. | |
36 | -Ein Umbruch entsteht wie gewohnt durch 2 spaces am Ende einer | |
37 | -Zeile. | |
40 | +## Absätze und Umbrüche | |
41 | +--- | |
38 | 42 | |
39 | -## Fußnoten | |
43 | +Ein einfacher Zeilenumbruch | |
44 | +verändert den Textfluss nicht. Spaces haben auch keinen Einfluss auf den Textfluss. Es ist selten eine gute Idee, große Abstände innerhalb eines Textes zu haben; sollte man diese aber wirklich brauchen, kann man auf inline html zurückgreifen. |
40 | 45 | |
41 | -Vllt. kann man sogar so was wie Fussnoten[^1] in den Markdown Text | |
42 | -einbinden... diese kann man dann irgendwo einbauen... | |
46 | +Leerzeilen erzeugen neue Paragraphen. Um im formatierten Text einen Zeilenumbruch zu erzeugen, verwendet man zwei Spaces vor einem Zeilenumbruch im Eingabetext. |
47 | +Dies führt nicht zu einem Paragraphen. | |
43 | 48 | |
49 | +## Hervorhebungen | |
44 | 50 | --- |
45 | 51 | |
46 | -[^1]: Zum Beispiel so... | |
47 | - | |
48 | -[^2]: Oder so... | |
49 | - | |
50 | -## inline html ist im Moment auch ok. | |
52 | +- *kursive (schwache) Hervorhebung* | |
53 | +- **fette (starke) Hervorhebung** | |
54 | +- ***kursiv und fette (sehr starke) Hervorhebung*** | |
55 | +- *schwache mit **eingebetteter starker** Hervorhebung* | |
56 | +- **starke mit *eingebetteter schwacher* Hervorhebung** | |
57 | +- ~~durchgestrichen~~ | |
58 | +- <u>unterstreichen nur mit HTML</u> | |
59 | +- ~~*durchgestrichen kursiv*~~ | |
60 | +- **~~fett durchgestrichen~~** | |
61 | +- *<u>kursiv unterstrichen</u>* | |
62 | +- <u>**unterstrichen fett**</u> | |
63 | + | |
64 | +## Gedanken- und Binde--strich | |
65 | +--- | |
51 | 66 | |
52 | -<pre>Lustigerweise geht auch inline html</pre> | |
67 | +Ein einfaches Minus bildet im Text den Bindestrich *hyphen* (-). Zwei oder mehr Minus werden zu verschieden langen Gedankenstrichen *dash* (--, ---). In regulärem |
68 | +Text kommen zwei Minuszeichen hintereinander in der Regel nicht vor. In |
69 | +Programmcode, der in ASCII geschrieben ist, allerdings schon. Will man also zwei |
70 | +Minuszeichen separat darstellen, kann man diese in inline code packen (`--, ---`). |
53 | 71 | |
54 | -## Listen for fun | |
72 | +## Listen | |
73 | +--- | |
55 | 74 | |
56 | -- ein Liste | |
57 | - - mehr Liste | |
58 | - - diesmal als Subliste. | |
59 | -- und was auch immer... | |
60 | - 1. und nun Verschachtelt. | |
61 | - 1. Numeriert. | |
62 | - 2. huhuhu | |
63 | - 3. wie bitte. | |
64 | - 2. juhu | |
65 | -- noch mehr Liste | |
75 | +- erster Listeneintrag | |
76 | + - erster Unterlisteneintrag | |
77 | + - zweiter Unterlisteneintrag | |
78 | +- zweiter Listeneintrag | |
79 | + 1. erster nummerierter Listeneintrag | |
80 | + 1. erster nummerierter Unterlisteneintrag | |
81 | + 2. zweiter nummerierter Unterlisteneintrag | |
82 | + 3. dritter nummerierter Unterlisteneintrag | |
83 | + 2. zweiter nummerierter Listeneintrag | |
84 | +- dritter Listeneintrag | |
85 | + - [x] erster Auswahllisteneintrag | |
86 | + - [ ] zweiter Auswahllisteneintrag | |
87 | + - [x] dritter Auswahllisteneintrag | |
88 | +- vierter Listeneintrag | |
89 | + 1. [ ] erster nummerierter Auswahllisteneintrag | |
90 | + 2. [x] zweiter nummerierter Auswahllisteneintrag | |
91 | + | |
92 | +## Code Blöcke | |
93 | +--- | |
66 | 94 | |
67 | -## Preformated Text | |
95 | + Dies ist ein codeblock durch Einrückung. | |
96 | + In diesem werden keine Formatierungen | |
97 | + vorgenommen. | |
68 | 98 | |
69 | -```Hier kommt der code``` | |
99 | +Mit backticks lassen sich Codeblöcke mit Sprachinformation | |
100 | +erstellen. Theoretisch könnte für solche Blöcke dann Syntax-Highlighting eingebaut werden. |
70 | 101 | |
71 | -Und hier der Paragraph mit `inline code` der auch sehr schön aussehen kann. | |
102 | +```shell | |
103 | +#!/usr/bin/env sh |
72 | 104 | |
73 | -## Hervorhebungen | |
105 | +FOO="foo" | |
74 | 106 | |
75 | -Man kann Text auch sehr schön formatieren. So ist es z.B. möglich | |
76 | -*Worte kursiv zu stellen* oder man kann **sie auch fett schreiben**. | |
77 | -Als spezielles feature kann der von mir verwendete Parser auch | |
78 | -~~Texte durchstreichen~~. | |
107 | +function func() { | |
108 | + local BAR=bar | |
109 | +} | |
110 | +``` | |
79 | 111 | |
80 | -Nur wenn man Text <u>unterstreichen</u> will muss man auf inline html | |
81 | -zurückgreifen. | |
112 | +Auch in den Fließtext lassen sich `inline code` Elemente einfügen, um z.B. einzelne Kommandos hervorzuheben. |
82 | 113 | |
83 | -## Blockquotes und horizontale Linie | |
114 | +## Zitate und horizontale Linie | |
115 | +--- | |
84 | 116 | |
85 | -> Dies sollte jetzt als quote erkennbar sein. | |
117 | +> Dies ist ein Zitat. | |
86 | 118 | > |
87 | ->> Auch diese sind schachtelbar | |
119 | +>> Zitate können verschachtelt sein. | |
88 | 120 | > |
89 | -> Und weiter gehts. | |
121 | +> Wir können also zitieren was jemand zitiert hat. | |
122 | +> Solange die Zeilen ohne Unterbrechung mit einem > | |
123 | +> beginnen, bleibt es ein Zitat. |
90 | 124 | |
91 | ---- | |
92 | 125 | |
93 | -> Aber dies ist ein neuer quote. | |
126 | +> Sobald eine Zeile ohne führendes > auftaucht, endet ein |
127 | +> Zitat. | |
94 | 128 | |
95 | 129 | ## Links |
130 | +--- | |
96 | 131 | |
97 | -Ein link kann inline geschrieben werden, so wie diese zu | |
132 | +Ein Link kann inline geschrieben werden, so wie diese zu | |
98 | 133 | [Heise.de](https://heise.de/ 'Heise.de') oder als Referenz am Ende des Textes |
99 | 134 | wie diese nach [Telepolis][lnk1]. |
100 | 135 | |
101 | -## Bilder koennte man auch einbinden. | |
136 | +## Bilder | |
137 | +--- | |
102 | 138 | |
103 | 139 | Wie Links lassen sich auch Bilder wie mein |
104 | 140 | ![Gravatar](https://www.gravatar.com/avatar/fd016c954ec4ed3a4315eeed6c8b97b8) |
... | ... | @@ -106,36 +142,57 @@ in den Text ein. |
106 | 142 | |
107 | 143 | Im Fließtext sieht das allerdings ein bisschen dumm aus es sei denn man hat |
108 | 144 | entsprechend angepasste styles. Besser scheint mir daher Bilder nur zwischen |
109 | -Paragraphen zu plazieren. | |
145 | +Paragraphen zu platzieren. | |
110 | 146 | |
111 | 147 | ![Gravatar](https://www.gravatar.com/avatar/fd016c954ec4ed3a4315eeed6c8b97b8) |
112 | 148 | |
113 | 149 | Etwas so wie hier. |
114 | 150 | |
115 | -## Tabellen sollten auch gehen... | |
151 | +## Tabellen | |
152 | +--- | |
116 | 153 | |
117 | 154 | Die folgenden Beispiele kommen von [markdown.land][lnk2]: |
118 | 155 | |
119 | -| Item | Price | # In stock | | |
120 | -|--------------|-----------|------------| | |
121 | -| Juicy Apples | 1.99 | *8* | | |
122 | -| Bananas | **1.89** | 5234 | | |
156 | +| Artikel | Preis | Bestand | | |
157 | +|---------------|-----------|------------| | |
158 | +| Saftige Äpfel | 1.99 | *8* | | |
159 | +| Bananen | **1.89** | 5234 | | |
123 | 160 | |
124 | 161 | Man braucht sie nicht schön zu formatieren. |
125 | 162 | |
126 | -Item | Price | # In stock | |
163 | +Artikel | Preis | Bestand | |
127 | 164 | ---|---|--- |
128 | -Juicy Apples | 1.99 | 739 | |
129 | -Bananas | 1.89 | 6 | |
165 | +Saftige Äpfel | 1.99 | 739 | |
166 | +Bananen | 1.89 | 6 | |
130 | 167 | |
131 | 168 | und die Spaltenausrichtung kann man auch einstellen: |
132 | 169 | |
133 | -| Item | Price | # In stock | | |
134 | -|--------------|:-----:|-----------:| | |
135 | -| Juicy Apples | 1.99 | 739 | | |
136 | -| Bananas | 1.8900 | 6 | | |
170 | +| Artikel | Preis | Bestand | | |
171 | +|---------------|:-------:|-----------:| | |
172 | +| Saftige Äpfel | 1.99 | 739 | | |
173 | +| Bananen | 1.8900 | 6 | | |
174 | + | |
175 | +## Fußnoten | |
176 | +--- | |
177 | + | |
178 | +Man kann auch verlinkte Fußnoten[^1] in den Text | |
179 | +einbinden. Die Fußnote selber kann dann an beliebiger Stelle im Text stehen. |
180 | + | |
181 | +--- | |
182 | +[^1]: Zum Beispiel so. | |
183 | + | |
184 | +[^2]: Diese Fußnote hat keine Verlinkung im Text. | |
185 | + | |
186 | +## HTML einbetten | |
187 | + | |
188 | +<ul> | |
189 | +<li> | |
190 | +<pre>Man kann auch direkt HTML tags einbetten, | |
191 | +wie hier.</pre> | |
192 | +</li> | |
193 | +</ul> | |
137 | 194 | |
138 | 195 | [lnk1]: https://heise.de/tp/ 'Telepolis' |
139 | -[lnk2]: https://markdown.land/markdown-table 'markdown.land'" | |
140 | - , "Today" | |
141 | - , "Today" ); | |
196 | +[lnk2]: https://markdown.land/markdown-table 'markdown.land' " |
197 | + , '2022-01-29 21:33:34.000' | |
198 | + , '2022-01-29 21:33:34.000' ); | ... | ... |
migrations/2022-01-28-163413_images/down.sql
0 → 100644
migrations/2022-01-28-163413_images/up.sql
0 → 100644
1 | +CREATE TABLE images ( | |
2 | + id INTEGER PRIMARY KEY AUTO_INCREMENT NOT NULL, | |
3 | + -- identical uuid means identical file. | |
4 | + upload_uuid BLOB(16) UNIQUE, | |
5 | + uuid BLOB(16) UNIQUE, | |
6 | + size INTEGER NOT NULL, | |
7 | + dim_x INTEGER, | |
8 | + dim_y INTEGER, | |
9 | + mime_type VARCHAR(256) NOT NULL, | |
10 | + date_created TEXT NOT NULL, | |
11 | + date_updated TEXT NOT NULL | |
12 | +); | ... | ... |
... | ... | @@ -15,14 +15,18 @@ anyhow = "1.0" |
15 | 15 | artshop-common = { path = "../common" } |
16 | 16 | async-std = "^1.10" |
17 | 17 | chrono = "0.4.15" |
18 | -diesel = { version = "1.4.7", features = ["sqlite", "r2d2"]} | |
18 | +diesel = { version = "1.4.7", features = ["mysql", "sqlite", "r2d2"]} | |
19 | 19 | diffy = "0.2" |
20 | 20 | dotenv = "0.15.0" |
21 | 21 | flate2 = "^1.0" |
22 | 22 | futures = "^0.3" |
23 | +futures-util = { version = "0", features = ["std"] } | |
24 | +image = "^0.23" | |
23 | 25 | listenfd = "0.3" |
26 | +once_cell = "^1.9" | |
24 | 27 | r2d2 = "0.8.9" |
25 | 28 | serde = { version = "^1.0", features = ["derive"] } |
26 | 29 | serde_derive = "1.0" |
27 | 30 | serde_json = "1.0" |
28 | -uuid = { version = "^0.8", features = ["v4"] } | |
31 | +toml = "^0.5" | |
32 | +uuid = { version = "^0.8", features = ["v4", "v5"] } | ... | ... |
server/src/config.rs
0 → 100644
1 | +use std::fs::File; | |
2 | +use std::io::Read; | |
3 | +use once_cell::sync::Lazy; | |
4 | +use serde::Deserialize; | |
5 | + | |
6 | +#[derive(Debug, Deserialize)] | |
7 | +struct Database { url :Option<String> } | |
8 | + | |
9 | +#[derive(Debug, Deserialize)] | |
10 | +struct Locations { upload :String | |
11 | + , images :String } | |
12 | + | |
13 | +#[derive(Debug, Deserialize)] | |
14 | +pub(crate) struct Config { namespace :String | |
15 | + , database :Database | |
16 | + , locations :Locations } | |
17 | + | |
18 | +pub(crate) static CONFIG :Lazy<Config> = Lazy::new(|| Config::load()); | |
19 | + | |
20 | +impl Config { | |
21 | + pub fn load() -> Config { | |
22 | + let filename = std::env::var("CONFIG").unwrap(); | |
23 | + | |
24 | + let mut buffer = vec![]; | |
25 | + let mut file = File::open(filename).unwrap(); | |
26 | + | |
27 | + file.read_to_end(&mut buffer).unwrap(); | |
28 | + let mut config :Config = toml::from_slice(&buffer).unwrap(); | |
29 | + | |
30 | + config.database.url = match config.database.url { | |
31 | + Some(url) => Some(url), | |
32 | + None => std::env::var("DATABASE_URL").ok() | |
33 | + }; | |
34 | + | |
35 | + config | |
36 | + } | |
37 | + | |
38 | + pub fn namespace(&self) -> &str { | |
39 | + self.namespace.as_str() | |
40 | + } | |
41 | + | |
42 | + pub fn upload_dir(&self) -> &str { | |
43 | + self.locations.upload.as_str() | |
44 | + } | |
45 | + | |
46 | + pub fn images_dir(&self) -> &str { | |
47 | + self.locations.images.as_str() | |
48 | + } | |
49 | +} | ... | ... |
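artshop.toml itself has no preview above, so the layout Config expects is only visible indirectly through these structs. The following is a sketch of a unit test pinning that layout down; every literal value in it (the namespace, the var/ paths, the database URL) is an assumption, not something taken from the actual config file.

```rust
// Sketch of a test for the deserialization step of Config::load; all literal
// values are assumptions, since artshop.toml is not shown in this change set.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parses_a_minimal_config() {
        let raw = br#"
            namespace = "artshop.example.org"

            [database]
            url = "mysql://artshop:123456@127.0.0.1:3306/artshop"

            [locations]
            upload = "var/upload"
            images = "var/images"
        "#;

        let config: Config = toml::from_slice(raw).unwrap();
        assert_eq!(config.namespace(), "artshop.example.org");
        assert_eq!(config.upload_dir(), "var/upload");
        assert_eq!(config.images_dir(), "var/images");
    }
}
```

If the assumed var/ paths are roughly right, they would also explain the new `-i var/` ignore added to the cargo watch invocation in the Makefile.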
... | ... | @@ -7,7 +7,7 @@ use r2d2; |
7 | 7 | type ParentError = Option<Pin<Box<dyn std::error::Error>>>; |
8 | 8 | |
9 | 9 | #[derive(Debug)] |
10 | -pub(crate) struct Error { | |
10 | +pub struct Error { | |
11 | 11 | source: ParentError, |
12 | 12 | message: String, |
13 | 13 | } |
... | ... | @@ -77,3 +77,11 @@ impl From<ParsePatchError> for Error { |
77 | 77 | } |
78 | 78 | } |
79 | 79 | } |
80 | + | |
81 | +impl From<uuid::Error> for Error { | |
82 | + fn from(source: uuid::Error) -> Self { | |
83 | + Self { source: Some(Box::pin(source)) | |
84 | + , message: String::from("UUID error") | |
85 | + } | |
86 | + } | |
87 | +} | ... | ... |
1 | 1 | #[macro_use] |
2 | 2 | extern crate diesel; |
3 | 3 | |
4 | +mod config; | |
4 | 5 | mod error; |
5 | 6 | mod models; |
6 | 7 | mod routes; |
7 | 8 | mod schema; |
9 | +mod uuid; | |
10 | +mod upload_worker; | |
8 | 11 | |
9 | -use crate::routes::markdown::*; | |
10 | -use crate::routes::other::*; | |
11 | -use crate::routes::user::*; | |
12 | +use models::image::Image; | |
13 | +use routes::markdown::*; | |
14 | +use routes::other::*; | |
15 | +use routes::user::*; | |
16 | +use routes::upload::*; | |
12 | 17 | |
13 | 18 | use actix_web::{guard, web, App, HttpResponse, HttpServer}; |
19 | +use async_std::channel::Sender; | |
14 | 20 | use diesel::r2d2::{self, ConnectionManager}; |
15 | -use diesel::SqliteConnection; | |
21 | +use diesel::MysqlConnection; | |
16 | 22 | use listenfd::ListenFd; |
17 | -use routes::markdown::get_markdown; | |
18 | -use routes::upload::upload; | |
23 | +use std::sync::Arc; | |
24 | +use std::ops::Deref; | |
19 | 25 | |
20 | -pub(crate) type Pool = r2d2::Pool<ConnectionManager<SqliteConnection>>; | |
26 | +pub(crate) type Pool = r2d2::Pool<ConnectionManager<MysqlConnection>>; | |
27 | + | |
28 | +#[derive(Clone)] | |
29 | +pub struct AppData { | |
30 | + pub database_pool: Arc<Pool>, | |
31 | + pub tx_upload_worker: Sender<(Arc<Pool>, Image)>, | |
32 | +} | |
21 | 33 | |
22 | 34 | #[actix_rt::main] |
23 | 35 | async fn main() -> std::io::Result<()> { |
... | ... | @@ -25,13 +37,20 @@ async fn main() -> std::io::Result<()> { |
25 | 37 | |
26 | 38 | dotenv::dotenv().ok(); |
27 | 39 | |
40 | + println!("CONFIG: {:?}", config::CONFIG.deref()); | |
41 | + | |
42 | + let tx_upload_worker = upload_worker::launch(); | |
43 | + | |
28 | 44 | let database_url = std::env::var("DATABASE_URL").expect("NOT FOUND"); |
29 | 45 | let database_pool = Pool::builder() |
30 | 46 | .build(ConnectionManager::new(database_url)) |
31 | 47 | .unwrap(); |
32 | 48 | |
49 | + let database_pool = Arc::new(database_pool); | |
50 | + let app_data = AppData { database_pool, tx_upload_worker }; | |
51 | + | |
33 | 52 | let server = HttpServer::new(move || { |
34 | - App::new() . data(database_pool.clone()) | |
53 | + App::new() . data(app_data.clone()) | |
35 | 54 | . service(actix_files::Files::new("/static", "./static")) |
36 | 55 | . service( web::scope("/api/v0") |
37 | 56 | . service( web::resource("/upload") | ... | ... |
server/src/models/image.rs
0 → 100644
1 | +use std::sync::Arc; | |
2 | + | |
3 | +use crate::error::*; | |
4 | +use crate::{schema::*, Pool}; | |
5 | +use diesel::{Connection, insert_into, delete, update}; | |
6 | +use diesel::prelude::*; | |
7 | +use serde::{Deserialize, Serialize}; | |
8 | + | |
9 | +#[derive(Clone, Debug, Serialize, Deserialize, Queryable, Identifiable)] | |
10 | +pub struct Image { | |
11 | + pub id :i32, | |
12 | + pub upload_uuid :Option<Vec<u8>>, | |
13 | + pub uuid :Option<Vec<u8>>, | |
14 | + pub size :i32, | |
15 | + pub dim_x :Option<i32>, | |
16 | + pub dim_y :Option<i32>, | |
17 | + pub mime_type :String, | |
18 | + pub date_created :String, | |
19 | + pub date_updated :String | |
20 | +} | |
21 | + | |
22 | +#[derive(Debug, Insertable)] | |
23 | +#[table_name = "images"] | |
24 | +pub struct ImageNew<'a> { | |
25 | + pub upload_uuid :Option<&'a [u8]>, | |
26 | + pub size :i32, | |
27 | + pub mime_type :&'a str, | |
28 | + pub date_created :&'a str, | |
29 | + pub date_updated :&'a str | |
30 | +} | |
31 | + | |
32 | +#[derive(Clone, Debug, Serialize, Deserialize, AsChangeset)] | |
33 | +#[table_name = "images"] | |
34 | +pub struct Upload { | |
35 | + pub upload_uuid :Option<Vec<u8>>, | |
36 | + pub size :i32, | |
37 | + pub mime_type :String, | |
38 | +} | |
39 | + | |
40 | +#[derive(Clone, Debug, Serialize, Deserialize, AsChangeset)] | |
41 | +#[table_name = "images"] | |
42 | +#[changeset_options(treat_none_as_null = "true")] | |
43 | +pub struct ImagePatch { | |
44 | + pub upload_uuid :Option<Vec<u8>>, | |
45 | + pub uuid :Option<Vec<u8>>, | |
46 | + pub size :i32, | |
47 | + pub dim_x :Option<i32>, | |
48 | + pub dim_y :Option<i32>, | |
49 | + pub mime_type :String, | |
50 | + pub date_updated :String | |
51 | +} | |
52 | + | |
53 | +impl From<Image> for ImagePatch { | |
54 | + fn from(image: Image) -> Self { | |
55 | + let now = chrono::Local::now().naive_local(); | |
56 | + | |
57 | + Self { upload_uuid :image.upload_uuid | |
58 | + , uuid :image.uuid | |
59 | + , size :image.size | |
60 | + , dim_x :image.dim_x | |
61 | + , dim_y :image.dim_y | |
62 | + , mime_type :image.mime_type | |
63 | + , date_updated :format!("{}", now) | |
64 | + } | |
65 | + } | |
66 | +} | |
67 | + | |
68 | +#[macro_export] | |
69 | +macro_rules! upload_uuid { | |
70 | + ($u:expr) => { | |
71 | + match &$u.upload_uuid { | |
72 | + Some(uuid) => $crate::uuid::Uuid::try_from(uuid.as_slice()).ok(), | |
73 | + None => None, | |
74 | + } | |
75 | + }; | |
76 | +} | |
77 | + | |
78 | +#[macro_export] | |
79 | +macro_rules! upload_filename { | |
80 | + ($u:expr) => { | |
81 | + $crate::upload_uuid!($u) | |
82 | + . and_then(|uuid| Some(format!( "{}/upload_{}" | |
83 | + , $crate::config::CONFIG.upload_dir() | |
84 | + , uuid ))) | |
85 | + }; | |
86 | +} | |
87 | + | |
88 | + | |
89 | +pub(crate) fn upload( pool: Arc<Pool> | |
90 | + , item: Upload ) -> Result<Image> { | |
91 | + use crate::schema::images::dsl::*; | |
92 | + let db_connection = pool.get()?; | |
93 | + | |
94 | + let now = chrono::Local::now().naive_local(); | |
95 | + let new_image = ImageNew { | |
96 | + upload_uuid : item.upload_uuid.as_deref(), | |
97 | + size : item.size, | |
98 | + mime_type : &item.mime_type, | |
99 | + date_created : &format!("{}", now), | |
100 | + date_updated : &format!("{}", now) | |
101 | + }; | |
102 | + | |
103 | + Ok(db_connection.transaction(|| { | |
104 | + insert_into(images) . values(&new_image) | |
105 | + . execute(&db_connection)?; | |
106 | + images . order(id.desc()) | |
107 | + . first::<Image>(&db_connection) | |
108 | + })?) | |
109 | +} | |
110 | + | |
111 | +pub(crate) fn finalize( pool: Arc<Pool> | |
112 | + , item: Image ) -> Result<Image> { | |
113 | + use crate::schema::images::dsl::*; | |
114 | + | |
115 | + let db_connection = pool.get()?; | |
116 | + let item_uuid = item.uuid.clone(); | |
117 | + | |
118 | + match images . filter(uuid.eq(item_uuid)) | |
119 | + . first::<Image>(&db_connection) { | |
120 | + Ok(image) => { | |
121 | + delete(images.find(item.id)).execute(&db_connection)?; | |
122 | + Ok(image) | |
123 | + }, | |
124 | + Err(_) => { | |
125 | + let image = images.find(item.id); | |
126 | + let patch = ImagePatch::from(item.clone()); | |
127 | + update(image).set(&patch).execute(&db_connection)?; | |
128 | + Ok(item) | |
129 | + }, | |
130 | + } | |
131 | +} | ... | ... |
1 | -use crate::models::markdown; | |
2 | -use crate::Pool; | |
1 | +use crate::{models::markdown, AppData}; | |
3 | 2 | |
4 | 3 | use actix_web::{Error, HttpResponse, web}; |
5 | 4 | use anyhow::Result; |
... | ... | @@ -11,36 +10,40 @@ pub struct Patchset { |
11 | 10 | patch: Option<i32>, |
12 | 11 | } |
13 | 12 | |
14 | -pub async fn get_markdowns(pool: web::Data<Pool>) | |
13 | +pub async fn get_markdowns(app_data: web::Data<AppData>) | |
15 | 14 | -> Result<HttpResponse, Error> |
16 | 15 | { |
17 | - Ok( web::block(move || markdown::get_markdowns(pool.into_inner())) | |
16 | + let pool = app_data.database_pool.clone(); | |
17 | + | |
18 | + Ok( web::block(move || markdown::get_markdowns(pool)) | |
18 | 19 | . await |
19 | 20 | . map(|markdowns| HttpResponse::Ok().json(markdowns)) |
20 | 21 | . map_err(|_| HttpResponse::InternalServerError())? |
21 | 22 | ) |
22 | 23 | } |
23 | 24 | |
24 | -pub async fn get_markdown( pool: web::Data<Pool> | |
25 | +pub async fn get_markdown( app_data: web::Data<AppData> | |
25 | 26 | , name: web::Path<String> |
26 | 27 | , patch: web::Query<Patchset> |
27 | 28 | ) -> Result<HttpResponse, Error> |
28 | 29 | { |
29 | - let pool = pool.into_inner(); | |
30 | + let pool = app_data.database_pool.clone(); | |
30 | 31 | let name = name.into_inner(); |
31 | 32 | let patch = patch.into_inner(); |
32 | 33 | |
33 | - Ok( web::block(move || markdown::get_markdown(pool, name.as_str(), patch.patch)) | |
34 | + Ok( web::block(move || markdown::get_markdown( pool | |
35 | + , name.as_str() | |
36 | + , patch.patch) ) | |
34 | 37 | . await |
35 | 38 | . map(|markdowns| HttpResponse::Ok().json(markdowns)) |
36 | 39 | . map_err(|_| HttpResponse::InternalServerError())? |
37 | 40 | ) |
38 | 41 | } |
39 | 42 | |
40 | -pub async fn get_patches( pool: web::Data<Pool> | |
43 | +pub async fn get_patches( app_data: web::Data<AppData> | |
41 | 44 | , name: web::Path<String> |
42 | 45 | ) -> Result<HttpResponse, Error> { |
43 | - let pool = pool.into_inner(); | |
46 | + let pool = app_data.database_pool.clone(); | |
44 | 47 | let name = name.into_inner(); |
45 | 48 | |
46 | 49 | Ok( web::block(move || markdown::get_patches(pool, name.as_str())) |
... | ... | @@ -50,12 +53,12 @@ pub async fn get_patches( pool: web::Data<Pool> |
50 | 53 | ) |
51 | 54 | } |
52 | 55 | |
53 | -pub async fn update_markdown( pool: web::Data<Pool> | |
56 | +pub async fn update_markdown( app_data: web::Data<AppData> | |
54 | 57 | , name: web::Path<String> |
55 | 58 | , item: web::Json<MarkdownJson> ) |
56 | 59 | -> Result<HttpResponse, Error> |
57 | 60 | { |
58 | - let pool = pool.into_inner(); | |
61 | + let pool = app_data.database_pool.clone(); | |
59 | 62 | let name = name.into_inner(); |
60 | 63 | let item = item.into_inner(); |
61 | 64 | ... | ... |
1 | 1 | use actix_web::{Error, HttpResponse, web}; |
2 | 2 | use anyhow::Result; |
3 | -use futures::stream::StreamExt; | |
4 | -use async_std::{fs::OpenOptions, io::WriteExt}; | |
5 | -use uuid::Uuid; | |
3 | +use async_std::fs::DirBuilder; | |
4 | +use futures::{stream::StreamExt, AsyncWriteExt}; | |
5 | +use async_std::fs::OpenOptions; | |
6 | 6 | |
7 | -pub async fn upload(mut body: web::Payload) -> Result<HttpResponse, Error> | |
7 | +use crate::{AppData, models::image::{Upload, self}, upload_filename}; | |
8 | +use crate::config::CONFIG; | |
9 | +use std::convert::TryFrom; | |
10 | + | |
11 | +pub async fn upload( app_data :web::Data<AppData> | |
12 | + , mut body :web::Payload | |
13 | + , request :web::HttpRequest ) -> Result<HttpResponse, Error> | |
8 | 14 | { |
15 | + let pool = app_data.database_pool.clone(); | |
16 | + let worker = app_data.tx_upload_worker.clone(); | |
17 | + | |
18 | + let upload_uuid = Some(uuid::Uuid::new_v4().as_bytes().to_vec()); | |
19 | + let size = request.headers().get("content-length") | |
20 | + . and_then(|h| Some(h.to_str().unwrap().parse::<i32>())) | |
21 | + . unwrap().unwrap(); | |
22 | + let mime_type = String::from( request.headers().get("content-type") | |
23 | + . and_then(|h| Some(h.to_str().unwrap())) | |
24 | + . unwrap() ); | |
25 | + | |
26 | + let upload = Upload { | |
27 | + upload_uuid, | |
28 | + size, | |
29 | + mime_type | |
30 | + }; | |
31 | + | |
32 | + DirBuilder::new() . recursive(true) | |
33 | + . create(CONFIG.upload_dir()) | |
34 | + . await?; | |
35 | + | |
36 | + let upload_filename = upload_filename!(upload).unwrap(); | |
9 | 37 | let mut output = OpenOptions::new(); |
10 | - output . create(true) | |
11 | - . write(true); | |
12 | 38 | let mut output = output |
13 | - . open(format!("/tmp/upload_{}", Uuid::new_v4())) | |
14 | - . await | |
15 | - . unwrap(); | |
16 | - | |
39 | + . create(true) | |
40 | + . write(true) | |
41 | + . open(&upload_filename).await?; | |
17 | 42 | while let Some(item) = body.next().await { |
18 | - output.write_all(&item?).await.unwrap(); | |
43 | + output.write_all(&item?).await?; | |
19 | 44 | } |
45 | + output.flush().await.unwrap(); | |
20 | 46 | |
21 | - Ok(HttpResponse::Ok().finish()) | |
47 | + let pool_for_worker = pool.clone(); | |
48 | + Ok( match web::block(move || image::upload(pool, upload)).await { | |
49 | + Ok(image) => { | |
50 | + // TODO handle this as error response... | |
51 | + worker.send((pool_for_worker, image.clone())).await.unwrap(); | |
52 | + HttpResponse::Ok().json(image) | |
53 | + }, | |
54 | + Err(_) => HttpResponse::InternalServerError().finish() | |
55 | + } ) | |
22 | 56 | } | ... | ... |
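For manually exercising the new handler, a small client that POSTs a file to /api/v0/upload (port 3000 as in the Makefile's systemfd line) is enough. The sketch below assumes reqwest with the blocking feature as a dev-dependency and a local example.png; neither is part of this change set.

```rust
// Hypothetical manual test; assumes `reqwest = { version = "0.11", features = ["blocking"] }`
// as a dev-dependency, which this change set does not add.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let bytes = std::fs::read("example.png")?; // any local image file

    let response = reqwest::blocking::Client::new()
        .post("http://127.0.0.1:3000/api/v0/upload")
        .header("content-type", "image/png") // the handler stores this as mime_type
        .body(bytes)                          // content-length becomes the size column
        .send()?;

    // On success the handler answers with the freshly inserted image row as JSON.
    println!("{}", response.text()?);
    Ok(())
}
```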
1 | 1 | use crate::models::user::{self, Action}; |
2 | -use crate::Pool; | |
2 | +use crate::AppData; | |
3 | 3 | |
4 | 4 | use actix_web::{Error, HttpResponse, web}; |
5 | 5 | use anyhow::Result; |
6 | 6 | |
7 | -pub async fn create_user( pool: web::Data<Pool> | |
7 | +pub async fn create_user( app_data: web::Data<AppData> | |
8 | 8 | , item: web::Json<user::UserJson> ) |
9 | 9 | -> Result<HttpResponse, Error> |
10 | 10 | { |
11 | - let pool = pool.into_inner(); | |
11 | + let pool = app_data.database_pool.clone(); | |
12 | 12 | let item = item.into_inner(); |
13 | 13 | |
14 | 14 | Ok(web::block(move || user::create_user(pool, item)) |
... | ... | @@ -21,19 +21,21 @@ pub async fn create_user( pool: web::Data<Pool> |
21 | 21 | . map_err(|_| HttpResponse::InternalServerError())?) |
22 | 22 | } |
23 | 23 | |
24 | -pub async fn get_users(pool: web::Data<Pool>) | |
24 | +pub async fn get_users(app_data: web::Data<AppData>) | |
25 | 25 | -> Result<HttpResponse, Error> |
26 | 26 | { |
27 | - Ok(web::block(move || user::get_users(pool.into_inner())) | |
27 | + let pool = app_data.database_pool.clone(); | |
28 | + | |
29 | + Ok(web::block(move || user::get_users(pool)) | |
28 | 30 | . await |
29 | 31 | . map(|users| HttpResponse::Ok().json(users)) |
30 | 32 | . map_err(|_| HttpResponse::InternalServerError())?) |
31 | 33 | } |
32 | 34 | |
33 | -pub async fn get_user(pool: web::Data<Pool>, id: web::Path<i32>) | |
35 | +pub async fn get_user(app_data: web::Data<AppData>, id: web::Path<i32>) | |
34 | 36 | -> Result<HttpResponse, Error> |
35 | 37 | { |
36 | - let pool = pool.into_inner(); | |
38 | + let pool = app_data.database_pool.clone(); | |
37 | 39 | let id = id.into_inner(); |
38 | 40 | |
39 | 41 | Ok(web::block(move || user::get_user(pool, id)) |
... | ... | @@ -42,10 +44,10 @@ pub async fn get_user(pool: web::Data<Pool>, id: web::Path<i32>) |
42 | 44 | . map_err(|_| HttpResponse::InternalServerError())?) |
43 | 45 | } |
44 | 46 | |
45 | -pub async fn delete_user(pool: web::Data<Pool>, id: web::Path<i32>) | |
47 | +pub async fn delete_user(app_data: web::Data<AppData>, id: web::Path<i32>) | |
46 | 48 | -> Result<HttpResponse, Error> |
47 | 49 | { |
48 | - let pool = pool.into_inner(); | |
50 | + let pool = app_data.database_pool.clone(); | |
49 | 51 | let id = id.into_inner(); |
50 | 52 | |
51 | 53 | Ok(web::block(move || user::delete_user(pool, id)) |
... | ... | @@ -54,12 +56,12 @@ pub async fn delete_user(pool: web::Data<Pool>, id: web::Path<i32>) |
54 | 56 | . map_err(|_| HttpResponse::InternalServerError())?) |
55 | 57 | } |
56 | 58 | |
57 | -pub async fn update_user( pool: web::Data<Pool> | |
59 | +pub async fn update_user( app_data: web::Data<AppData> | |
58 | 60 | , id: web::Path<i32> |
59 | 61 | , item: web::Json<user::UserJson> ) |
60 | 62 | -> Result<HttpResponse, Error> |
61 | 63 | { |
62 | - let pool = pool.into_inner(); | |
64 | + let pool = app_data.database_pool.clone(); | |
63 | 65 | let id = id.into_inner(); |
64 | 66 | let item = item.into_inner(); |
65 | 67 | ... | ... |
1 | 1 | table! { |
2 | - markdown_diffs (markdown_id, diff_id) { | |
3 | - markdown_id -> Integer, | |
4 | - diff_id -> Integer, | |
5 | - diff -> Binary, | |
2 | + images (id) { | |
3 | + id -> Integer, | |
4 | + upload_uuid -> Nullable<Tinyblob>, | |
5 | + uuid -> Nullable<Tinyblob>, | |
6 | + size -> Integer, | |
7 | + dim_x -> Nullable<Integer>, | |
8 | + dim_y -> Nullable<Integer>, | |
9 | + mime_type -> Varchar, | |
6 | 10 | date_created -> Text, |
11 | + date_updated -> Text, | |
7 | 12 | } |
8 | 13 | } |
9 | 14 | |
10 | 15 | table! { |
11 | 16 | markdowns (id) { |
12 | 17 | id -> Integer, |
13 | - name -> Text, | |
18 | + name -> Varchar, | |
14 | 19 | content -> Text, |
15 | 20 | number_of_versions -> Integer, |
16 | 21 | date_created -> Text, |
... | ... | @@ -19,6 +24,15 @@ table! { |
19 | 24 | } |
20 | 25 | |
21 | 26 | table! { |
27 | + markdown_diffs (markdown_id, diff_id) { | |
28 | + markdown_id -> Integer, | |
29 | + diff_id -> Integer, | |
30 | + diff -> Blob, | |
31 | + date_created -> Text, | |
32 | + } | |
33 | +} | |
34 | + | |
35 | +table! { | |
22 | 36 | users (id) { |
23 | 37 | id -> Integer, |
24 | 38 | name -> Text, |
... | ... | @@ -28,7 +42,8 @@ table! { |
28 | 42 | } |
29 | 43 | |
30 | 44 | allow_tables_to_appear_in_same_query!( |
31 | - markdown_diffs, | |
45 | + images, | |
32 | 46 | markdowns, |
47 | + markdown_diffs, | |
33 | 48 | users, |
34 | 49 | ); | ... | ... |
server/src/upload_worker.rs
0 → 100644
1 | +use std::{io::{SeekFrom, ErrorKind}, sync::Arc}; | |
2 | +use actix_web::web; | |
3 | +use async_std::{ fs::{File, DirBuilder, copy, metadata, remove_file} | |
4 | + , channel::{Sender, Receiver, bounded} | |
5 | + , path::PathBuf | |
6 | + , io::Result }; | |
7 | +use futures::{ AsyncSeekExt, AsyncReadExt, FutureExt, StreamExt, select | |
8 | + , stream::FuturesUnordered}; | |
9 | + | |
10 | +use crate::{models::image::{Image, finalize}, upload_filename, config::CONFIG, Pool}; | |
11 | +use crate::uuid::Uuid; | |
12 | + | |
13 | +use std::convert::TryFrom; | |
14 | +use image::{io::Reader as ImageReader, GenericImageView}; | |
15 | + | |
16 | +pub fn launch() -> Sender<(Arc<Pool>, Image)> { | |
17 | + let (tx_upload_worker, rx_upload_worker) | |
18 | + : (Sender<(Arc<Pool>, Image)>, Receiver<(Arc<Pool>, Image)>) = bounded(32); | |
19 | + | |
20 | + actix_rt::spawn(async move { | |
21 | + let mut workers = FuturesUnordered::new(); | |
22 | + | |
23 | + loop { | |
24 | + select! { | |
25 | + image = rx_upload_worker.recv().fuse() => { | |
26 | + match image { | |
27 | + Err(_) => break, | |
28 | + Ok((pool, image)) => workers.push(worker(pool, image)), | |
29 | + } | |
30 | + }, | |
31 | + _result = workers.next() => {}, | |
32 | + } | |
33 | + } | |
34 | + | |
35 | + while workers.len() > 0 { | |
36 | + workers.next().await; | |
37 | + } | |
38 | + }); | |
39 | + | |
40 | + tx_upload_worker | |
41 | +} | |
42 | + | |
43 | + | |
44 | +async fn worker(pool :Arc<Pool>, mut image :Image) { | |
45 | + let upload_filename = upload_filename!(image).unwrap(); | |
46 | + let mut f = File::open(&upload_filename).await.unwrap(); | |
47 | + | |
48 | + let mut buf = vec!['.' as u8; 3 * 3 * 4096]; | |
49 | + get_sample(&mut f, buf.as_mut()).await.unwrap(); | |
50 | + let uuid = Uuid::get(CONFIG.namespace(), buf.as_mut()); | |
51 | + let uuid_string = format!("{}", uuid); | |
52 | + | |
53 | + let mut image_path = PathBuf::from(CONFIG.images_dir()); | |
54 | + image_path.push(&uuid_string.as_str()[..2]); | |
55 | + image_path.push(&uuid_string.as_str()[..5]); | |
56 | + | |
57 | + DirBuilder::new() . recursive(true) | |
58 | + . create(&image_path) | |
59 | + . await | |
60 | + . unwrap(); | |
61 | + | |
62 | + image_path.push(&uuid_string); | |
63 | + | |
64 | + image.upload_uuid = None; | |
65 | + image.uuid = Some(uuid.0.as_bytes().to_vec()); | |
66 | + | |
67 | + match metadata(&image_path).await { | |
68 | + Err(e) if e.kind() == ErrorKind::NotFound => { | |
69 | + copy(&upload_filename, &image_path).await.unwrap(); | |
70 | + | |
71 | + let img = ImageReader::open(&image_path).unwrap() | |
72 | + . with_guessed_format().unwrap() | |
73 | + . decode().unwrap(); | |
74 | + let (dim_x, dim_y) = img.dimensions(); | |
75 | + | |
76 | + image.dim_x = Some(dim_x as i32); | |
77 | + image.dim_y = Some(dim_y as i32); | |
78 | + }, | |
79 | + Err(e) => { | |
80 | + let e :Result<()> = Err(e); | |
81 | + e.unwrap(); | |
82 | + }, | |
83 | + Ok(_) => {}, | |
84 | + } | |
85 | + | |
86 | + remove_file(&upload_filename).await.unwrap(); | |
87 | + web::block(move || finalize(pool, image)).await.unwrap(); | |
88 | +} | |
89 | + | |
90 | +async fn read_at( f :&mut File | |
91 | + , pos :SeekFrom | |
92 | + , buf :&mut [u8]) -> std::io::Result<()> { | |
93 | + f.seek(pos).await?; | |
94 | + f.read_exact(buf).await | |
95 | +} | |
96 | + | |
97 | +async fn get_sample( f :&mut File | |
98 | + , buf :&mut [u8]) -> std::io::Result<()> { | |
99 | + let file_len = f.metadata().await?.len(); | |
100 | + let chunk_size = buf.len() / 3; | |
101 | + | |
102 | + read_at(f, SeekFrom::Start(0), &mut buf[0..chunk_size]).await?; | |
103 | + if file_len >= 2 * chunk_size as u64 { | |
104 | + read_at( f | |
105 | + , SeekFrom::End(-(chunk_size as i64)) | |
106 | + , &mut buf[2*chunk_size..]).await?; | |
107 | + } | |
108 | + if file_len >= 3 * chunk_size as u64 { | |
109 | + read_at( f | |
110 | + , SeekFrom::Start((file_len-chunk_size as u64) / 2) | |
111 | + , &mut buf[chunk_size..2*chunk_size]).await?; | |
112 | + } | |
113 | + | |
114 | + Ok(()) | |
115 | +} | ... | ... |
server/src/uuid.rs
0 → 100644
1 | +use std::{fmt::Display, convert::TryFrom}; | |
2 | + | |
3 | +use crate::error::Error; | |
4 | + | |
5 | +#[derive(Clone,Copy,Debug)] | |
6 | +pub struct Uuid(pub uuid::Uuid); | |
7 | + | |
8 | +impl Display for Uuid { | |
9 | + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { | |
10 | + write!(f, "{}", self.0) | |
11 | + } | |
12 | +} | |
13 | + | |
14 | +macro_rules! ns { | |
15 | + ($n:expr) => { | |
16 | + uuid::Uuid::new_v5(&uuid::Uuid::NAMESPACE_DNS, $n.as_bytes()) | |
17 | + } | |
18 | +} | |
19 | + | |
20 | +impl Uuid { | |
21 | + pub fn get(ns: &str, buf: &mut [u8]) -> Self { | |
22 | + Self(uuid::Uuid::new_v5(&ns!(ns), buf)) | |
23 | + } | |
24 | +} | |
25 | + | |
26 | +impl TryFrom<&[u8]> for Uuid { | |
27 | + type Error = Error; | |
28 | + | |
29 | + fn try_from(value: &[u8]) -> Result<Self, Self::Error> { | |
30 | + Ok(Self(uuid::Uuid::from_slice(value)?)) | |
31 | + } | |
32 | +} | ... | ... |
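A short sketch of how this wrapper could be exercised in a test. The namespace literal is an arbitrary assumption; in the server the namespace comes from CONFIG.namespace(), and the byte buffer is the three-chunk sample built in upload_worker.rs.

```rust
// Sketch of a test for the Uuid wrapper; the namespace literal is an assumption.
#[cfg(test)]
mod tests {
    use super::Uuid;
    use std::convert::TryFrom;

    #[test]
    fn v5_uuid_is_deterministic_and_round_trips() {
        let mut sample = b"three sampled chunks of an uploaded file".to_vec();

        // The same namespace and the same sample always give the same UUID,
        // which is what lets identical uploads be deduplicated in the images table.
        let a = Uuid::get("artshop.example.org", &mut sample);
        let b = Uuid::get("artshop.example.org", &mut sample);
        assert_eq!(format!("{}", a), format!("{}", b));

        // The raw bytes stored in images.uuid convert back losslessly.
        let bytes = a.0.as_bytes().to_vec();
        let restored = Uuid::try_from(bytes.as_slice()).unwrap();
        assert_eq!(format!("{}", restored), format!("{}", a));
    }
}
```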