Compare commits
145 Commits
githubacti
...
add-nohost
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
28851da83c | ||
|
|
aae1cb5147 | ||
|
|
af391fa9a3 | ||
|
|
1046e2faac | ||
|
|
589a4bb69d | ||
|
|
c4aa9735c8 | ||
|
|
e7dce43d4f | ||
|
|
44c16b8cc4 | ||
|
|
157c317d4c | ||
|
|
47ad75c831 | ||
|
|
59aa59e96e | ||
|
|
fd2eed86d2 | ||
|
|
0dca03f7d7 | ||
|
|
5863dda3fa | ||
|
|
c650b46fd4 | ||
|
|
cd0a9b711b | ||
|
|
1364740597 | ||
|
|
6d574aa91c | ||
|
|
f75255f809 | ||
|
|
e3684b779e | ||
|
|
05281a78e7 | ||
|
|
e7cca2a574 | ||
|
|
a2d0e64dd2 | ||
|
|
5240ba2e61 | ||
|
|
b8f11d2daf | ||
|
|
9e088d4166 | ||
|
|
ec244db92f | ||
|
|
b63347341b | ||
|
|
223a5c2d5f | ||
|
|
9c54140259 | ||
|
|
72816cfbf8 | ||
|
|
b5625f23a9 | ||
|
|
5984fab155 | ||
|
|
f6e4a0b669 | ||
|
|
c44ef3fcf3 | ||
|
|
1627a4859a | ||
|
|
12c97076f9 | ||
|
|
465ab2699b | ||
|
|
f932159488 | ||
|
|
deebe963d1 | ||
|
|
c87cc4c88c | ||
|
|
2e6bf83275 | ||
|
|
795791ed9a | ||
|
|
3f0801f1f3 | ||
|
|
0706d6fde9 | ||
|
|
58abab75fa | ||
|
|
07a7dddb8b | ||
|
|
6fbd3ce18b | ||
|
|
04c7f36d6e | ||
|
|
5af143ab8f | ||
|
|
b941c7faf4 | ||
|
|
b87d0b8ecd | ||
|
|
9f4f687010 | ||
|
|
d4132c4170 | ||
|
|
c2a84c4269 | ||
|
|
1c3e854f70 | ||
|
|
145122ee04 | ||
|
|
89471f11e9 | ||
|
|
f6841a42e9 | ||
|
|
6471d9f65e | ||
|
|
83e3ab39e0 | ||
|
|
82da27d7e9 | ||
|
|
a5c3dfdc24 | ||
|
|
37e7d7d518 | ||
|
|
6ee3eab209 | ||
|
|
89521c340e | ||
|
|
f17052b0e8 | ||
|
|
53690965e2 | ||
|
|
dee6bcf8de | ||
|
|
fbd720db74 | ||
|
|
21279f0dd0 | ||
|
|
55af49fc60 | ||
|
|
129bf382df | ||
|
|
d8c775eae2 | ||
|
|
59152063a9 | ||
|
|
32ccc676a1 | ||
|
|
374543d468 | ||
|
|
0a5dd968fa | ||
|
|
5f89271350 | ||
|
|
a0f523ec01 | ||
|
|
6d368c4a2e | ||
|
|
8e98e13922 | ||
|
|
d08fc7cbae | ||
|
|
e7dbac489e | ||
|
|
ed6ef407f4 | ||
|
|
0e846d919d | ||
|
|
d28f61562e | ||
|
|
23678aca1e | ||
|
|
a7e8038d5a | ||
|
|
69463881cf | ||
|
|
9b2af09faa | ||
|
|
f603218e16 | ||
|
|
ba35d13d19 | ||
|
|
a2efdda1f5 | ||
|
|
cbd362c3e6 | ||
|
|
93bdd71fd8 | ||
|
|
c3eb00ae80 | ||
|
|
d3ee6ba72f | ||
|
|
0bbefbd45e | ||
|
|
477ed5f848 | ||
|
|
3b3359cc9a | ||
|
|
c266a92be1 | ||
|
|
082ad182ea | ||
|
|
910498404c | ||
|
|
d78ec2094d | ||
|
|
da78c476ac | ||
|
|
3b219e9f9c | ||
|
|
8442cdc37b | ||
|
|
8f16f53b02 | ||
|
|
1c22e51168 | ||
|
|
1f3cf0e6db | ||
|
|
43f8d51184 | ||
|
|
3eeef86818 | ||
|
|
f474c040ee | ||
|
|
91992d015d | ||
|
|
e744e08d94 | ||
|
|
130cf23f20 | ||
|
|
06e34221ab | ||
|
|
0b4d7aa9de | ||
|
|
136a8b4fc7 | ||
|
|
01332d5e6d | ||
|
|
e40d083c43 | ||
|
|
fa96668946 | ||
|
|
0e9afaeb74 | ||
|
|
506c2a76a8 | ||
|
|
fd5fd97a58 | ||
|
|
371dca6a44 | ||
|
|
24869c3cba | ||
|
|
25d0b51b25 | ||
|
|
57d7276a9f | ||
|
|
789cfa894f | ||
|
|
ca4c13fae5 | ||
|
|
00e656c4bd | ||
|
|
c22a20473d | ||
|
|
41e0c20519 | ||
|
|
3af0e7180b | ||
|
|
5d4507831d | ||
|
|
afa256da3b | ||
|
|
b6dc646e56 | ||
|
|
653bc599ae | ||
|
|
db223ea026 | ||
|
|
afb3ac1ed2 | ||
|
|
0bc035fd63 | ||
|
|
e754e0485d | ||
|
|
d8ba638845 |
@@ -50,8 +50,8 @@ Now edit the metadata at the top of the file.
|
||||
- `description` - used as the meta description tag on the post-page. **required**
|
||||
- `date` - the "_published at_" date, shown on the [blog index page](https://blog.ipfs.io), please update at posting time to reflect current date - **required** (posts will not be displayed until this date on the live blog, but you will see them locally when using `make dev`)
|
||||
- `author` - used to give you credit for your words - **required**
|
||||
- `permalink` - the path to the blog post. Please start and end URLs with a `/` (`/my/url/`). **required**
|
||||
- `tags` - used to categorize the blog post
|
||||
- `permalink` - can be used to override the post URL if needed. Please start and end URLs with a `/` (`/my/url/`).
|
||||
- `header_image` - name of the image displayed on the [blog homepage](https://blog.ipfs.tech/). See [Custom header image](#custom-header-image) for more details.
|
||||
|
||||
#### Custom header image
|
||||
@@ -105,6 +105,12 @@ To build a local copy, run the following:
|
||||
npm start
|
||||
```
|
||||
|
||||
1. On the latest version of Node (>=18) you'll encounter `ERR_OSSL_EVP_UNSUPPORTED` errors. To fix this, either use Node 16 or:
|
||||
|
||||
```bash
|
||||
NODE_OPTIONS=--openssl-legacy-provider npm start
|
||||
```
|
||||
|
||||
1. Open [localhost:8080](http://localhost:8080) in your browser.
|
||||
|
||||
You can close the local server with `CTRL` + `c`. To restart the local server, run `npm start` from inside the `ipfs-blog` directory.
|
||||
|
||||
16
package-lock.json
generated
@@ -6364,9 +6364,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/caniuse-lite": {
|
||||
"version": "1.0.30001470",
|
||||
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001470.tgz",
|
||||
"integrity": "sha512-065uNwY6QtHCBOExzbV6m236DDhYCCtPmQUCoQtwkVqzud8v5QPidoMr6CoMkC2nfp6nksjttqWQRRh75LqUmA==",
|
||||
"version": "1.0.30001549",
|
||||
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001549.tgz",
|
||||
"integrity": "sha512-qRp48dPYSCYaP+KurZLhDYdVE+yEyht/3NlmcJgVQ2VMGt6JL36ndQ/7rgspdZsJuxDPFIo/OzBT2+GmIJ53BA==",
|
||||
"dev": true,
|
||||
"funding": [
|
||||
{
|
||||
@@ -6376,6 +6376,10 @@
|
||||
{
|
||||
"type": "tidelift",
|
||||
"url": "https://tidelift.com/funding/github/npm/caniuse-lite"
|
||||
},
|
||||
{
|
||||
"type": "github",
|
||||
"url": "https://github.com/sponsors/ai"
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -30067,9 +30071,9 @@
|
||||
}
|
||||
},
|
||||
"caniuse-lite": {
|
||||
"version": "1.0.30001470",
|
||||
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001470.tgz",
|
||||
"integrity": "sha512-065uNwY6QtHCBOExzbV6m236DDhYCCtPmQUCoQtwkVqzud8v5QPidoMr6CoMkC2nfp6nksjttqWQRRh75LqUmA==",
|
||||
"version": "1.0.30001549",
|
||||
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001549.tgz",
|
||||
"integrity": "sha512-qRp48dPYSCYaP+KurZLhDYdVE+yEyht/3NlmcJgVQ2VMGt6JL36ndQ/7rgspdZsJuxDPFIo/OzBT2+GmIJ53BA==",
|
||||
"dev": true
|
||||
},
|
||||
"caseless": {
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
>
|
||||
<div class="flex-shrink lg:max-w-sm xl:max-w-xl mb-4 lg:mb-0">
|
||||
<h2 class="type-h2">Stay informed</h2>
|
||||
<p class="mt-2 mr-2">
|
||||
<p class="mt-2 mb-6 mr-2">
|
||||
Sign up for the IPFS newsletter (<router-link
|
||||
:to="latestWeeklyPost ? latestWeeklyPost.path : ''"
|
||||
class="text-blueGreenLight hover:underline"
|
||||
@@ -13,69 +13,43 @@
|
||||
>) for the latest on releases, upcoming developments, community events,
|
||||
and more.
|
||||
</p>
|
||||
<a target="_blank" href="https://ipfs.fyi/newsletter">
|
||||
<button
|
||||
type="button"
|
||||
class="
|
||||
px-3
|
||||
py-2
|
||||
text-white
|
||||
bg-blueGreen
|
||||
font-semibold
|
||||
rounded
|
||||
hover:bg-blueGreenScreen
|
||||
transition
|
||||
duration-300
|
||||
"
|
||||
>
|
||||
Sign up
|
||||
</button>
|
||||
</a>
|
||||
</div>
|
||||
<form
|
||||
|
||||
<div
|
||||
id="mc-embedded-subscribe-form"
|
||||
name="mc-embedded-subscribe-form"
|
||||
class="flex lg:justify-end max-w-lg xl:w-2/5"
|
||||
action="https://ipfs.us4.list-manage.com/subscribe/post?u=25473244c7d18b897f5a1ff6b&id=cad54b2230"
|
||||
method="post"
|
||||
target="_blank"
|
||||
@submit="subscribeClick"
|
||||
>
|
||||
<div id="mc_embed_signup_scroll" class="grid gric-col-2 w-full">
|
||||
<div class="fields flex flex-col sm:flex-row col-start-1 col-span-2">
|
||||
<input
|
||||
id="mce-EMAIL"
|
||||
v-model="email"
|
||||
required
|
||||
type="email"
|
||||
aria-label="Email Address"
|
||||
class="flex-grow text-black p-2 rounded"
|
||||
placeholder="email@your.domain"
|
||||
name="EMAIL"
|
||||
/>
|
||||
<div class="sm:ml-4 sm:pt-0 pt-2">
|
||||
<input
|
||||
id="mc-embedded-subscribe"
|
||||
type="submit"
|
||||
value="Subscribe"
|
||||
name="subscribe"
|
||||
class="p-2 text-white font-semibold bg-blueGreen hover:bg-blueGreenScreen transition duration-300 rounded cursor-pointer w-full"
|
||||
/>
|
||||
</div>
|
||||
<div class="sm:ml-4 sm:pt-0"></div>
|
||||
</div>
|
||||
<label class="pt-2 col-start-1 col-span-2" for="gdpr_28879">
|
||||
<input
|
||||
id="gdpr_28879"
|
||||
type="checkbox"
|
||||
class=""
|
||||
required
|
||||
name="gdpr[28879]"
|
||||
value="Y"
|
||||
/><span class="pl-2">Please send me the newsletter</span>
|
||||
</label>
|
||||
</div>
|
||||
<div id="mergeRow-gdpr">
|
||||
<div style="position: absolute; left: -5000px" aria-hidden="true">
|
||||
<input
|
||||
type="text"
|
||||
name="b_25473244c7d18b897f5a1ff6b_cad54b2230"
|
||||
tabindex="-1"
|
||||
value=""
|
||||
/>
|
||||
</div>
|
||||
<!-- real people should not fill this in and expect good things - do not remove this or risk form bot signups-->
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<script>
|
||||
import { mapState } from 'vuex'
|
||||
|
||||
import countly from '../../util/countly'
|
||||
|
||||
export default {
|
||||
name: 'NewsletterForm',
|
||||
props: {},
|
||||
@@ -85,10 +59,6 @@ export default {
|
||||
computed: {
|
||||
...mapState('appState', ['latestWeeklyPost']),
|
||||
},
|
||||
methods: {
|
||||
subscribeClick() {
|
||||
countly.trackEvent(countly.events.NEWSLETTER_SUBSCRIBE)
|
||||
},
|
||||
},
|
||||
methods: {},
|
||||
}
|
||||
</script>
|
||||
|
||||
@@ -19,7 +19,14 @@
|
||||
:block-lazy-load="blockLazyLoad"
|
||||
/>
|
||||
<div
|
||||
class="grid-margins pt-8 grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-8"
|
||||
class="
|
||||
grid-margins
|
||||
pt-8
|
||||
grid grid-cols-1
|
||||
md:grid-cols-2
|
||||
lg:grid-cols-3
|
||||
gap-8
|
||||
"
|
||||
itemscope
|
||||
itemtype="http://schema.org/Blog"
|
||||
>
|
||||
@@ -37,7 +44,17 @@
|
||||
class="flex justify-center mt-8 pb-4"
|
||||
>
|
||||
<button
|
||||
class="px-3 py-2 text-white text-xl bg-blueGreen font-semibold rounded hover:bg-blueGreenScreen transition duration-300"
|
||||
class="
|
||||
px-3
|
||||
py-2
|
||||
text-white text-xl
|
||||
bg-blueGreen
|
||||
font-semibold
|
||||
rounded
|
||||
hover:bg-blueGreenScreen
|
||||
transition
|
||||
duration-300
|
||||
"
|
||||
@click="handleLoadMoreClick"
|
||||
>
|
||||
Load More
|
||||
@@ -327,7 +344,7 @@ export default {
|
||||
(item) =>
|
||||
item.frontmatter &&
|
||||
item.frontmatter.tags &&
|
||||
item.frontmatter.tags.find((tag) => tag.name === 'weekly')
|
||||
item.frontmatter.tags.find((tag) => tag.name === 'newsletter')
|
||||
)
|
||||
.sort(
|
||||
(a, b) => new Date(b.frontmatter.date) - new Date(a.frontmatter.date)
|
||||
|
||||
@@ -8,3 +8,7 @@
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.type-rich {
|
||||
@apply text-18
|
||||
}
|
||||
@@ -79,7 +79,7 @@ We have two consecutive goals regarding Wikipedia on IPFS: Our first goal is to
|
||||
|
||||
The easy way to get Wikipedia content on IPFS is to periodically -- say every week -- take snapshots of all the content and add it to IPFS. That way the majority of Wikipedia users -- who only read Wikipedia and don’t edit -- could use all the information on Wikipedia with all the benefits of IPFS. Users couldn't edit it, but users could download and archive swaths of articles, or even the whole thing. People could serve it to each other peer-to-peer, reducing the bandwidth load on Wikipedia servers. People could even distribute it to each other in closed, censored, or resource-constrained networks -- with IPFS, peers do not need to be connected to the original source of the content, being connected to anyone who has the content is enough. Effectively, the content can jump from computer to computer in a peer-to-peer way, and avoid having to connect to the content source or even the internet backbone. We've been in discussions with many groups about the potential of this kind of thing, and how it could help billions of people around the world to access information better -- either free of censorship, or circumventing serious bandwidth or latency constraints.
|
||||
|
||||
So far, we have achieved part of this goal: we have static snapshots of all of Wikipedia on IPFS. This is already a huge result that will help people access, keep, archive, cite, and distribute lots of content. In particular, we hope that this distribution helps people in Turkey, who find themselves in a tough situation. We are still working out a process to continue updating these snapshots, we hope to have someone at Wikimedia in the loop as they are the authoritative source of the content. **If you could help with this, please get in touch with us at wikipedia-project@ipfs.io.**
|
||||
So far, we have achieved part of this goal: we have static snapshots of all of Wikipedia on IPFS. This is already a huge result that will help people access, keep, archive, cite, and distribute lots of content. In particular, we hope that this distribution helps people in Turkey, who find themselves in a tough situation. We are still working out a process to continue updating these snapshots, we hope to have someone at Wikimedia in the loop as they are the authoritative source of the content. **If you could help with this, please get in touch with us at `wikipedia-project [AT] ipfs.io`**
|
||||
|
||||
### (Goal 2) Fully Read-Write Wikipedia on IPFS
|
||||
|
||||
@@ -193,4 +193,4 @@ If people start relying on this information over time, it will be important to e
|
||||
* Turkish Wikipedia (most recent snapshot): [/ipns/tr.wikipedia-on-ipfs.org/](https://ipfs.io/ipns/tr.wikipedia-on-ipfs.org/)
|
||||
* If people start relying on this information, we will encourage Wikimedia to take over generating these snapshots
|
||||
* We are encouraging Wikimedia to publish DNSLink or even IPNS record that is always up to date AND is cryptographically signed by Wikimedia
|
||||
* If you want to mirror the data, run an ipfs node and pin the Wikipedia data onto your node
|
||||
* If you want to mirror the data, run an ipfs node and pin the Wikipedia data onto your node
|
||||
|
||||
130
src/_blog/2023-11-introducing-nabu.md
Normal file
@@ -0,0 +1,130 @@
|
||||
---
|
||||
title: 'Introducing Nabu: Unleashing IPFS on the JVM'
|
||||
description: 'Learn about a new fast IPFS implementation in Java'
|
||||
author: Ian Preston
|
||||
date: 2023-11-07
|
||||
permalink: '/2023-11-introducing-nabu/'
|
||||
header_image: '/nabu-banner-2023.png'
|
||||
tags:
|
||||
- 'ipfs'
|
||||
- 'nabu'
|
||||
- 'bitswap'
|
||||
---
|
||||
|
||||
Greetings from the [Peergos](https://peergos.org) team! We are thrilled to unveil what we've been working on this year: [Nabu](https://github.com/peergos/nabu) – our sleek and versatile Java implementation of IPFS. Named after the ancient Mesopotamian god of literacy, rational arts, and wisdom, Nabu makes decentralised data storage and retrieval available to the large JVM ecosystem. It's now *production ready*, as we are using it in Peergos - a decentralised, secure file storage, sharing and social network.
|
||||
|
||||
## Introducing Nabu: Empowering Java with IPFS Magic
|
||||
At its core, Nabu is a minimal IPFS implementation for storing and retrieving data blocks over the libp2p protocol. But we didn't stop there – we've also added a touch of innovation with features like authed bitswap. This addition enables the creation of private data blocks, accessible only to those with authorized permissions. Intrigued? Dive into the finer details of this innovation in our dedicated post on [authed bitswap](https://peergos.org/posts/bats).
|
||||
|
||||
Our journey in crafting Nabu involved the implementation of additional libp2p protocols, including:
|
||||
|
||||
* Kademlia (including IPNS): The very backbone of IPFS, aiding in the discovery of blocks and their owners.
|
||||
* Bitswap + Auth Extension: A protocol that facilitates the exchange of data blocks.
|
||||
|
||||
We built upon the solid foundation of [jvm-libp2p](https://github.com/libp2p/jvm-libp2p). As we delved deeper, we realized the need to implement several crucial components. These include the [yamux muxer](https://github.com/libp2p/jvm-libp2p/tree/develop/libp2p/src/main/kotlin/io/libp2p/mux/yamux), the [TLS security provider](https://github.com/libp2p/jvm-libp2p/blob/develop/libp2p/src/main/kotlin/io/libp2p/security/tls/TLSSecureChannel.kt) (complete with ALPN early muxer negotiation), and a substantial portion of a quic transport (still a work in progress). While much of this effort started in a fork, we collaborated with [Consensys](https://consensys.io) to upstream our contributions into the main project which has now released [v1.0.0](https://github.com/libp2p/jvm-libp2p/releases/tag/1.0.0) as a result. This is used in [Teku](https://github.com/ConsenSys/teku), a Java Ethereum 2 implementation.
|
||||
|
||||
[<img src="../assets/nabu/modules.png" width="500" height="300"/>](../assets/nabu/modules.png)
|
||||
|
||||
Nabu's API empowers developers with the following methods:
|
||||
* [id](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-id)
|
||||
* [version](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-version)
|
||||
* [block/get](https://github.com/Peergos/nabu/blob/master/src/main/java/org/peergos/BlockService.java#L12)
|
||||
* [block/put](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-block-put)
|
||||
* [block/rm](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-block-rm)
|
||||
* [block/stat](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-block-stat)
|
||||
* [block/has](https://github.com/Peergos/nabu/blob/master/src/main/java/org/peergos/blockstore/Blockstore.java#L25)
|
||||
* [refs/local](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-refs-local)
|
||||
* [bloom/add](https://github.com/Peergos/nabu/blob/master/src/main/java/org/peergos/blockstore/Blockstore.java#L37)
|
||||
* [dht/findprovs](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-dht-findprovs)
|
||||
|
||||
Most of these functions align with [Kubo](https://github.com/ipfs/kubo), but we've added block/has, which is a much more efficient way to ask if we have a block or not, as well as bloom/add which is useful if you are adding blocks to the blockstore externally (typically with multiple servers and S3 blockstore, and using a bloom filter). In addition we've added a few extra optional parameters to block/get, which you'll hear more about in the Performance section below.
|
||||
|
||||
## Unique Features
|
||||
|
||||
Nabu boasts some distinctive features that simplify building on IPFS:
|
||||
|
||||
* P2P HTTP Proxy: This feature facilitates HTTP requests to listening peers, encrypting communication over libp2p streams. Bid farewell to the complexities of TLS certificate authorities and DNS.
|
||||
* Built-in S3 Blockstore: Seamlessly integrate external blockstores like S3.
|
||||
* [Infini-Filter](https://dl.acm.org/doi/10.1145/3589285): A bloom filter replacement that offers infinite expandability.
|
||||
* Peer-Specific Block Retrieval: Nabu empowers developers to fetch blocks from specific peers, streamlining data retrieval and improving privacy (See Performance section below).
|
||||
|
||||
Let's shed some light on the first of these gems – the P2P HTTP proxy. A component we initially implemented in [Kubo in 2018](https://peergos.org/posts/dev-update#Decentralization) (behind an experimental flag), this feature introduces a new gateway endpoint with paths in the format:
|
||||
|
||||
**/p2p/$peerid/http/**
|
||||
|
||||
Its function is simple yet transformative: it proxies incoming HTTP requests to the specified $peerid while trimming the preceding "/p2p/$peerid/http" path. On the other end, the setup forwards incoming requests to a designated endpoint. This paradigm grants the convenience of traditional HTTP-based architecture, sans the complexities of DNS and TLS certificate authorities. By addressing the node using its public key, secure connections become effortlessly achievable. The diagram below illustrates how we use this proxy in Peergos.
|
||||
|
||||
[<img src="../assets/nabu/p2p-http-proxy.png" width="640" height="340"/>](../assets/nabu/p2p-http-proxy.png)
|
||||
|
||||
For a simpler example of using this, see our single file demo [chat app](https://github.com/Peergos/nabu-chat/blob/main/src/main/java/chat/Chat.java).
|
||||
|
||||
## Performance
|
||||
### Faster, more private block retrieval
|
||||
Drawing from experience, we recognized the inefficiency of requesting every single block from the DHT or connected peers. This practice leads to excessive bandwidth consumption and sluggish content retrieval. Enter our solution: a new optional parameter, "peers", in block/get allowing retrieval from pre-specified peer IDs. In cases of unreachability, a DHT lookup through dht/findprovs api serves as a fallback option. This design of taking a set of peerids that you want to ask for blocks encourages users to design their programs to route at a higher level than blocks, improving speed, bandwidth usage and privacy. Many apps will know which peers they want to retrieve data from in advance, and with this parameter they can massively reduce bandwidth and speed up retrieval. The motto here is "Route your peers, not your data". In Peergos, for example, given a capability to a file we can lookup the file owner's home server (specifically its peer id) and directly send bitswap requests there, so we only need to fallback to DHT lookups if their home server is unreachable.
|
||||
|
||||
### Reduced bandwidth and CPU usage
|
||||
We believe that *providing* (announcing to the DHT that you have a given cid) every single block of data you have does not scale. This is because the number of DHT lookups and provide calls increases with the amount of data you are storing. The issues trying to scale this have been [documented](https://blog.ipfs.tech/2023-09-amino-refactoring/#making-reprovides-to-amino-lightning-fast). Compare this to bittorrent, which has been around much longer and has a much larger DHT, but where providing doesn't scale with the amount of data in a torrent and idle bandwidth usage is much lower. For this reason, we've made providing blocks in Nabu optional, and disabled it in Peergos (unless you are running a mirror).
|
||||
|
||||
This leads us to the next optimisation, enabled by only sending block requests to peers we think have the data. In Kubo, bitswap will broadcast block wants to all connected peers (typically in the 100s). This is both a privacy issue and a bandwidth hog as it means joining the main IPFS DHT is very resource intensive. Nabu has an option to block such aggressive peers that flood us with requests for blocks we don't have. With this option enabled, the incoming idle bandwidth usage is reduced by 10X.
|
||||
|
||||
### Benchmark
|
||||
We benchmarked Nabu against a real-world dataset – the Peergos PKI – consisting of a [CHAMP](https://blog.acolyer.org/2015/11/27/hamt/) structure with six layers, 6000 blocks, and a total size of ~2 MiB. The results speak volumes: while standard Kubo took 120 seconds to retrieve this dataset using the pin command, Nabu accomplished the task in a mere 5 seconds. And, this was achieved without any significant optimization or parallelisation, leaving much room for further enhancement.
|
||||
|
||||
[<img src="../assets/nabu/nabu-speed.png" width="604" height="340"/>](../assets/nabu/nabu-speed.png)
|
||||
|
||||
## Compatibility
|
||||
|
||||
Ensuring seamless integration, we subjected Nabu to a suite of interoperability tests against all libp2p implementations, including go-libp2p, rust-libp2p, js-libp2p, and nim-libp2p across historical versions. The results of these tests are documented [here](https://github.com/libp2p/test-plans/actions/runs/5671451848/attempts/1#summary-15368587233). Some of the results are below.
|
||||
|
||||
[<img src="../assets/nabu/nabu-interop.png" width="808" height="340"/>](../assets/nabu/nabu-interop.png)
|
||||
|
||||
## Bringing Nabu to Life: Integration and Usage
|
||||
Getting started with Nabu is simple. Choose between utilizing it through the HTTP API or embedding it directly into your process. Here's a compilable example of the embedding process in Java:
|
||||
```java
|
||||
List<MultiAddress> swarmAddresses = List.of(new MultiAddress("/ip6/::/tcp/4001"));
|
||||
List<MultiAddress> bootstrapAddresses = List.of(new MultiAddress("/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa"));
|
||||
BlockRequestAuthoriser authoriser = (cid, block, peerid, auth) -> CompletableFuture.completedFuture(true);
|
||||
HostBuilder builder = new HostBuilder().generateIdentity();
|
||||
PrivKey privKey = builder.getPrivateKey();
|
||||
PeerId peerId = builder.getPeerId();
|
||||
IdentitySection identity = new IdentitySection(privKey.bytes(), peerId);
|
||||
boolean provideBlocks = true;
|
||||
|
||||
SocketAddress httpTarget = new InetSocketAddress("localhost", 10000);
|
||||
Optional<HttpProtocol.HttpRequestProcessor> httpProxyTarget =
|
||||
Optional.of((s, req, h) -> HttpProtocol.proxyRequest(req, httpTarget, h));
|
||||
|
||||
EmbeddedIpfs ipfs = EmbeddedIpfs.build(new RamRecordStore(),
|
||||
new FileBlockstore(Path.of("/home/alice/ipfs")),
|
||||
provideBlocks,
|
||||
swarmAddresses,
|
||||
bootstrapAddresses,
|
||||
identity,
|
||||
authoriser,
|
||||
httpProxyTarget
|
||||
);
|
||||
ipfs.start();
|
||||
|
||||
List<Want> wants = List.of(new Want(Cid.decode("zdpuAwfJrGYtiGFDcSV3rDpaUrqCtQZRxMjdC6Eq9PNqLqTGg")));
|
||||
Set<PeerId> retrieveFrom = Set.of(PeerId.fromBase58("QmVdFZgHnEgcedCS2G2ZNiEN59LuVrnRm7z3yXtEBv2XiF"));
|
||||
boolean addToLocal = true;
|
||||
List<HashedBlock> blocks = ipfs.getBlocks(wants, retrieveFrom, addToLocal);
|
||||
byte[] data = blocks.get(0).block;
|
||||
```
|
||||
|
||||
If you want a working example app you can fork, have a look at our [chat example](https://github.com/Peergos/nabu-chat). This is a simple CLI app where two users exchange peerid (out of band) and then connect and send messages via p2p http requests, which are printed to the console.
|
||||
|
||||
## Future plans
|
||||
We still have lots planned for Nabu including the following:
|
||||
* NAT traversal with circuit-relay-v2, dcutr and AutoRelay
|
||||
* mDNS peer discovery
|
||||
* Android compatibility and demo app
|
||||
* Quic integration
|
||||
|
||||
## Gratitude and Acknowledgments
|
||||
None of this would have been possible without the support of the [IPFS Implementations Fund](https://arcological.xyz/#ipfs-pool). We extend our heartfelt thanks for making this endeavor a reality.
|
||||
|
||||
## Experience Nabu Today!
|
||||
We invite you to embark on an exploration of Nabu's capabilities. Feel free to give it a whirl, and we eagerly await your feedback and suggestions for improving Nabu. The easiest route is to open an issue on the github repo.
|
||||
|
||||
[Discover Nabu on GitHub](https://github.com/peergos/nabu) and unlock a world of decentralized possibilities.
|
||||
42
src/_blog/2023-11_connect-with-us-in-istanbul-and-prague.md
Normal file
@@ -0,0 +1,42 @@
|
||||
---
|
||||
title: 'Connect with us in Istanbul and Prague'
|
||||
description: 'Connect with the PL IPFS Implementers in Istanbul and Prague for DevConnect and DCxPrague! We want to hear from IPFS users to shape our 2024 plans.'
|
||||
author: Cameron Wood
|
||||
date: 2023-11-06
|
||||
permalink: '/2023-11-connect-in-istanbul-and-prauge'
|
||||
header_image: ''
|
||||
tags:
|
||||
- 'ipfs'
|
||||
- 'kubo'
|
||||
- 'helia'
|
||||
- 'event'
|
||||
- 'community'
|
||||
---
|
||||
|
||||
Hello, IPFS enthusiasts and users! We want to connect with you and hear your thoughts as we shape the future of IPFS for 2024. Your input is invaluable in guiding our efforts, so we're inviting you to meet with us in Istanbul and Prague at two exciting events: DevConnect / IPFS Connect in Istanbul 🇹🇷 and DCxPrague in Prague 🇨🇿.
|
||||
|
||||
|
||||
## 🌐 Who We Are: The PL IPFS Implementers and Network Infrastructure Operators
|
||||
|
||||
We are the PL IPFS implementers and network infrastructure operators, working on projects like Kubo, Helia, and managing the IPFS.io gateway. Our goal is to create a better IPFS ecosystem, and your insights are a crucial part of this journey.
|
||||
|
||||
|
||||
## 👋 We Want to Hear from You
|
||||
|
||||
Your input matters! We would be thrilled to connect with as many of our current and prospective users as possible during these upcoming events. Your thoughts and experiences will help us understand your needs and use cases, ultimately guiding our plans for 2024.
|
||||
|
||||
|
||||
## 👂 We're Eager to Listen
|
||||
|
||||
Are you planning to attend any of these events? If so, we would love to connect with you and learn more about your experiences with IPFS. Whether you have feedback, insights, or simply want to share your thoughts, we're all ears. Your feedback will help us figure out how to make the most of our time and resources for the IPFS community.
|
||||
|
||||
|
||||
## ❓ How Can You Get Involved?
|
||||
|
||||
If you're interested in sharing your thoughts and connecting with us during these events, please fill out [this form](https://forms.gle/CxUQPsEUg2CGkLgh6). We're eager to schedule time to meet with you to discuss your current IPFS challenges, needs, and hopes.
|
||||
|
||||
<br />
|
||||
<a href="https://forms.gle/CxUQPsEUg2CGkLgh6" class="cta-button"> Fill out this form to connect</a>
|
||||
|
||||
|
||||
🙏 Thank You!
|
||||
95
src/_blog/2023-12-introducing-dappling.md
Normal file
@@ -0,0 +1,95 @@
|
||||
---
|
||||
title: dAppling - a New Way to Deploy IPFS Sites in Minutes
|
||||
description: Introducing a seamless way to launch your code on IPFS, featuring straightforward setup, automatic deployments, and more.
|
||||
author: 🙏 namaskar
|
||||
date: 2023-11-28
|
||||
permalink: '/2023-11-dappling/'
|
||||
header_image: '/2023-12-introducing-dappling-header.png'
|
||||
tags:
|
||||
- 'web3'
|
||||
- 'tooling'
|
||||
- 'ipns'
|
||||
---
|
||||
|
||||
Welcome! I would love to share what I'm building at [dAppling](https://dappling.network), a platform that aims to simplify the build and deployment process for sites hosted on IPFS. I'll share a bit about us, a bit about the platform, and a bit about what you will get. By the end, it should be clear if [dAppling](https://dappling.network) is a tool you'll want to add to your developer toolbox.
|
||||
|
||||
## A Bit about Us
|
||||
|
||||
I'm Kyle. My co-founder Russell and I have been professional developers (whatever that means) for the last 7 years. We've worked at startups, big tech, and things in between. The last 2 of those years have been in the web3 space, starting with the creation of a DeFi protocol. We're excited to now be building tools for developers working on the next generation of the web.
|
||||
|
||||
## A Bit about dAppling
|
||||
|
||||
The first of those tools is dAppling. The word is a portmanteau of "dApp", a term short for decentralized application, and "sapling," because nature is wonderful 🌱. However, we support all kinds of web projects, not just [dApps](https://app.gogopool.com.dappling.eth.limo/): [landing pages](https://arbor-landing.dappling.eth.limo/), [blogs](https://blog.dappling.network), or even a simple page of content arguing against the [usage of acronyms](https://nomoreacronyms-0u5spi.dappling.network).
|
||||
|
||||
Basically, we fetch your code, build it into html/css/js files, and host those files on IPFS. What makes us special are the features we provide to make your experience easier. Even if you have an existing site, you can use [dAppling](https://dappling.network) to create a resilient "alternative frontend" that is hosted on IPFS.
|
||||
|
||||
## A Bit about What You Get
|
||||
|
||||
When you add a project to [dAppling](https://dappling.network), you will tell us where the code is and what commands to use. After it's built you will get:
|
||||
|
||||
- automatic updates when your code on **GitHub** changes
|
||||
- hosting on the **InterPlanetary File System** (IPFS)
|
||||
- a working **dappling.network** subdomain
|
||||
- a working **dappling.eth** ENS subdomain
|
||||
- an automatically updating **IPNS** key
|
||||
|
||||
## Our Focuses
|
||||
|
||||
We have two major focuses at [dAppling](https://dappling.network): **simplicity** and **access**.
|
||||
|
||||
We want to make it as easy as possible to get your code hosted. After that, we want it to be accessible and fast. What we want to avoid is a first-time experience where you only see an error screen or have your users waiting forever to load your site.
|
||||
|
||||
### Simplicity
|
||||
|
||||
We simplify the setup process by automatically detecting your app's configuration. If something does go wrong, we have easy to use debugging tools.
|
||||
|
||||
#### Simple Setup
|
||||
|
||||
Since we have access to your code, we look at a few things like what package manager you use, what sort of framework the project is built with, and certain configuration files. We use this information to prefill the configuration form, so you don't have to.
|
||||
|
||||
We have support for environment variables to use during the build process that can be used to configure things like your database URL. Additionally, we support monorepos.
|
||||
|
||||

|
||||
|
||||
#### Simple Debugging
|
||||
|
||||
Try as we might, projects fail to build. Quite a bit! From a linting error to a missing dependency, seeing the error screen seems inevitable. We want to make it as easy as possible to understand what went wrong and how to fix it. We parse the logs and show you the error in, what I think, is a pretty readable format.
|
||||
|
||||

|
||||
|
||||
If reading logs isn't your thing, we have a button that sends your logs to be parsed by AI and returns a summary of the error. And while it's not perfect, the output has been helpful more often than not.
|
||||
|
||||
### Accessibility
|
||||
|
||||
Websites need to be accessed, even if the reader is only you! We think the more points of access the better, and each should be available and fast.
|
||||
|
||||
#### Speed of Access
|
||||
|
||||
The foundation of our storage starts with [Filebase](https://filebase.com/) whose geo-redundant storage locations keep your files available. On top of that, the CDN quickly fetches and caches those files.
|
||||
|
||||
#### Points of Access
|
||||
|
||||
There are a couple of ways to access your site. When the code is built and uploaded to IPFS, you will receive what is called a [Content Identifier (CID)](https://docs.ipfs.tech/concepts/content-addressing/). It's basically the hash of all your files.
|
||||
|
||||
You will receive a new CID every time your site is re-built because the resulting files have changed. Luckily, we use the [InterPlanetary Name System (IPNS)](https://docs.ipfs.tech/concepts/ipns/) to create a key that will always point to the most recent CID.
|
||||
|
||||
So the most straightforward way to fetch your content would be directly from an [IPFS node](https://docs.ipfs.tech/concepts/nodes/). Since not everyone is running an IPFS node (yet), you can instead use an [IPFS gateway](https://docs.ipfs.tech/concepts/ipfs-gateway/) in which a third party fetches the content from their node and serves it over HTTPS.
|
||||
|
||||
Since we store the IPNS key on our `dappling.eth` ENS name, you can also fetch the content through a service like [eth.limo](https://eth.limo). This service first reads the IPNS key that we set, resolves it to a CID, and then serves the content like a gateway.
|
||||
|
||||
Even simpler would be using the existing DNS system, either through the custom `*.dappling.network` subdomain that we created for you, or by adding your own custom domain like `ipfs.crypto-protocol.app`.
|
||||
|
||||
## Future
|
||||
|
||||
We plan to be constantly upgrading the platform as new decentralization techniques appear. As a user, you will notice more points of access, quicker speeds, and features to make usage easier. We hope to increase decentralization through:
|
||||
|
||||
- SSR: Serverless applications are popular on platforms like Next.js and we will be using decentralized compute to increase the types of applications we support.
|
||||
- Collaboration: The more participants in a project the better the decentralization becomes. We are working on tools to allow multiple people to configure the project.
|
||||
|
||||
## Get Involved
|
||||
|
||||
As we continue to improve [dAppling](https://dappling.network), we're always looking for user feedback to guide us. Our focus remains on providing a platform that is not just decentralized but also highly performant and user-friendly.
|
||||
|
||||
[Deploy a site](https://dappling.network), and if you run into **any** problems, want to connect, or just say hi, my DMs are open on [𝕏](https://x.com/0xBookland). I would love to hear about what you're building and help you get all of your projects deployed as we transition to the infrastructure of the future.
|
||||
|
||||
🙏
|
||||
85
src/_blog/2023-ipfs-companion-MV3-update.md
Normal file
@@ -0,0 +1,85 @@
|
||||
---
|
||||
title: 'IPFS Companion MV3 Update'
|
||||
description: 'A quick update on the status of IPFS Companion in the MV3 world.'
|
||||
author: Whizzzkid
|
||||
date: 2023-12-05
|
||||
permalink: '/2023-ipfs-companion-mv3-update/'
|
||||
header_image: '/ipfs-companion-mv3-banner.png'
|
||||
tags:
|
||||
- 'ipfs'
|
||||
- 'chrome extension'
|
||||
- 'firefox extension'
|
||||
- 'mv3'
|
||||
- 'web-extension'
|
||||
- 'ipfs-companion'
|
||||
---
|
||||
|
||||
[IPFS companion](https://docs.ipfs.tech/install/ipfs-companion/#install) is a browser extension, one of the key tools that enhances the IPFS experience in the browser. It is available for [Firefox](https://addons.mozilla.org/en-US/firefox/addon/ipfs-companion/) and [Chrome/Brave/Opera/Edge](https://chrome.google.com/webstore/detail/ipfs-companion/nibjojkomfdiaoajekhjakgkdhaomnch) (and all other Chromium-based browsers) and is used by thousands of people every day.
|
||||
|
||||
In September, IPFS-Companion built on MV3 (Manifest V3) was shipped on the main channel, which brings exciting improvements and changes the way you interact with this powerful tool. This blog post will give you a quick overview of the journey, changes, and what to expect.
|
||||
|
||||
## What is MV3?
|
||||
|
||||
MV3, or Manifest V3, is the latest iteration of the manifest file format used by browser extensions. MV3 introduces several key changes compared to the previous MV2, such as the adoption of a service worker model for background scripts, increased permissions granularity, a few new APIs like declarativeNetRequest, and the deprecation of the blocking behavior of a few APIs like webRequest on intercepted requests.
|
||||
|
||||
Both [Chrome](https://developer.chrome.com/docs/extensions/mv3/intro/mv3-overview/) and [Firefox](https://extensionworkshop.com/documentation/develop/manifest-v3-migration-guide/) have published documentation on the changes and how to migrate your extension to MV3, but both of them are pretty recent and are still evolving. They also don't seem to agree on how the background scripts should behave or the `host_permissions` should be handled, which makes it even more challenging to build a cross-browser extension.
|
||||
|
||||
Chrome's changes have been much more invasive, as they remove support for blocking `webRequest` API, push for the use of background service workers, and the adoption of the `declarativeNetRequest` API. Firefox, on the other hand, has been more conservative and has been trying to keep the extension ecosystem as close to MV2 as possible, with the exception of the `host_permissions` change.
|
||||
|
||||
While we are making sure that the extension continues to work on Firefox without regressions, Chromium-based browsers make up ~90% of the IPFS-Companion user base, which makes Chrome's implementation of MV3 the lowest common denominator that informs our design decisions and feature set.
|
||||
|
||||
## What's the fuss around `declarativeNetRequest` API?
|
||||
|
||||
When MV3 changes got announced, there was [uproar in the community](https://arstechnica.com/gadgets/2022/09/chromes-new-ad-blocker-limiting-extension-platform-will-launch-in-2023/) that the `webRequest` API was going to be deprecated and replaced by `declarativeNetRequest`. The main reason for this was that the `declarativeNetRequest` API is not as powerful as the `webRequest` API, and it doesn't allow extensions to block requests. Instead, it allows the extension to add and update a limited number of rules per-extension. This was promoted as a way to improve performance and privacy, as the browser would be able to block requests without having to load the extension's code. However, this also meant that the extension would not be able to intercept the requests, and extensions would have to rely on the browser to do that, and for a limited number of domains.
|
||||
|
||||
In practice, there are no privacy or security benefits of `declarativeNetRequest`, and the old behaviour (required by IPFS Companion) can be reimplemented with extra steps. Even though the MV3 extension cannot intercept and block a request in-flight, it can still observe all HTTP requests without blocking them and work around rule count limit by adding or updating dynamic rules on the fly. When a matching request is found by read-only observer, extension can programmatically reload document to force fresh page load against updated dynamic rules. In other words, the MV3 version of Companion extension can emulate the behaviour from MV2:
|
||||
|
||||

|
||||
|
||||
This type of additional complexity is necessary in MV3 world if a genuine extension like IPFS-Companion wants to intercept requests to a given IPFS resource and redirect those to be loaded via the local gateway. This is a key feature of the extension, as it allows users to access the content via the local gateway instead of delegating trust to the public HTTP gateway, which is a centralized service.
|
||||
|
||||
## The Plan
|
||||
|
||||
The discussions around this topic started soon after the announcement of MV3. There are many scenarios and information in [Issue #666](https://github.com/ipfs/ipfs-companion/issues/666). The first step was to prototype a rudimentary version of the extension using the MV3 APIs and see if the MV3 version could achieve comparable functionality. That work was done in [PR #1078](https://github.com/ipfs/ipfs-companion/pull/1078) by [@MeanDaveJustice](https://github.com/meandavejustice), which helped a lot in understanding the challenges.
|
||||
|
||||
Based on both of these, a detailed migration plan was laid out in [Issue #1152](https://github.com/ipfs/ipfs-companion/issues/1152). A few important points from the plan were:
|
||||
|
||||
- Implementing request manipulation logic in the browser to support both Chromium and Firefox. The extension will need to identify capabilities of the browser runtime and use the appropriate logic.
|
||||
- Patching packages that now need to account for the new `ServiceWorker` scope. Packages like [`debug`](https://www.npmjs.com/package/debug) and [`countly-sdk-web`](https://www.npmjs.com/package/countly-sdk-web) rely on `window`, `localStorage`, `XMLHttpRequest`, etc., which are not available in the service worker scope.
|
||||
- Implementing a collector branch to collect all changes, because the transition in this case could not be done incrementally and instead had to be done in one go. This meant that the collector branch would have to be maintained for a while until the migration was complete. In the meanwhile the `main` branch was still being used to ship security and bug fixes.
|
||||
- Migrating all of the existing battery of tests that used to test various scenarios collected over the years in the MV2 world, over to the MV3 world. This was a huge task and took a lot of time and effort. The tests had to be refactored such that they would work for browsers that supported request blocking (Firefox) and those that didn't (Chromium).
|
||||
- Implement improved metrics collection to understand IPFS users, by understanding the number of IPFS resources resolved by Companion running in the browser.
|
||||
|
||||
## The migration
|
||||
|
||||
The migration was done in multiple steps:
|
||||
|
||||
- The first step was to implement the [standard checklist](https://github.com/ipfs/ipfs-companion/pull/1170) which outlined the known breaking changes in MV3 and how to fix those.
|
||||
- A parallel step was to implement a [collector branch](https://github.com/ipfs/ipfs-companion/pull/1182) and [build-pipeline](https://github.com/ipfs/ipfs-companion/pull/1183) to go with it.
|
||||
- The next step was to implement the replacement APIs in the `ServiceWorker` context, e.g. [`XMLHttpRequest` migration](https://github.com/ipfs/ipfs-companion/pull/1179)
|
||||
- This was soon followed by the [first iteration](https://github.com/ipfs/ipfs-companion/pull/1181) of blocking by observing logic.
|
||||
- Which allowed for the publication of the [first RC](https://github.com/ipfs/ipfs-companion/pull/1192) and a corresponding announcement on the [discuss forum](https://discuss.ipfs.tech/t/announcing-ipfs-companion-mv3-rc-beta/16442).
|
||||
- This was followed by a series of bug fixes which is a list item in the original [migration plan](https://github.com/ipfs/ipfs-companion/issues/1152).
|
||||
- It was also decided that it would be the right time to remove the experimental embedded `js-ipfs` backend, as it was not useful due to the lack of gateway functionality in extension context, and `js-ipfs` itself being superseded by [`helia`](https://helia.io). This was done in [PR #1225](https://github.com/ipfs/ipfs-companion/pull/1225).
|
||||
- Apart from the plethora of UX regression fixes around context menus (because MV3 changed how context menus were handled), timing issues between observing a request, and actually adding a rule to block those, the most important PR was the test migration which affirmed that the solution handled all the scenarios covered in the MV2 world. This was done in [PR #1236](https://github.com/ipfs/ipfs-companion/pull/1236)
|
||||
- One of the last fixes was to add an additional permission check for requesting [`host_permissions`](https://github.com/ipfs/ipfs-companion/pull/1250) on Firefox which allowed the actual blocking of requests on Firefox.
|
||||
|
||||
## The learnings
|
||||
|
||||
It took more than 6 months to plan, implement, and test the migration. Over 40 PRs were merged and more than 18k lines of code were touched. The migration was a huge effort, and it was only possible because of the amazing community that helped in testing and reporting issues. The migration also highlighted a few key learnings:
|
||||
|
||||
- The world of web browsers is constantly evolving, and it is important to keep up with the changes. This is especially true for extensions, as they are the first to be impacted by these changes. It is important to keep an eye on the changes and plan ahead. The browser vendors also don't seem to agree on how to handle the changes, which makes it even more challenging to build a cross-browser extension.
|
||||
- The MV3 changes will be a huge effort for any extension that relies on observing, intercepting, or blocking a user request... was this needed? Probably not, as the `declarativeNetRequest` API is not as different from the `webRequest` API as it was made out to be. A motivated entity can still implement comparable functionality but that now involves more steps. However, it is important to note that the `ServiceWorker` based background scripts are potentially a huge improvement especially on low-end devices as it allows extension to `sleep` when not in use. This may not be true in every case (e.g. IPFS-companion is always observing requests, so the service-worker may never go to sleep) but it is a step in the right direction as it allows the browser to manage the resources better and would probably someday allow extensions to be used on mobile devices.
|
||||
- A better rollout strategy and feature-flagging capabilities would have helped testing changes in the wild, but this is not possible with the current extension ecosystem. The only way to test changes is to publish them on the main channel and hope that the users report issues. This is not ideal, as it can lead to a bad user experience and a lot of frustration. Transition from MV2 to MV3 was even more challenging as Chrome Webstore would not allow downgrade from MV3 to MV2 in case a faulty release went out. It had to be perfect the first time, otherwise the users would be stuck with a broken extension until the next release.
|
||||
|
||||
## Current status
|
||||
|
||||
MV3 changes went live towards the end of September 2023 when the collector-branch was merged to main and released on both the [Chrome Web Store](https://chrome.google.com/webstore/detail/ipfs-companion/nibjojkomfdiaoajekhjakgkdhaomnch) and [Firefox Add-ons](https://addons.mozilla.org/en-US/firefox/addon/ipfs-companion/). We've not seen any major issues so far. The extension is working as expected and the user base is consistent. There were some minor regressions and bugs reported, but nothing that's a show-stopper. Those are being fixed as they are reported.
|
||||
|
||||
## What's next?
|
||||
|
||||
The next step will be to implement the [new Brave APIs](https://github.com/ipfs/ipfs-companion/issues/1281) to allow for finer control of the IPFS node provided in the Brave Browser. This will provide a much more polished experience for the users of Brave Browser that also want to enable the IPFS Companion extension for additional UI and more fine-grained control over redirects. There also are plans to experiment with a [Helia based built-in gateway](https://github.com/ipfs/ipfs-companion/issues/1284) on which some progress has already been made as proof-of-concepts in [helia-service-worker-gateway](https://github.com/ipfs-shipyard/helia-service-worker-gateway) and [helia-http-gateway](https://github.com/ipfs/helia-http-gateway). The learnings from these projects will be used to work with browser vendors to close API gaps and eventually implement a gateway that can be used by IPFS Companion to host an IPFS node in the browser extension itself, which will allow users to access IPFS resources when not running a local node.
|
||||
|
||||
## Conclusion
|
||||
|
||||
It's an exciting new world! Try out [IPFS-companion](https://github.com/ipfs/ipfs-companion) and share your thoughts on [discuss](https://discuss.ipfs.tech/tag/ipfs-companion) or [github](https://github.com/ipfs/ipfs-companion/issues).
|
||||
532
src/_blog/dapps-ipfs.md
Normal file
@@ -0,0 +1,532 @@
|
||||
---
|
||||
date: 2024-01-29
|
||||
permalink: /dapps-ipfs/
|
||||
title: 'The State of Dapps on IPFS: Trust vs. Verification'
|
||||
description: 'Overview of the current landscape of dapps on IPFS through the lens of trust and verifiability'
|
||||
author: Daniel Norman
|
||||
header_image: /dapps-ipfs/header.png
|
||||
tags:
|
||||
- ipfs
|
||||
- dapps
|
||||
- Helia
|
||||
- js-ipfs
|
||||
- ipns
|
||||
- ens
|
||||
---
|
||||
|
||||
## Preface <!-- omit from toc -->
|
||||
|
||||
This blog post provides a comprehensive overview of the current landscape of dapps on IPFS through the lens of trust and verifiability. Given the nuance and breadth of this topic, this blog post is rather long.
|
||||
|
||||
For easier navigation, use the [table of contents](#contents).
|
||||
|
||||
## Contents <!-- omit from toc -->
|
||||
|
||||
- [Trust vs. verification in dapps](#trust-vs-verification-in-dapps)
|
||||
- [The benefits of IPFS for (d)app developers and users](#the-benefits-of-ipfs-for-dapp-developers-and-users)
|
||||
- [Primer on web app architectures: SPAs, MPA, PWA and dapps](#primer-on-web-app-architectures-spas-mpa-pwa-and-dapps)
|
||||
- [The client-server spectrum](#the-client-server-spectrum)
|
||||
- [SPA and MPA can be easily published to IPFS](#spa-and-mpa-can-be-easily-published-to-ipfs)
|
||||
- [SPA and MPA can also be PWA](#spa-and-mpa-can-also-be-pwa)
|
||||
- [Dapps](#dapps)
|
||||
- [How dapps get chain state](#how-dapps-get-chain-state)
|
||||
- [Publishing dapps: approaches and trade-offs](#publishing-dapps-approaches-and-trade-offs)
|
||||
- [Without IPFS](#without-ipfs)
|
||||
- [Publishing to IPFS](#publishing-to-ipfs)
|
||||
- [Loading dapps from IPFS: approaches and trade-offs](#loading-dapps-from-ipfs-approaches-and-trade-offs)
|
||||
- [From a public gateway](#from-a-public-gateway)
|
||||
- [With a local IPFS node](#with-a-local-ipfs-node)
|
||||
- [With a local IPFS node \& IPFS Companion browser extension](#with-a-local-ipfs-node--ipfs-companion-browser-extension)
|
||||
- [With the Brave browser](#with-the-brave-browser)
|
||||
- [When running a Kubo node is not an option](#when-running-a-kubo-node-is-not-an-option)
|
||||
- [What if content addressing were native to the web?](#what-if-content-addressing-were-native-to-the-web)
|
||||
- [In-browser CID verification with JavaScript](#in-browser-cid-verification-with-javascript)
|
||||
- [Browser constraints](#browser-constraints)
|
||||
- [Approaches to IPFS in the browser](#approaches-to-ipfs-in-the-browser)
|
||||
- [Helia and IPFS in the browser](#helia-and-ipfs-in-the-browser)
|
||||
- [Verifying top-level pages, sub-resources, and async data](#verifying-top-level-pages-sub-resources-and-async-data)
|
||||
- [Fetching and verifying async data with Helia](#fetching-and-verifying-async-data-with-helia)
|
||||
- [Making Helia lighter and developer-friendly](#making-helia-lighter-and-developer-friendly)
|
||||
- [Helia in a Service Worker](#helia-in-a-service-worker)
|
||||
- [Local app installer](#local-app-installer)
|
||||
- [Most users don’t use CIDs directly](#most-users-dont-use-cids-directly)
|
||||
- [Naming systems and mutable pointer](#naming-systems-and-mutable-pointer)
|
||||
- [DNSLink](#dnslink)
|
||||
- [Ethereum Name System (ENS)](#ethereum-name-system-ens)
|
||||
- [IPNS](#ipns)
|
||||
- [Conclusion](#conclusion)
|
||||
|
||||
## Trust vs. verification in dapps
|
||||
|
||||
If you are a decentralized web app (dapp) developer, there’s a good chance that you already publish the frontend of your dapp to IPFS. However, today, even if you do so, your users cannot benefit from the integrity IPFS provides without running their own IPFS node. If your users’ browser isn’t verifying that the frontend's source and resources match the CID you published, they are exposed to a wider attack surface, which can lead in the worst case to stolen funds.
|
||||
|
||||
The root of the problem lies in the **difficulty users face verifying the integrity of dapps deployed to IPFS in a browser without running an IPFS node**. This hurdle means that many users are **trusting** —often unknowingly– a specific IPFS gateway. This goes against the IPFS principle that [**verification matters**](https://specs.ipfs.tech/architecture/principles/#verification-matters) and puts users at risk.
|
||||
|
||||
Over the last couple of months, the [IPFS Shipyard](https://ipfs-shipyard.org/) team has been working with several teams in the dapp ecosystem to understand the challenges they face and the broader problem space. With the formation of the [IPFS Dapps Working Group](https://github.com/ipfs/dapps-wg/) by the IPFS Shipyard team along with the [Liquity team](https://www.liquity.org/) and the IPFS community, we aim to address some of the immediate pain points faced by the dapp developers and users and provide better tooling. One of the main goals is to **establish verified retrieval as the norm for retrieving CIDs on the web**.
|
||||
|
||||
This is not a new problem. Making CIDs native to the web platform has been a long-time goal of the IPFS project and remains a core goal of the [IPFS Ecosystem Working Group](https://blog.ipfs.tech/2023-introducing-the-ecosystem-working-group/). Making CIDs native to the web is an arduous road that involves wide-spanning collaboration with stakeholders including standard bodies, spec writers, browser vendors, and IPFS implementors.
|
||||
|
||||
It’s worth noting that _trustlessness_ is an aspirational property of dapps, but a misleading term because trust cannot be completely eliminated. A better way to look at this is in terms of **verifiability** that content addressing enables, in addition to less reliance on authority, e.g. PKI, DNS and authoritative servers. Moreover, the web’s trust model is deeply ingrained and rooted in the [**Same-origin policy**](https://developer.mozilla.org/en-US/docs/Web/Security/Same-origin_policy). One of the engineering challenges the working group faces is to meet the goal above within the boundaries and constraints of the same-origin policy.
|
||||
|
||||
> **Note:** while this blog post is heavily focused on Dapps, almost all of it applies to any static web application that can be published to IPFS. That is, Progressive Web Apps (**PWA**), Single Page Applications (**SPA**) and any app that does not rely on server side rendering.
|
||||
|
||||
## The benefits of IPFS for (d)app developers and users
|
||||
|
||||
IPFS is supposed to provide several benefits for web app developers and users:
|
||||
|
||||
- **Resilience & censorship resistance:** by having multiple copies of the frontend’s CID on the IPFS network you ensure that even if multiple providers are unavailable or censored, the frontend is still retrievable and usable. In the most extreme case, it’s enough for there to be a single provider for content to be retrievable.
|
||||
- **End-to-end integrity:** as long as a user of your Dapp has the CID you shared, they can be sure they are running the exact code that you published by **verifying** locally. Local verification is crucial since Dapps interact with a blockchain and malicious code can lead to loss of user funds. Integrity is adjacent to trustlessness — because verification eliminates the need to trust the source of data.
|
||||
- **Legal and regulatory compliance:** as regulatory bodies adopt regulation, e.g. **[MiCA](https://www.esma.europa.eu/esmas-activities/digital-finance-and-innovation/markets-crypto-assets-regulation-mica),** which applies to crypto assets and their Dapps, the degree to which services are decentralized comes under scrutiny. While the legal question cannot be answered by the blog post (this is not legal advice), IPFS, through the former two points, should provide the means to maximize decentralization and do so provably.
|
||||
- **Data portability, no vendor lock-in, and [credible exit](https://subconscious.substack.com/p/credible-exit):** once you onboard data and make it content-addressed with CIDs, you are free to move it between implementations, services, and jurisdictions, theoretically, without paying the onboarding cost again.
|
||||
|
||||
The reality, however, is more complex because there are various approaches to publishing and fetching dapps from IPFS that make subtle trade-offs between **trustlessness, resilience, UX, and performance.**
|
||||
|
||||
In the next section, we’ll take a look at web app architectures, and what dapps are, and then dive deeper into the actual approaches you see in the wild.
|
||||
|
||||
## Primer on web app architectures: SPAs, MPA, PWA and dapps
|
||||
|
||||
The rapidly evolving nature of web application architectures has given birth to many terms, abbreviations, and web development frameworks. This section will attempt to provide a high-level overview of some of the main web app architecture patterns, what dapps are, and how they relate to publishing to IPFS. If you are already familiar with these, feel free to skip ahead.
|
||||
|
||||
### The client-server spectrum
|
||||
|
||||
Today’s web applications can be seen as being positioned somewhere on a **server-client spectrum** regarding where the logic (rendering, authorization, processing user input) lives. On the server end of the spectrum, you have server-rendered apps where most logic is encapsulated in the server, e.g. WordPress, Laravel, and Ruby on Rails apps. On the client end, you have Single Page Applications (SPA), where all routing and rendering logic is client side. SPAs typically have a single entry index.html with a JavaScript bundle that handles all routes. Once the JS is loaded, it takes over rendering, navigation, and network (asynchronously submitting user input) responsibilities. Another approach that sits somewhere in the middle is the multi-page application (MPA) with a pre-rendered HTML file per route that typically contains only the necessary JS for the given route.
|
||||
|
||||
It’s worth noting that many modern web development frameworks support more than one architecture and even the blending of different approaches on a per-route basis. For example, [Next.js supports both MPAs with Static Exports](https://nextjs.org/docs/pages/building-your-application/deploying/static-exports) and server-side rendering.
|
||||
|
||||
[web.dev has a useful article that delves into this topic in more detail](https://web.dev/articles/rendering-on-the-web).
|
||||
|
||||
### SPA and MPA can be easily published to IPFS
|
||||
|
||||
Because SPA and MPA are statically generated, you can easily host them on any server that can serve static files (HTML, JS, CSS, etc.). That makes them a great fit for publishing on both traditional CDNs and IPFS.
|
||||
|
||||
### SPA and MPA can also be PWA
|
||||
|
||||
A progressive web app ([PWA](https://developer.mozilla.org/en-US/docs/Web/Progressive_web_apps/Guides/What_is_a_progressive_web_app)), is a web app that runs in a browser while providing a user experience like that of a platform-specific native app, e.g. the ability to function offline, update in the background, and send notifications to the OS.
|
||||
|
||||
The key thing to understand is that what makes an app a PWA (web app manifest and a service worker) is complementary to MPAs and SPAs. [In other words, both SPA and MPA architectures can be used to build a PWA.](https://web.dev/learn/pwa/architecture)
|
||||
|
||||
### Dapps
|
||||
|
||||
Dapps, short for Decentralised Apps, is an umbrella term for applications deployed as a smart contract to a blockchain. Since interacting with smart contracts directly can be a clunky experience, dapps are typically comprised of two components:
|
||||
|
||||
- Smart contracts deployed to a smart contract blockchain like Ethereum (and other EVM chains, e.g. Filecoin).
|
||||
- A frontend to interact with those contracts from the web browser. Typically the frontend will be a static app (SPA/MPA) that is deployed to a CDN and/or published to IPFS.
|
||||
|
||||
### How dapps get chain state
|
||||
|
||||
In this architecture, the static site will need to fetch blockchain state, specifically the state associated with the Dapp’s smart contracts. This can be done using the following approaches:
|
||||
|
||||
- The most naive is to use the [Ethereum JSON-RPC API](https://ethereum.org/en/developers/docs/apis/json-rpc/) which every Ethereum execution client/node exposes. The Ethereum execution client is software that keeps an up-to-date full state by synching with the rest of the network and updating the state tree every time a new block is produced. Dapps that rely on the JSON-RPC API will either use a hosted Ethereum node provider like Quicknode, Alchemy, and Infura, or run their own node.
|
||||
- Since the JSON-RPC API is usually too low-level with unindexed data to provide rich frontend functionality, many Dapps will instead query an additional indexing layer like [The Graph](https://thegraph.com/). The Graph is a protocol for indexing and querying blockchain data and makes it possible to efficiently query chain state using GraphQL. For example, Uniswap uses [this approach](https://docs.uniswap.org/api/subgraph/overview) to fetch data from the Uniswap smart contracts.
|
||||
|
||||
In both approaches, retrieval of chain state happens as async requests invoked by the frontend code.
|
||||
|
||||
It’s also pretty common for the smart contracts and frontend of a dapp to be open source, which allows anyone to audit the code. For example, Uniswap publishes both the source of their [smart contracts](https://github.com/Uniswap/v3-core) and [interface](https://github.com/Uniswap/interface) on [GitHub](https://github.com/Uniswap).
|
||||
|
||||
One thing to note is that the degree of decentralization of a Dapp can vary based on several factors that are beyond the scope of this post.
|
||||
|
||||
**As a general rule of thumb, it’s only as decentralized as the least decentralized component.**
|
||||
|
||||
This blog post is mostly concerned with the frontend component and the different ways that IPFS enables maximizing decentralization of its distribution and trustlessness. Throughout the post, we’ll be looking at Uniswap as an example, given its importance and the amount of money it secures. That being said, the insights apply to any Dapp of this structure.
|
||||
|
||||
## Publishing dapps: approaches and trade-offs
|
||||
|
||||
### Without IPFS
|
||||
|
||||
The most naive and common approach is to just deploy the dapp to a web server or CDN like Vercel, AWS, Netlify, and Cloudflare.
|
||||
|
||||
For example, [the Uniswap team deploys](https://github.com/Uniswap/interface/actions/runs/7036990525/job/19150799879#step:11:1) their frontend to Cloudflare Pages (as well as IPFS as we'll see in the section below) and makes the latest version available at https://app.uniswap.org.
|
||||
|
||||
From the perspective of a user, this is arguably the most user-friendly and performant (with Cloudflare’s CDN), at the cost of being the least verifiable.
|
||||
|
||||
Dapp users have no way to verify that the source of the frontend matches the original code published on GitHub. Moreover, the reliance on DNS comes with risks such as fat finger human errors and other DNS attacks, e.g. DNS takeovers — these are admittedly unlikely but important to consider.
|
||||
|
||||
| | Rating |
|
||||
| -------------------------------- | ------ |
|
||||
| Verifiable | ❌ |
|
||||
| Resilience/Censorship resistance | ❌ |
|
||||
|
||||
#### At the mercy of multiple authorities
|
||||
|
||||
Another thing to consider about deploying without IPFS is that the app must comply with **the terms of service of multiple authorities**:
|
||||
|
||||
1. “.org” TLD owner
|
||||
2. “uniswap.org” DNS Registrar
|
||||
3. “uniswap.org” DNS Nameserver (when different to the registrar)
|
||||
4. Certificate Authority (CA) that provides TLS cert for https://app.uniswap.org
|
||||
5. CDN/HTTP Hosting service serves the site traffic
|
||||
6. ISP/[AS](<https://en.wikipedia.org/wiki/Autonomous_system_(Internet)>) of the HTTP Hosting provider
|
||||
|
||||
### Publishing to IPFS
|
||||
|
||||
From the perspective of a Dapp developer, publishing to IPFS is pretty straightforward. You take your frontend build and add it to your IPFS node or to a pinning service. Publishing to IPFS results in a CID which represents that version of the frontend.
|
||||
|
||||
Uniswap, for example, has automated [publishing to IPFS with Pinata](https://github.com/Uniswap/interface/actions/runs/7036990525/job/19150799879#step:8:21) as part of their build process, and they publish the CID for each version in the release:
|
||||
|
||||

|
||||
|
||||
One thing to consider here is where the CID is generated. In the ideal case, this should happen in the build process, e.g. by packing the build outputs into a CAR file with a CID in the build process. If you upload the raw files to a pinning service, you are trusting the pinning service to generate the CID for the input data.
|
||||
|
||||
To increase the resilience and censorship resistance of your deployed app, you can pin the CID to more than one pinning service or IPFS node.
|
||||
|
||||
| | Rating |
|
||||
| -------------------------------- | ------ |
|
||||
| Verifiable | 👍 |
|
||||
| Resilience/Censorship resistance | 👍 |
|
||||
|
||||
## Loading dapps from IPFS: approaches and trade-offs
|
||||
|
||||
### From a public gateway
|
||||
|
||||
With the CID of a dapp at hand, you can load the frontend from any public IPFS gateway directly in your browser, e.g.:
|
||||
|
||||
https://bafybeihwj3n7fgccypsiisijwuklg3souaoiqs7yosk5k5lc6ngnhnmnu4.ipfs.dweb.link/
|
||||
|
||||
https://bafybeihwj3n7fgccypsiisijwuklg3souaoiqs7yosk5k5lc6ngnhnmnu4.ipfs.cf-ipfs.com/
|
||||
|
||||
The problem with this approach is that you haven’t verified the response, so you don’t know if the response **matches the CID.** In effect, you are **trusting the gateway** to return the correct response.
|
||||
|
||||
Another minor challenge that arises is that each version you load and each gateway you load it from will have a different origin, so any local state the dapp relies on in localStorage or IndexedDB will be tied to that specific version of the dapp (CID) at that specific gateway, i.e., `bafy1.ipfs.cf-ipfs.com` is a different origin to `bafy1.ipfs.dweb.link` even though they are the same CID.
|
||||
|
||||
| | Rating |
|
||||
| -------------------------------- | ------------------- |
|
||||
| Verifiable | ❌ |
|
||||
| Resilience/Censorship resistance | 👍 (other gateways) |
|
||||
|
||||
> **Note:** Resilience depends on whether the content has been cached and the number of providers/copies on the network
|
||||
|
||||
Note that some Dapp developers will run their own dedicated gateways either on their infrastructure or by using a dedicated gateway service, e.g. Pinata, Filebase. This can result in better performance. As for trust, it shifts it around, and without verification, the users are left to decide whether they trust the gateway operator.
|
||||
|
||||
### With a local IPFS node
|
||||
|
||||
If you have a local IPFS node installed, e.g. [Kubo](https://docs.ipfs.tech/install/command-line/) or [IPFS Desktop](https://docs.ipfs.io/install/ipfs-desktop/), then you can use the IPFS gateway exposed by your local node. It looks as follows: http://bafybeihwj3n7fgccypsiisijwuklg3souaoiqs7yosk5k5lc6ngnhnmnu4.ipfs.localhost:8080/
|
||||
|
||||
Note that it will only work if you are running an IPFS node with the gateway listening on port 8080.
|
||||
|
||||
When you open this URL, the local IPFS node will handle content routing (finding providers for the CID), fetching the content, and verification.
|
||||
|
||||
The main hurdle with this approach is that it requires running an IPFS node in addition to typing a long URL. But you get the full benefits of **verifiability. The only thing you need to trust is the CID you received is indeed the one published by Uniswap.**
|
||||
|
||||
From a performance perspective, it may be slow on the first load, but once fetched and cached locally, a given CID will essentially load instantly.
|
||||
|
||||
| | Rating |
|
||||
| -------------------------------- | ------ |
|
||||
| Verifiable | 👍 |
|
||||
| Resilience/Censorship resistance | 👍 |
|
||||
|
||||
(Depends on whether the gateway has it cached and the number of providers/copies on the network)
|
||||
|
||||
### With a local IPFS node & IPFS Companion browser extension
|
||||
|
||||
[IPFS Companion](https://docs.ipfs.tech/install/ipfs-companion/) is a browser extension that simplifies access to IPFS resources and adds browser support for the IPFS protocol. It allows you to type IPFS protocol URLs, i.e., `ipfs://bafy...` directly in the browser, thereby improving the UX.
|
||||
|
||||
Under the hood, IPFS companion handles IPFS URLs and redirects them to the gateway of the local IPFS node.
|
||||
|
||||
| | Rating |
|
||||
| -------------------------------- | ------ |
|
||||
| Verifiable | 👍 |
|
||||
| Resilience/Censorship resistance | 👍 |
|
||||
|
||||
IPFS Companion also supports [DNSLink](https://dnslink.dev/) resolution (DNSLink is covered in more detail at the bottom of the article). When a user visits a URL, Companion will check for a [DNSLink](https://dnslink.dev/) DNS record for the hostname and, if found, will load the dapp from the local gateway instead of the remote origin. In this instance, trust is only delegated for the DNS resolution (hostname → CID).
|
||||
|
||||
### With the Brave browser
|
||||
|
||||
[Brave Browser](https://brave.com/ipfs-support/) comes with native support for IPFS URLs that can be resolved by a public gateway or the built-in IPFS node. The latter is practically the same as the previous approach with a local IPFS node and the IPFS companion browser extension, though the user experience is better because it works out of the box.
|
||||
|
||||
| | Rating |
|
||||
| -------------------------------- | ------ |
|
||||
| Verifiable | 👍 |
|
||||
| Resilience/Censorship resistance | 👍 |
|
||||
|
||||
## When running a Kubo node is not an option
|
||||
|
||||
All the previous examples that are verifiable depend on the user running an IPFS node, typically Kubo, a Go-based implementation of IPFS that runs as a separate process to the browser. Having a separate process frees you from the constraints imposed by browsers and affords more resources for the node to establish more connectivity to other IPFS nodes.
|
||||
|
||||

|
||||
|
||||
**However, running a Kubo node comes at the cost of a higher barrier to adoption, and in the case of mobile phones, is not an option.**
|
||||
|
||||
## What if content addressing were native to the web?
|
||||
|
||||
In an ideal world, content addressing would be native to the web, but what could that look like?
|
||||
|
||||
Content addressing is a paradigm shift to security on the web that is rooted in the same-origin policy. In many ways, this requires a reimagining of parts of the web which is beyond the scope of this post (though if you’re interested, check out Robin Berjon’s work on the [Web Tiles](https://berjon.com/web-tiles/).)
|
||||
|
||||
Browser vendors tend to be defensive about adding new browser APIs and implementing specs for a myriad of reasons: maintenance burden, security risks, and lack of financial incentive.
|
||||
|
||||
At a minimum, native IPFS support would involve the ability for the web browser itself to verify the integrity of content-addressed sites. A glimpse into that future is presented by `ipfs://` and `ipns://` in [Brave](https://brave.com/ipfs-support/) and [ipfs-chromium](https://github.com/little-bear-labs/ipfs-chromium/). It may arrive sooner in mainstream browsers if WebExtensions like [IPFS Companion](https://github.com/ipfs/ipfs-companion) can [register a protocol handler that is backed by a Service Worker](https://github.com/ipfs/in-web-browsers/issues/212).
|
||||
|
||||

|
||||
|
||||
Since it will likely take time to come to fruition, the next section below will cover the pragmatic interim approaches to in-browser verified retrieval of CIDs.
|
||||
|
||||
## In-browser verified retrieval of CIDs
|
||||
|
||||
To understand the emerging landscape of approaches to IPFS in the browser, it’s crucial to first understand some of the inherent constraints of the browser.
|
||||
|
||||
### Browser constraints
|
||||
|
||||
Browsers are sandboxed runtime environments that place critical constraints for using IPFS:
|
||||
|
||||
- Limits on the type (WebSockets, WebRTC, WebTransport) and number of connections a browser tab can open and/or [fail to dial before being blocked or throttled](https://github.com/ipfs/in-web-browsers/issues/211). This can hinder content routing DHT traversals and content retrieval connections.
|
||||
- If a website is in a [secure context](https://developer.mozilla.org/en-US/docs/Web/Security/Secure_Contexts) when served over HTTPS, like most websites today, you are constrained to only opening connections to origins with a CA-signed TLS certificate, something that peers in the IPFS network rarely have. As you’ll see, there are two exceptions to this, namely WebTransport and WebRTC, that we’ll look into in the next section.
|
||||
- Limits on the resources an inactive browser tab consumes, i.e., when you keep a tab open but it becomes inactive by moving to a different tab.
|
||||
|
||||
### Approaches to IPFS in the browser
|
||||
|
||||
From a high level, several threads of work remove the need to run a Kubo node:
|
||||
|
||||
- [**Trustless Gateway**](https://specs.ipfs.tech/http-gateways/trustless-gateway/): a *subset* of the [path-gateway](https://specs.ipfs.tech/http-gateways/path-gateway/) that allows for light IPFS clients to retrieve both the CIDs bytes and verification metadata (the Merkle DAG), thereby allowing you to [verify its integrity without trusting the gateway](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval).
|
||||
- [Delegated routing](https://docs.ipfs.tech/concepts/how-ipfs-works/#how-content-routing-works-in-ipfs): a mechanism for IPFS implementations to use for [offloading content routing, peer routing, and naming to another server over HTTP](https://specs.ipfs.tech/routing/http-routing-v1/). This allows browsers to skip traversing the DHT and opening many connections in the process.
|
||||
- [WebTransport](https://connectivity.libp2p.io/#webtransport): a new browser API to open persistent duplex connections from the browser in a similar fashion to WebSockets. But in contrast with WebSocket, [WebTransport supports self-signed certificates](https://connectivity.libp2p.io/#webtransport?tab=certificate-hashes), allowing its use in a p2p setting without reliance on certificate authorities. WebTransport support was also added to Kubo over a year ago, which in theory means that browsers should be able to connect to any arbitrary Kubo node even in a [Secure Context](https://developer.mozilla.org/en-US/docs/Web/Security/Secure_Contexts).
|
||||
- WebRTC Direct: though WebRTC was designed for browser-to-browser, it can also be used for [browser-to-server connectivity](https://connectivity.libp2p.io/#webrtc?tab=browser-to-standalone-node) without trusted TLS certificates (see [spec](https://github.com/libp2p/specs/blob/master/webrtc/webrtc-direct.md)). This is useful in browsers like Safari and Firefox where WebTransport might not be available (as of Q1 2024).
|
||||
- [Service Worker API](https://developer.mozilla.org/en-US/docs/Web/API/Service_Worker_API): a browser API that allows, among other things, intercepting network requests in web applications for either caching or providing offline functionality. Service workers can be used to implement a caching and verification layer by intercepting HTTP requests to IPFS gateways in existing apps that already use IPFS gateways without verifying.
|
||||
|
||||
[**Helia**](https://helia.io/) is where most of the active work is happening and implements many of these approaches for better IPFS support in the browser.
|
||||
|
||||
### Helia and IPFS in the browser
|
||||
|
||||
[Helia](https://github.com/ipfs/helia) is a lean, modular TypeScript implementation of IPFS that can run in server JS runtimes, e.g. Node.js and Deno, as well as in the browser. To explain browser-specific use-cases, this section will focus solely on **Helia in the browser.**
|
||||
|
||||
From a high level, Helia can do two main things in the browser:
|
||||
|
||||
- **Manage content-addressed data:** serializing user input and objects into content-addressable representation like dag-json or UnixFS (typically referred to as codecs in IPLD land), and packing CAR files.
|
||||
- **Verified retrieval of CIDs**: e.g. given a CID, find providers for it, fetch it and verify the data for it. Helia can retrieve CIDs using both [Bitswap](https://specs.ipfs.tech/bitswap-protocol/) (over libp2p) and the [Trustless Gateway](https://specs.ipfs.tech/http-gateways/trustless-gateway/) (over HTTPS).
|
||||
|
||||
> **Note:** the short-lived nature of a browser tab makes it **unsuitable for providing CIDs to the network**. Even though in theory, Helia is capable of this, it's not recommended. The most practical approach to publishing CIDs from the browser is to delegate that to a long-running IPFS node, either by uploading directly to a [pinning service](https://docs.ipfs.tech/concepts/persistence/#pinning-services) or uploading CIDs to a self-hosted IPFS node.
|
||||
|
||||
### Verifying top-level pages, sub-resources, and async data
|
||||
|
||||
An important distinction to make in web applications is between top-level pages, sub-resources, and async resources and how they can be verified:
|
||||
|
||||
- **Top-level pages** refers to the initial HTML payload that is returned to the first request by the browser to a given address and bootstraps the loading of an app. For example, the `index.html` file in a given version of the IPFS website: [bafybeidfqp36qutohidaaapir743mvjefv5ipkbrvqx3li3x6vm47vrdam](https://explore.ipld.io/#/explore/bafybeidfqp36qutohidaaapir743mvjefv5ipkbrvqx3li3x6vm47vrdam/index.html).
|
||||
**Verification:** as discussed above, this is currently only possible with a local IPFS node that does top level verification when you load a CID via the local gateway, i.e. `cid.ipfs.localhost:8080`.
|
||||
- **Sub-resources** refer to resources loaded after the initial HTML of the page was loaded, like JS, CSS, and image files that are included in script tags of the initial HTML. These resources may be from the same or other origins (unless explicitly prohibited by the [Content security policy](https://web.dev/articles/csp) set by the server).
|
||||
**Verification:** One way is by loading the top level CID from a local gateway and ensuring that sub-resources are also loaded from the local node by using relative paths.
|
||||
Another way relies on a feature called [Subresource Integrity (SRI)](https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity) that ensures the browser verifies the hash of `<script>` and `<link>` elements with the integrity attribute, however, this has limited utility for CIDs since the SHA256 hash matches the hash in the CID only if the resources were encoded with raw leaves and fit into a single IPFS Block; because [IPFS chunks files before hashing and may result in different hashes](https://docs.ipfs.tech/concepts/faq/#why-doesn-t-my-sha-hash-match-my-cid).
|
||||
Another way, which is explored in more detail below, is to abstract much of the verification and fetching of CIDs into service workers, which allows you to intercept network requests and verify resources.
|
||||
- **Async data** refers to data that is fetched asynchronously during the runtime of the app with the `fetch` API, e.g. JSON returned from an API.
|
||||
**Verification:** possible by using Helia or one of the abstractions on top of Helia to fetch CIDs. Like sub-resources, this can be abstracted into a service worker, so that the application code is just making fetch requests to relative path style gateways, e.g. `/ipfs/[CID]` in the app.
|
||||
|
||||
ℹ️ **Today, Helia can fetch and verify async data and sub-resources. However, top-level verification without deeper browser integration remains an open engineering problem that the [IPFS Dapps working group](https://ipfs.fyi/dapps-wg) is working on.**
|
||||
|
||||
### Verified retrieval data with Helia
|
||||
|
||||
Let’s look at a real-world example, and how you could add Helia (or another library) to add verification. The Uniswap frontend makes a bunch of trusted async fetch requests to the Cloudflare IPFS gateway without verifying the response.
|
||||
|
||||
One of them is to the following URL: `https://cloudflare-ipfs.com/ipns/tokens.uniswap.org` whose response is a JSON object of the tokens supported by Uniswap. This URL contains a [DNSlink](#dnslink) (which is covered in more detail below) to resolve to a CID. For the sake of simplicity, let's assume that we already have the resolved CID: `bafybeia5ci747h54m2ybc4rf6yqdtm6nzdisxv57pk66fgubjsnnja6wq4`.
|
||||
|
||||
The code for fetching this token list JSON from a trusted gateway looks along the lines of:
|
||||
|
||||
```jsx
|
||||
// Fetch a JSON document from a (trusted) IPFS gateway URL.
// Note: the response is NOT verified against a CID — the gateway is
// trusted to return the correct bytes.
// Throws if the HTTP response is not OK (non-2xx status).
const fetchJsonFromGateway = async (url) => {
  const response = await fetch(url)

  if (!response.ok) {
    throw new Error('failed to fetch')
  }

  // Fix: the original assigned to an undeclared `json`, creating an
  // implicit global (a ReferenceError in strict mode / ES modules).
  const json = await response.json()

  return json
}
||||
|
||||
// Example usage: fetch the Uniswap token list from the Cloudflare IPFS
// gateway (trusted, unverified retrieval — see the Helia example below
// for the verified equivalent).
const tokenListUrl = `https://cloudflare-ipfs.com/ipfs/bafybeia5ci747h54m2ybc4rf6yqdtm6nzdisxv57pk66fgubjsnnja6wq4`
const tokenList = await fetchJsonFromGateway(tokenListUrl)
|
||||
```
|
||||
|
||||
With Helia, fetching and verifying the CID could look as follows:
|
||||
|
||||
```ts
|
||||
import { createHeliaHTTP } from '@helia/http'
|
||||
import { CID } from 'multiformats'
|
||||
import { unixfs } from '@helia/unixfs'
|
||||
|
||||
const verifiedFetch = async (cid: string) => {
|
||||
const helia = await createHeliaHTTP()
|
||||
const fs = unixfs(helia)
|
||||
|
||||
const decoder = new TextDecoder()
|
||||
let unparsedJson = ''
|
||||
|
||||
for await (const chunk of fs.cat(CID.parse(cid))) {
|
||||
unparsedJson += decoder.decode(chunk, {
|
||||
stream: true,
|
||||
})
|
||||
}
|
||||
|
||||
return JSON.parse(unparsedJson)
|
||||
}
|
||||
|
||||
const tokenListCid = `bafybeia5ci747h54m2ybc4rf6yqdtm6nzdisxv57pk66fgubjsnnja6wq4`
|
||||
const tokenList = await verifiedFetch()
|
||||
```
|
||||
|
||||
The example above is more convoluted than necessary because the JSON is encoded as UnixFS, which is the default encoding for files and directories in IPFS. When working with JSON, it's better to encode the data with one of `json`, `dag-json`, or `dag-cbor` codecs which are more suitable and provide better ergonomics for working with JSON data.
|
||||
|
||||
To demonstrate, here's an example with the same token list JSON encoded as `json` which has the CID `bagaaieracglt4ey6qsxtvzqsgwnsw3b6p2tb7nmx5wdgxur2zia7q6nnzh7q`
|
||||
|
||||
```ts
|
||||
import { CID } from 'multiformats'
|
||||
import { createHeliaHTTP } from '@helia/http'
|
||||
import { json } from '@helia/json'
|
||||
|
||||
const fetchJsonCid = async (cid: string) => {
|
||||
const helia = await createHeliaHTTP()
|
||||
const j = json(helia)
|
||||
|
||||
return await j.get(CID.parse(cid))
|
||||
}
|
||||
|
||||
// Example usage: same token list as before, but published with the
// `json` codec, so it can be fetched and verified in a single call.
const tokenListCid = `bagaaieracglt4ey6qsxtvzqsgwnsw3b6p2tb7nmx5wdgxur2zia7q6nnzh7q`
const tokenList = await fetchJsonCid(tokenListCid)
|
||||
```
|
||||
|
||||
See how these two compare below:
|
||||
|
||||
<iframe src="https://codesandbox.io/embed/qx7tw3?view=Editor+%2B+Preview&module=%2Fsrc%2Findex.ts"
|
||||
style="width:100%; height: 500px; border:0; border-radius: 4px; overflow:hidden;"
|
||||
title="helia-json-vs-unixfs-fetch (@helia/http@1)"
|
||||
allow="accelerometer; ambient-light-sensor; camera; encrypted-media; geolocation; gyroscope; hid; microphone; midi; payment; usb; vr; xr-spatial-tracking"
|
||||
sandbox="allow-forms allow-modals allow-popups allow-presentation allow-same-origin allow-scripts"
|
||||
></iframe>
|
||||
|
||||
This is more involved than the `fetch` API, but comes with all the benefits of IPFS: data is verified and can be fetched from more than one gateway, thereby increasing resilience.
|
||||
|
||||
### Making Helia lighter and developer-friendly
|
||||
|
||||
To make it easier for developers to adopt Helia in dapps that lean heavily on gateways, we've been working on a couple of improvements:
|
||||
|
||||
- [Configurable block brokers](https://github.com/ipfs/helia/pull/280): a generic interface for resolving CIDs to blocks. Allows developers to choose (and even implement their own) block fetching approach for their app, e.g. Trustless Gateways, Bitswap, or a combination of the two. [Released in Helia v2.1.0](https://github.com/ipfs/helia/releases/tag/helia-v2.1.0)
|
||||
- [@helia/http](https://github.com/ipfs/helia/issues/289): A browser optimised version of Helia that leans on trustless gateways and delegated routing to enable verified retrieval. This was the package that was used in the examples above.
|
||||
- [@helia/verified-fetch](https://github.com/ipfs/helia/issues/348): A library that would provide a similar interface to the [Fetch API](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API) and accept native `ipfs://` and `ipns://` URIs and function like an IPFS gateway. We intend for it to serve as a drop-in replacement for `fetch` requests to trusted gateways.
|
||||
|
||||
### Helia in a Service Worker
|
||||
|
||||
Another thread of work involves a [Service Worker](https://github.com/w3c/ServiceWorker/blob/main/explainer.md) registered by the app that intercepts CID requests to gateways (that are unverified) and uses Helia to fetch and verify. This works for sub-resources and async data and assumes that the app already fetches CIDs from a trusted IPFS gateway, e.g. `fetch('/ipfs/[CID]')...` , because they can be detected and handled by the service worker.
|
||||
|
||||
From a technical perspective, the service worker is tied to the app’s origin and registered by the app’s code. Helia is imported and handles CID requests by fetching the raw blocks of the requested CID from trustless gateways (or directly from peers with supported transports), verifying, and caching.
|
||||
|
||||
It’s worth noting that caching is one of the primary reasons that service workers allow intercepting HTTP requests. Since CIDs are immutable, they make for an easy cache.
|
||||
|
||||
The benefit of this approach is that it can be adopted by apps that already rely on trusted gateways without significant architectural changes.
|
||||
|
||||
Check out the [Helia service worker gateway repo](https://github.com/ipfs-shipyard/helia-service-worker-gateway) to learn more about this approach or try it out on https://helia-service-worker-gateway.on.fleek.co/.
|
||||
|
||||
### Local app installer
|
||||
|
||||
The local app installer approach was recently laid out in a [blog post](https://www.liquity.org//blog/decentralizing-defi-frontends-protecting-users-and-protocol-authors) by the Liquity team. The idea is that you have a static web app that serves as a local installer which facilitates the fetching and verifying of dapps directly in the browser. The [local app installer](https://github.com/edmulraney/nohost) consists of a PWA and utilizes a service worker with the ENS client library and Helia to resolve ENS names, download and verify dapps and cache them locally. The local app installer is developed in the [nohost](https://github.com/edmulraney/nohost) repository.
|
||||
|
||||

|
||||
|
||||
Top level integrity remains a challenge for verifying the initial installer PWA code. To address this, the Liquity team is exploring packaging the installer as part of a browser extension.
|
||||
|
||||
It’s worth pointing out that in this approach, each locally installed dapp must still be isolated into its own origin. The challenge here is that the initial payload (for the first load) for each origin, must still come from somewhere, i.e. a trusted server. Following initial payload, the frontend must only be fetched and verified once because it’s locally cached by the service worker.
|
||||
|
||||
For this reason, along with the inherent challenges of the web security model laid out earlier in this post, it’s useful to think about trust as a spectrum. In this approach trust is minimised to the initial interaction. To delve deeper into this approach, check out Liquity’s blog [post](https://www.liquity.org/blog/decentralizing-defi-frontends-protecting-users-and-protocol-authors).
|
||||
|
||||
## Most users don’t use CIDs directly
|
||||
|
||||
For the sake of simplicity, we assumed throughout this post that the starting point for a user is a CID, but in reality, this is rarely the case.
|
||||
|
||||
CIDs are long and not very human-readable, so they tend to be abstracted from the user. Moreover, because CIDs represent an immutable version of the frontend, giving users the latest versions requires something like a persistent address that can be updated upon every release.
|
||||
|
||||
## Naming systems and mutable pointer
|
||||
|
||||
There are three common approaches to this problem that provide a **stable identifier** that can change upon version releases. The following is a high level comparison:
|
||||
|
||||
- **DNSLink**
|
||||
- **What are they:** A DNS TXT record points to a specific CID.
|
||||
- **Human friendly:** 👍
|
||||
- **Verifiable:** 👎
|
||||
- **Example name:** [`blog.ipfs.tech`](http://blog.ipfs.tech) (technically `_dnslink.blog.ipfs.tech`)
|
||||
- **Integration with the IPFS:** through IPFS gateways under the `/ipns` namespace: [`ipfs.io/ipns/blog.ipfs.tech/`](http://ipfs.io/ipns/DNS.NAME) or using subdomain resolution: [`https://blog-ipfs-tech.ipns.cf-ipfs.com/`](https://blog-ipfs-tech.ipns.cf-ipfs.com/)
|
||||
- **Ethereum Name System** (**ENS):**
|
||||
- **What are they:** records for a `.ETH` name are stored on-chain and can point to any URL or CID, e.g. `ipfs://bafy...`
|
||||
- **Human friendly:** 👍
|
||||
- **Verifiable:** Potentially
|
||||
- **Example name:** `vitalik.eth`
|
||||
- **Integration with the IPFS:**
|
||||
- **IPFS path gateways:** under the `/ipns` namespace: [ipfs.io/ipns/vitalik.eth](http://ipfs.io/ipns/vitalik.eth)
|
||||
- **Subdomain gateways:** subdomain resolution (dots become dashes): [vitalik-eth.ipns.dweb.link](https://vitalik-eth.ipns.dweb.link/)
|
||||
- Using an ENS resolver like [eth.link](http://eth.link) or eth.limo: [vitalik.eth.limo](https://vitalik.eth.limo)
|
||||
- **IPNS**
|
||||
- **What are they:** mutable pointers based on public keys and signed IPNS records pointing to a CID. Typically published to the DHT, though IPNS is transport agnostic and can be resolved and advertised using the delegated routing HTTP API.
|
||||
- **Human friendly:** 👎
|
||||
- **Verifiable:** 👍
|
||||
- **Example name:** `k51qzi5uqu5dhp48cti0590jyvwgxssrii0zdf19pyfsxwoqomqvfg6bg8qj3s`
|
||||
- **Integration with the IPFS:** through IPFS gateways
|
||||
- Path resolution: `https://cloudflare-ipfs.com/ipns/k51qzi5uqu5dhp48cti0590jyvwgxssrii0zdf19pyfsxwoqomqvfg6bg8qj3s`
|
||||
- Subdomain resolution : `https://k51qzi5uqu5dhp48cti0590jyvwgxssrii0zdf19pyfsxwoqomqvfg6bg8qj3s.ipns.dweb.link/`
|
||||
|
||||
Some of these approaches can be combined, and there are some crucial security implications to each of the approaches and the way they are implemented.
|
||||
|
||||
In the next sections, we’ll dive into the details and trade-offs of each of these approaches.
|
||||
|
||||
### DNSLink
|
||||
|
||||
[DNSLink](https://dnslink.dev/) uses DNS [TXT](https://en.wikipedia.org/wiki/TXT_record) records in the `_dnslink` subdomain to map a DNS name, such as `blog.ipfs.tech` to an IPFS path, e.g. `/ipfs/bafy..`
|
||||
|
||||
The main benefit of DNSLink is that it relies on all existing DNS infrastructure and tooling to provide stable human-friendly names that can be updated. The main drawback of DNSLink is that it comes with the same risks and attack surface associated with DNS records mentioned earlier in the post, most notably is the lack of verifiability. This can potentially be addressed by things like DNSSec and querying multiple DNS resolvers.
|
||||
|
||||
For example, the Spark UI from the MakerDAO ecosystem is published to IPFS and uses DNSLink. Their DNSLink TXT record is `_dnslink.app.spark.fi` and has the value set to (at the time of writing):
|
||||
|
||||
`dnslink=/ipfs/bafybeihxc3olye3k2z4ty6ete7qe6mvtplq52ixpqgwkaupqxwxsmduscm`
|
||||
|
||||
DNSLinks can be resolved in a browser in two ways:
|
||||
|
||||
- Using an IPFS gateway, under the ipns namespace, e.g. [ipfs.io/ipns/blog.ipfs.tech/](http://ipfs.io/ipns/DNS.NAME) or to ensure origin isolation, with the subdomain gateway would be [https://blog-ipfs-tech.ipns.dweb.link](https://blog-ipfs-tech.ipns.dweb.link/). (when using the subdomain gateway, dots are converted to dashes to avoid origin and TLS certificate complexity).
|
||||
- Directly with the DNS name when it's pointing to an IPFS Gateway. The IPFS gateway will resolve the DNSLink based on the `host:` header, e.g. https://app.spark.fi/.
|
||||
|
||||
### Ethereum Name System (ENS)
|
||||
|
||||
ENS is a crypto native on-chain domain registry. Records for a `.ETH` namespace can be purchased and configured on-chain, by interacting with the ENS smart contracts.
|
||||
|
||||
Each ENS name can have multiple records to link different profiles, e.g. GitHub, Twitter, and IPFS CIDs. The `contenthash` field can be used to point to a `ipfs://bafy...` URL, as specified [ENSIP-7](https://docs.ens.domains/ens-improvement-proposals/ensip-7-contenthash-field).
|
||||
|
||||
While ENS has a lot of similarities with DNS, like the dot-separated hierarchical structure, it is a fundamentally different system. Most notably, `.eth` is not a valid TLD in DNS, which means that it doesn’t natively resolve in most browsers.
|
||||
|
||||
To address this challenge, several solutions have emerged to allow easily resolving `.eth` domains in the browser:
|
||||
|
||||
- Cloudflare operates [eth.link](http://eth.link), which allows resolving ENS names with a content hash by appending `.link` to the ENS name. For example, [vitalik.eth.link](http://vitalik.eth.link) will load the content hash set on `vitalik.eth`.
|
||||
Under the hood, eth.link uses EthDNS to access information from ENS via DNS. In other words, it provides a DNS interface to the on-chain ENS registry. eth.link also provides a [DNS-over-HTTPS](https://en.wikipedia.org/wiki/DNS_over_HTTPS) endpoint to perform DNS resolution of .eth records: `https://eth.link/dns-query`. For example, `curl -s -H "accept: application/dns-json" "https://eth.link/dns-query?name=vitalik.eth&type=TXT"` will return the ENS records of `vitalik.eth`.
|
||||
- [Eth.limo](http://Eth.limo) is a similar service to [eth.link](http://eth.link) that functions similarly, e.g. [vitalik.eth.limo](http://vitalik.eth.limo).
|
||||
- Using an IPFS gateway, under the `ipns` namespace, e.g. [ipfs.io/ipns/vitalik.eth](http://ipfs.io/ipns/vitalik.eth) (path resolution) or [vitalik-eth.ipns.dweb.link](http://vitalik-eth.ipns.dweb.link) for subdomain resolution (when using the subdomain gateway, dots are converted to dashes to avoid origin and TLS certificate complexity).
|
||||
Under the hood, the IPFS gateway treats these the same way as DNSLink, but resolves `.eth` TLD via special ENS2DNS bridges (the default one is DoH at `resolver.cloudflare-eth.com`, [configurable in Kubo](https://github.com/ipfs/kubo/blob/master/docs/config.md#dnsresolvers)).
|
||||
- The Metamask browser plugin will automatically redirect .eth addresses to an IPFS gateway, as described above.
|
||||
- The Brave browser supports `.eth` domains and resolves them using the Cloudflare EthDNS resolver.
|
||||
|
||||
#### Verifiability of ENS
|
||||
|
||||
The fact that ENS domains are registered on-chain makes them verifiable in principle. However, in the solutions laid out above, trust is delegated to a trusted server which handles the resolution of the ENS name to the CID, e.g. [eth.limo](http://eth.limo), or the DoH resolver at https://resolver.cloudflare-eth.com/dns-query.
|
||||
|
||||
ENS names can be resolved in the browser using the Ethereum RPC API by retrieving the state from the chain, however, trust is just shifted to the Ethereum RPC API endpoint.
|
||||
|
||||
A more verifiable approach would be to use an Ethereum light client, like [Helios](https://github.com/a16z/helios) or [eth-verifiable-rpc](https://github.com/dappnetbby/eth-verifiable-rpc), to verify ENS state using merkle proofs and the Ethereum state root hash, though this is still experimental and far from a common pattern in dapps.
|
||||
|
||||
### IPNS
|
||||
|
||||
IPNS is a system for creating [cryptographically verifiable mutable pointers](https://specs.ipfs.tech/ipns/ipns-record/) to CIDs known as **IPNS names**, for example, [`k51qzi5uqu5dhp48cti0590jyvwgxssrii0zdf19pyfsxwoqomqvfg6bg8qj3s`](https://cid.ipfs.tech/#k51qzi5uqu5dhp48cti0590jyvwgxssrii0zdf19pyfsxwoqomqvfg6bg8qj3s) is a base36-encoded IPNS name with its public key in-line. The public key can be used to verify corresponding IPNS records, which point to a CID and are signed by the private key. In other words, an IPNS name can be thought of as stable link that can be updated over time.
|
||||
|
||||
IPNS names are key pairs that are not human-friendly (like DNS and ENS), so while they offer a stable pointer that can change over time, you still need to get the IPNS name from _somewhere_.
|
||||
|
||||
A pretty common pattern is for ENS names to point to an IPNS name. Since updating ENS names requires paying gas for the on-chain transaction, this can be avoided by pointing the ENS name to an IPNS name, and updating the IPNS name to a new CID, upon new releases or updates.
|
||||
|
||||
Like CIDs, IPNS names can be resolved using IPFS gateways, either in a [verifiable](https://specs.ipfs.tech/http-gateways/trustless-gateway/#ipns-record-responses-application-vnd-ipfs-ipns-record) or trusted way. Trusted resolution is as simple as adding the name to the URL: `https://cloudflare-ipfs.com/ipns/k51qzi5uqu5dhp48cti0590jyvwgxssrii0zdf19pyfsxwoqomqvfg6bg8qj3s`. Verified IPNS resolution is a bit [more involved](https://specs.ipfs.tech/ipns/ipns-record/#record-verification), but can be done end-to-end with Helia in the browser as follows:
|
||||
|
||||
<iframe src="https://codesandbox.io/embed/f59ttx?view=Editor+%2B+Preview&module=%2Fsrc%2Findex.ts"
|
||||
style="width:100%; height: 500px; border:0; border-radius: 4px; overflow:hidden;"
|
||||
title="Helia-ipns"
|
||||
allow="accelerometer; ambient-light-sensor; camera; encrypted-media; geolocation; gyroscope; hid; microphone; midi; payment; usb; vr; xr-spatial-tracking"
|
||||
sandbox="allow-forms allow-modals allow-popups allow-presentation allow-same-origin allow-scripts"
|
||||
></iframe>
|
||||
|
||||
## Conclusion
|
||||
|
||||
If you reached this far, congratulations. Hopefully, this blog post gave you an overview of the state of dapps on IPFS and the ongoing efforts to make verified retrieval of CIDs the norm.
|
||||
|
||||
While trust remains central to the web, leaning on the verifiability of CIDs is a net win for both dapp developers and users.
|
||||
|
||||
As we make more progress on the [`@helia/verified-fetch`](https://github.com/ipfs/helia/pull/392) library, we will publish more guides and examples demonstrating its broad applicability in dapps.
|
||||
|
||||
If you’re a dapp developer or user using IPFS, your input is valuable. We invite you to join the [IPFS Dapps Working Group](https://ipfs.fyi/dapps-wg) and help us shape the future of dapps on IPFS.
|
||||
195
src/_blog/ipfs-uri-support-in-curl.md
Normal file
@@ -0,0 +1,195 @@
|
||||
---
|
||||
title: IPFS URL support in CURL
|
||||
description: 'CURL 8.4.0 shipped with built-in support for ipfs:// and ipns:// addresses.'
|
||||
author: Mark Gaiser
|
||||
date: 2023-10-16
|
||||
permalink: '/ipfs-uri-support-in-curl/'
|
||||
header_image: '/curl.png'
|
||||
tags:
|
||||
- 'community'
|
||||
- 'URI'
|
||||
- 'URL'
|
||||
- 'HTTP'
|
||||
- 'curl'
|
||||
---
|
||||
|
||||
# `ipfs://` URL support in `curl`
|
||||
|
||||
[CURL 8.4.0](https://github.com/curl/curl/releases/tag/curl-8_4_0) shipped with built-in support for `ipfs://` and `ipns://` addresses.
|
||||
|
||||
This enables `curl` to seamlessly integrate with the user's preferred [IPFS gateway](https://docs.ipfs.tech/reference/http/gateway/) through the `IPFS_GATEWAY` environment variable or a `gateway` file. Best of all, these capabilities are available for immediate use today:
|
||||
|
||||
```bash
|
||||
$ export IPFS_GATEWAY="http://127.0.0.1:8080" # local (trusted) gateway provided by ipfs daemon like Kubo
|
||||
$ curl ipfs://bafkreih3wifdszgljcae7eu2qtpbgaedfkcvgnh4liq7rturr2crqlsuey
|
||||
hello from IPFS
|
||||
```
|
||||
|
||||
In this blog post, we will:
|
||||
- explore the journey of implementing IPFS URI support in CURL,
|
||||
- delve into the mechanics of [how CURL locates an IPFS gateway](#how-does-curl-find-an-ipfs-gateway),
|
||||
- learn how to be immune to [malicious gateways](#malicious-gateways-and-data-integrity),
|
||||
- and finally, provide [practical CURL examples](#curl-examples) for leveraging IPFS URLs for either deserialized or verifiable responses.
|
||||
|
||||
## A brief history
|
||||
|
||||
Supporting IPFS in CURL has been attempted [before](https://github.com/curl/curl/pull/8468) as a CURL library feature. Some discussions led to the belief that this should be implemented in the CURL tool itself, not its library. A renewed [implementation attempt](https://github.com/curl/curl/pull/8805) took the tool-side approach which ultimately was accepted and is available right now in CURL 8.4.0!
|
||||
|
||||
The support of IPFS in CURL effectively consists of two implementation details.
|
||||
|
||||
1. CURL tries to find a locally installed or [configured gateway](#how-does-curl-find-an-ipfs-gateway).
|
||||
2. It then rewrites an `ipfs://bafybeigagd5nmnn2iys2f3doro7ydrevyr2mzarwidgadawmamiteydbzi` to a gateway URL. This is how curl handles it internally, you see nothing of this URL rewriting.
|
||||
|
||||
If you have IPFS installed locally then running `curl ipfs://` will Just Work™. If not, CURL will return an error with details about how to set up the gateway preference. This ensures that user agency is respected: no third-party gateway is used as an implicit default.
|
||||
|
||||
## Why is `ipfs://` URL support so important?
|
||||
|
||||
Why isn't `https://ipfs.io/ipfs/bafybeigagd5nmnn2iys2f3doro7ydrevyr2mzarwidgadawmamiteydbzi` equally acceptable?
|
||||
Or why isn't a local URL `http://localhost:8080/ipfs/bafybeigagd5nmnn2iys2f3doro7ydrevyr2mzarwidgadawmamiteydbzi` fine?
|
||||
|
||||
Both addresses are tied to a specific _location_.
|
||||
|
||||
IPFS is a modular suite of protocols purpose built for the organization and transfer of [content-addressed](https://docs.ipfs.tech/concepts/content-addressing) data. It shouldn't matter where the content is. A Content Identifier ([CID](https://docs.ipfs.tech/concepts/glossary/#cid)) is all that is required. The "where" part is an implementation detail that an IPFS system takes care of. Hardcoding a location in addition to a CID (like a specific HTTP gateway) limits end users to IPFS resources available through that one specific, centralized point of entry.
|
||||
|
||||
If we pull the URL apart we see:
|
||||
|
||||

|
||||
|
||||
Users of the IPFS system should not care about the _where_ part, nor be coerced to use a specific, hard-coded entry point into the system.
|
||||
|
||||
Public gateways like `ipfs.io` are always owned by some entity and could get censored or shut down at any time. Many gateways will not allow playback of deserialized videos or only respond to CIDs from allowlists to reduce costs. Other gateways will block specific CIDs from resolving in specific jurisdictions for legal reasons. Community-run public gateways will have limits and throttle usage.
|
||||
|
||||
These are not limitations of IPFS but purely limitations a specific gateway has set through custom configuration. IPFS users should always have the ability to avoid such limitations if they choose to self-host and [run their own IPFS node with a local gateway](https://docs.ipfs.tech/install/).
|
||||
|
||||
<!-- TODO: remove? feels like duplicate of we already say in this and "malicious" sections, but mentioning ffmpeg blogpost feels like something we should keep somewhere
|
||||
|
||||
This is why running a local node (and therefore a local gateway, it's part of a node) is so important. Even though you still effectively use `http://localhost:8080` as gateway, it's hosted by you locally backed by the many peers your node is connected with. Your experience in using IPFS is going to be best and fastest with a local node. Even when your local gateway isn't working it's easy for you to restart your node and get that gateway back and running. You can't do that on public gateways that you don't control.
|
||||
|
||||
One of the many reasons why we're putting in the effort to make applications recognize IPFS URIs (like [ffmpeg](https://blog.ipfs.tech/2022-08-01-ipfs-and-ffmpeg/)) `ipfs://bafybeigagd5nmnn2iys2f3doro7ydrevyr2mzarwidgadawmamiteydbzi` is to let the application in the background find that gateway you're running and giving you the freedom of being truly distributed! This also allows url's to be shared as IPFS url's (like `ipfs://bafybeigagd5nmnn2iys2f3doro7ydrevyr2mzarwidgadawmamiteydbzi`) without any trace of a (central) gateway and bring us one step closer to a distributed world where it doesn't matter anymore where that data is located.
|
||||
|
||||
-->
|
||||
|
||||
## How does CURL find an IPFS Gateway?
|
||||
|
||||
Any IPFS implementation that has support for [IPIP-280](https://github.com/ipfs/specs/pull/280) exposes an IPFS gateway that CURL (and [ffmpeg](https://blog.ipfs.tech/2022-08-01-ipfs-and-ffmpeg/)) can use. At the moment of writing that's just [Kubo](https://github.com/ipfs/kubo/releases).
|
||||
|
||||
CURL 8.4.0 and greater looks for a gateway in the following order:
|
||||
|
||||
1. `IPFS_GATEWAY`, if set it's used.
|
||||
2. The `--ipfs-gateway` CLI argument.
|
||||
3. The `~/.ipfs/gateway` file, where it reads the first line.
|
||||
|
||||
If a gateway hint is found at any of those places, and if that is a valid HTTP URL, then CURL will use it. If not, then you'll be getting an error message pointing to the [CURL documentation related to IPFS](https://curl.se/docs/ipfs.html) to help you further.
|
||||
|
||||
One can specify any IPFS gateway that is in compliance with [Gateway Specifications](https://specs.ipfs.tech/http-gateways/). It is highly recommended to use a local gateway, as it provides the best security guarantees.
|
||||
|
||||
## Malicious gateways and data integrity?
|
||||
|
||||
Requesting deserialized responses and delegating hash verification to a third-party gateway comes with risks. It is possible that a public gateway is malicious. Or, that a well-known and respected gateway gets hacked and changed to return payload that does not match requested CID. How can one protect themselves against that?
|
||||
|
||||
If deserialized responses are necessary, one should run their own gateway in a local, controlled environment. Every block of data retrieved through a self-hosted IPFS gateway is verified to match the hash from the CID. For the maximum flexibility and security, find an implementation that provides the gateway endpoint (i.e. [Kubo](https://docs.ipfs.tech/install/command-line/)) and run it yourself!
|
||||
|
||||
When using a third-party gateway that one can't fully trust, the only secure option is to [request verifiable response types](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) such as [application/vnd.ipld.raw](https://www.iana.org/assignments/media-types/application/vnd.ipld.raw) (a single block) or [application/vnd.ipld.car](https://www.iana.org/assignments/media-types/application/vnd.ipld.car) (multiple blocks in a CAR archive). Both allow clients to locally verify that the data returned by the gateway matches the requested CID, removing the surface for [Man-in-the-middle attacks](https://en.wikipedia.org/wiki/Man-in-the-middle_attack).
|
||||
|
||||
## CURL Examples
|
||||
|
||||
### Deserialized responses
|
||||
|
||||
::: callout
|
||||
|
||||
By default, a trusted local gateway acts as a bridge between traditional HTTP clients and IPFS.
|
||||
|
||||
It performs the necessary hash verification and UnixFS _deserialization_, and returns reassembled files to the client, as if they were stored in a traditional HTTP server. This means all validation happens on the gateway, and clients trust that the gateway is correctly validating content-addressed data before returning it to them.
|
||||
|
||||
:::
|
||||
|
||||
#### Downloading a file from IPFS with CURL
|
||||
|
||||
```bash
|
||||
$ curl ipfs://bafkreih3wifdszgljcae7eu2qtpbgaedfkcvgnh4liq7rturr2crqlsuey -o out.txt
|
||||
```
|
||||
|
||||
If curl responds with `curl: IPFS automatic gateway detection failure`, make sure `IPFS_GATEWAY` is set (see examples below).
|
||||
|
||||
#### Explicitly specifying a gateway
|
||||
|
||||
To use local gateway on custom port 48080:
|
||||
|
||||
```bash
|
||||
$ export IPFS_GATEWAY=http://127.0.0.1:48080
|
||||
$ curl ipfs://bafkreih3wifdszgljcae7eu2qtpbgaedfkcvgnh4liq7rturr2crqlsuey
|
||||
hello from IPFS
|
||||
```
|
||||
|
||||
When setting environment variable is not feasible, one can use `--ipfs-gateway` instead:
|
||||
|
||||
```bash
|
||||
$ curl --ipfs-gateway http://127.0.0.1:48080 ipfs://bafkreih3wifdszgljcae7eu2qtpbgaedfkcvgnh4liq7rturr2crqlsuey
|
||||
hello from IPFS
|
||||
```
|
||||
|
||||
#### Following subdomain redirects
|
||||
|
||||
::: callout
|
||||
|
||||
By default, the URL resolution in `curl` does not follow HTTP redirects and assumes the endpoint implements deserializing [path gateway](https://specs.ipfs.tech/http-gateways/path-gateway/), or at the very least, the [trustless gateway](https://specs.ipfs.tech/http-gateways/trustless-gateway/).
|
||||
When pointing `curl` at a [subdomain gateway](https://specs.ipfs.tech/http-gateways/subdomain-gateway) (like `https://dweb.link` or the `http://localhost:8080` provided by a [local Kubo node](https://docs.ipfs.tech/how-to/command-line-quick-start/)) one has to pass `-L` in the curl command to follow the redirect.
|
||||
|
||||
:::
|
||||
|
||||
```bash
|
||||
$ IPFS_GATEWAY=http://localhost:8080 curl -s -L ipfs://bafkreih3wifdszgljcae7eu2qtpbgaedfkcvgnh4liq7rturr2crqlsuey
|
||||
hello from IPFS
|
||||
```
|
||||
|
||||
#### Piping and streaming responses
|
||||
|
||||
Deserialized response returned by CURL can be piped directly to a video player:
|
||||
|
||||
```
|
||||
$ curl ipfs://bafybeigagd5nmnn2iys2f3doro7ydrevyr2mzarwidgadawmamiteydbzi | ffplay -
|
||||
```
|
||||
|
||||
### Verifiable responses
|
||||
|
||||
::: callout
|
||||
|
||||
By explicitly requesting [application/vnd.ipld.raw](https://www.iana.org/assignments/media-types/application/vnd.ipld.raw) (a block) or [application/vnd.ipld.car](https://www.iana.org/assignments/media-types/application/vnd.ipld.car) (a stream of blocks) responses, by means defined in [Trustless Gateway Specification](https://specs.ipfs.tech/http-gateways/trustless-gateway/), the user is able to fetch raw content-addressed data and [perform hash verification themselves](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval).
|
||||
|
||||
:::
|
||||
|
||||
#### Fetching and verifying a directory from an untrusted gateway
|
||||
|
||||
Requesting [trustless and verifiable](https://docs.ipfs.tech/reference/http/gateway/#trustless-verifiable-retrieval) CAR response via `Accept` HTTP header:
|
||||
|
||||
```bash
|
||||
$ export IPFS_GATEWAY="https://ipfs.io" # using untrusted public gateway
|
||||
$ curl -H "Accept: application/vnd.ipld.car" "ipfs://bafybeiakou6e7hnx4ms2yangplzl6viapsoyo6phlee6bwrg4j2xt37m3q" > dag.car
|
||||
```
|
||||
|
||||
Then, CAR can be moved around and imported into some other IPFS node:
|
||||
|
||||
```bash
|
||||
$ ipfs dag import dag.car
|
||||
```
|
||||
|
||||
or verified and unpacked locally, without having to run a full IPFS node, with tools like [go-car](https://github.com/ipld/go-car/tree/master/cmd/car#readme) or [ipfs-car](https://www.npmjs.com/package/ipfs-car):
|
||||
|
||||
```
|
||||
$ npm i -g ipfs-car
|
||||
$ ipfs-car unpack dag.car --output dag.out
|
||||
$ ls dag.out
|
||||
1007 - Sustainable - alt.txt
|
||||
1007 - Sustainable - transcript.txt
|
||||
1007 - Sustainable.png
|
||||
```
|
||||
|
||||
## What's next?
|
||||
|
||||
More places supporting IPFS addresses. Everyone can integrate `ipfs://` and `ipns://` URL support into their application. See specifications proposed in [IPIP-280](https://github.com/ipfs/specs/pull/280) for technical details. We are [tracking potential projects](https://github.com/ipfs/integrations/issues) where an integration makes sense! If you feel up to the challenge, don't hesitate to drop a comment in one of the [potential projects](https://github.com/ipfs/integrations/issues) for IPFS URL integration or find us on:
|
||||
|
||||
* [Matrix](https://matrix.to/#/#ipfs-space:ipfs.io), [Discord](https://discord.com/invite/ipfs) or [Slack](https://filecoin.io/slack)
|
||||
* [Discussion Forum](https://discuss.ipfs.tech/)
|
||||
|
||||
Or one of the other many places where the [IPFS community](https://docs.ipfs.tech/community/) is active.
|
||||
|
||||
199
src/_blog/major-improvements-to-omnilingo.md
Normal file
@@ -0,0 +1,199 @@
|
||||
---
|
||||
title: Introducing Major Improvements to Omnilingo
|
||||
description: 'We’re happy to introduce some major improvements to Omnilingo, the decentralised language learning platform designed with special attention to small and marginalised language communities.'
|
||||
date: 2023-11-20
|
||||
permalink: '/major-improvements-to-omnilingo/'
|
||||
header_image: "/omnilingo-x-ipfs.jpg"
|
||||
tags:
|
||||
- omnilingo
|
||||
---
|
||||
|
||||
## Introduction
|
||||
|
||||
Nearly two years ago, the IPFS Dev Grants program funded the first grant for Omnilingo to explore how IPFS could meet the needs of their users - groups with limited bandwidth and applications which work offline-first, allowing full user control of data. You can read the [original post from 2021](https://blog.ipfs.tech/2021-12-17-omnilingo/), and several iterations of the grant later (generously provided by the Filecoin Foundation) we're happy to share an update.
|
||||
|
||||
The mission of Omnilingo is inspiring, and its authors are an incredible team who are pushing on a lot of hard problems all at once, including new approaches to consent-driven data access and revocation patterns. This is critical work and an extraordinarily important use of IPFS that we are happy to shine a light on.
|
||||
|
||||
-- Dietrich Ayala, technical grant advisor to Omnilingo
|
||||
|
||||
## Project Update: Omnilingo
|
||||
|
||||
We're happy to introduce some major improvements to Omnilingo, the decentralised language learning platform designed with special attention to small and marginalised language communities. We now have an experimental
|
||||
contribution system, including an encryption-based consent model.
|
||||
|
||||
## Overview
|
||||
|
||||
We developed Omnilingo two years ago with the goal of making it possible for minority and marginalised language communities to create and curate language-learning data in their languages by developing and publishing formats for language source material hosted on the decentralised filesystem IPFS. Anyone can publish new source material on IPFS, and a compatible Omnilingo client can use this source material to generate language-learning exercises.
|
||||
|
||||
The source material is published in the form of Omnilingo data structures on IPFS; previously this had to be done by a knowledgeable web developer operating an IPFS node. We are happy to present now an interface for contributing samples from our demonstration web client!
|
||||
|
||||
As with any networked system, collecting and preserving data from our users can be done only with their consent. Managing that consent within the context of a decentralised filesystem comes with its own special challenges, and we designed what we think is as good of a privacy- and consent-respecting system as we can.
|
||||
|
||||
Here's a sample user story illustrating how this might be used:
|
||||
|
||||
A language activist encourages members of their endangered language community to contribute their voices, producing a large corpus of spoken audio clips; children of their community and in diaspora can now use Omnilingo to practise outside of the classroom, supporting revitalisation of the language. Decentralisation and the consent system allow the community as a whole as well as individuals to decide who has access to their voices.
|
||||
|
||||
As opposed to most current systems for data collection via crowd sourcing, in Omnilingo, contributors own their own data and can define their own terms and conditions for its use.
|
||||
|
||||
## Omnilingo privacy structures
|
||||
|
||||
Our contribution privacy initiative brings with it a handful of new structures. These are introduced bottom-up; read this section backwards if you prefer a top-down introduction.
|
||||
|
||||
### Omnilingo session keys
|
||||
|
||||
An Omnilingo session key is a [JSON Web Key]; our implementation uses the [SubtleCrypto WebAPI] to generate and encode these keys. Currently we recommend only 256-bit AES-GCM keys, and our Web client supports only this configuration.
|
||||
|
||||
[JSON Web Key]: https://datatracker.ietf.org/doc/html/rfc7517
|
||||
[SubtleCrypto WebAPI]: https://developer.mozilla.org/en-US/docs/Web/API/SubtleCrypto
|
||||
|
||||
Omnilingo session keys form the unit of "consent": for a given session key, users may have contributed several samples. If a user wishes to revoke their consent for a sample, they signal this by unpublishing the session key, thus revoking consent for all samples contributed with that key.
|
||||
|
||||
For a more positive user experience, we recommend the user-facing interface reference session keys by the [pgpfone wordlist] encoding of their fingerprint.
|
||||
|
||||
[pgpfone wordlist]: https://web.archive.org/web/20100326141145/http://web.mit.edu/network/pgpfone/manual/index.html#PGP000062
|
||||
|
||||
### Omnilingo encrypted object
|
||||
|
||||
An Omnilingo encrypted object is an object which has been encrypted by an Omnilingo session key; the structure is:
|
||||
|
||||
```
|
||||
{ "alg": alg // AesKeyGenParams
|
||||
, "keyfpr": keyfpr // key fingerprint: hexadecimal string encoding of the SHA-1 digest of the key
|
||||
, "iv": iv // initialisation vector used
|
||||
, "encdata": encdata // Uint8Array of the encrypted data
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
See [MDN SubtleCrypto digest documentation] for details of how we generate the fingerprint.
|
||||
|
||||
[MDN SubtleCrypto digest documentation]: https://developer.mozilla.org/en-US/docs/Web/API/SubtleCrypto/digest
|
||||
|
||||
We wrap in encrypted objects the MP3 of the contribution as well as the list of Omnilingo clip structures.
|
||||
|
||||
Encrypted clip:
|
||||
```
|
||||
{ "chars_sec": chars_sec
|
||||
, "clip_cid": CID(encrypt(clip_mp3))
|
||||
, "length": length
|
||||
, "meta_cid": meta_cid
|
||||
, "sentence_cid": sentence_cid
|
||||
}
|
||||
```
|
||||
|
||||
### Omnilingo encrypted index
|
||||
|
||||
An Omnilingo encrypted index is similar to the classic Omnilingo root index: a JSON dictionary with language codes as keys and Omnilingo language indices as the values. The `cids` entry of the Omnilingo language index is a list of IPFS CIDs referencing the encrypted lists of Omnilingo clip structures.
|
||||
|
||||
An example:
|
||||
```
|
||||
{ "ab": { "cids": CID(encrypt(clip_list)) } }
|
||||
```
|
||||
|
||||
### Omnilingo encrypted root
|
||||
|
||||
An Omnilingo encrypted root is a JSON dictionary; the keys are fingerprints of Omnilingo session keys, and each value is the CID of an Omnilingo encrypted index encrypted with the corresponding session key.
|
||||
|
||||
```
|
||||
{ "ea6b0c9b2f697c3cbc16fb7978af16aae53bdeb8": "QmdzHipTQWgguLci211Cp3Eh8SWhEnsZA34mGJgGQXYcUV" }
|
||||
```
|
||||
|
||||
Encrypted roots can optionally contain some of the referenced session keys, allowing decryption. In this example, the key `ea6b0c9b...` is included.
|
||||
|
||||
```
|
||||
{ "keys": {
|
||||
"ea6b0c9b2f697c3cbc16fb7978af16aae53bdeb8": JWK(key)
|
||||
}
|
||||
, "dab24db69f6856652275e06c5f092f68623a4041": "QmWug9ie3bpkzVvKDVfuLtksaWsa5Q1DZxsnwmCCAASYj8"
|
||||
, "ea6b0c9b2f697c3cbc16fb7978af16aae53bdeb8": "QmdzHipTQWgguLci211Cp3Eh8SWhEnsZA34mGJgGQXYcUV"
|
||||
}
|
||||
```
|
||||
|
||||
### Omnilingo identity
|
||||
|
||||
An Omnilingo identity is an IPNS key (colloquially referred to as a `k5`). Published to this `k5` is an encrypted root, containing the session keys for which the user (the one controlling the private part of the `k5`) grants consent. The Omnilingo client has been updated to accept Omnilingo identities, fetching and decrypting the contained encrypted indices.
|
||||
|
||||
In the example encrypted root:
|
||||
```
|
||||
{ "keys":{
|
||||
"ea6b0c9b2f697c3cbc16fb7978af16aae53bdeb8": JWK(key)
|
||||
}
|
||||
, "dab24db69f6856652275e06c5f092f68623a4041": "QmWug9ie3bpkzVvKDVfuLtksaWsa5Q1DZxsnwmCCAASYj8"
|
||||
, "ea6b0c9b2f697c3cbc16fb7978af16aae53bdeb8": "QmdzHipTQWgguLci211Cp3Eh8SWhEnsZA34mGJgGQXYcUV"
|
||||
}
|
||||
```
|
||||
|
||||
The material encrypted by session key `ea6b0c9b` can be used with the controlling user's consent, whereas the material encrypted by session key `dab24db6` cannot be any longer, as the user has unpublished the key.
|
||||
|
||||
## Data flows
|
||||
|
||||
There are two new data flows introduced with this system: contributing data, and retrieving contributed data.
|
||||
|
||||
### Contribution
|
||||
|
||||
A contributor client will be drawing sentences from a (presumably classic) Omnilingo language index, and contributing new clips. They start by generating an Omnilingo identity (`k5`) and a session key. The session key is stored locally.
|
||||
|
||||
When the user makes their first contribution (an MP3 recording of them reading a sentence), a new Omnilingo encrypted root index is published to their `k5`:
|
||||
|
||||
```
|
||||
{ "keys": {
|
||||
fpr(key): JWK(key)
|
||||
}
|
||||
, fpr(key): CID({ // encrypted language index
|
||||
"XX": {
|
||||
"cids": [CID(encrypt([ // encrypted clip list
|
||||
encrypted_clip
|
||||
]))]
|
||||
}
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
As the user makes more contributions, the encrypted clip list grows in length, updating the encrypted language index and encrypted root index, each time republished to the `k5`, all under the same session key:
|
||||
|
||||
```
|
||||
{ "keys": {
|
||||
fpr(key): JWK(key)
|
||||
}
|
||||
, fpr(key): CID({ "XX": { "cids": [CID(encrypt(clip_list))] } })
|
||||
}
|
||||
```
|
||||
|
||||
At some point, the user decides to "roll" their session key, creating a new session. (A client might decide to do this automatically, e.g. each time it is opened, or each time the language is switched.) A new session key is generated, and everything propagates up to the user identity (`k5`):
|
||||
|
||||
```
|
||||
{ "keys": {
|
||||
fpr(key1): JWK(key1)
|
||||
, fpr(key2): JWK(key2)
|
||||
}
|
||||
, fpr(key1): CID({ "XX": { "cids": [CID(encrypt(clip_list1))] } })
|
||||
, fpr(key2): CID({ "XX": { "cids": [CID(encrypt(clip_list2))] } })
|
||||
}
|
||||
```
|
||||
|
||||
At some later time, the user decides to revoke consent to use the material recorded under `key1`; the JSON Web Key encoded copy of `key1` is removed, only `fpr(key1)` remains published under their identity:
|
||||
|
||||
```
|
||||
{ "keys": {
|
||||
fpr(key2): JWK(key2)
|
||||
}
|
||||
, fpr(key1): CID({ "XX": { "cids": [CID(encrypt(clip_list1))] } }) // consent revoked
|
||||
, fpr(key2): CID({ "XX": { "cids": [CID(encrypt(clip_list2))] } })
|
||||
}
|
||||
```
|
||||
|
||||
Consumers who have stored `key1` will retain access to this data, just as they would if they had stored the decrypted copies; however, use of it would constitute a violation of the user's consent.
|
||||
|
||||
### Consumption
|
||||
|
||||
Omnilingo consumers now have two types of root indices to deal with: classic root indices and encrypted root indices. An encrypted root index may be detected by the presence of the `keys` field; iterating over this dictionary then gives the consumer a list of fingerprints to look up in the encrypted root index, as well as the key needed to decode the resulting encrypted language index.
|
||||
|
||||
## Concluding remarks
|
||||
|
||||
Omnilingo now has support for user contributions with sovereignty protections, enabling marginalised language communities to produce and control their own data and integrate it into compatible Omnilingo clients in a user-respecting way. Due to the decentralisation allowed by IPFS, such clients can be hosted anywhere on anyone's infrastructure. We look forward to continuing to improve language learner and language activist access to decentralised and sovereignty-preserving language learning systems.
|
||||
|
||||
We invite everyone interested to get involved! Read our [technical paper](https://arxiv.org/abs/2310.06764), check out our [live demo](https://demo.omnilingo.cc), [fork us on GitHub](https://github.com/omnilingo/omnilingo), and join us on Matrix in `#OmniLingo:matrix.org` ([chat now](https://app.element.io/#/room/#OmniLingo:matrix.org)). Our near-term plans include:
|
||||
* full p2p (dropping the required remote Kubo instance)
|
||||
* experimenting with isolated networks (useful e.g. for rural communities)
|
||||
* integration with FileCoin and/or pinning services
|
||||
|
||||
@@ -4,6 +4,22 @@ type: News coverage
|
||||
sitemap:
|
||||
exclude: true
|
||||
data:
|
||||
- title: Filecoin Foundation Successfully Deploys IPFS in Space
|
||||
date: 2024-01-16
|
||||
publish_date:
|
||||
path: https://fil.org/blog/filecoin-foundation-successfully-deploys-interplanetary-file-system-ipfs-in-space/
|
||||
tags:
|
||||
- space
|
||||
- IPFS
|
||||
- satellite
|
||||
- title: Advancing IPFS and libp2p Governance
|
||||
date: 2023-11-14
|
||||
publish_date:
|
||||
path: https://protocol.ai/blog/advancing-ipfs-and-libp2p-governance/
|
||||
tags:
|
||||
- IPFS
|
||||
- libp2p
|
||||
- governance
|
||||
- title: Brave announces automatic NFT backups and enhanced IPFS/Filecoin support in Brave Wallet
|
||||
date: 2023-05-02
|
||||
publish_date:
|
||||
|
||||
78
src/_blog/newsletter-198.md
Normal file
@@ -0,0 +1,78 @@
|
||||
---
|
||||
title: Welcome to IPFS News 198!
|
||||
description: Featuring announcements about Brave's New IPFS Infobar, Amino, and IPFS Connect!
|
||||
date: 2023-10-03
|
||||
permalink: "/newsletter-198"
|
||||
header_image: "/ipfsnews.png"
|
||||
tags:
|
||||
- newsletter
|
||||
---
|
||||
|
||||
## **IPFS Connect 2023 Istanbul 🔭**
|
||||
|
||||
IPFS Connect is a community-run regional conference bringing together all of the builders and ecosystems that rely on and use IPFS as the most widely used decentralized content addressing protocol for files and data. This year's event is happening alongside Devconnect and LabWeek23 in Istanbul, Turkey on November 16. Join the IPFS Community for a full day of workshops, lightning talks, and demos showcasing technology, tools, and innovative projects in the IPFS ecosystem.
|
||||
|
||||
There are several opportunities for you to get involved with this event whether you're a business, organization, or individual.
|
||||
|
||||
<a href="https://blog.ipfs.tech/_2023-ipfs-connect-istanbul/" class="cta-button">Read the blog post</a>
|
||||
|
||||
## **Brand New on IPFS ✨**
|
||||
|
||||
[Brave Browser's New IPFS Infobar](https://blog.ipfs.tech/_2023-brave-infobar/)
|
||||
|
||||
- We’re excited to share a new IPFS-related feature that appears in the most recent version of Brave’s web browser. A new IPFS Infobar will appear at the top of the browser when you visit an IPFS compatible resource such as a CID on a public gateway or a website with a DNSLink. [Learn more here!](https://blog.ipfs.tech/_2023-brave-infobar/)
|
||||
|
||||
[IPFS support was merged into curl](https://twitter.com/bmann/status/1705572964068930010?s=20)
|
||||
|
||||
- Thanks to the hard work and dedication of [Mark Gaiser](https://github.com/markg85), IPFS support was recently [merged into curl](https://github.com/curl/curl/pull/8805#issuecomment-1732260385), a command line tool and library for transferring data with URL syntax. More information and an official announcement are to come, but we're excited for this important milestone. IPFS is already in the curl documentation: [https://curl.se/docs/ipfs.html](https://curl.se/docs/ipfs.html)
|
||||
|
||||
[Amino (the Public IPFS DHT) is getting a facelift](https://blog.ipfs.tech/2023-09-amino-refactoring/)
|
||||
|
||||
- [Read the blog post](https://blog.ipfs.tech/2023-09-amino-refactoring/) to learn all the details and follow this discussion forum thread if you want to be kept up-to-date about further developments: [https://discuss.ipfs.tech/t/dht-discussion-and-contribution-opportunities-in-2023q4/16937/2](https://discuss.ipfs.tech/t/dht-discussion-and-contribution-opportunities-in-2023q4/16937/2)
|
||||
|
||||
[The ProbeLab team needs your help — fill out this survey!](https://tally.so/r/npoo6q)
|
||||
|
||||
- The ProbeLab team developed tools and infrastructure to capture the metrics you see at [https://probelab.io/](https://probelab.io). We want to expand the list of metrics we capture and build new open-source tools that will help protocol designers and application developers get a better idea of where the performance of their application can improve. This is your chance to influence where our team focuses next. [Please fill in the survey and let us know if and how you would be interested to contribute to this line of work.](https://tally.so/r/npoo6q)
|
||||
|
||||
[awesome-ipfs reboot](https://awesome.ipfs.tech/)
|
||||
|
||||
- After lying dormant for many months, the awesome-ipfs website has been cleaned up and rebooted. [Check out the updated version here!](https://awesome.ipfs.tech/)
|
||||
|
||||
[IPFS & Filecoin Ecosystem Roundup](https://www.youtube.com/watch?v=bdOPPnuZnhw)
|
||||
|
||||
- The September Filecoin & IPFS Ecosystem Roundup is online! Check out the video for the latest updates, developments, and insights straight from the community. [Watch it here!](https://www.youtube.com/watch?v=bdOPPnuZnhw)
|
||||
|
||||
## **Around the Ecosystem 🌎**
|
||||
|
||||
[IPFS on AWS, Part 1 – Discover IPFS on a virtual machine](https://aws.amazon.com/blogs/database/part-1-ipfs-on-aws-discover-ipfs-on-a-virtual-machine/)
|
||||
|
||||
- Did you know you can run IPFS on AWS? In this 3-part series on the AWS Database Blog, you'll learn how to do it thanks to a step-by-step guide. [Check it out!](https://aws.amazon.com/blogs/database/part-1-ipfs-on-aws-discover-ipfs-on-a-virtual-machine/)
|
||||
|
||||
[OrbitDB v1.0 releases](https://github.com/orbitdb/orbitdb)
|
||||
|
||||
- "OrbitDB is a serverless, distributed, peer-to-peer database. OrbitDB uses IPFS as its data storage and Libp2p Pubsub to automatically sync databases with peers. It's an eventually consistent database that uses Merkle-CRDTs for conflict-free database writes and merges making OrbitDB an excellent choice for p2p and decentralized apps, blockchain applications and local-first web applications." [Learn more here!](https://github.com/orbitdb/orbitdb)
|
||||
|
||||
[New in the Ecosystem Directory: dAppling](https://ecosystem.ipfs.tech/project/dappling/)
|
||||
|
||||
- Easy way for web3 developers to deploy their frontend to IPFS with a great developer experience. Connect your github and have a deployed site in a few clicks. Automatic CI/CD / Preview Builds / ENS support. [Check it out here!](https://ecosystem.ipfs.tech/project/dappling/)
|
||||
|
||||
[New in the Ecosystem Directory: ODD SDK](https://ecosystem.ipfs.tech/project/odd-sdk/)
|
||||
|
||||
- ODD SDK is Fission's true local-first, edge computing stack. ODD SDK empowers you to build fully distributed web applications with auth and storage without needing a complex backend. [View it here!](https://ecosystem.ipfs.tech/project/odd-sdk/)
|
||||
|
||||
[Popular on the Forums: Questions about a Private IPFS Setup](https://discuss.ipfs.tech/t/how-to-set-up-my-own-bootstrap-nodes-to-enable-discovery-and-connection-between-nodes-with-public-ip-and-nodes-on-a-local-network/16910)
|
||||
|
||||
- "How [do I] set up my own bootstrap nodes to enable discovery and connection between nodes with public IP and nodes on a local network?" [Read the discussion.](https://discuss.ipfs.tech/t/how-to-set-up-my-own-bootstrap-nodes-to-enable-discovery-and-connection-between-nodes-with-public-ip-and-nodes-on-a-local-network/16910)
|
||||
|
||||
[Job Alert: Filebase is hiring a Senior Digital Marketing Strategist](https://wellfound.com/jobs/2807523-senior-digital-marketing-strategist)
|
||||
|
||||
- "Are you a creative and strategic thinker with a passion for driving digital marketing excellence? Do you thrive in dynamic, cutting-edge environments and have a deep understanding of the tech industry? Join us at Filebase, a leading player in the decentralized storage revolution, as a Senior Digital Marketing Strategist." [Learn more here!](https://wellfound.com/jobs/2807523-senior-digital-marketing-strategist)
|
||||
|
||||
[LabWeek23 is happening November 13-17](https://23.labweek.io/)
|
||||
|
||||
- Have you booked your travel yet? LabWeek23 is happening in Istanbul, Türkiye, from November 13-17, alongside Devconnect! This is your chance to connect and collaborate with visionaries and teams that are domain leaders in ZK Proofs, AI and blockchain, DeSci, decentralized storage, gaming in Web3, public goods funding, cryptoeconomics, and much more. [Learn more about it here!](https://23.labweek.io/)
|
||||
|
||||
## **Have something you want featured? 📥**
|
||||
|
||||
As part of our ongoing efforts to empower and promote community contributors, we're providing a new way for you to have a chance to influence the monthly IPFS newsletter! If you have something exciting or important that you think the IPFS community should know about, then you can [submit this form](https://airtable.com/appjqlMYucNiOYHl7/shrfPrKe112FW3ucv) to have it be considered for promotion via IPFS communication channels.
|
||||
|
||||
91
src/_blog/newsletter-199.md
Normal file
@@ -0,0 +1,91 @@
|
||||
---
|
||||
title: Welcome to IPFS News 199!
|
||||
description: Featuring CURL supporting IPFS and a new IPFS implementation called Nabu.
|
||||
date: 2023-11-09
|
||||
permalink: "/newsletter-199"
|
||||
header_image: "/ipfsnews.png"
|
||||
tags:
|
||||
- newsletter
|
||||
---
|
||||
|
||||
## **IPFS URL support in CURL 🔭**
|
||||
|
||||
We're excited to share that thanks to the hard work of Mark Gaiser, CURL 8.4.0 shipped with built-in support for ipfs:// and ipns:// addresses. This is an important advancement, and we've got a blog you can read to learn more:
|
||||
|
||||
<a href="https://blog.ipfs.tech/ipfs-uri-support-in-curl/" class="cta-button">Read the blog post</a>
|
||||
|
||||
## **Brand New on IPFS ✨**
|
||||
|
||||
[Introducing Nabu: Unleashing IPFS on the JVM](https://blog.ipfs.tech/2023-11-introducing-nabu/)
|
||||
|
||||
- Learn about a new fast IPFS implementation in Java by checking out this recent post on the IPFS blog. [Read it here!](https://blog.ipfs.tech/2023-11-introducing-nabu/)
|
||||
|
||||
[IPFS Connect Istanbul](https://istanbul2023.ipfsconnect.org/)
|
||||
|
||||
- IPFS Connect is a community-run regional conference bringing together all of the builders and ecosystems that rely on and use IPFS as the most widely used decentralized content addressing protocol for files and data. This year's event is happening alongside Devconnect and LabWeek23 in Istanbul, Turkey on November 16. [Register today!](https://istanbul2023.ipfsconnect.org/)
|
||||
|
||||
[Connect with the PL IPFS Implementers in Istanbul and Prague](https://forms.gle/CxUQPsEUg2CGkLgh6)
|
||||
|
||||
- We want to connect with you and hear your thoughts as we shape the future of IPFS for 2024. Your input is invaluable in guiding our efforts, so we're inviting you to meet with us in Istanbul and Prague at two exciting events: DevConnect / IPFS Connect in Istanbul 🇹🇷 and DCxPrague in Prague 🇨🇿. If you're interested in sharing your thoughts and connecting with us during these events, [please fill out this form.](https://forms.gle/CxUQPsEUg2CGkLgh6)
|
||||
|
||||
[New Release: Kubo v0.24.0](https://github.com/ipfs/kubo/releases/tag/v0.24.0)
|
||||
|
||||
- Support for content blocking
|
||||
- Gateway: the root of the CARs are no longer meaningful
|
||||
- IPNS: improved publishing defaults
|
||||
- IPNS: record TTL is used for caching
|
||||
- Experimental Transport: WebRTC Direct
|
||||
|
||||
[New Release: Kubo v0.23.0](https://github.com/ipfs/kubo/releases/tag/v0.23.0)
|
||||
|
||||
[New Release: Boxo v0.15.0](https://discuss.ipfs.tech/t/boxo-v0-15-0-is-out/17175)
|
||||
|
||||
[New Release: Iroh v0.10.0](https://github.com/n0-computer/iroh/releases/tag/v0.10.0)
|
||||
|
||||
[Popular on the Forums](https://discuss.ipfs.tech/top?period=monthly)
|
||||
|
||||
- Help: [How to diagnose file not propagating to other Gateways?](https://discuss.ipfs.tech/t/how-to-diagnose-file-not-propagating-to-other-gateways/17071)
|
||||
- Help: [Files pinned to my IPFS node don’t show up on any other gateway](https://discuss.ipfs.tech/t/files-pinned-to-my-ipfs-node-dont-show-up-on-any-other-gateway/17132)
|
||||
- Helia: [Connection closes during bitswap fetches](https://discuss.ipfs.tech/t/connection-closes-during-bitswap-fetches/17041)
|
||||
|
||||
[IPFS & Filecoin Ecosystem Roundup](https://www.youtube.com/watch?v=rn1nLUqJ4HM)
|
||||
|
||||
- The October Filecoin & IPFS Ecosystem Roundup is online! Check out the video for the latest updates, developments, and insights straight from the community. [Watch it here!](https://www.youtube.com/watch?v=rn1nLUqJ4HM)
|
||||
|
||||
[Helia Report 2023-10](https://pl-strflt.notion.site/Helia-Report-2023-10-ddd18180aec54ff9ad06f0771340b850)
|
||||
|
||||
[ProbeLab Network Weekly Reports](https://github.com/plprobelab/network-measurements/tree/master/reports/2023)
|
||||
|
||||
|
||||
## **Around the Ecosystem 🌎**
|
||||
|
||||
[Call for submissions: awesome-ipfs](https://github.com/ipfs/awesome-ipfs)
|
||||
|
||||
- This is a community list of awesome projects, apps, tools, and services related to IPFS. We'd love to see more projects added to it, [so submit yours today!](https://github.com/ipfs/awesome-ipfs)
|
||||
|
||||
[IPFS Naming from Scaleway](https://labs.scaleway.com/en/ipfs-naming/)
|
||||
|
||||
- Scaleway is opening a new service called IPFS Naming. It is a managed IPNS service that solves the problem of managing and dynamically updating immutable IPFS addresses. [Learn more about it here!](https://labs.scaleway.com/en/ipfs-naming/)
|
||||
|
||||
[The Principles and Practices of IPFS](https://www.amazon.co.jp/o/ASIN/4297138379/gihyojp-22)
|
||||
|
||||
- This book has been translated to Japanese and will be published on November 8, [pre-order is available on Amazon.](https://www.amazon.co.jp/o/ASIN/4297138379/gihyojp-22)
|
||||
|
||||
[Peergos v0.14.0 featuring Nabu](https://peergos.net/public/peergos/releases)
|
||||
|
||||
- The Peergos team just published a new Peergos release, v0.14.0, in which they switch to their new Java implementation of IPFS, Nabu. This reduces idle bandwidth usage by about 10x, as well as CPU and RAM usage, and generally makes p2p stuff faster. [Check out the release notes here!](https://peergos.net/public/peergos/releases)
|
||||
|
||||
[Job Alert: Filebase is hiring a Senior Digital Marketing Strategist](https://wellfound.com/jobs/2807523-senior-digital-marketing-strategist)
|
||||
|
||||
- "Are you a creative and strategic thinker with a passion for driving digital marketing excellence? Do you thrive in dynamic, cutting-edge environments and have a deep understanding of the tech industry? Join us at Filebase, a leading player in the decentralized storage revolution, as a Senior Digital Marketing Strategist." [Learn more here!](https://wellfound.com/jobs/2807523-senior-digital-marketing-strategist)
|
||||
|
||||
[Reality Studies Podcast](https://www.youtube.com/watch?v=902OA94avbY)
|
||||
|
||||
- A recent episode of the Reality Studies podcast, by Protocol Labs Arts & Culture Advisor Jesse Damiani, features Asad J. Malik, CEO of Jadu AR. In 2021 and 2022, Jadu released successful NFT collections which were stored using IPFS. Now, owners of those NFTs can integrate them into the company's recently launched mobile AR game. [Watch the interview here!](https://www.youtube.com/watch?v=902OA94avbY)
|
||||
|
||||
|
||||
## **Have something you want featured? 📥**
|
||||
|
||||
If you have something exciting or important that you think the IPFS community should know about, then you can [submit this form](https://airtable.com/appjqlMYucNiOYHl7/shrfPrKe112FW3ucv) to have it be considered for inclusion in the IPFS newsletter.
|
||||
|
||||
<a href="https://airtable.com/appjqlMYucNiOYHl7/shrfPrKe112FW3ucv" class="cta-button">Submit form</a>
|
||||
85
src/_blog/newsletter-200.md
Normal file
@@ -0,0 +1,85 @@
|
||||
---
|
||||
title: Welcome to IPFS News 200!
|
||||
description: Featuring a big announcement about IPFS and libp2p.
|
||||
date: 2023-12-18
|
||||
permalink: "/newsletter-200"
|
||||
header_image: "/ipfsnews.png"
|
||||
tags:
|
||||
- newsletter
|
||||
---
|
||||
|
||||
## **Advancing IPFS and libp2p Governance 🔭**
|
||||
|
||||
We have some exciting news to share! IPFS and libp2p are officially taking big steps forward in project maturity, with independent foundations and funding structures in the Protocol Labs network! You can learn more about this news [on the Protocol Labs blog.](https://protocol.ai/blog/advancing-ipfs-and-libp2p-governance/)
|
||||
|
||||
<a href="https://protocol.ai/blog/advancing-ipfs-and-libp2p-governance/" class="cta-button">Read the blog post</a>
|
||||
|
||||
## **Brand New on IPFS ✨**
|
||||
|
||||
[Videos: IPFS Connect Istanbul Talks](https://www.youtube.com/playlist?list=PLfW9my7NCey-y5_j6QGCtGoigQuVlZ3Bj)
|
||||
|
||||
- IPFS Connect was a community-run regional conference bringing together all of the builders and ecosystems that rely on and use IPFS as the most widely used decentralized content addressing protocol for files and data. This year's event happened alongside Devconnect and LabWeek23 in Istanbul, Turkey on November 16. [Watch the talks here!](https://www.youtube.com/playlist?list=PLfW9my7NCey-y5_j6QGCtGoigQuVlZ3Bj)
|
||||
|
||||
[IPFS Companion MV3 Update](https://blog.ipfs.tech/2023-ipfs-companion-mv3-update/)
|
||||
|
||||
- In September, IPFS-Companion built on MV3 (Manifest V3) was shipped on the main channel, which brings exciting improvements and changes the way you interact with this powerful tool. [This blog post](https://blog.ipfs.tech/2023-ipfs-companion-mv3-update/) will give you a quick overview of the journey, changes, and what to expect.
|
||||
|
||||
[Incident Report - Increased Latency on the Amino DHT](https://discuss.ipfs.tech/t/incident-report-increased-latency-on-the-amino-dht/17338)
|
||||
|
||||
- Since 4 December, the ProbeLab team which monitors the IPFS network observed two major anomalies in Amino (The public IPFS DHT). [Learn more about it here!](https://discuss.ipfs.tech/t/incident-report-increased-latency-on-the-amino-dht/17338)
|
||||
|
||||
[Built with IPFS - Mintter and The Hypermedia Protocol](https://www.youtube.com/watch?v=K3U6A4sgKo4)
|
||||
|
||||
- In this episode of Built with IPFS, we dive into Mintter and The Hypermedia Protocol. Mintter Hypermedia is an open system, built on IPFS that allows communities to collaborate on content that is structured and deeply linked. All content in the system is cryptographically signed, versioned, and made permanent with IPFS. [Watch the video here!](https://www.youtube.com/watch?v=K3U6A4sgKo4)
|
||||
|
||||
[New Release: Kubo v0.25.0](https://github.com/ipfs/kubo/releases/tag/v0.25.0)
|
||||
|
||||
- Commands `ipfs key sign` and `ipfs key verify`
|
||||
|
||||
[New Release: Boxo v0.16.0](https://github.com/ipfs/boxo/releases/tag/v0.16.0)
|
||||
|
||||
[New Release: Iroh v0.11.0](https://github.com/n0-computer/iroh/releases/tag/v0.11.0)
|
||||
|
||||
[New Release: curl v8.5.0 - improved IPFS and IPNS URL support](https://github.com/zuoxiaofeng/curl/commit/7afa1be7d799a73f3ab6fb0b0072159103da802a)
|
||||
|
||||
[Popular on the Forums](https://discuss.ipfs.tech/top?period=monthly)
|
||||
|
||||
- Help: [Constant 100% CPU utilization on AWS EC2 IPFS node](https://discuss.ipfs.tech/t/constant-100-cpu-utilization-on-aws-ec2-ipfs-node/17172)
|
||||
- Help: [Double-hashed entries in denylists](https://discuss.ipfs.tech/t/double-hashed-entries-in-denylists/17199)
|
||||
- Help: [How to find these files through CID?](https://discuss.ipfs.tech/t/how-to-find-these-files-through-cid-the-files-are-very-important-to-me/17297)
|
||||
|
||||
[Helia Reports](https://pl-strflt.notion.site/Helia-Report-2023-10-ddd18180aec54ff9ad06f0771340b850)
|
||||
|
||||
[ProbeLab Network Weekly Reports](https://github.com/plprobelab/network-measurements/tree/master/reports/2023)
|
||||
|
||||
## **Around the Ecosystem 🌎**
|
||||
|
||||
[Blog: Introducing Major Improvements to Omnilingo](https://blog.ipfs.tech/major-improvements-to-omnilingo/)
|
||||
|
||||
- From the Omnilingo team: "We're happy to introduce some major improvements to Omnilingo, the decentralised language learning platform designed with special attention to small and marginalised language communities. We now have an experimental contribution system, including an encryption-based consent model." [Read more about it here!](https://blog.ipfs.tech/major-improvements-to-omnilingo/)
|
||||
|
||||
[Blog: dAppling - a New Way to Deploy IPFS Sites in Minutes](https://blog.ipfs.tech/2023-11-dappling/)
|
||||
|
||||
- Introducing a seamless way to launch your code on IPFS, featuring straightforward setup, automatic deployments, and more. [Learn more about it here!](https://blog.ipfs.tech/2023-11-dappling/)
|
||||
|
||||
[Dumb Pipe by number 0](https://www.dumbpipe.dev/)
|
||||
|
||||
- Easy, direct connections that punch through NATs & stay connected as network conditions change. [Learn more here!](https://www.dumbpipe.dev/)
|
||||
|
||||
[Call for submissions: awesome-ipfs](https://github.com/ipfs/awesome-ipfs)
|
||||
|
||||
- This is a community list of awesome projects, apps, tools, and services related to IPFS. We'd love to see more projects added to it, so [submit yours today!](https://github.com/ipfs/awesome-ipfs)
|
||||
|
||||
[Upholding a Free Tier: The IPFS Challenge](https://filebase.com/blog/upholding-a-free-tier-the-ipfs-challenge/)
|
||||
|
||||
- From the Filebase Team: "Four years into our journey at Filebase, we stand amid a shifting technological landscape where continuity and reliability are more valued than ever. In such times, we are not just continuing but reaffirming our pledge to offer a free IPFS tier." [Read the blog post here!](https://filebase.com/blog/upholding-a-free-tier-the-ipfs-challenge/)
|
||||
|
||||
[IPFS-based CDN faster than Akamai in some cases](https://github.com/p2p-cdn/speedtest-site/blob/master/final-report.pdf)
|
||||
|
||||
- A group of students at MIT ran a speedtest of IPFS vs a large web2 CDN, and IPFS was significantly faster for some file sizes! [Check out the full report here.](https://github.com/p2p-cdn/speedtest-site/blob/master/final-report.pdf)
|
||||
|
||||
## **Have something you want featured? 📥**
|
||||
|
||||
If you have something exciting or important that you think the IPFS community should know about, then you can [submit this form](https://airtable.com/appjqlMYucNiOYHl7/shrfPrKe112FW3ucv) to have it be considered for inclusion in the IPFS newsletter.
|
||||
|
||||
<a href="https://airtable.com/appjqlMYucNiOYHl7/shrfPrKe112FW3ucv" class="cta-button">Submit form</a>
|
||||
76
src/_blog/newsletter-201.md
Normal file
@@ -0,0 +1,76 @@
|
||||
---
|
||||
title: IPFS News Issue 201
|
||||
description: Featuring the announcement of a recent demonstration of the InterPlanetary File System (IPFS) in space!
|
||||
date: 2024-01-23
|
||||
permalink: "/newsletter-201"
|
||||
header_image: "/ipfsnews.png"
|
||||
tags:
|
||||
- newsletter
|
||||
---
|
||||
|
||||
## **Filecoin Foundation Successfully Demonstrates IPFS in Space 🔭**
|
||||
|
||||
The Filecoin Foundation (FF) successfully completed a first-of-its-kind mission demonstrating the InterPlanetary File System (IPFS) in space. The recent demonstration involved sending files from Earth to orbit and back using an implementation of the IPFS protocol designed for space communications.
|
||||
|
||||
<a href="https://fil.org/blog/filecoin-foundation-successfully-deploys-interplanetary-file-system-ipfs-in-space/" class="cta-button">Read the blog post</a>
|
||||
|
||||
## **Brand New on IPFS ✨**
|
||||
|
||||
[Decentralizing DeFi frontends: protecting users and protocol authors](https://www.liquity.org/blog/decentralizing-defi-frontends-protecting-users-and-protocol-authors)
|
||||
|
||||
- A blog post from Liquity that features IPFS and talks about the need for decentralized frontends that are trustless and verifiable. [Read the post here!](https://www.liquity.org/blog/decentralizing-defi-frontends-protecting-users-and-protocol-authors)
|
||||
|
||||
[New Release: Kubo v0.26.0](https://github.com/ipfs/kubo/releases/tag/v0.26.0)
|
||||
|
||||
- Several deprecated commands have been removed
|
||||
- Support optional pin names
|
||||
- jaeger trace exporter has been removed
|
||||
|
||||
[New Release: Boxo v0.17.0](https://github.com/ipfs/boxo/releases/tag/v0.17.0)
|
||||
|
||||
- pinning/pinner: you can now give a custom name when pinning a CID. To reflect this, the Pinner has been adjusted. Note that calling Pin for the same CID with a different name will replace its current name with the newly given name.
|
||||
|
||||
[New Release: Iroh v0.12.0](https://github.com/n0-computer/iroh/releases/tag/v0.12.0)
|
||||
|
||||
- (bytes) Switch to a single directory for the flat store
|
||||
- (net) Add Magicsock::network_change
|
||||
- Usage metrics reporting
|
||||
- Remove derp regions in favor of direct urls
|
||||
- Additional public get utils
|
||||
|
||||
[New Release: OrbitDB v2.0](https://www.npmjs.com/package/@orbitdb/core)
|
||||
|
||||
- This version of OrbitDB replaces js-ipfs with Helia, the IPFS implementation for JavaScript.
|
||||
|
||||
[Popular on the Forums](https://discuss.ipfs.tech/top?period=monthly)
|
||||
|
||||
- Help: [Feasibility for Self-Hosting Scientific Datasets?](https://discuss.ipfs.tech/t/feasibility-for-self-hosting-scientific-datasets/17355)
|
||||
- Ecosystem: [Cloudflare IPFS gateway goes premium, longevity of free version?](https://discuss.ipfs.tech/t/cloudflare-ipfs-gateway-goes-premium-longetivity-of-free-version/17388)
|
||||
|
||||
[ProbeLab Network Weekly Reports](https://github.com/plprobelab/network-measurements/tree/master/reports/2023)
|
||||
|
||||
## **Around the Ecosystem 🌎**
|
||||
|
||||
[Sendme, built on iroh](https://iroh.computer/sendme)
|
||||
|
||||
- It's like scp without needing to know the IP address. Add some files to sendme, and it will give you a pastable ticket that you can give to anyone who needs your files. Sendme will connect your devices directly & transfer the data without any accounts or configuration. [Learn more here!](https://iroh.computer/sendme)
|
||||
|
||||
[All-in-one Docker image with IPFS node best practices](https://discuss.ipfs.tech/t/all-in-one-docker-image-with-ipfs-node-best-practices/17408)
|
||||
|
||||
- A post in the IPFS community forum about putting together an all-in-one Docker image for self-hosting an IPFS node / gateway. [Read the forum thread here!](https://discuss.ipfs.tech/t/all-in-one-docker-image-with-ipfs-node-best-practices/17408)
|
||||
|
||||
[What's IPFS and how it compares to BitTorrent](https://norman.life/posts/ipfs-bittorrent)
|
||||
|
||||
- A new blog post from Daniel Norman, Developer Advocate for IPFS, about the differences between IPFS and BitTorrent. [Read the entire post here!](https://norman.life/posts/ipfs-bittorrent)
|
||||
|
||||
[Encrypted Blockstore from Fireproof](https://www.npmjs.com/package/@fireproof/encrypted-blockstore)
|
||||
|
||||
- Multi-writer self-hosted local-first IPFS-compatible blockstore with end-to-end encryption. [Learn more here!](https://www.npmjs.com/package/@fireproof/encrypted-blockstore)
|
||||
|
||||
[Community contribution of nfs mounting as an alternative to fuse in kubo](https://www.youtube.com/watch?v=19FkIxTzavY)
|
||||
|
||||
## **Have something you want featured? 📥**
|
||||
|
||||
If you have something exciting or important that you think the IPFS community should know about, then you can [submit this form](https://airtable.com/appjqlMYucNiOYHl7/shrfPrKe112FW3ucv) to have it considered for inclusion in the IPFS newsletter.
|
||||
|
||||
<a href="https://airtable.com/appjqlMYucNiOYHl7/shrfPrKe112FW3ucv" class="cta-button">Submit form</a>
|
||||
@@ -1,5 +1,33 @@
|
||||
---
|
||||
data:
|
||||
- title: 'Just released: Kubo 0.26.0!'
|
||||
date: "2024-01-22"
|
||||
publish_date: null
|
||||
path: https://github.com/ipfs/kubo/releases/tag/v0.26.0
|
||||
tags:
|
||||
- go-ipfs
|
||||
- kubo
|
||||
- title: 'Just released: Kubo 0.25.0!'
|
||||
date: "2023-12-14"
|
||||
publish_date: null
|
||||
path: https://github.com/ipfs/kubo/releases/tag/v0.25.0
|
||||
tags:
|
||||
- go-ipfs
|
||||
- kubo
|
||||
- title: 'Just released: Kubo 0.24.0!'
|
||||
date: "2023-11-08"
|
||||
publish_date: null
|
||||
path: https://github.com/ipfs/kubo/releases/tag/v0.24.0
|
||||
tags:
|
||||
- go-ipfs
|
||||
- kubo
|
||||
- title: 'Just released: Kubo 0.23.0!'
|
||||
date: "2023-10-05"
|
||||
publish_date: null
|
||||
path: https://github.com/ipfs/kubo/releases/tag/v0.23.0
|
||||
tags:
|
||||
- go-ipfs
|
||||
- kubo
|
||||
- title: 'Just released: Kubo 0.22.0!'
|
||||
date: "2023-08-08"
|
||||
publish_date: null
|
||||
|
||||
@@ -4,11 +4,39 @@ type: Video
|
||||
sitemap:
|
||||
exclude: true
|
||||
data:
|
||||
- title: 'Built with IPFS - Mintter and The Hypermedia Protocol'
|
||||
date: 2023-11-13
|
||||
publish_date: 2023-11-13T12:00:00+00:00
|
||||
path: https://www.youtube.com/watch?v=K3U6A4sgKo4
|
||||
tags:
|
||||
- Built with IPFS
|
||||
- demo
|
||||
- interview
|
||||
- deep-dive
|
||||
- title: 'This Month in IPFS - March 2023'
|
||||
date: 2023-03-23
|
||||
publish_date: 2023-03-23T12:00:00+00:00
|
||||
path: https://www.youtube.com/watch?v=_vn52temkDU
|
||||
tags:
|
||||
- This Month in IPFS
|
||||
- community
|
||||
- demo
|
||||
- interview
|
||||
- title: 'This Month in IPFS - February 2023'
|
||||
date: 2023-02-23
|
||||
publish_date: 2023-02-23T12:00:00+00:00
|
||||
path: https://www.youtube.com/watch?v=Cflrlv31oW8
|
||||
tags:
|
||||
- This Month in IPFS
|
||||
- community
|
||||
- demo
|
||||
- interview
|
||||
- title: 'This Month in IPFS - January 2023'
|
||||
date: 2023-01-26
|
||||
publish_date: 2023-02-06T12:00:00+00:00
|
||||
path: https://www.youtube.com/watch?v=kRzNohHeRaM
|
||||
tags:
|
||||
- This Month in IPFS
|
||||
- community
|
||||
- demo
|
||||
- interview
|
||||
|
||||
BIN
src/assets/2023-12-introducing-dappling-autodetect.png
Normal file
|
After Width: | Height: | Size: 130 KiB |
BIN
src/assets/2023-12-introducing-dappling-error.png
Normal file
|
After Width: | Height: | Size: 217 KiB |
BIN
src/assets/2023-12-introducing-dappling-header.png
Normal file
|
After Width: | Height: | Size: 2.4 MiB |
BIN
src/assets/Group 1667nabu.png
Normal file
|
After Width: | Height: | Size: 54 KiB |
BIN
src/assets/curl.png
Normal file
|
After Width: | Height: | Size: 4.8 MiB |
BIN
src/assets/dapps-ipfs/header.png
Normal file
|
After Width: | Height: | Size: 1.9 MiB |
BIN
src/assets/dapps-ipfs/helia-ipfs-loader.png
Normal file
|
After Width: | Height: | Size: 33 KiB |
BIN
src/assets/dapps-ipfs/local-installer.png
Normal file
|
After Width: | Height: | Size: 438 KiB |
BIN
src/assets/dapps-ipfs/local-kubo-node.png
Normal file
|
After Width: | Height: | Size: 71 KiB |
BIN
src/assets/dapps-ipfs/service-worker-gateway.png
Normal file
|
After Width: | Height: | Size: 53 KiB |
BIN
src/assets/dapps-ipfs/uniswap-release.png
Normal file
|
After Width: | Height: | Size: 392 KiB |
BIN
src/assets/ipfs-companion-mv3-banner.png
Normal file
|
After Width: | Height: | Size: 585 KiB |
BIN
src/assets/ipfs-companion-mv3-declarativenetrequest.png
Normal file
|
After Width: | Height: | Size: 98 KiB |
BIN
src/assets/ipfs_uri_where_protocol_what.png
Normal file
|
After Width: | Height: | Size: 12 KiB |
BIN
src/assets/nabu-banner-2023.png
Normal file
|
After Width: | Height: | Size: 54 KiB |
BIN
src/assets/nabu/modules.png
Normal file
|
After Width: | Height: | Size: 13 KiB |
BIN
src/assets/nabu/nabu-interop.png
Normal file
|
After Width: | Height: | Size: 53 KiB |
BIN
src/assets/nabu/nabu-logo.png
Normal file
|
After Width: | Height: | Size: 11 KiB |
BIN
src/assets/nabu/nabu-speed.png
Normal file
|
After Width: | Height: | Size: 12 KiB |
BIN
src/assets/nabu/p2p-http-proxy.png
Normal file
|
After Width: | Height: | Size: 34 KiB |