Skip to content

Commit

Permalink
docs: fix all Chinese doc links for the new docs site (minio#6097)
Browse files Browse the repository at this point in the history
Additionally fix typos, default to US locale words
  • Loading branch information
harshavardhana authored Jun 28, 2018
1 parent de25148 commit e5e522f
Show file tree
Hide file tree
Showing 42 changed files with 113 additions and 111 deletions.
8 changes: 5 additions & 3 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -50,9 +50,11 @@ deadcode:
@${GOPATH}/bin/deadcode -test $(shell go list ./...) || true

spelling:
@${GOPATH}/bin/misspell -error `find cmd/`
@${GOPATH}/bin/misspell -error `find pkg/`
@${GOPATH}/bin/misspell -error `find docs/`
@${GOPATH}/bin/misspell -locale US -error `find cmd/`
@${GOPATH}/bin/misspell -locale US -error `find pkg/`
@${GOPATH}/bin/misspell -locale US -error `find docs/`
@${GOPATH}/bin/misspell -locale US -error `find buildscripts/`
@${GOPATH}/bin/misspell -locale US -error `find dockerscripts/`

# Builds minio, runs the verifiers then runs the tests.
check: test
Expand Down
2 changes: 1 addition & 1 deletion buildscripts/checkdeps.sh
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ _init() {

## FIXME:
## In OSX, 'readlink -f' option does not exist, hence
## we have our own readlink -f behaviour here.
## we have our own readlink -f behavior here.
## Once OSX has the option, below function is good enough.
##
## readlink() {
Expand Down
6 changes: 3 additions & 3 deletions cmd/admin-heal-ops.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ const (
var (
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
errHealPushStopNDiscard = fmt.Errorf("heal push stopped due to heal stop signal")
errHealStopSignalled = fmt.Errorf("heal stop signalled")
errHealStopSignalled = fmt.Errorf("heal stop signaled")

errFnHealFromAPIErr = func(err error) error {
errCode := toAPIErrorCode(err)
Expand Down Expand Up @@ -301,7 +301,7 @@ type healSequence struct {
// current accumulated status of the heal sequence
currentStatus healSequenceStatus

// channel signalled by background routine when traversal has
// channel signaled by background routine when traversal has
// completed
traverseAndHealDoneCh chan error

Expand Down Expand Up @@ -441,7 +441,7 @@ func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
h.currentStatus.updateLock.Unlock()

// This is a "safe" point for the heal sequence to quit if
// signalled externally.
// signaled externally.
if h.isQuitting() {
return errHealStopSignalled
}
Expand Down
4 changes: 2 additions & 2 deletions cmd/bucket-handlers-listobjects.go
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
}
// Initiate a list objects operation based on the input params.
// On success would return back ListObjectsInfo object to be
// marshalled into S3 compatible XML header.
// marshaled into S3 compatible XML header.
listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, marker, delimiter, maxKeys, fetchOwner, startAfter)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
Expand Down Expand Up @@ -166,7 +166,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
}
// Initiate a list objects operation based on the input params.
// On success would return back ListObjectsInfo object to be
// marshalled into S3 compatible XML header.
// marshaled into S3 compatible XML header.
listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
Expand Down
2 changes: 1 addition & 1 deletion cmd/disk-cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -238,7 +238,7 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
pipeWriter.CloseWithError(err)
return
}
pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
}()
err = dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(objInfo))
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion cmd/fs-v1-multipart.go
Original file line number Diff line number Diff line change
Expand Up @@ -264,7 +264,7 @@ func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
}
return
}
// Close writer explicitly signalling we wrote all data.
// Close writer explicitly signaling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil {
logger.LogIf(ctx, gerr)
return
Expand Down
2 changes: 1 addition & 1 deletion cmd/fs-v1.go
Original file line number Diff line number Diff line change
Expand Up @@ -505,7 +505,7 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
}
return
}
// Close writer explicitly signalling we wrote all data.
// Close writer explicitly signaling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil {
logger.LogIf(ctx, gerr)
return
Expand Down
2 changes: 1 addition & 1 deletion cmd/generic-handlers.go
Original file line number Diff line number Diff line change
Expand Up @@ -688,7 +688,7 @@ func setBucketForwardingHandler(h http.Handler) http.Handler {
// rate.Limiter token bucket configured with maxOpenFileLimit and
// burst set to 1. The request will idle for up to 1*time.Second.
// If the limiter detects the deadline will be exceeded, the request is
// cancelled immediately.
// canceled immediately.
func setRateLimitHandler(h http.Handler) http.Handler {
_, maxLimit, err := sys.GetMaxOpenFileLimit()
logger.FatalIf(err, "Unable to get maximum open file limit", context.Background())
Expand Down
2 changes: 1 addition & 1 deletion cmd/namespace-lock_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -226,7 +226,7 @@ func TestNamespaceForceUnlockTest(t *testing.T) {

select {
case <-ch:
// Signalled so all is fine.
// Signaled so all is fine.
break

case <-time.After(100 * time.Millisecond):
Expand Down
2 changes: 1 addition & 1 deletion cmd/posix_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -470,7 +470,7 @@ func TestPosixMakeVol(t *testing.T) {
}
}

// TestPosixDeleteVol - Validates the expected behaviour of posix.DeleteVol for various cases.
// TestPosixDeleteVol - Validates the expected behavior of posix.DeleteVol for various cases.
func TestPosixDeleteVol(t *testing.T) {
// create posix test setup
posixStorage, path, err := newPosixTestSetup()
Expand Down
2 changes: 1 addition & 1 deletion cmd/posix_windows_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ func TestUNCPaths(t *testing.T) {
}
}

// Test to validate posix behaviour on windows when a non-final path component is a file.
// Test to validate posix behavior on windows when a non-final path component is a file.
func TestUNCPathENOTDIR(t *testing.T) {
var err error
// Instantiate posix object to manage a disk
Expand Down
2 changes: 1 addition & 1 deletion cmd/server_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2420,7 +2420,7 @@ func (s *TestSuiteCommon) TestBucketMultipartList(c *check) {
// unmarshalling works from a client perspective, specifically
// while unmarshalling time.Time type for 'Initiated' field.
// time.Time does not honor xml marshaler, it means that we need
// to encode/format it before giving it to xml marshalling.
// to encode/format it before giving it to xml marshaling.

// This below check adds client side verification to see if its
// truly parseable.
Expand Down
2 changes: 1 addition & 1 deletion cmd/update-main.go
Original file line number Diff line number Diff line change
Expand Up @@ -458,7 +458,7 @@ func getUpdateInfo(timeout time.Duration, mode string) (updateMsg string, sha256

func doUpdate(sha256Hex string, latestReleaseTime time.Time, ok bool) (successMsg string, err error) {
if !ok {
successMsg = greenColorSprintf("Minio update to version RELEASE.%s cancelled.",
successMsg = greenColorSprintf("Minio update to version RELEASE.%s canceled.",
latestReleaseTime.Format(minioReleaseTagTimeLayout))
return successMsg, nil
}
Expand Down
2 changes: 1 addition & 1 deletion cmd/xl-sets.go
Original file line number Diff line number Diff line change
Expand Up @@ -614,7 +614,7 @@ func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucke
}
return
}
// Close writer explicitly signalling we wrote all data.
// Close writer explicitly signaling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil {
logger.LogIf(ctx, gerr)
return
Expand Down
2 changes: 1 addition & 1 deletion cmd/xl-v1-metadata.go
Original file line number Diff line number Diff line change
Expand Up @@ -463,7 +463,7 @@ func writeXLMetadata(ctx context.Context, disk StorageAPI, bucket, prefix string
logger.LogIf(ctx, err)
return err
}
// Persist marshalled data.
// Persist marshaled data.
err = disk.AppendFile(bucket, jsonFile, metadataBytes)
logger.LogIf(ctx, err)
return err
Expand Down
4 changes: 2 additions & 2 deletions cmd/xl-v1-multipart.go
Original file line number Diff line number Diff line change
Expand Up @@ -277,7 +277,7 @@ func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, ds
}
return
}
// Close writer explicitly signalling we wrote all data.
// Close writer explicitly signaling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil {
logger.LogIf(ctx, gerr)
return
Expand Down Expand Up @@ -482,7 +482,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
// to indicate where the listing should begin from.
//
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is marshalled directly into XML and
// ListPartsInfo structure is marshaled directly into XML and
// replied back to the client.
func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) {
if err := checkListPartsArgs(ctx, bucket, object, xl); err != nil {
Expand Down
2 changes: 1 addition & 1 deletion cmd/xl-v1-object.go
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,7 @@ func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBuc
pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
return
}
pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
}()

hashReader, err := hash.NewReader(pipeReader, length, "", "")
Expand Down
2 changes: 1 addition & 1 deletion dockerscripts/healthcheck.sh
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ healthcheck_main () {
http_response=$(curl -s -k -o /dev/null -w "%{http_code}" ${scheme}${address}${resource})
fi

# If http_repsonse is 200 - server is up.
# If http_response is 200 - server is up.
[ "$http_response" = "200" ]
fi
}
Expand Down
2 changes: 1 addition & 1 deletion docs/bucket/notifications/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ Bucket events can be published to the following targets:

## Prerequisites

* Install and configure Minio Server from [here](http://docs.minio.io/docs/minio-quickstart-guide).
* Install and configure Minio Server from [here](https://docs.minio.io/docs/minio-quickstart-guide).
* Install and configure Minio Client from [here](https://docs.minio.io/docs/minio-client-quickstart-guide).

<a name="AMQP"></a>
Expand Down
18 changes: 9 additions & 9 deletions docs/config/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ minio server --config-dir /etc/minio /data
```

### Certificate Directory
TLS certificates are stored under ``${HOME}/.minio/certs`` directory. You need to place certificates here to enable `HTTPS` based access. Read more about [How to secure access to Minio server with TLS](http://docs.minio.io/docs/how-to-secure-access-to-minio-server-with-tls).
TLS certificates are stored under ``${HOME}/.minio/certs`` directory. You need to place certificates here to enable `HTTPS` based access. Read more about [How to secure access to Minio server with TLS](https://docs.minio.io/docs/how-to-secure-access-to-minio-server-with-tls).

Following is the directory structure for Minio server with TLS certificates.

Expand Down Expand Up @@ -115,15 +115,15 @@ By default, parity for objects with standard storage class is set to `N/2`, and
|Field|Type|Description|
|:---|:---|:---|
|``notify``| |Notify enables bucket notification events for lambda computing via the following targets.|
|``notify.amqp``| |[Configure to publish Minio events via AMQP target.](http://docs.minio.io/docs/minio-bucket-notification-guide#AMQP)|
|``notify.nats``| |[Configure to publish Minio events via NATS target.](http://docs.minio.io/docs/minio-bucket-notification-guide#NATS)|
|``notify.elasticsearch``| |[Configure to publish Minio events via Elasticsearch target.](http://docs.minio.io/docs/minio-bucket-notification-guide#Elasticsearch)|
|``notify.redis``| |[Configure to publish Minio events via Redis target.](http://docs.minio.io/docs/minio-bucket-notification-guide#Redis)|
|``notify.postgresql``| |[Configure to publish Minio events via PostgreSQL target.](http://docs.minio.io/docs/minio-bucket-notification-guide#PostgreSQL)|
|``notify.kafka``| |[Configure to publish Minio events via Apache Kafka target.](http://docs.minio.io/docs/minio-bucket-notification-guide#apache-kafka)|
|``notify.webhook``| |[Configure to publish Minio events via Webhooks target.](http://docs.minio.io/docs/minio-bucket-notification-guide#webhooks)|
|``notify.amqp``| |[Configure to publish Minio events via AMQP target.](https://docs.minio.io/docs/minio-bucket-notification-guide#AMQP)|
|``notify.nats``| |[Configure to publish Minio events via NATS target.](https://docs.minio.io/docs/minio-bucket-notification-guide#NATS)|
|``notify.elasticsearch``| |[Configure to publish Minio events via Elasticsearch target.](https://docs.minio.io/docs/minio-bucket-notification-guide#Elasticsearch)|
|``notify.redis``| |[Configure to publish Minio events via Redis target.](https://docs.minio.io/docs/minio-bucket-notification-guide#Redis)|
|``notify.postgresql``| |[Configure to publish Minio events via PostgreSQL target.](https://docs.minio.io/docs/minio-bucket-notification-guide#PostgreSQL)|
|``notify.kafka``| |[Configure to publish Minio events via Apache Kafka target.](https://docs.minio.io/docs/minio-bucket-notification-guide#apache-kafka)|
|``notify.webhook``| |[Configure to publish Minio events via Webhooks target.](https://docs.minio.io/docs/minio-bucket-notification-guide#webhooks)|
|``notify.mysql``| |[Configure to publish Minio events via MySql target.](https://docs.minio.io/docs/minio-bucket-notification-guide#MySQL)|
|``notify.mqtt``| |[Configure to publish Minio events via MQTT target.](http://docs.minio.io/docs/minio-bucket-notification-guide#MQTT)|
|``notify.mqtt``| |[Configure to publish Minio events via MQTT target.](https://docs.minio.io/docs/minio-bucket-notification-guide#MQTT)|

## Explore Further
* [Minio Quickstart Guide](https://docs.minio.io/docs/minio-quickstart-guide)
2 changes: 1 addition & 1 deletion docs/erasure/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ Erasure code protects data from multiple drives failure, unlike RAID or replicat

## What is Bit Rot protection?

Bit Rot, also known as data rot or silent data corruption is a data loss issue faced by disk drives today. Data on the drive may silently get corrupted without signalling an error has occurred, making bit rot more dangerous than a permanent hard drive failure.
Bit Rot, also known as data rot or silent data corruption is a data loss issue faced by disk drives today. Data on the drive may silently get corrupted without signaling an error has occurred, making bit rot more dangerous than a permanent hard drive failure.

Minio's erasure coded backend uses high speed [HighwayHash](https://blog.minio.io/highwayhash-fast-hashing-at-over-10-gb-s-per-core-in-golang-fee938b5218a) checksums to protect against Bit Rot.

Expand Down
4 changes: 2 additions & 2 deletions docs/erasure/storage-class/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@ saving the object in specific number of data and parity disks.

## Storage usage

The selection of varying data and parity drives has a direct impact on the drive space usage. With storage class, you can optimise for high
redundancy or better drive space utilisation.
The selection of varying data and parity drives has a direct impact on the drive space usage. With storage class, you can optimize for high
redundancy or better drive space utilization.

To get an idea of how various combinations of data and parity drives affect the storage usage, let’s take an example of a 100 MiB file stored
on 16 drive Minio deployment. If you use eight data and eight parity drives, the file space usage will be approximately twice, i.e. 100 MiB
Expand Down
8 changes: 4 additions & 4 deletions docs/minio-limits.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,19 +36,19 @@ We found the following APIs to be redundant or less useful outside of AWS S3. If

#### List of Amazon S3 Bucket API's not supported on Minio

- BucketACL (Use [bucket policies](http://docs.minio.io/docs/minio-client-complete-guide#policy) instead)
- BucketACL (Use [bucket policies](https://docs.minio.io/docs/minio-client-complete-guide#policy) instead)
- BucketCORS (CORS enabled by default on all buckets for all HTTP verbs)
- BucketLifecycle (Not required for Minio erasure coded backend)
- BucketReplication (Use [`mc mirror`](http://docs.minio.io/docs/minio-client-complete-guide#mirror) instead)
- BucketReplication (Use [`mc mirror`](https://docs.minio.io/docs/minio-client-complete-guide#mirror) instead)
- BucketVersions, BucketVersioning (Use [`s3git`](https://github.com/s3git/s3git))
- BucketWebsite (Use [`caddy`](https://github.com/mholt/caddy) or [`nginx`](https://www.nginx.com/resources/wiki/))
- BucketAnalytics, BucketMetrics, BucketLogging (Use [bucket notification](http://docs.minio.io/docs/minio-client-complete-guide#events) APIs)
- BucketAnalytics, BucketMetrics, BucketLogging (Use [bucket notification](https://docs.minio.io/docs/minio-client-complete-guide#events) APIs)
- BucketRequestPayment
- BucketTagging

#### List of Amazon S3 Object API's not supported on Minio

- ObjectACL (Use [bucket policies](http://docs.minio.io/docs/minio-client-complete-guide#policy) instead)
- ObjectACL (Use [bucket policies](https://docs.minio.io/docs/minio-client-complete-guide#policy) instead)
- ObjectTorrent
- ObjectVersions

Expand Down
8 changes: 4 additions & 4 deletions docs/orchestration/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,10 @@ Minio is a cloud-native application designed to scale in a sustainable manner in

| Orchestration platforms|
|:---|
| [`Docker Swarm`](http://docs.minio.io/docs/deploy-minio-on-docker-swarm) |
| [`Docker Compose`](http://docs.minio.io/docs/deploy-minio-on-docker-compose) |
| [`Kubernetes`](http://docs.minio.io/docs/deploy-minio-on-kubernetes) |
| [`DC/OS`](http://docs.minio.io/docs/deploy-minio-on-dc-os) |
| [`Docker Swarm`](https://docs.minio.io/docs/deploy-minio-on-docker-swarm) |
| [`Docker Compose`](https://docs.minio.io/docs/deploy-minio-on-docker-compose) |
| [`Kubernetes`](https://docs.minio.io/docs/deploy-minio-on-kubernetes) |
| [`DC/OS`](https://docs.minio.io/docs/deploy-minio-on-dc-os) |

## Why is Minio cloud-native?
The term cloud-native revolves around the idea of applications deployed as micro services, that scale well. It is not about just retrofitting monolithic applications onto modern container based compute environment. A cloud-native application is portable and resilient by design, and can scale horizontally by simply replicating. Modern orchestration platforms like Swarm, Kubernetes and DC/OS make replicating and managing containers in huge clusters easier than ever.
Expand Down
6 changes: 3 additions & 3 deletions docs/zh_CN/bucket/notifications/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@

## 前提条件

*[这里](http://docs.minio.io/docs/zh_CN/minio-quickstart-guide)下载并安装Minio Server。
*[这里](https://docs.minio.io/docs/zh_CN/minio-client-quickstart-guide)下载并安装Minio Client。
*[这里](https://docs.minio.io/cn/minio-quickstart-guide)下载并安装Minio Server。
*[这里](https://docs.minio.io/cn/minio-client-quickstart-guide)下载并安装Minio Client。

<a name="AMQP"></a>
## 使用AMQP发布Minio事件
Expand Down Expand Up @@ -960,4 +960,4 @@ mc ls myminio/images-thumbnail
[2017-02-08 11:39:40 IST] 992B images-thumbnail.jpg
```

*注意* 如果你用的是 [distributed Minio](https://docs.minio.io/docs/zh_CN/distributed-minio-quickstart-guide),请修改所有节点的 ``~/.minio/config.json``
*注意* 如果你用的是 [distributed Minio](https://docs.minio.io/cn/distributed-minio-quickstart-guide),请修改所有节点的 ``~/.minio/config.json``
Loading

0 comments on commit e5e522f

Please sign in to comment.