Winse Blog

Stopping and starting, hustling and bustling, busy all the while, not knowing what to fear.

Deploying Redmine on a Raspberry Pi

Turning trash into treasure.

The server wasn't seeing much use, but I still occasionally wanted to access documents or Redmine, so I gritted my teeth and finally migrated everything from the x64 box to a Raspberry Pi 2 running Raspbian GNU/Linux 9 (stretch).

For a refresher on the original installation, see the earlier post: Redmine deployment and plugin installation.

The process involved rebuilding the Docker images, mostly by recompiling from the Ubuntu apt source packages locally on ARM. The images involved are sameersbn/ubuntu, sameersbn/postgresql and sameersbn/redmine.

Backup

Redmine itself can be backed up by simply archiving the whole /srv/docker/redmine/redmine directory. The database must be dumped out, though: its on-disk files are architecture-specific and not portable.
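A minimal backup sketch on the old x64 host; the container name postgresql is hypothetical, so adjust names and paths to your setup:

# Archive the Redmine data directory as-is.
tar czf redmine-files.tar.gz -C /srv/docker/redmine redmine

# Dump the database as portable SQL (-cC --if-exists emit clean/create statements).
docker exec postgresql pg_dump -U postgres -cC --if-exists redmine_production > redmine.dump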

This is the error you get when the recompiled ARM postgres is pointed directly at the x64 /srv/docker/redmine/postgresql data:

root@raspberrypi:~# docker run --rm -ti --entrypoint="bash" -v /srv/docker/redmine/postgresql:/var/lib/postgresql sameersbn/postgresql:9.6-2 

root@8f39c2904607:/var/lib/postgresql# sudo -HEu postgres /usr/lib/postgresql/9.6/bin/pg_ctl -D /var/lib/postgresql/9.6/main -w start
waiting for server to start....FATAL:  database files are incompatible with server
DETAIL:  The database cluster was initialized with USE_FLOAT8_BYVAL but the server was compiled without USE_FLOAT8_BYVAL.
HINT:  It looks like you need to recompile or initdb.
LOG:  database system is shut down
stopped waiting
pg_ctl: could not start server
Examine the log output.

Building the ubuntu image

Use the latest trusty release image directly, since older tags may not have a linux/arm/v7 variant: https://hub.docker.com/_/ubuntu/?tab=tags&page=1&name=trusty (a quick way to verify a tag's platforms is sketched after the build commands below).

root@raspberrypi:~/docker-ubuntu# vi Dockerfile
FROM ubuntu:trusty

docker build -t sameersbn/ubuntu:14.04.20170123 . 
docker tag sameersbn/ubuntu:14.04.20170123 sameersbn/ubuntu:14.04.20170711 
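Before committing to a tag you can check whether it actually publishes an arm/v7 variant by inspecting its manifest; a quick sketch, assuming a Docker CLI new enough (or with experimental features enabled) to support manifest inspection:

# List the platforms published for the tag.
docker manifest inspect ubuntu:trusty | grep -A2 '"platform"'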

Building the postgres image

The postgresql apt repository carries no armhf debs, so the matching debs have to be hand-built first. Start a container, then compile postgres inside it.
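A sketch of starting the build container; the /root/deb bind mount for collecting the compiled debs is an assumption, not the original setup:

# Throwaway trusty container; compiled .deb files land in /root/deb on the host.
docker run -ti --rm -v /root/deb:/deb -w /deb sameersbn/ubuntu:14.04.20170711 bash

The commands below are then run inside this container: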

wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
 && echo 'deb-src http://mirrors.zju.edu.cn/postgresql/repos/apt/ trusty-pgdg main' > /etc/apt/sources.list.d/pgdg.list \
 && sed -i 's/ports.ubuntu.com/mirrors.aliyun.com/' /etc/apt/sources.list \
 && sed -Ei 's/^# deb-src /deb-src /' /etc/apt/sources.list \
 && apt-get update

root@7d55994af11e:/# apt-get build-dep dh-exec postgresql-common  pgdg-keyring     
root@7d55994af11e:/# apt-get source --compile dh-exec postgresql-common  pgdg-keyring   

root@7d55994af11e:/# apt-get build-dep postgresql-${PG_VERSION} postgresql-client-${PG_VERSION} postgresql-contrib-${PG_VERSION} dh-exec
root@7d55994af11e:/# apt-get source --compile postgresql-${PG_VERSION} postgresql-client-${PG_VERSION} postgresql-contrib-${PG_VERSION} 

Then do a test install of these debs:

wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
 && echo 'deb-src http://mirrors.zju.edu.cn/postgresql/repos/apt/ trusty-pgdg main' > /etc/apt/sources.list.d/pgdg.list \
 && sed -i 's/ports.ubuntu.com/mirrors.aliyun.com/' /etc/apt/sources.list \
 && sed -Ei 's/^# deb-src /deb-src /' /etc/apt/sources.list \
 && apt-get update

apt-get install -y acl \
    libpipeline1 debhelper ssl-cert libxml2 libedit2 libxslt1.1 libperl5.18 libpython2.7 libpython3.4 libtcl8.6

dpkg -i pgdg-keyring_2018.2_all.deb postgresql-common_201.pgdg14.04+1_all.deb  postgresql-client-common_201.pgdg14.04+1_all.deb

dpkg -i libpq5_9.6.13-1.pgdg14.04+1_armhf.deb  postgresql-9.6_9.6.13-1.pgdg14.04+1_armhf.deb postgresql-client-9.6_9.6.13-1.pgdg14.04+1_armhf.deb postgresql-contrib-9.6_9.6.13-1.pgdg14.04+1_armhf.deb

Modify the Dockerfile to install the postgresql debs directly via dpkg:

root@raspberrypi:~/docker-postgresql# less Dockerfile 
...
COPY deb/ /tmp/psql/

RUN wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
 && echo 'deb-src http://mirrors.zju.edu.cn/postgresql/repos/apt/ trusty-pgdg main' > /etc/apt/sources.list.d/pgdg.list \
 && sed -i 's/ports.ubuntu.com/mirrors.aliyun.com/' /etc/apt/sources.list \
 && sed -Ei 's/^# deb-src /deb-src /' /etc/apt/sources.list \
 && apt-get update \
 && DEBIAN_FRONTEND=noninteractive apt-get install -y acl \
    libpipeline1 debhelper ssl-cert libxml2 libedit2 libxslt1.1 libperl5.18 libpython2.7 libpython3.4 libtcl8.6 \
 && cd /tmp/psql \
 && dpkg -i pgdg-keyring_2018.2_all.deb postgresql-common_201.pgdg14.04+1_all.deb  postgresql-client-common_201.pgdg14.04+1_all.deb \
 && dpkg -i libpq5_9.6.13-1.pgdg14.04+1_armhf.deb  postgresql-9.6_9.6.13-1.pgdg14.04+1_armhf.deb postgresql-client-9.6_9.6.13-1.pgdg14.04+1_armhf.deb postgresql-contrib-9.6_9.6.13-1.pgdg14.04+1_armhf.deb \
 && rm -rf /tmp/psql \
...

root@raspberrypi:~/docker-postgresql# docker build -t sameersbn/postgresql:9.6-2 .

Building the redmine image

For redmine the main missing piece is the ruby2.3 package; the steps mirror the ones above. First build the ARM ruby2.3 debs, then build the image.

sed -i 's/ports.ubuntu.com/mirrors.aliyun.com/' /etc/apt/sources.list \
&& sed -Ei 's/^# deb-src /deb-src /' /etc/apt/sources.list

apt-get build-dep ruby${RUBY_VERSION} ruby${RUBY_VERSION}-dev
apt-get source --compile ruby${RUBY_VERSION} ruby${RUBY_VERSION}-dev


root@raspberrypi:~/docker-redmine# vi Dockerfile 
...
COPY deb/ /tmp/ruby/

RUN apt-key adv --keyserver keyserver.ubuntu.com --recv E1DD270288B4E6030699E45FA1715D88E1DF1F24 \
 && echo "deb http://ppa.launchpad.net/git-core/ppa/ubuntu trusty main" >> /etc/apt/sources.list \
 && apt-key adv --keyserver keyserver.ubuntu.com --recv 80F70E11F0F0D5F10CB20E62F5DA5F09C3173AA6 \
 && echo "deb http://ppa.launchpad.net/brightbox/ruby-ng/ubuntu trusty main" >> /etc/apt/sources.list \
 && apt-key adv --keyserver keyserver.ubuntu.com --recv 8B3981E7A6852F782CC4951600A6F0A3C300EE8C \
 && echo "deb http://ppa.launchpad.net/nginx/stable/ubuntu trusty main" >> /etc/apt/sources.list \
 && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
 && echo 'deb-src http://apt.postgresql.org/pub/repos/apt/ trusty-pgdg main' > /etc/apt/sources.list.d/pgdg.list \
 && sed -i 's/ports.ubuntu.com/mirrors.aliyun.com/' /etc/apt/sources.list \
 && sed -Ei 's/^# deb-src /deb-src /' /etc/apt/sources.list \
 && apt-get update \
 && DEBIAN_FRONTEND=noninteractive apt-get install -y supervisor logrotate nginx mysql-client postgresql-client \
      imagemagick subversion git cvs bzr mercurial darcs rsync   locales openssh-client \
      gcc g++ make patch pkg-config gettext-base   libc6-dev zlib1g-dev libxml2-dev \
      libmysqlclient18 libpq5 libyaml-0-2 libcurl3 libssl1.0.0 uuid-dev xz-utils \
      libxslt1.1 libffi6 zlib1g gsfonts \
      libgmpxx4ldbl libgmp-dev libtk8.5 libxft2 libxrender1 libxss1 x11-common libtcl8.5 rubygems-integration \
 && dpkg -i /tmp/ruby/*.deb \
 && rm -rf /tmp/ruby \
 && update-locale LANG=C.UTF-8 LC_MESSAGES=POSIX \
 && gem install --no-document --version '< 2.0' bundler \
 && rm -rf /var/lib/apt/lists/*
...

root@raspberrypi:~/docker-redmine# docker build -t sameersbn/redmine:3.4.6 .

If the docker build fails, there are two ways to debug it:

# Option 1: commit the failed container and inspect it interactively.
docker commit $container_id debug-image
docker run -ti debug-image bash

# Option 2: each successful build step has already committed an intermediate
# image, so start from the last good layer and retry the failing step there.
# e.g. "The last successful step is 2. It produced an image a3acfa4ab179."
docker run -it a3acfa4ab179 bash

Migrating the existing data

# Backup (dump as portable SQL; the x64 binary files won't work on ARM)
pg_dump -U postgres -cC --if-exists redmine_production > redmine.dump

# Install compose
root@raspberrypi:~/redmine/bin# python -m pip install --upgrade pip
root@raspberrypi:~/redmine/bin# pip install docker-compose

# Start
root@raspberrypi:~/redmine/bin# docker-compose up -d

# Restore the database (copy the dump into the postgres container, then replay it)
docker cp redmine.dump e472d9ec3124:/var/lib/postgresql/
psql -U postgres <redmine.dump

# Fix up redmine
root@3f0774db222d:/home/redmine/redmine# apt-get install libmysqlclient-dev

https://stackoverflow.com/questions/13086073/rails-install-pg-cant-find-the-libpq-fe-h-header
root@3f0774db222d:/home/redmine/redmine# apt-get install libpq-dev
https://stackoverflow.com/questions/38200015/package-magickcore-was-not-found-in-the-pkg-config-search-path
root@3f0774db222d:/home/redmine/redmine# apt-get install libmagickwand-dev

root@3f0774db222d:/home/redmine/redmine# bundle install

After that, simply access it according to the port mappings configured in docker-compose.yml.
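For reference, a sketch of the relevant part of docker-compose.yml, modeled on the sameersbn/docker-redmine layout; the port and volume values here are illustrative rather than the original configuration:

# Hypothetical fragment, written out via heredoc; environment settings
# (DB host/user/password) are omitted for brevity.
cat > docker-compose.yml <<'EOF'
version: '2'
services:
  postgresql:
    image: sameersbn/postgresql:9.6-2
    volumes:
      - /srv/docker/redmine/postgresql:/var/lib/postgresql
  redmine:
    image: sameersbn/redmine:3.4.6
    depends_on:
      - postgresql
    ports:
      - "10080:80"    # host:container, so Redmine answers on port 10080
    volumes:
      - /srv/docker/redmine/redmine:/home/redmine/data
EOF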

–END

Appium: Android automated testing

References

Steps

On the phone, developer mode needs the following settings enabled:

  • (Optional) keep the screen unlocked
  • USB debugging
  • Install via USB
  • USB debugging (security settings)

Install and start Appium Desktop

  1. Click Edit Configurations

Set ANDROID_HOME to C:\Android\Sdk

  2. Start the server

  3. Use the app

Work through the steps in order, running an [Inspector Session] once first. This installs two apps on the phone; the developer-mode settings made at the start exist precisely so these apps can be installed.

3.1 Start an Inspector Session

After starting the server you land in a black log window. Click the first magnifier button in the top-right corner, [Start Inspector Session], and edit the [Desired Capabilities]:

{
  "platformName": "Android",
  "platformVersion": "7.1",
  "appPackage": "com.miui.calculator",
  "appActivity": ".cal.CalculatorActivity"
}

If you don't need to record scripts, C:\Android\Sdk\tools\bin\uiautomatorviewer.bat works just as well for inspecting the UI.
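The appPackage and appActivity values used in the capabilities can be read off the device with adb; a quick check, assuming the target app is open in the foreground:

# Confirm the device is attached, then show the focused package/activity.
adb devices
adb shell dumpsys window windows | grep mCurrentFocus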

3.2 Java client

public static final String APP_ID = "com.taobao.idlefish";

private static AppiumDriverLocalService service;

private static AndroidDriver<AndroidElement> driver;
private static WebDriverWait wait;

private void initialization() throws MalformedURLException {
  initialization(APP_ID, "com.taobao.idlefish.router.JumpActivity", true, false);
}

private void initialization1() throws MalformedURLException {
  initialization("com.miui.calculator", ".cal.CalculatorActivity", null, null);
}

/**
 * @param noReset    null will remove from the caps.
 * @param autoLaunch null will remove from the caps.
 */
protected void initialization(String appPackage, String appActivity, Boolean noReset, Boolean autoLaunch) throws MalformedURLException {
  URL url = new URL("http://localhost:4723/wd/hub");

  DesiredCapabilities capabilities = new DesiredCapabilities();
  capabilities.setCapability("automationName", "UiAutomator2" /* "Appium" */);
  capabilities.setCapability(MobileCapabilityType.PLATFORM_NAME, "Android");
  capabilities.setCapability(MobileCapabilityType.PLATFORM_VERSION, "7.1");

  capabilities.setCapability("appPackage", appPackage);
  capabilities.setCapability("appActivity", appActivity);
  // http://appium.io/docs/en/writing-running-appium/caps/
  capabilities.setCapability("noReset", noReset);
  capabilities.setCapability("autoLaunch", autoLaunch);
  
  capabilities.setCapability(MobileCapabilityType.NEW_COMMAND_TIMEOUT, 100);

  driver = new AndroidDriver<>(url, capabilities);
  wait = new WebDriverWait(driver, 20);
}

protected void finishing() {
  try {
      if (driver != null) {
          driver.quit();
      }
      if (service != null) {
          service.stop();
      }
  } catch (Exception e) {
  } finally {
      driver = null;
      service = null;
  }
}

protected void swiping() {
  Dimension windowSize = getSize();

  int startX = windowSize.width / 2;
  int startY = (int) (windowSize.height * 0.8);
  int endX = windowSize.width / 2;
  int endY = (int) (windowSize.height * 0.1);

  swipe(startX, startY, endX, endY, 1200);
}

protected void swipe(int x1, int y1, int x2, int y2, int waitMillis) {
  new TouchAction<>(driver) //
          .press(ElementOption.point(x1, y1))
          .waitAction(WaitOptions.waitOptions(Duration.ofMillis(waitMillis)))
          .moveTo(ElementOption.point(x2, y2))
          .release()
          .perform();
}

protected void touch(int x, int y) {
  new TouchAction<>(driver).tap(TapOptions.tapOptions().withPosition(PointOption.point(x, y))).perform();
}

protected void touch(Point p) {
  touch(p.x, p.y);
}

protected Dimension getSize() {
  return driver.manage().window().getSize();
}

protected Point getCenter() {
  Dimension size = getSize();
  return new Point(size.width / 2, size.height / 2);
}

private void touchAndWait() throws InterruptedException {
  touchAndWait(null);
}

private void touchAndWait(By by) throws InterruptedException {
  // give the UI a moment to settle
//        Thread.sleep(500);
  TimeUnit.MILLISECONDS.sleep(500);

  try {
      swiping();
  } catch (Exception e) {
      touch(getCenter());
  }

  if (by != null) {
      wait.until(ExpectedConditions.presenceOfElementLocated(by));
  }
}

private void doCalculator() throws InterruptedException {
  By btn_9_s = By.id("com.miui.calculator:id/btn_9_s");
  By btn_plus_s = By.id("com.miui.calculator:id/btn_plus_s");
  By btn_3_s = By.id("com.miui.calculator:id/btn_3_s");
  By btn_equal_s = By.id("com.miui.calculator:id/btn_equal_s");
  By textViews = By.className("android.widget.TextView");

  touchAndWait(btn_9_s);
  driver.findElement(btn_9_s).click();
  // driver.findElementByAccessibilityId("加");
  driver.findElement(btn_plus_s).click();
  driver.findElement(btn_3_s).click();
  driver.findElement(btn_equal_s).click();

  touchAndWait(textViews);
  List<? extends WebElement> allText = driver.findElements(textViews);
  boolean isOk = false;
  for (int i = 0; i < allText.size(); i++) {
      String txt = allText.get(i).getText();
      System.out.println(txt);

      // The element order can't be pinned down by ClassName on the first pass,
      // so iterate over all of them for the assertion.
      if (txt.equals("= 12")) {
          isOk = true;
          break;
      }
  }

  if (isOk) {
      System.out.println("Test passed.");
  } else {
      System.out.println("Test failed.");
  }
}

public void testCalculator() throws Exception {
  try {
      initialization1();

      doCalculator();
  } finally {
      finishing();
  }
}

public void testHello() throws InterruptedException, MalformedURLException {
  try {
      this.initialization1();

      doCalculator();

      driver.activateApp(APP_ID);
//            driver.startActivity(new Activity(APP_ID, "com.taobao.idlefish.router.JumpActivity"));

      test0();
      test1();
  } finally {
      this.finishing();
  }
}

private String toString(Rectangle rect) {
  return String.format("(%s,%s,%s,%s)", rect.x, rect.y, rect.width, rect.height);
}

private void printAllView() throws Exception {
  try {
      List<AndroidElement> allText = driver.findElementsByXPath("//*");
      for (int i = 0; i < allText.size(); i++) {
          AndroidElement ele = allText.get(i);
          
          System.out.println("===========================");
          System.out.println(ele.getTagName() + ","
                  + ele.isSelected() + "," 
                  + ele.isEnabled() + "," 
                  + ele.isDisplayed() + "," 
                  + ele.getText() + "," 
                  + toString(ele.getRect()));
          
          System.out.println(ele.getAttribute("class"));
          System.out.println(ele.getAttribute("content-desc"));
          System.out.println(ele.getAttribute("text"));
      }
      
  } catch (Exception e) {
      String errMsg = e.getMessage();
      System.err.println(errMsg);

      if (errMsg.contains("A session is either terminated or not started")) {
          try {
              this.finishing();
          } catch (Exception ex) {
          }

          this.initialization();
      }
  }
}

private void doProcessManual() throws Exception {
  while (true) {
      printAllView();

      swiping();

      Thread.sleep(1000);
  }
}

public void testXianyu() throws Exception {
  try {
      this.initialization();

      doProcessManual();
  } finally {
      this.finishing();
  }
}

private void test0() {
  System.out.println("appId: " + driver.getCurrentPackage());
  System.out.println("activity: " + driver.currentActivity());
  
  System.out.println(APP_ID + " installed?: " + driver.isAppInstalled(APP_ID));

//        driver.runAppInBackground(Duration.ofSeconds(-1));
//        driver.activateApp(appId);
//        driver.closeApp();
//        driver.resetApp();
//        driver.rotate(new DeviceRotation(0, 0, 0));
//        driver.launchApp();
}

private void test1() {
  System.out.println("AppStringMap size: " + driver.getAppStringMap().size());
  System.out.println("en AppStringMap size: " + driver.getAppStringMap("en").size());
  
  System.out.println("build: " + driver.getStatus().get("build"));
  
  System.out.println("DeviceTime: " + driver.getDeviceTime());
}

–END

Android Linux via Termux

All the really good stuff is in the links above; please click through and read them.


Quick excerpts

Installation

  • Termux is an Android terminal app and Linux environment.
  • A Linux environment and shell terminal for Android; no root required, and most Linux software can be installed with the apt package manager.
  • In China, download via the official F-Droid site; under each version's description there is a "Download APK" link. Version 0.72.
  • Android source code: termux-app

Usage

  • Some key bindings
Long-press (more... - keep screen on)

Swipe in from the left edge (navigation drawer)

Long-press KEYBOARD at the bottom left of the drawer
Volume Up + Q

Ctrl+A -> move the cursor to the beginning of the line
Ctrl+E -> move the cursor to the end of the line
Ctrl+K -> delete from the cursor to the end of the line
Ctrl+L -> clear the terminal
Ctrl+C -> abort the current process
Ctrl+D -> log out of the terminal session
-- the Volume Down button can be used to emulate Ctrl

Ctrl+Z -> suspend (send SIGTSTP to) the current process
bg

Volume Up+W -> Up arrow
Volume Up+A -> Left arrow
Volume Up+S -> Down arrow
Volume Up+D -> Right arrow
(tedious to press; can be added/changed via the extra keys view)

Volume Up+1 -> F1 (and Volume Up+2 -> F2, etc.)
Volume Up+0 -> F10
  • OTG USB keyboard
  • Remote access via sshd
pkg install openssh

ssh-keygen
cd .ssh
cat id_rsa.pub >> authorized_keys
chmod 600 authorized_keys

Then install an http-server so the private key can be downloaded from the phone (too lazy to plug in a data cable):

apt install nodejs
npm install -g http-server
http-server

Download id_rsa, then connect remotely:

uname -a
cat /proc/version
ifconfig wlan0
whoami
#u0_a144

sshd

netstat -anp|grep sshd
#8022

Access from a client:

ssh -i id_rsa -p 8022 u0_a144@192.168.2.241

$ which sshd
/data/data/com.termux/files/usr/bin/sshd
$ pwd
/data/data/com.termux/files/home

All files live under /data/data/com.termux/files, which is awkward to work with; it doesn't feel like Linux anymore. The root directory can be switched chroot-style by installing proot:

$ pkg install proot
$ termux-chroot
$ vi /etc/ssh/sshd_config
Port 22

pkg install tsu
tsudo sshd # the phone is already rooted
tsudo ps aux|grep sshd

It now listens on port 22. After starting with tsudo, though, the original u0_a144 user can no longer log in; it seems only the user who started sshd can log in!

Note: sadly, after chroot the http-server installed via nodejs no longer works… skip chroot for now and just prefix paths with $PREFIX!

If SecureCRT isn't compatible with the key pair generated on the phone, generate one on the PC, copy the public key over with ssh, and append it to authorized_keys:

scp -i id_rsa -P 8022 ~/.ssh/id_rsa.pub u0_a144@192.168.2.241:~/

Note: both keys must exist in the PC-side path; id_rsa.pub alone is not enough, id_rsa must be there too, even though SecureCRT doesn't seem to use it.

Switch the mirror and install common software:

export EDITOR=vi

apt edit-sources
deb http://mirrors.tuna.tsinghua.edu.cn/termux stable main

# or direct edit
#vi  $PREFIX/etc/apt/sources.list

apt update
apt upgrade 

pkg install vim curl wget net-tools git less tar unzip unrar tree
  • Install and configure zsh

Terminal color schemes
fonts-powerline fonts
Themes

sh -c "$(curl -fsSL https://github.com/Cabbagec/termux-ohmyzsh/raw/master/install.sh)"
# To change the color selection again later, rerun the script:
$ ~/termux-ohmyzsh/install.sh
# exit and restart the session for the config to take effect

Enter a number, leave blank to not to change: 14
Enter a number, leave blank to not to change: 6

vi .zshrc
ZSH_THEME="cloud"

Some zsh tips, all of them in everyday use:

Press Tab twice to list all completions and start selecting; move through them with Ctrl+n/p/f/b (down/up/right/left)
Option completion: in zsh, typing tar -<Tab> lists every option with its help text
Argument completion: typing kill <Tab> lists process names with their PIDs
Smarter history: when searching history with the Up arrow, zsh narrows the search. Type ls and press Up, and only previous ls commands are shown; plain Up without a prefix still walks the full history
Smart jumping: with autojump installed, zsh records the directories you visit; j <dirname> jumps straight there, with fuzzy matching and completion. After visiting hadoop-1.0.0, typing j hado jumps there correctly. j --stat shows your history database
Directory browsing and jumping: type d to list the directories visited in this session, then type a number from the list to jump
In the current directory, .. or ... (or just a directory name) changes directory; you barely need cd anymore. When you know the path, e.g. /usr/local/bin, type cd /u/l/b and press Tab to expand it
Wildcard search: ls -l **/*.sh recursively lists shell files under the current directory; with few files this can replace find. Use **/ for recursive search
Environment variable expansion: type the variable and press Tab to expand it to its value

Switching back to a plain shell:

chsh -s bash
  • nginx
pkg install php nginx php-fpm mariadb

nginx
netstat -an |grep 8080

fuser -k 8080/tcp
nginx -s reload

termux-chroot
vim /etc/php-fpm.d/www.conf
listen = /data/data/com.termux/files/usr/var/run/php-fpm.sock
->
listen = 127.0.0.1:9000

vim /etc/nginx/nginx.conf
worker_processes  1;
events {
    worker_connections  1024;
}

http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile        on;
    keepalive_timeout  65;

    server {

        listen       8080;
        server_name  localhost;
        root   /data/data/com.termux/files/usr/share/nginx/html;
        index  index.html index.htm;

        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   /data/data/com.termux/files/usr/share/nginx/html;
        }

        location ~ \.php$ {
            root           html;
            fastcgi_pass   127.0.0.1:9000;
            fastcgi_index  index.php;
            fastcgi_param  SCRIPT_FILENAME  /usr/share/nginx/html$fastcgi_script_name;
            include        fastcgi_params;
        }
    }

}

## -- wordpress

/data/data/com.termux/files/home/wordpress

vim /etc/nginx/nginx.conf
server {

    listen       8080;
    server_name  localhost;
    root   /data/data/com.termux/files/home/wordpress;
    index  index.html index.htm index.php;

    location ~ \.php$ {
        root           html;
        fastcgi_pass   127.0.0.1:9000;
        fastcgi_index  index.php;
        fastcgi_param  SCRIPT_FILENAME  /data/data/com.termux/files/home/wordpress$fastcgi_script_name;
        include        fastcgi_params;
    }

}

Start php-fpm and nginx

Start php-fpm and nginx separately inside the proot environment; starting nginx outside proot causes problems here.

php-fpm
nginx
  • tmux

Tmux is an excellent terminal multiplexer, similar to GNU Screen.

pkg install tmux
tmux new -s mysql

To put a session in the background, press Ctrl+b, then d; three keystrokes detach the current session. tmux's default prefix is Ctrl+b, and every 'prefix' below stands for Ctrl+b (a prefix-remap sketch follows the list).

List/switch sessions              prefix s
Detach from the session           prefix d
Rename the current session        prefix $

New window                        prefix c
Switch to the last active window  prefix space
Close a window                    prefix &
Switch by window number           prefix <number>

Switch to the next pane           prefix o
Show all pane numbers             prefix q
Split off a new pane vertically   prefix "
Split off a new pane horizontally prefix %
Temporarily maximize a pane       prefix z
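If Ctrl+b is awkward on a phone keyboard, the prefix can be remapped; a minimal ~/.tmux.conf sketch (Ctrl+a as the new prefix is just an example):

# Remap the tmux prefix from Ctrl+b to Ctrl+a.
cat > ~/.tmux.conf <<'EOF'
unbind C-b
set -g prefix C-a
bind C-a send-prefix
EOF
tmux source-file ~/.tmux.conf    # reload inside a running session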

Miscellaneous

  • Extra keys view

Termux also has an extra keys view, which extends the current keyboard with keys such as ESC, CTRL, ALT, TAB, -, / and so on.

vi ~/.termux/termux.properties
extra-keys = [['ESC','/','-','HOME','UP','END','PGUP'],['TAB','CTRL','ALT','LEFT','DOWN','RIGHT','PGDN']] 
  • Driving the phone's lower layers
termux-setup-storage

pkg install termux-api

termux-battery-status
termux-camera-info
termux-clipboard-get
termux-clipboard-set "PHP is the best language in the world"
termux-contact-list
termux-sms-inbox
termux-sms-send -n 10001 cxll
termux-telephony-call 10001
termux-wifi-connectioninfo
termux-wifi-scaninfo

By scripting against these system-level hooks, you can implement things like scheduled SMS sending or voice announcements; the possibilities are endless.
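For example, a tiny scheduled-report sketch built from the commands above; the number 10001 follows the earlier example and the one-hour interval is arbitrary:

#!/data/data/com.termux/files/usr/bin/bash
# Every hour, text the current battery percentage to a fixed number.
while true; do
    level=$(termux-battery-status | grep '"percentage"' | tr -dc '0-9')
    termux-sms-send -n 10001 "battery at ${level}%"
    sleep 3600
done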

  • Download tools
you-get
BaiduPCS-Go
  • Linux
wget http://funs.ml/file/atilo
  • Resources

youtube termux

–END

Try bk.tencent.com

[root@docker82 ~]# rz
rz waiting to receive.
Starting zmodem transfer.  Press Ctrl+C to cancel.
Transferring bkce_src-4.1.16.tgz...
  100% 1397597 KB    10668 KB/sec    00:02:11       0 Errors  

[root@docker82 ~]# mkdir /data
[root@docker82 ~]# tar xf bkce_src-4.1.16.tgz  -C /data
[root@docker82 ~]# rz
rz waiting to receive.
Starting zmodem transfer.  Press Ctrl+C to cancel.
Transferring ssl_certificates.tar.gz...
  100%      23 KB      23 KB/sec    00:00:01       0 Errors  

[root@docker82 ~]# tar xf ssl_certificates.tar.gz -C /data/src/cert
[root@docker82 ~]# cd /data/install/

[root@docker82 install]# setenforce 0
[root@docker82 install]# getenforce 
Permissive
[root@docker82 install]# vi /etc/selinux/config 

# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected.
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted

[root@docker82 install]# reboot 

[root@docker82 install]# yum -y install epel-release

[root@docker82 install]# vi /etc/security/limits.conf 
* hard nofile 102400

[root@docker82 install]# ulimit -n 102400 

ulimit -s unlimited

[root@docker82 install]# service firewalld stop 
[root@docker82 install]# service NetworkManager stop 

[root@docker82 install]# ./install_minibk -y 

–END

Try K8s

1. Log in and configure the host information:

$ hostnamectl --static set-hostname master-1

$ cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.251.51 master-1
192.168.251.50 node-1

2. Install Docker

cat | bash <<EOF
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all
yum makecache

## docker version:(Version:           18.09.3)
# https://kubernetes.io/docs/setup/release/notes/#external-dependencies
# https://docs.docker.com/install/linux/docker-ce/centos/

yum remove docker \
  docker-client \
  docker-client-latest \
  docker-common \
  docker-latest \
  docker-latest-logrotate \
  docker-logrotate \
  docker-engine

yum install -y yum-utils \
  device-mapper-persistent-data \
  lvm2

yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce docker-ce-cli containerd.io

yum list docker-ce --showduplicates | sort -r

systemctl enable docker
systemctl start docker

systemctl disable firewalld
service firewalld stop

sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config 
setenforce 0
EOF

3. Getting over the GFW

You need a host outside China!

ssh -NC -D 1080 9.9.9.9 -p 88888

curl --socks5-hostname 127.0.0.1:1080 www.google.com

mkdir /etc/systemd/system/docker.service.d
cat > /etc/systemd/system/docker.service.d/socks5-proxy.conf <<EOF
[Service]
Environment="ALL_PROXY=socks5://127.0.0.1:1080" "NO_PROXY=localhost,127.0.0.1,10.0.0.0/8,192.168.0.0/16"
EOF

systemctl daemon-reload
systemctl restart docker

# cache rpm
sed -i 's/keepcache=0/keepcache=1/' /etc/yum.conf 

4. Install K8s

https://kubernetes.io/docs/setup/independent/install-kubeadm/

Add the repo, including proxy configuration:

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
proxy=socks5://127.0.0.1:1080
EOF


  ## yum.conf allows you to have per-repository settings as well as global ([main]) settings; they can also be defined inside an individual repo's configuration!
  ##sed '$a\\nproxy=socks5://127.0.0.1:1080' /etc/yum.conf 
  ## proxy=_none_

  
# Set SELinux in permissive mode (effectively disabling it)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

systemctl enable --now kubelet

5. Configure K8s

5.1 Pull the images first

$ kubeadm config images pull
I0409 00:04:13.693615   18479 version.go:96] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.txt": Get https://dl.k8s.io/release/stable-1.txt: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
I0409 00:04:13.694196   18479 version.go:97] falling back to the local client version: v1.14.0
[config/images] Pulled k8s.gcr.io/kube-apiserver:v1.14.0
[config/images] Pulled k8s.gcr.io/kube-controller-manager:v1.14.0
[config/images] Pulled k8s.gcr.io/kube-scheduler:v1.14.0
[config/images] Pulled k8s.gcr.io/kube-proxy:v1.14.0
[config/images] Pulled k8s.gcr.io/pause:3.1
[config/images] Pulled k8s.gcr.io/etcd:3.3.10
[config/images] Pulled k8s.gcr.io/coredns:1.3.1

5.2 Initialize

$ kubeadm init --pod-network-cidr=10.244.0.0/16

Problem 1 you will run into: https://github.com/kubernetes/kubeadm/issues/610

$ journalctl -xeu kubelet
....
Apr 09 00:35:33 docker81 kubelet[24062]: I0409 00:35:33.996517   24062 server.go:625] --cgroups-per-qos enabled, but --cgroup-root was not specified.  defaulting to /
Apr 09 00:35:33 docker81 kubelet[24062]: F0409 00:35:33.996923   24062 server.go:265] failed to run Kubelet: Running with swap on is not supported, please disable swap! or set --fail-swap
Apr 09 00:35:33 docker81 systemd[1]: kubelet.service: main process exited, code=exited, status=255/n/a
Apr 09 00:35:34 docker81 systemd[1]: Unit kubelet.service entered failed state.
Apr 09 00:35:34 docker81 systemd[1]: kubelet.service failed.

Fix:

$ swapoff -a
$ sed -i '/swap/s/^/#/' /etc/fstab


  # disable swap
  sudo swapoff -a
  # enable swap again
  sudo swapon -a
  # remount the root filesystem read-write
  sudo mount -n -o remount,rw /

5.3 Initialize again

Clean up first:

$ 
kubeadm reset
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X

$ kubeadm init --pod-network-cidr=10.244.0.0/16

I0409 05:19:35.856967    3656 version.go:96] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.txt": Get https://dl.k8s.io/release/stable-1.txt: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
I0409 05:19:35.857127    3656 version.go:97] falling back to the local client version: v1.14.1
[init] Using Kubernetes version: v1.14.1
[preflight] Running pre-flight checks
        [WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
        [WARNING Hostname]: hostname "master-1" could not be reached
        [WARNING Hostname]: hostname "master-1": lookup master-1 on 192.168.253.254:53: no such host
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master-1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.251.51]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master-1 localhost] and IPs [192.168.251.51 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master-1 localhost] and IPs [192.168.251.51 127.0.0.1 ::1]
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 13.506192 seconds
[upload-config] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.14" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --experimental-upload-certs
[mark-control-plane] Marking the node master-1 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master-1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: zpf7je.xarawormfaeapib3
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.251.51:6443 --token zpf7je.xarawormfaeapib3 \
    --discovery-token-ca-cert-hash sha256:d7ff941542a03645209ad4149e1baa1c40ddad7e9c8296f82fe3bd2a91191f66 

Run the following to set up the kubeconfig:

$ 
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

5.4 Check the status

$ kubectl cluster-info 

Kubernetes master is running at https://192.168.251.51:6443
KubeDNS is running at https://192.168.251.51:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.


$ kubectl get pods -n kube-system 
$ kubectl get pods --all-namespaces

NAMESPACE     NAME                               READY   STATUS    RESTARTS   AGE
kube-system   coredns-fb8b8dccf-hcrgw            0/1     Pending   0          100s
kube-system   coredns-fb8b8dccf-zct25            0/1     Pending   0          100s
kube-system   etcd-master-1                      1/1     Running   0          57s
kube-system   kube-apiserver-master-1            1/1     Running   0          47s
kube-system   kube-controller-manager-master-1   1/1     Running   0          62s
kube-system   kube-proxy-p962p                   1/1     Running   3          100s
kube-system   kube-scheduler-master-1            1/1     Running   0          45s

5.5 Add the network add-on: the DNS pods need a network component before they can start

$ cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
$ sysctl --system


$ kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml

clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.extensions/kube-flannel-ds-amd64 created
daemonset.extensions/kube-flannel-ds-arm64 created
daemonset.extensions/kube-flannel-ds-arm created
daemonset.extensions/kube-flannel-ds-ppc64le created
daemonset.extensions/kube-flannel-ds-s390x created

Check the status again; now coredns is running too:

$ kubectl get pods --all-namespaces

NAMESPACE     NAME                               READY   STATUS    RESTARTS   AGE
kube-system   coredns-fb8b8dccf-hcrgw            1/1     Running   0          8m7s
kube-system   coredns-fb8b8dccf-zct25            1/1     Running   0          8m7s
kube-system   etcd-master-1                      1/1     Running   0          7m24s
kube-system   kube-apiserver-master-1            1/1     Running   0          7m14s
kube-system   kube-controller-manager-master-1   1/1     Running   0          7m29s
kube-system   kube-flannel-ds-amd64-947zx        1/1     Running   0          2m32s
kube-system   kube-proxy-p962p                   1/1     Running   3          8m7s
kube-system   kube-scheduler-master-1            1/1     Running   0          7m12s

6. Install the Dashboard

First lift the restriction that keeps pods off the master, then deploy the dashboard:

$ kubectl taint nodes --all node-role.kubernetes.io/master-

node/master-1 untainted

$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml

secret/kubernetes-dashboard-certs created
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
deployment.apps/kubernetes-dashboard created
service/kubernetes-dashboard created

Check the logs to locate the fault:

kubectl describe pod kubernetes-dashboard-5f7b999d65-lt2df -n kube-system

Check the status:

$ kubectl get pods --all-namespaces

NAMESPACE     NAME                                    READY   STATUS    RESTARTS   AGE
kube-system   coredns-fb8b8dccf-hcrgw                 1/1     Running   0          15m
kube-system   coredns-fb8b8dccf-zct25                 1/1     Running   0          15m
kube-system   etcd-master-1                           1/1     Running   0          14m
kube-system   kube-apiserver-master-1                 1/1     Running   0          14m
kube-system   kube-controller-manager-master-1        1/1     Running   0          15m
kube-system   kube-flannel-ds-amd64-947zx             1/1     Running   0          10m
kube-system   kube-proxy-p962p                        1/1     Running   3          15m
kube-system   kube-scheduler-master-1                 1/1     Running   0          14m
kube-system   kubernetes-dashboard-5f7b999d65-lt2df   1/1     Running   0          6m6s

7. Access the Dashboard

7.1 View locally

$ kubectl proxy
Starting to serve on 127.0.0.1:8001

http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy

7.2 View from a user's browser

1* A method that fails:

disable-filter=true disables request filtering; without it our requests are rejected with Forbidden (403) Unauthorized.

$ kubectl proxy --address=0.0.0.0 --disable-filter=true

You can reach the login page this way, but you cannot actually log in. Over plain HTTP the Dashboard only allows access from localhost and 127.0.0.1 (i.e. you must browse on the machine where kubectl runs); all other addresses are only allowed over HTTPS.
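Another way around this is kubectl port-forward, which tunnels the Dashboard's HTTPS port to whichever machine runs kubectl; a sketch, assuming the service name created by the deployment above:

# Forward local port 8443 to port 443 of the dashboard service.
kubectl -n kube-system port-forward service/kubernetes-dashboard 8443:443
# then browse to https://127.0.0.1:8443/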

2* A method that should work (untested):

Set the Kubernetes API Server's --anonymous-auth option to false for anonymous requests on the secure port, then use --basic-auth-file to configure username login.

https://www.okay686.cn/984.html

3* Certificate + token method:

3-1 Certificate

As introduced in the official docs:

Method 0:

Request a certificate.

Method 1:

The API Server authenticates clients with certificates, so we need to create one first. Locate kubectl's config file, /etc/kubernetes/admin.conf by default, which has already been copied to ~/.kube/config. Then use its client-certificate-data and client-key-data to generate a p12 file with the following commands:

grep 'client-certificate-data' ~/.kube/config | head -n 1 | awk '{print $2}' | base64 -d >> kubecfg.crt
grep 'client-key-data' ~/.kube/config | head -n 1 | awk '{print $2}' | base64 -d >> kubecfg.key
openssl pkcs12 -export -clcerts -inkey kubecfg.key -in kubecfg.crt -out kubecfg.p12 -name "kubernetes-client"

Finally, import the p12 file generated above and reopen the browser.

Lazy method 2:

What’s causing: forbidden: User “system:anonymous” in some Cloud Providers https://github.com/kubernetes-incubator/apiserver-builder-alpha/issues/225

After reading this: https://kubernetes.io/docs/admin/authentication/#anonymous-requests then I tried this:

kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous

and it solved the problem.

3-2 Permissions

Method 1: create a new user

[root@docker81 ~]# vi dashboard-admin-user.yml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system

---
# ------------ role binding ---------------- #
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

[root@docker81 ~]# kubectl create -f dashboard-admin-user.yml
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created

[root@docker81 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name:         admin-user-token-28dwk
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: c23340a7-5a70-11e9-b2ca-005056887940

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1025 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTI4ZHdrIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJjMjMzNDBhNy01YTcwLTExZTktYjJjYS0wMDUwNTY4ODc5NDAiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.uaG_faYzLhiadXfz4XuQ_-X9tdl5exKQjbCK7OJqBFMCYve532O-8jH_zg5E2rgFUQycQUhH_siS_GCi0MoE8mqc-WJwIfaGB6QnLYOFRjvWWNhO_16FH56YaEZxGY2p62OPt4d1O9NK4KZLEcoZNbYYuol_9kBfAj9Imf3ii58TNGZ0WiRigXjLOsJK5P2IPyE4c_rqunsrb_sO1z56jgRTL9qnu2zsby8obJxNZefBnsTgakXnu-P8PwXg0PekLBWQNNr-G7TeiKCpfCGCjHM6gmEKdTjiernFbD1GxOG588pmZfWsFtjNNWuNAlfMe1bXpy2m981taQUTQa3kWQ

Visit the HTTPS address:

https://192.168.251.51:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login

Method 2: fix it at the source

Reading the notes in kubernetes-dashboard.yaml makes it clear why its role is named kubernetes-dashboard-minimal: in one sentence, that Role simply doesn't have enough permissions! So we can change the RoleBinding to a ClusterRoleBinding and change kind and name in roleRef to use the almighty cluster-admin ClusterRole (superuser rights, with full access to kube-apiserver), as follows:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

After the change, re-create the resources from kubernetes-dashboard.yaml and the Dashboard will have permission to access the entire K8s cluster API.
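Re-creating is just a delete/apply cycle against the edited manifest; a sketch, assuming the edited YAML was saved locally as kubernetes-dashboard.yaml:

# Replace the dashboard with the edited manifest and watch it come back.
kubectl delete -f kubernetes-dashboard.yaml
kubectl apply -f kubernetes-dashboard.yaml
kubectl -n kube-system get pods | grep dashboard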

3-3 Skip the login

kubectl edit deployment/kubernetes-dashboard --namespace=kube-system

      - args:
        - --auto-generate-certificates
        - --enable-skip-login

8. Deploy an application

[root@s1 ~]# kubectl create -f https://k8s.io/docs/tasks/run-application/deployment.yaml
deployment.apps/nginx-deployment created

kubectl describe deployment nginx-deployment
kubectl get pods -l app=nginx

[root@s1 ~]# kubectl describe pod nginx-deployment-76bf4969df-bmslp 

kubectl apply -f https://k8s.io/examples/application/deployment-update.yaml
kubectl apply -f https://k8s.io/docs/tutorials/stateless-application/deployment-update.yaml
kubectl apply -f https://k8s.io/examples/application/deployment-scale.yaml

kubectl describe deployment nginx-deployment
kubectl get pods -l app=nginx
kubectl describe pod <pod-name>

[root@s1 ~]# curl 172.17.0.4

kubectl delete deployment nginx-deployment

https://kubernetes.io/docs/tasks/access-kubernetes-api/http-proxy-access-api/

[root@docker81 ~]# curl localhost:8001/api
{
  "kind": "APIVersions",
  "versions": [
    "v1"
  ],
  "serverAddressByClientCIDRs": [
    {
      "clientCIDR": "0.0.0.0/0",
      "serverAddress": "192.168.193.81:6443"
    }
  ]
}

[root@docker81 ~]# curl localhost:8001/api/v1/namespaces/default/pods
{
  "kind": "PodList",
  "apiVersion": "v1",
  "metadata": {
    "selfLink": "/api/v1/namespaces/default/pods",
    "resourceVersion": "25607"
  },
  "items": []
}

9. Assorted commands:

kubectl cluster-info

kubectl get nodes --all-namespaces -o wide

kubectl get pods --namespace=kube-system
kubectl get pod --all-namespaces=true

kubectl describe pods
kubectl describe pod coredns-7748f7f6df-7p58x --namespace=kube-system

kubectl get services kube-dns --namespace=kube-system

kubectl logs -n cattle-system cattle-node-agent-w5rj4

kubectl -n kube-system get secret
kubectl -n kube-system describe secret kubernetes-dashboard-token-zlfj7
kubectl -n kube-system get secret kubernetes-dashboard-token-zlfj7 -o yaml

kubectl -n kube-system describe $(kubectl -n kube-system get secret -n kube-system -o name | grep namespace) | grep token

kubectl -n kube-system get service kubernetes-dashboard
kubectl -n kube-system get svc kubernetes-dashboard
kubectl -n kube-system get secret admin-token-nwphb -o jsonpath={.data.token}|base64 -d
kubectl get secret $(kubectl get serviceaccount my-admin-user -n kube-system -o jsonpath="{.secrets[0].name}") -o jsonpath="{.data.token}" -n kube-system | base64 --decode

kubectl delete -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/alternative/kubernetes-dashboard.yaml 

kubectl -n kube-system edit service kubernetes-dashboard

kubectl -n kube-system delete $(kubectl -n kube-system get pod -o name | grep dashboard)

kubectl delete pod NAME --grace-period=0 --force
  • DNS resolution: exec into the container and run the command
[root@k8s-master app]# kubectl exec -it coredns-78fcdf6894-244mp /bin/sh  -n kube-system                         
/ # nslookup kubernetes.default 127.0.0.1

–END